├── .gitignore ├── Install-Required-Tools-on-Ubuntu.md ├── LICENSE ├── README.md ├── all-in-one.sh ├── big-dipper-as-block-explorer ├── .gitignore ├── 1_install-bdjuno.sh ├── 2_install-bdjuno.sh ├── 3_install-hasura.sh ├── 4_install-front-end.sh ├── README.md ├── _config.sh ├── cleanup.sh └── template.config.yaml ├── blockchain-in-docker ├── .gitignore ├── 1_prepare-genesis.sh ├── 2_build-docker-image.sh ├── README.md ├── _config.sh ├── _make_binary.sh ├── cleanup.sh ├── gov-sample-proposals │ ├── denom-cosmos-on-evmos │ │ ├── 2-gov-register-coin.json │ │ └── README.md │ └── evmos │ │ ├── 1-gov_min_deposit_change.json │ │ └── README.md ├── template.DockerfileX └── template.networkX.yml ├── env.sh ├── hermes-as-ibc-relayer ├── .gitignore ├── README.md ├── _make_binary.sh ├── cleanup.sh ├── create-relayer.sh └── template-config.toml ├── keys └── README.md ├── sample.cosmos-and-evmos.override-env.sh └── sample.expose-big-dipper-ui.override-env.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | data/ 3 | extra_func.lic 4 | override-env.sh 5 | *.swp 6 | *.bak 7 | tmp_* 8 | tmp/ 9 | 10 | source-code*/ 11 | 12 | *genesis.json 13 | *config.toml 14 | *app.toml 15 | rel_*_seed.key 16 | -------------------------------------------------------------------------------- /Install-Required-Tools-on-Ubuntu.md: -------------------------------------------------------------------------------- 1 | I use `Ubuntu 22.04 LTS` machine for development purpose so I will provide some command lines that helps you install tools required by scripts within this repo 2 | 3 | Update system first `sudo apt-get update -y` 4 | 5 | - Go 1.20.2 6 | > cd /tmp 7 | 8 | > wget https://go.dev/dl/go1.20.2.linux-amd64.tar.gz 9 | 10 | > sudo tar -zxvf go1.20.2.linux-amd64.tar.gz -C /usr/local/ 11 | 12 | > mkdir ~/go 13 | 14 | > echo -e "\nexport GOPATH=\\$HOME/go\nexport PATH=\\$PATH:/usr/local/go/bin:\\$GOPATH/bin" >> ~/.bashrc 15 | 16 | - jq 17 | > sudo apt-get install jq -y 18 | 19 | - yq & tomlq 20 | > sudo apt update -y && sudo apt install python3-pip -y && pip3 install yq 21 | 22 | - docker 23 | > sudo apt-get update -y 24 | 25 | > sudo apt-get install ca-certificates curl gnupg lsb-release -y 26 | 27 | > sudo mkdir -p /etc/apt/keyrings 28 | 29 | > curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg 30 | 31 | > echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 32 | 33 | > sudo apt-get update -y 34 | 35 | > sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin -y 36 | 37 | > sudo groupadd docker 38 | 39 | > sudo usermod -aG docker $USER 40 | 41 | - docker-compose 42 | > mkdir -p ~/.docker/cli-plugins/ 43 | 44 | > curl -SL https://github.com/docker/compose/releases/download/v2.6.0/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose 45 | 46 | > chmod +x ~/.docker/cli-plugins/docker-compose 47 | 48 | > sudo ln -s ~/.docker/cli-plugins/docker-compose /usr/bin/docker-compose 49 | 50 | - Rust 51 | > curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 52 | 53 | - psql (PostgreSQL client) 54 | > sudo apt install postgresql-client -y 55 | 56 | - NodeJS 57 | > curl -sL https://deb.nodesource.com/setup_16.x -o /tmp/nodesource_setup.sh 58 | 59 | > sudo bash /tmp/nodesource_setup.sh 60 | 61 | > sudo apt-get install 
-y nodejs 62 | 63 | - Yarn 64 | > curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - 65 | 66 | > echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list 67 | 68 | > sudo apt update -y && sudo apt-get install -y yarn 69 | 70 | - hasura-cli 71 | > curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | bash 72 | 73 | #### Remember to relog to all new PATH update takes effect 74 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Victor Pham 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### Here you can find sample scripts and notes of how to [setup multiple EVMOS chains](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/tree/main/blockchain-in-docker) and [connect them via an IBC (using Hermes as relayer)](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/tree/main/hermes-as-ibc-relayer) to transfer tokens & coins cross chains, as well as [block explorers for them](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/tree/main/big-dipper-as-block-explorer) (using Big Dipper 2.0 for Cosmos based chains) 2 | 3 | I wrote hundred lines of comments which explains every steps in each script file and each readme file, hope that helps you guys in exploring Evmos & Cosmos Ecosystem 4 | 5 | Not only EVMOS, you can use this script to build for other chains like cosmos,... 
Look at [this](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/sample.cosmos-and-evmos.override-env.sh) to know how to do it 6 | 7 | #### Pre-requisites 8 | - [Go 1.20.2+](https://go.dev/doc/install) to build chains 9 | - [jq](https://stedolan.github.io/jq/download) 10 | - [yq + tomlq (build on top of jq)](https://github.com/kislyuk/yq) 11 | - [Rust 1.60+](https://www.rust-lang.org/tools/install) to build Hermes - IBC relayer 12 | - [docker](https://docs.docker.com/engine/install/) & [docker compose](https://docs.docker.com/compose/install/) 13 | - Node JS & Yarn to build & run Big Dipper 2.0 for Cosmos based chains 14 | - PostgreSQL client (psql) for postgres 12.5 to setup Big Dipper 15 | - [Hasura cli](https://hasura.io/docs/latest/graphql/core/hasura-cli/install-hasura-cli/) for bdjuno 16 | 17 | You can find commands to install the above tools [here](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/Install-Required-Tools-on-Ubuntu.md) 18 | 19 | I created all of the following samples on an Ubuntu 22.04 LTS machine with 4 CPU 16 GB RAM. It can run 20 | - 2 [EVMOS](https://github.com/evmos/evmos/tree/v12.1.6) chains 21 | - 1 [Hermes](https://github.com/informalsystems/ibc-rs/tree/v1.5.0) as IBC relayer to connect those chains 22 | - 2 [bdjuno & hasura](https://github.com/forbole/bdjuno/tree/chains/evmos/mainnet) as block explorer's backend & graphql service 23 | - 2 [Big Dipper 2.0](https://github.com/forbole/big-dipper-2.0-cosmos) as block explorer's frontend 24 | 25 | without any performance issue (CPU ~20-80% per core, ram ~50% via htop) 26 | 27 | ### List of samples: 28 | - [Script to build EVMOS chains and run with docker compose](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/blockchain-in-docker) 29 | - [Script to build & setup Hermes as an IBC relayer](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/hermes-as-ibc-relayer) 30 | - [Script to setup Big Dipper (backend bdjuno with hasura as graphql service & frontend Big Dipper 2.0 for Cosmos based chains)](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/big-dipper-as-block-explorer) 31 | 32 | ### Software versions: 33 | 34 | - [Evmos v12.1.6](https://github.com/evmos/evmos/tree/v12.1.6) 35 | - [Hermes v1.5.0](https://github.com/informalsystems/ibc-rs/tree/v1.5.0) 36 | - [bdjuno branch evmos](https://github.com/forbole/bdjuno/tree/chains/evmos/mainnet) 37 | - [Big Dipper 2.0 for Cosmos chains v2.x](https://github.com/forbole/big-dipper-2.0-cosmos) 38 | 39 | #### Customization 40 | - You can change their version by updating file [env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/env.sh) 41 | - This sample is using keyring `test`, you may need to change to `file` for secure your test env if needed in [env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/env.sh) 42 | - You can create `override-env.sh` file and override values of [env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/env.sh) 43 | - The [sample.cosmos-and-evmos.override-env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/sample.cosmos-and-evmos.override-env.sh) file contains example which tell you how to override variables of [env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/env.sh) and make the chain 1 use [Cosmos (gaia) source](https://github.com/cosmos/gaia/tree/v10.0.0), rename it to `override-env.sh` to make it effectives 44 | - The 
[sample.expose-big-dipper-ui.override-env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/sample.expose-big-dipper-ui.override-env.sh) file contains example which tell you how to override variables of [env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/env.sh) and make the block explorer UI (Big Dipper 2.0) accessible from outside world, rename it to `override-env.sh` to make it effectives 45 | 46 | #### Default ports used in these samples 47 | 48 | | Sample name | Port No. for chain 1 | Port No. for chain 2 | Desc | 49 | | --- | --- | --- | --- | 50 | | Blockchain in docker | 26657 | 36657 | **RPC** | 51 | | Blockchain in docker | 8545 | 18545 | **Json RPC** | 52 | | Blockchain in docker | 8546 | 18546 | **Websocket Json RPC** | 53 | | Blockchain in docker | 1317 | 11317 | **REST API** | 54 | | Blockchain in docker | 9090 | 19090 | gRPC | 55 | | Blockchain in docker | 26656 | 36656 | P2P | 56 | | Big Dipper as Block Explorer | 3800 | 3802 | **Big Dipper front end** | 57 | | Big Dipper as Block Explorer | 8080 | 8082 | Hasura's graphql service | 58 | | Big Dipper as Block Explorer | 3000 | 3002 | bdjuno hasura action base | 59 | 60 | #### Default service name & container name used in these samples 61 | | Sample name | Name for chain 1 | Name for chain 2 | Desc | 62 | | --- | --- | --- | --- | 63 | | Blockchain in docker | vtevmos10 | vtevmos20 | Node 0 (validator 1) of EVMOS chain run via docker-compose | 64 | | Blockchain in docker | vtevmos11 | vtevmos21 | Node 1 (validator 2) of EVMOS chain run via docker-compose | 65 | | Blockchain in docker | vtevmos12 | vtevmos22 | Node 2 (validator 3) of EVMOS chain run via docker-compose | 66 | | Hermes as IBC relayer | (hermes-svc as a shared service for both chains) | (hermes-svc as a shared service) | Hermes connects both chains (Debian* daemon service) | 67 | | Big Dipper as Block Explorer | bdjuno-svc1 | bdjuno-svc2 | bdjuno app (Debian* daemon service) | 68 | | Big Dipper as Block Explorer | bdjdb1 | bdjdb2 | bdjuno & hasura database (docker) | 69 | | Big Dipper as Block Explorer | bdjhasura1 | bdjhasura2 | Hasura graphql service (docker) | 70 | | Big Dipper as Block Explorer | bd2-svc1 | bd2-svc2 | Big Dipper front end (Debian* daemon service) | 71 | 72 | `(Not recommended) Tips:` There is script named [all-in-one.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/all-in-one.sh), if you are using an OS which supports `systemd` and some required tools, you can create everything just by running this script (EVMOS x2 + Big Dipper x2 + Hermes) 73 | -------------------------------------------------------------------------------- /all-in-one.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script will setup 4 | # - 2 chains 5 | # - 1 Hermes as IBC relayer 6 | # - 2 Big Dipper as Block Explorer 7 | # In a single command! 8 | # It is not recommended to use this script 9 | 10 | if [ ! 
-f "./env.sh" ]; then 11 | echo >&2 "ERR: Wrong working directory" 12 | exit 1 13 | fi 14 | 15 | show_required_tools() { 16 | MSG="'$1' tool is required" 17 | echo >&2 "ERR: $MSG" 18 | echo >&2 "______" 19 | echo >&2 "The app requires following tools:" 20 | echo >&2 "- jq" 21 | echo >&2 " + https://stedolan.github.io/jq/download/" 22 | echo >&2 " + Hint: sudo apt install jq -y" 23 | echo >&2 "- yq & tomlq" 24 | echo >&2 " + https://github.com/kislyuk/yq/" 25 | echo >&2 " + Hint: sudo apt install python3-pip -y && pip3 install yq" 26 | echo >&2 "- go" 27 | echo >&2 " + https://go.dev/doc/install" 28 | echo >&2 "- docker & docker-compose" 29 | echo >&2 "- Rust & cargo" 30 | echo >&2 " + https://www.rust-lang.org/tools/install" 31 | echo >&2 " + Hint: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh" 32 | echo >&2 "- psql (PostgreSQL client)" 33 | echo >&2 " + Hint: sudo apt install postgresql-client" 34 | echo >&2 "- npm" 35 | echo >&2 "- yarn" 36 | echo >&2 "- hasura-cli" 37 | echo >&2 " + https://hasura.io/docs/latest/graphql/core/hasura-cli/install-hasura-cli/" 38 | echo >&2 " + Hint: curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | bash" 39 | echo >&2 "______" 40 | echo >&2 "ERR: $MSG" 41 | } 42 | 43 | command -v jq > /dev/null 2>&1 || { show_required_tools 'jq'; exit 1; } 44 | command -v yq > /dev/null 2>&1 || { show_required_tools 'yq'; exit 1; } 45 | command -v tomlq > /dev/null 2>&1 || { show_required_tools 'tomlq'; exit 1; } 46 | command -v bc > /dev/null 2>&1 || { show_required_tools 'bc'; exit 1; } 47 | command -v make > /dev/null 2>&1 || { show_required_tools 'make'; exit 1; } 48 | command -v go > /dev/null 2>&1 || { show_required_tools 'go'; exit 1; } 49 | command -v docker > /dev/null 2>&1 || { show_required_tools 'docker'; exit 1; } 50 | command -v 'docker-compose' > /dev/null 2>&1 || { show_required_tools 'docker-compose'; exit 1; } 51 | command -v cargo > /dev/null 2>&1 || { show_required_tools 'cargo'; exit 1; } 52 | command -v psql > /dev/null 2>&1 || { show_required_tools 'psql'; exit 1; } 53 | command -v npm > /dev/null 2>&1 || { show_required_tools 'npm'; exit 1; } 54 | command -v yarn > /dev/null 2>&1 || { show_required_tools 'yarn'; exit 1; } 55 | 56 | source "./env.sh" 57 | if [ -f "$BD_HASURA_BINARY" ]; then 58 | echo 59 | elif [ command -v hasura > /dev/null 2>&1 ]; then 60 | echo 61 | else 62 | show_required_tools 'hasura-cli' 63 | exit 1 64 | fi 65 | if [ -f "./override-env.sh" ]; then 66 | source "./override-env.sh" 67 | fi 68 | 69 | echo "Run everything requires:" 70 | echo "- Your machine should have 4 Cores and 16 Gb of RAM (usage: ~20%-80% per core and 50% ram)" 71 | echo "- Required ports are available" 72 | echo "- OS supports systemd" 73 | echo "Running this script is NOT recommended, you better run the scripts separately by yourself to deeply understand" 74 | read -p "ARE YOU SURE still want to run this script? (Y/n)" -n 1 -r 75 | echo 76 | if [[ $REPLY =~ ^[Yy]$ ]] 77 | then 78 | command -v systemctl > /dev/null 2>&1 79 | if [ $? -ne 0 ] || [ ! -d "/etc/systemd/system" ] ; then 80 | echo >&2 "`systemd` is required!!! You better prepare an Ubuntu machine and try this later.." 81 | exit 1 82 | fi 83 | echo " ! OK, let's go" 84 | else 85 | echo "Give up!" 
86 | exit 0 87 | fi 88 | 89 | export FORCE_EXTRA_FUNC=1 # Auto create services and start them (sudo systemctl enable * & sudo systemctl start *) 90 | export HERMES_NO_CONFIRM_BALANCE=1 91 | 92 | AIO_CUR_DIR=$(pwd) 93 | AIO_DIR_BD="./big-dipper-as-block-explorer" 94 | AIO_DIR_HERMES="./hermes-as-ibc-relayer" 95 | AIO_DIR_CHAIN="./blockchain-in-docker" 96 | 97 | GAS_PRICE_1="$(bc <<< "20 * (10^$CHAIN_1_GAS_DENOM_EXPONENT)")$CHAIN_1_MIN_DENOM_SYMBOL" 98 | GAS_PRICE_2="$(bc <<< "20 * (10^$CHAIN_2_GAS_DENOM_EXPONENT)")$CHAIN_2_MIN_DENOM_SYMBOL" 99 | 100 | echo "[Clean up previous setup]" 101 | 102 | echo "> [Big Dipper]" 103 | cd "$AIO_DIR_BD" 104 | ./cleanup.sh 105 | cd "$AIO_CUR_DIR" 106 | 107 | echo "> [Hermes]" 108 | cd "$AIO_DIR_HERMES" 109 | ./cleanup.sh 110 | cd "$AIO_CUR_DIR" 111 | 112 | echo "> [Chains]" 113 | cd "$AIO_DIR_CHAIN" 114 | ./cleanup.sh 115 | cd "$AIO_CUR_DIR" 116 | 117 | echo "[Setup]" 118 | cd "$AIO_DIR_CHAIN" 119 | echo "> [Chain 1]" 120 | ./1_prepare-genesis.sh 1 121 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (genesis)"; exit 1; } 122 | sleep 2 123 | ./2_build-docker-image.sh 1 124 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (build docker image)"; exit 1; } 125 | sleep 2 126 | docker-compose -f network1.yml up -d 127 | echo "> [Chain 2]" 128 | ./1_prepare-genesis.sh 2 129 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (genesis)"; exit 1; } 130 | sleep 2 131 | ./2_build-docker-image.sh 2 132 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (build docker image)"; exit 1; } 133 | sleep 2 134 | docker-compose -f network2.yml up -d 135 | sleep 20 136 | 137 | cd "$AIO_CUR_DIR" 138 | cd "$AIO_DIR_HERMES" 139 | if [ -f "./override-env.sh" ]; then 140 | source "./override-env.sh" 141 | fi 142 | echo "> [Load up token for IBC account on chain 1]" 143 | echo "Keyring: $KEYRING" 144 | if [ "$KEYRING" = "test" ]; then 145 | docker exec -it vtevmos11 bash -c "$CHAIN_1_DAEMON_BINARY_NAME tx bank send $VAL_2_KEY_NAME $REL_1_ADDR $(bc <<< "$HERMES_RESERVED_FEE * (10^$HERMES_CFG_CHAIN_1_DENOM_EXPONENT)")$HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL --gas-prices $GAS_PRICE_1 --home /.evmosd1 --node 'tcp://127.0.0.1:26657' --yes" 146 | else 147 | docker exec -it vtevmos11 bash -c "echo '$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD' | $CHAIN_1_DAEMON_BINARY_NAME tx bank send $VAL_2_KEY_NAME $REL_1_ADDR $(bc <<< "$HERMES_RESERVED_FEE * (10^$HERMES_CFG_CHAIN_1_DENOM_EXPONENT)")$HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL --gas-prices $GAS_PRICE_1 --home /.evmosd1 --node 'tcp://127.0.0.1:26657' --yes" 148 | fi 149 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed"; exit 1; } 150 | echo "> [Load up token for IBC account on chain 2]" 151 | if [ "$KEYRING" = "test" ]; then 152 | docker exec -it vtevmos21 bash -c "$CHAIN_2_DAEMON_BINARY_NAME tx bank send $VAL_2_KEY_NAME $REL_2_ADDR $(bc <<< "$HERMES_RESERVED_FEE * (10^$HERMES_CFG_CHAIN_2_DENOM_EXPONENT)")$HERMES_CFG_CHAIN_2_GAS_PRICE_DENOM_SYMBOL --gas-prices $GAS_PRICE_2 --home /.evmosd2 --node 'tcp://127.0.0.1:26657' --yes" 153 | else 154 | docker exec -it vtevmos21 bash -c "echo '$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD' | $CHAIN_2_DAEMON_BINARY_NAME tx bank send $VAL_2_KEY_NAME $REL_2_ADDR $(bc <<< "$HERMES_RESERVED_FEE * (10^$HERMES_CFG_CHAIN_2_DENOM_EXPONENT)")$HERMES_CFG_CHAIN_2_GAS_PRICE_DENOM_SYMBOL --gas-prices $GAS_PRICE_2 --home /.evmosd2 --node 'tcp://127.0.0.1:26657' --yes" 155 | fi 156 | [ $? 
-eq 0 ] || { echo >&2 "ERR AIO: Operation failed"; exit 1; }
157 | 
158 | echo "> [Hermes as IBC relayer]"
159 | ./create-relayer.sh
160 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed"; exit 1; }
161 | cd "$AIO_CUR_DIR"
162 | 
163 | cd "$AIO_DIR_BD"
164 | echo "> [bdjuno for chain 1]"
165 | ./1_install-bdjuno.sh 1
166 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (step 1 bdjuno)"; exit 1; }
167 | ./2_install-bdjuno.sh 1
168 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (step 2 bdjuno)"; exit 1; }
169 | ./3_install-hasura.sh 1
170 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (hasura)"; exit 1; }
171 | echo "> [bdjuno for chain 2]"
172 | ./1_install-bdjuno.sh 2
173 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (step 1 bdjuno)"; exit 1; }
174 | ./2_install-bdjuno.sh 2
175 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (step 2 bdjuno)"; exit 1; }
176 | ./3_install-hasura.sh 2
177 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed (hasura)"; exit 1; }
178 | echo "> [Big Dipper UI for chain 1]"
179 | ./4_install-front-end.sh 1
180 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed"; exit 1; }
181 | echo "> [Big Dipper UI for chain 2]"
182 | ./4_install-front-end.sh 2
183 | [ $? -eq 0 ] || { echo >&2 "ERR AIO: Operation failed"; exit 1; }
184 | 
185 | echo "Finished"
186 | echo "Alright, all the services are expected to be started; no need to do anything else"
187 | echo "To make sure everything is working well, you need to"
188 | echo "1. Check the 3 validator containers & make sure they are producing blocks"
189 | echo "2. Make sure bdjuno & hasura services are ok"
190 | echo "3. Go to block explorer UI and check things there"
191 | echo "4. Check that Hermes is working well"
192 | echo "5. Make sure the service files at '/etc/systemd/system/*.service' have the correct working directory and execution path (in case you changed any repo/branch)"
193 | 
-------------------------------------------------------------------------------- /big-dipper-as-block-explorer/.gitignore: --------------------------------------------------------------------------------
1 | .bdjuno*/
-------------------------------------------------------------------------------- /big-dipper-as-block-explorer/1_install-bdjuno.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | command -v docker > /dev/null 2>&1 || { echo >&2 "ERR: docker is required"; exit 1; }
4 | command -v psql > /dev/null 2>&1 || { echo >&2 "ERR: psql is required, you first need to install psql client.
Hint: sudo apt install postgresql-client"; exit 1; } 5 | 6 | source ../env.sh 7 | 8 | CHAIN_NO=$1 9 | 10 | if [ -f "./override-env.sh" ]; then 11 | source "./override-env.sh" 12 | fi 13 | 14 | if [ -f "./_config.sh" ]; then 15 | source "./_config.sh" 16 | else 17 | echo >&2 "ERR: Wrong working directory" 18 | echo >&2 "Scripts must be executed within [big-dipper-as-block-explorer] directory" 19 | exit 1 20 | fi 21 | 22 | # Validate input 23 | if [ "$CHAIN_NO" = "1" ]; then 24 | echo "Chain 1" 25 | elif [ "$CHAIN_NO" = "2" ]; then 26 | echo "Chain 2" 27 | else 28 | echo >&2 'ERR: Missing or incorrect chain no as first argument, valid input is 1 or 2' 29 | echo >&2 'For example:' 30 | echo >&2 " $0 1" 31 | echo >&2 " or: $0 2" 32 | exit 1 33 | fi 34 | 35 | PG_CON_NAME="bdjdb$CHAIN_NO" 36 | PG_VOL_NAME="bdjdb$CHAIN_NO" 37 | 38 | # Stop service if exists 39 | [ $DISABLE_SYSTEMCTL -eq 0 ] && { 40 | echo "Stopping $BD_SERVICE_NAME service"; 41 | sudo systemctl stop $BD_SERVICE_NAME > /dev/null 2>&1; 42 | sudo systemctl disable $BD_SERVICE_NAME > /dev/null 2>&1; 43 | } 44 | 45 | echo 'Remove existing docker container & volumes' 46 | sleep 3 47 | docker rm -f $PG_CON_NAME 48 | docker volume rm -f $PG_VOL_NAME 49 | 50 | # Remake 51 | echo "Remake database and expose port $PG_PORT" 52 | docker run \ 53 | --restart unless-stopped \ 54 | --name $PG_CON_NAME \ 55 | -d \ 56 | -p $PG_PORT:5432 \ 57 | -e POSTGRES_PASSWORD=$BD_CFG_PG_USR_PASS \ 58 | -v $PG_VOL_NAME:/data/db \ 59 | postgres:12.5 60 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to create a PostgreSQL container"; } 61 | 62 | echo 'Waiting DB up' 63 | sleep 20 64 | 65 | echo "- Creating database $BD_PG_DB" 66 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "CREATE DATABASE $BD_PG_DB;" 67 | [ $? -eq 0 ] || { echo >&2 "ERR: Operation failed!"; } 68 | echo "- Creating user $BD_PG_USER" 69 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "CREATE USER $BD_PG_USER WITH ENCRYPTED PASSWORD '$BD_PG_PASS';" 70 | [ $? -eq 0 ] || { echo >&2 "ERR: Operation failed!"; } 71 | echo "- Grant all privileges on db $BD_PG_DB to user $BD_PG_USER" 72 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "GRANT ALL PRIVILEGES ON DATABASE $BD_PG_DB TO $BD_PG_USER;" 73 | [ $? -eq 0 ] || { echo >&2 "ERR: Operation failed!"; } 74 | 75 | # Check bdjuno source 76 | # If the repo is different with config, show a warning 77 | if [ -d "./$BD_SOURCE_DIR" ]; then 78 | echo "bdjuno repo exists" 79 | echo "Checking repo url & branch name" 80 | CHK_RES_1="$(git --git-dir "./$BD_SOURCE_DIR"/.git --work-tree "./$BD_SOURCE_DIR" config --get remote.origin.url)" 81 | if [ $? -ne 0 ] || [ -z "$CHK_RES_1" ]; then 82 | echo "WARN! Unable to check remote origin url of git repo at $BD_SOURCE_DIR" 83 | sleep 2 84 | elif [ "$CHK_RES_1" != "$BD_GIT_REPO" ]; then 85 | echo "WARN! Git repo Url does not match" 86 | echo "Expected: '$BD_GIT_REPO'" 87 | echo "Actual: '$CHK_RES_1'" 88 | echo "You should check it (script will continue execution after 10s)" 89 | sleep 10 90 | fi 91 | CHK_RES_2="$(git --git-dir "./$BD_SOURCE_DIR"/.git --work-tree "./$BD_SOURCE_DIR" rev-parse --abbrev-ref HEAD)" 92 | if [ $? -ne 0 ] || [ -z "$CHK_RES_2" ]; then 93 | echo "WARN! Unable to check branch of git repo at $BD_SOURCE_DIR" 94 | sleep 2 95 | elif [ "$CHK_RES_2" = "HEAD" ]; then 96 | echo "WARN! Can not check branch" 97 | elif [ "$CHK_RES_2" != "$BD_GIT_BRANCH" ]; then 98 | echo "WARN! 
Git Branch does not match" 99 | echo "Expected: '$BD_GIT_BRANCH'" 100 | echo "Actual: '$CHK_RES_2'" 101 | echo "You should check it (script will continue execution after 10s)" 102 | sleep 10 103 | fi 104 | else 105 | echo "Downloading bdjuno source code from branch $BD_GIT_BRANCH" 106 | git clone "$BD_GIT_REPO" --branch "$BD_GIT_BRANCH" --single-branch "$BD_SOURCE_DIR" 107 | 108 | if [ $? -ne 0 ]; then 109 | echo >&2 "ERR: Git clone bdjuno branch $BD_GIT_BRANCH failed" 110 | exit 1 111 | fi 112 | fi 113 | 114 | SCHEMA_DIR="./$BD_SOURCE_DIR/database/schema" 115 | 116 | if [ ! -d "$SCHEMA_DIR" ]; then 117 | echo >&2 "ERR: Schema dir $SCHEMA_DIR could not be found" 118 | exit 1 119 | fi 120 | 121 | CUR_DIR=$(pwd) 122 | cd "$SCHEMA_DIR" 123 | echo "- Run sql files" 124 | ls -1 | while read line ; do PGPASSWORD=$BD_PG_PASS psql -h 127.0.0.1 -p $PG_PORT -d $BD_PG_DB -U $BD_PG_USER -f $line ; done 125 | 126 | cd "$CUR_DIR" 127 | cd "./$BD_SOURCE_DIR" 128 | echo 129 | echo 'Compiling bdjuno' 130 | make install 131 | [ $? -ne 0 ] && { echo >&2 "ERR: Failed to compile"; exit 1; } 132 | echo "Rename $BD_BINARY_ORIGIN into $BD_BINARY" 133 | mv "$BD_BINARY_ORIGIN" "$BD_BINARY" 134 | cd "$CUR_DIR" 135 | 136 | echo "Init bdjuno" 137 | rm -rf "$BD_HOME" 138 | $BD_BINARY init --home "$BD_HOME" 139 | 140 | echo 'Config bdjuno' 141 | CONFIG_YAML="$BD_HOME/config.yaml" 142 | cp "./template.config.yaml" "$CONFIG_YAML" 143 | 144 | if [[ "$OSTYPE" == "darwin"* ]]; then 145 | sed -i '' "s/p_bech32_prefix/$ACCOUNT_PREFIX/g" "$CONFIG_YAML" 146 | sed -i '' "s,p_rpc_addr,$RPC_ADDR,g" "$CONFIG_YAML" 147 | sed -i '' "s,p_grpc_addr,$GRPC_ADDR,g" "$CONFIG_YAML" 148 | sed -i '' "s/p_db_name/$BD_PG_DB/g" "$CONFIG_YAML" 149 | sed -i '' "s/p_db_user/$BD_PG_USER/g" "$CONFIG_YAML" 150 | sed -i '' "s/p_db_port/$PG_PORT/g" "$CONFIG_YAML" 151 | sed -i '' "s/p_db_pass/$BD_CFG_PG_USR_PASS/g" "$CONFIG_YAML" 152 | sed -i '' "s/p_token_denom/$DENOM_SYMBOL/g" "$CONFIG_YAML" 153 | sed -i '' "s/p_token_min_denom/$MIN_DENOM_SYMBOL/g" "$CONFIG_YAML" 154 | sed -i '' "s/p_denom_exponent/$DENOM_EXPONENT/g" "$CONFIG_YAML" 155 | sed -i '' "s/p_action_port/$BD_HASURA_ACTIONBASE_PORT/g" "$CONFIG_YAML" 156 | else 157 | sed -i "s/p_bech32_prefix/$ACCOUNT_PREFIX/g" "$CONFIG_YAML" 158 | sed -i "s,p_rpc_addr,$RPC_ADDR,g" "$CONFIG_YAML" 159 | sed -i "s,p_grpc_addr,$GRPC_ADDR,g" "$CONFIG_YAML" 160 | sed -i "s/p_db_name/$BD_PG_DB/g" "$CONFIG_YAML" 161 | sed -i "s/p_db_user/$BD_PG_USER/g" "$CONFIG_YAML" 162 | sed -i "s/p_db_port/$PG_PORT/g" "$CONFIG_YAML" 163 | sed -i "s/p_db_pass/$BD_CFG_PG_USR_PASS/g" "$CONFIG_YAML" 164 | sed -i "s/p_token_denom/$DENOM_SYMBOL/g" "$CONFIG_YAML" 165 | sed -i "s/p_token_min_denom/$MIN_DENOM_SYMBOL/g" "$CONFIG_YAML" 166 | sed -i "s/p_denom_exponent/$DENOM_EXPONENT/g" "$CONFIG_YAML" 167 | sed -i "s/p_action_port/$BD_HASURA_ACTIONBASE_PORT/g" "$CONFIG_YAML" 168 | fi 169 | 170 | echo "Step 1 done!" 
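# Optional sanity checks before moving on to step 2 (a minimal sketch, using the container,
# database and config rendered above):
#   docker ps --filter "name=$PG_CON_NAME"                      # the PostgreSQL container is up
#   PGPASSWORD=$BD_PG_PASS psql -h 127.0.0.1 -p $PG_PORT -d $BD_PG_DB -U $BD_PG_USER -c '\dt'    # schema files were applied
#   grep -E 'rpc|grpc|url' "$BD_HOME/config.yaml"               # endpoints and DB url were templated in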
171 | echo "- Postgres SQL db was exposed to port $PG_PORT" 172 | echo "- bdjuno home: $BD_HOME" 173 | echo "- hasura action base was exposed to port : $BD_HASURA_ACTIONBASE_PORT" 174 | echo "Now you need to:" 175 | echo "- copy genesis.json of the chain and put it into $BD_HOME folder (expect file $BD_HOME/genesis.json)" 176 | echo '// TODO: try to add mint module to config.yaml' 177 | 178 | echo "After that you can move to step 2 (run ./2_install-bdjuno.sh)" 179 | -------------------------------------------------------------------------------- /big-dipper-as-block-explorer/2_install-bdjuno.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ../env.sh 4 | 5 | CHAIN_NO=$1 6 | 7 | if [ -f "./override-env.sh" ]; then 8 | source "./override-env.sh" 9 | fi 10 | 11 | if [ -f "./_config.sh" ]; then 12 | source "./_config.sh" 13 | else 14 | echo >&2 "ERR: Wrong working directory" 15 | echo >&2 "Scripts must be executed within [big-dipper-as-block-explorer] directory" 16 | exit 1 17 | fi 18 | 19 | # Validate input 20 | if [ "$CHAIN_NO" = "1" ]; then 21 | echo "Chain 1" 22 | elif [ "$CHAIN_NO" = "2" ]; then 23 | echo "Chain 2" 24 | else 25 | echo >&2 'ERR: Missing or incorrect chain no as first argument, valid input is 1 or 2' 26 | echo >&2 'For example:' 27 | echo >&2 " $0 1" 28 | echo >&2 " or: $0 2" 29 | exit 1 30 | fi 31 | 32 | # Stop service if exists 33 | [ $DISABLE_SYSTEMCTL -eq 0 ] && { 34 | echo "Stopping $BD_SERVICE_NAME service"; 35 | sudo systemctl stop $BD_SERVICE_NAME > /dev/null 2>&1; 36 | } 37 | 38 | # Parse 39 | GENESIS_JSON="$BD_HOME/genesis.json" 40 | if [ ! -f "$GENESIS_JSON" ]; then 41 | if [ $EXTRA_FUNC -eq 1 ]; then 42 | cp '../blockchain-in-docker/.evmosd'$CHAIN_NO'0/config/genesis.json' "$GENESIS_JSON" 43 | if [ ! -f "$GENESIS_JSON" ]; then 44 | echo >&2 "ERR: Please copy genesis.json from your chain into $BD_HOME" 45 | exit 1 46 | fi 47 | else 48 | echo >&2 "ERR: Missing genesis.json file (expect: $GENESIS_JSON)" 49 | echo "Please copy that file from your chain" 50 | exit 1 51 | fi 52 | fi 53 | echo "Parsing genesis file" 54 | $BD_BINARY parse genesis-file --genesis-file-path "$GENESIS_JSON" --home "$BD_HOME" 55 | [ $? 
-eq 0 ] || { echo >&2 "ERR: Failed to parse genesis.json!"; exit 1; } 56 | ## Check chain id 57 | GENESIS_CHAIN_ID=$(cat "$GENESIS_JSON" | jq .chain_id | head -n 1 | tr -d '"') 58 | 59 | if [ "$GENESIS_CHAIN_ID" != "$CHAIN_ID" ]; then 60 | echo >&2 "ERR: Mis-match chain id, expect [$CHAIN_ID] but found [$GENESIS_CHAIN_ID] on genesis.json" 61 | exit 1 62 | fi 63 | 64 | 65 | # Re-Start service 66 | if [ $DISABLE_SYSTEMCTL -eq 0 ]; then 67 | SERVICE_FILE="/etc/systemd/system/$BD_SERVICE_NAME.service" 68 | echo 69 | if [ -f "$SERVICE_FILE" ]; then 70 | echo "You are ready to restart $BD_SERVICE_NAME service (sudo systemctl restart $BD_SERVICE_NAME)" 71 | 72 | [ $EXTRA_FUNC -eq 1 ] && sudo systemctl start $BD_SERVICE_NAME 73 | else 74 | echo "You can paste the following content to $SERVICE_FILE file to create a daemon service" 75 | echo "sudo vi $SERVICE_FILE" 76 | echo 77 | 78 | CUR_DIR=$(pwd) 79 | cd "$BD_HOME" 80 | WORKING_DIR=$(pwd) 81 | cd "$CUR_DIR" 82 | 83 | SCRIPT_CONTENT="[Unit] 84 | \nDescription=BDJuno parser 85 | \nConditionPathExists=$BD_BINARY 86 | \nAfter=network-online.target 87 | 88 | \n[Service] 89 | \nUser=$USER 90 | \nWorkingDirectory=$WORKING_DIR 91 | \nExecStart=$BD_BINARY start --home $BD_HOME 92 | \nRestart=always 93 | \nRestartSec=3 94 | \nLimitNOFILE=4096 95 | 96 | \n[Install] 97 | \nWantedBy=multi-user.target" 98 | echo -e $SCRIPT_CONTENT 99 | echo 100 | echo "sudo systemctl enable $BD_SERVICE_NAME" 101 | echo "sudo systemctl start $BD_SERVICE_NAME" 102 | 103 | [ $EXTRA_FUNC -eq 1 ] && { 104 | echo 'Creating service '$BD_SERVICE_NAME; 105 | echo -e $SCRIPT_CONTENT | sudo tee $SERVICE_FILE > /dev/null; 106 | sudo systemctl daemon-reload; 107 | sudo systemctl enable $BD_SERVICE_NAME; 108 | sudo systemctl start $BD_SERVICE_NAME; 109 | } 110 | fi 111 | fi 112 | 113 | echo 'Finished bdjuno installtion' 114 | echo "Notice!!! Make sure the service file at '/etc/systemd/system/$BD_SERVICE_NAME.service' has correct working directort and execution path (in case you changed any repo/branch)" 115 | echo 116 | echo 'Now move to install Hasura by running 3_install-hasura.sh' 117 | 118 | 119 | -------------------------------------------------------------------------------- /big-dipper-as-block-explorer/3_install-hasura.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | command -v docker > /dev/null 2>&1 || { echo >&2 "ERR: docker is required"; exit 1; } 4 | command -v psql > /dev/null 2>&1 || { echo >&2 "ERR: psql is required, you first need to install psql client. 
Hint: sudo apt install postgresql-client"; exit 1; } 5 | 6 | source ../env.sh 7 | 8 | CHAIN_NO=$1 9 | 10 | if [ -f "./override-env.sh" ]; then 11 | source "./override-env.sh" 12 | fi 13 | 14 | if [ -f "./_config.sh" ]; then 15 | source "./_config.sh" 16 | else 17 | echo >&2 "ERR: Wrong working directory" 18 | echo >&2 "Scripts must be executed within [big-dipper-as-block-explorer] directory" 19 | exit 1 20 | fi 21 | 22 | # Validate input 23 | if [ "$CHAIN_NO" = "1" ]; then 24 | echo "Chain 1" 25 | elif [ "$CHAIN_NO" = "2" ]; then 26 | echo "Chain 2" 27 | else 28 | echo >&2 'ERR: Missing or incorrect chain no as first argument, valid input is 1 or 2' 29 | echo >&2 'For example:' 30 | echo >&2 " $0 1" 31 | echo >&2 " or: $0 2" 32 | exit 1 33 | fi 34 | 35 | if [ -f "$BD_HASURA_BINARY" ]; then 36 | echo "Hasura binary exists" 37 | elif [ command -v hasura > /dev/null 2>&1 ]; then 38 | export BD_HASURA_BINARY="hasura" 39 | else 40 | echo >&2 "ERR: hasura-cli is required, more info: https://hasura.io/docs/latest/graphql/core/hasura-cli/install-hasura-cli/ . Hint: curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | bash" 41 | exit 1 42 | fi 43 | 44 | BD_HASURA_CON_NAME="bdjhasura$CHAIN_NO" 45 | 46 | echo 'Remove existing containers' 47 | docker rm -f $BD_HASURA_CON_NAME 48 | sleep 1 49 | 50 | echo 'Preparing DB' 51 | echo 'Dropping old data if exists' 52 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "DROP DATABASE $BD_PG_HASURA_META_DB;" 53 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "DROP DATABASE $BD_PG_HASURA_DB;" 54 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "REASSIGN OWNED BY $BD_PG_HASURA_USER TO postgres;" 55 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "DROP OWNED BY $BD_PG_HASURA_USER;" 56 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "DROP USER $BD_PG_HASURA_USER;" 57 | 58 | echo 'Create new DB and user' 59 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "CREATE DATABASE $BD_PG_HASURA_META_DB;" 60 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "CREATE DATABASE $BD_PG_HASURA_DB;" 61 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "CREATE USER $BD_PG_HASURA_USER WITH ENCRYPTED PASSWORD '$BD_PG_HASURA_PASS';" 62 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "GRANT ALL PRIVILEGES ON DATABASE $BD_PG_HASURA_META_DB TO $BD_PG_HASURA_USER;" 63 | PGPASSWORD=$BD_CFG_PG_USR_PASS psql -h 127.0.0.1 -p $PG_PORT -d postgres -U postgres -c "GRANT ALL PRIVILEGES ON DATABASE $BD_PG_HASURA_DB TO $BD_PG_HASURA_USER;" 64 | 65 | echo 'Create hasura container' 66 | docker run \ 67 | --restart unless-stopped \ 68 | --name $BD_HASURA_CON_NAME \ 69 | -d \ 70 | -p $BD_HASURA_PORT:8080 \ 71 | -e "HASURA_GRAPHQL_METADATA_DATABASE_URL=postgres://postgres:$BD_CFG_PG_USR_PASS@172.17.0.1:$PG_PORT/$BD_PG_HASURA_META_DB" \ 72 | -e "HASURA_GRAPHQL_DATABASE_URL=postgres://$BD_PG_USER:$BD_PG_PASS@172.17.0.1:$PG_PORT/$BD_PG_DB" \ 73 | -e "PG_DATABASE_URL=postgres://$BD_PG_HASURA_USER:$BD_PG_HASURA_PASS@172.17.0.1:$PG_PORT/$BD_PG_HASURA_DB" \ 74 | -e HASURA_GRAPHQL_ENABLE_CONSOLE=true \ 75 | -e HASURA_GRAPHQL_DEV_MODE=true \ 76 | -e HASURA_GRAPHQL_ENABLED_LOG_TYPES="startup, http-log, webhook-log, websocket-log, 
query-log" \ 77 | -e HASURA_GRAPHQL_UNAUTHORIZED_ROLE="anonymous" \ 78 | -e "HASURA_GRAPHQL_ADMIN_SECRET=$BD_HASURA_SECRET" \ 79 | -e ACTION_BASE_URL="http://172.17.0.1:$BD_HASURA_ACTIONBASE_PORT" \ 80 | hasura/graphql-engine:v2.7.0 81 | 82 | echo 'Wait hasura up' 83 | sleep 3 84 | 85 | echo 'Applying Hasura metadata' 86 | 87 | cd "$BD_SOURCE_DIR/hasura/" 88 | 89 | $BD_HASURA_BINARY metadata apply --endpoint http://localhost:$BD_HASURA_PORT --admin-secret $BD_HASURA_SECRET 90 | 91 | echo "Done" 92 | echo "- Hasura graphql was exposed on port $BD_HASURA_PORT which use data from action base at port $BD_HASURA_ACTIONBASE_PORT" 93 | echo "Run the following command to test" 94 | echo "curl http://localhost:$BD_HASURA_ACTIONBASE_PORT/account_balance --data '{ \"input\": { \"address\": \"''\"} }'" 95 | -------------------------------------------------------------------------------- /big-dipper-as-block-explorer/4_install-front-end.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | command -v npm > /dev/null 2>&1 || { echo >&2 "ERR: npm is required"; exit 1; } 4 | command -v yarn > /dev/null 2>&1 || { echo >&2 "ERR: yarn is required"; exit 1; } 5 | 6 | source ../env.sh 7 | 8 | CHAIN_NO=$1 9 | 10 | if [ -f "./override-env.sh" ]; then 11 | source "./override-env.sh" 12 | fi 13 | 14 | if [ -f "./_config.sh" ]; then 15 | source "./_config.sh" 16 | else 17 | echo >&2 "ERR: Wrong working directory" 18 | echo >&2 "Scripts must be executed within [big-dipper-as-block-explorer] directory" 19 | exit 1 20 | fi 21 | 22 | # Validate input 23 | if [ "$CHAIN_NO" = "1" ]; then 24 | echo "Chain 1" 25 | elif [ "$CHAIN_NO" = "2" ]; then 26 | echo "Chain 2" 27 | else 28 | echo >&2 'ERR: Missing or incorrect chain no as first argument, valid input is 1 or 2' 29 | echo >&2 'For example:' 30 | echo >&2 " $0 1" 31 | echo >&2 " or: $0 2" 32 | exit 1 33 | fi 34 | 35 | # Stop service if exists 36 | [ $DISABLE_SYSTEMCTL -eq 0 ] && { 37 | echo "Stopping $BD2_SERVICE_NAME service"; 38 | sudo systemctl stop $BD2_SERVICE_NAME > /dev/null 2>&1; 39 | sudo systemctl disable $BD2_SERVICE_NAME > /dev/null 2>&1; 40 | } 41 | 42 | # Check Big Dipper 2.0 source 43 | if [ -d "$BD2_SOURCE_DIR" ]; then 44 | echo "Big Dipper 2.0 repo exists" 45 | echo "Checking repo url & branch name" 46 | CHK_RES_1="$(git --git-dir "./$BD2_SOURCE_DIR"/.git --work-tree "./$BD2_SOURCE_DIR" config --get remote.origin.url)" 47 | if [ $? -ne 0 ] || [ -z "$CHK_RES_1" ]; then 48 | echo "WARN! Unable to check remote origin url of git repo at $BD2_SOURCE_DIR" 49 | sleep 2 50 | elif [ "$CHK_RES_1" != "$BD2_GIT_REPO" ]; then 51 | echo "WARN! Git repo Url does not match" 52 | echo "Expected: '$BD2_GIT_REPO'" 53 | echo "Actual: '$CHK_RES_1'" 54 | echo "You should check it (script will continue execution after 10s)" 55 | sleep 10 56 | fi 57 | CHK_RES_2="$(git --git-dir "./$BD2_SOURCE_DIR"/.git --work-tree "./$BD2_SOURCE_DIR" rev-parse --abbrev-ref HEAD)" 58 | if [ $? -ne 0 ] || [ -z "$CHK_RES_2" ]; then 59 | echo "WARN! Unable to check branch of git repo at $BD2_SOURCE_DIR" 60 | sleep 2 61 | elif [ "$CHK_RES_2" = "HEAD" ]; then 62 | echo "WARN! Can not check branch" 63 | elif [ "$CHK_RES_2" != "$BD2_BRANCH" ]; then 64 | echo "WARN! 
Git Branch does not match" 65 | echo "Expected: '$BD2_BRANCH'" 66 | echo "Actual: '$CHK_RES_2'" 67 | echo "You should check it (script will continue execution after 10s)" 68 | sleep 10 69 | fi 70 | else 71 | echo "Downloading Big Dipper 2.0 source code from branch $BD2_BRANCH" 72 | git clone "$BD2_GIT_REPO" --branch "$BD2_BRANCH" --single-branch "$BD2_SOURCE_DIR" 73 | 74 | if [ $? -ne 0 ]; then 75 | echo >&2 "ERR: Git clone Big Dipper 2.0 from branch $BD2_BRANCH was failed" 76 | exit 1 77 | fi 78 | fi 79 | 80 | # npm environment variables 81 | NPM_ENV="$BD2_SOURCE_DIR/.env" 82 | echo "Setting up file $NPM_ENV" 83 | echo -e " 84 | \nGRAPHQL_URL=http://$BD2_PUBLIC_DOMAIN:$BD_HASURA_PORT/v1/graphql 85 | \nNEXT_PUBLIC_GRAPHQL_URL=http://$BD2_PUBLIC_DOMAIN:$BD_HASURA_PORT/v1/graphql 86 | \nGRAPHQL_WS=ws://$BD2_PUBLIC_DOMAIN:$BD_HASURA_PORT/v1/graphql 87 | \nNEXT_PUBLIC_GRAPHQL_WS=ws://$BD2_PUBLIC_DOMAIN:$BD_HASURA_PORT/v1/graphql 88 | \nNODE_ENV=test 89 | \nPORT=$BD2_PORT 90 | \nRPC_WEBSOCKET=ws://$BD2_PUBLIC_RPC_26657/websocket 91 | \nNEXT_PUBLIC_RPC_WEBSOCKET=ws://$BD2_PUBLIC_RPC_26657/websocket 92 | \nNEXT_PUBLIC_CHAIN_TYPE=Devnet 93 | \nPROJECT_NAME=$BD2_PROJECT_NAME 94 | " > "$NPM_ENV" 95 | 96 | cp "$NPM_ENV" "$BD2_UI_DIR" 97 | 98 | # BD2 chain config 99 | BD2_CHAIN_CONFIG_MAINNET="$BD2_UI_DIR/src/chain.json" 100 | BD2_CHAIN_CONFIG_TMP="$BD2_UI_DIR/src/tmp_chain.json" 101 | echo "Setting up file chain config" 102 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chainName="'$CHAIN_NAME'"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 103 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.title="'$CHAIN_NAME' Block Explorer"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 104 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].network="'$CHAIN_ID'"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 105 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].chainType="Devnet"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 106 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].genesis["height"]=1' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 107 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].prefix["consensus"]="'$DENOM_SYMBOL'valcons"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 108 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].prefix["validator"]="'$DENOM_SYMBOL'valoper"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 109 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].prefix["account"]="'$ACCOUNT_PREFIX'"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 110 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].primaryTokenUnit="'$MIN_DENOM_SYMBOL'"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 111 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].votingPowerTokenUnit="'$MIN_DENOM_SYMBOL'"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 112 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].tokenUnits["'$MIN_DENOM_SYMBOL'"]["display"]="'$DENOM_SYMBOL'"' > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 113 | cat "$BD2_CHAIN_CONFIG_MAINNET" | jq '.chains[0].tokenUnits["'$MIN_DENOM_SYMBOL'"]["exponent"]='$DENOM_EXPONENT > $BD2_CHAIN_CONFIG_TMP && mv $BD2_CHAIN_CONFIG_TMP $BD2_CHAIN_CONFIG_MAINNET 114 | 115 | # BD2 codegen config 116 | 
BD2_CODEGEN_YML="$BD2_UI_DIR/codegen.yml" 117 | BD2_CODEGEN_YML_TMP="$BD2_UI_DIR/tmp_codegen.yml" 118 | echo "Setting up file $BD2_CODEGEN_YML" 119 | echo "- Update graphql schema" 120 | cat "$BD2_CODEGEN_YML" | yq '.generates["./src/graphql/types/general_types.ts"]["schema"]="http://'$BD2_PUBLIC_DOMAIN':'$BD_HASURA_PORT'/v1/graphql"' -Y > "$BD2_CODEGEN_YML_TMP" && mv "$BD2_CODEGEN_YML_TMP" "$BD2_CODEGEN_YML" 121 | 122 | CUR_DIR=$(pwd) 123 | cd "$BD2_SOURCE_DIR" 124 | WORKING_DIR=$(pwd) 125 | echo "Working dir: $WORKING_DIR" 126 | cd "$CUR_DIR" 127 | cd "$BD2_UI_DIR" 128 | # Build 129 | ## Gen code 130 | echo 'Fix error files' 131 | FILE_TOKEN_PRICE='./src/graphql/general/token_price.graphql' 132 | TMP_FILE_TOKEN_PRICE='./src/graphql/general/tmp.token_price.graphql' 133 | sed '/id/d' "$FILE_TOKEN_PRICE" > "$TMP_FILE_TOKEN_PRICE" && mv "$TMP_FILE_TOKEN_PRICE" "$FILE_TOKEN_PRICE" 134 | echo 'Generating code' 135 | yarn run graphql:codegen > /dev/null 2>&1 136 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to run graphql:codegen"; exit 1; } 137 | echo 'Build' 138 | cd "$WORKING_DIR" 139 | yarn install 140 | corepack enable 141 | yarn run build 142 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to build in root source dir"; exit 1; } 143 | 144 | cd "$CUR_DIR" 145 | cd "$BD2_UI_DIR" 146 | corepack enable 147 | yarn run build 148 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to build in web app source dir"; exit 1; } 149 | 150 | cd "$CUR_DIR" 151 | 152 | # Re-Start service 153 | if [ $DISABLE_SYSTEMCTL -eq 0 ]; then 154 | SERVICE_FILE="/etc/systemd/system/$BD2_SERVICE_NAME.service" 155 | echo 156 | if [ -f "$SERVICE_FILE" ]; then 157 | echo "You are ready to restart $BD2_SERVICE_NAME service (sudo systemctl restart $BD2_SERVICE_NAME)" 158 | 159 | [ $EXTRA_FUNC -eq 1 ] && sudo systemctl start $BD2_SERVICE_NAME 160 | else 161 | echo "You can paste the following content to $SERVICE_FILE file to create a daemon service" 162 | echo "sudo vi $SERVICE_FILE" 163 | echo 164 | 165 | SCRIPT_CONTENT="[Unit] 166 | \nDescription=Big Dipper 2.0 for $DENOM_SYMBOL chain (network $CHAIN_NO) 167 | \nAfter=network-online.target 168 | 169 | \n[Service] 170 | \nUser=$USER 171 | \nWorkingDirectory=$WORKING_DIR 172 | \nExecStart=$(which yarn) run start 173 | \nRestart=always 174 | \nRestartSec=3 175 | \nLimitNOFILE=4096 176 | 177 | \n[Install] 178 | \nWantedBy=multi-user.target" 179 | echo -e $SCRIPT_CONTENT 180 | echo 181 | echo "sudo systemctl enable $BD2_SERVICE_NAME" 182 | echo "sudo systemctl start $BD2_SERVICE_NAME" 183 | 184 | [ $EXTRA_FUNC -eq 1 ] && { 185 | echo 'Creating service '$BD2_SERVICE_NAME; 186 | echo -e $SCRIPT_CONTENT | sudo tee $SERVICE_FILE > /dev/null; 187 | sudo systemctl daemon-reload; 188 | sudo systemctl enable $BD2_SERVICE_NAME; 189 | sudo systemctl start $BD2_SERVICE_NAME; 190 | } 191 | fi 192 | else 193 | echo "OK, you can run it now" 194 | echo "Hint: npm run dev" 195 | fi 196 | 197 | echo "Notice!!! Make sure the service file at '/etc/systemd/system/$BD2_SERVICE_NAME.service' has correct working directort and execution path (in case you changed any repo/branch)" 198 | -------------------------------------------------------------------------------- /big-dipper-as-block-explorer/README.md: -------------------------------------------------------------------------------- 1 | ### I. Prepare 2 | - **IMPORTANT** Open file `env.sh`, locate the variables with prefix `BD_CFG_CHAIN_*` and updated based on your need 3 | 4 | ### II. 
Create the BDJUNO & Big Dipper 2.0 UI 5 | - Create for chain 1 6 | > $ ./1_install-bdjuno.sh 1 7 | 8 | > $ ./2_install-bdjuno.sh 1 9 | 10 | > $ ./3_install-hasura.sh 1 11 | 12 | > $ ./4_install-front-end.sh 1 13 | 14 | Frontend port: `3800` 15 | 16 | - Create for chain 2 17 | > $ ./1_install-bdjuno.sh 2 18 | 19 | > $ ./2_install-bdjuno.sh 2 20 | 21 | > $ ./3_install-hasura.sh 2 22 | 23 | > $ ./4_install-front-end.sh 2 24 | 25 | Frontend port: `3802` 26 | 27 | ### III. Customization 28 | You can custom things just by editting keys in the [env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/env.sh) file 29 | 30 | Here are some of them: 31 | 32 | | Key | Default value | Explain | 33 | | --- | --- | --- | 34 | | `BD2_CFG_CHAIN_1_PORT` | 3800 | Port to open Big Dipper 2.0 as Block Explorer UI for chain 1 | 35 | | `BD2_CFG_CHAIN_1_PUBLIC_DOMAIN` | 127.0.0.1 | Domain to bind the UI on (for chain 1) | 36 | | `BD2_CFG_CHAIN_1_PUBLIC_RPC_26657` | 127.0.0.1:26657 | Public RPC endpoint of chain 1 | 37 | | `BD2_CFG_CHAIN_1_PROJECT_NAME` | web-evmos | Chain-specific app dir within [BD2 web ui project](https://github.com/forbole/big-dipper-2.0-cosmos/tree/main/apps) | 38 | | `BD2_CFG_CHAIN_1_CHAIN_NAME` | evmos | Chain name to be set in `./app/web-x/src/chain.json` | 39 | | `BD_CFG_CHAIN_1_HASURA_PORT` | 8080 | Port to open hasura graphql service for chain 1 | 40 | | `BD_CFG_CHAIN_1_HASURA_ACTIONBASE_PORT` | 3000 | Port for bdjuno to open hasura action for chain 1 | 41 | | `BD_CFG_CHAIN_1_PG_PORT` | 5432 | Port to expose bdjuno & hasura's Postgres DB which stores data for chain 1 | 42 | | `BD_CFG_CHAIN_1_RPC_ADDR` | 127.0.0.1:26657 | RPC endpoint of chain 1 | 43 | | `BD_CFG_CHAIN_1_GRPC_ADDR` | 127.0.0.1:9090 | gRPC endpoint of chain 1 | 44 | | `BD_CFG_CHAIN_1_ID` | evmos_9006-1 | Chain id of chain 1 | 45 | | `BD2_CFG_CHAIN_2_PORT` | 3802 | Port to open Big Dipper 2.0 as Block Explorer UI for chain 2 | 46 | | `BD2_CFG_CHAIN_2_PUBLIC_DOMAIN` | 127.0.0.1 | Domain to bind the UI on (for chain 2) | 47 | | `BD2_CFG_CHAIN_2_PUBLIC_RPC_26657` | 127.0.0.1:36657 | Public RPC endpoint of chain 2 | 48 | | `BD2_CFG_CHAIN_2_PROJECT_NAME` | web-evmos | Chain-specific app dir within [BD2 web ui project](https://github.com/forbole/big-dipper-2.0-cosmos/tree/main/apps) | 49 | | `BD2_CFG_CHAIN_2_CHAIN_NAME` | evmos | Chain name to be set in `./app/web-x/src/chain.json` | 50 | | `BD_CFG_CHAIN_2_HASURA_PORT` | 8082 | Port to open hasura graphql service for chain 2 | 51 | | `BD_CFG_CHAIN_2_HASURA_ACTIONBASE_PORT` | 3002 | Port for bdjuno to open hasura action for chain 2 | 52 | | `BD_CFG_CHAIN_2_PG_PORT` | 15432 | Port to expose bdjuno & hasura's Postgres DB which stores data for chain 2 | 53 | | `BD_CFG_CHAIN_2_RPC_ADDR` | 127.0.0.1:36657 | RPC endpoint of chain 2 | 54 | | `BD_CFG_CHAIN_2_GRPC_ADDR` | 127.0.0.1:19090 | gRPC endpoint of chain 2 | 55 | | `BD_CFG_CHAIN_2_ID` | evmos_9007-1 | Chain id of chain 2 | 56 | -------------------------------------------------------------------------------- /big-dipper-as-block-explorer/_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$CHAIN_NO" = "1" ]; then 4 | export CHAIN_ID="$BD_CFG_CHAIN_1_ID" 5 | export PG_PORT=$BD_CFG_CHAIN_1_PG_PORT 6 | export ACCOUNT_PREFIX=$BD_CFG_CHAIN_1_ACCOUNT_PREFIX 7 | export RPC_ADDR=$BD_CFG_CHAIN_1_RPC_ADDR 8 | export GRPC_ADDR=$BD_CFG_CHAIN_1_GRPC_ADDR 9 | export BD_GIT_REPO="$BD_CFG_CHAIN_1_GIT_REPO" 10 | export 
BD_GIT_BRANCH="$BD_CFG_CHAIN_1_GIT_REPO_BRANCH" 11 | export BD_HASURA_PORT=$BD_CFG_CHAIN_1_HASURA_PORT 12 | export BD_HASURA_ACTIONBASE_PORT=$BD_CFG_CHAIN_1_HASURA_ACTIONBASE_PORT 13 | export DENOM_SYMBOL="$BD_CFG_CHAIN_1_DENOM_SYMBOL" 14 | export DENOM_EXPONENT=$BD_CFG_CHAIN_1_DENOM_EXPONENT 15 | export MIN_DENOM_SYMBOL="$BD_CFG_CHAIN_1_MIN_DENOM_SYMBOL" 16 | export BD2_GIT_REPO="$BD2_CFG_CHAIN_1_GIT_REPO" 17 | export BD2_BRANCH="$BD2_CFG_CHAIN_1_BRANCH" 18 | export BD2_PORT=$BD2_CFG_CHAIN_1_PORT 19 | export BD2_PUBLIC_DOMAIN="$BD2_CFG_CHAIN_1_PUBLIC_DOMAIN" 20 | export BD2_PUBLIC_RPC_26657="$BD2_CFG_CHAIN_1_PUBLIC_RPC_26657" 21 | export BD2_PROJECT_NAME="$BD2_CFG_CHAIN_1_PROJECT_NAME" 22 | export BD2_CHAIN_NAME="$BD2_CFG_CHAIN_1_CHAIN_NAME" 23 | elif [ "$CHAIN_NO" = "2" ]; then 24 | export CHAIN_ID="$BD_CFG_CHAIN_2_ID" 25 | export PG_PORT=$BD_CFG_CHAIN_2_PG_PORT 26 | export ACCOUNT_PREFIX=$BD_CFG_CHAIN_2_ACCOUNT_PREFIX 27 | export RPC_ADDR=$BD_CFG_CHAIN_2_RPC_ADDR 28 | export GRPC_ADDR=$BD_CFG_CHAIN_2_GRPC_ADDR 29 | export BD_GIT_REPO="$BD_CFG_CHAIN_2_GIT_REPO" 30 | export BD_GIT_BRANCH="$BD_CFG_CHAIN_2_GIT_REPO_BRANCH" 31 | export BD_HASURA_PORT=$BD_CFG_CHAIN_2_HASURA_PORT 32 | export BD_HASURA_ACTIONBASE_PORT=$BD_CFG_CHAIN_2_HASURA_ACTIONBASE_PORT 33 | export DENOM_SYMBOL="$BD_CFG_CHAIN_2_DENOM_SYMBOL" 34 | export DENOM_EXPONENT=$BD_CFG_CHAIN_2_DENOM_EXPONENT 35 | export MIN_DENOM_SYMBOL="$BD_CFG_CHAIN_2_MIN_DENOM_SYMBOL" 36 | export BD2_GIT_REPO="$BD2_CFG_CHAIN_2_GIT_REPO" 37 | export BD2_BRANCH="$BD2_CFG_CHAIN_2_BRANCH" 38 | export BD2_PORT=$BD2_CFG_CHAIN_2_PORT 39 | export BD2_PUBLIC_DOMAIN="$BD2_CFG_CHAIN_2_PUBLIC_DOMAIN" 40 | export BD2_PUBLIC_RPC_26657="$BD2_CFG_CHAIN_2_PUBLIC_RPC_26657" 41 | export BD2_PROJECT_NAME="$BD2_CFG_CHAIN_2_PROJECT_NAME" 42 | export BD2_CHAIN_NAME="$BD2_CFG_CHAIN_2_CHAIN_NAME" 43 | fi 44 | 45 | export CHAIN_NAME=$(echo $BD2_CHAIN_NAME | tr '[:upper:]' '[:lower:]') 46 | 47 | echo "Creating big dipper as block explorer for $CHAIN_NAME chain $CHAIN_ID" 48 | echo "- Denom: $DENOM_SYMBOL ($DENOM_EXPONENT digits unit: $MIN_DENOM_SYMBOL)" 49 | echo "- RPC: $RPC_ADDR" 50 | echo "- gRPC: $GRPC_ADDR" 51 | echo "- Postgres port: $PG_PORT" 52 | echo "- Account prefix: $ACCOUNT_PREFIX" 53 | echo "- Expose UI at port: $BD2_PORT" 54 | echo "- bdjuno repo $BD_GIT_REPO branch $BD_GIT_BRANCH" 55 | echo "- bd2 repo $BD2_GIT_REPO branch $BD2_BRANCH" 56 | 57 | export BD_HOME=$(pwd)"/.bdjuno$CHAIN_NO" 58 | export BD_SERVICE_NAME="bdjuno-svc$CHAIN_NO" 59 | export BD_SOURCE_DIR="source-code-bdjuno-$DENOM_SYMBOL-$CHAIN_NO" 60 | export BD2_SERVICE_NAME="bd2-svc$CHAIN_NO" 61 | export BD2_SOURCE_DIR="source-code-bd2-$DENOM_SYMBOL-$CHAIN_NO" 62 | export BD2_UI_DIR="$BD2_SOURCE_DIR/apps/$BD2_PROJECT_NAME" 63 | export BD_BINARY="$GOPATH/bin/bdjuno$CHAIN_NO" 64 | -------------------------------------------------------------------------------- /big-dipper-as-block-explorer/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ../env.sh 4 | 5 | if [ $DISABLE_SYSTEMCTL -eq 0 ]; then 6 | echo "Stopping bdjuno-svc1 services" 7 | sudo systemctl stop bdjuno-svc1 > /dev/null 2>&1 8 | sudo systemctl disable bdjuno-svc1 > /dev/null 2>&1 9 | echo "Stopping bdjuno-svc2 services" 10 | sudo systemctl stop bdjuno-svc2 > /dev/null 2>&1 11 | sudo systemctl disable bdjuno-svc2 > /dev/null 2>&1 12 | echo "Stopping bd2-svc1 services" 13 | sudo systemctl stop bd2-svc1 > /dev/null 2>&1 14 | sudo systemctl disable bd2-svc1 > /dev/null 
2>&1 15 | echo "Stopping bd2-svc2 services" 16 | sudo systemctl stop bd2-svc2 > /dev/null 2>&1 17 | sudo systemctl disable bd2-svc2 > /dev/null 2>&1 18 | fi 19 | 20 | echo 'Remove existing docker container & volumes' 21 | docker rm -f bdjdb1 > /dev/null 2>&1 22 | docker volume rm -f bdjdb1 > /dev/null 2>&1 23 | docker rm -f bdjhasura1 > /dev/null 2>&1 24 | docker rm -f bdjdb2 > /dev/null 2>&1 25 | docker volume rm -f bdjdb2 > /dev/null 2>&1 26 | docker rm -f bdjhasura2 > /dev/null 2>&1 -------------------------------------------------------------------------------- /big-dipper-as-block-explorer/template.config.yaml: -------------------------------------------------------------------------------- 1 | chain: 2 | bech32_prefix: p_bech32_prefix 3 | modules: 4 | - modules 5 | - messages 6 | - auth 7 | - bank 8 | - consensus 9 | - gov 10 | - pricefeed 11 | - slashing 12 | - staking 13 | - distribution 14 | - actions 15 | node: 16 | type: remote 17 | config: 18 | rpc: 19 | client_name: juno 20 | address: http://p_rpc_addr 21 | max_connections: 20 22 | grpc: 23 | address: p_grpc_addr 24 | insecure: true 25 | parsing: 26 | workers: 1 27 | start_height: 1 28 | average_block_time: 5s 29 | listen_new_blocks: true 30 | parse_old_blocks: true 31 | parse_genesis: true 32 | database: 33 | url: postgresql://p_db_user:p_db_pass@localhost:p_db_port/p_db_name?sslmode=disable&search_path=public 34 | max_open_connections: 1 35 | max_idle_connections: 1 36 | partition_size: 100000 37 | partition_batch: 1000 38 | logging: 39 | level: debug 40 | format: text 41 | actions: 42 | host: 127.0.0.1 43 | port: 3000 44 | pruning: 45 | keep_recent: 100 46 | keep_every: 500 47 | interval: 10 48 | telemetry: 49 | port: 5000 50 | pricefeed: 51 | tokens: 52 | - name: p_token_denom 53 | units: 54 | - denom: p_token_min_denom 55 | exponent: 0 56 | - denom: p_token_denom 57 | exponent: p_denom_exponent 58 | price_id: evmos 59 | -------------------------------------------------------------------------------- /blockchain-in-docker/.gitignore: -------------------------------------------------------------------------------- 1 | .evmosd*/ 2 | network*.yml 3 | Dockerfile* 4 | 5 | 6 | -------------------------------------------------------------------------------- /blockchain-in-docker/1_prepare-genesis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | command -v docker > /dev/null 2>&1 || { echo >&2 "ERR: docker is required"; exit 1; } 4 | command -v 'docker-compose' > /dev/null 2>&1 || { echo >&2 "ERR: docker-compose is required"; exit 1; } 5 | 6 | source ../env.sh 7 | 8 | CHAIN_NO=$1 9 | 10 | if [ -f "./override-env.sh" ]; then 11 | source "./override-env.sh" 12 | fi 13 | 14 | if [ -f "./_config.sh" ]; then 15 | source "./_config.sh" 16 | else 17 | echo >&2 "ERR: Wrong working directory" 18 | echo >&2 "Scripts must be executed within [blockchain-in-docker] directory" 19 | exit 1 20 | fi 21 | 22 | # Validate input 23 | if [ "$CHAIN_NO" = "1" ]; then 24 | echo "Chain 1" 25 | elif [ "$CHAIN_NO" = "2" ]; then 26 | echo "Chain 2" 27 | else 28 | echo >&2 'ERR: Missing or incorrect chain no as first argument, valid input is 1 or 2' 29 | echo >&2 'For example:' 30 | echo >&2 " $0 1" 31 | echo >&2 " or: $0 2" 32 | exit 1 33 | fi 34 | 35 | if [ "$CHAIN_TYPE" = "evmos" ]; then 36 | if [ "$HD_COINTYPE" -ne "60" ]; then 37 | echo "Chain type 'evmos' requires coin type 60 (check variable 'CHAIN_${CHAIN_NO}_COINTYPE')" 38 | fi 39 | fi 40 | 41 | if [ "$KEYRING" = "file" ]; then 42 | echo 
"Keyring: file" 43 | elif [ "$KEYRING" = "test" ]; then 44 | echo "Keyring: test **WARNING** only use keyring-backend=test for development purpose on local machine or you must secure your cloud env by whitelist some IP addresses, otherwise someone will take all your token, even tho it's only a test env" 45 | else 46 | echo >&2 "ERR: Non supported keyring mode = $KEYRING, only support 'file' & 'test'" 47 | exit 1 48 | fi 49 | 50 | # Binary 51 | export BINARY="$GOPATH/bin/$DAEMON_BINARY_NAME" 52 | 53 | # Check & Install evmosd binary if not exists 54 | ./_make_binary.sh 55 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to check & build daemon binary '$DAEMON_BINARY_NAME' at $BINARY"; } 56 | 57 | VAL_HOME_1=$VAL_HOME_PREFIX'0' 58 | VAL_HOME_2=$VAL_HOME_PREFIX'1' 59 | VAL_HOME_3=$VAL_HOME_PREFIX'2' 60 | 61 | CONTAINER_PREFIX="vtevmos"$CHAIN_NO 62 | # Cleanup 63 | echo 'Clean up previous setup' 64 | rm -rf $VAL_HOME_1'/' 65 | rm -rf $VAL_HOME_2'/' 66 | rm -rf $VAL_HOME_3'/' 67 | 68 | # Init 69 | echo 'Init home folders for 3 validators' 70 | ## Keyring 71 | $BINARY config keyring-backend $KEYRING --home $VAL_HOME_1 72 | $BINARY config keyring-backend $KEYRING --home $VAL_HOME_2 73 | $BINARY config keyring-backend $KEYRING --home $VAL_HOME_3 74 | ## Chain ID 75 | $BINARY config chain-id $CHAIN_ID --home $VAL_HOME_1 76 | $BINARY config chain-id $CHAIN_ID --home $VAL_HOME_2 77 | $BINARY config chain-id $CHAIN_ID --home $VAL_HOME_3 78 | ## Genesis 79 | VAL_MONIKER=$MONIKER'-'$VAL_1_KEY_NAME 80 | $BINARY init $VAL_MONIKER --chain-id $CHAIN_ID --home $VAL_HOME_1 > /dev/null 2>&1 81 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to init chain on node 0"; exit 1; } 82 | VAL_MONIKER=$MONIKER'-'$VAL_2_KEY_NAME 83 | $BINARY init $VAL_MONIKER --chain-id $CHAIN_ID --home $VAL_HOME_2 > /dev/null 2>&1 84 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to init pseudo chain for node 1"; exit 1; } 85 | VAL_MONIKER=$MONIKER'-'$VAL_3_KEY_NAME 86 | $BINARY init $VAL_MONIKER --chain-id $CHAIN_ID --home $VAL_HOME_3 > /dev/null 2>&1 87 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to init pseudo chain for node 2"; exit 1; } 88 | 89 | # Import validator keys 90 | echo "Import validator keys for chain no $CHAIN_NO id $CHAIN_ID" 91 | if [ "$KEYRING" = "test" ]; then 92 | echo "- Validator 1, key name '$VAL_1_KEY_NAME'" 93 | ( echo "$VAL_1_SEED"; ) | $BINARY keys add "$VAL_1_KEY_NAME" --recover --keyring-backend "test" --home "$VAL_HOME_1" 94 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to import"; exit 1; } 95 | echo "- Validator 2, key name '$VAL_2_KEY_NAME'" 96 | ( echo "$VAL_2_SEED"; ) | $BINARY keys add "$VAL_2_KEY_NAME" --recover --keyring-backend "test" --home "$VAL_HOME_1" 97 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to import"; exit 1; } 98 | echo "- Validator 3, key name '$VAL_3_KEY_NAME'" 99 | ( echo "$VAL_3_SEED"; ) | $BINARY keys add "$VAL_3_KEY_NAME" --recover --keyring-backend "test" --home "$VAL_HOME_1" 100 | [ $? 
-eq 0 ] || { echo >&2 "ERR: Failed to import"; exit 1; } 101 | else 102 | if [ "$CHAIN_TYPE" = "evmos" ]; then 103 | echo "- Validator 1, key name '$VAL_1_KEY_NAME'" 104 | echo "** Due to an evmos daemon bug, it is not possible to import the seed & encryption password automatically at the same time; please copy & paste the following seed:" 105 | echo "___" 106 | echo "$VAL_1_SEED" 107 | echo "___" 108 | echo "and encryption password '$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD' to encrypt the seed phrase" 109 | echo "of validator 1" 110 | $BINARY keys add "$VAL_1_KEY_NAME" --recover --keyring-backend "file" --home "$VAL_HOME_1" 111 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to import"; exit 1; } 112 | echo "- Validator 2, key name '$VAL_2_KEY_NAME'" 113 | echo "** Due to an evmos daemon bug, it is not possible to import the seed & encryption password automatically at the same time; please copy & paste the following seed:" 114 | echo "___" 115 | echo "$VAL_2_SEED" 116 | echo "___" 117 | echo "and encryption password '$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD' to encrypt the seed phrase" 118 | echo "of validator 2" 119 | $BINARY keys add "$VAL_2_KEY_NAME" --recover --keyring-backend "file" --home "$VAL_HOME_1" 120 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to import"; exit 1; } 121 | echo "- Validator 3, key name '$VAL_3_KEY_NAME'" 122 | echo "** Due to an evmos daemon bug, it is not possible to import the seed & encryption password automatically at the same time; please copy & paste the following seed:" 123 | echo "___" 124 | echo "$VAL_3_SEED" 125 | echo "___" 126 | echo "and encryption password '$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD' to encrypt the seed phrase" 127 | echo "of validator 3" 128 | $BINARY keys add "$VAL_3_KEY_NAME" --recover --keyring-backend "file" --home "$VAL_HOME_1" 129 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to import"; exit 1; } 130 | else 131 | echo "- Validator 1, key name '$VAL_1_KEY_NAME', encryption password: '$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD', seed phrase '$VAL_1_SEED'" 132 | ( echo "$VAL_1_SEED"; echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD"; echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD"; ) | $BINARY keys add "$VAL_1_KEY_NAME" --recover --keyring-backend "file" --home "$VAL_HOME_1" 133 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to import"; exit 1; } 134 | echo "- Validator 2, key name '$VAL_2_KEY_NAME', encryption password: '$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD', seed phrase '$VAL_2_SEED'" 135 | ( echo "$VAL_2_SEED"; echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD"; echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD"; ) | $BINARY keys add "$VAL_2_KEY_NAME" --recover --keyring-backend "file" --home "$VAL_HOME_1" 136 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to import"; exit 1; } 137 | echo "- Validator 3, key name '$VAL_3_KEY_NAME', encryption password: '$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD', seed phrase '$VAL_3_SEED'" 138 | ( echo "$VAL_3_SEED"; echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD"; echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD"; ) | $BINARY keys add "$VAL_3_KEY_NAME" --recover --keyring-backend "file" --home "$VAL_HOME_1" 139 | [ $?
-eq 0 ] || { echo >&2 "ERR: Failed to import"; exit 1; } 140 | fi 141 | fi 142 | 143 | ## Extract 144 | echo '- Get wallet address' 145 | if [ "$KEYRING" = "test" ]; then 146 | export VAL_1_ADDR="$($BINARY keys show $VAL_1_KEY_NAME --keyring-backend $KEYRING --home "$VAL_HOME_1" --address)" 147 | export VAL_2_ADDR="$($BINARY keys show $VAL_2_KEY_NAME --keyring-backend $KEYRING --home "$VAL_HOME_1" --address)" 148 | export VAL_3_ADDR="$($BINARY keys show $VAL_3_KEY_NAME --keyring-backend $KEYRING --home "$VAL_HOME_1" --address)" 149 | else 150 | export VAL_1_ADDR="$(echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY keys show $VAL_1_KEY_NAME --keyring-backend $KEYRING --home "$VAL_HOME_1" --address)" 151 | export VAL_2_ADDR="$(echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY keys show $VAL_2_KEY_NAME --keyring-backend $KEYRING --home "$VAL_HOME_1" --address)" 152 | export VAL_3_ADDR="$(echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY keys show $VAL_3_KEY_NAME --keyring-backend $KEYRING --home "$VAL_HOME_1" --address)" 153 | fi 154 | echo " + $VAL_1_KEY_NAME: $VAL_1_ADDR" 155 | echo " + $VAL_2_KEY_NAME: $VAL_2_ADDR" 156 | echo " + $VAL_3_KEY_NAME: $VAL_3_ADDR" 157 | echo "- Clone keys to home of other validators" 158 | cp -r "$VAL_HOME_1/keyring-$KEYRING" "$VAL_HOME_2/" 159 | cp -r "$VAL_HOME_1/keyring-$KEYRING" "$VAL_HOME_3/" 160 | 161 | ## Verify 162 | ADDR_PATTERN="$ACCOUNT_PREFIX[0-9][a-z0-9]+" 163 | if [[ $VAL_1_ADDR =~ $ADDR_PATTERN ]]; then 164 | echo 165 | else 166 | echo >&2 "ERR: Validator 1 '$VAL_1_KEY_NAME' wallet address '$VAL_1_ADDR' does not starts with '$ACCOUNT_PREFIX', did you forget to update the 'CHAIN_"$CHAIN_NO"_ACCOUNT_PREFIX' var" 167 | exit 1 168 | fi 169 | 170 | # Calculate balance & stake & claim info for validators 171 | ## Balance 172 | export VAL_1_BALANCE=$(bc <<< "10^$DENOM_EXPONENT * $VAL_1_RAW_BALANCE") 173 | export VAL_2_BALANCE=$(bc <<< "10^$DENOM_EXPONENT * $VAL_2_RAW_BALANCE") 174 | export VAL_3_BALANCE=$(bc <<< "10^$DENOM_EXPONENT * $VAL_3_RAW_BALANCE") 175 | ## Stake 176 | export VAL_1_STAKE=$(bc <<< "10^$DENOM_EXPONENT * $VAL_1_RAW_STAKE") 177 | export VAL_2_STAKE=$(bc <<< "10^$DENOM_EXPONENT * $VAL_2_RAW_STAKE") 178 | export VAL_3_STAKE=$(bc <<< "10^$DENOM_EXPONENT * $VAL_3_RAW_STAKE") 179 | ## Claim 180 | if [ $DISABLE_CLAIM -eq 0 ]; then 181 | export VAL_1_CLAIM=$(bc <<< "10^$DENOM_EXPONENT * $VAL_1_RAW_CLAIM") 182 | export VAL_2_CLAIM=$(bc <<< "10^$DENOM_EXPONENT * $VAL_2_RAW_CLAIM") 183 | export VAL_3_CLAIM=$(bc <<< "10^$DENOM_EXPONENT * $VAL_3_RAW_CLAIM") 184 | else 185 | export VAL_1_CLAIM=0 186 | export VAL_2_CLAIM=0 187 | export VAL_3_CLAIM=0 188 | fi 189 | 190 | # Update genesis.json 191 | GENESIS_JSON="$VAL_HOME_1/config/genesis.json" 192 | GENESIS_JSON_TMP="$VAL_HOME_1/config/tmp_genesis.json" 193 | echo "Updating genesis.json" 194 | ## Change number of validators 195 | if [ ! 
-z "$NUMBER_OF_VALIDATOR" ] && [ $NUMBER_OF_VALIDATOR -gt 0 ]; then 196 | echo "- Limit number of validator of the chain to maximum $NUMBER_OF_VALIDATOR validators at [app_state > staking > params > max_validators]" 197 | cat $GENESIS_JSON | jq '.app_state["staking"]["params"]["max_validators"]="'$NUMBER_OF_VALIDATOR'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 198 | fi 199 | ## Change denom metadata 200 | echo '- Add denom metadata at [app_state > bank > denom_metadata]' 201 | cat $GENESIS_JSON | jq '.app_state["bank"]["denom_metadata"] += [{"description": "The native EVM, governance and staking token of the '$CHAIN_NAME' Hub", "denom_units": [{"denom": "'$MIN_DENOM_SYMBOL'", "exponent": 0}, {"denom": "'$GAS_DENOM_SYMBOL'", "exponent": '$GAS_DENOM_EXPONENT'}, {"denom": "'$DENOM_SYMBOL'", "exponent": '$DENOM_EXPONENT'}],"base": "'$MIN_DENOM_SYMBOL'", "display": "'$DENOM_SYMBOL'", "name": "'$DENOM_SYMBOL'", "symbol": "'$DENOM_SYMBOL'"}]' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 202 | ## Change parameter token denominations to *min denom symbol (eg aevmos)* 203 | echo "- Change token denomination to $MIN_DENOM_SYMBOL" 204 | echo ' + [app_state > staking > params > bond_denom]' 205 | cat $GENESIS_JSON | jq '.app_state["staking"]["params"]["bond_denom"]="'$MIN_DENOM_SYMBOL'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 206 | echo ' + [app_state > crisis > constant_fee > denom]' 207 | cat $GENESIS_JSON | jq '.app_state["crisis"]["constant_fee"]["denom"]="'$MIN_DENOM_SYMBOL'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 208 | echo ' + [app_state > gov > deposit_params > min_deposit[0] > denom]' 209 | cat $GENESIS_JSON | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="'$MIN_DENOM_SYMBOL'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 210 | if [ "$CHAIN_TYPE" = "evmos" ]; then 211 | echo ' + [app_state > evm > params > evm_denom]' 212 | cat $GENESIS_JSON | jq '.app_state["evm"]["params"]["evm_denom"]="'$MIN_DENOM_SYMBOL'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 213 | echo ' + [app_state > inflation > params > mint_denom]' 214 | cat $GENESIS_JSON | jq '.app_state["inflation"]["params"]["mint_denom"]="'$MIN_DENOM_SYMBOL'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 215 | echo ' + [app_state > claims > params > claims_denom]' 216 | cat $GENESIS_JSON | jq '.app_state["claims"]["params"]["claims_denom"]="'$MIN_DENOM_SYMBOL'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 217 | fi 218 | echo ' + [app_state > mint > params > mint_denom]' 219 | cat $GENESIS_JSON | jq '.app_state["mint"]["params"]["mint_denom"]="'$MIN_DENOM_SYMBOL'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 220 | ## Set gas limit 221 | CONS_BLOCK_GAS_LIMIT=10000000 222 | echo "- Set gas limit per block in [consensus_params > block > max_gas] to $CONS_BLOCK_GAS_LIMIT" 223 | cat $GENESIS_JSON | jq '.consensus_params["block"]["max_gas"]="'$CONS_BLOCK_GAS_LIMIT'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 224 | ## Set claims start time 225 | current_date=$(date -u +"%Y-%m-%dT%TZ") 226 | if [ $DISABLE_CLAIM -eq 0 ]; then 227 | echo "- Set claim start time in [app_state > claims > params > airdrop_start_time] to $current_date" 228 | cat $GENESIS_JSON | jq -r --arg current_date "$current_date" '.app_state["claims"]["params"]["airdrop_start_time"]=$current_date' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 229 | fi 230 | ## Set claims records for validator account 
231 | if [ $DISABLE_CLAIM -eq 0 ]; then 232 | echo "- Set claim records for 3 validators in [app_state > claims > claims_records]" 233 | echo " + Validator $VAL_1_ADDR (node 0) can claim "$(bc <<< "$VAL_1_CLAIM / (10^$DENOM_EXPONENT)")$DENOM_SYMBOL 234 | echo " + Validator $VAL_2_ADDR (node 1) can claim "$(bc <<< "$VAL_2_CLAIM / (10^$DENOM_EXPONENT)")$DENOM_SYMBOL 235 | echo " + Validator $VAL_3_ADDR (node 2) can claim "$(bc <<< "$VAL_3_CLAIM / (10^$DENOM_EXPONENT)")$DENOM_SYMBOL 236 | cat $GENESIS_JSON | jq '.app_state["claims"]["claims_records"]=[{"initial_claimable_amount":"'$VAL_1_CLAIM'", "actions_completed":[false, false, false, false],"address":"'$VAL_1_ADDR'"},{"initial_claimable_amount":"'$VAL_2_CLAIM'", "actions_completed":[false, false, false, false],"address":"'$VAL_2_ADDR'"},{"initial_claimable_amount":"'$VAL_3_CLAIM'", "actions_completed":[false, false, false, false],"address":"'$VAL_3_ADDR'"}]' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 237 | fi 238 | ## Set claims decay 239 | duration_until_decay="86400s" 240 | duration_of_decay="2592000s" 241 | echo "- Set duration until decay in [app_state > claims > params > duration_until_decay] to $duration_until_decay" 242 | cat $GENESIS_JSON | jq '.app_state["claims"]["params"]["duration_until_decay"]="'$duration_until_decay'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 243 | echo "- Set duration of decay in [app_state > claims > params > duration_of_decay] to $duration_of_decay" 244 | cat $GENESIS_JSON | jq '.app_state["claims"]["params"]["duration_of_decay"]="'$duration_of_decay'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 245 | if [ $DISABLE_CLAIM -eq 0 ]; then 246 | ## Claim module account: 247 | ### 0xA61808Fe40fEb8B3433778BBC2ecECCAA47c8c47 || evmos15cvq3ljql6utxseh0zau9m8ve2j8erz89m5wkz 248 | amount_to_claim=$(bc <<< "$VAL_1_CLAIM + $VAL_2_CLAIM + $VAL_3_CLAIM") 249 | echo '- Claimn module account addr '$EVMOS_CLAIM_MODULE_ACCOUNT', total '$(bc <<< "$amount_to_claim / (10^$DENOM_EXPONENT)")' '$DENOM_SYMBOL 250 | cat $GENESIS_JSON | jq '.app_state["bank"]["balances"] += [{"address":"'$EVMOS_CLAIM_MODULE_ACCOUNT'","coins":[{"denom":"'$MIN_DENOM_SYMBOL'", "amount":"'$amount_to_claim'"}]}]' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 251 | fi 252 | ## Gov deposit 253 | echo '- Set minimum deposit to '$MINIMUM_GOV_DEPOSIT' '$DENOM_SYMBOL' by setting [app_state > gov > deposit_params > min_deposit[0] > amount]' 254 | cat $GENESIS_JSON | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["amount"]="'$(bc <<< "$MINIMUM_GOV_DEPOSIT * (10^$DENOM_EXPONENT)")'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 255 | ## Gov voting period 256 | echo '- Set voting period to '$VOTING_PERIOID_IN_MINUTES' minutes by setting [app_state > gov > voting_params > voting_period]' 257 | cat $GENESIS_JSON | jq '.app_state["gov"]["voting_params"]["voting_period"]="'$VOTING_PERIOID_IN_MINUTES'm"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 258 | 259 | # Update app.toml 260 | update_app() { 261 | VAL_HOME=$1 262 | APP_TOML="$VAL_HOME/config/app.toml" 263 | APP_TOML_TMP="$VAL_HOME/config/tmp_app.toml" 264 | echo "Updating app.toml in $VAL_HOME" 265 | echo '- Enable API by setting [api > enable] to "true"' 266 | cat $APP_TOML | tomlq '.api["enable"]=true' --toml-output > $APP_TOML_TMP && mv $APP_TOML_TMP $APP_TOML 267 | echo '- Enable Swagger (access via http://host/swagger/) by setting [api > swagger] to "true"' 268 | cat $APP_TOML | tomlq '.api["swagger"]=true' 
--toml-output > $APP_TOML_TMP && mv $APP_TOML_TMP $APP_TOML 269 | echo "- Bind API to 0.0.0.0:1317 by updating [api > address]" 270 | cat $APP_TOML | tomlq '.api["address"]="tcp://0.0.0.0:1317"' --toml-output > $APP_TOML_TMP && mv $APP_TOML_TMP $APP_TOML 271 | echo "- Bind Json-RPC to 0.0.0.0:8545 by updating [json-rpc > address]" 272 | cat $APP_TOML | tomlq '."json-rpc"["address"]="0.0.0.0:8545"' --toml-output > $APP_TOML_TMP && mv $APP_TOML_TMP $APP_TOML 273 | echo "- Bind Websocket Json-RPC to 0.0.0.0:8546 by updating [json-rpc > ws-address]" 274 | cat $APP_TOML | tomlq '."json-rpc"["ws-address"]="0.0.0.0:8546"' --toml-output > $APP_TOML_TMP && mv $APP_TOML_TMP $APP_TOML 275 | echo "- Bind gRPC to 0.0.0.0:9090 by updating [grpc > address]" 276 | cat $APP_TOML | tomlq '.grpc["address"]="0.0.0.0:9090"' --toml-output > $APP_TOML_TMP && mv $APP_TOML_TMP $APP_TOML 277 | } 278 | 279 | update_app $VAL_HOME_1 280 | update_app $VAL_HOME_2 281 | update_app $VAL_HOME_3 282 | 283 | # Allocate genesis accounts 284 | echo 'Allocating genesis accounts' 285 | if [ "$KEYRING" = "test" ]; then 286 | $BINARY add-genesis-account "$VAL_1_KEY_NAME" "$VAL_1_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_1" 287 | $BINARY add-genesis-account "$VAL_2_KEY_NAME" "$VAL_2_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_1" 288 | $BINARY add-genesis-account "$VAL_3_KEY_NAME" "$VAL_3_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_1" 289 | # To generate create validator tx for validator 2 & 3 290 | $BINARY add-genesis-account "$VAL_2_KEY_NAME" "$VAL_2_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_2" 291 | $BINARY add-genesis-account "$VAL_3_KEY_NAME" "$VAL_3_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_3" 292 | else 293 | echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY add-genesis-account "$VAL_1_KEY_NAME" "$VAL_1_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_1" 294 | echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY add-genesis-account "$VAL_2_KEY_NAME" "$VAL_2_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_1" 295 | echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY add-genesis-account "$VAL_3_KEY_NAME" "$VAL_3_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_1" 296 | # To generate create validator tx for validator 2 & 3 297 | echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY add-genesis-account "$VAL_2_KEY_NAME" "$VAL_2_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_2" 298 | echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY add-genesis-account "$VAL_3_KEY_NAME" "$VAL_3_BALANCE"$MIN_DENOM_SYMBOL --keyring-backend $KEYRING --home "$VAL_HOME_3" 299 | fi 300 | 301 | # Update total supply + claim values in genesis.json 302 | total_supply=$(bc <<< "$VAL_1_BALANCE + $VAL_2_BALANCE + $VAL_3_BALANCE + $VAL_1_CLAIM + $VAL_2_CLAIM + $VAL_3_CLAIM") 303 | echo 'Update original total supply = '$(bc <<< "$total_supply / (10^$DENOM_EXPONENT)")' '$DENOM_SYMBOL' into [app_state > bank > supply[0] > amount]' 304 | cat $GENESIS_JSON | jq '.app_state["bank"]["supply"][0]["amount"]="'$total_supply'"' > $GENESIS_JSON_TMP && mv $GENESIS_JSON_TMP $GENESIS_JSON 305 | 306 | # Sign genesis transaction 307 | echo 'Generate genesis staking transaction '$(bc <<< "$VAL_1_STAKE / (10^$DENOM_EXPONENT)")' '$DENOM_SYMBOL' for validator '$VAL_1_KEY_NAME 308 | if [ "$KEYRING" = "test" ]; then 309 | $BINARY gentx 
$VAL_1_KEY_NAME "$VAL_1_STAKE"$MIN_DENOM_SYMBOL \ 310 | --commission-rate="$VAL_COMMISSION_RATE" \ 311 | --commission-max-rate="$VAL_COMMISSION_RATE_MAX" \ 312 | --commission-max-change-rate="$VAL_COMMISSION_CHANGE_RATE_MAX" \ 313 | --min-self-delegation="$VAL_MIN_SELF_DELEGATION" \ 314 | --keyring-backend $KEYRING \ 315 | --chain-id $CHAIN_ID \ 316 | --home $VAL_HOME_1 > /dev/null 2>&1 317 | else 318 | echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY gentx $VAL_1_KEY_NAME "$VAL_1_STAKE"$MIN_DENOM_SYMBOL \ 319 | --commission-rate="$VAL_COMMISSION_RATE" \ 320 | --commission-max-rate="$VAL_COMMISSION_RATE_MAX" \ 321 | --commission-max-change-rate="$VAL_COMMISSION_CHANGE_RATE_MAX" \ 322 | --min-self-delegation="$VAL_MIN_SELF_DELEGATION" \ 323 | --keyring-backend $KEYRING \ 324 | --chain-id $CHAIN_ID \ 325 | --home $VAL_HOME_1 > /dev/null 2>&1 326 | fi 327 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to create genesis tx for validator 1"; exit 1; } 328 | 329 | echo 'Generate genesis staking transaction '$(bc <<< "$VAL_2_STAKE / (10^$DENOM_EXPONENT)")' '$DENOM_SYMBOL' for validator '$VAL_2_KEY_NAME 330 | if [ "$KEYRING" = "test" ]; then 331 | $BINARY gentx $VAL_2_KEY_NAME "$VAL_2_STAKE"$MIN_DENOM_SYMBOL \ 332 | --commission-rate="$VAL_COMMISSION_RATE" \ 333 | --commission-max-rate="$VAL_COMMISSION_RATE_MAX" \ 334 | --commission-max-change-rate="$VAL_COMMISSION_CHANGE_RATE_MAX" \ 335 | --min-self-delegation="$VAL_MIN_SELF_DELEGATION" \ 336 | --keyring-backend $KEYRING \ 337 | --chain-id $CHAIN_ID \ 338 | --home $VAL_HOME_2 > /dev/null 2>&1 339 | else 340 | echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY gentx $VAL_2_KEY_NAME "$VAL_2_STAKE"$MIN_DENOM_SYMBOL \ 341 | --commission-rate="$VAL_COMMISSION_RATE" \ 342 | --commission-max-rate="$VAL_COMMISSION_RATE_MAX" \ 343 | --commission-max-change-rate="$VAL_COMMISSION_CHANGE_RATE_MAX" \ 344 | --min-self-delegation="$VAL_MIN_SELF_DELEGATION" \ 345 | --keyring-backend $KEYRING \ 346 | --chain-id $CHAIN_ID \ 347 | --home $VAL_HOME_2 > /dev/null 2>&1 348 | fi 349 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to create genesis tx for validator 2"; exit 1; } 350 | echo "Copy generated tx to $VAL_HOME_1/config/gentx" 351 | cp $VAL_HOME_2/config/gentx/gentx-* $VAL_HOME_1/config/gentx/ 352 | 353 | echo 'Generate genesis staking transaction '$(bc <<< "$VAL_3_STAKE / (10^$DENOM_EXPONENT)")' '$DENOM_SYMBOL' for validator '$VAL_3_KEY_NAME 354 | if [ "$KEYRING" = "test" ]; then 355 | $BINARY gentx $VAL_3_KEY_NAME "$VAL_3_STAKE"$MIN_DENOM_SYMBOL \ 356 | --commission-rate="$VAL_COMMISSION_RATE" \ 357 | --commission-max-rate="$VAL_COMMISSION_RATE_MAX" \ 358 | --commission-max-change-rate="$VAL_COMMISSION_CHANGE_RATE_MAX" \ 359 | --min-self-delegation="$VAL_MIN_SELF_DELEGATION" \ 360 | --keyring-backend $KEYRING \ 361 | --chain-id $CHAIN_ID \ 362 | --home $VAL_HOME_3 > /dev/null 2>&1 363 | else 364 | echo "$VAL_KEYRING_FILE_ENCRYPTION_PASSWORD" | $BINARY gentx $VAL_3_KEY_NAME "$VAL_3_STAKE"$MIN_DENOM_SYMBOL \ 365 | --commission-rate="$VAL_COMMISSION_RATE" \ 366 | --commission-max-rate="$VAL_COMMISSION_RATE_MAX" \ 367 | --commission-max-change-rate="$VAL_COMMISSION_CHANGE_RATE_MAX" \ 368 | --min-self-delegation="$VAL_MIN_SELF_DELEGATION" \ 369 | --keyring-backend $KEYRING \ 370 | --chain-id $CHAIN_ID \ 371 | --home $VAL_HOME_3 > /dev/null 2>&1 372 | fi 373 | [ $? 
-eq 0 ] || { echo >&2 "ERR: Failed to create genesis tx for validator 3"; exit 1; } 374 | echo "Copy generated tx to $VAL_HOME_1/config/gentx" 375 | cp $VAL_HOME_3/config/gentx/gentx-* $VAL_HOME_1/config/gentx/ 376 | 377 | # Collect genesis tx to genesis.json 378 | echo "Collecting genesis transactions into genesis.json" 379 | $BINARY collect-gentxs --home $VAL_HOME_1 > /dev/null 2>&1 380 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to collect genesis transactions"; exit 1; } 381 | 382 | # Validate genesis.json 383 | $BINARY validate-genesis --home $VAL_HOME_1 384 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to validate genesis"; exit 1; } 385 | 386 | 387 | # Update config.toml 388 | TENDERMINT_NODE_ID_1=$($BINARY tendermint show-node-id --home $VAL_HOME_1) 389 | TENDERMINT_NODE_ID_2=$($BINARY tendermint show-node-id --home $VAL_HOME_2) 390 | TENDERMINT_NODE_ID_3=$($BINARY tendermint show-node-id --home $VAL_HOME_3) 391 | update_config() { 392 | VAL_HOME=$1 393 | CONFIG_TOML="$VAL_HOME/config/config.toml" 394 | CONFIG_TOML_TMP="$VAL_HOME/config/tmp_config.toml" 395 | echo "Updating config.toml in $VAL_HOME" 396 | ## Update seed nodes 397 | echo '- Add seeds to [p2p > seeds]' 398 | cat $CONFIG_TOML | tomlq '.p2p["seeds"]="'$TENDERMINT_NODE_ID_1'@'$CONTAINER_PREFIX'0:26656,'$TENDERMINT_NODE_ID_2'@'$CONTAINER_PREFIX'1:26656,'$TENDERMINT_NODE_ID_3'@'$CONTAINER_PREFIX'2:26656"' --toml-output > $CONFIG_TOML_TMP && mv $CONFIG_TOML_TMP $CONFIG_TOML 399 | echo '- Add persistent peers to [p2p > persistent_peers]' 400 | cat $CONFIG_TOML | tomlq '.p2p["persistent_peers"]="'$TENDERMINT_NODE_ID_1'@'$CONTAINER_PREFIX'0:26656,'$TENDERMINT_NODE_ID_2'@'$CONTAINER_PREFIX'1:26656,'$TENDERMINT_NODE_ID_3'@'$CONTAINER_PREFIX'2:26656"' --toml-output > $CONFIG_TOML_TMP && mv $CONFIG_TOML_TMP $CONFIG_TOML 401 | ## Disable create empty block 402 | ###echo '- Disable create empty block by setting [root > create_empty_blocks] to false' 403 | ###cat $CONFIG_TOML | tomlq '.["create_empty_blocks"]=false' --toml-output > $CONFIG_TOML_TMP && mv $CONFIG_TOML_TMP $CONFIG_TOML 404 | ## Expose ports 405 | echo "- Bind RPC to 0.0.0.0:26657 by updating [rpc > laddr]" 406 | cat $CONFIG_TOML | tomlq '.rpc["laddr"]="tcp://0.0.0.0:26657"' --toml-output > $CONFIG_TOML_TMP && mv $CONFIG_TOML_TMP $CONFIG_TOML 407 | echo "- Bind Peer to 0.0.0.0:26656 by updating [p2p > laddr]" 408 | cat $CONFIG_TOML | tomlq '.p2p["laddr"]="tcp://0.0.0.0:26656"' --toml-output > $CONFIG_TOML_TMP && mv $CONFIG_TOML_TMP $CONFIG_TOML 409 | } 410 | 411 | update_config $VAL_HOME_1 412 | update_config $VAL_HOME_2 413 | update_config $VAL_HOME_3 414 | 415 | # Copy 416 | echo 'Copy genesis.json' 417 | echo '- Copying genesis.json from node 0 to node 1' 418 | cp "$VAL_HOME_1/config/genesis.json" "$VAL_HOME_2/config/genesis.json" 419 | echo '- Copying genesis.json from node 0 to node 2' 420 | cp "$VAL_HOME_1/config/genesis.json" "$VAL_HOME_3/config/genesis.json" 421 | 422 | # Update config.toml part 3 423 | echo "Updating config.toml part 3" 424 | echo '- Clean seeds of node 0' 425 | cat $CONFIG_TOML | tomlq '.p2p["seeds"]=""' --toml-output > $CONFIG_TOML_TMP && mv $CONFIG_TOML_TMP $CONFIG_TOML 426 | 427 | echo '### Done, you can move to next step' 428 | -------------------------------------------------------------------------------- /blockchain-in-docker/2_build-docker-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | command -v docker > /dev/null 2>&1 || { echo >&2 "ERR: docker is required"; exit 1; } 4 | 
command -v 'docker-compose' > /dev/null 2>&1 || { echo >&2 "ERR: docker-compose is required"; exit 1; } 5 | 6 | source ../env.sh 7 | 8 | CHAIN_NO=$1 9 | 10 | if [ -f "./override-env.sh" ]; then 11 | source "./override-env.sh" 12 | fi 13 | 14 | if [ -f "./_config.sh" ]; then 15 | source "./_config.sh" 16 | else 17 | echo >&2 "ERR: Wrong working directory" 18 | echo >&2 "Scripts must be executed within [blockchain-in-docker] directory" 19 | exit 1 20 | fi 21 | 22 | # Validate input 23 | if [ "$CHAIN_NO" = "1" ]; then 24 | echo "Chain 1" 25 | export PORT_26657="$CHAIN_1_EXPOSE_RPC_TO_PORT" 26 | export PORT_9090="$CHAIN_1_EXPOSE_GRPC_TO_PORT" 27 | export PORT_8545="$CHAIN_1_EXPOSE_JSON_RPC_TO_PORT" 28 | export PORT_8546="$CHAIN_1_EXPOSE_WEBSOCKET_JSON_RPC_TO_PORT" 29 | export PORT_1317="$CHAIN_1_EXPOSE_REST_API_TO_PORT" 30 | export PORT_26656="$CHAIN_1_EXPOSE_P2P_TO_PORT" 31 | elif [ "$CHAIN_NO" = "2" ]; then 32 | echo "Chain 2" 33 | export PORT_26657="$CHAIN_2_EXPOSE_RPC_TO_PORT" 34 | export PORT_9090="$CHAIN_2_EXPOSE_GRPC_TO_PORT" 35 | export PORT_8545="$CHAIN_2_EXPOSE_JSON_RPC_TO_PORT" 36 | export PORT_8546="$CHAIN_2_EXPOSE_WEBSOCKET_JSON_RPC_TO_PORT" 37 | export PORT_1317="$CHAIN_2_EXPOSE_REST_API_TO_PORT" 38 | export PORT_26656="$CHAIN_2_EXPOSE_P2P_TO_PORT" 39 | else 40 | echo >&2 'ERR: Missing or incorrect chain no as first argument, valid input is 1 or 2' 41 | echo >&2 'For example:' 42 | echo >&2 " $0 1" 43 | echo >&2 " or: $0 2" 44 | exit 1 45 | fi 46 | 47 | export DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME_PREFIX''$CHAIN_NO 48 | DOCKER_COMPOSE_FILE="network$CHAIN_NO.yml" 49 | 50 | if [ -f "$DOCKER_COMPOSE_FILE" ]; then 51 | docker-compose -f "$DOCKER_COMPOSE_FILE" down 52 | fi 53 | 54 | # Check source 55 | if [ -d "$SOURCE_CODE_DIR" ]; then 56 | echo "$CHAIN_NAME repo exists at $SOURCE_CODE_DIR" 57 | echo "Checking repo url & branch name" 58 | CHK_RES_1="$(git --git-dir "./$SOURCE_CODE_DIR"/.git --work-tree "./$SOURCE_CODE_DIR" config --get remote.origin.url)" 59 | if [ $? -ne 0 ] || [ -z "$CHK_RES_1" ]; then 60 | echo "WARN! Unable to check remote origin url of git repo at $SOURCE_CODE_DIR" 61 | sleep 2 62 | elif [ "$CHK_RES_1" != "$GIT_REPO" ]; then 63 | echo "WARN! Git repo Url does not match" 64 | echo "Expected: '$GIT_REPO'" 65 | echo "Actual: '$CHK_RES_1'" 66 | echo "You should check it (script will continue execution after 10s)" 67 | sleep 10 68 | fi 69 | CHK_RES_2="$(git --git-dir "./$SOURCE_CODE_DIR"/.git --work-tree "./$SOURCE_CODE_DIR" rev-parse --abbrev-ref HEAD)" 70 | if [ $? -ne 0 ] || [ -z "$CHK_RES_2" ]; then 71 | echo "WARN! Unable to check branch of git repo at $SOURCE_CODE_DIR" 72 | sleep 2 73 | elif [ "$CHK_RES_2" = "HEAD" ]; then 74 | echo "WARN! Can not check branch" 75 | elif [ "$CHK_RES_2" != "$GIT_BRANCH" ]; then 76 | echo "WARN! Git Branch does not match" 77 | echo "Expected: '$GIT_BRANCH'" 78 | echo "Actual: '$CHK_RES_2'" 79 | echo "You should check it (script will continue execution after 10s)" 80 | sleep 10 81 | fi 82 | else 83 | echo "Downloading $CHAIN_NAME source code $GIT_BRANCH" 84 | git clone "$GIT_REPO" --branch "$GIT_BRANCH" --single-branch "$SOURCE_CODE_DIR" 85 | 86 | if [ $? 
-ne 0 ]; then 87 | echo >&2 "ERR: Git clone $CHAIN_NAME from branch $GIT_BRANCH has failed" 88 | exit 1 89 | fi 90 | fi 91 | 92 | # Remove previous image 93 | echo "Remove previous docker image $DOCKER_IMAGE_NAME" 94 | docker rmi "$DOCKER_IMAGE_NAME" 95 | 96 | # Create Dockerfile 97 | DOCKER_FILE="Dockerfile$CHAIN_NO" 98 | echo "Creating docker file: $DOCKER_FILE" 99 | cp template.DockerfileX "$DOCKER_FILE" 100 | if [[ "$OSTYPE" == "darwin"* ]]; then 101 | sed -i '' "s,_p_src_dir_,$SOURCE_CODE_DIR,g" "$DOCKER_FILE" 102 | sed -i '' "s/_p_daemon_binary_/$DAEMON_BINARY_NAME/g" "$DOCKER_FILE" 103 | sed -i '' "s/_p_home_prefix_/$VAL_HOME_PREFIX/g" "$DOCKER_FILE" 104 | sed -i '' "s/_p_user_name_/$USER/g" "$DOCKER_FILE" 105 | sed -i '' "s/_p_user_id_/$(id -u)/g" "$DOCKER_FILE" 106 | sed -i '' "s/_p_group_id_/$(id -g)/g" "$DOCKER_FILE" 107 | else 108 | sed -i "s,_p_src_dir_,$SOURCE_CODE_DIR,g" "$DOCKER_FILE" 109 | sed -i "s/_p_daemon_binary_/$DAEMON_BINARY_NAME/g" "$DOCKER_FILE" 110 | sed -i "s/_p_home_prefix_/$VAL_HOME_PREFIX/g" "$DOCKER_FILE" 111 | sed -i "s/_p_user_name_/$USER/g" "$DOCKER_FILE" 112 | sed -i "s/_p_user_id_/$(id -u)/g" "$DOCKER_FILE" 113 | sed -i "s/_p_group_id_/$(id -g)/g" "$DOCKER_FILE" 114 | fi 115 | 116 | # Docker build 117 | echo "Build new docker image $DOCKER_IMAGE_NAME" 118 | docker build -t "$DOCKER_IMAGE_NAME" -f "$DOCKER_FILE" . 119 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to build docker image"; exit 1; } 120 | 121 | # Create docker-compose yml 122 | DOCKER_COMPOSE_FILE="network$CHAIN_NO.yml" 123 | echo "Creating docker compose file: $DOCKER_COMPOSE_FILE" 124 | cp template.networkX.yml "$DOCKER_COMPOSE_FILE" 125 | if [[ "$OSTYPE" == "darwin"* ]]; then 126 | sed -i '' "s/_p_chain_no_/$CHAIN_NO/g" "$DOCKER_COMPOSE_FILE" 127 | sed -i '' "s/_p_26657_/$PORT_26657/g" "$DOCKER_COMPOSE_FILE" 128 | sed -i '' "s/_p_9090_/$PORT_9090/g" "$DOCKER_COMPOSE_FILE" 129 | sed -i '' "s/_p_8545_/$PORT_8545/g" "$DOCKER_COMPOSE_FILE" 130 | sed -i '' "s/_p_8546_/$PORT_8546/g" "$DOCKER_COMPOSE_FILE" 131 | sed -i '' "s/_p_1317_/$PORT_1317/g" "$DOCKER_COMPOSE_FILE" 132 | sed -i '' "s/_p_26656_/$PORT_26656/g" "$DOCKER_COMPOSE_FILE" 133 | sed -i '' "s/_p_image_prefix_/$DOCKER_IMAGE_NAME_PREFIX/g" "$DOCKER_COMPOSE_FILE" 134 | sed -i '' "s/_p_home_prefix_/$VAL_HOME_PREFIX/g" "$DOCKER_COMPOSE_FILE" 135 | else 136 | sed -i "s/_p_chain_no_/$CHAIN_NO/g" "$DOCKER_COMPOSE_FILE" 137 | sed -i "s/_p_26657_/$PORT_26657/g" "$DOCKER_COMPOSE_FILE" 138 | sed -i "s/_p_9090_/$PORT_9090/g" "$DOCKER_COMPOSE_FILE" 139 | sed -i "s/_p_8545_/$PORT_8545/g" "$DOCKER_COMPOSE_FILE" 140 | sed -i "s/_p_8546_/$PORT_8546/g" "$DOCKER_COMPOSE_FILE" 141 | sed -i "s/_p_1317_/$PORT_1317/g" "$DOCKER_COMPOSE_FILE" 142 | sed -i "s/_p_26656_/$PORT_26656/g" "$DOCKER_COMPOSE_FILE" 143 | sed -i "s/_p_image_prefix_/$DOCKER_IMAGE_NAME_PREFIX/g" "$DOCKER_COMPOSE_FILE" 144 | sed -i "s/_p_home_prefix_/$VAL_HOME_PREFIX/g" "$DOCKER_COMPOSE_FILE" 145 | fi 146 | echo '- Expose ports:' 147 | echo "+ 26657 => $PORT_26657" 148 | echo "+ 9090 => $PORT_9090" 149 | echo "+ 8545 => $PORT_8545" 150 | echo "+ 8546 => $PORT_8546" 151 | echo "+ 1317 => $PORT_1317" 152 | echo "+ 26656 => $PORT_26656" 153 | 154 | # Finish 155 | echo 'Done' 156 | echo 'You can start them now' 157 | echo "$ docker-compose -f \"$DOCKER_COMPOSE_FILE\" up -d" 158 | -------------------------------------------------------------------------------- /blockchain-in-docker/README.md: -------------------------------------------------------------------------------- 1 | ### This helps you
create 2 EVMOS chains ([v12.1.6](https://github.com/evmos/evmos/tree/v12.1.6)) with 3 validators on each chain and run with docker compose 2 | 3 | ### I. Create first chain 4 | 5 | > $ ./1_prepare-genesis.sh 1 6 | 7 | > $ ./2_build-docker-image.sh 1 8 | 9 | > $ docker-compose -f network1.yml up -d 10 | 11 | To turn it off 12 | > $ docker-compose -f network1.yml down 13 | 14 | ### II. Create second chain 15 | 16 | > $ ./1_prepare-genesis.sh 2 17 | 18 | > $ ./2_build-docker-image.sh 2 19 | 20 | > $ docker-compose -f network2.yml up -d 21 | 22 | To turn it off 23 | > $ docker-compose -f network2.yml down 24 | 25 | Now you can [follow this sample](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/tree/main/hermes-as-ibc-relayer) to create an IBC relayer to connect them 26 | 27 | ### III. Network stats 28 | 29 | | Network | Chain ID | RPC | Json RPC | Websocket Json RPC | REST | gRPC | P2P | 30 | | --- | --- | --- | --- | --- | --- | --- | --- | 31 | | 1 | evmos_9000-5 | 26657 | 8545 | 8546 | 1317 | 9090 | 26656 | 32 | | 2 | evmos_9000-6 | 36657 | 18545 | 18546 | 11317 | 19090 | 36656 | 33 | 34 | - Validators (same on both chains) 35 | + evmosvaloper1wuqvcpuunf7r5rg7xutqddhw55grfzc7ewkz9a 36 | + 50m EVMOS 37 | + 7k coin staked 38 | + EVMOS wallet addr: evmos1wuqvcpuunf7r5rg7xutqddhw55grfzc75qejyq 39 | + Mnemonic: spoil senior door access upset floor decorate shield high punch senior tape pigeon base slogan height clever buffalo cat report poem weapon labor satoshi 40 | + ETH private key: FC3F58B007A017166DE5A340C7A2641EB37CF37081D6F9013636CEBFBAF7B1FE 41 | + Key name: val1 42 | + evmosvaloper1zxgt4pwzzsv02z24g80lc5rhtsp0prw046yxss 43 | + 50m EVMOS 44 | + 3k coin staked 45 | + EVMOS wallet addr: evmos1zxgt4pwzzsv02z24g80lc5rhtsp0prw0c5tk3d 46 | + Mnemonic: width produce brush hour horse retreat play flag fresh broken measure culture scare broken erupt pilot buzz embody depend topic behind rigid fan battle 47 | + ETH private key: 0172DC491B5ACD04DD378D3FD8FD9F41A0D701E070941474FADECD72E1E085B9 48 | + Key name: val2 49 | + evmosvaloper1vcy9v4jp0sd4hysqqcuwleytxre3ms4cmv5ajl 50 | + 50m EVMOS 51 | + 3k coin staked 52 | + EVMOS wallet addr: evmos1vcy9v4jp0sd4hysqqcuwleytxre3ms4ckzmdnz 53 | + Mnemonic: stage grid emotion thumb safe myth chair dizzy beyond casual select polar hover retire master neglect shift zero trigger section token replace truly father 54 | + ETH private key: E0D83C6054597638469CC91A46F14B7F62705297912524059629E4674302928F 55 | + Key name: val3 56 | 57 | ### IV. 
Customization 58 | You can customize your chain just by editing keys in the [env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/env.sh) file 59 | 60 | Here are some of them: 61 | 62 | | Key | Default value | Explanation | 63 | | --- | --- | --- | 64 | | `CHAIN_1_GIT_REPO` | https://github.com/evmos/evmos.git | Git repo to be used to build chain 1 | 65 | | `CHAIN_1_GIT_REPO_BRANCH` | v12.1.6 | Git branch to be used to build chain 1 | 66 | | `CHAIN_1_ID` | evmos_9000-5 | Chain id of chain 1 | 67 | | `CHAIN_1_TYPE` | evmos | Chain type of chain 1 | 68 | | `CHAIN_1_COINTYPE` | 60 | [HD key derivation path](https://docs.evmos.org/protocol/concepts/accounts#evmos-accounts) for chain 1's accounts | 69 | | `CHAIN_2_GIT_REPO` | https://github.com/evmos/evmos.git | Git repo to be used to build chain 2 | 70 | | `CHAIN_2_GIT_REPO_BRANCH` | v12.1.6 | Git branch to be used to build chain 2 | 71 | | `CHAIN_2_ID` | evmos_9000-6 | Chain id of chain 2 | 72 | | `CHAIN_2_TYPE` | evmos | Chain type of chain 2 | 73 | | `CHAIN_2_COINTYPE` | 60 | [HD key derivation path](https://docs.evmos.org/protocol/concepts/accounts#evmos-accounts) for chain 2's accounts | 74 | 75 | Performance: the 2 chains created by this sample can run in parallel smoothly on a 4-core, 4 GB RAM machine 76 | 77 | This sample uses keyring-backend `test`; you may need to change it to `file` ([env.sh](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/blob/main/env.sh)) to secure your test env if needed 78 | 79 | ### V. Mapped volumes 80 | 81 | | Network | Chain ID | Container name | Home dir within container | Mapped home dir in host machine | 82 | | --- | --- | --- | --- | --- | 83 | | 1 | evmos_9000-5 | vtevmos10 (val1) | /.evmosd1 | /path/to/EVMOS-sample-scripts/blockchain-in-docker/.evmosd10 | 84 | | 1 | evmos_9000-5 | vtevmos11 (val2) | /.evmosd1 | /path/to/EVMOS-sample-scripts/blockchain-in-docker/.evmosd11 | 85 | | 1 | evmos_9000-5 | vtevmos12 (val3) | /.evmosd1 | /path/to/EVMOS-sample-scripts/blockchain-in-docker/.evmosd12 | 86 | | 2 | evmos_9000-6 | vtevmos20 (val1) | /.evmosd2 | /path/to/EVMOS-sample-scripts/blockchain-in-docker/.evmosd20 | 87 | | 2 | evmos_9000-6 | vtevmos21 (val2) | /.evmosd2 | /path/to/EVMOS-sample-scripts/blockchain-in-docker/.evmosd21 | 88 | | 2 | evmos_9000-6 | vtevmos22 (val3) | /.evmosd2 | /path/to/EVMOS-sample-scripts/blockchain-in-docker/.evmosd22 | 89 | 90 | The [gov-sample-proposals](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/tree/main/blockchain-in-docker/gov-sample-proposals) directory is also mapped to the `/gov-sample-proposals` volume within each container 91 | -------------------------------------------------------------------------------- /blockchain-in-docker/_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$CHAIN_NO" = "1" ]; then 4 | export CHAIN_TYPE="$CHAIN_1_TYPE" 5 | export CHAIN_ID="$CHAIN_1_ID" 6 | export HD_COINTYPE=$CHAIN_1_COINTYPE 7 | export GIT_REPO="$CHAIN_1_GIT_REPO" 8 | export GIT_BRANCH="$CHAIN_1_GIT_REPO_BRANCH" 9 | export DAEMON_BINARY_NAME="$CHAIN_1_DAEMON_BINARY_NAME" 10 | export DENOM_EXPONENT=$CHAIN_1_DENOM_EXPONENT 11 | export GAS_DENOM_EXPONENT=$CHAIN_1_GAS_DENOM_EXPONENT 12 | export DENOM_SYMBOL="$CHAIN_1_DENOM_SYMBOL" 13 | export MIN_DENOM_SYMBOL="$CHAIN_1_MIN_DENOM_SYMBOL" 14 | export GAS_DENOM_SYMBOL="$CHAIN_1_GAS_DENOM_SYMBOL" 15 | export VAL_1_SEED="$VAL_1_CHAIN_1_SEED" 16 | export VAL_2_SEED="$VAL_2_CHAIN_1_SEED" 17 | export VAL_3_SEED="$VAL_3_CHAIN_1_SEED" 18 | export
ACCOUNT_PREFIX="$CHAIN_1_ACCOUNT_PREFIX" 19 | export EVMOS_CLAIM_MODULE_ACCOUNT="$EVMOS_CHAIN_1_CLAIM_MODULE_ACCOUNT" 20 | elif [ "$CHAIN_NO" = "2" ]; then 21 | export CHAIN_TYPE="$CHAIN_2_TYPE" 22 | export CHAIN_ID="$CHAIN_2_ID" 23 | export HD_COINTYPE=$CHAIN_2_COINTYPE 24 | export GIT_REPO="$CHAIN_2_GIT_REPO" 25 | export GIT_BRANCH="$CHAIN_2_GIT_REPO_BRANCH" 26 | export DAEMON_BINARY_NAME="$CHAIN_2_DAEMON_BINARY_NAME" 27 | export DENOM_EXPONENT=$CHAIN_2_DENOM_EXPONENT 28 | export GAS_DENOM_EXPONENT=$CHAIN_2_GAS_DENOM_EXPONENT 29 | export DENOM_SYMBOL="$CHAIN_2_DENOM_SYMBOL" 30 | export MIN_DENOM_SYMBOL="$CHAIN_2_MIN_DENOM_SYMBOL" 31 | export GAS_DENOM_SYMBOL="$CHAIN_2_GAS_DENOM_SYMBOL" 32 | export VAL_1_SEED="$VAL_1_CHAIN_2_SEED" 33 | export VAL_2_SEED="$VAL_2_CHAIN_2_SEED" 34 | export VAL_3_SEED="$VAL_3_CHAIN_2_SEED" 35 | export ACCOUNT_PREFIX="$CHAIN_2_ACCOUNT_PREFIX" 36 | export EVMOS_CLAIM_MODULE_ACCOUNT="$EVMOS_CHAIN_2_CLAIM_MODULE_ACCOUNT" 37 | fi 38 | 39 | export SOURCE_CODE_DIR="./source-code-$DENOM_SYMBOL-$GIT_BRANCH" 40 | export CHAIN_NAME=$(echo $DENOM_SYMBOL | tr '[:lower:]' '[:upper:]') 41 | export VAL_HOME_PREFIX=".evmosd$CHAIN_NO" 42 | export CHAIN_TYPE=$(echo $CHAIN_TYPE | tr '[:upper:]' '[:lower:]') 43 | 44 | if [ "$CHAIN_TYPE" = "evmos" ]; then 45 | export DISABLE_CLAIM=0 46 | else 47 | export DISABLE_CLAIM=1 48 | fi 49 | 50 | echo "Creating $CHAIN_NAME network with chain id $CHAIN_ID" 51 | echo "- Account prefix: $ACCOUNT_PREFIX" 52 | echo "- Chain type: $CHAIN_TYPE" 53 | echo "- Denom symbol: $DENOM_SYMBOL ($DENOM_EXPONENT digits unit: $MIN_DENOM_SYMBOL, $GAS_DENOM_EXPONENT digits gas unit: $GAS_DENOM_SYMBOL)" 54 | echo "- Source code on repo: $GIT_REPO" 55 | echo "- Source code on branch: $GIT_BRANCH" 56 | echo "- Download source code to dir: $SOURCE_CODE_DIR" 57 | echo "- Expected daemon binary name: $DAEMON_BINARY_NAME" 58 | if [ $DISABLE_CLAIM -eq 0 ]; then 59 | echo "- Claim module account: $EVMOS_CLAIM_MODULE_ACCOUNT" 60 | fi -------------------------------------------------------------------------------- /blockchain-in-docker/_make_binary.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SOURCE_CODE_DIR="./source-code-$DENOM_SYMBOL-$GIT_BRANCH" 4 | 5 | # If the repo differs from the config, show a warning 6 | if [ -d "$SOURCE_CODE_DIR" ]; then 7 | echo "$CHAIN_NAME repo exists at $SOURCE_CODE_DIR" 8 | echo "Checking repo url & branch name" 9 | CHK_RES_1="$(git --git-dir "./$SOURCE_CODE_DIR"/.git --work-tree "./$SOURCE_CODE_DIR" config --get remote.origin.url)" 10 | if [ $? -ne 0 ] || [ -z "$CHK_RES_1" ]; then 11 | echo "WARN! Unable to check remote origin url of git repo at $SOURCE_CODE_DIR" 12 | sleep 2 13 | elif [ "$CHK_RES_1" != "$GIT_REPO" ]; then 14 | echo "WARN! Git repo URL does not match" 15 | echo "Expected: '$GIT_REPO'" 16 | echo "Actual: '$CHK_RES_1'" 17 | echo "You should check it (script will continue execution after 10s)" 18 | sleep 10 19 | fi 20 | CHK_RES_2="$(git --git-dir "./$SOURCE_CODE_DIR"/.git --work-tree "./$SOURCE_CODE_DIR" rev-parse --abbrev-ref HEAD)" 21 | if [ $? -ne 0 ] || [ -z "$CHK_RES_2" ]; then 22 | echo "WARN! Unable to check branch of git repo at $SOURCE_CODE_DIR" 23 | sleep 2 24 | elif [ "$CHK_RES_2" = "HEAD" ]; then 25 | echo "WARN! Cannot check branch" 26 | elif [ "$CHK_RES_2" != "$GIT_BRANCH" ]; then 27 | echo "WARN!
Git Branch does not match" 28 | echo "Expected: '$GIT_BRANCH'" 29 | echo "Actual: '$CHK_RES_2'" 30 | echo "You should check it (script will continue execution after 10s)" 31 | sleep 10 32 | fi 33 | else 34 | echo "Downloading $CHAIN_NAME source code $GIT_BRANCH" 35 | git clone "$GIT_REPO" --branch "$GIT_BRANCH" --single-branch "$SOURCE_CODE_DIR" 36 | 37 | if [ $? -ne 0 ]; then 38 | echo >&2 "ERR: Git clone $CHAIN_NAME branch $GIT_BRANCH failed" 39 | exit 1 40 | fi 41 | fi 42 | 43 | CUR_DIR=$(pwd) 44 | cd "$SOURCE_CODE_DIR" 45 | echo "Compiling '$DAEMON_BINARY_NAME'. If this is the first time you compile, it will take a while; you can enjoy a cup of coffee and come back later" 46 | make install 47 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to compile $DAEMON_BINARY_NAME"; exit 1; } 48 | cd "$CUR_DIR" 49 | 50 | if [ ! -f "$BINARY" ]; then 51 | echo >&2 "ERR: Chain's source code was compiled but binary '$DAEMON_BINARY_NAME' could not be found" 52 | echo "You must find it and put it into the PATH environment variable" 53 | echo "(It is usually compiled and moved to $GOPATH/bin)" 54 | exit 1 55 | fi -------------------------------------------------------------------------------- /blockchain-in-docker/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | command -v 'docker-compose' > /dev/null 2>&1 || { echo >&2 "ERR: docker-compose is required"; exit 1; } 4 | 5 | down() { 6 | CHAIN_NO=$1 7 | DCF="network$CHAIN_NO.yml" 8 | 9 | if [ -f "$DCF" ]; then 10 | echo "Shutting down chain $CHAIN_NO" 11 | docker-compose -f "$DCF" down 12 | else 13 | echo "WARN! Cannot shut down containers of chain $CHAIN_NO because docker compose file $DCF could not be found" 14 | fi 15 | } 16 | 17 | down 1 18 | down 2 -------------------------------------------------------------------------------- /blockchain-in-docker/gov-sample-proposals/denom-cosmos-on-evmos/2-gov-register-coin.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "The native staking and governance token of the Cosmos chain", 3 | "denom_units": [ 4 | { 5 | "denom": "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2", 6 | "exponent": 0, 7 | "aliases": [ 8 | "uatom" 9 | ] 10 | }, 11 | { 12 | "denom": "ATOM", 13 | "exponent": 6 14 | } 15 | ], 16 | "base": "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2", 17 | "display": "ATOM", 18 | "name": "ATOM", 19 | "symbol": "ATOM" 20 | } -------------------------------------------------------------------------------- /blockchain-in-docker/gov-sample-proposals/denom-cosmos-on-evmos/README.md: -------------------------------------------------------------------------------- 1 | ### Register denom of ATOM (via IBC) on EVMOS chain (`ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2`) 2 | 3 | #### Submit proposal 4 | > docker exec -it vtevmos20 bash 5 | 6 | > evmosd tx gov submit-proposal register-coin "/gov-sample-proposals/denom-cosmos-on-evmos/2-gov-register-coin.json" --from val1 --node tcp://127.0.0.1:26657 --home /.evmosd2 --gas auto --deposit 65000000000000000000aevmos --title "Register IBC denom for native staking and governance token of the Cosmos chain" --description "The native staking and governance token of the Cosmos chain" 7 | 8 | #### Accept it 9 | > evmosd tx gov vote 1 yes --gas auto --from val1 --home /.evmosd2 --yes 10 | 11 | > evmosd tx gov vote 1 yes --gas auto --from val2 --home /.evmosd2 --yes 12 | 13 | > evmosd tx gov vote 1 yes
--gas auto --from val3 --home /.evmosd2 --yes -------------------------------------------------------------------------------- /blockchain-in-docker/gov-sample-proposals/evmos/1-gov_min_deposit_change.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Decrease the minimum deposit amount for governance proposals and maximum deposit period", 3 | "description": "If successful, this parameter-change governance proposal will change the minimum deposit from 64 EVMOS to 100000 aevmos and the max deposit period from 14 days to 7 days", 4 | "changes": [ 5 | { 6 | "subspace": "gov", 7 | "key": "depositparams", 8 | "value": { 9 | "min_deposit": [ 10 | { 11 | "denom": "aevmos", 12 | "amount": "100000" 13 | } 14 | ], 15 | "max_deposit_period": "604800000000000" 16 | } 17 | } 18 | ], 19 | "deposit": "65000000000000000000aevmos" 20 | } -------------------------------------------------------------------------------- /blockchain-in-docker/gov-sample-proposals/evmos/README.md: -------------------------------------------------------------------------------- 1 | ### Decrease the minimum deposit amount for governance proposals and maximum deposit period 2 | #### If successful, this parameter-change governance proposal will change the minimum deposit from 64 EVMOS to 100000 aevmos and the max deposit period from 14 days to 7 days 3 | 1. Submit proposal 4 | > docker exec -it vtevmos20 bash 5 | 6 | > cd /gov-sample-proposals/evmos/ 7 | 8 | > evmosd tx gov submit-proposal param-change "./1-gov_min_deposit_change.json" --from val1 --node tcp://127.0.0.1:26657 --home /.evmosd2 --gas 300000 9 | 10 | You can check the proposal via CLI commands or a [web UI like Big Dipper](https://github.com/VictorTrustyDev/EVMOS-sample-scripts/tree/main/big-dipper-as-block-explorer) 11 | 12 | 2. Vote 13 | > docker exec -it vtevmos20 bash 14 | 15 | > evmosd tx gov vote `[proposal id eg 1]` yes --gas auto --from val1 --home /.evmosd2 16 | 17 | > docker exec -it vtevmos21 bash 18 | 19 | > evmosd tx gov vote `[proposal id eg 1]` yes --gas auto --from val2 --home /.evmosd2 20 | 21 | > docker exec -it vtevmos22 bash 22 | 23 | > evmosd tx gov vote `[proposal id eg 1]` yes --gas auto --from val3 --home /.evmosd2 24 | 25 | Changes will take effect after the voting period ends (default 14 days) 26 | 27 | _Trick: You can reduce the voting period by overriding the `VOTING_PERIOID_IN_MINUTES` variable (eg: export VOTING_PERIOID_IN_MINUTES=15) so the voting period only lasts 15 minutes_ 28 | 29 | 3. Result: 30 | - Previous: 31 | > evmosd q params subspace gov depositparams --chain-id evmos_9000-6 --node tcp://127.0.0.1:36657 32 | 33 | > key: depositparams 34 | 35 | > subspace: gov 36 | 37 | > value: '{"min_deposit":[{"denom":"aevmos","amount":"64000000000000000000"}],"max_deposit_period":"172800000000000"}' 38 | 39 | - Later: 40 | > evmosd q params subspace gov depositparams --chain-id evmos_9000-6 --node tcp://127.0.0.1:36657 41 | 42 | > key: depositparams 43 | 44 | > subspace: gov 45 | 46 | > value: '{"min_deposit":[{"denom":"aevmos","amount":"100000"}],"max_deposit_period":"604800000000000"}' 47 | -------------------------------------------------------------------------------- /blockchain-in-docker/template.DockerfileX: -------------------------------------------------------------------------------- 1 | FROM golang:1.20.4-bullseye AS build-env 2 | 3 | RUN apt-get update -y 4 | RUN apt-get install git -y 5 | 6 | WORKDIR /go/victortrusty.dev 7 | 8 | COPY _p_src_dir_ .
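# Note: the _p_*_ tokens in this template (_p_src_dir_, _p_daemon_binary_, _p_home_prefix_, _p_user_name_, _p_user_id_, _p_group_id_)
# are placeholders that 2_build-docker-image.sh substitutes via sed after copying this template to Dockerfile1 / Dockerfile2.
# The copied chain source is compiled in this build stage; `make -B` forces a full (unconditional) rebuild.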
9 | 10 | RUN make -B build 11 | 12 | FROM golang:1.20.4-bullseye 13 | 14 | COPY --from=build-env /go/victortrusty.dev/build/_p_daemon_binary_ /usr/bin/_p_daemon_binary_ 15 | 16 | RUN apt-get update -y 17 | RUN apt-get install ca-certificates jq -y 18 | RUN useradd -m _p_user_name_ --uid=_p_user_id_ 19 | 20 | USER _p_user_id_:_p_group_id_ 21 | 22 | WORKDIR /root 23 | 24 | EXPOSE 26656 26657 1317 9090 8545 8546 25 | 26 | ENTRYPOINT /usr/bin/_p_daemon_binary_ start --home "/_p_home_prefix_" 27 | -------------------------------------------------------------------------------- /blockchain-in-docker/template.networkX.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | networks: 3 | vtevmosnw_p_chain_no_: 4 | driver: bridge 5 | services: 6 | vtevmos_p_chain_no_0: 7 | container_name: vtevmos_p_chain_no_0 8 | ports: 9 | - "_p_8545_:8545" 10 | - "_p_8546_:8546" 11 | - "_p_26657_:26657" 12 | - "_p_1317_:1317" 13 | - "_p_9090_:9090" 14 | - "_p_26656_:26656" 15 | networks: 16 | - vtevmosnw_p_chain_no_ 17 | environment: 18 | - NODE_IDX=0 19 | restart: always 20 | image: _p_image_prefix__p_chain_no_ 21 | volumes: 22 | - ./gov-sample-proposals:/gov-sample-proposals 23 | - ./_p_home_prefix_0:/_p_home_prefix_ 24 | vtevmos_p_chain_no_1: 25 | container_name: vtevmos_p_chain_no_1 26 | networks: 27 | - vtevmosnw_p_chain_no_ 28 | environment: 29 | - NODE_IDX=1 30 | depends_on: 31 | - vtevmos_p_chain_no_0 32 | restart: always 33 | image: _p_image_prefix__p_chain_no_ 34 | volumes: 35 | - ./gov-sample-proposals:/gov-sample-proposals 36 | - ./_p_home_prefix_1:/_p_home_prefix_ 37 | vtevmos_p_chain_no_2: 38 | container_name: vtevmos_p_chain_no_2 39 | networks: 40 | - vtevmosnw_p_chain_no_ 41 | environment: 42 | - NODE_IDX=2 43 | depends_on: 44 | - vtevmos_p_chain_no_0 45 | restart: always 46 | image: _p_image_prefix__p_chain_no_ 47 | volumes: 48 | - ./gov-sample-proposals:/gov-sample-proposals 49 | - ./_p_home_prefix_2:/_p_home_prefix_ -------------------------------------------------------------------------------- /env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ### IMPORTANT NOTICE 4 | ### It is not recommended to modify this file directly. Instead you can create a file named `override-env.sh` and write modified values there 5 | 6 | # Pre-requisites 7 | command -v jq > /dev/null 2>&1 || { echo >&2 "ERR: jq not installed. More info: https://stedolan.github.io/jq/download/ (Hint: sudo apt install jq -y)"; exit 1; } 8 | command -v yq > /dev/null 2>&1 || { echo >&2 "ERR: yq not installed. More info: https://github.com/kislyuk/yq/ (Hint: sudo apt install python3-pip -y && pip3 install yq)"; exit 1; } 9 | command -v tomlq > /dev/null 2>&1 || { echo >&2 "ERR: tomlq not installed, it is expected to be delivered within yq package"; exit 1; } 10 | command -v bc > /dev/null 2>&1 || { echo >&2 "ERR: bc command could not be found"; exit 1; } 11 | command -v make > /dev/null 2>&1 || { echo >&2 "ERR: make command could not be found"; exit 1; } 12 | command -v go > /dev/null 2>&1 || { echo >&2 "ERR: go was not installed. 
More info: https://go.dev/doc/install"; exit 1; } 13 | 14 | # Configurations 15 | 16 | ## Keyring 17 | ### Must be lowercase 18 | export KEYRING="test" # Valid values are 'file' or 'test', must be 'file' for cloud or production env (DANGER: keyring test will allow transfer token from validator without key) 19 | 20 | ## 21 | export NOTICE_DEV_ENV="This sample scripts was developed on an Ubuntu 22.04 LTS machine" 22 | 23 | ## Network 24 | export MONIKER='moniker' 25 | export NUMBER_OF_VALIDATOR=5 # Maximum number of active validators. If number of validators more than this, only the first X validators will be choosen (order by power) 26 | export MINIMUM_GOV_DEPOSIT=64 # Minimum amount of coins required to submit a proposal 27 | export VOTING_PERIOID_IN_MINUTES=2880 # 2880 minutes = 2 days 28 | ### Chain 1 29 | export CHAIN_1_TYPE="evmos" # valid values are 'evmos' or 'cosmos' only, must be 'evmos' for evmos chain, otherwise 'cosmos' for other chains 30 | export CHAIN_1_ID="evmos_9000-5" 31 | export CHAIN_1_COINTYPE=60 # 60 for EVMOS, 118 for Cosmos 32 | export CHAIN_1_ACCOUNT_PREFIX="evmos" 33 | export CHAIN_1_GIT_REPO="https://github.com/evmos/evmos.git" 34 | export CHAIN_1_GIT_REPO_BRANCH="v12.1.6" # must belong to repo $CHAIN_1_GIT_REPO 35 | export CHAIN_1_DAEMON_BINARY_NAME="evmosd" 36 | export CHAIN_1_DENOM_SYMBOL="evmos" 37 | export CHAIN_1_MIN_DENOM_SYMBOL="aevmos" 38 | export CHAIN_1_GAS_DENOM_SYMBOL="nevmos" 39 | export CHAIN_1_DENOM_EXPONENT=18 # no of digits (18 for evmos, 6 for cosmos atom) 40 | export CHAIN_1_GAS_DENOM_EXPONENT=9 # no of digits (9 for evmos, 3 for cosmos atom) 41 | export CHAIN_1_EXPOSE_RPC_TO_PORT=26657 42 | export CHAIN_1_EXPOSE_GRPC_TO_PORT=9090 43 | export CHAIN_1_EXPOSE_JSON_RPC_TO_PORT=8545 44 | export CHAIN_1_EXPOSE_WEBSOCKET_JSON_RPC_TO_PORT=8546 45 | export CHAIN_1_EXPOSE_REST_API_TO_PORT=1317 46 | export CHAIN_1_EXPOSE_P2P_TO_PORT=26656 47 | ### Chain 2 48 | export CHAIN_2_TYPE="evmos" # valid values are 'evmos' or 'cosmos' only, must be 'evmos' for evmos chain, otherwise 'cosmos' for other chains 49 | export CHAIN_2_ID="evmos_9000-6" 50 | export CHAIN_2_COINTYPE=60 # 60 for EVMOS, 118 for Cosmos 51 | export CHAIN_2_ACCOUNT_PREFIX="evmos" 52 | export CHAIN_2_GIT_REPO="https://github.com/evmos/evmos.git" 53 | export CHAIN_2_GIT_REPO_BRANCH="v12.1.6" # must belong to repo $CHAIN_2_GIT_REPO 54 | export CHAIN_2_DAEMON_BINARY_NAME="evmosd" 55 | export CHAIN_2_DENOM_SYMBOL="evmos" 56 | export CHAIN_2_MIN_DENOM_SYMBOL="aevmos" 57 | export CHAIN_2_GAS_DENOM_SYMBOL="nevmos" 58 | export CHAIN_2_DENOM_EXPONENT=18 # no of digits (18 for evmos, 6 for cosmos atom) 59 | export CHAIN_2_GAS_DENOM_EXPONENT=9 # no of digits (9 for evmos, 3 for cosmos atom) 60 | export CHAIN_2_EXPOSE_RPC_TO_PORT=36657 61 | export CHAIN_2_EXPOSE_GRPC_TO_PORT=19090 62 | export CHAIN_2_EXPOSE_JSON_RPC_TO_PORT=18545 63 | export CHAIN_2_EXPOSE_WEBSOCKET_JSON_RPC_TO_PORT=18546 64 | export CHAIN_2_EXPOSE_REST_API_TO_PORT=11317 65 | export CHAIN_2_EXPOSE_P2P_TO_PORT=36656 66 | #### (EVMOS specific) 67 | ### Chain 1 68 | export EVMOS_CHAIN_1_CLAIM_MODULE_ACCOUNT="evmos15cvq3ljql6utxseh0zau9m8ve2j8erz89m5wkz" 69 | ### Chain 2 70 | export EVMOS_CHAIN_2_CLAIM_MODULE_ACCOUNT="evmos15cvq3ljql6utxseh0zau9m8ve2j8erz89m5wkz" 71 | 72 | ## Validators 73 | export VAL_KEYRING_FILE_ENCRYPTION_PASSWORD="11111111" 74 | export VAL_COMMISSION_RATE=0.05 # 5% 75 | export VAL_COMMISSION_RATE_MAX=0.20 # 20% 76 | export VAL_COMMISSION_CHANGE_RATE_MAX=0.01 # 1% 77 | export VAL_MIN_SELF_DELEGATION=1000000 78 | export 
VAL_GAS_LIMIT_CREATE_VALIDATOR=300000 79 | ### Validator 1 80 | export VAL_1_KEY_NAME="val1" 81 | export VAL_1_RAW_BALANCE=50000000 # Validator 1 init with this amount of coin in balance 82 | export VAL_1_RAW_STAKE=7000 # Validator 1 will stake this amount 83 | export VAL_1_RAW_CLAIM=1000 # Validator 1 can claim this amount, evmos only 84 | #### Chain 1 85 | export VAL_1_CHAIN_1_SEED="spoil senior door access upset floor decorate shield high punch senior tape pigeon base slogan height clever buffalo cat report poem weapon labor satoshi" 86 | #### Chain 2 87 | export VAL_1_CHAIN_2_SEED="spoil senior door access upset floor decorate shield high punch senior tape pigeon base slogan height clever buffalo cat report poem weapon labor satoshi" 88 | ### Validator 2 89 | export VAL_2_KEY_NAME="val2" 90 | export VAL_2_RAW_BALANCE=50000000 # Validator 2 init with this amount of coin in balance 91 | export VAL_2_RAW_STAKE=3000 # Validator 2 will stake this amount 92 | export VAL_2_RAW_CLAIM=1000 # Validator 2 can claim this amount, evmos only 93 | #### Chain 1 94 | export VAL_2_CHAIN_1_SEED="width produce brush hour horse retreat play flag fresh broken measure culture scare broken erupt pilot buzz embody depend topic behind rigid fan battle" 95 | #### Chain 2 96 | export VAL_2_CHAIN_2_SEED="width produce brush hour horse retreat play flag fresh broken measure culture scare broken erupt pilot buzz embody depend topic behind rigid fan battle" 97 | ### Validator 3 98 | export VAL_3_KEY_NAME="val3" 99 | export VAL_3_RAW_BALANCE=50000000 # Validator 3 init with this amount of coin in balance 100 | export VAL_3_RAW_STAKE=3000 # Validator 3 will stake this amount 101 | export VAL_3_RAW_CLAIM=1000 # Validator 3 can claim this amount, evmos only 102 | #### Chain 1 103 | export VAL_3_CHAIN_1_SEED="stage grid emotion thumb safe myth chair dizzy beyond casual select polar hover retire master neglect shift zero trigger section token replace truly father" 104 | #### Chain 2 105 | export VAL_3_CHAIN_2_SEED="stage grid emotion thumb safe myth chair dizzy beyond casual select polar hover retire master neglect shift zero trigger section token replace truly father" 106 | 107 | ## Hermes (IBC Relayer) 108 | export HERMES_GIT_REPO="https://github.com/informalsystems/ibc-rs.git" 109 | export HERMES_GIT_REPO_BRANCH="v1.5.0" # Must belong to repo $HERMES_GIT_REPO 110 | export HERMES_BINARY="hermes" 111 | export HERMES_SOURCE_DIR_PREFIX="source-code" # do NOT modify 112 | export HERMES_HOME_DIR=".hermes" 113 | export HERMES_RESERVED_FEE=100 # will be transfered to relayer's account and reserved for relay purpose 114 | ### Chain 1 115 | export HERMES_CFG_CHAIN_1_ID="$CHAIN_1_ID" 116 | export HERMES_CFG_CHAIN_1_RPC_ADDR="127.0.0.1:$CHAIN_1_EXPOSE_RPC_TO_PORT" 117 | export HERMES_CFG_CHAIN_1_GRPC_ADDR="127.0.0.1:$CHAIN_1_EXPOSE_GRPC_TO_PORT" 118 | export HERMES_CFG_CHAIN_1_ACCOUNT_PREFIX="$CHAIN_1_ACCOUNT_PREFIX" 119 | export HERMES_CFG_CHAIN_1_KEY_NAME="evmoskey" 120 | export HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL="$CHAIN_1_MIN_DENOM_SYMBOL" 121 | export HERMES_CFG_CHAIN_1_DENOM_EXPONENT=$CHAIN_1_DENOM_EXPONENT # no of digits (18 for evmos, 6 for cosmos atom) 122 | ### Chain 2 123 | export HERMES_CFG_CHAIN_2_ID="$CHAIN_2_ID" 124 | export HERMES_CFG_CHAIN_2_RPC_ADDR="127.0.0.1:$CHAIN_2_EXPOSE_RPC_TO_PORT" 125 | export HERMES_CFG_CHAIN_2_GRPC_ADDR="127.0.0.1:$CHAIN_2_EXPOSE_GRPC_TO_PORT" 126 | export HERMES_CFG_CHAIN_2_ACCOUNT_PREFIX="$CHAIN_2_ACCOUNT_PREFIX" 127 | export HERMES_CFG_CHAIN_2_KEY_NAME="evmoskey" 128 | export 
HERMES_CFG_CHAIN_2_GAS_PRICE_DENOM_SYMBOL="$CHAIN_2_MIN_DENOM_SYMBOL" 129 | export HERMES_CFG_CHAIN_2_DENOM_EXPONENT=$CHAIN_2_DENOM_EXPONENT # no of digits 130 | 131 | ## Relayer account 132 | ## Since relayer require an account on each chain with coins to sign and broadcast tx 133 | ## so you have to provide account and load them some coins (for example: $HERMES_RESERVED_FEE) 134 | ### Chain 1 135 | export REL_1_SEED="raw course slim hockey salt crawl sick safe december during armed fragile" 136 | export REL_1_ADDR="evmos1metw56lk3k4vhkh0vzxlr8p4mzpjvttmagvekp" # Wallet address of the above seed on EVMOS chain 137 | ### Chain 2 138 | export REL_2_SEED="raw course slim hockey salt crawl sick safe december during armed fragile" 139 | export REL_2_ADDR="evmos1metw56lk3k4vhkh0vzxlr8p4mzpjvttmagvekp" # Wallet address of the above seed on EVMOS chain 140 | 141 | ## Big Dipper (bdjuno) 142 | export BD_BINARY_ORIGIN="$GOPATH/bin/bdjuno" 143 | export BD_PG_DB="bdjuno" 144 | export BD_PG_USER="bdjuno" 145 | export BD_PG_PASS="6N4QtFYMt7h972uazrWTckmMvFZWIje" 146 | export BD_HASURA_BINARY=/usr/local/bin/hasura 147 | export BD_HASURA_SECRET="myadminsecretkey" 148 | export BD_PG_HASURA_META_DB="hasurameta" 149 | export BD_PG_HASURA_DB="hasura" 150 | export BD_PG_HASURA_USER="hasura" 151 | export BD_PG_HASURA_PASS="PX2RNvtZ4m7fntnbRrtySB4ROG5EKk4J" 152 | export BD_CFG_PG_USR_PASS="6N4QtFYMt7h972uazrWTckmMvFZWIje" # Password of default user postgres 153 | ### Chain 1 154 | export BD_CFG_CHAIN_1_GIT_REPO="https://github.com/forbole/bdjuno.git" 155 | export BD_CFG_CHAIN_1_GIT_REPO_BRANCH="chains/evmos/mainnet" # must belong to repo $BD_CFG_CHAIN_1_GIT_REPO 156 | export BD_CFG_CHAIN_1_PG_PORT=5432 157 | export BD_CFG_CHAIN_1_ACCOUNT_PREFIX="$CHAIN_1_ACCOUNT_PREFIX" 158 | export BD_CFG_CHAIN_1_RPC_ADDR="127.0.0.1:$CHAIN_1_EXPOSE_RPC_TO_PORT" 159 | export BD_CFG_CHAIN_1_GRPC_ADDR="127.0.0.1:$CHAIN_1_EXPOSE_GRPC_TO_PORT" 160 | export BD_CFG_CHAIN_1_ID="$CHAIN_1_ID" 161 | export BD_CFG_CHAIN_1_HASURA_PORT=8080 162 | export BD_CFG_CHAIN_1_HASURA_ACTIONBASE_PORT=3000 163 | export BD_CFG_CHAIN_1_DENOM_SYMBOL="$CHAIN_1_DENOM_SYMBOL" # evmos/atom/... 164 | export BD_CFG_CHAIN_1_MIN_DENOM_SYMBOL="$CHAIN_1_MIN_DENOM_SYMBOL" # aevmos/uatom/... 165 | export BD_CFG_CHAIN_1_DENOM_EXPONENT=$CHAIN_1_DENOM_EXPONENT # no of digits (18 for evmos, 6 for cosmos atom) 166 | ### Chain 2 167 | export BD_CFG_CHAIN_2_GIT_REPO="https://github.com/forbole/bdjuno.git" 168 | export BD_CFG_CHAIN_2_GIT_REPO_BRANCH="chains/evmos/mainnet" # must belong to repo $BD_CFG_CHAIN_2_GIT_REPO 169 | export BD_CFG_CHAIN_2_PG_PORT=15432 170 | export BD_CFG_CHAIN_2_ACCOUNT_PREFIX="$CHAIN_2_ACCOUNT_PREFIX" 171 | export BD_CFG_CHAIN_2_RPC_ADDR="127.0.0.1:$CHAIN_2_EXPOSE_RPC_TO_PORT" 172 | export BD_CFG_CHAIN_2_GRPC_ADDR="127.0.0.1:$CHAIN_2_EXPOSE_GRPC_TO_PORT" 173 | export BD_CFG_CHAIN_2_ID="$CHAIN_2_ID" 174 | export BD_CFG_CHAIN_2_HASURA_PORT=8082 175 | export BD_CFG_CHAIN_2_HASURA_ACTIONBASE_PORT=3002 176 | export BD_CFG_CHAIN_2_DENOM_SYMBOL="$CHAIN_2_DENOM_SYMBOL" # evmos/atom/... 177 | export BD_CFG_CHAIN_2_MIN_DENOM_SYMBOL="$CHAIN_2_MIN_DENOM_SYMBOL" # aevmos/uatom... 
178 | export BD_CFG_CHAIN_2_DENOM_EXPONENT=$CHAIN_2_DENOM_EXPONENT # no of digits (18 for evmos, 6 for cosmos atom) 179 | ### Big Dipper 2.0 for Cosmos-based chains 180 | ### Chain 1 181 | export BD2_CFG_CHAIN_1_GIT_REPO="https://github.com/forbole/big-dipper-2.0-cosmos.git" 182 | export BD2_CFG_CHAIN_1_BRANCH="web-evmos@2.14.1" 183 | export BD2_CFG_CHAIN_1_PROJECT_NAME="web-evmos" # project name under ./apps/ 184 | export BD2_CFG_CHAIN_1_CHAIN_NAME="evmos" # chain name to be set into ./apps/web-x/src/chain.json 185 | export BD2_CFG_CHAIN_1_PORT=3800 186 | export BD2_CFG_CHAIN_1_PUBLIC_DOMAIN="127.0.0.1" 187 | export BD2_CFG_CHAIN_1_PUBLIC_RPC_26657="127.0.0.1:$CHAIN_1_EXPOSE_RPC_TO_PORT" 188 | ### Chain 2 189 | export BD2_CFG_CHAIN_2_GIT_REPO="https://github.com/forbole/big-dipper-2.0-cosmos.git" 190 | export BD2_CFG_CHAIN_2_BRANCH="web-evmos@2.14.1" 191 | export BD2_CFG_CHAIN_2_PROJECT_NAME="web-evmos" # project name under ./apps/ 192 | export BD2_CFG_CHAIN_2_CHAIN_NAME="evmos" # chain name to be set into ./apps/web-x/src/chain.json 193 | export BD2_CFG_CHAIN_2_PORT=3802 194 | export BD2_CFG_CHAIN_2_PUBLIC_DOMAIN="127.0.0.1" 195 | export BD2_CFG_CHAIN_2_PUBLIC_RPC_26657="127.0.0.1:$CHAIN_2_EXPOSE_RPC_TO_PORT" 196 | 197 | ## Derived from the above config (edit at your own risk) 198 | export HERMES_SERVICE_NAME=$HERMES_BINARY'-svc' 199 | ### Docker 200 | export DOCKER_IMAGE_NAME_PREFIX="evmos.victortrusty.dev:c" 201 | 202 | # Others # You can skip this part, nothing below is meant to be customized 203 | echo $NOTICE_DEV_ENV 204 | if [ -z "$GOPATH" ]; then 205 | echo >&2 "ERR: Missing GOPATH environment variable, should be '$HOME/go'" 206 | exit 1 207 | fi 208 | command -v systemctl > /dev/null 2>&1 209 | if [ $? -eq 0 ]; then 210 | export DISABLE_SYSTEMCTL=0 211 | elif [ ! -d "/etc/systemd/system" ]; then 212 | export DISABLE_SYSTEMCTL=1 213 | else 214 | export DISABLE_SYSTEMCTL=1 215 | fi 216 | if [ "$FORCE_EXTRA_FUNC" = "1" ]; then 217 | export EXTRA_FUNC=1 218 | elif [ -f "./extra_func.lic" ]; then 219 | export EXTRA_FUNC=1 220 | elif [ -f "../extra_func.lic" ]; then 221 | export EXTRA_FUNC=1 222 | else 223 | export EXTRA_FUNC=0 224 | fi 225 | if [ -f "../override-env.sh" ]; then 226 | source "../override-env.sh" 227 | fi 228 | -------------------------------------------------------------------------------- /hermes-as-ibc-relayer/.gitignore: -------------------------------------------------------------------------------- 1 | !template-config.toml 2 | 3 | .hermes/ 4 | -------------------------------------------------------------------------------- /hermes-as-ibc-relayer/README.md: -------------------------------------------------------------------------------- 1 | ### This helps you create an IBC relayer service using Hermes ([v1.5.0](https://github.com/informalsystems/ibc-rs/tree/v1.5.0)) to transfer coins between 2 chains 2 | 3 | ### I. 
Prepare 4 | - **IMPORTANT** Open the file `env.sh`, locate the variables with the prefix `HERMES_CFG_CHAIN_*` and update them based on your needs 5 | - Hermes requires an account on each chain with some coins reserved for broadcasting txs 6 | + Account used by default: 7 | + Seed: `raw course slim hockey salt crawl sick safe december during armed fragile` 8 | + EVMOS wallet address: `evmos1metw56lk3k4vhkh0vzxlr8p4mzpjvttmagvekp` 9 | + Cosmos wallet address: `cosmos16euecy8mnkhdpnr4y3346h44v0gqj67zwc4laf` 10 | + You need to manually transfer some coins (EVMOS) to this address, so the relayer can use this account to broadcast the required transactions 11 | > `$ evmosd tx bank send val1 evmos1metw56lk3k4vhkh0vzxlr8p4mzpjvttmagvekp 100000000000000000000aevmos --node tcp://127.0.0.1:26657` 12 | 13 | ### II. Create the IBC relayer 14 | > $ ./create-relayer.sh 15 | 16 | You can check the IBC token hash created with the following command: 17 | > $ evmosd q bank balances evmos1metw56lk3k4vhkh0vzxlr8p4mzpjvttmagvekp --node tcp://127.0.0.1:26657 | grep denom | grep ibc 18 | 19 | Sample output: 20 | > denom: `ibc/0E8BF52B5A990E16C4AF2E5ED426503F3F0B12067FB2B4B660015A64CCE38EA0` 21 | 22 | _The coin on chain A (e.g. aevmos) is represented as denom `ibc/0E8BF...38EA0` on chain B; this hash changes across different relayers/channels_ 23 | 24 | ### III. Attention 25 | - The tendermint light client id, connection id and channel id will be appended as comment lines to the `.hermes/config.toml` file 26 | - If you send your coin (EVMOS) to a chain using different relayers (thus different channel ids), your token will be represented as a different `IBC/_hash_` 27 | - (One time only) Before you can transfer a coin to another chain using IBC, you must perform a transfer with the relayer account in order to init an `IBC/_hash_` for the token. **This script already helps you with that** 28 | -------------------------------------------------------------------------------- /hermes-as-ibc-relayer/_make_binary.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export HERMES_SOURCE_DIR="$HERMES_SOURCE_DIR_PREFIX-$HERMES_GIT_REPO_BRANCH" 4 | # If the repo differs from the config, show a warning 5 | if [ -d "$HERMES_SOURCE_DIR" ]; then 6 | echo "Hermes repo exists at $HERMES_SOURCE_DIR" 7 | echo "Checking repo URL & branch name" 8 | CHK_RES_1="$(git --git-dir "./$HERMES_SOURCE_DIR"/.git --work-tree "./$HERMES_SOURCE_DIR" config --get remote.origin.url)" 9 | if [ $? -ne 0 ] || [ -z "$CHK_RES_1" ]; then 10 | echo "WARN! Unable to check remote origin URL of git repo at $HERMES_SOURCE_DIR" 11 | sleep 2 12 | elif [ "$CHK_RES_1" != "$HERMES_GIT_REPO" ]; then 13 | echo "WARN! Git repo URL does not match" 14 | echo "Expected: '$HERMES_GIT_REPO'" 15 | echo "Actual: '$CHK_RES_1'" 16 | echo "You should check it (script will continue execution after 10s)" 17 | sleep 10 18 | fi 19 | CHK_RES_2="$(git --git-dir "./$HERMES_SOURCE_DIR"/.git --work-tree "./$HERMES_SOURCE_DIR" rev-parse --abbrev-ref HEAD)" 20 | if [ $? -ne 0 ] || [ -z "$CHK_RES_2" ]; then 21 | echo "WARN! Unable to check branch of git repo at $HERMES_SOURCE_DIR" 22 | sleep 2 23 | elif [ "$CHK_RES_2" = "HEAD" ]; then 24 | echo "WARN! Cannot check branch (detached HEAD)" 25 | elif [ "$CHK_RES_2" != "$HERMES_GIT_REPO_BRANCH" ]; then 26 | echo "WARN! 
Git branch does not match" 27 | echo "Expected: '$HERMES_GIT_REPO_BRANCH'" 28 | echo "Actual: '$CHK_RES_2'" 29 | echo "You should check it (script will continue execution after 10s)" 30 | sleep 10 31 | fi 32 | else 33 | echo "Downloading Hermes source code $HERMES_GIT_REPO_BRANCH" 34 | git clone "$HERMES_GIT_REPO" --branch "$HERMES_GIT_REPO_BRANCH" --single-branch "$HERMES_SOURCE_DIR" 35 | 36 | if [ $? -ne 0 ]; then 37 | echo >&2 "ERR: Git clone Hermes $HERMES_GIT_REPO_BRANCH failed" 38 | exit 1 39 | fi 40 | fi 41 | 42 | CUR_DIR=$(pwd) 43 | cd "$HERMES_SOURCE_DIR" 44 | echo "Compiling $HERMES_BINARY. If this is the first time you compile, it will take a while, so you can enjoy a cup of coffee and come back later" 45 | sleep 3 46 | cargo build --release --bin $HERMES_BINARY 47 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to compile Hermes"; exit 1; } 48 | cd "$CUR_DIR" 49 | 50 | if [ ! -f "$BINARY" ]; then 51 | echo >&2 "ERR: Hermes source code was compiled but the binary $HERMES_BINARY could not be found" 52 | exit 1 53 | fi -------------------------------------------------------------------------------- /hermes-as-ibc-relayer/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | command -v 'docker-compose' > /dev/null 2>&1 || { echo >&2 "ERR: docker-compose is required"; exit 1; } 4 | 5 | source ../env.sh 6 | 7 | if [ $DISABLE_SYSTEMCTL -eq 0 ]; then 8 | echo "Stopping $HERMES_SERVICE_NAME service" 9 | sudo systemctl stop $HERMES_SERVICE_NAME > /dev/null 2>&1 10 | sudo systemctl disable $HERMES_SERVICE_NAME > /dev/null 2>&1 11 | else 12 | echo "Nothing to do, this script can only stop the hermes service [$HERMES_SERVICE_NAME] on systems that support 'systemd'" 13 | fi -------------------------------------------------------------------------------- /hermes-as-ibc-relayer/create-relayer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | command -v cargo > /dev/null 2>&1 || { echo >&2 "ERR: Rust & Cargo are not installed. More info: https://www.rust-lang.org/tools/install . Hint: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh"; exit 1; } 4 | 5 | source ../env.sh 6 | 7 | if [ -f "./override-env.sh" ]; then 8 | source "./override-env.sh" 9 | fi 10 | 11 | if [ "$HERMES_NO_CONFIRM_BALANCE" != "1" ]; then 12 | echo "Hermes requires an account on each chain with some coins reserved for broadcasting txs, so based on the config:" 13 | if [ "$REL_1_ADDR" = "$REL_2_ADDR" ]; then 14 | echo "- Account $REL_1_ADDR will be used for both chains $HERMES_CFG_CHAIN_1_ID and $HERMES_CFG_CHAIN_2_ID" 15 | echo "Are you sure the above account has a coin balance on both chains?" 16 | else 17 | echo "- Account $REL_1_ADDR will be used for chain $HERMES_CFG_CHAIN_1_ID" 18 | echo "- Account $REL_2_ADDR will be used for chain $HERMES_CFG_CHAIN_2_ID" 19 | echo "Are you sure the above accounts have a coin balance on their respective chains?" 20 | fi 21 | 22 | read -p "(Y/n)" -n 1 -r 23 | echo 24 | if [[ $REPLY =~ ^[Yy]$ ]] 25 | then 26 | echo " ! 
Good" 27 | else 28 | echo "Please prepare the accounts first" 29 | echo "Hint: you can do this" 30 | echo " docker exec -it vtevmos11 bash" 31 | echo " $CHAIN_1_DAEMON_BINARY_NAME tx bank send $VAL_2_KEY_NAME $REL_1_ADDR "$(bc <<< "$HERMES_RESERVED_FEE * (10^$HERMES_CFG_CHAIN_1_DENOM_EXPONENT)")"$HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL --home /.evmosd1 --node tcp://127.0.0.1:26657 --yes --gas-prices $(bc <<< "20 * 10 ^ $CHAIN_1_GAS_DENOM_EXPONENT ")$CHAIN_1_MIN_DENOM_SYMBOL" 32 | echo " docker exec -it vtevmos21 bash" 33 | echo " $CHAIN_2_DAEMON_BINARY_NAME tx bank send $VAL_2_KEY_NAME $REL_2_ADDR "$(bc <<< "$HERMES_RESERVED_FEE * (10^$HERMES_CFG_CHAIN_2_DENOM_EXPONENT)")"$HERMES_CFG_CHAIN_2_GAS_PRICE_DENOM_SYMBOL --home /.evmosd2 --node tcp://127.0.0.1:26657 --yes --gas-prices $(bc <<< "20 * 10 ^ $CHAIN_2_GAS_DENOM_EXPONENT ")$CHAIN_2_MIN_DENOM_SYMBOL" 34 | exit 0 35 | fi 36 | fi 37 | 38 | [ $DISABLE_SYSTEMCTL -eq 0 ] && { 39 | echo "Stopping $HERMES_SERVICE_NAME service"; 40 | sudo systemctl stop $HERMES_SERVICE_NAME; 41 | sudo systemctl disable $HERMES_SERVICE_NAME; 42 | } 43 | 44 | echo "Remove previous setup" 45 | rm -rf "$HERMES_HOME_DIR" 46 | 47 | echo "Init new home dir" 48 | mkdir -p "$HERMES_HOME_DIR" 49 | 50 | # Binary 51 | export HERMES_SOURCE_DIR="$HERMES_SOURCE_DIR_PREFIX-$HERMES_GIT_REPO_BRANCH" 52 | export BINARY=$(pwd)'/'$HERMES_SOURCE_DIR'/target/release/'$HERMES_BINARY 53 | 54 | # Check & install the hermes binary if it does not exist 55 | ./_make_binary.sh 56 | [ $? -eq 0 ] || { echo >&2 "ERR: Failed to check & build $HERMES_BINARY binary at $BINARY"; exit 1; } 57 | 58 | echo 'You can customize the config by editing keys with prefix [HERMES_CFG_CHAIN_*] in the [env.sh] file' 59 | sleep 3 60 | 61 | echo "Chain ID: $HERMES_CFG_CHAIN_1_ID and $HERMES_CFG_CHAIN_2_ID" 62 | echo "Chain 1:" 63 | echo "- RPC: $HERMES_CFG_CHAIN_1_RPC_ADDR" 64 | echo "- gRPC: $HERMES_CFG_CHAIN_1_GRPC_ADDR" 65 | echo "- Account prefix: $HERMES_CFG_CHAIN_1_ACCOUNT_PREFIX" 66 | echo "- Relayer tx broadcast account key name: $HERMES_CFG_CHAIN_1_KEY_NAME" 67 | echo "- Gas price denom: $HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL" 68 | echo "Chain 2:" 69 | echo "- RPC: $HERMES_CFG_CHAIN_2_RPC_ADDR" 70 | echo "- gRPC: $HERMES_CFG_CHAIN_2_GRPC_ADDR" 71 | echo "- Account prefix: $HERMES_CFG_CHAIN_2_ACCOUNT_PREFIX" 72 | echo "- Relayer tx broadcast account key name: $HERMES_CFG_CHAIN_2_KEY_NAME" 73 | echo "- Gas price denom: $HERMES_CFG_CHAIN_2_GAS_PRICE_DENOM_SYMBOL" 74 | sleep 3 75 | 76 | if [ $CHAIN_1_COINTYPE -eq 60 ] || [ "$CHAIN_1_TYPE" = "evmos" ]; then 77 | GAS_PRICE_1="$(bc <<< "20 * 10 ^ ($HERMES_CFG_CHAIN_1_DENOM_EXPONENT/2)")" 78 | else 79 | GAS_PRICE_1=2 80 | fi 81 | if [ $CHAIN_2_COINTYPE -eq 60 ] || [ "$CHAIN_2_TYPE" = "evmos" ]; then 82 | GAS_PRICE_2="$(bc <<< "20 * 10 ^ ($HERMES_CFG_CHAIN_2_DENOM_EXPONENT/2)")" 83 | else 84 | GAS_PRICE_2=2 85 | fi 86 | 87 | echo "Initializing file config.toml" 88 | CONFIG_TOML=$HERMES_HOME_DIR'/config.toml' 89 | cp "./template-config.toml" "$CONFIG_TOML" 90 | if [[ "$OSTYPE" == "darwin"* ]]; then 91 | sed -i '' "s/chain1_id/$HERMES_CFG_CHAIN_1_ID/g" $CONFIG_TOML 92 | sed -i '' "s,chain1_rpc_addr,$HERMES_CFG_CHAIN_1_RPC_ADDR,g" $CONFIG_TOML 93 | sed -i '' "s,chain1_grpc_addr,$HERMES_CFG_CHAIN_1_GRPC_ADDR,g" $CONFIG_TOML 94 | sed -i '' "s/chain1_account_prefix/$HERMES_CFG_CHAIN_1_ACCOUNT_PREFIX/g" $CONFIG_TOML 95 | sed -i '' "s/chain1_key_name/$HERMES_CFG_CHAIN_1_KEY_NAME/g" $CONFIG_TOML 96 | sed -i '' "s/chain1_gas_price_amt/$GAS_PRICE_1/g" $CONFIG_TOML 97 | sed -i '' 
"s/chain1_gas_price_denom/$HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL/g" $CONFIG_TOML 98 | if [ $CHAIN_1_COINTYPE -eq 60 ] || [ "$CHAIN_1_TYPE" = "evmos" ]; then 99 | sed -i '' "s#chain1_address_type#{ derivation = 'ethermint', proto_type = { pk_type = '/ethermint.crypto.v1.ethsecp256k1.PubKey' } }#g" $CONFIG_TOML 100 | else 101 | sed -i '' "s#chain1_address_type#{ derivation = 'cosmos' }#g" $CONFIG_TOML 102 | fi 103 | sed -i '' "s/chain2_id/$HERMES_CFG_CHAIN_2_ID/g" $CONFIG_TOML 104 | sed -i '' "s,chain2_rpc_addr,$HERMES_CFG_CHAIN_2_RPC_ADDR,g" $CONFIG_TOML 105 | sed -i '' "s,chain2_grpc_addr,$HERMES_CFG_CHAIN_2_GRPC_ADDR,g" $CONFIG_TOML 106 | sed -i '' "s/chain2_account_prefix/$HERMES_CFG_CHAIN_2_ACCOUNT_PREFIX/g" $CONFIG_TOML 107 | sed -i '' "s/chain2_key_name/$HERMES_CFG_CHAIN_2_KEY_NAME/g" $CONFIG_TOML 108 | sed -i '' "s/chain2_gas_price_amt/$GAS_PRICE_2/g" $CONFIG_TOML 109 | sed -i '' "s/chain2_gas_price_denom/$HERMES_CFG_CHAIN_2_GAS_PRICE_DENOM_SYMBOL/g" $CONFIG_TOML 110 | if [ $CHAIN_2_COINTYPE -eq 60 ] || [ "$CHAIN_2_TYPE" = "evmos" ]; then 111 | sed -i '' "s#chain2_address_type#{ derivation = 'ethermint', proto_type = { pk_type = '/ethermint.crypto.v1.ethsecp256k1.PubKey' } }#g" $CONFIG_TOML 112 | else 113 | sed -i '' "s#chain2_address_type#{ derivation = 'cosmos' }#g" $CONFIG_TOML 114 | fi 115 | else 116 | sed -i "s/chain1_id/$HERMES_CFG_CHAIN_1_ID/g" $CONFIG_TOML 117 | sed -i "s,chain1_rpc_addr,$HERMES_CFG_CHAIN_1_RPC_ADDR,g" $CONFIG_TOML 118 | sed -i "s,chain1_grpc_addr,$HERMES_CFG_CHAIN_1_GRPC_ADDR,g" $CONFIG_TOML 119 | sed -i "s/chain1_account_prefix/$HERMES_CFG_CHAIN_1_ACCOUNT_PREFIX/g" $CONFIG_TOML 120 | sed -i "s/chain1_key_name/$HERMES_CFG_CHAIN_1_KEY_NAME/g" $CONFIG_TOML 121 | sed -i "s/chain1_gas_price_amt/$GAS_PRICE_1/g" $CONFIG_TOML 122 | sed -i "s/chain1_gas_price_denom/$HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL/g" $CONFIG_TOML 123 | if [ $CHAIN_1_COINTYPE -eq 60 ]; then 124 | sed -i "s#chain1_address_type#{ derivation = 'ethermint', proto_type = { pk_type = '/ethermint.crypto.v1.ethsecp256k1.PubKey' } }#g" $CONFIG_TOML 125 | else 126 | sed -i "s#chain1_address_type#{ derivation = 'cosmos' }#g" $CONFIG_TOML 127 | fi 128 | sed -i "s/chain2_id/$HERMES_CFG_CHAIN_2_ID/g" $CONFIG_TOML 129 | sed -i "s,chain2_rpc_addr,$HERMES_CFG_CHAIN_2_RPC_ADDR,g" $CONFIG_TOML 130 | sed -i "s,chain2_grpc_addr,$HERMES_CFG_CHAIN_2_GRPC_ADDR,g" $CONFIG_TOML 131 | sed -i "s/chain2_account_prefix/$HERMES_CFG_CHAIN_2_ACCOUNT_PREFIX/g" $CONFIG_TOML 132 | sed -i "s/chain2_key_name/$HERMES_CFG_CHAIN_2_KEY_NAME/g" $CONFIG_TOML 133 | sed -i "s/chain2_gas_price_amt/$GAS_PRICE_2/g" $CONFIG_TOML 134 | sed -i "s/chain2_gas_price_denom/$HERMES_CFG_CHAIN_2_GAS_PRICE_DENOM_SYMBOL/g" $CONFIG_TOML 135 | if [ $CHAIN_2_COINTYPE -eq 60 ]; then 136 | sed -i "s#chain2_address_type#{ derivation = 'ethermint', proto_type = { pk_type = '/ethermint.crypto.v1.ethsecp256k1.PubKey' } }#g" $CONFIG_TOML 137 | else 138 | sed -i "s#chain2_address_type#{ derivation = 'cosmos' }#g" $CONFIG_TOML 139 | fi 140 | fi 141 | 142 | # 143 | echo "Restore replayer account" 144 | echo "-Importing seed" 145 | echo " [$REL_1_SEED]" 146 | echo " as relayer account for chain $HERMES_CFG_CHAIN_1_ID" 147 | REL_1_SEED_FILE="rel_1_seed.key" 148 | echo "$REL_1_SEED" > "$REL_1_SEED_FILE" 149 | $BINARY --config "$CONFIG_TOML" keys add --mnemonic-file "$REL_1_SEED_FILE" --hd-path "m/44'/$CHAIN_1_COINTYPE'/0'/0/0" --chain "$HERMES_CFG_CHAIN_1_ID" --key-name "$HERMES_CFG_CHAIN_1_KEY_NAME" 150 | echo "-Importing seed" 151 | echo " [$REL_2_SEED]" 152 | 
echo " as relayer account for chain $HERMES_CFG_CHAIN_2_ID" 153 | REL_2_SEED_FILE="rel_2_seed.key" 154 | echo "$REL_2_SEED" > "$REL_2_SEED_FILE" 155 | $BINARY --config "$CONFIG_TOML" keys add --mnemonic-file "$REL_2_SEED_FILE" --hd-path "m/44'/$CHAIN_2_COINTYPE'/0'/0/0" --chain "$HERMES_CFG_CHAIN_2_ID" --key-name "$HERMES_CFG_CHAIN_2_KEY_NAME" 156 | ## Extract addr 157 | export CHECK_REL_1_ADDR="$($BINARY --config "$CONFIG_TOML" keys list --chain "$HERMES_CFG_CHAIN_1_ID" | grep "$HERMES_CFG_CHAIN_1_KEY_NAME" | sed 's/.*\('$CHAIN_1_ACCOUNT_PREFIX'[a-z0-9]*\).*/\1/')" 158 | if [ -z "$CHECK_REL_1_ADDR" ]; then 159 | echo >&2 "ERR: Relayer account on $HERMES_CFG_CHAIN_1_ID was imported but could not be found! Did you set the following variables correctly?" 160 | echo " + HERMES_CFG_CHAIN_1_KEY_NAME=$HERMES_CFG_CHAIN_1_KEY_NAME" 161 | echo " + CHAIN_1_ACCOUNT_PREFIX=$CHAIN_1_ACCOUNT_PREFIX" 162 | exit 1 163 | elif [ "$CHECK_REL_1_ADDR" == "$REL_1_ADDR" ]; then 164 | echo "- Relayer wallet addr on $HERMES_CFG_CHAIN_1_ID is $REL_1_ADDR" 165 | else 166 | echo >&2 "ERR: The imported relayer account on $HERMES_CFG_CHAIN_1_ID has wallet address '$CHECK_REL_1_ADDR', which differs from the configuration variable 'REL_1_ADDR'=$REL_1_ADDR" 167 | exit 1 168 | fi 169 | export CHECK_REL_2_ADDR="$($BINARY --config "$CONFIG_TOML" keys list --chain "$HERMES_CFG_CHAIN_2_ID" | grep "$HERMES_CFG_CHAIN_2_KEY_NAME" | sed 's/.*\('$CHAIN_2_ACCOUNT_PREFIX'[a-z0-9]*\).*/\1/')" 170 | if [ -z "$CHECK_REL_2_ADDR" ]; then 171 | echo >&2 "ERR: Relayer account on $HERMES_CFG_CHAIN_2_ID was imported but could not be found! Did you set the following variables correctly?" 172 | echo " + HERMES_CFG_CHAIN_2_KEY_NAME=$HERMES_CFG_CHAIN_2_KEY_NAME" 173 | echo " + CHAIN_2_ACCOUNT_PREFIX=$CHAIN_2_ACCOUNT_PREFIX" 174 | exit 1 175 | elif [ "$CHECK_REL_2_ADDR" == "$REL_2_ADDR" ]; then 176 | echo "- Relayer wallet addr on $HERMES_CFG_CHAIN_2_ID is $REL_2_ADDR" 177 | else 178 | echo >&2 "ERR: The imported relayer account on $HERMES_CFG_CHAIN_2_ID has wallet address '$CHECK_REL_2_ADDR', which differs from the configuration variable 'REL_2_ADDR'=$REL_2_ADDR" 179 | exit 1 180 | fi 181 | 182 | echo "Creating client, connection and channels" 183 | echo '- Creating client' 184 | RES_CREATE_CLIENT_1_TO_2=$($BINARY --config $CONFIG_TOML create client --host-chain $HERMES_CFG_CHAIN_1_ID --reference-chain $HERMES_CFG_CHAIN_2_ID) 185 | TENDERMINT_CLIENT_1_TO_2=$(echo $RES_CREATE_CLIENT_1_TO_2 | grep -o '07-tendermint-[0-9]*') 186 | echo ' > Client 1 to 2: '$TENDERMINT_CLIENT_1_TO_2 187 | [ -z "$TENDERMINT_CLIENT_1_TO_2" ] && { 188 | echo >&2 "Response:"; 189 | echo >&2 "$RES_CREATE_CLIENT_1_TO_2"; 190 | echo >&2 "ERR: Unable to create tendermint light client on chain 1"; 191 | exit 1; 192 | } 193 | 194 | RES_CREATE_CLIENT_2_TO_1=$($BINARY --config $CONFIG_TOML create client --host-chain $HERMES_CFG_CHAIN_2_ID --reference-chain $HERMES_CFG_CHAIN_1_ID) 195 | TENDERMINT_CLIENT_2_TO_1=$(echo $RES_CREATE_CLIENT_2_TO_1 | grep -o '07-tendermint-[0-9]*' | head -n 1) 196 | echo ' > Client 2 to 1: '$TENDERMINT_CLIENT_2_TO_1 197 | [ -z "$TENDERMINT_CLIENT_2_TO_1" ] && { 198 | echo >&2 "Response:"; 199 | echo >&2 "$RES_CREATE_CLIENT_2_TO_1"; 200 | echo >&2 "ERR: Unable to create tendermint light client on chain 2"; 201 | exit 1; 202 | } 203 | 204 | echo '- Creating connection' 205 | RES_CREATE_CONN_1_TO_2=$($BINARY --config $CONFIG_TOML create connection --a-chain $HERMES_CFG_CHAIN_1_ID --a-client $TENDERMINT_CLIENT_1_TO_2 --b-client $TENDERMINT_CLIENT_2_TO_1)
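# Extract the first 'connection-N' identifier from the Hermes output below; the script later reuses the same identifier for the opposite direction (CONN_2_TO_1), which assumes that both freshly started chains assign the same connection number.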
206 | CONN_1_TO_2=$(echo $RES_CREATE_CONN_1_TO_2 | grep -o 'connection-[0-9]*' | head -n 1) 207 | echo ' > Connection 1 to 2: '$CONN_1_TO_2 208 | [ -z "$CONN_1_TO_2" ] && { 209 | echo >&2 "Response:"; 210 | echo >&2 "$RES_CREATE_CONN_1_TO_2"; 211 | echo >&2 "ERR: Unable to create connection on chain 1"; 212 | exit 1; 213 | } 214 | 215 | CONN_2_TO_1="$CONN_1_TO_2" 216 | 217 | sleep 5 218 | echo ' + Testing connection 1' 219 | $BINARY --config $CONFIG_TOML query connection end --chain $HERMES_CFG_CHAIN_1_ID --connection $CONN_1_TO_2 | grep 'Open' 220 | sleep 2 221 | 222 | echo '- Creating channel' 223 | 224 | RES_CREATE_CHAN_1_TO_2=$($BINARY --config $CONFIG_TOML create channel --a-chain $HERMES_CFG_CHAIN_1_ID --a-connection $CONN_1_TO_2 --a-port transfer --b-port transfer) 225 | CHAN_1_TO_2=$(echo $RES_CREATE_CHAN_1_TO_2 | grep -o 'channel-[0-9]*' | head -n 1) 226 | [ -z "$CHAN_1_TO_2" ] && { 227 | echo >&2 "Response:"; 228 | echo >&2 "$RES_CREATE_CHAN_1_TO_2"; 229 | echo >&2 "ERR: Unable to create channel on chain 1"; 230 | exit 1; 231 | } 232 | 233 | CHAN_2_TO_1="$CHAN_1_TO_2" 234 | 235 | echo ' + Testing channel 1' 236 | #$BINARY --config $CONFIG_TOML query channel end $HERMES_CFG_CHAIN_1_ID transfer $CHAN_1_TO_2 | grep 'Open' 237 | 238 | if [[ "$OSTYPE" == "darwin"* ]]; then 239 | sed -i '' "s/NoteClient1/Client 1 to 2: $TENDERMINT_CLIENT_1_TO_2/g" $CONFIG_TOML 240 | sed -i '' "s/NoteClient2/Client 2 to 1: $TENDERMINT_CLIENT_2_TO_1/g" $CONFIG_TOML 241 | sed -i '' "s/NoteConnection1/Connection 1 to 2: $CONN_1_TO_2/g" $CONFIG_TOML 242 | sed -i '' "s/NoteConnection2/Connection 2 to 1: $CONN_2_TO_1/g" $CONFIG_TOML 243 | sed -i '' "s/NoteChannel1/Channel 1 to 2: $CHAN_1_TO_2/g" $CONFIG_TOML 244 | sed -i '' "s/NoteChannel2/Channel 2 to 1: $CHAN_2_TO_1/g" $CONFIG_TOML 245 | else 246 | sed -i "s/NoteClient1/Client 1 to 2: $TENDERMINT_CLIENT_1_TO_2/g" $CONFIG_TOML 247 | sed -i "s/NoteClient2/Client 2 to 1: $TENDERMINT_CLIENT_2_TO_1/g" $CONFIG_TOML 248 | sed -i "s/NoteConnection1/Connection 1 to 2: $CONN_1_TO_2/g" $CONFIG_TOML 249 | sed -i "s/NoteConnection2/Connection 2 to 1: $CONN_2_TO_1/g" $CONFIG_TOML 250 | sed -i "s/NoteChannel1/Channel 1 to 2: $CHAN_1_TO_2/g" $CONFIG_TOML 251 | sed -i "s/NoteChannel2/Channel 2 to 1: $CHAN_2_TO_1/g" $CONFIG_TOML 252 | fi 253 | 254 | echo 'Initialize token hash on opposite channel' 255 | echo "- Init for $HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL on $HERMES_CFG_CHAIN_2_ID" 256 | echo ' + FT-Transfer from '$HERMES_CFG_CHAIN_1_ID' to '$HERMES_CFG_CHAIN_2_ID 257 | $BINARY --config $CONFIG_TOML tx ft-transfer --dst-chain $HERMES_CFG_CHAIN_2_ID --src-chain $HERMES_CFG_CHAIN_1_ID --src-port transfer --src-channel $CHAN_1_TO_2 --timeout-seconds 1000 --amount 1000 --denom $HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL 258 | EXIT_CODE=$? 
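# The ft-transfer above sends a small amount of the configured gas-price denom across the new channel; the packet-recv and packet-ack calls that follow relay the pending packets and acknowledgements in both directions so that the corresponding ibc/... voucher denom gets created on each counterparty chain.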
259 | sleep 2 260 | [ $EXIT_CODE -eq 0 ] || { echo >&2 "ERR: Operation failed (ft-transfer)"; exit 1; } 261 | 262 | $BINARY --config $CONFIG_TOML tx packet-recv --dst-chain $HERMES_CFG_CHAIN_2_ID --src-chain $HERMES_CFG_CHAIN_1_ID --src-port transfer --src-channel $CHAN_1_TO_2 263 | EXIT_CODE=$?; sleep 2 264 | [ $EXIT_CODE -eq 0 ] || { echo >&2 "ERR: Operation failed (packet-recv)"; exit 1; } 265 | 266 | echo "- Init for $HERMES_CFG_CHAIN_2_GAS_PRICE_DENOM_SYMBOL on $HERMES_CFG_CHAIN_1_ID" 267 | echo ' + FT-Transfer from '$HERMES_CFG_CHAIN_2_ID' to '$HERMES_CFG_CHAIN_1_ID 268 | $BINARY --config $CONFIG_TOML tx ft-transfer --dst-chain $HERMES_CFG_CHAIN_1_ID --src-chain $HERMES_CFG_CHAIN_2_ID --src-port transfer --src-channel $CHAN_1_TO_2 --timeout-seconds 1000 --amount 1000 --denom $HERMES_CFG_CHAIN_2_GAS_PRICE_DENOM_SYMBOL 269 | EXIT_CODE=$? 270 | sleep 2 271 | [ $EXIT_CODE -eq 0 ] || { echo >&2 "ERR: Operation failed (ft-transfer)"; exit 1; } 272 | 273 | $BINARY --config $CONFIG_TOML tx packet-recv --dst-chain $HERMES_CFG_CHAIN_1_ID --src-chain $HERMES_CFG_CHAIN_2_ID --src-port transfer --src-channel $CHAN_2_TO_1 274 | EXIT_CODE=$?; sleep 2 275 | [ $EXIT_CODE -eq 0 ] || { echo >&2 "ERR: Operation failed (packet-recv)"; exit 1; } 276 | 277 | $BINARY --config $CONFIG_TOML tx packet-ack --dst-chain $HERMES_CFG_CHAIN_2_ID --src-chain $HERMES_CFG_CHAIN_1_ID --src-port transfer --src-channel $CHAN_1_TO_2 278 | EXIT_CODE=$?; sleep 2 279 | [ $EXIT_CODE -eq 0 ] || { echo >&2 "ERR: Operation failed (packet-ack)"; exit 1; } 280 | 281 | $BINARY --config $CONFIG_TOML tx packet-ack --dst-chain $HERMES_CFG_CHAIN_1_ID --src-chain $HERMES_CFG_CHAIN_2_ID --src-port transfer --src-channel $CHAN_2_TO_1 282 | EXIT_CODE=$?; sleep 2 283 | [ $EXIT_CODE -eq 0 ] || { echo >&2 "ERR: Operation failed (packet-ack)"; exit 1; } 284 | 285 | echo 'Information summary' 286 | echo '- Client 1 to 2: '$TENDERMINT_CLIENT_1_TO_2 287 | echo '- Client 2 to 1: '$TENDERMINT_CLIENT_2_TO_1 288 | echo '- Connection 1 to 2: '$CONN_1_TO_2 289 | echo '- Connection 2 to 1: '$CONN_2_TO_1 290 | echo '- Channel 1 to 2: '$CHAN_1_TO_2 291 | echo '- Channel 2 to 1: '$CHAN_2_TO_1 292 | echo "> The above information was saved as comment lines in [$CONFIG_TOML], you can review it at any time" 293 | echo '## NOTICE: Always run the hermes binary with the path of config.toml as input:' 294 | echo " > $BINARY --config $CONFIG_TOML [command]" 295 | 296 | echo 'Final steps that need to be done' 297 | echo '- Update config file '$CONFIG_TOML 298 | echo ' + Add [chains.packet_filter] for chain '$HERMES_CFG_CHAIN_1_ID' to allow transfers via '$CHAN_1_TO_2 299 | echo ' + Add [chains.packet_filter] for chain '$HERMES_CFG_CHAIN_2_ID' to allow transfers via '$CHAN_2_TO_1 300 | echo ' + Validate config' 301 | echo ' $ '$BINARY' --config '$CONFIG_TOML' config validate' 302 | 303 | # Re-Start service 304 | if [ $DISABLE_SYSTEMCTL -eq 0 ]; then 305 | SERVICE_FILE="/etc/systemd/system/$HERMES_SERVICE_NAME.service" 306 | echo 307 | if [ -f "$SERVICE_FILE" ]; then 308 | echo "You are ready to restart the $HERMES_SERVICE_NAME service (sudo systemctl restart $HERMES_SERVICE_NAME)" 309 | 310 | [ $EXTRA_FUNC -eq 1 ] && sudo systemctl start $HERMES_SERVICE_NAME 311 | else 312 | echo "You can paste the following content into $SERVICE_FILE to create a daemon service" 313 | echo "sudo vi $SERVICE_FILE" 314 | echo 315 | 316 | WORKING_DIR=$(pwd) 317 | 318 | SCRIPT_CONTENT="[Unit] 319 | \nDescription=Hermes as IBC Relayer ($HERMES_BINARY) 320 | \nConditionPathExists=$BINARY 321 | \nAfter=network.target 322 | \n[Service] 323 | \nType=simple 324 | 
\nUser=$USER 325 | \nWorkingDirectory=$WORKING_DIR 326 | \nExecStart=$BINARY --config $CONFIG_TOML --json start 327 | \nRestart=always 328 | \nRestartSec=2 329 | \n[Install] 330 | \nWantedBy=multi-user.target" 331 | echo -e $SCRIPT_CONTENT 332 | echo 333 | echo "sudo systemctl enable $HERMES_SERVICE_NAME" 334 | echo "sudo systemctl start $HERMES_SERVICE_NAME" 335 | 336 | [ $EXTRA_FUNC -eq 1 ] && { 337 | echo 'Creating service '$HERMES_SERVICE_NAME; 338 | echo -e $SCRIPT_CONTENT | sudo tee $SERVICE_FILE > /dev/null; 339 | sudo systemctl daemon-reload; 340 | sudo systemctl enable $HERMES_SERVICE_NAME; 341 | sudo systemctl start $HERMES_SERVICE_NAME; 342 | } 343 | fi 344 | fi 345 | 346 | echo '### Done' 347 | echo "Notice!!! Make sure the service file at '/etc/systemd/system/$HERMES_SERVICE_NAME.service' has correct working directort and execution path (in case you changed any repo/branch)" 348 | -------------------------------------------------------------------------------- /hermes-as-ibc-relayer/template-config.toml: -------------------------------------------------------------------------------- 1 | # The global section has parameters that apply globally to the relayer operation. 2 | [global] 3 | 4 | # Specify the verbosity for the relayer logging output. Default: 'info' 5 | # Valid options are 'error', 'warn', 'info', 'debug', 'trace'. 6 | log_level = 'info' 7 | 8 | 9 | # Specify the mode to be used by the relayer. [Required] 10 | [mode] 11 | 12 | # Specify the client mode. 13 | [mode.clients] 14 | 15 | # Whether or not to enable the client workers. [Required] 16 | enabled = true 17 | 18 | # Whether or not to enable periodic refresh of clients. [Default: true] 19 | # This feature only applies to clients that underlie an open channel. 20 | # For Tendermint clients, the frequency at which Hermes refreshes them is 2/3 of their 21 | # trusting period (e.g., refresh every ~9 days if the trusting period is 14 days). 22 | # Note: Even if this is disabled, clients will be refreshed automatically if 23 | # there is activity on a connection or channel they are involved with. 24 | refresh = true 25 | 26 | # Whether or not to enable misbehaviour detection for clients. [Default: false] 27 | misbehaviour = true 28 | 29 | # Specify the connections mode. 30 | [mode.connections] 31 | 32 | # Whether or not to enable the connection workers for handshake completion. [Required] 33 | enabled = false 34 | 35 | # Specify the channels mode. 36 | [mode.channels] 37 | 38 | # Whether or not to enable the channel workers for handshake completion. [Required] 39 | enabled = false 40 | 41 | # Specify the packets mode. 42 | [mode.packets] 43 | 44 | # Whether or not to enable the packet workers. [Required] 45 | enabled = true 46 | 47 | # Parametrize the periodic packet clearing feature. 48 | # Interval (in number of blocks) at which pending packets 49 | # should be periodically cleared. A value of '0' will disable 50 | # periodic packet clearing. [Default: 100] 51 | clear_interval = 100 52 | 53 | # Whether or not to clear packets on start. [Default: false] 54 | clear_on_start = true 55 | 56 | # Toggle the transaction confirmation mechanism. 57 | # The tx confirmation mechanism periodically queries the `/tx_search` RPC 58 | # endpoint to check that previously-submitted transactions 59 | # (to any chain in this config file) have been successfully delivered. 60 | # If they have not been, and `clear_interval = 0`, then those packets are 61 | # queued up for re-submission. 62 | # Experimental feature. 
Affects telemetry if set to false. 63 | # [Default: true] 64 | tx_confirmation = true 65 | 66 | # The REST section defines parameters for Hermes' built-in RESTful API. 67 | # https://hermes.informal.systems/rest.html 68 | [rest] 69 | 70 | # Whether or not to enable the REST service. Default: false 71 | enabled = false 72 | 73 | # Specify the IPv4/6 host over which the built-in HTTP server will serve the RESTful 74 | # API requests. Default: 127.0.0.1 75 | host = '0.0.0.0' 76 | 77 | # Specify the port over which the built-in HTTP server will serve the restful API 78 | # requests. Default: 3000 79 | port = 3000 80 | 81 | 82 | # The telemetry section defines parameters for Hermes' built-in telemetry capabilities. 83 | # https://hermes.informal.systems/telemetry.html 84 | [telemetry] 85 | 86 | # Whether or not to enable the telemetry service. Default: false 87 | enabled = true 88 | 89 | # Specify the IPv4/6 host over which the built-in HTTP server will serve the metrics 90 | # gathered by the telemetry service. Default: 127.0.0.1 91 | host = '0.0.0.0' 92 | 93 | # Specify the port over which the built-in HTTP server will serve the metrics gathered 94 | # by the telemetry service. Default: 3001 95 | port = 3001 96 | 97 | 98 | # A chains section includes parameters related to a chain and the full node to which 99 | # the relayer can send transactions and queries. 100 | [[chains]] 101 | 102 | # Specify the chain ID. Required 103 | id = 'chain1_id' 104 | 105 | # Specify the RPC address and port where the chain RPC server listens on. Required 106 | rpc_addr = 'http://chain1_rpc_addr' 107 | 108 | # Specify the GRPC address and port where the chain GRPC server listens on. Required 109 | grpc_addr = 'http://chain1_grpc_addr' 110 | 111 | # Specify the WebSocket address and port where the chain WebSocket server 112 | # listens on. Required 113 | websocket_addr = 'ws://chain1_rpc_addr/websocket' 114 | 115 | # Specify the maximum amount of time (duration) that the RPC requests should 116 | # take before timing out. Default: 10s (10 seconds) 117 | # Note: Hermes uses this parameter _only_ in `start` mode; for all other CLIs, 118 | # Hermes uses a large preconfigured timeout (on the order of minutes). 119 | rpc_timeout = '10s' 120 | 121 | # Specify the prefix used by the chain. Required 122 | account_prefix = 'chain1_account_prefix' 123 | 124 | # Specify the name of the private key to use for signing transactions. Required 125 | # See the Adding Keys chapter for more information about managing signing keys: 126 | # https://hermes.informal.systems/documentation/commands/keys/index.html#adding-keys 127 | key_name = 'chain1_key_name' 128 | 129 | # Specify the address type which determines: 130 | # 1) address derivation; 131 | # 2) how to retrieve and decode accounts and pubkeys; 132 | # 3) the message signing method. 133 | # The current configuration options are for Cosmos SDK and Ethermint. 134 | # 135 | # Example configuration for chains based on Ethermint library: 136 | # 137 | # address_type = { derivation = 'ethermint', proto_type = { pk_type = '/ethermint.crypto.v1.ethsecp256k1.PubKey' } } 138 | # 139 | # Default: { derivation = 'cosmos' }, i.e. address derivation as in Cosmos SDK. 140 | # Warning: This is an advanced feature! Modify with caution. 141 | address_type = chain1_address_type 142 | 143 | # Specify the store prefix used by the on-chain IBC modules. 
Required 144 | # Recommended value for Cosmos SDK: 'ibc' 145 | store_prefix = 'ibc' 146 | 147 | # Specify the default amount of gas to be used in case the tx simulation fails, 148 | # and Hermes cannot estimate the amount of gas needed. 149 | # Default: 100 000 150 | default_gas = 100000 151 | 152 | # Specify the maximum amount of gas to be used as the gas limit for a transaction. 153 | # Default: 400 000 154 | max_gas = 400000 155 | 156 | # Specify the price per gas used of the fee to submit a transaction and 157 | # the denomination of the fee. Required 158 | gas_price = { price = chain1_gas_price_amt, denom = 'chain1_gas_price_denom' } 159 | 160 | # Specify the ratio by which to increase the gas estimate used to compute the fee, 161 | # to account for potential estimation error. Default: 0.1, ie. 10%. 162 | # Valid range: 0.0 to 1.0 (inclusive) 163 | gas_multiplier = 1.1 164 | 165 | # Specify how many IBC messages at most to include in a single transaction. 166 | # Default: 30 167 | max_msg_num = 30 168 | 169 | # Specify the maximum size, in bytes, of each transaction that Hermes will submit. 170 | # Default: 2097152 (2 MiB) 171 | max_tx_size = 2097152 172 | 173 | # Specify the maximum amount of time to tolerate a clock drift. 174 | # The clock drift parameter defines how much new (untrusted) header's time 175 | # can drift into the future. Default: 5s 176 | clock_drift = '5s' 177 | 178 | # Specify the maximum time per block for this chain. 179 | # The block time together with the clock drift are added to the source drift to estimate 180 | # the maximum clock drift when creating a client on this chain. Default: 30s 181 | # For cosmos-SDK chains a good approximation is `timeout_propose` + `timeout_commit` 182 | # Note: This MUST be the same as the `max_expected_time_per_block` genesis parameter for Tendermint chains. 183 | max_block_time = '30s' 184 | 185 | # Specify the amount of time to be used as the light client trusting period. 186 | # It should be significantly less than the unbonding period 187 | # (e.g. unbonding period = 3 weeks, trusting period = 2 weeks). 188 | # Default: 2/3 of the `unbonding period` for Cosmos SDK chains 189 | trusting_period = '9days' 190 | 191 | # Specify the trust threshold for the light client, ie. the maximum fraction of validators 192 | # which have changed between two blocks. 193 | # Default: { numerator = '1', denominator = '3' }, ie. 1/3. 194 | # Warning: This is an advanced feature! Modify with caution. 195 | trust_threshold = { numerator = '1', denominator = '3' } 196 | 197 | # Specify a string that Hermes will use as a memo for each transaction it submits 198 | # to this chain. The string is limited to 50 characters. Default: '' (empty). 199 | # Note: Hermes will append to the string defined here additional 200 | # operational debugging information, e.g., relayer build version. 201 | memo_prefix = '' 202 | 203 | # This section specifies the filters for policy based relaying. 204 | # 205 | # Default: no policy / filters, allow all packets on all channels. 206 | # 207 | # Only packet filtering based on channel identifier can be specified. 208 | # A channel filter has two fields: 209 | # 1. `policy` - one of two types are supported: 210 | # - 'allow': permit relaying _only on_ the port/channel id in the list below, 211 | # - 'deny': permit relaying on any channel _except for_ the list below. 212 | # 2. `list` - the list of channels specified by the port and channel identifiers. 213 | # Optionally, each element may also contains wildcards, for eg. 
'ica*' 214 | # to match all identifiers starting with 'ica' or '*' to match all identifiers. 215 | # 216 | # Example configuration of a channel filter, only allowing packet relaying on 217 | # channel with port ID 'transfer' and channel ID 'channel-0', as well as on 218 | # all ICA channels. 219 | # 220 | # [chains.packet_filter] 221 | # policy = 'allow' 222 | # list = [ 223 | # ['ica*', '*'], 224 | # ['transfer', 'channel-0'], 225 | # ] 226 | 227 | # Specify that the transaction fees should be payed from this fee granter's account. 228 | # Optional. If unspecified (the default behavior), then no fee granter is used, and 229 | # the account specified in `key_name` will pay the tx fees for all transactions 230 | # submitted to this chain. 231 | # fee_granter = '' 232 | 233 | # [chains.packet_filter] 234 | # policy = 'allow' 235 | # list = [ 236 | # ['transfer', 'channel-?'], 237 | # ] 238 | 239 | [[chains]] 240 | id = 'chain2_id' 241 | rpc_addr = 'http://chain2_rpc_addr' 242 | grpc_addr = 'http://chain2_grpc_addr' 243 | websocket_addr = 'ws://chain2_rpc_addr/websocket' 244 | rpc_timeout = '10s' 245 | account_prefix = 'chain2_account_prefix' 246 | key_name = 'chain2_key_name' 247 | store_prefix = 'ibc' 248 | default_gas = 100000 249 | max_gas = 400000 250 | gas_price = { price = chain2_gas_price_amt, denom = 'chain2_gas_price_denom' } 251 | gas_multiplier = 1.1 252 | max_msg_num = 30 253 | max_tx_size = 2097152 254 | clock_drift = '5s' 255 | max_block_time = '30s' 256 | trusting_period = '9days' 257 | trust_threshold = { numerator = '1', denominator = '3' } 258 | address_type = chain2_address_type 259 | 260 | # [chains.packet_filter] 261 | # policy = 'allow' 262 | # list = [ 263 | # ['transfer', 'channel-?'], 264 | # ] 265 | 266 | # Notes 267 | # NoteClient1 268 | # NoteClient2 269 | # NoteConnection1 270 | # NoteConnection2 271 | # NoteChannel1 272 | # NoteChannel2 273 | -------------------------------------------------------------------------------- /keys/README.md: -------------------------------------------------------------------------------- 1 | #### The following keys are being used to create 3 validators on both chains 2 | 3 | - evmosvaloper1wuqvcpuunf7r5rg7xutqddhw55grfzc7ewkz9a 4 | + Wallet Addr: evmos1wuqvcpuunf7r5rg7xutqddhw55grfzc75qejyq 5 | + Mnemonic: spoil senior door access upset floor decorate shield high punch senior tape pigeon base slogan height clever buffalo cat report poem weapon labor satoshi 6 | + ETH private key: FC3F58B007A017166DE5A340C7A2641EB37CF37081D6F9013636CEBFBAF7B1FE 7 | + Cosmos addr: cosmos1r0ku6275wrtud9vtvzulvgrk8vznan7xzeuepz 8 | - evmosvaloper1zxgt4pwzzsv02z24g80lc5rhtsp0prw046yxss 9 | + Wallet Addr: evmos1zxgt4pwzzsv02z24g80lc5rhtsp0prw0c5tk3d 10 | + Mnemonic: width produce brush hour horse retreat play flag fresh broken measure culture scare broken erupt pilot buzz embody depend topic behind rigid fan battle 11 | + ETH private key: 0172DC491B5ACD04DD378D3FD8FD9F41A0D701E070941474FADECD72E1E085B9 12 | + Cosmos addr: cosmos1fwqsdptgfvtjvhxu2eem3pgvjwn3yup24n75kh 13 | - evmosvaloper1vcy9v4jp0sd4hysqqcuwleytxre3ms4cmv5ajl 14 | + Wallet Addr: evmos1vcy9v4jp0sd4hysqqcuwleytxre3ms4ckzmdnz 15 | + Mnemonic: stage grid emotion thumb safe myth chair dizzy beyond casual select polar hover retire master neglect shift zero trigger section token replace truly father 16 | + ETH private key: E0D83C6054597638469CC91A46F14B7F62705297912524059629E4674302928F 17 | + Cosmos addr: cosmos1czg02jwz4rvu405mzxnw7h9zkzu7pm39wl6h5e 18 | 19 | #### The following is being used 
to create reserved account for paying IBC fee on both chains 20 | - evmos1metw56lk3k4vhkh0vzxlr8p4mzpjvttmagvekp (for both chains) 21 | + Mnemonic: raw course slim hockey salt crawl sick safe december during armed fragile 22 | + Cosmos addr: cosmos16euecy8mnkhdpnr4y3346h44v0gqj67zwc4laf -------------------------------------------------------------------------------- /sample.cosmos-and-evmos.override-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # You can rename this file into `override-env.sh` to make the first network become Cosmos (gaia) 4 | 5 | export KEYRING="file" # Secure your chain otherwise someone will absolutely transfer your token without your permission if you use keyring 'test' 6 | export VAL_KEYRING_FILE_ENCRYPTION_PASSWORD="11111111" 7 | 8 | export CHAIN_1_TYPE="cosmos" # valid values are 'evmos' or 'cosmos' only, must be 'evmos' for evmos chain, otherwise 'cosmos' for other chains 9 | export CHAIN_1_ID="cosmos-8" 10 | export CHAIN_1_COINTYPE=118 # 60 for EVMOS, 118 for Cosmos 11 | export CHAIN_1_ACCOUNT_PREFIX="cosmos" 12 | export CHAIN_1_GIT_REPO="https://github.com/cosmos/gaia" 13 | export CHAIN_1_GIT_REPO_BRANCH="v10.0.0" 14 | export CHAIN_1_DAEMON_BINARY_NAME="gaiad" 15 | export CHAIN_1_DENOM_SYMBOL="atom" 16 | export CHAIN_1_MIN_DENOM_SYMBOL="uatom" 17 | export CHAIN_1_GAS_DENOM_SYMBOL="natom" 18 | export CHAIN_1_DENOM_EXPONENT=6 19 | export CHAIN_1_GAS_DENOM_EXPONENT=3 20 | 21 | export REL_1_SEED="raw course slim hockey salt crawl sick safe december during armed fragile" 22 | export REL_1_ADDR="cosmos16euecy8mnkhdpnr4y3346h44v0gqj67zwc4laf" # Wallet address of the above seed on Cosmos chain 23 | 24 | # IBC Hermes 25 | export HERMES_CFG_CHAIN_1_ID="$CHAIN_1_ID" 26 | export HERMES_CFG_CHAIN_1_ACCOUNT_PREFIX="$CHAIN_1_ACCOUNT_PREFIX" 27 | export HERMES_CFG_CHAIN_1_GAS_PRICE_DENOM_SYMBOL="$CHAIN_1_MIN_DENOM_SYMBOL" 28 | export HERMES_CFG_CHAIN_1_DENOM_EXPONENT=$CHAIN_1_DENOM_EXPONENT # no of digits 29 | 30 | # Big Dipper 31 | export BD_CFG_CHAIN_1_GIT_REPO="https://github.com/forbole/bdjuno.git" 32 | export BD_CFG_CHAIN_1_GIT_REPO_BRANCH="chains/cosmos/mainnet" 33 | export BD_CFG_CHAIN_1_ACCOUNT_PREFIX="$CHAIN_1_ACCOUNT_PREFIX" 34 | export BD_CFG_CHAIN_1_ID="$CHAIN_1_ID" 35 | export BD_CFG_CHAIN_1_DENOM_SYMBOL="$CHAIN_1_DENOM_SYMBOL" 36 | export BD_CFG_CHAIN_1_MIN_DENOM_SYMBOL="$CHAIN_1_MIN_DENOM_SYMBOL" 37 | export BD_CFG_CHAIN_1_DENOM_EXPONENT=$CHAIN_1_DENOM_EXPONENT 38 | export BD2_CFG_CHAIN_1_GIT_REPO="https://github.com/forbole/big-dipper-2.0-cosmos.git" 39 | export BD2_CFG_CHAIN_1_BRANCH="web-cosmos@2.15.1" 40 | export BD2_CFG_CHAIN_1_PROJECT_NAME="web-cosmos" 41 | export BD2_CFG_CHAIN_1_CHAIN_NAME="cosmos" 42 | -------------------------------------------------------------------------------- /sample.expose-big-dipper-ui.override-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # You can rename this file into `override-env.sh` to make the block explorer become accessible from outside world 4 | # Use a domain that does not force https (like .dev, .world,...) 
because all exposed ports of this project are HTTP only 5 | 6 | export KEYRING="file" # Secure your chain otherwise someone will absolutely transfer your token without your permission if you use keyring 'test' 7 | export VAL_KEYRING_FILE_ENCRYPTION_PASSWORD="11111111" 8 | 9 | export BD2_CFG_CHAIN_1_PUBLIC_DOMAIN="bigdipper.example.com" 10 | export BD2_CFG_CHAIN_1_PUBLIC_RPC_26657="rpc.example.com:$CHAIN_1_EXPOSE_RPC_TO_PORT" 11 | 12 | export BD2_CFG_CHAIN_2_PUBLIC_DOMAIN="bigdipper.example.com" 13 | export BD2_CFG_CHAIN_2_PUBLIC_RPC_26657="rpc.example.com:$CHAIN_2_EXPOSE_RPC_TO_PORT" --------------------------------------------------------------------------------