├── .circleci └── config.yml ├── .eslintrc.json ├── .gitignore ├── .npmrc-auth ├── GUIDE_ANSIBLE.md ├── GUIDE_COMPLETE.md ├── LICENSE ├── README.md ├── ansible ├── .yamllint ├── README.md ├── inventory.sample ├── main.yml ├── main_backup_keystore.yml ├── main_debug.yml ├── main_journalctl_vacuum.yml ├── main_restart_service.yml ├── main_restore_db.yml ├── main_resync.yml ├── main_rotate_keys.yml ├── main_show_multiaddr.yaml ├── main_update_binary.yml ├── roles │ ├── journalctl-vacuum │ │ └── tasks │ │ │ └── main.yml │ ├── nginx-auth │ │ ├── .yamllint │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ ├── node_exporter.nginx.conf │ │ │ └── polkadot_metrics.nginx.conf │ │ ├── molecule │ │ │ └── default │ │ │ │ ├── Dockerfile.j2 │ │ │ │ ├── INSTALL.rst │ │ │ │ ├── converge.yml │ │ │ │ ├── molecule.yml │ │ │ │ └── tests │ │ │ │ ├── test_default.py │ │ │ │ └── test_nginx_auth.py │ │ └── tasks │ │ │ └── main.yml │ ├── node-exporter │ │ ├── .yamllint │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── node_exporter.service │ │ ├── molecule │ │ │ └── default │ │ │ │ ├── Dockerfile.j2 │ │ │ │ ├── INSTALL.rst │ │ │ │ ├── converge.yml │ │ │ │ ├── molecule.yml │ │ │ │ └── tests │ │ │ │ ├── test_default.py │ │ │ │ └── test_node_exporter.py │ │ └── tasks │ │ │ └── main.yml │ ├── polkadot-backup-keystore │ │ └── tasks │ │ │ └── main.yml │ ├── polkadot-debug │ │ └── tasks │ │ │ └── main.yml │ ├── polkadot-restart-service │ │ └── tasks │ │ │ └── main.yml │ ├── polkadot-restore-db │ │ └── tasks │ │ │ └── main.yml │ ├── polkadot-resync │ │ └── tasks │ │ │ └── main.yml │ ├── polkadot-rotate-keys │ │ └── tasks │ │ │ └── main.yml │ ├── polkadot-update-binary │ │ ├── .yamllint │ │ ├── molecule │ │ │ └── default │ │ │ │ ├── Dockerfile.j2 │ │ │ │ ├── INSTALL.rst │ │ │ │ ├── converge.yml │ │ │ │ ├── molecule.yml │ │ │ │ └── tests │ │ │ │ ├── test_default.py │ │ │ │ └── test_polkadot.py │ │ └── tasks │ │ │ └── main.yml │ ├── polkadot-validator 
│ │ ├── .yamllint │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ ├── journald.conf │ │ │ └── nginx.conf │ │ ├── molecule │ │ │ └── default │ │ │ │ ├── Dockerfile.j2 │ │ │ │ ├── INSTALL.rst │ │ │ │ ├── converge.yml │ │ │ │ ├── molecule.yml │ │ │ │ └── tests │ │ │ │ ├── expected.yaml │ │ │ │ ├── test_default.py │ │ │ │ ├── test_firewall.py │ │ │ │ └── test_polkadot.py │ │ ├── tasks │ │ │ ├── firewall.yml │ │ │ ├── journald.yml │ │ │ ├── main.yml │ │ │ ├── proxy.yml │ │ │ ├── service.yml │ │ │ ├── session.yml │ │ │ └── user.yml │ │ └── templates │ │ │ ├── polkadot.service.j2 │ │ │ ├── proxy.conf.j2 │ │ │ └── session.yaml.j2 │ └── show-multiaddr │ │ └── tasks │ │ └── main.yml └── setup.sh ├── config ├── main.sample.json ├── main.template.json └── main.withBackup.sample.json ├── package.json ├── scripts ├── binaryUpgradeTest.json ├── deploy.sh ├── integrationTest.sh ├── patch.sh └── test.json ├── src ├── index.js └── lib │ ├── actions │ ├── clean.js │ ├── plan.js │ ├── restoreDB.js │ ├── rotateKeys.js │ ├── sync.js │ └── updateBinary.js │ ├── application.js │ ├── async.js │ ├── clients │ ├── ansible.js │ └── terraform.js │ ├── cmd.js │ ├── config.js │ ├── env.js │ ├── files.js │ ├── platform.js │ ├── project.js │ ├── ssh.js │ ├── tpl.js │ └── version.js ├── terraform ├── aws │ ├── backend.tf │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── variables.tf │ └── versions.tf ├── azure │ ├── backend.tf │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── variables.tf │ └── versions.tf ├── digitalocean │ ├── backend.tf │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── variables.tf │ └── versions.tf ├── gcp │ ├── backend.tf │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── variables.tf │ └── versions.tf ├── hetzner │ ├── README.md │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── setup_users.sh │ ├── variables.tf │ └── versions.tf ├── packet │ ├── backend.tf │ ├── main.tf │ ├── output.tf │ ├── provider.tf │ ├── variables.tf │ └── 
versions.tf └── remote-state │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── test ├── index.js └── lib │ ├── cmd.js │ ├── files.js │ ├── tpl.js │ └── version.js ├── tpl ├── ansible_inventory └── tfvars └── yarn.lock /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | ansibleIntegrationTest: &ansibleIntegrationTest 2 | docker: 3 | - image: web3f/secure-validator-ci:v1.2.6 4 | steps: 5 | - checkout 6 | - setup_remote_docker 7 | - run: 8 | description: execute molecule tests for the ansible role at $ROLE_PATH 9 | command: | 10 | pip3 install molecule-docker 11 | cd $ROLE_PATH 12 | molecule test 13 | 14 | version: 2 15 | 16 | jobs: 17 | unitTests: 18 | docker: 19 | - image: web3f/node:v0.1.2 20 | steps: 21 | - checkout 22 | - run: yarn 23 | - run: 24 | name: Run tests 25 | command: | 26 | yarn test 27 | 28 | polkadotValidatorIntegrationTest: 29 | environment: 30 | ROLE_PATH: ansible/roles/polkadot-validator 31 | <<: *ansibleIntegrationTest 32 | 33 | nodeExporterIntegrationTest: 34 | environment: 35 | ROLE_PATH: ansible/roles/node-exporter 36 | <<: *ansibleIntegrationTest 37 | 38 | updateBinaryIntegrationTest: 39 | environment: 40 | ROLE_PATH: ansible/roles/polkadot-update-binary 41 | <<: *ansibleIntegrationTest 42 | 43 | nginxAuthIntegrationTest: 44 | environment: 45 | ROLE_PATH: ansible/roles/nginx-auth 46 | <<: *ansibleIntegrationTest 47 | 48 | integrationTest: 49 | docker: 50 | - image: web3f/secure-validator-ci:v1.2.6 51 | steps: 52 | - checkout 53 | - run: 54 | name: Copy credentials 55 | command: echo $GOOGLE_APPLICATION_CREDENTIALS_CONTENT > $(pwd)/credentials.json 56 | - run: 57 | name: Yarn install 58 | command: yarn 59 | - run: 60 | name: integrationTest 61 | command: ./scripts/integrationTest.sh 62 | 63 | publishPackage: 64 | docker: 65 | - image: web3f/node:v0.1.2 66 | steps: 67 | - checkout 68 | - run: 69 | name: Publish package 70 | command: npm publish --userconfig=.npmrc-auth 71 | 72 | 
workflows: 73 | version: 2 74 | test-deploy: 75 | jobs: 76 | - unitTests: 77 | filters: 78 | tags: 79 | only: /.*/ 80 | - polkadotValidatorIntegrationTest: 81 | filters: 82 | tags: 83 | only: /.*/ 84 | requires: 85 | - unitTests 86 | - nodeExporterIntegrationTest: 87 | filters: 88 | tags: 89 | only: /.*/ 90 | requires: 91 | - unitTests 92 | - updateBinaryIntegrationTest: 93 | filters: 94 | tags: 95 | only: /.*/ 96 | requires: 97 | - unitTests 98 | - nginxAuthIntegrationTest: 99 | filters: 100 | tags: 101 | only: /.*/ 102 | requires: 103 | - unitTests 104 | - integrationTest: 105 | filters: 106 | tags: 107 | only: /.*/ 108 | requires: 109 | - polkadotValidatorIntegrationTest 110 | - nodeExporterIntegrationTest 111 | - updateBinaryIntegrationTest 112 | - nginxAuthIntegrationTest 113 | - publishPackage: 114 | filters: 115 | tags: 116 | only: /^v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$/ 117 | branches: 118 | ignore: /.*/ 119 | requires: 120 | - integrationTest 121 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "node": true, 4 | "commonjs": true, 5 | "es6": true, 6 | "mocha": true 7 | }, 8 | "extends": "eslint:recommended", 9 | "globals": { 10 | "Atomics": "readonly", 11 | "SharedArrayBuffer": "readonly" 12 | }, 13 | "parserOptions": { 14 | "ecmaVersion": 2020 15 | }, 16 | "rules": { 17 | "no-console": 0, 18 | "no-useless-escape": 0, 19 | "no-async-promise-executor": 0 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | build/* 3 | ansible/inventory 4 | ansible/**/__pycache__ 5 | yarn-error.log 6 | .env 7 | 
public_keyfile 8 | public_keyfile.pub 9 | validator_keyfile 10 | validator_keyfile.pub 11 | credentials.json 12 | config/* 13 | !config/*template* 14 | !config/*sample* 15 | -------------------------------------------------------------------------------- /.npmrc-auth: -------------------------------------------------------------------------------- 1 | //registry.npmjs.org/:_authToken=${NPM_TOKEN} -------------------------------------------------------------------------------- /GUIDE_ANSIBLE.md: -------------------------------------------------------------------------------- 1 | # Ansible Guide 2 | 3 | This repo contains collections of Ansible scripts inside the [ansible/](ansible) 4 | directory, so called "Roles", which are responsible for the provisioning of 5 | all configured nodes. It automatically sets up the [Application 6 | Layer](README.md/#application-layer) and manages updates for Polkadot 7 | software releases. 8 | 9 | There is a main Ansible Playbook that orchestrates all the roles, it gets 10 | executed locally on your machine, then connects to the configured nodes and sets 11 | up the required tooling. Firewalls, Polkadot nodes and all its dependencies are 12 | installed by issuing a single command. No manual intervention into the remote 13 | nodes is required. 14 | 15 | ## Prerequisites 16 | 17 | * [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) 18 | (v2.8+) 19 | 20 | On Debian-based systems this can be installed with `sudo apt install ansible` 21 | from the standard repositories. 22 | 23 | * Running Debian-based nodes 24 | 25 | The nodes require configured SSH access, but don't need any other preparatory 26 | work. It's up to you on how many nodes you want to use. This setup assumes the 27 | remote users have `sudo` privileges with the same `sudo` password. 28 | Alternatively, [additional 29 | configuration](https://docs.ansible.com/ansible/latest/user_guide/become.html) 30 | is required. 
31 | 32 | It's recommended to set up SSH pubkey authentication for the nodes and to add the 33 | access keys to the SSH agent. 34 | 35 | ## Inventory 36 | 37 | All required data is saved in an [Ansible 38 | inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html), 39 | which by default is placed under `/etc/ansible/hosts` (but you can create it 40 | anywhere you want) and must only be configured once. Most values from the 41 | [SAMPLE FILE](ansible/inventory.sample) can be copied. Only a handful of entries 42 | must be adjusted. 43 | 44 | For each node, the following information must be configured in the Ansible 45 | inventory: 46 | 47 | * IP address or URL. 48 | * SSH user (as `ansible_user`). It's encouraged NOT to use `root`. 49 | * (optional) The telemetry URL (e.g. `wss://telemetry.polkadot.io/submit/`, 50 | where the info can then be seen under https://telemetry.polkadot.io). 51 | * (optional) The logging filter. 52 | 53 | The other default values from the sample inventory can be left as is. 54 | 55 | **NOTE**: Telemetry information exposes IP address, among other information. For 56 | this reason it's highly encouraged to use a [private telemetry 57 | server](https://github.com/paritytech/substrate-telemetry) and not to expose the 58 | validator to a public server. 59 | 60 | ### Setup Validator 61 | 62 | Set up the validator node by specifying a `[validator_]` host, including its 63 | required variables. `` should start at `0` and increment for each other 64 | validator (assuming you have more than one validator). 
65 | 66 | Example: 67 | 68 | ```ini 69 | [validator_0] 70 | 147.75.76.65 71 | 72 | [validator_0:vars] 73 | ansible_user=alice 74 | telemetry_url=wss://telemetry.polkadot.io/submit/ 75 | logging_filter='sync=trace,afg=trace,babe=debug' 76 | 77 | [validator_1] 78 | 162.12.35.55 79 | 80 | [validator_1:vars] 81 | ansible_user=bob 82 | telemetry_url=wss://telemetry.polkadot.io/submit/ 83 | logging_filter='sync=trace,afg=trace,babe=debug' 84 | ``` 85 | 86 | ### Grouping Validators 87 | 88 | All nodes to be set up must be grouped under `[validator:children]`. 89 | 90 | Example: 91 | 92 | ```ini 93 | [validator:children] 94 | validator_0 95 | validator_1 96 | ``` 97 | 98 | ### Specify common variables 99 | 100 | Finally, define the common variables for all the nodes. 101 | 102 | Important variables which should vary from the [sample inventory](ansible/inventory.sample): 103 | 104 | * `project` - The name for how each node should be prefixed for the telemetry 105 | name. 106 | * `polkadot_binary_url` - This is the URL from where Ansible will 107 | download the Polkadot binary. Binary releases are available in the official 108 | [Parity Releases repo](https://github.com/paritytech/polkadot/releases) or the 109 | [W3F Releases repo](https://github.com/w3f/polkadot/releases). 110 | * `polkadot_binary_checksum` - The SHA256 checksum of the Polkadot binary which 111 | Ansible verifies during execution. Must be prefixed with `sha256:`. 112 | * `chain` - The chain to work on, such as `kusama` or `polkadot`. 113 | * `polkadot_network_id` - The network identifier, such as `ksmcc3` (for Kusama) 114 | or `polkadot`. 115 | * `node_exporter_enabled` - Enable or disable the setup of [Node 116 | Exporter](https://github.com/prometheus/node_exporter). It's up to you whether 117 | you want it or not. 118 | 119 | The other default values from the sample inventory can be left as is. 
120 | 121 | Example: 122 | 123 | ```ini 124 | [all:vars] 125 | # The name for how each node should be prefixed for the telemetry name 126 | project=alice-in-wonderland 127 | 128 | # Can be left as is. 129 | ansible_ssh_common_args='-o StrictHostKeyChecking=no -o ConnectTimeout=15' 130 | build_dir=$HOME/.config/polkadot-secure-validator/build/w3f/ansible 131 | 132 | # Specify which `polkadot` binary to install. Checksum is verified during execution. 133 | polkadot_binary_url='https://github.com/paritytech/polkadot/releases/download/v0.8.2/polkadot' 134 | polkadot_binary_checksum='sha256:349b786476de9188b79817cab48fc6fc030908ac0e8e2a46a1600625b1990758' 135 | 136 | # Specify the chain/network. 137 | polkadot_network_id=polkadot 138 | chain=polkadot 139 | 140 | # Nginx authentication settings. 141 | nginx_user='prometheus' 142 | nginx_password='nginx_password' 143 | 144 | # Node exporter settings. Disabled by default. 145 | node_exporter_enabled='false' 146 | node_exporter_binary_url='https://github.com/prometheus/node_exporter/releases/download/v0.18.1/node_exporter-0.18.1.linux-amd64.tar.gz' 147 | node_exporter_binary_checksum='sha256:b2503fd932f85f4e5baf161268854bf5d22001869b84f00fd2d1f57b51b72424' 148 | 149 | # Polkadot service restart settings. Enabled to restart every hour. 150 | polkadot_restart_enabled='true' 151 | polkadot_restart_minute='0' 152 | polkadot_restart_hour='*' 153 | polkadot_restart_day='*' 154 | polkadot_restart_month='*' 155 | polkadot_restart_weekday='*' 156 | 157 | # Optional: Restore the chain db from a .7z snapshot 158 | polkadot_db_snapshot_url='https://ksm-rocksdb.polkashots.io/kusama-6658753.RocksDb.7z' 159 | polkadot_db_snapshot_checksum='sha256:4f61a99e4b00acb335aff52f2383880d53b30617c0ae67ac47c611e7bf6971ff' 160 | ``` 161 | 162 | ## Execution 163 | 164 | Download the required files. 
165 | 166 | ```console 167 | user@pc:~$ git clone https://github.com/w3f/polkadot-secure-validator.git 168 | user@pc:~$ cd polkadot-secure-validator/ansible 169 | ``` 170 | 171 | Once the inventory file is configured, simply run the setup script and specify 172 | the `sudo` password for the remote machines. 173 | 174 | **NOTE**: If no inventory path is specified, it will try to look for 175 | `ansible/inventory.yml` by default. 176 | 177 | ```console 178 | user@pc:~/polkadot-secure-validator/ansible$ chmod +x setup.sh 179 | user@pc:~/polkadot-secure-validator/ansible$ ./setup.sh my_inventory.yml 180 | Sudo password for remote servers: 181 | >> Pulling upstream changes... [OK] 182 | >> Testing Ansible availability... [OK] 183 | >> Finding validator hosts... [OK] 184 | hosts (2): 185 | 147.75.76.65 186 | 162.12.35.55 187 | >> Testing connectivity to hosts... [OK] 188 | >> Executing Ansible Playbook... 189 | 190 | ... 191 | ``` 192 | 193 | Alternatively, execute the Playbook manually ("become" implies `sudo` 194 | privileges). 195 | 196 | ```console 197 | user@pc:~/polkadot-secure-validator/ansible$ ansible-playbook -i my_inventory.yml main.yml --become --ask-become-pass 198 | ``` 199 | 200 | The `setup.sh` script handles some extra functionality, such as downloading the 201 | newest upstream changes and checking connectivity of remote hosts including 202 | privilege escalation. This script/Playbook can be executed over and over again. 203 | 204 | Additional Playbooks are provided besides `main.yml`, but those are outside the 205 | scope of this guide. 206 | 207 | ### Updating Polkadot 208 | 209 | To update the Polkadot version, simply adjust those two lines in the Ansible 210 | inventory: 211 | 212 | ```ini 213 | polkadot_binary_url='...' 214 | polkadot_binary_checksum='sha256:...' 215 | ``` 216 | 217 | Then just execute `setup.sh` again. 
218 | -------------------------------------------------------------------------------- /GUIDE_COMPLETE.md: -------------------------------------------------------------------------------- 1 | # Terraform Guide 2 | 3 | This repo has code for creating a complete implementation of both layers 4 | described in [Workflow](README.md/#workflow). This can be done on any host with 5 | NodeJS, Yarn and Git installed. 6 | 7 | ### Prerequisites 8 | 9 | Before using polkadot-secure-validator you need to have installed: 10 | 11 | * NodeJS v14 or above (we recommend using [nvm](https://github.com/nvm-sh/nvm)) 12 | 13 | * [Yarn](https://yarnpkg.com/lang/en/docs/install) 14 | 15 | * [Terraform](https://www.terraform.io/downloads.html) (the snap package available via your package manager will not work) 16 | 17 | * [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) (v2.8+, available through pip) 18 | 19 | You will need credentials as environment variables for all the infrastructure providers 20 | used in the platform creation phase. The tool now supports AWS, Azure, GCP and packet, 21 | these are the required variables: 22 | 23 | * AWS: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` of an IAM account with EC2 24 | and VPC write access. 25 | * Azure: `ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`, `ARM_SUBSCRIPTION_ID`, 26 | `ARM_TENANT_ID`, `TF_VAR_client_id` (same as `ARM_CLIENT_ID`), 27 | `TF_VAR_client_secret` (same as `ARM_CLIENT_SECRET`). All these credentials 28 | should correspond to a service principal with at least a `Contributor` role, 29 | see [here](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal) 30 | for details or [create an issue](https://github.com/w3f/polkadot-secure-validator/issues/new) for 31 | finer grained access control. 
32 | * GCP: `GOOGLE_APPLICATION_CREDENTIALS` (path to json file with credentials of 33 | the service account you want to use; this service account needs to have write 34 | access to compute and network resources). 35 | * PACKET: `TF_VAR_auth_token`. 36 | * DigitalOcean: `TF_VAR_do_token`. 37 | * Hetzner: `TF_VAR_hcloud_token`. 38 | 39 | The tool allows you to specify which providers to use, so you don't need to have 40 | accounts in all of them, see [here](https://github.com/w3f/polkadot-secure-validator/blob/master/config/main.sample.json) 41 | for an example of how to define the providers. You could use, for instance, 42 | packet for the validators and GCP for the public nodes. Keep in mind that, the 43 | more distributed your public nodes, the fewer opportunities to be affected by 44 | potential incidents in the respective cloud providers. 45 | 46 | You need two additional environment variables to allow ansible to connect to the 47 | created machines: 48 | 49 | * `SSH_ID_RSA_PUBLIC`: path to private SSH key you want to use for the public 50 | nodes. 51 | 52 | * `SSH_ID_RSA_VALIDATOR`: path to private SSH key you want to use for the 53 | validators. 54 | 55 | You can easily create and add them to your ssh-agent as follows: 56 | 57 | ```bash 58 | $ ssh-keygen -m PEM -f 59 | $ ssh-add 60 | ``` 61 | 62 | Note: The key paths MUST be added to `ssh-add`. 63 | 64 | ### Provider Account Setup & Configuration 65 | 66 | * Create a project in Google Cloud Platform (GCP). This is currently the only supported backend for Terraform state changes. Copy the project ID for the next step. 67 | * Create a project in the cloud Provider account of your choice with the name of the ID that you copied in the GCP project. e.g. `My-Project-1234567` 68 | - Note: Under the hood Terraform uses the `projectId` field in your `config/main.json` to store the state using this format. 69 | * The ssh username in `config/main.json` only works with `root` with some providers such as digitalocean. 
70 | * Inside your `config/main.json` the `machineType` field must have a slug value. e.g. for [DigitalOcean](https://slugs.do-api.dev/). `s-4vcpu-8gb-amd`. 71 | 72 | 73 | ### Synchronization 74 | 75 | ``` 76 | $ git clone https://github.com/w3f/secure-validator 77 | $ cd secure-validator 78 | $ yarn 79 | $ cp config/main.template.json config/main.json 80 | # now you should complete and customize config/main.json, using main.sample.json as a reference 81 | $ yarn sync -c config/main.json 82 | ``` 83 | 84 | You can also just provision a set of previously created machines with the 85 | [ansible code](ansible). We have provided an [example 86 | inventory](ansible/inventory.sample) that you can customize. See the [Ansible 87 | Guide](GUIDE_ANSIBLE.md) for more. 88 | 89 | The `sync` command is idempotent, unless there are errors it will always have 90 | the same results. You can execute it as much as you want, it will only make 91 | changes when the actual infrastructure state doesn't match the desired state. 92 | 93 | ### Restore db 94 | 95 | ``` 96 | $ yarn restore-db -c config/main.json 97 | ``` 98 | 99 | A possible configuration can be: 100 | 101 | ```json 102 | ... 103 | "validators": { 104 | "additionalFlags": "--unsafe-pruning --pruning 1000", 105 | "dbSnapshot": { 106 | "url": "https://ksm-rocksdb.polkashots.io/kusama-6658753.RocksDb.7z", 107 | "checksum": "sha256:4f61a99e4b00acb335aff52f2383880d53b30617c0ae67ac47c611e7bf6971ff" 108 | }, 109 | ... 
110 | } 111 | ``` 112 | 113 | ### Update Binary 114 | 115 | ``` 116 | $ yarn update-binary -c config/main.json 117 | ``` 118 | 119 | ### Rotate Keys 120 | 121 | ``` 122 | $ yarn rotate-keys -c config/main.json 123 | ``` 124 | 125 | ### Cleaning up 126 | 127 | You can remove all the created infrastructure with: 128 | 129 | ``` 130 | $ yarn clean -c config/main.json 131 | ``` 132 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | NOTE: this repository isn't actively maintained 2 | 3 | # Polkadot Validator Setup 4 | 5 | This repo describes a potential setup for a Polkadot or Kusama validator that aims to 6 | prevent some types of potential attacks at the TCP layer and below. 7 | The [Workflow](#workflow) section describes the [Platform Layer](#platform-layer) 8 | and the [Application Layer](#application-layer) in more detail. 9 | 10 | ## Usage 11 | 12 | There are two ways of using this repository: 13 | 14 | * **Platform & Application Layer** 15 | 16 | Configure credentials for infrastructure providers such as AWS, Azure, GCP, digitalocean, 17 | and/or Packet, then execute the Terraform process to automatically deploy the 18 | required machines ([Platform Layer](#platform-layer)) and setup the 19 | [Application Layer](#application-layer). 20 | 21 | See the [Complete Guide](GUIDE_COMPLETE.md) for more. 
22 | 23 | * **Application Layer** 24 | 25 | Setup Debian-based machines yourself, which only need basic SSH access and 26 | configure those in an inventory. The Ansible scripts will setup the entire 27 | [Application Layer](#application-layer). 28 | 29 | See the [Ansible Guide](GUIDE_ANSIBLE.md) for more. 30 | 31 | ## Structure 32 | 33 | The secure validator setup is composed of one or more validators that run with a local 34 | instance of NGINX as a reverse TCP proxy in front of them. The validators are instructed to: 35 | * advertise themselves with the public IP of the node and the port where the 36 | reverse proxy is listening. 37 | * bind to the localhost interface, so that they only allow incoming connections from the 38 | proxy. 39 | 40 | The setup also configures a firewall in which the default p2p port is closed for 41 | incoming connections and only the proxy port is open. 42 | 43 | ## Workflow 44 | 45 | The secure validator setup is structured in two layers, an underlying platform 46 | and the applications that run on top of it. 47 | 48 | ### Platform Layer 49 | 50 | Validators are created using the terraform modules located at [terraform](/terraform) 51 | directory. We have created code for several providers but it is possible to add new 52 | ones, please reach out if you are interested in any provider currently not available. 53 | 54 | Besides the actual machines the terraform modules create the minimum required networking 55 | infrastructure for adding firewall rules to protect the nodes. 56 | 57 | ### Application Layer 58 | 59 | This is done through the ansible playbook and polkadot-validator role located at 60 | [ansible](/ansible), basically the role performs these actions: 61 | 62 | * Software firewall setup, for the validator we only allow the proxy, SSH and, if 63 | enabled, node-exporter ports. 64 | * Configure journald to tune log storage. 65 | * Create polkadot user and group. 
66 | * Configure NGINX proxy 67 | * Setup polkadot service, including binary download. 68 | * Polkadot session management, create session keys if they are not present. 69 | * Setup node-exporter if the configuration includes it. 70 | 71 | # Note about upgrades from the sentries setup 72 | 73 | The current version of polkadot-secure-validator doesn't allow to create and configure 74 | sentry nodes. Although the terraform files and ansible roles of this latest version 75 | can be applied on setups created with previous versions, the validators would be configured 76 | to work without sentries and to connect to the network using the local reverse proxy instead. 77 | 78 | If you created the sentries with a previous version of this tool through terraform following 79 | the complete workflow, then they will not be deleted automatically when running this new version. 80 | In short, the old sentries will no longer be used by the validators and it will be up to you to 81 | remove them manually. 82 | -------------------------------------------------------------------------------- /ansible/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /ansible/README.md: 
-------------------------------------------------------------------------------- 1 | See the [Ansible Guide](../GUIDE_ANSIBLE.md). -------------------------------------------------------------------------------- /ansible/inventory.sample: -------------------------------------------------------------------------------- 1 | # Specify the nodes to setup. You can add more or remove entries, as you wish. 2 | 3 | # Validator 0 4 | [validator_0] 5 | 147.75.76.65 6 | 7 | # NOTE: optional variables can just be removed. 8 | [validator_0:vars] 9 | ansible_user=alice 10 | # Set an individual node name (optional) 11 | node_name='Alice_Validator' 12 | # Setup one or multiple telemetry endpoints (optional) 13 | telemetry_url='wss://telemetry.polkadot.io/submit/,wss://telemetry-backend.w3f.community/submit' 14 | # Only log specify levels, e.g. warnings (optional) 15 | logging_filter='sync=warn,afg=warn,babe=warn' 16 | # Location for database, keys, etc (optional) 17 | #base_path='/mnt/volume' 18 | # Setup a nginx reverse proxy in front of the binary (optional). Disabled by default. 19 | enable_reverse_proxy = false 20 | # Any additional flags passed on to the 'polkadot' binary (optional) 21 | additional_flags = '--no-prometheus --no-mdns' 22 | 23 | # Validator 1 24 | [validator_1] 25 | 162.12.35.55 26 | 27 | [validator_1:vars] 28 | ansible_user=bob 29 | telemetry_url=wss://telemetry.polkadot.io/submit/ 30 | logging_filter='sync=warn,afg=warn,babe=warn' 31 | 32 | # ## Group all nodes 33 | [validator:children] 34 | validator_0 35 | validator_1 36 | 37 | # Common variables 38 | [all:vars] 39 | # Project name. Will be used as a prefix for the auto-generated node names 40 | # if an individual `nodeName` is not specified. 41 | project=alice-in-wonderland 42 | 43 | # Can be left as is. 
44 | ansible_ssh_common_args='-o StrictHostKeyChecking=no -o ConnectTimeout=15' 45 | build_dir=$HOME/.config/polkadot-secure-validator/build/w3f/ansible 46 | 47 | # Specify which `polkadot` binary to install. Checksum is verified during execution. 48 | polkadot_binary_url='https://github.com/paritytech/polkadot/releases/download/v0.9.1/polkadot' 49 | polkadot_binary_checksum='sha256:00185307376ca0bacf28504e76d4c61ebf84abfba6c31780c9966325add83e1e' 50 | 51 | # Specify the chain/network. 52 | # 53 | # For Polkadot: 54 | # ``` 55 | # chain=polkadot 56 | # polkadot_network_id=polkadot 57 | # ``` 58 | # 59 | # For Kusama: 60 | # ``` 61 | # chain=kusama 62 | # polkadot_network_id=ksmcc3 63 | # ``` 64 | polkadot_network_id=polkadot 65 | chain=polkadot 66 | 67 | # Nginx authentication settings (for Prometheus). 68 | nginx_user='prometheus' 69 | nginx_password='nginx_password' 70 | 71 | # Node exporter settings. Disabled by default. 72 | node_exporter_enabled='false' 73 | node_exporter_binary_url='https://github.com/prometheus/node_exporter/releases/download/v0.18.1/node_exporter-0.18.1.linux-amd64.tar.gz' 74 | node_exporter_binary_checksum='sha256:b2503fd932f85f4e5baf161268854bf5d22001869b84f00fd2d1f57b51b72424' 75 | 76 | # Polkadot service restart settings. Disabled by default (recommended). Adjust values accordingly. 
77 | polkadot_restart_enabled='false' 78 | polkadot_restart_minute='0' 79 | polkadot_restart_hour='0' 80 | polkadot_restart_day='29' 81 | polkadot_restart_month='2' 82 | polkadot_restart_weekday='1' 83 | 84 | # Optional: Restore the chain db from a .7z snapshot 85 | polkadot_db_snapshot_url='https://ksm-rocksdb.polkashots.io/kusama-6658753.RocksDb.7z' 86 | polkadot_db_snapshot_checksum='sha256:4f61a99e4b00acb335aff52f2383880d53b30617c0ae67ac47c611e7bf6971ff' 87 | -------------------------------------------------------------------------------- /ansible/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Wait for system ready 3 | hosts: all 4 | gather_facts: no 5 | tasks: 6 | - name: Wait for nodes to become reachable 7 | wait_for_connection: 8 | 9 | - name: validator 10 | hosts: validator 11 | become: yes 12 | roles: 13 | - polkadot-validator 14 | 15 | - name: nginx-auth 16 | hosts: validator 17 | become: yes 18 | strategy: free 19 | roles: 20 | - nginx-auth 21 | 22 | - name: node-exporter 23 | hosts: validator 24 | become: yes 25 | strategy: free 26 | roles: 27 | - { role: node-exporter, when: node_exporter_enabled|bool } 28 | -------------------------------------------------------------------------------- /ansible/main_backup_keystore.yml: -------------------------------------------------------------------------------- 1 | - hosts: validator 2 | become: yes 3 | roles: 4 | - polkadot-backup-keystore 5 | -------------------------------------------------------------------------------- /ansible/main_debug.yml: -------------------------------------------------------------------------------- 1 | - hosts: validator 2 | become: yes 3 | roles: 4 | - polkadot-debug 5 | -------------------------------------------------------------------------------- /ansible/main_journalctl_vacuum.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | become: yes 3 | roles: 4 | - 
journalctl-vacuum 5 | -------------------------------------------------------------------------------- /ansible/main_restart_service.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | become: yes 3 | roles: 4 | - polkadot-restart-service 5 | -------------------------------------------------------------------------------- /ansible/main_restore_db.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | become: yes 3 | strategy: free 4 | roles: 5 | - polkadot-restore-db 6 | -------------------------------------------------------------------------------- /ansible/main_resync.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | become: yes 3 | roles: 4 | - polkadot-resync 5 | -------------------------------------------------------------------------------- /ansible/main_rotate_keys.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | become: yes 3 | roles: 4 | - polkadot-rotate-keys 5 | -------------------------------------------------------------------------------- /ansible/main_show_multiaddr.yaml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | become: yes 3 | gather_facts: false 4 | roles: 5 | - show-multiaddr 6 | -------------------------------------------------------------------------------- /ansible/main_update_binary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Wait for system ready 3 | hosts: all 4 | gather_facts: no 5 | tasks: 6 | - name: Wait for nodes to become reachable 7 | wait_for_connection: 8 | 9 | - name: validator update binary 10 | hosts: validator 11 | become: yes 12 | roles: 13 | - polkadot-update-binary -------------------------------------------------------------------------------- 
/ansible/roles/journalctl-vacuum/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: journalctl vacuum 2 | shell: | 3 | set -o pipefail 4 | journalctl --vacuum-time=2d 5 | args: 6 | executable: /bin/bash 7 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should 10 | be mentioned here. For instance, if the role uses the EC2 module, it may be a 11 | good idea to mention in this section that the boto package is required. 12 | 13 | Role Variables 14 | -------------- 15 | 16 | A description of the settable variables for this role should go here, including 17 | any variables that are in defaults/main.yml, vars/main.yml, and any variables 18 | that can/should be set via parameters to the role. 
Any variables that are read 19 | from other roles and/or the global scope (ie. hostvars, group vars, etc.) should 20 | be mentioned here as well. 21 | 22 | Dependencies 23 | ------------ 24 | 25 | A list of other roles hosted on Galaxy should go here, plus any details in 26 | regards to parameters that may need to be set for other roles, or variables that 27 | are used from other roles. 28 | 29 | Example Playbook 30 | ---------------- 31 | 32 | Including an example of how to use your role (for instance, with variables 33 | passed in as parameters) is always nice for users too: 34 | 35 | - hosts: servers 36 | roles: 37 | - { role: node-exporter, x: 42 } 38 | 39 | License 40 | ------- 41 | 42 | BSD 43 | 44 | Author Information 45 | ------------------ 46 | 47 | An optional section for the role authors to include contact information, or a 48 | website (HTML is not allowed). 49 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/defaults/main.yml: -------------------------------------------------------------------------------- 1 | nginx_user: prometheus 2 | nginx_password: nginx_password 3 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/files/node_exporter.nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 0.0.0.0:9100; 3 | location / { 4 | proxy_pass http://localhost:9101/; 5 | 6 | auth_basic "Prometheus"; 7 | auth_basic_user_file ".htpasswd"; 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/files/polkadot_metrics.nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 0.0.0.0:9616; 3 | location / { 4 | proxy_pass http://localhost:9615/; 5 | 6 | auth_basic "Prometheus"; 7 | auth_basic_user_file ".htpasswd"; 8 | } 9 | } 
-------------------------------------------------------------------------------- /ansible/roles/nginx-auth/molecule/default/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | {% if item.env is defined %} 10 | {% for var, value in item.env.items() %} 11 | {% if value %} 12 | ENV {{ var }} {{ value }} 13 | {% endif %} 14 | {% endfor %} 15 | {% endif %} 16 | 17 | RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates iproute2 wget nginx && apt-get clean; \ 18 | elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash iproute && dnf clean all; \ 19 | elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash iproute && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ 20 | elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml iproute2 && zypper clean -a; \ 21 | elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ 22 | elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates iproute2 && xbps-remove -O; fi 23 | 24 | RUN wget https://raw.githubusercontent.com/gdraheim/docker-systemctl-replacement/master/files/docker/systemctl.py -O /usr/bin/systemctl && \ 25 | chmod a+x /usr/bin/systemctl && \ 26 | test -L /bin/systemctl || ln -sf /usr/bin/systemctl /bin/systemctl && \ 27 | rm -f /sbin/init && touch /sbin/systemd && ln -sf /sbin/systemd /sbin/init 28 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/molecule/default/INSTALL.rst: 
-------------------------------------------------------------------------------- 1 | ******* 2 | Docker driver installation guide 3 | ******* 4 | 5 | Requirements 6 | ============ 7 | 8 | * Docker Engine 9 | 10 | Install 11 | ======= 12 | 13 | Please refer to the `Virtual environment`_ documentation for installation best 14 | practices. If not using a virtual environment, please consider passing the 15 | widely recommended `'--user' flag`_ when invoking ``pip``. 16 | 17 | .. _Virtual environment: https://virtualenv.pypa.io/en/latest/ 18 | .. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site 19 | 20 | .. code-block:: bash 21 | 22 | $ pip install 'molecule[docker]' 23 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | roles: 5 | - role: nginx-auth 6 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: docker 6 | lint: | 7 | set -e 8 | yamllint . 
9 | flake8 10 | platforms: 11 | - name: nginx-auth-validator-node 12 | image: ubuntu:bionic 13 | groups: 14 | - validator 15 | provisioner: 16 | name: ansible 17 | lint: 18 | name: ansible-lint 19 | inventory: 20 | host_vars: 21 | nginx-auth-validator-node: 22 | node_exporter_enabled: 'true' 23 | verifier: 24 | name: testinfra 25 | lint: 26 | name: flake8 27 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/molecule/default/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | 5 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 6 | os.environ['MOLECULE_INVENTORY_FILE'] 7 | ).get_hosts('all') 8 | 9 | 10 | def test_hosts_file(host): 11 | f = host.file('/etc/hosts') 12 | 13 | assert f.exists 14 | assert f.user == 'root' 15 | assert f.group == 'root' 16 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/molecule/default/tests/test_nginx_auth.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.mark.parametrize("name", [ 5 | ("nginx"), 6 | ("apache2-utils") 7 | ]) 8 | def test_packages(host, name): 9 | pkg = host.package(name) 10 | assert pkg.is_installed 11 | 12 | 13 | def test_htpasswd(host): 14 | htpasswd = host.file("/etc/nginx/.htpasswd") 15 | assert htpasswd.exists 16 | assert htpasswd.user == 'www-data' 17 | assert htpasswd.group == 'www-data' 18 | assert htpasswd.mode == 0o600 19 | 20 | 21 | def test_polkadot_service_file(host): 22 | if host.ansible.get_variables()['node_exporter_enabled'] == "true": 23 | cfg = host.file("/etc/nginx/sites-enabled/node-exporter.conf") 24 | assert cfg.exists 25 | assert cfg.user == 'root' 26 | assert cfg.group == 'root' 27 | assert cfg.mode == 0o644 28 | assert cfg.contains('proxy_pass http://localhost:9101/;') 29 | 
assert cfg.contains('auth_basic_user_file ".htpasswd";') 30 | 31 | 32 | def test_nginx_config(host): 33 | cfg = host.file("/etc/nginx/sites-enabled/polkadot-metrics.conf") 34 | assert cfg.exists 35 | assert cfg.user == 'root' 36 | assert cfg.group == 'root' 37 | assert cfg.mode == 0o644 38 | assert cfg.contains('proxy_pass http://localhost:9615/;') 39 | assert cfg.contains('auth_basic_user_file ".htpasswd";') 40 | 41 | 42 | def test_nginx_running_and_enabled(host): 43 | nginx = host.service("nginx") 44 | assert nginx.is_running 45 | assert nginx.is_enabled 46 | -------------------------------------------------------------------------------- /ansible/roles/nginx-auth/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: check if .htpasswd file already exists 2 | stat: 3 | path: /etc/nginx/.htpasswd 4 | register: htpasswd_file 5 | 6 | - name: remove libapr related packages 7 | apt: 8 | pkg: 9 | - libapr1 10 | - libaprutil1 11 | state: absent 12 | update_cache: yes 13 | autoremove: yes 14 | when: not htpasswd_file.stat.exists 15 | 16 | - name: install apache2-utils 17 | apt: 18 | name: apache2-utils 19 | state: present 20 | update_cache: yes 21 | when: not htpasswd_file.stat.exists 22 | 23 | - name: generate .htpasswd file 24 | shell: | 25 | set -o pipefail 26 | htpasswd -bc /etc/nginx/.htpasswd {{ nginx_user }} {{ nginx_password }} 27 | chmod 600 /etc/nginx/.htpasswd 28 | chown www-data:www-data /etc/nginx/.htpasswd 29 | args: 30 | executable: /bin/bash 31 | changed_when: true 32 | tags: molecule-idempotence-notest 33 | 34 | - name: create nginx config for node exporter 35 | copy: 36 | src: node_exporter.nginx.conf 37 | dest: /etc/nginx/sites-enabled/node-exporter.conf 38 | when: node_exporter_enabled|bool 39 | 40 | - name: create nginx config for polkadot metrics 41 | copy: 42 | src: polkadot_metrics.nginx.conf 43 | dest: /etc/nginx/sites-enabled/polkadot-metrics.conf 44 | 45 | - name: restart nginx service 46 | 
systemd: 47 | name: nginx 48 | state: restarted 49 | daemon_reload: yes 50 | enabled: yes 51 | tags: molecule-idempotence-notest 52 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should 10 | be mentioned here. For instance, if the role uses the EC2 module, it may be a 11 | good idea to mention in this section that the boto package is required. 12 | 13 | Role Variables 14 | -------------- 15 | 16 | A description of the settable variables for this role should go here, including 17 | any variables that are in defaults/main.yml, vars/main.yml, and any variables 18 | that can/should be set via parameters to the role. Any variables that are read 19 | from other roles and/or the global scope (ie. hostvars, group vars, etc.) 
should 20 | be mentioned here as well. 21 | 22 | Dependencies 23 | ------------ 24 | 25 | A list of other roles hosted on Galaxy should go here, plus any details in 26 | regards to parameters that may need to be set for other roles, or variables that 27 | are used from other roles. 28 | 29 | Example Playbook 30 | ---------------- 31 | 32 | Including an example of how to use your role (for instance, with variables 33 | passed in as parameters) is always nice for users too: 34 | 35 | - hosts: servers 36 | roles: 37 | - { role: node-exporter, x: 42 } 38 | 39 | License 40 | ------- 41 | 42 | BSD 43 | 44 | Author Information 45 | ------------------ 46 | 47 | An optional section for the role authors to include contact information, or a 48 | website (HTML is not allowed). 49 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/defaults/main.yml: -------------------------------------------------------------------------------- 1 | node_exporter_binary_url: https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz 2 | node_exporter_binary_checksum: 'sha256:3369b76cd2b0ba678b6d618deab320e565c3d93ccb5c2a0d5db51a53857768ae' 3 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/files/node_exporter.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Node Exporter 3 | 4 | [Service] 5 | User=root 6 | Group=root 7 | ExecStart=/usr/local/bin/node_exporter --web.listen-address="localhost:9101" 8 | 9 | Restart=always 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/molecule/default/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 
4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | {% if item.env is defined %} 10 | {% for var, value in item.env.items() %} 11 | {% if value %} 12 | ENV {{ var }} {{ value }} 13 | {% endif %} 14 | {% endfor %} 15 | {% endif %} 16 | 17 | RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates iproute2 wget nginx && apt-get clean; \ 18 | elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash iproute && dnf clean all; \ 19 | elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash iproute && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ 20 | elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml iproute2 && zypper clean -a; \ 21 | elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ 22 | elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates iproute2 && xbps-remove -O; fi 23 | 24 | RUN wget https://raw.githubusercontent.com/gdraheim/docker-systemctl-replacement/master/files/docker/systemctl.py -O /usr/bin/systemctl && \ 25 | chmod a+x /usr/bin/systemctl && \ 26 | test -L /bin/systemctl || ln -sf /usr/bin/systemctl /bin/systemctl && \ 27 | rm -f /sbin/init && touch /sbin/systemd && ln -sf /sbin/systemd /sbin/init 28 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/molecule/default/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Docker driver installation guide 3 | ******* 4 | 5 | Requirements 6 | ============ 7 | 8 | * Docker Engine 9 | 10 | Install 11 | ======= 12 | 13 | Please refer to the `Virtual environment`_ documentation for installation best 14 | 
practices. If not using a virtual environment, please consider passing the 15 | widely recommended `'--user' flag`_ when invoking ``pip``. 16 | 17 | .. _Virtual environment: https://virtualenv.pypa.io/en/latest/ 18 | .. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site 19 | 20 | .. code-block:: bash 21 | 22 | $ pip install 'molecule[docker]' 23 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | roles: 5 | - role: node-exporter 6 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: docker 6 | lint: | 7 | set -e 8 | yamllint . 
9 | flake8 10 | platforms: 11 | - name: instance 12 | image: ubuntu:bionic 13 | provisioner: 14 | name: ansible 15 | lint: 16 | name: ansible-lint 17 | verifier: 18 | name: testinfra 19 | lint: 20 | name: flake8 21 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/molecule/default/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | 5 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 6 | os.environ['MOLECULE_INVENTORY_FILE'] 7 | ).get_hosts('all') 8 | 9 | 10 | def test_hosts_file(host): 11 | f = host.file('/etc/hosts') 12 | 13 | assert f.exists 14 | assert f.user == 'root' 15 | assert f.group == 'root' 16 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/molecule/default/tests/test_node_exporter.py: -------------------------------------------------------------------------------- 1 | def test_node_exporter(host): 2 | binary = host.file("/usr/local/bin/node_exporter") 3 | assert binary.exists 4 | assert binary.user == 'root' 5 | assert binary.group == 'root' 6 | assert binary.mode == 0o755 7 | 8 | 9 | def test_node_exporter_unit(host): 10 | unit = host.file("/etc/systemd/system/node_exporter.service") 11 | assert unit.exists 12 | assert unit.user == 'root' 13 | assert unit.group == 'root' 14 | assert unit.mode == 0o600 15 | assert unit.contains('--web.listen-address="localhost:9101"') 16 | 17 | 18 | def test_node_exporter_running_and_enabled(host): 19 | nginx = host.service("node_exporter") 20 | assert nginx.is_running 21 | assert nginx.is_enabled 22 | -------------------------------------------------------------------------------- /ansible/roles/node-exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: create node_exporter download dir 2 | file: 3 | 
path: /root/node_exporter 4 | state: directory 5 | mode: '0755' 6 | 7 | - name: get current node_exporter sha256 8 | shell: | 9 | sha256sum /root/node_exporter/archive.tar.gz | sed -n 's/^\(\S*\).*/sha256:\1/p' 10 | args: 11 | executable: /bin/bash 12 | register: node_exporter_sha256 13 | 14 | - name: download node_exporter 15 | get_url: 16 | url: '{{ node_exporter_binary_url | quote }}' 17 | checksum: '{{ node_exporter_binary_checksum | quote }}' 18 | dest: /root/node_exporter/archive.tar.gz 19 | mode: '0700' 20 | when: node_exporter_sha256.stdout != node_exporter_binary_checksum 21 | 22 | - name: unarchive node_exporter 23 | unarchive: 24 | src: /root/node_exporter/archive.tar.gz 25 | remote_src: yes 26 | dest: /root/node_exporter 27 | when: node_exporter_sha256.stdout != node_exporter_binary_checksum 28 | 29 | - name: copy node_exporter binary 30 | shell: | 31 | set -o pipefail 32 | cp /root/node_exporter/node_exporter*linux-amd64/node_exporter /usr/local/bin/ 33 | chmod 755 /usr/local/bin/node_exporter 34 | chown root:root /usr/local/bin/node_exporter 35 | args: 36 | executable: /bin/bash 37 | changed_when: False 38 | when: node_exporter_sha256.stdout != node_exporter_binary_checksum 39 | 40 | - name: create node_exporter systemd unit 41 | copy: 42 | src: node_exporter.service 43 | dest: /etc/systemd/system/node_exporter.service 44 | owner: root 45 | group: root 46 | mode: '600' 47 | 48 | - name: start node_exporter service 49 | systemd: 50 | name: node_exporter 51 | state: restarted 52 | daemon_reload: yes 53 | enabled: yes 54 | changed_when: false 55 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-backup-keystore/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: backup keystore 2 | copy: 3 | src: "/home/polkadot/.local/share/polkadot/chains/{{ polkadot_network_id }}/keystore" 4 | dest: /home/polkadot/keystore 5 | remote_src: yes 6 | 
-------------------------------------------------------------------------------- /ansible/roles/polkadot-debug/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: get command output 2 | shell: | 3 | set -o pipefail 4 | journalctl -u polkadot --no-pager > /root/polkadot.log 5 | tail -n 10000 /root/polkadot.log | grep "Waiting to import block" 6 | become: yes 7 | args: 8 | executable: /bin/bash 9 | changed_when: False 10 | register: command_output 11 | 12 | - name: result 13 | debug: 14 | msg: "Command result: {{ command_output.stdout }}" 15 | 16 | - name: remove log 17 | file: 18 | path: /root/polkadot.log 19 | state: absent 20 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-restart-service/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart polkadot service 2 | systemd: 3 | name: polkadot.service 4 | state: restarted 5 | daemon_reload: yes 6 | enabled: yes 7 | changed_when: false 8 | 9 | #curl -H "Content-Type: application/json" -d '{"id":1, "jsonrpc":"2.0", "method": "system_health", "params":[]}' http://localhost:9933 10 | - name: wait for polkadot service started 11 | uri: 12 | url: http://localhost:9933 13 | status_code: 200 14 | method: "POST" 15 | body_format: json 16 | body: | 17 | { "jsonrpc":"2.0", "method":"system_health", "params":[], "id":1 } 18 | register: result 19 | until: result.status == 200 20 | retries: 12 21 | delay: 5 22 | tags: molecule-notest 23 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-restore-db/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install 7z 2 | apt: 3 | name: p7zip-full 4 | state: present 5 | update_cache: yes 6 | 7 | - name: db_dest_path as fact 8 | set_fact: 9 | db_dest_path: '{{ base_path }}/chains/{{ polkadot_network_id 
}}' 10 | when: base_path is defined 11 | 12 | - name: db_dest_path as fact 13 | set_fact: 14 | db_dest_path: '/home/polkadot/.local/share/polkadot/chains/{{ polkadot_network_id }}' 15 | when: base_path is undefined 16 | 17 | - name: download db 18 | get_url: 19 | url: '{{ polkadot_db_snapshot_url }}' 20 | checksum: '{{ polkadot_db_snapshot_checksum | quote }}' 21 | dest: '{{ db_dest_path }}/db.new.7z' 22 | mode: '0700' 23 | owner: 'polkadot' 24 | group: 'polkadot' 25 | 26 | - name: stop polkadot service 27 | systemd: 28 | name: polkadot.service 29 | state: stopped 30 | 31 | - name: unpack db 32 | shell: | 33 | set -o pipefail 34 | cd {{ db_dest_path }} 35 | mv db db.back 36 | 7z x db.new.7z 37 | rm db.new.7z 38 | chown -R polkadot:polkadot db/ 39 | args: 40 | executable: /bin/bash 41 | 42 | - name: start polkadot service 43 | systemd: 44 | name: polkadot.service 45 | state: started 46 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-resync/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: stop polkadot service 2 | systemd: 3 | name: polkadot.service 4 | state: stopped 5 | 6 | - name: purge chain 7 | shell: | 8 | set -o pipefail 9 | /usr/local/bin/polkadot purge-chain -y 10 | become: yes 11 | become_user: polkadot 12 | args: 13 | executable: /bin/bash 14 | changed_when: False 15 | 16 | - name: start polkadot service 17 | systemd: 18 | name: polkadot.service 19 | state: started 20 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-rotate-keys/tasks/main.yml: -------------------------------------------------------------------------------- 1 | #curl -H "Content-Type: application/json" -d '{"id":1, "jsonrpc":"2.0", "method": "author_rotateKeys", "params":[]}' http://localhost:9933 2 | - name: rotate server keys 3 | uri: 4 | url: http://localhost:9933 5 | method: "POST" 6 | body_format: json 7 | body: 
| 8 | { "jsonrpc":"2.0", "method":"author_rotateKeys", "params":[], "id":1 } 9 | register: rotate_keys 10 | 11 | - name: show rotateKeys output 12 | debug: 13 | var: rotate_keys 14 | 15 | - name: save rotateKeys output 16 | copy: 17 | content: "{{ rotate_keys.json }}" 18 | dest: /home/polkadot/rotate_keys.log -------------------------------------------------------------------------------- /ansible/roles/polkadot-update-binary/.yamllint: -------------------------------------------------------------------------------- 1 | extends: default 2 | 3 | rules: 4 | braces: 5 | max-spaces-inside: 1 6 | level: error 7 | brackets: 8 | max-spaces-inside: 1 9 | level: error 10 | line-length: disable 11 | truthy: disable 12 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-update-binary/molecule/default/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | {% if item.env is defined %} 10 | {% for var, value in item.env.items() %} 11 | {% if value %} 12 | ENV {{ var }} {{ value }} 13 | {% endif %} 14 | {% endfor %} 15 | {% endif %} 16 | 17 | RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates iptables wget && apt-get clean; \ 18 | elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash && dnf clean all; \ 19 | elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ 20 | elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \ 21 | elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash 
ca-certificates; \ 22 | elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi 23 | 24 | RUN wget https://raw.githubusercontent.com/gdraheim/docker-systemctl-replacement/master/files/docker/systemctl.py -O /usr/bin/systemctl && \ 25 | chmod a+x /usr/bin/systemctl && \ 26 | test -L /bin/systemctl || ln -sf /usr/bin/systemctl /bin/systemctl && \ 27 | rm -f /sbin/init && touch /sbin/systemd && ln -sf /sbin/systemd /sbin/init 28 | 29 | RUN useradd -m -u 1000 -U -s /bin/sh polkadot 30 | 31 | RUN echo "[Unit]\nDescription=My Script\n[Service]\nType=forking\nExecStart=/bin/echo svc\n[Install]\nWantedBy=multi-user.target" > /etc/systemd/system/polkadot.service 32 | 33 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-update-binary/molecule/default/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Docker driver installation guide 3 | ******* 4 | 5 | Requirements 6 | ============ 7 | 8 | * Docker Engine 9 | 10 | Install 11 | ======= 12 | 13 | Please refer to the `Virtual environment`_ documentation for installation best 14 | practices. If not using a virtual environment, please consider passing the 15 | widely recommended `'--user' flag`_ when invoking ``pip``. 16 | 17 | .. _Virtual environment: https://virtualenv.pypa.io/en/latest/ 18 | .. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site 19 | 20 | .. 
code-block:: bash 21 | 22 | $ pip install 'molecule[docker]' 23 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-update-binary/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: validator 4 | roles: 5 | - role: polkadot-update-binary 6 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-update-binary/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: docker 6 | lint: | 7 | set -e 8 | yamllint . 9 | flake8 10 | platforms: 11 | - name: polkadot-update-binary-validator-node 12 | image: ubuntu:bionic 13 | groups: 14 | - validator 15 | provisioner: 16 | name: ansible 17 | lint: 18 | name: ansible-lint 19 | inventory: 20 | host_vars: 21 | polkadot-update-binary-validator-node: 22 | polkadot_network_id: ksmcc3 23 | polkadot_binary_url: 'https://github.com/w3f/polkadot/releases/download/v0.8.23/polkadot' 24 | polkadot_binary_checksum: 'sha256:cdf31d39ed54e66489d1afe74ed7549d5bcdf8ff479759e8fc476d17d069901e' 25 | verifier: 26 | name: testinfra 27 | lint: 28 | name: flake8 29 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-update-binary/molecule/default/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | 5 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 6 | os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') 7 | 8 | 9 | def test_hosts_file(host): 10 | f = host.file('/etc/hosts') 11 | 12 | assert f.exists 13 | assert f.user == 'root' 14 | assert f.group == 'root' 15 | -------------------------------------------------------------------------------- 
- name: get current polkadot sha256
No prerequisites.
21 | 22 | ``` 23 | project 24 | ``` 25 | 26 | Dependencies 27 | ------------ 28 | 29 | Example Playbook 30 | ---------------- 31 | 32 | - hosts: validator 33 | become: yes 34 | roles: 35 | - polkadot-validator 36 | 37 | License 38 | ------- 39 | 40 | BSD 41 | 42 | Author Information 43 | ------------------ 44 | 45 | An optional section for the role authors to include contact information, or a 46 | website (HTML is not allowed). 47 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for polkadot-validator 3 | proxy_port: 80 4 | p2p_port: 30333 5 | chain: "kusama" 6 | polkadot_network_id: "ksmcc3" 7 | subkey_binary_url: 'https://github.com/w3f/substrate/releases/download/e0f3fa/subkey' 8 | subkey_binary_checksum: 'sha256:f74a06442e76c3bb97d27a168b9710ef062ae5640ad82e7d42b8fb613f8be9d9' 9 | build_dir: '/tmp' 10 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/files/journald.conf: -------------------------------------------------------------------------------- 1 | [Journal] 2 | Storage=persistent 3 | #Compress=yes 4 | #Seal=yes 5 | #SplitMode=uid 6 | #SyncIntervalSec=5m 7 | RateLimitIntervalSec=30s 8 | RateLimitBurst=20000 9 | SystemMaxUse=50G 10 | #SystemKeepFree= 11 | SystemMaxFileSize=512M 12 | SystemMaxFiles=100 13 | #RuntimeMaxUse= 14 | #RuntimeKeepFree= 15 | #RuntimeMaxFileSize= 16 | #RuntimeMaxFiles=100 17 | #MaxRetentionSec= 18 | #MaxFileSec=1month 19 | #ForwardToSyslog=yes 20 | #ForwardToKMsg=no 21 | #ForwardToConsole=no 22 | #ForwardToWall=yes 23 | #TTYPath=/dev/console 24 | #MaxLevelStore=debug 25 | #MaxLevelSyslog=debug 26 | #MaxLevelKMsg=notice 27 | #MaxLevelConsole=info 28 | #MaxLevelWall=emerg 29 | #LineMax=48K 30 | -------------------------------------------------------------------------------- 
/ansible/roles/polkadot-validator/files/nginx.conf: -------------------------------------------------------------------------------- 1 | user www-data www-data; 2 | 3 | load_module /usr/lib/nginx/modules/ngx_stream_module.so; 4 | 5 | stream { 6 | include streams-enabled/*; 7 | } 8 | 9 | http { 10 | include sites-enabled/*; 11 | } 12 | 13 | events{ 14 | } 15 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/molecule/default/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates iptables wget && apt-get clean; \ 10 | elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash && dnf clean all; \ 11 | elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ 12 | elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \ 13 | elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ 14 | elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi 15 | 16 | RUN wget https://raw.githubusercontent.com/gdraheim/docker-systemctl-replacement/master/files/docker/systemctl.py -O /usr/bin/systemctl && \ 17 | chmod a+x /usr/bin/systemctl && \ 18 | test -L /bin/systemctl || ln -sf /usr/bin/systemctl /bin/systemctl && \ 19 | rm -f /sbin/init && touch /sbin/systemd && ln -sf /sbin/systemd /sbin/init 20 | 21 | # mock several system binaries in the 
docker test image to allow the role converge 22 | RUN echo "#!/bin/sh\nsleep 1" > /bin/modprobe && \ 23 | chmod a+x /bin/modprobe && \ 24 | rm -f /sbin/sysctl && \ 25 | echo "#!/bin/sh\nsleep 1" > /sbin/sysctl && \ 26 | chmod a+x /sbin/sysctl && \ 27 | rm -f /sbin/iptables && \ 28 | echo "#!/bin/sh\nsleep 1 && echo iptables v1.6.1" > /sbin/iptables && \ 29 | chmod a+x /sbin/iptables && \ 30 | rm -f /sbin/ip6tables && \ 31 | echo "#!/bin/sh\nsleep 1 && echo ip6tables v1.6.1" > /sbin/ip6tables && \ 32 | chmod a+x /sbin/ip6tables && \ 33 | rm -f /sbin/ip6tables-restore && \ 34 | echo "#!/bin/sh\nsleep 1" > /sbin/ip6tables-restore && \ 35 | chmod a+x /sbin/ip6tables-restore && \ 36 | echo "#!/bin/sh\nsleep 1" > /bin/journalctl && \ 37 | chmod a+x /bin/journalctl 38 | 39 | RUN echo "[Unit]\nDescription=My Script\n[Service]\nType=forking\nExecStart=/bin/echo svc\n[Install]\nWantedBy=multi-user.target" > /etc/systemd/system/systemd-journald.service 40 | 41 | RUN echo IyEvdXNyL2Jpbi9lbnYgcHl0aG9uCmZyb20gQmFzZUhUVFBTZXJ2ZXIgaW1wb3J0IEJhc2VIVFRQUmVxdWVzdEhhbmRsZXIsIEhUVFBTZXJ2ZXIKaW1wb3J0IG9zCgpLRVlfUEFUSCA9ICIvaG9tZS9wb2xrYWRvdC8ubG9jYWwvc2hhcmUvcG9sa2Fkb3QvY2hhaW5zL2tzbWEva2V5c3RvcmUva2V5IgoKCmNsYXNzIFMoQmFzZUhUVFBSZXF1ZXN0SGFuZGxlcik6CiAgICBkZWYgX3NldF9oZWFkZXJzKHNlbGYpOgogICAgICAgIHNlbGYuc2VuZF9yZXNwb25zZSgyMDApCiAgICAgICAgc2VsZi5zZW5kX2hlYWRlcigiQ29udGVudC10eXBlIiwgImFwcGxpY2F0dGlvbi9qc29uIikKICAgICAgICBzZWxmLmVuZF9oZWFkZXJzKCkKCiAgICBkZWYgZG9fUE9TVChzZWxmKToKICAgICAgICBzZWxmLl9zZXRfaGVhZGVycygpCgogICAgICAgIGJhc2VkaXIgPSBvcy5wYXRoLmRpcm5hbWUoS0VZX1BBVEgpCiAgICAgICAgaWYgbm90IG9zLnBhdGguZXhpc3RzKGJhc2VkaXIpOgogICAgICAgICAgICBvcy5tYWtlZGlycyhiYXNlZGlyKQogICAgICAgIHdpdGggb3BlbihLRVlfUEFUSCwgImEiKToKICAgICAgICAgICAgb3MudXRpbWUoS0VZX1BBVEgsIE5vbmUpCgogICAgICAgIHNlbGYud2ZpbGUud3JpdGUoInt9IikKCgpkZWYgcnVuKHNlcnZlcl9jbGFzcz1IVFRQU2VydmVyLCBoYW5kbGVyX2NsYXNzPVMsIHBvcnQ9OTkzMyk6CiAgICBzZXJ2ZXJfYWRkcmVzcyA9ICgiIiwgcG9ydCkKICAgIGh0dHBkID0gc2VydmVyX2NsYXNzKHNlcnZlcl9hZGRyZXNzLCBoYW5kb
GVyX2NsYXNzKQogICAgaHR0cGQuc2VydmVfZm9yZXZlcigpCgoKaWYgX19uYW1lX18gPT0gIl9fbWFpbl9fIjoKICAgIHJ1bigpCg== | base64 -d > /usr/local/bin/polkadot && chmod a+x /usr/local/bin/polkadot 42 | 43 | RUN useradd -m -u 1000 -U -s /bin/sh polkadot 44 | 45 | RUN echo 'echo 0' > /usr/local/bin/journalctl && chmod a+x /usr/local/bin/journalctl 46 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/molecule/default/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Docker driver installation guide 3 | ******* 4 | 5 | Requirements 6 | ============ 7 | 8 | * Docker Engine 9 | 10 | Install 11 | ======= 12 | 13 | Please refer to the `Virtual environment`_ documentation for installation best 14 | practices. If not using a virtual environment, please consider passing the 15 | widely recommended `'--user' flag`_ when invoking ``pip``. 16 | 17 | .. _Virtual environment: https://virtualenv.pypa.io/en/latest/ 18 | .. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site 19 | 20 | .. code-block:: bash 21 | 22 | $ pip install 'molecule[docker]' 23 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: validator 4 | roles: 5 | - role: polkadot-validator 6 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: docker 6 | lint: | 7 | set -e 8 | yamllint . 
9 | flake8 10 | platforms: 11 | - name: polkadot-validator-validator-node 12 | image: ubuntu:bionic 13 | groups: 14 | - validator 15 | provisioner: 16 | name: ansible 17 | lint: 18 | name: ansible-lint 19 | inventory: 20 | host_vars: 21 | polkadot-validator-validator-node: 22 | polkadot_network_id: ksmcc3 23 | polkadot_binary_url: 'https://github.com/w3f/polkadot/releases/download/v0.8.23/polkadot' 24 | polkadot_binary_checksum: 'sha256:cdf31d39ed54e66489d1afe74ed7549d5bcdf8ff479759e8fc476d17d069901e' 25 | verifier: 26 | name: testinfra 27 | lint: 28 | name: flake8 29 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/molecule/default/tests/expected.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | babe: 3 | address: '5GeJbrGHZRnAWUzNR19UqtSkusSWf4VKzQW2nBcfuW1zJaSC' 4 | public_key: '0xca90fe1aa661510c1d8d805cc8051a5fdc29f119913ace7d6b32d2b01198fc59' 5 | gran: 6 | address: '5CH1MpYjcu7WoviS18P52kRD4XK5T7YXYVmWydhVoTornPJ5' 7 | public_key: '0x0961dd29a35d9c520482c0f1c2693ecdacc94674a15bc9911431aa8361bcd583' 8 | imon: 9 | address: '5HN1Ua1gMVX2D9mu7vMivg4FDKun5cHECqCh7EvfGNt1bAdk' 10 | public_key: '0xea5fa6622c63f8cc140d3c7dcdca6ab6559d2f497198d384e04db97ced60bd44' 11 | para: 12 | address: '5Gj5pmY7H8x68yLVKXfmsz8pVXxExaWhqUdTvVdoY5b3ZJ3M' 13 | public_key: '0xce36351eb9bc59eecc4d84d240c62661a0f27a0cf24cdd12b5710736d1942930' 14 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/molecule/default/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | 5 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 6 | os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') 7 | 8 | 9 | def test_hosts_file(host): 10 | f = host.file('/etc/hosts') 11 | 12 | assert f.exists 13 | 
assert f.user == 'root' 14 | assert f.group == 'root' 15 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/molecule/default/tests/test_firewall.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.mark.parametrize("name", [ 5 | ("ufw"), 6 | ]) 7 | def test_packages(host, name): 8 | pkg = host.package(name) 9 | assert pkg.is_installed 10 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/molecule/default/tests/test_polkadot.py: -------------------------------------------------------------------------------- 1 | def test_polkadot_user(host): 2 | user = host.user('polkadot') 3 | assert user.exists 4 | 5 | group = host.group('polkadot') 6 | assert group.exists 7 | 8 | assert user.gid == group.gid 9 | 10 | 11 | def test_polkadot_binary(host): 12 | binary = host.file('/usr/local/bin/polkadot') 13 | assert binary.exists 14 | assert binary.user == 'polkadot' 15 | assert binary.group == 'polkadot' 16 | assert binary.mode == 0o755 17 | 18 | 19 | def test_polkadot_service_file(host): 20 | if host.ansible.get_variables()['inventory_hostname'] == 'validator': 21 | svc = host.file('/etc/systemd/system/polkadot.service') 22 | assert svc.exists 23 | assert svc.user == 'root' 24 | assert svc.group == 'root' 25 | assert svc.mode == 0o600 26 | assert svc.contains('Restart=always') 27 | 28 | 29 | def test_polkadot_running_and_enabled(host): 30 | if host.ansible.get_variables()['inventory_hostname'] == 'validator': 31 | polkadot = host.service("polkadot.service") 32 | assert polkadot.is_running 33 | # assert polkadot.is_enabled 34 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/tasks/firewall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install packages 3 | 
apt: 4 | name: ufw 5 | state: present 6 | update_cache: yes 7 | 8 | - name: ufw already enabled 9 | command: ufw status verbose 10 | register: ufw_status_result 11 | changed_when: False 12 | 13 | - name: open ssh port 14 | command: ufw allow 22/tcp 15 | when: not ufw_status_result.stdout is search("22/tcp.*ALLOW IN.*Anywhere") 16 | 17 | - name: open node_exporter port 18 | command: ufw allow 9100/tcp 19 | when: 20 | - node_exporter_enabled|default(false)|bool 21 | - not ufw_status_result.stdout is search("9100/tcp.*ALLOW IN.*Anywhere") 22 | 23 | - name: open polkadot_metrics port 24 | command: ufw allow 9616/tcp 25 | when: 26 | - node_exporter_enabled|default(false)|bool 27 | - not ufw_status_result.stdout is search("9616/tcp.*ALLOW IN.*Anywhere") 28 | 29 | - name: open p2p port 30 | command: ufw allow {{ p2p_port }}/tcp 31 | when: 32 | - not enable_reverse_proxy|default(false)|bool or not enableReverseProxy|default(false)|bool 33 | - not ufw_status_result.stdout is search(p2p_port ~ "/tcp.*ALLOW IN.*Anywhere") 34 | 35 | - name: close p2p port 36 | command: ufw deny {{ p2p_port }}/tcp 37 | when: 38 | - enable_reverse_proxy|default(false)|bool or enableReverseProxy|default(false)|bool 39 | - not ufw_status_result.stdout is search(p2p_port ~ "/tcp.*DENY IN.*Anywhere") 40 | 41 | - name: open proxy port 42 | command: ufw allow {{ proxy_port }}/tcp 43 | when: 44 | - enable_reverse_proxy|default(false)|bool or enableReverseProxy|default(false)|bool 45 | - not ufw_status_result.stdout is search(proxy_port ~ "/tcp.*ALLOW IN.*Anywhere") 46 | 47 | - name: enable firewall 48 | shell: | 49 | set -o pipefail 50 | echo "y" | ufw enable 51 | args: 52 | executable: /bin/bash 53 | when: not ufw_status_result.stdout is search("Status.* active") 54 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/tasks/journald.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 
copy journald config 3 | copy: 4 | src: journald.conf 5 | dest: /etc/systemd/journald.conf 6 | 7 | - name: restart journald 8 | systemd: 9 | name: systemd-journald 10 | state: restarted 11 | daemon_reload: yes 12 | enabled: yes 13 | changed_when: false 14 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for polkadot-validator 3 | - name: firewall setup 4 | import_tasks: firewall.yml 5 | 6 | - name: journald config 7 | import_tasks: journald.yml 8 | 9 | - name: user and group 10 | import_tasks: user.yml 11 | 12 | - name: proxy setup 13 | import_tasks: proxy.yml 14 | 15 | - name: service setup 16 | import_tasks: service.yml 17 | 18 | - name: install binary 19 | import_role: 20 | name: polkadot-update-binary 21 | 22 | - name: session management 23 | import_tasks: session.yml 24 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/tasks/proxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install nginx package 3 | apt: 4 | name: nginx 5 | state: present 6 | update_cache: yes 7 | 8 | - name: create nginx config 9 | copy: 10 | src: nginx.conf 11 | dest: /etc/nginx/nginx.conf 12 | 13 | - name: create streams dir 14 | file: 15 | path: /etc/nginx/streams-enabled 16 | state: directory 17 | 18 | - name: remove nginx default site 19 | file: 20 | path: /etc/nginx/sites-enabled/default 21 | state: absent 22 | 23 | - name: create proxy service file 24 | template: 25 | src: proxy.conf.j2 26 | dest: /etc/nginx/streams-enabled/polkadot-proxy.conf 27 | mode: 0600 28 | when: 29 | - enable_reverse_proxy|default(false)|bool or enableReverseProxy|default(false)|bool 30 | 31 | - name: restart nginx service 32 | systemd: 33 | name: nginx 34 | state: restarted 35 | daemon_reload: yes 
36 | enabled: yes 37 | changed_when: false 38 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/tasks/service.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: cronjob for restarting polkadot service 3 | cron: 4 | name: "restart polkadot" 5 | minute: "{{ polkadot_restart_minute }}" 6 | hour: "{{ polkadot_restart_hour }}" 7 | day: "{{ polkadot_restart_day }}" 8 | month: "{{ polkadot_restart_month }}" 9 | weekday: "{{ polkadot_restart_weekday }}" 10 | job: "/bin/systemctl restart polkadot.service" 11 | when: "polkadot_restart_enabled|default(false)|bool" 12 | 13 | - name: register public ip 14 | uri: 15 | url: https://api.ipify.org?format=json 16 | register: public_ip 17 | 18 | - name: create polkadot service file 19 | template: 20 | src: polkadot.service.j2 21 | dest: /etc/systemd/system/polkadot.service 22 | owner: root 23 | group: root 24 | mode: 0600 25 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/tasks/session.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if keys already exist 3 | shell: | 4 | set -o pipefail 5 | if [ -d /home/polkadot/.local/share/polkadot/chains/{{ polkadot_network_id }}/keystore ]; then 6 | ls /home/polkadot/.local/share/polkadot/chains/{{ polkadot_network_id }}/keystore -1U | wc -l 7 | else 8 | mkdir -p /home/polkadot/.local/share/polkadot/chains/{{ polkadot_network_id }}/ 9 | chown -R polkadot:polkadot /home/polkadot/.local/share/polkadot 10 | echo 0 11 | fi 12 | args: 13 | executable: /bin/bash 14 | register: keystore_files 15 | changed_when: False 16 | 17 | - name: initialize server keys 18 | import_role: 19 | name: polkadot-rotate-keys 20 | when: keystore_files.stdout == "0" 21 | -------------------------------------------------------------------------------- 
/ansible/roles/polkadot-validator/tasks/user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create polkadot group 3 | group: 4 | name: polkadot 5 | state: present 6 | 7 | - name: add polkadot user 8 | user: 9 | name: polkadot 10 | group: polkadot 11 | append: yes 12 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/templates/polkadot.service.j2: -------------------------------------------------------------------------------- 1 | {# --- PREPARE VARIABLES #} 2 | {% set node_name = hostvars[inventory_hostname].node_name|default(None) %} 3 | {% set execution = hostvars[inventory_hostname].execution|default(None) %} 4 | {% set wasm_execution = hostvars[inventory_hostname].wasm_execution|default(None) %} 5 | {% set telemetry_url = hostvars[inventory_hostname].telemetry_url|default(None) %} 6 | {% set enable_reverse_proxy = hostvars[inventory_hostname].enable_reverse_proxy|default(None) %} 7 | {% set logging_filter = hostvars[inventory_hostname].logging_filter|default(None) %} 8 | {% set base_path = hostvars[inventory_hostname].base_path|default(None) %} 9 | {% set additional_flags = hostvars[inventory_hostname].additional_flags|default(None) %} 10 | {# ## Legacy/deprecated flags #} 11 | {% if node_name is none %} 12 | {% set node_name = hostvars[inventory_hostname].nodeName|default(None) %} 13 | {% endif %} 14 | {% if telemetry_url is none %} 15 | {% set telemetry_url = hostvars[inventory_hostname].telemetryUrl|default(None) %} 16 | {% endif %} 17 | {% if enable_reverse_proxy is none %} 18 | {% set enable_reverse_proxy = hostvars[inventory_hostname].enableReverseProxy|default(None) %} 19 | {% endif %} 20 | {% if logging_filter is none %} 21 | {% set logging_filter = hostvars[inventory_hostname].loggingFilter|default(None) %} 22 | {% endif %} 23 | {% set additionalCommonFlags = 
hostvars[inventory_hostname].polkadot_additional_common_flags|default(None) %} 24 | {% set additionalValidatorFlags = hostvars[inventory_hostname].polkadot_additional_validator_flags|default(None) %} 25 | {# --- #} 26 | [Unit] 27 | Description=Polkadot Node 28 | 29 | [Service] 30 | User=polkadot 31 | Group=polkadot 32 | ExecStart=/usr/local/bin/polkadot \ 33 | {% if execution is not none and execution|length %} 34 | --execution {{ execution }} \ 35 | {% endif %} 36 | {% if node_name is not none and node_name|length %} 37 | --name {{ node_name }} \ 38 | {% else %} 39 | --name {{ project|default('project') }}-sv-validator-{{ groups['validator'].index(inventory_hostname) }} \ 40 | {% endif %} 41 | {% if wasm_execution is not none and wasm_execution|length %} 42 | --wasm-execution {{ wasm_execution }} \ 43 | {% else %} 44 | --wasm-execution Compiled \ 45 | {% endif %} 46 | {% if telemetry_url is not none and telemetry_url|length %} 47 | {% set urls = telemetry_url.split(',') %} 48 | {% for url in urls %} 49 | --telemetry-url '{{ url }} 1' \ 50 | {% endfor %} 51 | {% else %} 52 | --no-telemetry \ 53 | {% endif %} 54 | {% if enable_reverse_proxy is not none and enable_reverse_proxy == 'true' %} 55 | --public-addr=/ip4/{{ hostvars[inventory_hostname].public_ip.json.ip }}/tcp/{{ proxy_port }} \ 56 | {% endif %} 57 | {% if logging_filter is not none and logging_filter|length %} 58 | -l{{ logging_filter }} \ 59 | {% endif %} 60 | {% if base_path is not none and base_path|length %} 61 | --base-path '{{ base_path }}' \ 62 | {% endif %} 63 | {% if additional_flags is not none and additional_flags|length %} 64 | {{ additional_flags }} \ 65 | {% endif %} 66 | {# --- DEPRECATED --- #} 67 | {% if additionalCommonFlags is not none and additionalCommonFlags|length %} 68 | {{ additionalCommonFlags }} \ 69 | {% endif %} 70 | {% if additionalValidatorFlags is not none and additionalValidatorFlags|length %} 71 | {{ additionalValidatorFlags }} \ 72 | {% endif %} 73 | {# --- #} 74 | 
--validator \ 75 | --chain={{ chain }} \ 76 | --listen-addr=/ip4/127.0.0.1/tcp/{{ p2p_port }} \ 77 | --rpc-methods=Unsafe 78 | 79 | Restart=always 80 | RestartSec=60 81 | 82 | [Install] 83 | WantedBy=multi-user.target 84 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/templates/proxy.conf.j2: -------------------------------------------------------------------------------- 1 | server { 2 | listen 0.0.0.0:{{ proxy_port }}; 3 | proxy_pass localhost:{{ p2p_port }}; 4 | } 5 | -------------------------------------------------------------------------------- /ansible/roles/polkadot-validator/templates/session.yaml.j2: -------------------------------------------------------------------------------- 1 | {% for item in session.results %} 2 | {{ item.stdout }} 3 | {% endfor %} 4 | -------------------------------------------------------------------------------- /ansible/roles/show-multiaddr/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: save peerId 2 | uri: 3 | url: http://localhost:9933 4 | method: "POST" 5 | body_format: json 6 | body: | 7 | { "jsonrpc":"2.0", "method":"system_localPeerId", "params":[], "id":1 } 8 | register: peerId 9 | until: peerId.status == 200 10 | retries: 10 11 | delay: 5 12 | 13 | - name: set peer id as fact 14 | set_fact: 15 | p2p_peer_id: "{{ peerId.json.result }}" 16 | 17 | - name: result 18 | debug: 19 | msg: "/ip4/{{ hostvars[inventory_hostname].vpnpeer_address }}/tcp/30333/p2p/{{ p2p_peer_id }}" 20 | -------------------------------------------------------------------------------- /ansible/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function handle_error() { 4 | if (( $? 
)) ; then 5 | echo -e "[\e[31mERROR\e[39m]" 6 | echo -e >&2 "CAUSE:\n $1" 7 | exit 1 8 | else 9 | echo -e "[\e[32mOK\e[39m]" 10 | fi 11 | } 12 | 13 | ANSIBLE_FILES_DIR="$(dirname "$0")" 14 | INVENTORY="${1:-${ANSIBLE_FILES_DIR}/inventory.yml}" 15 | 16 | echo -n ">> Checking inventory file (${INVENTORY}) exists and is readable... " 17 | [ -r "${INVENTORY}" ]; handle_error "Please check https://github.com/w3f/polkadot-secure-validator/blob/master/GUIDE_ANSIBLE.md#inventory" 18 | 19 | echo -n ">> Pulling upstream changes... " 20 | out=$((git pull origin master) 2>&1) 21 | handle_error "$out" 22 | 23 | echo -n ">> Testing Ansible availability... " 24 | out=$((ansible --version) 2>&1) 25 | handle_error "$out" 26 | 27 | echo -n ">> Finding validator hosts... " 28 | out=$((ansible validator -i ${INVENTORY} --list-hosts) 2>/dev/null) 29 | if [[ $out == *"hosts (0)"* ]]; then 30 | out="No hosts found, exiting..." 31 | (exit 1) 32 | handle_error "$out" 33 | else 34 | echo -e "[\e[32mOK\e[39m]" 35 | echo "$out" 36 | fi 37 | 38 | echo -n ">> Testing connectivity to hosts... " 39 | out=$((ansible all -i ${INVENTORY} -m ping) 2>&1) 40 | handle_error "$out" 41 | 42 | echo "Sudo password for remote servers:" 43 | read -s SUDO_PW 44 | 45 | echo -n ">> Testing sudo access... " 46 | out=$((ansible all -i ${INVENTORY} -m ping --become --extra-vars "ansible_become_pass='$SUDO_PW'") 2>&1) 47 | handle_error "$out" 48 | 49 | echo ">> Executing Ansible Playbook..." 50 | 51 | ansible-playbook -i ${INVENTORY} ${ANSIBLE_FILES_DIR}/main.yml --become --extra-vars "ansible_become_pass='$SUDO_PW'" 52 | 53 | echo ">> Done!" 
54 | -------------------------------------------------------------------------------- /config/main.sample.json: -------------------------------------------------------------------------------- 1 | { 2 | "project": "w3f", 3 | "polkadotBinary": { 4 | "url": "https://github.com/paritytech/polkadot/releases/download/v0.8.29/polkadot", 5 | "checksum": "sha256:0b27d0cb99ca60c08c78102a9d2f513d89dfec8dbd6fdeba8b952a420cdc9fd2" 6 | }, 7 | "nodeExporter": { 8 | "enabled": true, 9 | "binary": { 10 | "url": "https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz", 11 | "checksum": "sha256:3369b76cd2b0ba678b6d618deab320e565c3d93ccb5c2a0d5db51a53857768ae" 12 | } 13 | }, 14 | "polkadotRestart": { 15 | "enabled": true, 16 | "minute": "50", 17 | "hour": "4,12,20" 18 | }, 19 | "chain": "kusama", 20 | "polkadotNetworkId": "ksmcc3", 21 | "state": { 22 | "project": "my_gcp_state_project" 23 | }, 24 | "validators": { 25 | "telemetryUrl": "wss://my.private.telemetry.endpoint", 26 | "loggingFilter": "sync=trace,afg=trace,babe=debug", 27 | "nodes": [ 28 | { 29 | "provider": "packet", 30 | "machineType": "c1.small.x86", 31 | "count": 1, 32 | "location": "ewr1", 33 | "projectId": "my_packet_project", 34 | "nodeName": "myNodeName", 35 | "sshUser": "myName", 36 | "image": "ubuntu_20_04" 37 | } 38 | ] 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /config/main.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "project": "", 3 | "polkadotBinary": { 4 | "url": "", 5 | "checksum": "sha256:" 6 | }, 7 | "nodeExporter": { 8 | "enabled": true, 9 | "binary": { 10 | "url": "https://github.com/prometheus/node_exporter/releases/download/v0.18.1/node_exporter-0.18.1.linux-amd64.tar.gz", 11 | "checksum": "sha256:b2503fd932f85f4e5baf161268854bf5d22001869b84f00fd2d1f57b51b72424" 12 | } 13 | }, 14 | "chain": "", 15 | "polkadotNetworkId": "", 16 | 
"state": { 17 | "project": "" 18 | }, 19 | "validators": { 20 | "telemetryUrl": "", 21 | "loggingFilter": "sync=trace,afg=trace,babe=debug", 22 | "nodes": [ 23 | { 24 | "provider": "packet", 25 | "machineType": "c1.small.x86", 26 | "count": 1, 27 | "location": "", 28 | "projectId": "", 29 | "sshUser": "root" 30 | } 31 | ] 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /config/main.withBackup.sample.json: -------------------------------------------------------------------------------- 1 | { 2 | "project": "w3f", 3 | "polkadotBinary": { 4 | "url": "https://github.com/paritytech/polkadot/releases/download/v0.8.29/polkadot", 5 | "checksum": "sha256:0b27d0cb99ca60c08c78102a9d2f513d89dfec8dbd6fdeba8b952a420cdc9fd2" 6 | }, 7 | "nodeExporter": { 8 | "enabled": true, 9 | "binary": { 10 | "url": "https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz", 11 | "checksum": "sha256:3369b76cd2b0ba678b6d618deab320e565c3d93ccb5c2a0d5db51a53857768ae" 12 | } 13 | }, 14 | "polkadotRestart": { 15 | "enabled": true, 16 | "minute": "50", 17 | "hour": "4,12,20" 18 | }, 19 | "chain": "kusama", 20 | "polkadotNetworkId": "ksmcc3", 21 | "state": { 22 | "project": "my_gcp_state_project" 23 | }, 24 | "validators": { 25 | "additionalFlags": "--unsafe-pruning --pruning 1000", 26 | "dbSnapshot": { 27 | "url": "https://ksm-rocksdb.polkashots.io/kusama-6658753.RocksDb.7z", 28 | "checksum": "sha256:4f61a99e4b00acb335aff52f2383880d53b30617c0ae67ac47c611e7bf6971ff" 29 | }, 30 | "telemetryUrl": "wss://my.private.telemetry.endpoint", 31 | "loggingFilter": "sync=trace,afg=trace,babe=debug", 32 | "nodes": [ 33 | { 34 | "provider": "packet", 35 | "machineType": "c1.small.x86", 36 | "count": 1, 37 | "location": "ewr1", 38 | "projectId": "my_packet_project", 39 | "nodeName": "myNodeName", 40 | "sshUser": "myName", 41 | "image": "ubuntu_20_04" 42 | } 43 | ] 44 | } 45 | } 46 | 
-------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "polkadot-secure-validator", 3 | "version": "3.4.2", 4 | "main": "src/index.js", 5 | "repository": "https://github.com/w3f/polkadot-secure-validator", 6 | "author": "W3F Infrastructure Team ", 7 | "license": "GPL-3.0", 8 | "scripts": { 9 | "test": "mocha --reporter spec --recursive", 10 | "sync": "node . sync", 11 | "clean": "node . clean", 12 | "plan": "node . plan", 13 | "restore-db": "node . restore-db", 14 | "rotate-keys": "node . rotate-keys", 15 | "update-binary": "node . update-binary", 16 | "lint": "eslint .", 17 | "pretest": "yarn lint" 18 | }, 19 | "bin": { 20 | "polkadot-secure-validator": "src/index.js" 21 | }, 22 | "files": [ 23 | "/ansible", 24 | "/src", 25 | "/terraform", 26 | "/tpl" 27 | ], 28 | "dependencies": { 29 | "chalk": "^2.4.2", 30 | "commander": "^2.20.0", 31 | "dotenv": "^8.2.0", 32 | "fs-extra": "^8.1.0", 33 | "handlebars": "^4.7.6", 34 | "node-forge": "^0.10.0", 35 | "ospath": "^1.2.2" 36 | }, 37 | "devDependencies": { 38 | "chai": "^4.2.0", 39 | "eslint": "^6.6.0", 40 | "mocha": "^6.2.3", 41 | "tmp": "^0.1.0" 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /scripts/binaryUpgradeTest.json: -------------------------------------------------------------------------------- 1 | { 2 | "project": "integration-test", 3 | "polkadotBinary": { 4 | "url": "https://github.com/w3f/polkadot/releases/download/v0.8.25/polkadot", 5 | "checksum": "sha256:500a355ca6e40333450e8b45a6d7146bd281b0445e4a45d9831836781d67e836" 6 | }, 7 | "nodeExporter": { 8 | "enabled": true, 9 | "binary": { 10 | "url": "https://github.com/prometheus/node_exporter/releases/download/v0.18.1/node_exporter-0.18.1.linux-amd64.tar.gz", 11 | "checksum": "sha256:b2503fd932f85f4e5baf161268854bf5d22001869b84f00fd2d1f57b51b72424" 12 | } 13 | }, 14 
| "polkadotNetworkId": "ksmcc3", 15 | "state": { 16 | "project": "development-252112" 17 | }, 18 | "validators": { 19 | "nodes": [ 20 | { 21 | "provider": "gcp", 22 | "machineType": "n1-standard-1", 23 | "count": 1, 24 | "location": "us-east1", 25 | "zone": "us-east1-b", 26 | "projectId": "development-252112", 27 | "sshUser": "w3fadmin", 28 | "image": "2004" 29 | } 30 | ] 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /scripts/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | set -o pipefail 4 | 5 | eval $(ssh-agent) 6 | for key in $SSH_ID_RSA_PUBLIC $SSH_ID_RSA_VALIDATOR; do 7 | chmod 600 "$key" 8 | ssh-add "$key" 9 | done 10 | 11 | yarn 12 | 13 | if [ ! -z "${POLKADOT_SECURE_VALIDATOR_CONFIG_FILE}" ]; then 14 | yarn sync -c "${POLKADOT_SECURE_VALIDATOR_CONFIG_FILE}" 15 | else 16 | yarn sync 17 | fi 18 | -------------------------------------------------------------------------------- /scripts/integrationTest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Provision 5 | eval `ssh-agent -s` 6 | ssh-add -D 7 | 8 | teardown(){ 9 | # Destroy 10 | yarn clean -c scripts/test.json 11 | } 12 | 13 | trap teardown EXIT 14 | 15 | export SSH_ID_RSA_PUBLIC=$(pwd)/public_keyfile 16 | export SSH_ID_RSA_VALIDATOR=$(pwd)/validator_keyfile 17 | export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/credentials.json 18 | 19 | if [ -f "$SSH_ID_RSA_PUBLIC" ]; then 20 | echo "$SSH_ID_RSA_PUBLIC exist" 21 | else 22 | echo "$SSH_ID_RSA_PUBLIC does not exist" 23 | ssh-keygen -f public_keyfile -P "" -C "SSH_ID_RSA_PUBLIC" -m PEM 24 | fi 25 | ssh-add public_keyfile 26 | 27 | if [ -f "$SSH_ID_RSA_VALIDATOR" ]; then 28 | echo "$SSH_ID_RSA_VALIDATOR exist" 29 | else 30 | echo "$SSH_ID_RSA_VALIDATOR does not exist" 31 | ssh-keygen -f validator_keyfile -P "" -C "SSH_ID_RSA_VALIDATOR" -m PEM 32 | fi 33 | ssh-add 
validator_keyfile 34 | ssh-add -L 35 | 36 | # Install 37 | yarn plan -c scripts/test.json 38 | yarn sync -c scripts/test.json 39 | 40 | # Upgrade 41 | yarn update-binary -c scripts/binaryUpgradeTest.json 42 | -------------------------------------------------------------------------------- /scripts/patch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | 5 | echo patching... 6 | result=$(cat config/main.json | jq '.polkadotBinary.url = "'${url}'"') && echo "${result}" > config/main.sample.json 7 | result=$(cat config/main.json | jq '.polkadotBinary.checksum = "'${checksum}'"') && echo "${result}" > config/main.sample.json 8 | 9 | result=$(cat config/main.json | jq '.polkadotBinary.url = "'${url}'"') && echo "${result}" > scripts/binaryUpgradeTest.json 10 | result=$(cat config/main.json | jq '.polkadotBinary.checksum = "'${checksum}'"') && echo "${result}" > scripts/binaryUpgradeTest.json 11 | -------------------------------------------------------------------------------- /scripts/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "project": "integration-test", 3 | "polkadotBinary": { 4 | "url": "https://github.com/w3f/polkadot/releases/download/v0.8.22/polkadot", 5 | "checksum": "sha256:7bfe10a0e46385dfb488dcc05ecffbb3075dcbd1d469e44b67a0f5d870185a05" 6 | }, 7 | "nodeExporter": { 8 | "enabled": true, 9 | "binary": { 10 | "url": "https://github.com/prometheus/node_exporter/releases/download/v0.18.1/node_exporter-0.18.1.linux-amd64.tar.gz", 11 | "checksum": "sha256:b2503fd932f85f4e5baf161268854bf5d22001869b84f00fd2d1f57b51b72424" 12 | } 13 | }, 14 | "polkadotNetworkId": "ksmcc3", 15 | "state": { 16 | "project": "development-252112" 17 | }, 18 | "validators": { 19 | "nodes": [ 20 | { 21 | "provider": "gcp", 22 | "machineType": "n1-standard-1", 23 | "count": 1, 24 | "location": "us-east1", 25 | "zone": "us-east1-b", 26 | "projectId": 
"development-252112", 27 | "sshUser": "w3fadmin", 28 | "image": "2004" 29 | } 30 | ] 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/index.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | const path = require('path'); 4 | const process = require('process'); 5 | const program = require('commander'); 6 | require('dotenv').config({path: path.resolve(process.cwd(), '.env')}); 7 | require('dotenv').config({path: path.resolve(process.cwd(), 'config/.env')}); 8 | 9 | const clean = require('./lib/actions/clean'); 10 | const sync = require('./lib/actions/sync'); 11 | const plan = require('./lib/actions/plan'); 12 | const version = require('./lib/version'); 13 | const updateBinary = require('./lib/actions/updateBinary'); 14 | const restoreDB = require('./lib/actions/restoreDB'); 15 | const rotateKeys = require('./lib/actions/rotateKeys'); 16 | 17 | 18 | program 19 | .version(version.show()); 20 | 21 | program 22 | .command('sync') 23 | .description('Synchronizes the infrastructure.') 24 | .option('-c, --config [path]', 'Path to config file.', './config/main.json') 25 | .action(sync.do); 26 | 27 | program 28 | .command('clean') 29 | .description('Removes all the resources.') 30 | .option('-c, --config [path]', 'Path to config file.', './config/main.json') 31 | .action(clean.do); 32 | 33 | program 34 | .command('plan') 35 | .description('Shows changes in the infrastructure layer that would be performed by sync.') 36 | .option('-c, --config [path]', 'Path to config file.', './config/main.json') 37 | .action(plan.do); 38 | 39 | program 40 | .command('update-binary') 41 | .description('Update the nodes binary.') 42 | .option('-c, --config [path]', 'Path to config file.', './config/main.json') 43 | .action(updateBinary.do); 44 | 45 | program 46 | .command('restore-db') 47 | .description('Restore the nodes DB.') 48 | .option('-c, --config [path]', 'Path to 
config file.', './config/main.json') 49 | .action(restoreDB.do); 50 | 51 | program 52 | .command('rotate-keys') 53 | .description('Rotate the nodes keys.') 54 | .option('-c, --config [path]', 'Path to config file.', './config/main.json') 55 | .action(rotateKeys.do); 56 | 57 | program.allowUnknownOption(false); 58 | 59 | const parsed = program.parse(process.argv); 60 | if (! parsed || !(parsed.args && parsed.args.length > 0 && (typeof (parsed.args[0] === 'object')))) { 61 | program.outputHelp(); 62 | } 63 | -------------------------------------------------------------------------------- /src/lib/actions/clean.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | const process = require('process'); 3 | 4 | const config = require('../config.js'); 5 | const { Platform } = require('../platform.js'); 6 | 7 | 8 | module.exports = { 9 | do: async (cmd) => { 10 | const cfg = config.read(cmd.config); 11 | 12 | console.log(chalk.yellow('Cleaning platform...')); 13 | const platform = new Platform(cfg); 14 | try { 15 | await platform.clean(); 16 | } catch (e) { 17 | console.log(chalk.red(`Could not clean platform: ${e.message}`)); 18 | process.exit(-1); 19 | } 20 | console.log(chalk.green('Done')); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/lib/actions/plan.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | const process = require('process'); 3 | 4 | const config = require('../config.js'); 5 | const { Platform } = require('../platform.js'); 6 | 7 | 8 | module.exports = { 9 | do: async (cmd) => { 10 | const cfg = config.read(cmd.config); 11 | 12 | console.log(chalk.yellow('Calculating plan...')); 13 | const platform = new Platform(cfg); 14 | 15 | try { 16 | await platform.plan(); 17 | } catch (e) { 18 | console.log(chalk.red(`Could not calculate plan: ${e.message}`)); 19 | 
process.exit(-1); 20 | } 21 | console.log(chalk.green('Done')); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/lib/actions/restoreDB.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | const process = require('process'); 3 | 4 | const config = require('../config.js'); 5 | const { Platform } = require('../platform.js'); 6 | const { Application } = require('../application.js'); 7 | 8 | 9 | module.exports = { 10 | do: async (cmd) => { 11 | const cfg = config.read(cmd.config); 12 | 13 | console.log(chalk.yellow('Restoring Database...')); 14 | const platform = new Platform(cfg); 15 | let platformResult; 16 | try { 17 | platformResult = await platform.output(); 18 | } catch (e) { 19 | console.log(chalk.red(`Could not get output from platform: ${e.message}`)); 20 | process.exit(-1); 21 | } 22 | console.log(chalk.green('Done')); 23 | 24 | console.log(chalk.yellow('Restoring application Database...')); 25 | const app = new Application(cfg, platformResult); 26 | try { 27 | await app.restoreDB(); 28 | } catch (e) { 29 | console.log(chalk.red(`Could not restore application database: ${e.message}`)); 30 | process.exit(-1); 31 | } 32 | console.log(chalk.green('Done')); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/lib/actions/rotateKeys.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | const process = require('process'); 3 | 4 | const config = require('../config.js'); 5 | const { Platform } = require('../platform.js'); 6 | const { Application } = require('../application.js'); 7 | 8 | 9 | module.exports = { 10 | do: async (cmd) => { 11 | const cfg = config.read(cmd.config); 12 | 13 | console.log(chalk.yellow('Rotating Keys...')); 14 | const platform = new Platform(cfg); 15 | let platformResult; 16 | try { 17 | 
platformResult = await platform.output(); 18 | } catch (e) { 19 | console.log(chalk.red(`Could not get output from platform: ${e.message}`)); 20 | process.exit(-1); 21 | } 22 | console.log(chalk.green('Done')); 23 | 24 | console.log(chalk.yellow('Rotating application Keys...')); 25 | const app = new Application(cfg, platformResult); 26 | try { 27 | await app.rotateKeys(); 28 | } catch (e) { 29 | console.log(chalk.red(`Could not rotate application Keys: ${e.message}`)); 30 | process.exit(-1); 31 | } 32 | console.log(chalk.green('Done')); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/lib/actions/sync.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | const process = require('process'); 3 | 4 | const config = require('../config.js'); 5 | const { Platform } = require('../platform.js'); 6 | const { Application } = require('../application.js'); 7 | 8 | 9 | module.exports = { 10 | do: async (cmd) => { 11 | const cfg = config.read(cmd.config); 12 | 13 | console.log(chalk.yellow('Syncing platform...')); 14 | const platform = new Platform(cfg); 15 | let platformResult; 16 | try { 17 | platformResult = await platform.sync(); 18 | } catch (e) { 19 | console.log(chalk.red(`Could not sync platform: ${e.message}`)); 20 | process.exit(-1); 21 | } 22 | console.log(chalk.green('Done')); 23 | 24 | console.log(chalk.yellow('Syncing application...')); 25 | const app = new Application(cfg, platformResult); 26 | try { 27 | await app.sync(); 28 | } catch (e) { 29 | console.log(chalk.red(`Could not sync application: ${e.message}`)); 30 | process.exit(-1); 31 | } 32 | console.log(chalk.green('Done')); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/lib/actions/updateBinary.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | const 
process = require('process'); 3 | 4 | const config = require('../config.js'); 5 | const { Platform } = require('../platform.js'); 6 | const { Application } = require('../application.js'); 7 | 8 | 9 | module.exports = { 10 | do: async (cmd) => { 11 | const cfg = config.read(cmd.config); 12 | 13 | console.log(chalk.yellow('Updating binary...')); 14 | const platform = new Platform(cfg); 15 | let platformResult; 16 | try { 17 | platformResult = await platform.output(); 18 | } catch (e) { 19 | console.log(chalk.red(`Could not get output from platform: ${e.message}`)); 20 | process.exit(-1); 21 | } 22 | console.log(chalk.green('Done')); 23 | 24 | console.log(chalk.yellow('Updating application binary...')); 25 | const app = new Application(cfg, platformResult); 26 | try { 27 | await app.updateBinary(); 28 | } catch (e) { 29 | console.log(chalk.red(`Could not update application binary: ${e.message}`)); 30 | process.exit(-1); 31 | } 32 | console.log(chalk.green('Done')); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/lib/application.js: -------------------------------------------------------------------------------- 1 | const { Ansible } = require('./clients/ansible'); 2 | 3 | 4 | class Application { 5 | constructor(cfg, platformResult={}) { 6 | const ansibleCfg = JSON.parse(JSON.stringify(cfg)); 7 | 8 | for (let counter = 0; counter < ansibleCfg.validators.nodes.length; counter++) { 9 | ansibleCfg.validators.nodes[counter].ipAddresses = platformResult.validatorIpAddresses[counter]; 10 | } 11 | 12 | if(ansibleCfg.publicNodes) { 13 | for (let counter = 0; counter < ansibleCfg.publicNodes.nodes.length; counter++) { 14 | ansibleCfg.publicNodes.nodes[counter].ipAddresses = platformResult.publicNodesIpAddresses[counter]; 15 | } 16 | } 17 | 18 | this.ansible = new Ansible(ansibleCfg); 19 | } 20 | 21 | async sync() { 22 | return this.ansible.runCommonPlaybook("main.yml") 23 | } 24 | 25 | async updateBinary() { 26 | return 
this.ansible.runCommonPlaybook("main_update_binary.yml") 27 | } 28 | 29 | async restoreDB() { 30 | return this.ansible.runCommonPlaybook("main_restore_db.yml") 31 | } 32 | 33 | async rotateKeys() { 34 | return this.ansible.runCommonPlaybook("main_rotate_keys.yml") 35 | } 36 | 37 | async clean() { 38 | return this.ansible.clean(); 39 | } 40 | } 41 | 42 | module.exports = { 43 | Application 44 | } 45 | -------------------------------------------------------------------------------- /src/lib/async.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | forEach: async (array, callback) => { 3 | for (let index = 0; index < array.length; index++) { 4 | await callback(array[index], index, array); 5 | } 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /src/lib/clients/ansible.js: -------------------------------------------------------------------------------- 1 | const path = require('path'); 2 | 3 | const cmd = require('../cmd'); 4 | const { Project } = require('../project'); 5 | const tpl = require('../tpl'); 6 | const { nginxUsername, nginxPassword } = require('../env'); 7 | 8 | const inventoryFileName = 'inventory' 9 | 10 | 11 | class Ansible { 12 | constructor(cfg) { 13 | this.config = JSON.parse(JSON.stringify(cfg)); 14 | 15 | this.ansiblePath = path.join(__dirname, '..', '..', '..', 'ansible'); 16 | this.options = { 17 | cwd: this.ansiblePath, 18 | verbose: true 19 | }; 20 | } 21 | 22 | async runCommonPlaybook(playbookName) { 23 | const inventoryPath = this._writeInventory(); 24 | return this._cmd(`${playbookName} -f 30 -i "${inventoryPath}"`); 25 | } 26 | 27 | async clean() { 28 | 29 | } 30 | 31 | async _cmd(command, options = {}) { 32 | const actualOptions = Object.assign({}, this.options, options); 33 | return cmd.exec(`ansible-playbook ${command}`, actualOptions); 34 | } 35 | 36 | _writeInventory() { 37 | const origin = path.resolve(__dirname, '..', '..', 
'..', 'tpl', 'ansible_inventory'); 38 | const project = new Project(this.config); 39 | const buildDir = path.join(project.path(), 'ansible'); 40 | const target = path.join(buildDir, inventoryFileName); 41 | 42 | const validators = this._genTplNodes(this.config.validators); 43 | const validatorTelemetryUrl = this.config.validators.telemetryUrl; 44 | const validatorLoggingFilter = this.config.validators.loggingFilter; 45 | const polkadotAdditionalValidatorFlags = this.config.validators.additionalFlags; 46 | 47 | let publicNodes = []; 48 | let publicTelemetryUrl = ''; 49 | let publicLoggingFilter=''; 50 | let polkadotAdditionalPublicFlags = ''; 51 | if (this.config.publicNodes) { 52 | publicNodes = this._genTplNodes(this.config.publicNodes, validators.length); 53 | publicTelemetryUrl = this.config.publicNodes.telemetryUrl; 54 | publicLoggingFilter = this.config.publicNodes.loggingFilter; 55 | polkadotAdditionalPublicFlags = this.config.publicNodes.additionalFlags; 56 | } 57 | 58 | const data = { 59 | project: this.config.project, 60 | 61 | polkadotBinaryUrl: this.config.polkadotBinary.url, 62 | polkadotBinaryChecksum: this.config.polkadotBinary.checksum, 63 | chain: this.config.chain || 'kusama', 64 | polkadotNetworkId: this.config.polkadotNetworkId || 'ksmcc2', 65 | 66 | validators, 67 | publicNodes, 68 | 69 | validatorTelemetryUrl, 70 | publicTelemetryUrl, 71 | 72 | validatorLoggingFilter, 73 | publicLoggingFilter, 74 | 75 | buildDir, 76 | 77 | polkadotAdditionalCommonFlags: this.config.additionalFlags, 78 | polkadotAdditionalValidatorFlags, 79 | polkadotAdditionalPublicFlags, 80 | 81 | nginxUsername: nginxUsername, 82 | nginxPassword: nginxPassword 83 | }; 84 | 85 | if (this.config.nodeExporter?.enabled) { 86 | data.nodeExporterEnabled = true; 87 | data.nodeExporterBinaryUrl = this.config.nodeExporter.binary.url; 88 | data.nodeExporterBinaryChecksum = this.config.nodeExporter.binary.checksum; 89 | } else { 90 | data.nodeExporterEnabled = false; 91 | } 92 | 93 | if 
(this.config.polkadotRestart?.enabled) { 94 | data.polkadotRestartEnabled = true; 95 | data.polkadotRestartMinute = this.config.polkadotRestart.minute || '*'; 96 | data.polkadotRestartHour = this.config.polkadotRestart.hour || '*'; 97 | data.polkadotRestartDay = this.config.polkadotRestart.day || '*'; 98 | data.polkadotRestartMonth = this.config.polkadotRestart.month || '*'; 99 | data.polkadotRestartWeekDay = this.config.polkadotRestart.weekDay || '*'; 100 | } else { 101 | data.polkadotRestartEnabled = false; 102 | } 103 | 104 | if(this.config.validators.dbSnapshot?.url != undefined && this.config.validators.dbSnapshot?.checksum != undefined){ 105 | data.dbSnapshotUrl = this.config.validators.dbSnapshot.url; 106 | data.dbSnapshotChecksum = this.config.validators.dbSnapshot.checksum; 107 | } 108 | 109 | tpl.create(origin, target, data); 110 | 111 | return target; 112 | } 113 | 114 | _genTplNodes(nodeSet, offset=0) { 115 | const output = []; 116 | const vpnAddressBase = '10.0.0'; 117 | let counter = offset; 118 | 119 | nodeSet.nodes.forEach((node) => { 120 | node.ipAddresses.forEach((ipAddress) => { 121 | counter++; 122 | const item = { 123 | ipAddress, 124 | sshUser: node.sshUser, 125 | vpnAddress: `${vpnAddressBase}.${counter}`, 126 | }; 127 | if(node.nodeName){ 128 | item.nodeName=node.nodeName 129 | } 130 | output.push(item); 131 | }); 132 | }); 133 | return output; 134 | } 135 | } 136 | 137 | module.exports = { 138 | Ansible 139 | } 140 | -------------------------------------------------------------------------------- /src/lib/clients/terraform.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | const path = require('path'); 3 | 4 | const cmd = require('../cmd'); 5 | const { Project } = require('../project'); 6 | const ssh = require('../ssh'); 7 | const tpl = require('../tpl'); 8 | 9 | 10 | class Terraform { 11 | constructor(cfg) { 12 | this.config = JSON.parse(JSON.stringify(cfg)); 13 | 14 
| const project = new Project(cfg); 15 | this.terraformOriginPath = path.join(__dirname, '..', '..', '..', 'terraform'); 16 | this.terraformFilesPath = path.join(project.path(), 'terraform'); 17 | 18 | this.options = { 19 | verbose: true 20 | }; 21 | } 22 | 23 | async initNodes() { 24 | await this._initNodes('validator',this.config.validators.nodes) 25 | this.config.publicNodes && await this._initNodes('publicNode',this.config.publicNodes.nodes) 26 | } 27 | 28 | async sync(method='apply') { 29 | this._initializeTerraform(); 30 | try { 31 | await this._initState(); 32 | } catch(e) { 33 | console.log(`Allowed error creating state backend: ${e.message}`); 34 | } 35 | 36 | const sshKeys = ssh.keys(); 37 | 38 | let validatorSyncPromises = []; 39 | try { 40 | validatorSyncPromises = await this._create('validator', sshKeys.validatorPublicKey, this.config.validators.nodes, method); 41 | } catch(e) { 42 | console.log(`Could not get validator sync promises: ${e.message}`); 43 | } 44 | 45 | let publicNodeSyncPromises = []; 46 | if(this.config.publicNodes){ 47 | try { 48 | publicNodeSyncPromises = await this._create('publicNode', sshKeys.publicNodePublicKey, this.config.publicNodes.nodes, method); 49 | } catch(e) { 50 | console.log(`Could not get publicNodes sync promises: ${e.message}`); 51 | } 52 | } 53 | const syncPromises = validatorSyncPromises.concat(publicNodeSyncPromises) 54 | 55 | return Promise.all(syncPromises); 56 | } 57 | 58 | async clean() { 59 | this._initializeTerraform(); 60 | let validatorCleanPromises = []; 61 | try { 62 | validatorCleanPromises = await this._destroy('validator',this.config.validators.nodes); 63 | } catch(e) { 64 | console.log(`Could not get validator clean promises: ${e.message}`); 65 | } 66 | 67 | let publicNodesCleanPromises = []; 68 | if(this.config.publicNodes){ 69 | try { 70 | publicNodesCleanPromises = await this._destroy('publicNode', this.config.publicNodes.nodes); 71 | } catch(e) { 72 | console.log(`Could not get publicNodes clean 
promises: ${e.message}`); 73 | } 74 | } 75 | const cleanPromises = validatorCleanPromises.concat(publicNodesCleanPromises); 76 | 77 | return Promise.all(cleanPromises); 78 | } 79 | 80 | nodeOutput(type, counter, outputField) { 81 | const cwd = this._terraformNodeDirPath(type, counter); 82 | const options = { cwd }; 83 | 84 | return this._cmd(`output -json ${outputField}`, options); 85 | } 86 | 87 | async _create(type, sshKey, nodes, method='apply') { 88 | const createPromises = []; 89 | 90 | for (let counter = 0; counter < nodes.length; counter++) { 91 | const cwd = this._terraformNodeDirPath(type, counter); 92 | const backendConfig = this._backendConfig(type, counter); 93 | const nodeName = this._nodeName(type, counter); 94 | createPromises.push(new Promise(async (resolve) => { 95 | const options = { cwd }; 96 | await this._initCmd(backendConfig,options); 97 | this._createVarsFile(cwd, nodes[counter], sshKey, nodeName); 98 | 99 | let cmd = method; 100 | if (method === 'apply'){ 101 | cmd += ' -auto-approve'; 102 | } 103 | 104 | await this._cmd(cmd, options); 105 | 106 | resolve(true); 107 | })); 108 | } 109 | return createPromises; 110 | } 111 | 112 | async _destroy(type, nodes) { 113 | const destroyPromises = []; 114 | 115 | for (let counter = 0; counter < nodes.length; counter++) { 116 | const cwd = this._terraformNodeDirPath(type, counter) 117 | const backendConfig = this._backendConfig(type, counter); 118 | destroyPromises.push(new Promise(async (resolve) => { 119 | const options = { cwd }; 120 | await this._initCmd(backendConfig,options); 121 | await this._cmd('destroy -lock=false -auto-approve', options); 122 | 123 | resolve(true); 124 | })); 125 | } 126 | return destroyPromises; 127 | } 128 | 129 | async _cmd(command, options = {}) { 130 | const actualOptions = Object.assign({}, this.options, options); 131 | return cmd.exec(`terraform ${command}`, actualOptions); 132 | } 133 | 134 | async _initCmd(backendConfig, options) { 135 | await this._cmd(`init -var 
state_project=${this.config.state.project} -backend-config=bucket=${backendConfig.bucket} -backend-config=prefix=${backendConfig.prefix}`, options); 136 | } 137 | 138 | async _initState(){ 139 | const cwd = this._terraformNodeDirPath('remote-state'); 140 | const options = { cwd }; 141 | 142 | await this._cmd(`init -var state_project=${this.config.state.project}`, options); 143 | const bucketName = this._bucketName() 144 | return this._cmd(`apply -var state_project=${this.config.state.project} -var name=${bucketName} -auto-approve`, options); 145 | } 146 | 147 | _createVarsFile(cwd, node, sshKey, nodeName) { 148 | const data = { 149 | stateProject: this.config.state.project, 150 | publicKey: sshKey, 151 | sshUser: node.sshUser, 152 | machineType: node.machineType, 153 | location: node.location, 154 | zone: node.zone, 155 | projectId: node.projectId, 156 | nodeCount: node.count, 157 | name: nodeName 158 | } 159 | 160 | if(node.image) { 161 | data.image = node.image; 162 | } 163 | 164 | const source = path.join(__dirname, '..', '..', '..', 'tpl', 'tfvars'); 165 | const target = path.join(cwd, 'terraform.tfvars'); 166 | 167 | tpl.create(source, target, data); 168 | } 169 | 170 | _initializeTerraform() { 171 | fs.removeSync(this.terraformFilesPath); 172 | fs.ensureDirSync(this.terraformFilesPath); 173 | 174 | this._copyTerraformFiles('remote-state', 0, 'remote-state'); 175 | for (let counter = 0; counter < this.config.validators.nodes.length; counter++) { 176 | this._copyTerraformFiles('validator', counter, this.config.validators.nodes[counter].provider); 177 | } 178 | 179 | if (this.config.publicNodes){ 180 | for (let counter = 0; counter < this.config.publicNodes.nodes.length; counter++) { 181 | this._copyTerraformFiles('publicNode', counter, this.config.publicNodes.nodes[counter].provider); 182 | } 183 | } 184 | } 185 | 186 | async _initNodes(type,nodes,){ 187 | for (let counter = 0; counter < nodes.length; counter++) { 188 | const cwd = 
this._terraformNodeDirPath(type, counter); 189 | const backendConfig = this._backendConfig(type, counter); 190 | const options = { cwd }; 191 | await this._initCmd(backendConfig,options); 192 | } 193 | } 194 | 195 | _copyTerraformFiles(type, counter, provider) { 196 | const targetDirPath = this._terraformNodeDirPath(type, counter); 197 | const originDirPath = path.join(this.terraformOriginPath, provider); 198 | fs.ensureDirSync(targetDirPath); 199 | 200 | const nodeName = this._nodeName(type, counter); 201 | const name = `${nodeName}-${this.config.project}`; 202 | 203 | fs.readdirSync(originDirPath).forEach((item) => { 204 | const origin = path.join(originDirPath, item); 205 | const target = path.join(targetDirPath, item); 206 | const data = { 207 | name 208 | }; 209 | tpl.create(origin, target, data); 210 | }); 211 | } 212 | 213 | _terraformNodeDirPath(type, counter=0) { 214 | const dirName = this._nodeName(type, counter); 215 | return path.join(this.terraformFilesPath, dirName); 216 | } 217 | 218 | _backendConfig(type, counter) { 219 | const bucket = this._bucketName(); 220 | const prefix = this._nodeName(type, counter); 221 | 222 | return { bucket, prefix }; 223 | } 224 | 225 | _bucketName() { 226 | return `${this.config.project}-sv-tf-state` 227 | } 228 | 229 | _nodeName(type, counter) { 230 | const name = `${type}${counter}`; 231 | return name.toLowerCase(); 232 | } 233 | } 234 | 235 | module.exports = { 236 | Terraform 237 | } 238 | -------------------------------------------------------------------------------- /src/lib/cmd.js: -------------------------------------------------------------------------------- 1 | const { Buffer } = require('buffer'); 2 | const { spawn } = require('child_process'); 3 | 4 | 5 | module.exports = { 6 | splitCommandAndArgs: function (command) { 7 | const regex = new RegExp('"[^"]+"|[\\S]+', 'g'); 8 | return command.match(regex).map(s => s.replace(/"/g, '')); 9 | }, exec: async (command, options={}) => { 10 | return new 
Promise((resolve, reject) => { 11 | let items = module.exports.splitCommandAndArgs(command); 12 | 13 | const child = spawn(items[0], items.slice(1), options); 14 | if(options.detached) { 15 | child.unref(); 16 | resolve(child.pid); 17 | return; 18 | } 19 | let match = false; 20 | let output = new Buffer.from(''); 21 | 22 | child.stdout.on('data', (data) => { 23 | if (options.matcher && options.matcher.test(data)) { 24 | match = true; 25 | child.kill('SIGTERM'); 26 | resolve(); 27 | return; 28 | } 29 | output = Buffer.concat([output, data]); 30 | if (options.verbose) { 31 | console.log(data.toString()); 32 | } 33 | }); 34 | 35 | child.stderr.on('data', (data) => { 36 | output = Buffer.concat([output, data]); 37 | if (options.verbose) { 38 | console.log(data.toString()); 39 | } 40 | }); 41 | 42 | child.on('close', (code) => { 43 | if (code !== 0 && !match) { 44 | console.error(`Command execution failed with code: ${code}`); 45 | reject(new Error(code)); 46 | } 47 | else { 48 | resolve(output); 49 | } 50 | }); 51 | }); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/lib/config.js: -------------------------------------------------------------------------------- 1 | const path = require('path'); 2 | const process = require('process'); 3 | 4 | const files = require('./files'); 5 | 6 | 7 | module.exports = { 8 | read: (rawCfgPath) => { 9 | const cfgPath = path.resolve(process.cwd(), rawCfgPath); 10 | return files.readJSON(cfgPath); 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/lib/env.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | validatorSshPrivateKeyPath: process.env.SSH_ID_RSA_VALIDATOR, 3 | publicNodeSshPrivateKeyPath: process.env.SSH_ID_RSA_PUBLIC, 4 | nginxUsername: process.env.NGINX_USERNAME || "prometheus", 5 | nginxPassword: process.env.NGINX_PASSWORD || "nginx_password", 6 | }; 7 | 
-------------------------------------------------------------------------------- /src/lib/files.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | 3 | module.exports = { 4 | readJSON: (filePath) => { 5 | const rawContent = fs.readFileSync(filePath); 6 | 7 | return JSON.parse(rawContent); 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /src/lib/platform.js: -------------------------------------------------------------------------------- 1 | const asyncUtils = require('./async.js'); 2 | const { Terraform } = require('./clients/terraform'); 3 | 4 | 5 | class Platform { 6 | constructor(cfg) { 7 | this.config = JSON.parse(JSON.stringify(cfg)); 8 | 9 | this.tf = new Terraform(this.config); 10 | } 11 | 12 | async sync() { 13 | await this.tf.sync('apply'); 14 | const validatorIpAddresses = await this._getValidatorIpAddresses(); 15 | const publicNodesIpAddresses = await this._getPublicNodesIpAddresses(); 16 | return { validatorIpAddresses, publicNodesIpAddresses }; 17 | } 18 | 19 | async output() { 20 | await this.tf.initNodes(); 21 | const validatorIpAddresses = await this._getValidatorIpAddresses(); 22 | const publicNodesIpAddresses = await this._getPublicNodesIpAddresses(); 23 | return { validatorIpAddresses, publicNodesIpAddresses }; 24 | } 25 | 26 | async plan() { 27 | return this.tf.sync('plan'); 28 | } 29 | 30 | async clean() { 31 | return this.tf.clean(); 32 | } 33 | 34 | async _extractOutput(type, nodeSet) { 35 | const output = []; 36 | await asyncUtils.forEach(nodeSet, async (node, index) => { 37 | const ipAddress = await this.tf.nodeOutput(type, index, 'ip_address'); 38 | output.push(JSON.parse(ipAddress.toString())); 39 | }); 40 | return output; 41 | } 42 | 43 | async _getValidatorIpAddresses() { 44 | return await this._extractOutput('validator', this.config.validators.nodes); 45 | } 46 | 47 | async _getPublicNodesIpAddresses() { 48 | 
if(this.config.publicNodes){ 49 | return await this._extractOutput('publicNode', this.config.publicNodes.nodes); 50 | } 51 | return [] 52 | } 53 | } 54 | 55 | module.exports = { 56 | Platform 57 | } 58 | -------------------------------------------------------------------------------- /src/lib/project.js: -------------------------------------------------------------------------------- 1 | const ospath = require('ospath'); 2 | const path = require('path'); 3 | 4 | 5 | class Project { 6 | constructor(cfg) { 7 | this.name = cfg.project; 8 | } 9 | 10 | path() { 11 | return path.join(ospath.data(), 'polkadot-secure-validator', 'build', this.name); 12 | } 13 | } 14 | 15 | module.exports = { 16 | Project 17 | } 18 | -------------------------------------------------------------------------------- /src/lib/ssh.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | const fs = require('fs-extra'); 3 | const forge = require('node-forge'); 4 | 5 | const { validatorSshPrivateKeyPath, publicNodeSshPrivateKeyPath } = require('./env'); 6 | 7 | 8 | module.exports = { 9 | keys: () => { 10 | if(!validatorSshPrivateKeyPath) { 11 | console.log(chalk.red('Please, export the path of the file with the private SSH key you want to use on validators as the environment variable SSH_ID_RSA_VALIDATOR')); 12 | process.exit(-1); 13 | } 14 | if(!publicNodeSshPrivateKeyPath) { 15 | console.log(chalk.red('Please, export the path of the file with the private SSH key you want to use on public nodes as the environment variable SSH_ID_RSA_PUBLIC')); 16 | process.exit(-1); 17 | } 18 | 19 | const validatorPublicKey = publicKeyFromPrivateKeyPath(validatorSshPrivateKeyPath); 20 | const publicNodePublicKey = publicKeyFromPrivateKeyPath(publicNodeSshPrivateKeyPath); 21 | 22 | return {validatorPublicKey, publicNodePublicKey}; 23 | } 24 | } 25 | 26 | function publicKeyFromPrivateKeyPath(privateKeyPath) { 27 | const privateKey = 
fs.readFileSync(privateKeyPath); 28 | 29 | const forgePrivateKey = forge.pki.privateKeyFromPem(privateKey); 30 | const forgePublicKey = forge.pki.setRsaPublicKey(forgePrivateKey.n, forgePrivateKey.e); 31 | 32 | return forge.ssh.publicKeyToOpenSSH(forgePublicKey).trim(); 33 | } 34 | -------------------------------------------------------------------------------- /src/lib/tpl.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | const Handlebars = require('handlebars'); 3 | const path = require('path'); 4 | 5 | 6 | Handlebars.registerHelper('raw', function(options) { 7 | return options.fn(); 8 | }); 9 | 10 | module.exports = { 11 | create: (source, target, data) => { 12 | const sourceTpl = fs.readFileSync(source).toString(); 13 | const template = Handlebars.compile(sourceTpl); 14 | const contents = template(data); 15 | 16 | const targetDir = path.dirname(target); 17 | fs.mkdirSync(targetDir, {recursive: true}); 18 | 19 | fs.writeFileSync(target, contents); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/lib/version.js: -------------------------------------------------------------------------------- 1 | const path = require('path'); 2 | 3 | const files = require('./files'); 4 | 5 | module.exports = { 6 | show: () => { 7 | const targetPath = path.join(path.dirname(module.filename), '..', '..', 'package.json'); 8 | const pkg = files.readJSON(targetPath); 9 | 10 | return pkg.version; 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /terraform/aws/backend.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.state_project 3 | version = "~>2.15" 4 | } 5 | 6 | terraform { 7 | backend "gcs" { 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /terraform/aws/main.tf: 
-------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "key-{{ name }}" { 2 | key_name = "{{ name }}" 3 | public_key = var.public_key 4 | } 5 | 6 | resource "aws_vpc" "main-{{ name }}" { 7 | cidr_block = "172.26.0.0/16" 8 | 9 | enable_dns_hostnames = true 10 | 11 | enable_dns_support = true 12 | 13 | tags = { 14 | Name = "{{ name }}" 15 | } 16 | } 17 | 18 | resource "aws_subnet" "main-{{ name }}" { 19 | cidr_block = "${cidrsubnet(aws_vpc.main-{{ name }}.cidr_block, 3, 1)}" 20 | 21 | vpc_id = "${aws_vpc.main-{{ name }}.id}" 22 | 23 | availability_zone = var.zone 24 | 25 | map_public_ip_on_launch = true 26 | } 27 | 28 | resource "aws_internet_gateway" "main-{{ name }}" { 29 | vpc_id = "${aws_vpc.main-{{ name }}.id}" 30 | 31 | tags = { 32 | Name = "{{ name }}" 33 | } 34 | } 35 | 36 | resource "aws_route_table" "main-{{ name }}" { 37 | vpc_id = "${aws_vpc.main-{{ name }}.id}" 38 | 39 | route { 40 | cidr_block = "0.0.0.0/0" 41 | gateway_id = "${aws_internet_gateway.main-{{ name }}.id}" 42 | } 43 | 44 | tags = { 45 | Name = "{{ name }}" 46 | } 47 | } 48 | 49 | resource "aws_route_table_association" "main-{{ name }}" { 50 | subnet_id = "${aws_subnet.main-{{ name }}.id}" 51 | route_table_id = "${aws_route_table.main-{{ name }}.id}" 52 | } 53 | 54 | resource "aws_security_group" "main-{{ name }}" { 55 | name = "externalssh" 56 | vpc_id = "${aws_vpc.main-{{ name }}.id}" 57 | } 58 | 59 | resource "aws_security_group_rule" "externalssh-{{ name }}" { 60 | type = "ingress" 61 | from_port = 22 62 | to_port = 22 63 | protocol = "tcp" 64 | cidr_blocks = ["0.0.0.0/0"] 65 | 66 | security_group_id = "${aws_security_group.main-{{ name }}.id}" 67 | } 68 | 69 | resource "aws_security_group_rule" "p2p-{{ name }}" { 70 | type = "ingress" 71 | from_port = 30333 72 | to_port = 30333 73 | protocol = "tcp" 74 | cidr_blocks = ["0.0.0.0/0"] 75 | 76 | security_group_id = "${aws_security_group.main-{{ name }}.id}" 77 | } 78 | 79 | resource 
"aws_security_group_rule" "p2p-proxy-{{ name }}" { 80 | type = "ingress" 81 | from_port = 80 82 | to_port = 80 83 | protocol = "tcp" 84 | cidr_blocks = ["0.0.0.0/0"] 85 | 86 | security_group_id = "${aws_security_group.main-{{ name }}.id}" 87 | } 88 | 89 | resource "aws_security_group_rule" "vpn-{{ name }}" { 90 | type = "ingress" 91 | from_port = 51820 92 | to_port = 51820 93 | protocol = "udp" 94 | cidr_blocks = ["0.0.0.0/0"] 95 | 96 | security_group_id = "${aws_security_group.main-{{ name }}.id}" 97 | } 98 | 99 | resource "aws_security_group_rule" "node-exporter-{{ name }}" { 100 | type = "ingress" 101 | from_port = 9100 102 | to_port = 9100 103 | protocol = "tcp" 104 | cidr_blocks = ["0.0.0.0/0"] 105 | 106 | security_group_id = "${aws_security_group.main-{{ name }}.id}" 107 | } 108 | 109 | resource "aws_security_group_rule" "node-metrics-{{ name }}" { 110 | type = "ingress" 111 | from_port = 9616 112 | to_port = 9616 113 | protocol = "tcp" 114 | cidr_blocks = ["0.0.0.0/0"] 115 | 116 | security_group_id = "${aws_security_group.main-{{ name }}.id}" 117 | } 118 | 119 | resource "aws_security_group_rule" "allow_all-{{ name }}" { 120 | type = "egress" 121 | from_port = 0 122 | to_port = 0 123 | protocol = "-1" 124 | cidr_blocks = ["0.0.0.0/0"] 125 | 126 | security_group_id = "${aws_security_group.main-{{ name }}.id}" 127 | } 128 | 129 | resource "aws_instance" "main-{{ name }}" { 130 | ami = var.image 131 | instance_type = var.machine_type 132 | key_name = "{{ name }}" 133 | count = var.node_count 134 | 135 | subnet_id = "${aws_subnet.main-{{ name }}.id}" 136 | vpc_security_group_ids = ["${aws_security_group.main-{{ name }}.id}"] 137 | 138 | root_block_device { 139 | volume_size = 150 140 | } 141 | 142 | tags = { 143 | Name = "{{name}}-${count.index}" 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /terraform/aws/output.tf: -------------------------------------------------------------------------------- 1 | 
output "ip_address" { 2 | value = "${aws_instance.main-{{ name }}.*.public_ip}" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/aws/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.location 3 | version = "~>2.28" 4 | } 5 | -------------------------------------------------------------------------------- /terraform/aws/variables.tf: -------------------------------------------------------------------------------- 1 | variable "state_project" { 2 | default = "my_project" 3 | } 4 | 5 | variable "project_id" { 6 | default = "my_project" 7 | } 8 | 9 | variable "location" { 10 | default = "eu-central-1" 11 | } 12 | 13 | variable "zone" { 14 | default = "eu-central-1a" 15 | } 16 | 17 | variable "machine_type" { 18 | default = "m4.large" 19 | } 20 | 21 | variable "public_key" { 22 | default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDqaZLcaObIN87RVHf+eI+TvXEAyFe9hCDBnJFohM0KYZYgqfihpyBgwCzF1RzC2w1/+ypwZ4Lv8CNnFp22C2p03ANoeXfoJS3jPDeIr6a1PvzH9qPx+zNc6kEW5aD8oA2KuJB1+plPZ881toW2WBk6Y0n5vI3CEo2UFiXjWC4uCsMhvhmhOXtQiXlEOgighkE3jZqiPUQduJ+FPl5rqCd+yMVpSTOYR5/cOCmhfLv2ogyBkxQV7cAKJZqIVKG3XK8axXHHrIx5gBMAT3HDYWg20S8gffZhEK1a7iLhzGYznCG2C+V72msUFjWyOSTw/vaaBr4cy9rAi0lkajgcfi+n devops@web3.foundation" 23 | } 24 | 25 | variable "ssh_user" { 26 | default = "" 27 | } 28 | 29 | variable "node_count" { 30 | default = 1 31 | } 32 | 33 | variable "name" { 34 | default = "node" 35 | } 36 | 37 | variable "image" { 38 | default = "ami-0e6273fe5a9a1ad93" 39 | } 40 | -------------------------------------------------------------------------------- /terraform/aws/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/azure/backend.tf: 
-------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.state_project 3 | version = "~>2.15" 4 | } 5 | 6 | terraform { 7 | backend "gcs" { 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /terraform/azure/main.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_resource_group" "main-{{ name }}" { 2 | name = "{{name}}" 3 | location = var.location 4 | } 5 | 6 | resource "azurerm_virtual_network" "main-{{ name }}" { 7 | name = "{{name}}" 8 | address_space = ["10.0.0.0/16"] 9 | location = "${azurerm_resource_group.main-{{ name }}.location}" 10 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 11 | } 12 | 13 | resource "azurerm_subnet" "internal-{{ name }}" { 14 | name = "{{name}}" 15 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 16 | virtual_network_name = "${azurerm_virtual_network.main-{{ name }}.name}" 17 | address_prefix = "10.0.2.0/24" 18 | } 19 | 20 | resource "azurerm_network_interface" "main-{{ name }}" { 21 | name = "{{name}}-${count.index}" 22 | location = "${azurerm_resource_group.main-{{ name }}.location}" 23 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 24 | count = var.node_count 25 | 26 | ip_configuration { 27 | name = "testconfiguration1" 28 | subnet_id = "${azurerm_subnet.internal-{{ name }}.id}" 29 | private_ip_address_allocation = "Dynamic" 30 | public_ip_address_id = "${azurerm_public_ip.main-{{ name }}[count.index].id}" 31 | } 32 | } 33 | 34 | resource "azurerm_public_ip" "main-{{ name }}" { 35 | name = "{{name}}-${count.index}" 36 | location = "${azurerm_resource_group.main-{{ name }}.location}" 37 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 38 | allocation_method = "Static" 39 | sku = "Standard" 40 | idle_timeout_in_minutes = 30 41 | count = var.node_count 42 | 43 | tags = { 
44 | name = "{{name}}-${count.index}" 45 | } 46 | } 47 | 48 | resource "azurerm_virtual_machine" "main-{{ name }}" { 49 | name = "{{name}}-${count.index}" 50 | location = "${azurerm_resource_group.main-{{ name }}.location}" 51 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 52 | network_interface_ids = ["${azurerm_network_interface.main-{{ name }}[count.index].id}"] 53 | vm_size = "Standard_DS1_v2" 54 | count = var.node_count 55 | 56 | delete_os_disk_on_termination = true 57 | 58 | delete_data_disks_on_termination = true 59 | 60 | storage_image_reference { 61 | publisher = "Canonical" 62 | offer = "UbuntuServer" 63 | sku = var.image 64 | version = "latest" 65 | } 66 | storage_os_disk { 67 | name = "myosdisk-${count.index}" 68 | caching = "ReadWrite" 69 | create_option = "FromImage" 70 | managed_disk_type = "Standard_LRS" 71 | disk_size_gb = 400 72 | } 73 | os_profile { 74 | computer_name = "{{name}}-${count.index}" 75 | admin_username = var.ssh_user 76 | } 77 | 78 | os_profile_linux_config { 79 | disable_password_authentication = true 80 | ssh_keys { 81 | key_data = var.public_key 82 | path = "/home/${var.ssh_user}/.ssh/authorized_keys" 83 | } 84 | } 85 | tags = { 86 | name = "{{name}}-${count.index}" 87 | } 88 | } 89 | 90 | data "azurerm_public_ip" "main-{{ name }}" { 91 | name = "${azurerm_public_ip.main-{{ name }}[count.index].name}" 92 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 93 | count = var.node_count 94 | } 95 | 96 | resource "azurerm_network_security_group" "main-{{ name }}" { 97 | name = "{{name}}" 98 | location = "${azurerm_resource_group.main-{{ name }}.location}" 99 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 100 | 101 | tags = { 102 | name = "{{name}}" 103 | } 104 | } 105 | 106 | resource "azurerm_network_security_rule" "outbound-{{ name }}" { 107 | name = "ssh" 108 | priority = 100 109 | direction = "Outbound" 110 | access = "Allow" 111 | protocol = "Tcp" 112 | 
source_port_range = "*" 113 | destination_port_range = "*" 114 | source_address_prefix = "*" 115 | destination_address_prefix = "*" 116 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 117 | network_security_group_name = "${azurerm_network_security_group.main-{{ name }}.name}" 118 | } 119 | 120 | resource "azurerm_network_security_rule" "sshIn-{{ name }}" { 121 | name = "sshIn" 122 | priority = 100 123 | direction = "Inbound" 124 | access = "Allow" 125 | protocol = "Tcp" 126 | source_port_range = "*" 127 | destination_port_range = "22" 128 | source_address_prefix = "*" 129 | destination_address_prefix = "*" 130 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 131 | network_security_group_name = "${azurerm_network_security_group.main-{{ name }}.name}" 132 | } 133 | 134 | resource "azurerm_network_security_rule" "p2pIn-{{ name }}" { 135 | name = "p2pIn" 136 | priority = 101 137 | direction = "Inbound" 138 | access = "Allow" 139 | protocol = "Tcp" 140 | source_port_range = "*" 141 | destination_port_range = "30333" 142 | source_address_prefix = "*" 143 | destination_address_prefix = "*" 144 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 145 | network_security_group_name = "${azurerm_network_security_group.main-{{ name }}.name}" 146 | } 147 | 148 | resource "azurerm_network_security_rule" "p2pIn-proxy-{{ name }}" { 149 | name = "p2pIn-proxy" 150 | priority = 101 151 | direction = "Inbound" 152 | access = "Allow" 153 | protocol = "Tcp" 154 | source_port_range = "*" 155 | destination_port_range = "80" 156 | source_address_prefix = "*" 157 | destination_address_prefix = "*" 158 | resource_group_name = "${azurerm_resource_group.main-{{ name }}.name}" 159 | network_security_group_name = "${azurerm_network_security_group.main-{{ name }}.name}" 160 | } 161 | 162 | resource "azurerm_network_security_rule" "vpnIn-{{ name }}" { 163 | name = "vpnIn" 164 | priority = 102 165 | direction = "Inbound" 166 | 
access                      = "Allow"
  protocol                    = "Udp"
  source_port_range           = "*"
  destination_port_range      = "51820"
  source_address_prefix       = "*"
  destination_address_prefix  = "*"
  resource_group_name         = "${azurerm_resource_group.main-{{ name }}.name}"
  network_security_group_name = "${azurerm_network_security_group.main-{{ name }}.name}"
}

resource "azurerm_network_security_rule" "node-exporter-{{ name }}" {
  name                        = "nodeExporterIn"
  priority                    = 103
  direction                   = "Inbound"
  access                      = "Allow"
  protocol                    = "Tcp"
  source_port_range           = "*"
  destination_port_range      = "9100"
  source_address_prefix       = "*"
  destination_address_prefix  = "*"
  resource_group_name         = "${azurerm_resource_group.main-{{ name }}.name}"
  network_security_group_name = "${azurerm_network_security_group.main-{{ name }}.name}"
}

# FIX: this resource was also named "node-exporter-{{ name }}", duplicating the
# rule above. Terraform rejects two resources with the same address, so this
# 9616 metrics rule could never be applied. Renamed to "node-metrics" to match
# the AWS template's equivalent rule.
resource "azurerm_network_security_rule" "node-metrics-{{ name }}" {
  name                        = "nodeMetricsIn"
  priority                    = 104
  direction                   = "Inbound"
  access                      = "Allow"
  protocol                    = "Tcp"
  source_port_range           = "*"
  destination_port_range      = "9616"
  source_address_prefix       = "*"
  destination_address_prefix  = "*"
  resource_group_name         = "${azurerm_resource_group.main-{{ name }}.name}"
  network_security_group_name = "${azurerm_network_security_group.main-{{ name }}.name}"
}

resource "azurerm_subnet_network_security_group_association" "main-{{ name }}" {
  subnet_id                 = "${azurerm_subnet.internal-{{ name }}.id}"
  network_security_group_id = "${azurerm_network_security_group.main-{{ name }}.id}"
}
-------------------------------------------------------------------------------- /terraform/azure/output.tf: --------------------------------------------------------------------------------
output "ip_address" {
  value = "${data.azurerm_public_ip.main-{{ name }}.*.ip_address}"
}
-------------------------------------------------------------------------------- /terraform/azure/provider.tf: -------------------------------------------------------------------------------- 1 | provider "azurerm" { 2 | version = "~>1.34" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/azure/variables.tf: -------------------------------------------------------------------------------- 1 | variable "state_project" { 2 | default = "polkadot-benchmarks" 3 | } 4 | 5 | variable "project_id" { 6 | default = "polkadot-benchmarks" 7 | } 8 | 9 | variable "client_id" {} 10 | variable "client_secret" {} 11 | 12 | variable "location" { 13 | default = "japanwest" 14 | } 15 | 16 | variable "zone" { 17 | default = "" 18 | } 19 | 20 | variable "machine_type" { 21 | default = "Standard_D2s_v3" 22 | } 23 | 24 | variable "public_key" { 25 | default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDqaZLcaObIN87RVHf+eI+TvXEAyFe9hCDBnJFohM0KYZYgqfihpyBgwCzF1RzC2w1/+ypwZ4Lv8CNnFp22C2p03ANoeXfoJS3jPDeIr6a1PvzH9qPx+zNc6kEW5aD8oA2KuJB1+plPZ881toW2WBk6Y0n5vI3CEo2UFiXjWC4uCsMhvhmhOXtQiXlEOgighkE3jZqiPUQduJ+FPl5rqCd+yMVpSTOYR5/cOCmhfLv2ogyBkxQV7cAKJZqIVKG3XK8axXHHrIx5gBMAT3HDYWg20S8gffZhEK1a7iLhzGYznCG2C+V72msUFjWyOSTw/vaaBr4cy9rAi0lkajgcfi+n devops@web3.foundation" 26 | } 27 | 28 | variable "ssh_user" { 29 | default = "admin" 30 | } 31 | 32 | variable "node_count" { 33 | default = 1 34 | } 35 | 36 | variable "name" { 37 | default = "node" 38 | } 39 | 40 | variable "image" { 41 | default = "18.04-LTS" 42 | } 43 | -------------------------------------------------------------------------------- /terraform/azure/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/digitalocean/backend.tf: 
-------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.state_project 3 | version = "~>2.15" 4 | } 5 | 6 | terraform { 7 | backend "gcs" { 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /terraform/digitalocean/main.tf: -------------------------------------------------------------------------------- 1 | data "digitalocean_project" "default" { 2 | name = var.project_id 3 | } 4 | 5 | resource "digitalocean_droplet" "default" { 6 | image = var.image 7 | name = "{{name}}-${count.index}" 8 | region = var.location 9 | size = var.machine_type 10 | ssh_keys = [digitalocean_ssh_key.default.fingerprint] 11 | count = var.node_count 12 | } 13 | 14 | resource "digitalocean_ssh_key" "default" { 15 | name = "Polkadot-{{ name }}" 16 | public_key = var.public_key 17 | } 18 | 19 | resource "digitalocean_project_resources" "droplet-in-project" { 20 | project = data.digitalocean_project.default.id 21 | resources = digitalocean_droplet.default.*.urn 22 | } 23 | -------------------------------------------------------------------------------- /terraform/digitalocean/output.tf: -------------------------------------------------------------------------------- 1 | output "ip_address" { 2 | value = digitalocean_droplet.default.*.ipv4_address 3 | } 4 | -------------------------------------------------------------------------------- /terraform/digitalocean/provider.tf: -------------------------------------------------------------------------------- 1 | provider "digitalocean" { 2 | token = var.do_token 3 | version = "~> 1.16" 4 | } 5 | 6 | terraform { 7 | required_providers { 8 | digitalocean = { 9 | source = "digitalocean/digitalocean" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /terraform/digitalocean/variables.tf: -------------------------------------------------------------------------------- 1 | variable 
"state_project" { 2 | default = "" 3 | } 4 | 5 | variable "do_token" { 6 | } 7 | 8 | variable "name" { 9 | default = "w3f" 10 | } 11 | 12 | variable "public_key" { 13 | default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC00G0RD6FW6Vn/kH7POW8qT9oqlsqNNtoNlGsnt/OYa+4ketST4OdT7aXaD2exRSPtrV6nyAUnr03KCH0a7B5YdAvWwhly+P27jAwcwnlJ0XizbmoPR4oA8I1UXksj3jbzqtyj1cyY6zdnWwVYW7vmFzTfrPUjFEvJRWBiwK4gqlPaUDQaOcAtQ38fGPpv0X4pS5K42fxfihOEDApDKf12AB/8Rsd98uymR9lUZ8YElAmnia1ql3xWLC6JP04VfNP3NWYG27jVfggAY9hGCdJ0SPhg4qqUQ9CD9WW2P0yovHdgzeUj0dZINWED3fG0N4TimfXDNAAY6lMGryELxCpN devops@web3.foundation" 14 | } 15 | 16 | variable "node_count" { 17 | default = 1 18 | } 19 | 20 | variable "machine_type" { 21 | default = "s-1vcpu-2gb" 22 | } 23 | 24 | variable "ssh_user" { 25 | default = "" 26 | } 27 | 28 | variable "location" { 29 | default = "fra1" 30 | } 31 | 32 | variable "project_id" { 33 | default = "my_project_id" 34 | } 35 | 36 | variable "zone" { 37 | default = "unused" 38 | } 39 | 40 | variable "image" { 41 | default = "ubuntu-18-04-x64" 42 | } 43 | -------------------------------------------------------------------------------- /terraform/digitalocean/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/gcp/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "gcs" { 3 | } 4 | } 5 | -------------------------------------------------------------------------------- /terraform/gcp/main.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_firewall" "ssh-p2p-{{ name }}" { 2 | name = "ssh-p2p-proxy-{{ name }}" 3 | network = "default" 4 | 5 | allow { 6 | protocol = "tcp" 7 | ports = ["22", "30333", "80"] 8 | } 9 | 10 | source_ranges = ["0.0.0.0/0"] 11 | target_tags = ["{{ name 
}}"] 12 | } 13 | 14 | resource "google_compute_firewall" "vpn-{{ name }}" { 15 | name = "vpn-{{ name }}" 16 | network = "default" 17 | 18 | allow { 19 | protocol = "udp" 20 | ports = ["51820"] 21 | } 22 | 23 | source_ranges = ["0.0.0.0/0"] 24 | target_tags = ["{{ name }}"] 25 | } 26 | 27 | resource "google_compute_firewall" "node-exporter-{{ name }}" { 28 | name = "node-exporter-{{ name }}" 29 | network = "default" 30 | 31 | allow { 32 | protocol = "tcp" 33 | ports = ["9100","9616"] 34 | } 35 | 36 | source_ranges = ["0.0.0.0/0"] 37 | target_tags = ["{{ name }}"] 38 | } 39 | 40 | resource "google_compute_instance" "main-{{ name }}" { 41 | name = "{{name}}-${count.index}" 42 | machine_type = var.machine_type 43 | zone = var.zone 44 | tags = ["{{ name }}"] 45 | count = var.node_count 46 | allow_stopping_for_update = false 47 | 48 | boot_disk { 49 | initialize_params { 50 | image = "ubuntu-os-cloud/ubuntu-${var.image}-lts" 51 | size = 400 52 | } 53 | } 54 | 55 | network_interface { 56 | network = "default" 57 | 58 | access_config { 59 | # Ephemeral 60 | } 61 | } 62 | 63 | depends_on = [google_compute_firewall.ssh-p2p-{{ name }}, google_compute_firewall.vpn-{{ name }}] 64 | 65 | service_account { 66 | scopes = ["compute-ro"] 67 | } 68 | 69 | metadata = { 70 | ssh-keys = "${var.ssh_user}:${var.public_key}" 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /terraform/gcp/output.tf: -------------------------------------------------------------------------------- 1 | output "ip_address" { 2 | value = "${google_compute_instance.main-{{ name }}.*.network_interface.0.access_config.0.nat_ip}" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/gcp/provider.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.project_id 3 | version = "~>2.16" 4 | } 5 | 
-------------------------------------------------------------------------------- /terraform/gcp/variables.tf: -------------------------------------------------------------------------------- 1 | variable "state_project" { 2 | default = "polkadot-benchmarks" 3 | } 4 | 5 | variable "project_id" { 6 | default = "polkadot-benchmarks" 7 | } 8 | 9 | variable "location" { 10 | default = "us-east1" 11 | } 12 | 13 | variable "zone" { 14 | default = "us-east1-b" 15 | } 16 | 17 | variable "machine_type" { 18 | default = "n1-standard-2" 19 | } 20 | 21 | variable "public_key" { 22 | default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC00G0RD6FW6Vn/kH7POW8qT9oqlsqNNtoNlGsnt/OYa+4ketST4OdT7aXaD2exRSPtrV6nyAUnr03KCH0a7B5YdAvWwhly+P27jAwcwnlJ0XizbmoPR4oA8I1UXksj3jbzqtyj1cyY6zdnWwVYW7vmFzTfrPUjFEvJRWBiwK4gqlPaUDQaOcAtQ38fGPpv0X4pS5K42fxfihOEDApDKf12AB/8Rsd98uymR9lUZ8YElAmnia1ql3xWLC6JP04VfNP3NWYG27jVfggAY9hGCdJ0SPhg4qqUQ9CD9WW2P0yovHdgzeUj0dZINWED3fG0N4TimfXDNAAY6lMGryELxCpN devops@web3.foundation" 23 | } 24 | 25 | variable "ssh_user" { 26 | default = "admin" 27 | } 28 | 29 | variable "node_count" { 30 | default = 1 31 | } 32 | 33 | variable "name" { 34 | default = "node" 35 | } 36 | 37 | variable "image" { 38 | default = "1804" 39 | } 40 | -------------------------------------------------------------------------------- /terraform/gcp/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/hetzner/README.md: -------------------------------------------------------------------------------- 1 | # Hetzner specific deployment 2 | 3 | ### Ansible user setup 4 | 5 | The included script `setup_users.sh` will create a `ssh_user` user with the specified password hash. 
This password can also be used when applying the ansible playbook with: 6 | 7 | `ansible-playbook main.yml --become --extra-vars "ansible_become_pass='$SUDO_PW'"` 8 | 9 | The password hash can be obtained by running: 10 | `openssl passwd -6` on linux machines (input should be SUDO_PW) 11 | 12 | ### Hcloud token 13 | 14 | You need to create a Hetzner API token to be able to use their service. Follow the [official docs](https://docs.hetzner.cloud/). 15 | 16 | ### SSH Keys 17 | Hetzner won't let you deploy the same key twice (no override), which becomes problematic during re-applying the state. 18 | 19 | The specified resource `hcloud_ssh_key` will be deployed if no key with the existing name exists in the Hetzner console. 20 | 21 | If your workflow is that of apply-reapply state, rather than apply-destroy, you should probably remove the `hcloud_ssh_key` resource, and instead independently deploy the key, by creating the ssh key locally & uploading to your Hetzner console. Make sure that `public_key_name` (Name) & `public_key` (SSH Key) match. 22 | 23 | ### Terraform variables 24 | 25 | It is recommended that you specify your Hetzner variables as env variables, e.g. 26 | ``` 27 | export TF_VAR_public_key="ssh-rsa..."
28 | export TF_VAR_public_key_name="" 29 | export TF_VAR_hcloud_token="" 30 | export TF_VAR_password_hash="" 31 | ``` 32 | -------------------------------------------------------------------------------- /terraform/hetzner/main.tf: -------------------------------------------------------------------------------- 1 | resource "hcloud_server" "validator" { 2 | server_type = var.server_type 3 | image = var.image 4 | name = "${var.name}-${count.index}" 5 | count = var.node_count 6 | location = var.location 7 | ssh_keys = [hcloud_ssh_key.default.id] 8 | user_data = templatefile("setup_users.sh", { user = var.ssh_user, public_key = var.public_key, password_hash = var.password_hash }) 9 | } 10 | 11 | resource "hcloud_ssh_key" "default" { 12 | name = var.public_key_name 13 | public_key = var.public_key 14 | } 15 | -------------------------------------------------------------------------------- /terraform/hetzner/output.tf: -------------------------------------------------------------------------------- 1 | output "ip_address" { 2 | value = hcloud_server.validator.*.ipv4_address 3 | } 4 | -------------------------------------------------------------------------------- /terraform/hetzner/provider.tf: -------------------------------------------------------------------------------- 1 | provider "hcloud" { 2 | token = var.hcloud_token 3 | } 4 | -------------------------------------------------------------------------------- /terraform/hetzner/setup_users.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # TODO Change to -M and nologin? 
4 | useradd -Ds /bin/bash 5 | useradd -m -G sudo ${user} -p '${password_hash}' | true 6 | 7 | mkdir -p /home/${user}/.ssh 8 | 9 | chown -R ${user}:${user} /home/${user}/.ssh 10 | 11 | echo ${public_key} >> /home/${user}/.ssh/authorized_keys 12 | -------------------------------------------------------------------------------- /terraform/hetzner/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | default = "w3f" 3 | } 4 | 5 | variable "hcloud_token" { 6 | default = "your token" 7 | } 8 | 9 | variable "public_key" { 10 | default = "" 11 | } 12 | 13 | variable "public_key_name" { 14 | default = "key-name" 15 | } 16 | 17 | variable "node_count" { 18 | type = number 19 | default = 1 20 | } 21 | 22 | variable "server_type" { 23 | default = "cx41" 24 | } 25 | 26 | variable "server_type_monitoring" { 27 | default = "cx21" 28 | } 29 | 30 | variable "location" { 31 | default = "nbg1" 32 | } 33 | 34 | variable "image" { 35 | default = "ubuntu-20.04" 36 | } 37 | 38 | variable "password_hash" { 39 | default = "$6$Y.TqqXVTsCf91DQl$u72Gkgnb5gLkVjwNLhznzf/j740mfhhtVaH.6k0ghIkBEoQqXi0uSI8iYjiC486LpMM16c0GD8mmPwsfRq5NC1" 40 | } 41 | 42 | variable "ssh_user" { 43 | default = "polkadot" 44 | } 45 | -------------------------------------------------------------------------------- /terraform/hetzner/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13" 3 | required_providers { 4 | hcloud = { 5 | source = "hetznercloud/hcloud" 6 | version = "1.26.0" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /terraform/packet/backend.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.state_project 3 | version = "~>2.15" 4 | } 5 | 6 | terraform { 7 | backend "gcs" { 8 | } 9 | } 10 | 
-------------------------------------------------------------------------------- /terraform/packet/main.tf: -------------------------------------------------------------------------------- 1 | #resource "packet_ssh_key" "key-{{ name }}" { 2 | # name = var.name 3 | # public_key = var.public_key 4 | #} 5 | 6 | resource "packet_device" "validator-{{ name }}" { 7 | hostname = "{{name}}-${count.index}" 8 | plan = var.machine_type 9 | facilities = [var.location] 10 | operating_system = var.image 11 | billing_cycle = "hourly" 12 | project_id = var.project_id 13 | # depends_on = ["packet_ssh_key.key-{{ name }}"] 14 | count = var.node_count 15 | } 16 | -------------------------------------------------------------------------------- /terraform/packet/output.tf: -------------------------------------------------------------------------------- 1 | output "ip_address" { 2 | value = "${packet_device.validator-{{ name }}.*.network.0.address}" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/packet/provider.tf: -------------------------------------------------------------------------------- 1 | provider "packet" { 2 | auth_token = var.auth_token 3 | version = "~>2.3" 4 | } 5 | -------------------------------------------------------------------------------- /terraform/packet/variables.tf: -------------------------------------------------------------------------------- 1 | variable "state_project" { 2 | default = "" 3 | } 4 | 5 | variable "project_id" { 6 | default = "my_project_id" 7 | } 8 | 9 | variable "auth_token" {} 10 | 11 | variable "location" { 12 | default = "ewr1" 13 | } 14 | 15 | variable "zone" { 16 | default = "" 17 | } 18 | 19 | variable "machine_type" { 20 | default = "t1.small.x86" 21 | } 22 | 23 | variable "public_key" { 24 | default = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC00G0RD6FW6Vn/kH7POW8qT9oqlsqNNtoNlGsnt/OYa+4ketST4OdT7aXaD2exRSPtrV6nyAUnr03KCH0a7B5YdAvWwhly+P27jAwcwnlJ0XizbmoPR4oA8I1UXksj3jbzqtyj1cyY6zdnWwVYW7vmFzTfrPUjFEvJRWBiwK4gqlPaUDQaOcAtQ38fGPpv0X4pS5K42fxfihOEDApDKf12AB/8Rsd98uymR9lUZ8YElAmnia1ql3xWLC6JP04VfNP3NWYG27jVfggAY9hGCdJ0SPhg4qqUQ9CD9WW2P0yovHdgzeUj0dZINWED3fG0N4TimfXDNAAY6lMGryELxCpN devops@web3.foundation" 25 | } 26 | 27 | variable "ssh_user" { 28 | default = "" 29 | } 30 | 31 | variable "node_count" { 32 | default = 1 33 | } 34 | 35 | variable "name" { 36 | default = "w3f" 37 | } 38 | 39 | variable "image" { 40 | default = "ubuntu_18_04" 41 | } 42 | -------------------------------------------------------------------------------- /terraform/packet/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/remote-state/main.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.state_project 3 | version = "~>2.15" 4 | } 5 | 6 | resource "google_storage_bucket" "imagestore" { 7 | name = var.name 8 | force_destroy = true 9 | } 10 | -------------------------------------------------------------------------------- /terraform/remote-state/variables.tf: -------------------------------------------------------------------------------- 1 | variable "state_project" { 2 | default = "my_project" 3 | } 4 | 5 | variable "name" { 6 | default = "my_name" 7 | } 8 | -------------------------------------------------------------------------------- /terraform/remote-state/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | } 4 | -------------------------------------------------------------------------------- /test/index.js: 
-------------------------------------------------------------------------------- 1 | describe('run', () => { 2 | }); 3 | -------------------------------------------------------------------------------- /test/lib/cmd.js: -------------------------------------------------------------------------------- 1 | const {splitCommandAndArgs} = require('../../src/lib/cmd'); 2 | 3 | require('chai').should() 4 | 5 | describe('Command splitting', () => { 6 | 7 | it('preserves args with spaces in but in quotes', () => { 8 | splitCommandAndArgs(`ansible-playbook main.yml -f 30 -i "/Users/user/Library/Application Support/polkadot-secure-validator/build/w3f/ansible/inventory"`) 9 | .should.deep.eq( 10 | [ 11 | 'ansible-playbook', 12 | 'main.yml', 13 | '-f', 14 | '30', 15 | '-i', 16 | '/Users/user/Library/Application Support/polkadot-secure-validator/build/w3f/ansible/inventory' 17 | ] 18 | ); 19 | 20 | }); 21 | 22 | it('preserves args ine key=value format', () => { 23 | splitCommandAndArgs(`terraform init -var state_project=kusama-infrastructure-state`) 24 | .should.deep.eq(['terraform', 'init', '-var', 'state_project=kusama-infrastructure-state']) 25 | }); 26 | }); 27 | -------------------------------------------------------------------------------- /test/lib/files.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | const tmp = require('tmp'); 3 | 4 | const subject = require('../../src/lib/files') 5 | 6 | require('chai').should() 7 | 8 | 9 | describe('Files', () => { 10 | before(() => { 11 | tmp.setGracefulCleanup(); 12 | }); 13 | 14 | describe('readJSON', () => { 15 | it('should return a JSON from existing JSON files', () => { 16 | const tmpobj = tmp.fileSync(); 17 | 18 | fs.writeFileSync(tmpobj.name, '{"field1": "value1", "field2": "value2"}'); 19 | 20 | const result = subject.readJSON(tmpobj.name); 21 | 22 | result['field1'].should.equal('value1'); 23 | result['field2'].should.equal('value2'); 24 | }); 25 | 
}); 26 | }); 27 | -------------------------------------------------------------------------------- /test/lib/tpl.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | const tmp = require('tmp'); 3 | 4 | const subject = require('../../src/lib/tpl') 5 | 6 | require('chai').should() 7 | 8 | 9 | describe('Tpl', () => { 10 | before(() => { 11 | tmp.setGracefulCleanup(); 12 | }); 13 | 14 | describe('create', () => { 15 | it('should create templated files', () => { 16 | const origin = tmp.fileSync(); 17 | const value= 'a=b'; 18 | fs.writeFileSync(origin.name, 'value is {{{ value }}}'); 19 | 20 | const target = tmp.fileSync(); 21 | 22 | const data = { value }; 23 | 24 | subject.create(origin.name, target.name, data); 25 | 26 | const actual = fs.readFileSync(target.name).toString(); 27 | const expected = 'value is a=b'; 28 | 29 | actual.should.eq(expected); 30 | }); 31 | }); 32 | }); 33 | -------------------------------------------------------------------------------- /test/lib/version.js: -------------------------------------------------------------------------------- 1 | const subject = require('../../src/lib/version'); 2 | 3 | require('chai').should() 4 | 5 | 6 | describe('version', () => { 7 | describe('show', () => { 8 | it('returns a semver', () => { 9 | subject.show().should.match(/\d+\.\d+\.\d+/); 10 | }); 11 | }); 12 | }); 13 | -------------------------------------------------------------------------------- /tpl/ansible_inventory: -------------------------------------------------------------------------------- 1 | {{#each validators }} 2 | [validator_{{@index}}] 3 | {{ this.ipAddress }} 4 | 5 | [validator_{{@index}}:vars] 6 | ansible_user={{ this.sshUser }} 7 | telemetryUrl={{ ../validatorTelemetryUrl }} 8 | loggingFilter='{{{ ../validatorLoggingFilter }}}' 9 | nodeName={{ this.nodeName }} 10 | 11 | {{/each}} 12 | 13 | [validator:children] 14 | {{#each validators }} 15 | validator_{{@index}} 16 
| {{/each}} 17 | 18 | 19 | [all:vars] 20 | project={{ project }} 21 | ansible_ssh_common_args='-o StrictHostKeyChecking=no -o ConnectTimeout=25 -o ControlMaster=no -o UserKnownHostsFile=/dev/null' 22 | polkadot_binary_url='{{ polkadotBinaryUrl }}' 23 | polkadot_binary_checksum='{{ polkadotBinaryChecksum }}' 24 | chain='{{ chain }}' 25 | polkadot_network_id='{{ polkadotNetworkId }}' 26 | build_dir={{ buildDir }} 27 | nginx_user='{{ nginxUsername }}' 28 | nginx_password='{{ nginxPassword }}' 29 | node_exporter_enabled='{{ nodeExporterEnabled }}' 30 | node_exporter_binary_url='{{ nodeExporterBinaryUrl }}' 31 | node_exporter_binary_checksum='{{ nodeExporterBinaryChecksum }}' 32 | polkadot_restart_enabled='{{ polkadotRestartEnabled }}' 33 | polkadot_restart_minute='{{ polkadotRestartMinute }}' 34 | polkadot_restart_hour='{{ polkadotRestartHour }}' 35 | polkadot_restart_day='{{ polkadotRestartDay }}' 36 | polkadot_restart_month='{{ polkadotRestartMonth }}' 37 | polkadot_restart_weekday='{{ polkadotRestartWeekDay }}' 38 | polkadot_additional_common_flags='{{{ polkadotAdditionalCommonFlags }}}' 39 | polkadot_additional_validator_flags='{{{ polkadotAdditionalValidatorFlags }}}' 40 | polkadot_additional_public_flags='{{{ polkadotAdditionalPublicFlags }}}' 41 | polkadot_db_snapshot_url='{{{ dbSnapshotUrl }}}' 42 | polkadot_db_snapshot_checksum='{{{ dbSnapshotChecksum }}}' 43 | -------------------------------------------------------------------------------- /tpl/tfvars: -------------------------------------------------------------------------------- 1 | state_project = "{{ stateProject }}" 2 | public_key = "{{{ publicKey }}}" 3 | ssh_user = "{{ sshUser }}" 4 | machine_type = "{{ machineType }}" 5 | location = "{{ location }}" 6 | zone = "{{ zone }}" 7 | project_id = "{{ projectId }}" 8 | node_count = {{ nodeCount }} 9 | name = "{{ name }}" 10 | {{#if image}} 11 | image = "{{ image }}" 12 | {{/if}} 13 | 
--------------------------------------------------------------------------------