├── meta └── runtime.yml ├── roles ├── nginx │ ├── README.md │ ├── files │ │ └── reload-nginx-config │ ├── .ansible-lint │ ├── templates │ │ ├── override.conf.j2 │ │ ├── site-default.j2 │ │ ├── nginx.conf.j2 │ │ ├── site-connect.j2 │ │ └── site-rpc.j2 │ ├── vars │ │ └── main.yml │ ├── molecule │ │ └── default │ │ │ ├── README.md │ │ │ ├── templates │ │ │ ├── http-stub.service.j2 │ │ │ ├── pebble.service.j2 │ │ │ ├── websocat.service.j2 │ │ │ └── pebble-config.json.j2 │ │ │ ├── converge.yml │ │ │ ├── verify.yml │ │ │ ├── molecule.yml │ │ │ ├── group_vars │ │ │ └── all.yml │ │ │ ├── files │ │ │ ├── pebble │ │ │ │ ├── cert.pem │ │ │ │ └── key.pem │ │ │ ├── test1.pem │ │ │ └── test2.pem │ │ │ └── prepare.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── certs-loop.yml │ │ ├── remove.yml │ │ ├── letsencrypt.yml │ │ ├── letsencrypt-loop.yml │ │ ├── tests.yml │ │ ├── certs.yml │ │ ├── sites.yml │ │ └── main.yml │ ├── .yamllint │ └── defaults │ │ └── main.yml ├── nginx_exporter │ ├── README.md │ ├── .ansible-lint │ ├── vars │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── defaults │ │ └── main.yml │ ├── templates │ │ └── .service.j2 │ ├── .yamllint │ └── tasks │ │ └── main.yml ├── state_exporter │ ├── README.md │ ├── .ansible-lint │ ├── handlers │ │ └── main.yml │ ├── defaults │ │ └── main.yml │ ├── templates │ │ └── .service.j2 │ ├── .yamllint │ ├── tasks │ │ └── main.yml │ └── files │ │ └── exporter.py ├── ws_health_exporter │ ├── molecule │ │ └── default │ │ │ ├── .gitignore │ │ │ ├── collections.yml │ │ │ ├── converge.yml │ │ │ ├── molecule.yml │ │ │ ├── verify.yml │ │ │ ├── group_vars │ │ │ └── all.yml │ │ │ └── prepare.yml │ ├── README.md │ ├── .ansible-lint │ ├── handlers │ │ └── main.yml │ ├── vars │ │ └── main.yml │ ├── .yamllint │ ├── defaults │ │ └── main.yml │ ├── templates │ │ └── .service.j2 │ └── tasks │ │ └── main.yml ├── key_inject │ ├── .ansible-lint │ ├── handlers │ │ └── main.yml │ ├── .yamllint │ ├── tasks │ │ ├── main.yml │ │ ├── check_session_key.yml │ │ └── inject.yml │ ├── defaults │ │ └── main.yml │ └── README.md ├── node_backup │ ├── .ansible-lint │ ├── molecule │ │ └── default │ │ │ ├── collections.yml │ │ │ ├── converge.yml │ │ │ ├── README.md │ │ │ ├── molecule.yml │ │ │ ├── verify.yml │ │ │ ├── group_vars │ │ │ └── all.yml │ │ │ └── prepare.yml │ ├── templates │ │ ├── node-backup.service.j2 │ │ ├── node-backup.timer.j2 │ │ ├── common-backup.sh.j2 │ │ ├── node-backup-exporter.service.j2 │ │ └── rclone │ │ │ └── rclone.conf.j2 │ ├── handlers │ │ └── main.yml │ ├── README.md │ ├── .yamllint │ ├── vars │ │ └── main.yml │ ├── tasks │ │ ├── exporter.yml │ │ ├── requirements.yml │ │ ├── main.yml │ │ ├── job.yml │ │ └── tests.yml │ └── defaults │ │ └── main.yml ├── secure_apt │ ├── .ansible-lint │ ├── vars │ │ └── main.yml │ ├── defaults │ │ └── main.yml │ ├── README.md │ ├── .yamllint │ └── tasks │ │ └── main.yml └── node │ ├── molecule │ ├── default │ │ ├── converge.yml │ │ ├── prepare.yml │ │ ├── molecule.yml │ │ ├── group_vars │ │ │ └── all.yml │ │ └── verify.yml │ ├── parachain │ │ ├── converge.yml │ │ ├── prepare.yml │ │ ├── molecule.yml │ │ ├── group_vars │ │ │ └── all.yml │ │ └── verify.yml │ ├── parachain_remote_rc │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── converge.yml │ │ ├── molecule.yml │ │ ├── prepare.yml │ │ └── verify.yml │ └── README.md │ ├── tasks │ ├── 1000-post-tasks.yml │ ├── 002-restart.yml │ ├── 500-memory-profiler.yml │ ├── includes │ │ └── _delete_db_folder.yml │ ├── 300-wipe.yml │ ├── 200-prepare.yml │ ├── 700-get-chainid.yml │ ├── 
900-systemd.yml │ ├── 801-restore-chain-tar.yml │ ├── 001-health-check.yml │ ├── 100-tests.yml │ ├── main.yml │ ├── 800-restore-chain.yml │ ├── 600-chain.yml │ ├── 803-restore-chain-http.yml │ └── 400-binary.yml │ ├── .ansible-lint │ ├── templates │ ├── annotation.j2 │ ├── node.service.j2 │ └── env.j2 │ ├── handlers │ └── main.yml │ ├── meta │ └── main.yml │ ├── .yamllint │ ├── README.md │ └── vars │ └── main.yml ├── .gitignore ├── .github └── workflows │ ├── pr-check-version.yml │ ├── pr-node.yml │ ├── pr-nginx.yml │ ├── pr-node-backup.yml │ ├── pr-secure-apt.yml │ ├── pr-state-exporter.yml │ ├── pr-nginx-exporter.yml │ ├── pr-ws-health-exporter.yml │ ├── requirements-molecule.txt │ ├── reusable-galaxy-deploy.yml │ ├── branch-main.yml │ ├── reusable-molecule.yml │ └── reusable-check-version.yml ├── README.md ├── galaxy.yml └── plugins ├── filter └── subkey.py └── README.md /meta/runtime.yml: -------------------------------------------------------------------------------- 1 | requires_ansible: ">=2.15.6" -------------------------------------------------------------------------------- /roles/nginx/README.md: -------------------------------------------------------------------------------- 1 | # nginx ansible role 2 | -------------------------------------------------------------------------------- /roles/nginx_exporter/README.md: -------------------------------------------------------------------------------- 1 | # nginx_exporter ansible role 2 | -------------------------------------------------------------------------------- /roles/state_exporter/README.md: -------------------------------------------------------------------------------- 1 | # state_exporter ansible role 2 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/molecule/default/.gitignore: -------------------------------------------------------------------------------- 1 | collections 2 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/README.md: -------------------------------------------------------------------------------- 1 | # ws_health_exporter ansible role 2 | -------------------------------------------------------------------------------- /roles/key_inject/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | skip_list: 3 | - name[casing] 4 | -------------------------------------------------------------------------------- /roles/nginx_exporter/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | skip_list: 3 | - name[casing] 4 | -------------------------------------------------------------------------------- /roles/node_backup/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | skip_list: 3 | - name[casing] 4 | -------------------------------------------------------------------------------- /roles/secure_apt/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | skip_list: 3 | - name[casing] 4 | -------------------------------------------------------------------------------- /roles/state_exporter/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | skip_list: 3 | - name[casing] 4 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/.ansible-lint: 
-------------------------------------------------------------------------------- 1 | --- 2 | skip_list: 3 | - name[casing] 4 | -------------------------------------------------------------------------------- /roles/nginx/files/reload-nginx-config: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /bin/systemctl reload nginx 3 | -------------------------------------------------------------------------------- /roles/nginx/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | skip_list: 3 | - name[casing] 4 | - name[template] 5 | -------------------------------------------------------------------------------- /roles/nginx/templates/override.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | LimitNOFILE={{ nginx_worker_rlimit_nofile }} -------------------------------------------------------------------------------- /roles/nginx/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | _nginx_custom_certs_base_path: /etc/nginx/tls-certs/ 4 | -------------------------------------------------------------------------------- /roles/secure_apt/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _secure_apt_keyring_folder: /usr/local/share/keyring 3 | -------------------------------------------------------------------------------- /roles/nginx_exporter/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _nginx_exporter_file: /usr/local/bin/nginx-prometheus-exporter 3 | -------------------------------------------------------------------------------- /roles/key_inject/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart service 3 | ansible.builtin.systemd: 4 | state: restarted 5 | name: "{{ node_app_name }}" 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .*.swp 2 | /*json 3 | /*key 4 | *private.key 5 | *service-account-key.json 6 | *.private_key_encrypted 7 | /ansible/collections 8 | venv 9 | .idea 10 | -------------------------------------------------------------------------------- /roles/node_backup/molecule/default/collections.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: https://github.com/paritytech/ansible-polkadot.git 4 | type: git 5 | version: main 6 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/molecule/default/collections.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: https://github.com/paritytech/ansible-polkadot.git 3 | type: git 4 | version: main 5 | -------------------------------------------------------------------------------- /roles/secure_apt/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | secure_apt_keyserver: keyserver.ubuntu.com 3 | 4 | secure_apt_key: "" 5 | 6 | secure_apt_repositories: [] 7 | secure_apt_update_cache: true 8 | -------------------------------------------------------------------------------- /roles/node_backup/templates/node-backup.service.j2: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Node backup systemd service 3 | 4 | [Service] 5 | Type=oneshot 6 | ExecStart={{ _node_backup_scripts_path }}/common.sh 7 | -------------------------------------------------------------------------------- /.github/workflows/pr-check-version.yml: -------------------------------------------------------------------------------- 1 | name: check Galaxy version 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | check-version: 8 | uses: ./.github/workflows/reusable-check-version.yml 9 | -------------------------------------------------------------------------------- /roles/node_backup/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | tasks: 5 | - name: Include node backup 6 | ansible.builtin.include_role: 7 | name: node_backup 8 | -------------------------------------------------------------------------------- /roles/node/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | gather_facts: true 5 | tasks: 6 | - name: Include node 7 | ansible.builtin.include_role: 8 | name: node 9 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | gather_facts: true 5 | tasks: 6 | - name: Include node 7 | ansible.builtin.include_role: 8 | name: node 9 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/README.md: -------------------------------------------------------------------------------- 1 | ### Molecule 2 | #### Docker 3 | Test role with docker driver 4 | ```shell 5 | molecule create 6 | molecule converge 7 | molecule verify 8 | molecule destroy 9 | ``` 10 | 11 | -------------------------------------------------------------------------------- /roles/nginx_exporter/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart nginx-exporter 3 | ansible.builtin.systemd: 4 | name: "{{ nginx_exporter_name }}" 5 | state: restarted 6 | enabled: true 7 | daemon_reload: true 8 | -------------------------------------------------------------------------------- /roles/state_exporter/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart state-exporter 3 | ansible.builtin.systemd: 4 | name: "{{ state_exporter_name }}" 5 | state: restarted 6 | enabled: true 7 | daemon_reload: true 8 | -------------------------------------------------------------------------------- /roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload nginx config 3 | ansible.builtin.systemd: 4 | name: nginx 5 | state: reloaded 6 | enabled: true 7 | daemon_reload: true 8 | ignore_errors: "{{ ansible_check_mode }}" 9 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart ws-health-exporter 3 | ansible.builtin.systemd: 4 | name: "{{ _ws_health_exporter_name }}" 5 | state: restarted 6 
| enabled: true 7 | daemon_reload: true 8 | -------------------------------------------------------------------------------- /roles/state_exporter/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | state_exporter_name: state-exporter 4 | state_exporter_user: parity 5 | state_exporter_file: /home/{{ state_exporter_user }}/bin/{{ state_exporter_name }}.py 6 | state_exporter_debug: false 7 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | _ws_health_exporter_name: ws-health-exporter 4 | _ws_health_exporter_file: "{{ ws_health_exporter_base_path }}/exporter.py" 5 | _ws_health_exporter_venv: "{{ ws_health_exporter_base_path }}/venv" 6 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | gather_facts: false 5 | tasks: 6 | - name: Include ws_health_exporter 7 | ansible.builtin.include_role: 8 | name: ws_health_exporter 9 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/templates/http-stub.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=websocat systemd service 3 | 4 | [Service] 5 | ExecStart=/usr/bin/python3 -m http.server 9933 6 | 7 | Restart=always 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: converge 3 | hosts: all 4 | tasks: 5 | - name: converge | deploy nginx without wipe 6 | ansible.builtin.include_role: 7 | name: nginx 8 | vars: 9 | nginx_remove_enable: false 10 | -------------------------------------------------------------------------------- /roles/node/tasks/1000-post-tasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Post tasks | Remove temporary directory 3 | ansible.builtin.file: 4 | path: "{{ _node_temp_dir.path }}" 5 | state: absent 6 | check_mode: false 7 | changed_when: false 8 | when: _node_temp_dir.path is defined 9 | -------------------------------------------------------------------------------- /roles/node_backup/templates/node-backup.timer.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Node backup systemd timer 3 | 4 | [Timer] 5 | {% for time in node_backup_schedule %} 6 | OnCalendar={{ time }} 7 | {% endfor %} 8 | Persistent=true 9 | 10 | [Install] 11 | WantedBy=timers.target 12 | -------------------------------------------------------------------------------- /roles/nginx/tasks/certs-loop.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nginx | certs | copy {{ item }} 3 | ansible.builtin.copy: 4 | src: "{{ item }}" 5 | dest: "{{ _nginx_custom_certs_base_path }}{{ item }}" 6 | owner: root 7 | group: root 8 | mode: "0600" 9 | notify: reload nginx config 10 | -------------------------------------------------------------------------------- /roles/nginx_exporter/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | nginx_exporter_name: nginx-exporter 4 | nginx_exporter_user: www-data 5 | nginx_exporter_binary: https://github.com/nginxinc/nginx-prometheus-exporter/releases/download/v0.10.0/nginx-prometheus-exporter_0.10.0_linux_amd64.tar.gz 6 | nginx_metric_port: 8080 7 | -------------------------------------------------------------------------------- /roles/node/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | skip_list: 3 | - name[casing] 4 | - empty-string-compare # Don't compare to empty string 5 | - experimental # all rules tagged as experimental 6 | - "306" # Ignore not setting pipefail - required for sh shell 7 | - name[template] # Style enforcement 8 | - ignore-errors 9 | -------------------------------------------------------------------------------- /roles/node_backup/templates/common-backup.sh.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | {% for target in _node_backup_targets %} 4 | now=$(date +"%Y%m%d-%H%M%S") 5 | unbuffer bash {{ _node_backup_scripts_path }}/{{ target.id }}.sh "${now}" 2>&1 | tee "{{ _node_backup_log_path }}/{{ target.service_name }}-${now}.txt" 6 | {% endfor %} -------------------------------------------------------------------------------- /roles/nginx/molecule/default/templates/pebble.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=pebble systemd service 3 | 4 | [Service] 5 | Environment="PEBBLE_WFE_NONCEREJECT=20" 6 | ExecStart={{ pebble_binary }} -config {{ pebble_conf_dir }}pebble-config.json 7 | Restart=always 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | -------------------------------------------------------------------------------- /roles/node/templates/annotation.j2: -------------------------------------------------------------------------------- 1 | ansible_annotation{role="{{ node_role }}",version="{{ _node_binary_version_from_url }}",commit_hash="{{ _node_commit_hash }}",event="{{ node_prometheus_file_exporter_event }}",{% if node_parachain_chain != '' %}chain="{{ node_parachain_chain }}"{%- else %}chain="{{ node_chain }}"{% endif %}} 1 2 | -------------------------------------------------------------------------------- /roles/node/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart service {{ node_handler_id }} 3 | ansible.builtin.include_tasks: tasks/002-restart.yml 4 | when: node_start_service | bool 5 | 6 | - name: health check {{ node_handler_id }} 7 | ansible.builtin.include_tasks: tasks/001-health-check.yml 8 | when: node_start_service | bool 9 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/templates/websocat.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=websocat systemd service 3 | 4 | [Service] 5 | ExecStart={{ websocat_binary }} --oneshot -b ws-l:127.0.0.1:9944 tcp:127.0.0.1:9933 6 | Restart=always 7 | Wants=netcat.service 8 | After=netcat.service 9 | 10 | [Install] 11 | WantedBy=multi-user.target 12 | -------------------------------------------------------------------------------- /roles/node/tasks/002-restart.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart | Restart 
service 3 | ansible.builtin.systemd: 4 | name: "{{ node_app_name }}" 5 | state: restarted 6 | enabled: true 7 | daemon_reload: true 8 | notify: health check {{ node_handler_id }} 9 | ignore_errors: "{{ not _node_systemd_unit_file_stat.stat.exists }}" 10 | -------------------------------------------------------------------------------- /roles/state_exporter/templates/.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Node backup exporter systemd service 3 | 4 | [Service] 5 | Environment=PYTHONUNBUFFERED=True 6 | ExecStart={{ state_exporter_file }}{% if state_exporter_debug %} debug{% endif %} 7 | 8 | Restart=always 9 | User={{ state_exporter_user }} 10 | Group={{ state_exporter_user}} 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /roles/node_backup/templates/node-backup-exporter.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Node backup exporter systemd service 3 | 4 | [Service] 5 | Environment=PYTHONUNBUFFERED=True 6 | ExecStart={{ _node_backup_venv_path }}/bin/python3 {{ _node_backup_exporter_file }} 7 | Restart=always 8 | User={{ node_backup_user }} 9 | Group={{ node_backup_user }} 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /roles/node_backup/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart node-backup exporter 3 | ansible.builtin.systemd: 4 | name: node-backup-exporter 5 | state: restarted 6 | enabled: true 7 | daemon_reload: true 8 | 9 | - name: restart node-backup timer 10 | ansible.builtin.systemd: 11 | name: node-backup.timer 12 | state: restarted 13 | enabled: true 14 | daemon_reload: true 15 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/templates/pebble-config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "pebble": { 3 | "listenAddress": "127.0.0.1:14000", 4 | "managementListenAddress": "127.0.0.1:15000", 5 | "certificate": "{{ pebble_conf_dir }}cert.pem", 6 | "privateKey": "{{ pebble_conf_dir }}key.pem", 7 | "httpPort": 80, 8 | "tlsPort": 443, 9 | "ocspResponderURL": "", 10 | "externalAccountBindingRequired": false 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /roles/node/molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: false 5 | pre_tasks: 6 | - name: Install python 7 | ansible.builtin.raw: apt -y update && apt install -y python3 8 | changed_when: false 9 | - name: Install required packages 10 | ansible.builtin.apt: 11 | name: 12 | - gpg 13 | update_cache: false 14 | changed_when: false 15 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: false 5 | pre_tasks: 6 | - name: Install Python 7 | ansible.builtin.raw: apt -y update && apt install -y python3 8 | changed_when: false 9 | - name: Install required packages 10 | ansible.builtin.apt: 11 | name: 12 | - gpg 13 | 
update_cache: false 14 | changed_when: false 15 | -------------------------------------------------------------------------------- /roles/nginx_exporter/templates/.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Nginx exporter systemd service 3 | After=nginx.service 4 | Requires=nginx.service 5 | PartOf=nginx.service 6 | 7 | [Service] 8 | ExecStart={{ _nginx_exporter_file }} -nginx.scrape-uri http://127.0.0.1:{{ nginx_metric_port }}/stub_status 9 | 10 | Restart=always 11 | User={{ nginx_exporter_user }} 12 | Group={{ nginx_exporter_user }} 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /roles/nginx/templates/site-default.j2: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80 default_server; 3 | server_name _; 4 | 5 | location /.well-known/acme-challenge { 6 | root /var/www/letsencrypt; 7 | try_files $uri $uri/ =404; 8 | } 9 | 10 | location / { 11 | rewrite ^ https://$host$request_uri? permanent; 12 | } 13 | } 14 | 15 | server { 16 | listen 8080; 17 | location = /stub_status { 18 | stub_status; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /roles/node_backup/molecule/default/README.md: -------------------------------------------------------------------------------- 1 | ### Collection 2 | 3 | Molecule should install the collection automatically. If it did not happen, run: 4 | ```commandline 5 | mkdir molecule/default/collections 6 | ansible-galaxy collection install -f -r molecule/default/collections.yml -p ./molecule/default/collections 7 | ``` 8 | 9 | ### Molecule 10 | #### Docker 11 | Test role with docker driver 12 | ```shell 13 | molecule create 14 | molecule converge 15 | molecule destroy 16 | ``` 17 | 18 | 19 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain_remote_rc/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Molecule 3 | ansible_user: root 4 | 5 | # Common 6 | node_app_name: dummy 7 | node_binary_version: v0.9.430 8 | node_legacy_rpc_flags: false 9 | node_parachain_rpc_port: 9954 10 | node_binary: https://github.com/paritytech/cumulus/releases/download/{{ node_binary_version }}/polkadot-parachain 11 | node_binary_signature: https://github.com/paritytech/cumulus/releases/download/{{ node_binary_version }}/polkadot-parachain.asc 12 | node_enable_public_ip_detection: false 13 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain_remote_rc/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | gather_facts: true 5 | tasks: 6 | - name: parachain 7 | ansible.builtin.include_role: 8 | name: node 9 | vars: 10 | node_app_name: parachain-shell 11 | node_parachain_chain: shell 12 | node_parachain_role: collator 13 | node_parachain_chain_backup_restoring_type: none 14 | node_parachain_relay_chain_rpc_urls: [ws://127.0.0.1:9944] 15 | node_prometheus_file_exporter_path: /tmp/substrate-pc.prom 16 | -------------------------------------------------------------------------------- /roles/node/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Devops Team 3 | description: 
Substrate/Polkadot node deployment ansible role 4 | company: paritytech 5 | license: GPL-2.0-or-later 6 | 7 | min_ansible_version: "2.10" 8 | 9 | platforms: 10 | - name: Debian 11 | versions: 12 | - all 13 | 14 | galaxy_tags: 15 | - parity 16 | - validator 17 | - parachain 18 | - substrate 19 | - polkadot 20 | - kusama 21 | - westend 22 | - paseo 23 | - collator 24 | - rpc 25 | 26 | dependencies: [] 27 | -------------------------------------------------------------------------------- /roles/node_backup/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: ${DRIVER:-docker} 6 | platforms: 7 | - name: molecule-instance-node-backup 8 | # DOCKER 9 | image: paritytech/debian11:latest 10 | command: ${MOLECULE_DOCKER_COMMAND:-""} 11 | privileged: true 12 | pre_build_image: true 13 | 14 | provisioner: 15 | name: ansible 16 | options: 17 | diff: true 18 | config_options: 19 | defaults: 20 | callbacks_enabled: timer 21 | verifier: 22 | name: ansible 23 | options: 24 | diff: true 25 | -------------------------------------------------------------------------------- /roles/secure_apt/README.md: -------------------------------------------------------------------------------- 1 | secure_apt 2 | ========= 3 | 4 | A role to add an APT repository and its signing key securely, since the apt_key Ansible module is deprecated 5 | 6 | Requirements 7 | -------------- 8 | 9 | * You have to be able to use `become` 10 | 11 | Example Playbook 12 | ---------------- 13 | 14 | - hosts: servers 15 | roles: 16 | - paritytech.common.secure_apt 17 | vars: 18 | secure_apt_key: B53DC80D13EDEF05 19 | secure_apt_repositories: 20 | - https://packages.cloud.google.com/apt cloud-sdk-{{ ansible_distribution_release }} main 21 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain_remote_rc/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: ${DRIVER:-docker} 6 | platforms: 7 | - name: molecule-instance-node-parachain-remote-rc 8 | # DOCKER 9 | image: paritytech/debian11:latest 10 | command: ${MOLECULE_DOCKER_COMMAND:-""} 11 | privileged: true 12 | pre_build_image: true 13 | provisioner: 14 | name: ansible 15 | options: 16 | D: true 17 | config_options: 18 | defaults: 19 | callbacks_enabled: timer 20 | verifier: 21 | name: ansible 22 | options: 23 | D: true 24 | -------------------------------------------------------------------------------- /roles/node_backup/README.md: -------------------------------------------------------------------------------- 1 | node_backup 2 | ========= 3 | This role templates out the backup script and the backup Prometheus exporter, and creates the relevant systemd units.
4 | The nodes deployed on the same instance are regular Substrate nodes that simply sync the chain. 5 | The backup is made from their local database; the nodes do no work other than synchronization.
6 | A node is stopped while its chain is being backed up, because otherwise the database would keep changing during 7 | the backup and corrupt it. 8 |

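A minimal usage sketch, added here for illustration: it assumes only the variable names visible in this role's templates and vars (`node_backup_schedule`, and `node_backup_targets` entries with `service_name`, `id` and `type`); host names and values are examples, and the role's `defaults/main.yml` remains the authoritative variable reference.

```yaml
# Hypothetical playbook wiring in the node_backup role; values are illustrative only.
- name: Back up local substrate nodes
  hosts: backup_hosts
  become: true
  tasks:
    - name: Include node backup
      ansible.builtin.include_role:
        name: node_backup
      vars:
        node_backup_schedule:        # rendered as OnCalendar= lines in node-backup.timer
          - "*-*-* 01:00:00"
        node_backup_targets:         # one backup script is generated per target
          - service_name: polkadot   # systemd unit of the syncing node (also used in log file names)
            id: polkadot-rocksdb     # name of the generated per-target backup script
            type: gcp-native         # one of: gcp-native, gcp-rclone, r2-rclone, s3-rclone
```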
9 | -------------------------------------------------------------------------------- /.github/workflows/pr-node.yml: -------------------------------------------------------------------------------- 1 | name: check PR (node) 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - roles/node/** 7 | - .github/** 8 | push: 9 | paths: 10 | - roles/node/** 11 | - .github/** 12 | branches: 13 | - '!main' 14 | - '**' 15 | 16 | jobs: 17 | run-molecule-tests: 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | molecule-driver: [docker] 22 | uses: ./.github/workflows/reusable-molecule.yml 23 | with: 24 | role-name: node 25 | molecule-driver: ${{ matrix.molecule-driver }} 26 | -------------------------------------------------------------------------------- /.github/workflows/pr-nginx.yml: -------------------------------------------------------------------------------- 1 | name: check PR (nginx) 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - roles/nginx/** 7 | - .github/** 8 | push: 9 | paths: 10 | - roles/nginx/** 11 | - .github/** 12 | branches: 13 | - '!main' 14 | - '**' 15 | 16 | jobs: 17 | run-molecule-tests: 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | molecule-driver: [docker] 22 | uses: ./.github/workflows/reusable-molecule.yml 23 | with: 24 | role-name: nginx 25 | molecule-driver: ${{ matrix.molecule-driver }} 26 | -------------------------------------------------------------------------------- /roles/node/tasks/500-memory-profiler.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Memory profiler | Create directories 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | mode: "0755" 7 | owner: "{{ node_user }}" 8 | group: "{{ node_user }}" 9 | loop: 10 | - "{{ _node_memory_profiler_log_path }}" 11 | 12 | - name: Memory profiler | Download 13 | ansible.builtin.unarchive: 14 | src: "{{ node_memory_profiler_binary }}" 15 | dest: "{{ _node_binary_path }}" 16 | remote_src: true 17 | owner: "{{ node_user }}" 18 | group: "{{ node_user }}" 19 | mode: "0644" 20 | -------------------------------------------------------------------------------- /.github/workflows/pr-node-backup.yml: -------------------------------------------------------------------------------- 1 | name: check PR (node_backup) 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - roles/node_backup/** 7 | - .github/** 8 | push: 9 | paths: 10 | - roles/node_backup/** 11 | - .github/** 12 | branches: 13 | - '!main' 14 | - '**' 15 | 16 | jobs: 17 | run-molecule-tests: 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | molecule-driver: [docker] 22 | uses: ./.github/workflows/reusable-molecule.yml 23 | with: 24 | role-name: node_backup 25 | molecule-driver: ${{ matrix.molecule-driver }} -------------------------------------------------------------------------------- /.github/workflows/pr-secure-apt.yml: -------------------------------------------------------------------------------- 1 | name: check PR (secure_apt) 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - roles/secure_apt/** 7 | - .github/** 8 | push: 9 | paths: 10 | - roles/secure_apt/** 11 | - .github/** 12 | branches: 13 | - '!main' 14 | - '**' 15 | 16 | jobs: 17 | run-molecule-tests: 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | molecule-driver: [docker] 22 | uses: ./.github/workflows/reusable-molecule.yml 23 | with: 24 | role-name: secure_apt 25 | molecule-driver: ${{ matrix.molecule-driver }} --------------------------------------------------------------------------------
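For orientation only: the `pr-*` workflows in this repository delegate to `.github/workflows/reusable-molecule.yml`, whose contents are not part of this excerpt. The sketch below shows how a reusable workflow of that shape can consume the `role-name` and `molecule-driver` inputs passed above; it is an assumption-based illustration, not the repository's actual file (the `DRIVER` variable is suggested by the `${DRIVER:-docker}` default in the molecule.yml files, and `requirements-molecule.txt` is present in the workflows directory).

```yaml
# Sketch of a reusable molecule workflow; not the real reusable-molecule.yml.
name: reusable molecule

on:
  workflow_call:
    inputs:
      role-name:
        required: true
        type: string
      molecule-driver:
        required: true
        type: string

jobs:
  molecule:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install molecule and its dependencies
        run: pip3 install -r .github/workflows/requirements-molecule.txt
      - name: Run molecule for the selected role
        run: molecule test
        working-directory: roles/${{ inputs.role-name }}
        env:
          DRIVER: ${{ inputs.molecule-driver }}
```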
/.github/workflows/pr-state-exporter.yml: -------------------------------------------------------------------------------- 1 | name: check PR (state_exporter) 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - roles/state_exporter/** 7 | - .github/** 8 | push: 9 | paths: 10 | - roles/state_exporter/** 11 | - .github/** 12 | branches: 13 | - '!main' 14 | - '**' 15 | 16 | jobs: 17 | run-molecule-tests: 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | molecule-driver: [docker] 22 | uses: ./.github/workflows/reusable-molecule.yml 23 | with: 24 | role-name: state_exporter 25 | molecule-driver: ${{ matrix.molecule-driver }} -------------------------------------------------------------------------------- /.github/workflows/pr-nginx-exporter.yml: -------------------------------------------------------------------------------- 1 | name: check PR (nginx_exporter) 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - roles/nginx_exporter/** 7 | - .github/** 8 | push: 9 | paths: 10 | - roles/nginx_exporter/** 11 | - .github/** 12 | branches: 13 | - '!main' 14 | - '**' 15 | 16 | jobs: 17 | run-molecule-tests: 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | molecule-driver: [docker] 22 | uses: ./.github/workflows/reusable-molecule.yml 23 | with: 24 | role-name: nginx_exporter 25 | molecule-driver: ${{ matrix.molecule-driver }} 26 | -------------------------------------------------------------------------------- /.github/workflows/pr-ws-health-exporter.yml: -------------------------------------------------------------------------------- 1 | name: check PR (ws_health_exporter) 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - roles/ws_health_exporter/** 7 | - .github/** 8 | push: 9 | paths: 10 | - roles/ws_health_exporter/** 11 | - .github/** 12 | branches: 13 | - '!main' 14 | - '**' 15 | 16 | jobs: 17 | run-molecule-tests: 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | molecule-driver: [docker] 22 | uses: ./.github/workflows/reusable-molecule.yml 23 | with: 24 | role-name: ws_health_exporter 25 | molecule-driver: ${{ matrix.molecule-driver }} 26 | -------------------------------------------------------------------------------- /roles/node/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: ${DRIVER:-docker} 6 | platforms: 7 | - name: molecule-instance-node 8 | # DOCKER 9 | image: paritytech/debian11:latest 10 | command: ${MOLECULE_DOCKER_COMMAND:-""} 11 | # need this for systemctl to work in Docker 12 | privileged: true 13 | # to pull image from docker hub uncomment this 14 | pre_build_image: true 15 | 16 | provisioner: 17 | name: ansible 18 | options: 19 | D: true 20 | config_options: 21 | defaults: 22 | callbacks_enabled: timer 23 | verifier: 24 | name: ansible 25 | options: 26 | D: true 27 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: ${DRIVER:-docker} 6 | platforms: 7 | - name: molecule-instance-node-parachain 8 | # DOCKER 9 | image: paritytech/debian11:latest 10 | command: ${MOLECULE_DOCKER_COMMAND:-""} 11 | # need this for systemctl to work in Docker 12 | privileged: true 13 | # to pull image from docker hub uncomment this 14 | pre_build_image: true 15 | 16 | provisioner: 17 | name: ansible 18 | options: 19 | D: true 20 | config_options: 21 | 
defaults: 22 | callbacks_enabled: timer 23 | verifier: 24 | name: ansible 25 | options: 26 | D: true 27 | -------------------------------------------------------------------------------- /roles/nginx/tasks/remove.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nginx | remove | stop nginx 3 | ansible.builtin.systemd: 4 | name: nginx 5 | state: stopped 6 | 7 | - name: nginx | remove | remove packeges 8 | ansible.builtin.apt: 9 | name: "{{ packeges }}" 10 | state: absent 11 | purge: true 12 | vars: 13 | packeges: 14 | - nginx 15 | - nginx-common 16 | - nginx-full 17 | - certbot 18 | 19 | - name: nginx | remove | remove directories 20 | ansible.builtin.file: 21 | name: "{{ item }}" 22 | state: absent 23 | loop: 24 | - /var/www/letsencrypt 25 | - /etc/letsencrypt 26 | - /etc/nginx 27 | - /etc/systemd/system/nginx.service.d 28 | -------------------------------------------------------------------------------- /roles/node/tasks/includes/_delete_db_folder.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restore {{ item.part }} | Stop the service 3 | ansible.builtin.systemd: 4 | name: "{{ node_app_name }}" 5 | state: stopped 6 | notify: restart service {{ node_handler_id }} 7 | ignore_errors: "{{ not _node_systemd_unit_file_stat.stat.exists }}" 8 | 9 | - name: Restore {{ item.part }} | Delete db folder 10 | ansible.builtin.file: 11 | path: "{{ item.chain_path }}/{{ item.db_folder }}" 12 | state: absent 13 | 14 | - name: Restore {{ item.part }} | Recreate db folder 15 | ansible.builtin.file: 16 | path: "{{ item.chain_path }}/{{ item.db_folder }}" 17 | state: directory 18 | mode: "0755" 19 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: ${DRIVER:-docker} 6 | platforms: 7 | - name: molecule-instance-ws-health-exporter 8 | # DOCKER 9 | image: paritytech/debian11:latest 10 | command: ${MOLECULE_DOCKER_COMMAND:-""} 11 | # need this for systemctl to work in Docker 12 | privileged: true 13 | # to pull image from docker hub uncomment this 14 | pre_build_image: true 15 | 16 | provisioner: 17 | name: ansible 18 | options: 19 | D: true 20 | config_options: 21 | defaults: 22 | callbacks_enabled: timer 23 | verifier: 24 | name: ansible 25 | options: 26 | D: true 27 | -------------------------------------------------------------------------------- /roles/nginx/tasks/letsencrypt.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nginx | letsencrypt | calculate list of letsencrypt domains 3 | ansible.builtin.set_fact: 4 | _nginx_letsencrypt_domains: "{{ nginx_sites | json_query(pattern) | map(attribute='domain') | unique }}" 5 | vars: 6 | pattern: "[?ssl_issuer==`letsencrypt`]" 7 | 8 | - name: nginx| letsencrypt | print list of letsencrypt domains 9 | ansible.builtin.debug: 10 | var: _nginx_letsencrypt_domains 11 | 12 | - name: nginx | letsencrypt | include issuing tasks of letsencrypt certs 13 | ansible.builtin.include_tasks: 14 | file: letsencrypt-loop.yml 15 | apply: 16 | tags: [nginx, nginx-letsencrypt] 17 | loop: "{{ _nginx_letsencrypt_domains }}" 18 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/verify.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: verify 3 | hosts: all 4 | gather_facts: false 5 | tasks: 6 | - name: verify | deploy nginx with wipe 7 | ansible.builtin.include_role: 8 | name: nginx 9 | vars: 10 | nginx_remove_enable: true 11 | - name: verify | check https RPC endpoints 12 | ansible.builtin.uri: 13 | url: https://{{ item.domain }} 14 | validate_certs: false 15 | loop: "{{ nginx_sites }}" 16 | when: item.template == 'site-rpc.j2' 17 | - name: verify | check wss RPC endpoints 18 | ansible.builtin.command: websocat --insecure -E wss:///{{ item.domain }} 19 | changed_when: false 20 | loop: "{{ nginx_sites }}" 21 | -------------------------------------------------------------------------------- /roles/nginx/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /roles/node/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /roles/key_inject/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /roles/node_backup/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 
6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /roles/secure_apt/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /roles/nginx_exporter/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /roles/state_exporter/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | 
brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: ${DRIVER:-docker} 6 | platforms: 7 | - name: molecule-instance-nginx 8 | # DOCKER 9 | image: paritytech/debian11:latest 10 | command: ${MOLECULE_DOCKER_COMMAND:-""} 11 | # need this for systemctl to work in Docker 12 | privileged: true 13 | # to pull image from docker hub uncomment this 14 | pre_build_image: true 15 | etc_hosts: 16 | a.rpc.lan: 127.0.0.1 17 | b.rpc.lan: 127.0.0.1 18 | c.rpc.lan: 127.0.0.1 19 | d.rpc.lan: 127.0.0.1 20 | 21 | provisioner: 22 | name: ansible 23 | options: 24 | D: true 25 | config_options: 26 | defaults: 27 | callbacks_enabled: timer 28 | verifier: 29 | name: ansible 30 | options: 31 | D: true 32 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Molecule 3 | ansible_user: root 4 | 5 | nginx_letsencrypt_mock: true 6 | nginx_dhparam_size: 1024 7 | nginx_sites: 8 | - template: site-rpc.j2 9 | domain: a.rpc.lan 10 | ssl_issuer: letsencrypt 11 | params: 12 | rpc_port: 9933 13 | rpc_ws_port: 9944 14 | - template: site-rpc.j2 15 | domain: b.rpc.lan 16 | ssl_issuer: manual 17 | ssl_manual_cert_file: test1.pem 18 | params: 19 | rpc_port: 9933 20 | rpc_ws_port: 9944 21 | - template: site-connect.j2 22 | domain: c.rpc.lan 23 | ssl_issuer: letsencrypt 24 | params: 25 | connect_port: 9944 26 | - template: site-connect.j2 27 | domain: d.rpc.lan 28 | ssl_issuer: manual 29 | ssl_manual_cert_file: test2.pem 30 | params: 31 | connect_port: 9944 32 | -------------------------------------------------------------------------------- /roles/node_backup/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | _node_backup_scripts_path: "{{ node_backup_base_path }}/scripts" 4 | _node_backup_log_path: "{{ node_backup_base_path }}/logs" 5 | _node_backup_venv_path: "{{ node_backup_base_path }}/venv" 6 | _node_backup_exporter_path: "{{ node_backup_base_path }}/exporter" 7 | _node_backup_exporter_file: "{{ _node_backup_exporter_path }}/exporter.py" 8 | _node_backup_exporter_cache_file: "{{ _node_backup_exporter_path }}/exporter.cache" 9 | _node_backup_rclone_deb: https://downloads.rclone.org/v1.63.1/rclone-v1.63.1-linux-amd64.deb 10 | 11 | _node_backup_r2_types: [r2-rclone] 12 | _node_backup_gcp_types: [gcp-native, gcp-rclone] 13 | _node_backup_rclone_types: [gcp-rclone, r2-rclone, s3-rclone] 14 | _node_backup_storages: 15 | s3-rclone: s3 16 | r2-rclone: r2 17 | gcp-rclone: gcp 18 | gcp-native: gcp 19 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain/group_vars/all.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | ## Molecule 3 | ansible_user: root 4 | 5 | # Common 6 | node_binary_version: v0.9.430 7 | node_legacy_rpc_flags: false 8 | node_rpc_port: 9944 9 | node_parachain_rpc_port: 9954 10 | node_binary: https://github.com/paritytech/cumulus/releases/download/{{ node_binary_version }}/polkadot-parachain 11 | node_binary_signature: https://github.com/paritytech/cumulus/releases/download/{{ node_binary_version }}/polkadot-parachain.asc 12 | node_app_name: shell 13 | node_prometheus_file_exporter_path: /tmp/substrate.prom 14 | node_enable_public_ip_detection: false 15 | 16 | # Relaychain 17 | node_chain: paseo 18 | node_chainspec: https://paritytech.github.io/chainspecs/paseo/relaychain/chainspec.json 19 | node_chain_backup_restoring_type: none 20 | 21 | # Parachain 22 | node_parachain_chain: shell 23 | node_parachain_chain_backup_restoring_type: none 24 | node_parachain_role: collator 25 | -------------------------------------------------------------------------------- /roles/node_backup/templates/rclone/rclone.conf.j2: -------------------------------------------------------------------------------- 1 | {% if node_backup_targets | json_query('[].type') | intersect(_node_backup_rclone_types) | length > 0 %} 2 | [S3backups] 3 | type = s3 4 | provider = {{ node_backup_s3_provider }} 5 | access_key_id = {{ node_backup_s3_access_key_id }} 6 | secret_access_key = {{ node_backup_s3_secret_access_key }} 7 | endpoint = {{ node_backup_s3_endpoint }} 8 | {% if node_backup_s3_region != "" %} 9 | region = {{ node_backup_s3_region }} 10 | {% endif %} 11 | acl = private 12 | upload_cutoff = 1024M 13 | upload_concurrency = {{ node_backup_max_concurrent_requests }} 14 | chunk_size = 256M 15 | {% if node_backup_s3_provider == "Cloudflare" %} 16 | no_check_bucket = true 17 | {% endif %} 18 | {% endif %} 19 | 20 | {% if node_backup_targets | json_query('[].type') | intersect(_node_backup_gcp_types) | length > 0 %} 21 | [GCPbackups] 22 | type = google cloud storage 23 | bucket_policy_only = true 24 | {% endif %} 25 | 26 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ws_health_exporter_url: https://raw.githubusercontent.com/paritytech/scripts/9961136320c2454fe99ba4643c156becd84ae704/dockerfiles/ws-health-exporter/exporter.py 4 | ws_health_exporter_base_path: /opt/{{ _ws_health_exporter_name }} 5 | # user has to be created by the role user 6 | ws_health_exporter_user: polkadot 7 | 8 | # you can find more details here 9 | # https://github.com/paritytech/scripts/blob/master/dockerfiles/ws-health-exporter/README.md 10 | ws_health_exporter_host: "0.0.0.0" 11 | ws_health_exporter_port: 8001 12 | ws_health_exporter_log_level: INFO 13 | ws_health_exporter_ws_check_interval: 10 14 | ws_health_exporter_ws_timeout: 60 15 | ws_health_exporter_node_max_unsynchronized_block_drift: 0 # blocks, 0 - disabled 16 | ws_health_exporter_node_min_peers: 10 # peers 17 | ws_health_exporter_min_block_rate: 0.0 # blocks/second, 0.0 - disabled 18 | ws_health_exporter_block_rate_measurement_period: 600 # seconds 19 | 20 | ws_health_exporter_ws_urls: 21 | - ws://127.0.0.1:9944 22 | -------------------------------------------------------------------------------- /roles/key_inject/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 
- tags: [key-inject, key_inject] 3 | block: 4 | - name: Parachain keys 5 | ansible.builtin.include_tasks: inject.yml 6 | loop: 7 | - rpc_port: "{{ key_inject_parachain_rpc_port }}" 8 | scheme: "{{ key_inject_parachain_scheme }}" 9 | type: aura 10 | priv_key: "{{ key_inject_parachain_aura_private_key }}" 11 | loop_control: 12 | label: Parachain {{ item.type }} key 13 | when: key_inject_parachain_aura_private_key is defined 14 | 15 | - name: Relaychain keys 16 | ansible.builtin.include_tasks: inject.yml 17 | loop: "{{ key_inject_relay_chain_key_list }}" 18 | loop_control: 19 | label: Relaychain {{ item.type }} key 20 | when: key_inject_relay_chain_key_list is defined 21 | 22 | - name: Check session key is present 23 | ansible.builtin.include_tasks: check_session_key.yml 24 | when: 25 | - key_inject_relay_chain_key_list is defined 26 | - key_inject_check_session_key 27 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/molecule/default/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | gather_facts: true 5 | tasks: 6 | - name: Collect service facts 7 | ansible.builtin.service_facts: 8 | 9 | - name: print service facts 10 | ansible.builtin.debug: 11 | var: ansible_facts.services[item+'.service'] 12 | loop: 13 | - alice 14 | - bob 15 | 16 | - name: check service 17 | ansible.builtin.assert: 18 | that: ansible_facts.services[item+'.service'].state == 'running' 19 | loop: 20 | - alice 21 | - bob 22 | 23 | - name: check ws health exporter 24 | ansible.builtin.uri: 25 | url: http://127.0.0.1:{{ ws_health_exporter_port }}/health/readiness 26 | use_proxy: false 27 | register: _ws_health_exporter 28 | until: _ws_health_exporter.status == 200 29 | retries: 10 # 10 * 5 seconds = 50 sec 30 | delay: 5 31 | 32 | - name: Print service facts 33 | ansible.builtin.debug: 34 | var: _ws_health_exporter 35 | -------------------------------------------------------------------------------- /roles/node_backup/molecule/default/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | gather_facts: false 5 | tasks: 6 | - name: wait until ~10 blocks created 7 | ansible.builtin.uri: 8 | url: http://127.0.0.1:9933 9 | method: POST 10 | body_format: json 11 | body: 12 | id: 1 13 | jsonrpc: "2.0" 14 | method: chain_getHeader 15 | params: [] 16 | return_content: true 17 | register: _node_backup_register_header 18 | until: _node_backup_register_header.json.result.number | int(base=16) > 10 19 | retries: 10 20 | delay: 10 21 | 22 | - name: Print current block 23 | ansible.builtin.debug: 24 | var: _node_backup_register_header.json.result.number | int(base=16) 25 | 26 | # TODO - add tests 27 | # Test backup-exporter: 28 | # 1. We can push fake data to backup-exporter (like run bash script). 29 | # Then we can check the Prometheus endpoint to check and match the results. 30 | # This will allow checking the code of the exporter. 31 | # 2. 
We can upload data to local MinIO -------------------------------------------------------------------------------- /roles/nginx/tasks/letsencrypt-loop.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nginx | letsencrypt | domain - {{ item }} | setup letsencrypt cmd 3 | ansible.builtin.set_fact: 4 | _nginx_letsencrypt_cmd: certbot certonly --webroot -w /var/www/letsencrypt -d {{ item }} -n -m {{ nginx_letsencrypt_email }} --agree-tos{%- if ansible_check_mode 5 | %} --dry-run{% endif %}{%- if nginx_letsencrypt_mock %} --server https://127.0.0.1:14000/dir --no-verify-ssl{%- endif %} 6 | 7 | - name: nginx | letsencrypt | domain - {{ item }} | print letsencrypt cmd 8 | ansible.builtin.debug: 9 | var: _nginx_letsencrypt_cmd 10 | 11 | - name: nginx | letsencrypt | domain - {{ item }} | create certificate 12 | ansible.builtin.command: "{{ _nginx_letsencrypt_cmd }}" 13 | register: _nginx_letsencrypt_register 14 | until: _nginx_letsencrypt_register.rc is defined and _nginx_letsencrypt_register.rc == 0 15 | retries: 5 16 | delay: 10 17 | check_mode: false 18 | changed_when: false 19 | 20 | - name: nginx | letsencrypt | domain - {{ item }} | print letsencrypt output 21 | ansible.builtin.debug: 22 | var: _nginx_letsencrypt_register.stdout 23 | -------------------------------------------------------------------------------- /roles/secure_apt/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create custom keyring directory 3 | ansible.builtin.file: 4 | path: "{{ _secure_apt_keyring_folder }}" 5 | state: directory 6 | mode: "0755" 7 | 8 | - name: Add APT key 9 | ansible.builtin.apt_key: 10 | id: "{{ secure_apt_key }}" 11 | keyring: "{{ _secure_apt_keyring_folder }}/{{ secure_apt_key }}.gpg" 12 | keyserver: "{{ secure_apt_keyserver }}" 13 | # Causes a fatal error in check mode due to apt-key + grep 14 | # more info: https://github.com/ansible/ansible/issues/28820 15 | ignore_errors: "{{ ansible_check_mode }}" 16 | 17 | - name: Set restrictive permissions for key file 18 | ansible.builtin.file: 19 | path: "{{ _secure_apt_keyring_folder }}/{{ secure_apt_key }}.gpg" 20 | mode: "0444" 21 | # Causes a fatal error in check mode 22 | ignore_errors: "{{ ansible_check_mode }}" 23 | 24 | - name: Add APT repository 25 | ansible.builtin.apt_repository: 26 | repo: deb [arch=amd64 signed-by={{ _secure_apt_keyring_folder }}/{{ secure_apt_key }}.gpg] {{ item }} 27 | update_cache: "{{ secure_apt_update_cache }}" 28 | loop: "{{ secure_apt_repositories }}" 29 | -------------------------------------------------------------------------------- /roles/node/molecule/default/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Molecule 3 | ansible_user: root 4 | 5 | ## Node 6 | node_chain: polkadot 7 | node_app_name: "{{ node_chain }}" 8 | node_binary_version: v1.12.0 9 | node_legacy_rpc_flags: false 10 | node_rpc_port: 9944 11 | node_binary: https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-{{ node_binary_version }}/polkadot 12 | node_binary_signature: https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-{{ node_binary_version }}/polkadot.asc 13 | node_pruning: 256 14 | node_paritydb_enable: true 15 | node_chain_backup_restoring_type: none 16 | node_parachain_chain_backup_restoring_type: none 17 | # This private key is only for molecule tests 18 | # Note: don't modify this key either, because the last
character (which is invisible here) is special 19 | # and without it, subkey won't be able to work with it 20 | node_p2p_private_key: a4964e8e979c29fcdd79403db8c374cae91857e69a13162f7664a6529bd66093 21 | node_prometheus_file_exporter_path: /tmp/substrate.prom 22 | node_data_root_path: /opt/polkadot-root 23 | node_memory_profiler_log_path: /opt/polkadot-root-logs 24 | node_enable_public_ip_detection: false 25 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/templates/.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=ws_health_exporter systemd service 3 | 4 | [Service] 5 | Environment="PYTHONUNBUFFERED=True" 6 | Environment="WSHE_NODE_RPC_URLS={{ ws_health_exporter_ws_urls | join(',') }}" 7 | Environment="WSHE_LOG_LEVEL={{ ws_health_exporter_log_level }}" 8 | Environment="WSHE_HOST={{ ws_health_exporter_host }}" 9 | Environment="WSHE_PORT={{ ws_health_exporter_port }}" 10 | Environment="WSHE_WS_CHECK_INTERVAL={{ ws_health_exporter_ws_check_interval }}" 11 | Environment="WSHE_WS_TIMEOUT={{ ws_health_exporter_ws_timeout }}" 12 | Environment="WSHE_NODE_MAX_UNSYNCHRONIZED_BLOCK_DRIFT={{ ws_health_exporter_node_max_unsynchronized_block_drift }}" 13 | Environment="WSHE_NODE_MIN_PEERS={{ ws_health_exporter_node_min_peers }}" 14 | Environment="WSHE_MIN_BLOCK_RATE={{ ws_health_exporter_min_block_rate }}" 15 | Environment="WSHE_BLOCK_RATE_MEASUREMENT_PERIOD={{ ws_health_exporter_block_rate_measurement_period }}" 16 | 17 | ExecStart={{ _ws_health_exporter_venv }}/bin/python3 {{ _ws_health_exporter_file }} 18 | 19 | Restart=always 20 | User={{ ws_health_exporter_user }} 21 | Group={{ ws_health_exporter_user }} 22 | 23 | [Install] 24 | WantedBy=multi-user.target 25 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain_remote_rc/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: false 5 | pre_tasks: 6 | - name: Install Python 7 | ansible.builtin.raw: apt -y update && apt install -y python3 8 | changed_when: false 9 | - name: Install required packages 10 | ansible.builtin.apt: 11 | name: 12 | - gpg 13 | update_cache: false 14 | changed_when: false 15 | tasks: 16 | - name: relaychain 17 | ansible.builtin.include_role: 18 | name: node 19 | vars: 20 | node_app_name: relaychain-shell 21 | node_data_root_path: /opt/{{ node_app_name }} 22 | node_chain: rococo-dev 23 | node_custom_options: [--alice] 24 | node_binary_version: v0.9.43 25 | node_legacy_rpc_flags: false 26 | node_binary: https://github.com/paritytech/polkadot/releases/download/{{ node_binary_version }}/polkadot 27 | node_binary_signature: https://github.com/paritytech/polkadot/releases/download/{{ node_binary_version }}/polkadot.asc 28 | node_rpc_port: 9944 29 | node_chain_backup_restoring_type: none 30 | node_prometheus_file_exporter_path: /tmp/substrate-rc.prom 31 | -------------------------------------------------------------------------------- /roles/key_inject/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | subkey_path: https://releases.parity.io/substrate/x86_64-debian%3Astretch/v3.0.0/subkey/subkey 3 | 4 | # Parachain key injection variables 5 | key_inject_parachain_rpc_port: 9954 6 | key_inject_parachain_scheme: sr25519 7 | # key_inject_parachain_aura_private_key= 8 | 9 | # Relay chain key 
injection variables 10 | key_inject_relay_chain_rpc_port: 9944 11 | # key_inject_relay_chain_key_list: 12 | # - scheme: "sr25519" # Optional default is sr25519 13 | # rpc_port: "9933" # Optional default is {{ key_inject_relay_chain_rpc_port }} 14 | # type: "gran" # Required, key type 15 | # priv_key: "0xcc...9123//1//grandpa" # Required, key seed 16 | # - type: "babe" 17 | # priv_key: "SECRET SEED" 18 | # - type: "imon" 19 | # priv_key: "SECRET SEED" 20 | # - type: "para" 21 | # priv_key: "SECRET SEED" 22 | # - type: "asgn" 23 | # priv_key: "SECRET SEED" 24 | # - type: "audi" 25 | # priv_key: "SECRET SEED" 26 | # - scheme: "ecdsa" 27 | # type: "beef" 28 | # priv_key: "SECRET SEED" 29 | 30 | # if set to true, the public parts of the keys from key_inject_relay_chain_key_list will be combined 31 | # and verified to be present in the keystore 32 | key_inject_check_session_key: true 33 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain_remote_rc/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | gather_facts: false 5 | tasks: 6 | - name: Collect service facts 7 | ansible.builtin.service_facts: 8 | 9 | - name: Print service facts 10 | ansible.builtin.debug: 11 | var: ansible_facts.services['parachain-shell.service'] 12 | 13 | - name: check service 14 | ansible.builtin.assert: 15 | that: ansible_facts.services['parachain-shell.service'].state == 'running' 16 | 17 | - name: Get parachain system_health 18 | ansible.builtin.uri: 19 | url: http://127.0.0.1:{{ node_parachain_rpc_port }} 20 | method: POST 21 | body: { id: 1, jsonrpc: "2.0", method: system_health, params: [] } 22 | body_format: json 23 | headers: 24 | Content-Type: application/json 25 | use_proxy: false 26 | until: _parachain_system_health_result.status is defined and _parachain_system_health_result.status == 200 27 | retries: 3 28 | delay: 10 29 | register: _parachain_system_health_result 30 | 31 | - name: Print system_health 32 | ansible.builtin.debug: 33 | msg: "Parachain: {{ _parachain_system_health_result.json }}" 34 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/files/pebble/cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDGzCCAgOgAwIBAgIIbEfayDFsBtwwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE 3 | AxMVbWluaWNhIHJvb3QgY2EgMjRlMmRiMCAXDTE3MTIwNjE5NDIxMFoYDzIxMDcx 4 | MjA2MTk0MjEwWjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB 5 | AQUAA4IBDwAwggEKAoIBAQCbFMW3DXXdErvQf2lCZ0qz0DGEWadDoF0O2neM5mVa 6 | VQ7QGW0xc5Qwvn3Tl62C0JtwLpF0pG2BICIN+DHdVaIUwkf77iBS2doH1I3waE1I 7 | 8GkV9JrYmFY+j0dA1SwBmqUZNXhLNwZGq1a91nFSI59DZNy/JciqxoPX2K++ojU2 8 | FPpuXe2t51NmXMsszpa+TDqF/IeskA9A/ws6UIh4Mzhghx7oay2/qqj2IIPjAmJj 9 | i73kdUvtEry3wmlkBvtVH50+FscS9WmPC5h3lDTk5nbzSAXKuFusotuqy3XTgY5B 10 | PiRAwkZbEY43JNfqenQPHo7mNTt29i+NVVrBsnAa5ovrAgMBAAGjYzBhMA4GA1Ud 11 | DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T 12 | AQH/BAIwADAiBgNVHREEGzAZgglsb2NhbGhvc3SCBnBlYmJsZYcEfwAAATANBgkq 13 | hkiG9w0BAQsFAAOCAQEAYIkXff8H28KS0KyLHtbbSOGU4sujHHVwiVXSATACsNAE 14 | D0Qa8hdtTQ6AUqA6/n8/u1tk0O4rPE/cTpsM3IJFX9S3rZMRsguBP7BSr1Lq/XAB 15 | 7JP/CNHt+Z9aKCKcg11wIX9/B9F7pyKM3TdKgOpqXGV6TMuLjg5PlYWI/07lVGFW 16 | /mSJDRs8bSCFmbRtEqc4lpwlrpz+kTTnX6G7JDLfLWYw/xXVqwFfdengcDTHCc8K 17 | wtgGq/Gu6vcoBxIO3jaca+OIkMfxxXmGrcNdseuUCa3RMZ8Qy03DqGu6Y6XQyK4B 18 |
W8zIG6H9SVKkAznM2yfYhW8v2ktcaZ95/OBHY97ZIw== 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/molecule/default/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Molecule 3 | ansible_user: root 4 | 5 | ## node 6 | node_role: "validator" 7 | node_user: polkadot 8 | node_chain: westend-local 9 | node_chain_backup_chain_path: "westend_local_testnet" 10 | node_chain_backup_restoring_type: none 11 | node_parachain_chain_backup_restoring_type: none 12 | node_p2p_bind_addr: "127.0.0.1" 13 | node_ansible_annotation_path: /tmp/substrate.prom 14 | node_binary_version: v1.12.0 15 | node_binary: "https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-{{ node_binary_version }}/polkadot" 16 | node_prepare_worker_binary: "https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-{{ node_binary_version }}/polkadot-prepare-worker" 17 | node_execute_worker_binary: "https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-{{ node_binary_version }}/polkadot-execute-worker" 18 | 19 | ## ws_health_exporter 20 | ws_health_exporter_user: "{{ node_user }}" 21 | ws_health_exporter_log_level: DEBUG 22 | ws_health_exporter_port: 8001 23 | ws_health_exporter_node_max_unsynchronized_block_drift: 2 24 | ws_health_exporter_node_min_peers: 1 25 | 26 | ws_health_exporter_ws_urls: 27 | - ws://127.0.0.1:9933 28 | - ws://127.0.0.1:9934 29 | -------------------------------------------------------------------------------- /roles/node_backup/tasks/exporter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: node-backup | exporter | remove the cache file 3 | ansible.builtin.file: 4 | path: "{{ _node_backup_exporter_cache_file }}" 5 | state: absent 6 | notify: restart node-backup exporter 7 | when: node_backup_wipe_cache_enable | bool 8 | 9 | - name: node-backup | exporter | copy exporter file 10 | ansible.builtin.copy: 11 | src: exporter.py 12 | dest: "{{ _node_backup_exporter_file }}" 13 | mode: "0755" 14 | owner: "{{ node_backup_user }}" 15 | group: "{{ node_backup_user }}" 16 | notify: restart node-backup exporter 17 | 18 | - name: node-backup | exporter | copy exporter systemd unit file 19 | ansible.builtin.template: 20 | src: node-backup-exporter.service.j2 21 | dest: /etc/systemd/system/node-backup-exporter.service 22 | owner: root 23 | group: root 24 | mode: "0644" 25 | notify: restart node-backup exporter 26 | 27 | # to avoid 2 restarts during the first deploy 28 | - name: node-backup | exporter | flush handlers 29 | ansible.builtin.meta: flush_handlers 30 | 31 | - name: node-backup | exporter | start exporter service 32 | ansible.builtin.systemd: 33 | name: node-backup-exporter 34 | state: started 35 | enabled: true 36 | daemon_reload: true 37 | -------------------------------------------------------------------------------- /roles/node/tasks/300-wipe.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Wipe | Stop service 3 | ansible.builtin.systemd: 4 | name: "{{ node_app_name }}" 5 | state: stopped 6 | notify: restart service {{ node_handler_id }} 7 | ignore_errors: "{{ not _node_systemd_unit_file_stat.stat.exists }}" 8 | 9 | - name: Wipe | Delete relaychain DB 10 | ansible.builtin.file: 11 | path: "{{ _node_data_chain_path }}" 12 | state: absent 13 | vars: 14 | # remove whole `chains` folder 15 | _node_chain_id: "" 
16 | notify: restart service {{ node_handler_id }} 17 | retries: 5 18 | register: _node_wipe_results 19 | until: not _node_wipe_results.failed 20 | delay: 60 21 | when: node_database_wipe | bool 22 | 23 | - name: Wipe | Delete parachain DB 24 | ansible.builtin.file: 25 | path: "{{ _node_parachain_data_chain_path }}" 26 | state: absent 27 | vars: 28 | _node_parachain_chain_id: "" 29 | notify: restart service {{ node_handler_id }} 30 | retries: 5 31 | register: _node_wipe_results 32 | until: not _node_wipe_results.failed 33 | delay: 60 34 | when: node_parachain_role != '' and (node_parachain_database_wipe | bool) 35 | 36 | # we need it to update information about free space after the wipe 37 | - name: Wipe | Gather facts 38 | ansible.builtin.gather_facts: 39 | -------------------------------------------------------------------------------- /.github/workflows/requirements-molecule.txt: -------------------------------------------------------------------------------- 1 | ansible-lint==24.5.0 2 | attrs==23.2.0 3 | black==24.4.2 4 | bracex==2.4 5 | certifi==2024.7.4 6 | cffi==1.16.0 7 | charset-normalizer==3.3.2 8 | click==8.1.7 9 | click-help-colors==0.9.4 10 | cryptography==42.0.7 11 | distro==1.9.0 12 | enrich==1.2.7 13 | filelock==3.14.0 14 | idna==3.7 15 | importlib_metadata==7.1.0 16 | Jinja2==3.1.4 17 | jmespath==1.0.1 18 | jsonschema==4.22.0 19 | jsonschema-specifications==2023.12.1 20 | markdown-it-py==3.0.0 21 | MarkupSafe==2.1.5 22 | mdurl==0.1.2 23 | molecule==24.2.1 24 | molecule-plugins==23.5.3 25 | mypy-extensions==1.0.0 26 | packaging==24.0 27 | pathspec==0.12.1 28 | platformdirs==4.2.2 29 | pluggy==1.5.0 30 | pycparser==2.22 31 | Pygments==2.18.0 32 | PyYAML==6.0.1 33 | referencing==0.35.1 34 | resolvelib==1.0.1 35 | rich==13.7.1 36 | rpds-py==0.18.1 37 | ruamel.yaml==0.18.6 38 | ruamel.yaml.clib==0.2.8 39 | selinux==0.3.0 40 | subprocess-tee==0.4.1 41 | urllib3==2.2.2 42 | wcmatch==8.5.2 43 | yamllint==1.35.1 44 | zipp==3.19.1 45 | # requests > 2.31.0 breaks the Docker Python library 46 | # The fix https://github.com/docker/docker-py/pull/3257/files 47 | # has not been included in a release of the Docker Python library yet 48 | # they should be updated together 49 | # when new versions of the Docker Python library > 7.1.0 are released 50 | docker==7.1.0 51 | requests==2.31.0 52 | -------------------------------------------------------------------------------- /roles/key_inject/tasks/check_session_key.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check session key | Generate session 3 | ansible.builtin.set_fact: 4 | key_inject_session_key: "0x{% for key in key_inject_relay_chain_key_list %}{{ (key.priv_key | paritytech.chain.subkey_inspect(scheme=(key.scheme | default('sr25519')))).publicKey.replace('0x', 5 | '') }}{% endfor %}" 6 | 7 | - name: Check session key | Run rpc 8 | ansible.builtin.uri: 9 | url: http://127.0.0.1:{{ key_inject_relay_chain_rpc_port }} 10 | method: POST 11 | body: 12 | jsonrpc: "2.0" 13 | method: author_hasSessionKeys 14 | params: ["{{ key_inject_session_key }}"] 15 | id: 1 16 | body_format: json 17 | headers: 18 | Content-Type: application/json 19 | use_proxy: false 20 | changed_when: false 21 | check_mode: false 22 | register: key_inject_has_session_keys 23 | 24 | - name: Check session key | Debug 25 | ansible.builtin.debug: 26 | msg: "RPC call failed: {{ key_inject_has_session_keys.json }}" 27 | when: key_inject_has_session_keys.json.result is not defined 28 | 29 | - name: Check session key | Check 30 |
ansible.builtin.debug: 31 | msg: Session Key {{ key_inject_session_key }} is {{ 'NOT ' if not key_inject_has_session_keys.json.result else '' }}present in keystore 32 | changed_when: not key_inject_has_session_keys.json.result 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible Polkadot Collection - paritytech.chain 2 | 3 | ## Install Ansible collections 4 | 5 | Create `requirements.yml` file in your playbook repository (or add to the existing file): 6 | ```yaml 7 | collections: 8 | - name: https://github.com/paritytech/ansible-polkadot.git 9 | type: git 10 | version: 1.10.0 11 | ``` 12 | 13 | or 14 | 15 | ```yaml 16 | collections: 17 | - name: paritytech.chain 18 | version: 1.10.0 19 | ``` 20 | 21 | If you want to install collections in the project space, you have to run: 22 | ```commandline 23 | mkdir collections 24 | ansible-galaxy collection install -f -r requirements.yml -p ./collections 25 | ``` 26 | 27 | If you want to install collections in the global space (`~/.ansible/collections`), 28 | you have to run: 29 | ```commandline 30 | ansible-galaxy collection install -f -r requirements.yml 31 | ``` 32 | 33 | ## Roles 34 | 35 | * key_inject - [README](./roles/key_inject/README.md) 36 | * node - [README](./roles/node/README.md) 37 | * node_backup - [README](./roles/node_backup/README.md) 38 | * secure_apt - [README](./roles/secure_apt/README.md) 39 | * state_exporter - [README](./roles/state_exporter/README.md) 40 | * ws_health_exporter - [README](./roles/ws_health_exporter/README.md) 41 | * nginx - [README](./roles/nginx/README.md) 42 | * nginx_exporter - [README](./roles/nginx_exporter/README.md) 43 | -------------------------------------------------------------------------------- /roles/node/templates/node.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description={{ node_app_name }} Systemd Service 3 | 4 | [Service] 5 | Restart=always 6 | RestartSec=90 7 | EnvironmentFile=/etc/default/polkadot-{{ node_app_name }} 8 | User={{ node_user }} 9 | Group={{ node_user }} 10 | MemoryHigh={{ node_memory_high }} 11 | MemoryMax={{ node_memory_max }} 12 | ExecStart={{ _node_binary_path }}/{{ _node_main_binary_file_name }} \ 13 | $COMMON \ 14 | {% if node_parachain_role != '' and node_parachain_relay_chain_rpc_urls != [] %} 15 | $PC_NAME $PC_ROLE_SPECIFIC $PC_KEY $PC_CHAIN $PC_REMOTE_RC_URLS $PC_ADDR $PC_CONNECTIONS $PC_DB $PC_TELEMETRY $PC_PRUNING $PC_LOGS $PC_METRICS $PC_WS $PC_RPC $PC_WASM_RUNTIME $PC_CUSTOM_OPTIONS 16 | {% elif node_parachain_role != '' %} 17 | $PC_NAME $PC_ROLE_SPECIFIC $PC_KEY $PC_CHAIN $PC_ADDR $PC_CONNECTIONS $PC_DB $PC_TELEMETRY $PC_PRUNING $PC_LOGS $PC_METRICS $PC_WS $PC_RPC $PC_WASM_RUNTIME $PC_CUSTOM_OPTIONS \ 18 | -- \ 19 | {% endif %} 20 | {% if node_parachain_relay_chain_rpc_urls == [] %} 21 | $RC_NAME $RC_ROLE_SPECIFIC $RC_KEY $RC_CHAIN $RC_ADDR $RC_CONNECTIONS $RC_DB $RC_TELEMETRY $RC_PRUNING $RC_LOGS $RC_METRICS $RC_WS $RC_RPC $RC_WASM_RUNTIME $RC_CUSTOM_OPTIONS 22 | {% endif %} 23 | 24 | {% if node_syslog_labels != '' %} 25 | SyslogIdentifier={{ node_syslog_labels }} 26 | {% endif %} 27 | 28 | [Install] 29 | WantedBy=multi-user.target 30 | -------------------------------------------------------------------------------- /roles/nginx/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | user www-data; 2 | 
worker_processes auto; 3 | pid /run/nginx.pid; 4 | include /etc/nginx/modules-enabled/*.conf; 5 | worker_rlimit_nofile {{ nginx_worker_rlimit_nofile }}; 6 | 7 | events { 8 | worker_connections 8000; 9 | # multi_accept on; 10 | } 11 | 12 | http { 13 | limit_req_zone "$http_x_forwarded_for" zone=zone:10m rate={{ nginx_max_request_rate }}r/s; 14 | limit_req_zone "$binary_remote_addr" zone=ipzone:10m rate={{ nginx_max_request_rate }}r/s; 15 | sendfile on; 16 | tcp_nopush on; 17 | tcp_nodelay on; 18 | keepalive_timeout 65; 19 | types_hash_max_size 2048; 20 | 21 | include /etc/nginx/mime.types; 22 | default_type application/octet-stream; 23 | {% for directive in nginx_http_context_directives %} 24 | {{ directive }}; 25 | {% endfor %} 26 | 27 | {% if nginx_log_extended_enable %} 28 | log_format main '$remote_addr - $http_x_forwarded_for - $remote_user [$time_local] ' '"$request" $status $body_bytes_sent "$http_referer" ' '"$http_user_agent"'; 29 | access_log /var/log/nginx/access.log main; 30 | {% else %} 31 | access_log /var/log/nginx/access.log; 32 | {% endif %} 33 | error_log /var/log/nginx/error.log; 34 | 35 | gzip on; 36 | gzip_disable "msie6"; 37 | 38 | server_tokens off; 39 | 40 | include /etc/nginx/conf.d/*.conf; 41 | include /etc/nginx/sites-enabled/*; 42 | } 43 | -------------------------------------------------------------------------------- /roles/nginx/tasks/tests.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nginx | tests | fail if the site isn't unique 1 3 | ansible.builtin.set_fact: 4 | _nginx_revised_sites: [] 5 | 6 | - name: nginx | tests | fail if the site isn't unique 2 7 | ansible.builtin.set_fact: 8 | _nginx_revised_sites: "{{ _nginx_revised_sites + ['template: ' + item.template + ' domain: ' + item.domain] }}" 9 | loop: "{{ nginx_sites }}" 10 | 11 | - name: nginx | tests | fail if the site isn't unique 3 12 | ansible.builtin.fail: 13 | msg: "{{ item }}. A pair of 'template' and 'domain' variables must be unique for each item of the 'nginx_sites' variable" 14 | loop: "{{ _nginx_revised_sites | sort }}" 15 | loop_control: 16 | extended: true 17 | when: not ansible_loop.last and (item == ansible_loop.nextitem) 18 | 19 | - name: nginx | tests | check the ssl_issuer variable 20 | ansible.builtin.fail: 21 | msg: The 'ssl_issuer' variable must be defined and can only contain the values 'manual' or 'letsencrypt'!
22 | loop: "{{ nginx_sites }}" 23 | when: item.ssl_issuer is not defined or item.ssl_issuer not in ['manual', 'letsencrypt'] 24 | 25 | - name: nginx | tests | check the ssl_manual_cert_file variable 26 | ansible.builtin.fail: 27 | msg: The 'ssl_manual_cert_file' variable must be defined, if 'ssl_issuer' == 'manual' 28 | loop: "{{ nginx_sites }}" 29 | when: item.ssl_issuer == 'manual' and item.ssl_manual_cert_file is not defined 30 | -------------------------------------------------------------------------------- /roles/nginx_exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Nginx exporter 3 | tags: [nginx-exporter] 4 | block: 5 | - name: Nginx exporter | download exporter 6 | ansible.builtin.unarchive: 7 | src: "{{ nginx_exporter_binary }}" 8 | dest: "{{ _nginx_exporter_file | dirname }}" 9 | remote_src: true 10 | owner: root 11 | group: root 12 | mode: "0644" 13 | notify: Restart nginx-exporter 14 | 15 | - name: Nginx exporter | change permissions of binary 16 | ansible.builtin.file: 17 | path: "{{ _nginx_exporter_file }}" 18 | owner: root 19 | group: root 20 | mode: "0755" 21 | state: file 22 | notify: Restart nginx-exporter 23 | 24 | - name: Nginx exporter | copy exporter systemd unit file 25 | ansible.builtin.template: 26 | src: .service.j2 27 | dest: /etc/systemd/system/{{ nginx_exporter_name }}.service 28 | owner: root 29 | group: root 30 | mode: "0600" 31 | notify: Restart nginx-exporter 32 | 33 | # to avoid 2 restarts during the first deploy 34 | - name: Nginx exporter | flush handlers 35 | ansible.builtin.meta: flush_handlers 36 | 37 | - name: Nginx exporter | start exporter service 38 | ansible.builtin.systemd: 39 | name: "{{ nginx_exporter_name }}" 40 | state: started 41 | enabled: true 42 | daemon_reload: true 43 | -------------------------------------------------------------------------------- /roles/node/tasks/200-prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare | Create user 3 | ansible.builtin.user: 4 | name: "{{ node_user }}" 5 | shell: /sbin/nologin 6 | register: _node_user_stat 7 | 8 | - name: Prepare | Setup '_node_user_home_path' variable 9 | ansible.builtin.set_fact: 10 | _node_user_home_path: "{{ _node_user_stat.home }}" 11 | 12 | - name: Prepare | Check '_node_user_home_path' variable 1 13 | ansible.builtin.stat: 14 | path: "{{ _node_user_home_path }}" 15 | register: _node_user_home_path_stat 16 | 17 | - name: Prepare | Check '_node_user_home_path' variable 2 18 | ansible.builtin.fail: 19 | msg: The user home dir {{ _node_user_home_path }} must be created! 
20 | when: _node_user_home_path_stat.stat.isdir is not defined or not _node_user_home_path_stat.stat.isdir 21 | ignore_errors: "{{ not _node_systemd_unit_file_stat.stat.exists }}" 22 | 23 | - name: Prepare | Print _node_user_home_path 24 | ansible.builtin.debug: 25 | var: _node_user_home_path 26 | 27 | - name: Prepare | Print _node_data_root_path 28 | ansible.builtin.debug: 29 | var: _node_data_root_path 30 | 31 | - name: Prepare | Print _node_memory_profiler_log_path 32 | ansible.builtin.debug: 33 | var: _node_memory_profiler_log_path 34 | 35 | - name: Prepare | Create temporary directory 36 | ansible.builtin.tempfile: 37 | state: directory 38 | suffix: polkadot_temp_dir 39 | register: _node_temp_dir 40 | check_mode: false 41 | changed_when: false 42 | -------------------------------------------------------------------------------- /roles/node_backup/tasks/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: node-backup | requirements | install packages 3 | ansible.builtin.package: 4 | name: "{{ packages }}" 5 | state: present 6 | update_cache: true 7 | vars: 8 | packages: 9 | - curl 10 | - jq 11 | - expect 12 | - moreutils 13 | - python3-venv 14 | - python3-setuptools 15 | 16 | - name: node-backup | requirements | install Python modules 17 | ansible.builtin.pip: 18 | name: 19 | - prometheus-client==0.17.0 20 | virtualenv: "{{ _node_backup_venv_path }}" 21 | virtualenv_command: python3 -m venv 22 | notify: restart node-backup exporter 23 | 24 | - name: node-backup | requirements | configure rclone 25 | when: node_backup_targets | json_query('[].type') | intersect(_node_backup_rclone_types) | length > 0 26 | block: 27 | - name: node-backup | requirements | install rclone 28 | ansible.builtin.apt: 29 | deb: "{{ _node_backup_rclone_deb }}" 30 | 31 | - name: node backup | requirements | create rclone config directory 32 | ansible.builtin.file: 33 | path: /root/.config/rclone 34 | state: directory 35 | mode: "0700" 36 | owner: root 37 | group: root 38 | 39 | - name: node-backup | requirements | copy rclone config 40 | ansible.builtin.template: 41 | src: rclone/rclone.conf.j2 42 | dest: /root/.config/rclone/rclone.conf 43 | owner: root 44 | group: root 45 | mode: "0600" 46 | -------------------------------------------------------------------------------- /roles/node_backup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: node-backup | tests 3 | ansible.builtin.include_tasks: 4 | file: tests.yml 5 | apply: 6 | tags: [node-backup, node-backup-tests] 7 | tags: [node-backup, node-backup-tests] 8 | 9 | - name: node-backup | create directories 10 | ansible.builtin.file: 11 | path: "{{ item.path }}" 12 | state: directory 13 | mode: "0755" 14 | owner: "{{ item.user }}" 15 | group: "{{ item.user }}" 16 | loop: 17 | - path: "{{ node_backup_base_path }}" 18 | user: root 19 | - path: "{{ _node_backup_scripts_path }}" 20 | user: root 21 | - path: "{{ _node_backup_exporter_path }}" 22 | user: "{{ node_backup_user }}" 23 | - path: "{{ _node_backup_log_path }}" 24 | user: root 25 | - path: "{{ _node_backup_venv_path }}" 26 | user: "{{ node_backup_user }}" 27 | tags: [node-backup] 28 | 29 | - name: node-backup | requirements 30 | ansible.builtin.include_tasks: 31 | file: requirements.yml 32 | apply: 33 | tags: [node-backup, node-backup-requirements] 34 | tags: [node-backup, node-backup-requirements] 35 | 36 | - name: node-backup | job 37 | ansible.builtin.include_tasks: 38 | 
file: job.yml 39 | apply: 40 | tags: [node-backup, node-backup-job] 41 | tags: [node-backup, node-backup-job] 42 | 43 | - name: node-backup | exporter 44 | ansible.builtin.include_tasks: 45 | file: exporter.yml 46 | apply: 47 | tags: [node-backup, node-backup-exporter] 48 | tags: [node-backup, node-backup-exporter] 49 | -------------------------------------------------------------------------------- /.github/workflows/reusable-galaxy-deploy.yml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_call: 3 | inputs: 4 | ansible-version: 5 | required: false 6 | type: string 7 | default: 9.0.1 8 | secrets: 9 | api-token: 10 | required: true 11 | jobs: 12 | deploy-galaxy: 13 | runs-on: ubuntu-22.04 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | with: 18 | path: "${{ github.repository }}" 19 | - name: Setup Python 20 | uses: actions/setup-python@v4 21 | with: 22 | python-version: '3.x' 23 | - name: Setup Python modules 24 | run: pip3 install --no-cache-dir ansible==${{ inputs.ansible-version }} yq 25 | - name: Print Ansible version 26 | run: ansible --version 27 | - name: Build collection 28 | run: ansible-galaxy collection build "${{ github.repository }}" 29 | - name: Save API token 30 | run: echo '${{ secrets.api-token }}' > api-token 31 | - name: Publish collection 32 | run: | 33 | GALAXY_NAMESPACE=$(cat ${GITHUB_REPOSITORY}/galaxy.yml | yq -r '.namespace' | tr -d '\n') 34 | GALAXY_NAME=$(cat ${GITHUB_REPOSITORY}/galaxy.yml | yq -r '.name' | tr -d '\n') 35 | VERSION=$(cat ${GITHUB_REPOSITORY}/galaxy.yml | yq -r '.version' | tr -d '\n') 36 | ansible-galaxy collection publish ${GALAXY_NAMESPACE}-${GALAXY_NAME}-${VERSION}.tar.gz --api-key="$(cat api-token | tr -d '\n')" 37 | - name: Remove API token 38 | run: rm -v api-token 39 | -------------------------------------------------------------------------------- /roles/node/tasks/700-get-chainid.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Relay chain 3 | - name: Get chain id | Slurp chainspec_file 4 | ansible.builtin.slurp: 5 | src: "{{ _node_chainspec_file }}" 6 | register: _node_current_chainspec 7 | when: node_chainspec != '' 8 | check_mode: false 9 | changed_when: false 10 | 11 | - name: Get chain id | Set chain id 12 | ansible.builtin.set_fact: 13 | _node_chain_id: "{{ (_node_current_chainspec['content'] | b64decode | from_json).id if node_chainspec != '' else node_chain_backup_chain_path }}" 14 | 15 | - name: Get chain id | print _node_chain_id 16 | ansible.builtin.debug: 17 | var: _node_chain_id 18 | 19 | # Parachain 20 | - name: Get chain id | Slurp parachain_chainspec_file 21 | ansible.builtin.slurp: 22 | src: "{{ _node_parachain_chainspec_file }}" 23 | register: _node_current_parachain_chainspec 24 | when: 25 | - node_parachain_role != '' 26 | - node_parachain_chainspec != '' 27 | check_mode: false 28 | changed_when: false 29 | 30 | - name: Get chain id | Set parachain chain id 31 | ansible.builtin.set_fact: 32 | _node_parachain_chain_id: "{%- if node_parachain_role != '' and node_parachain_chainspec != '' -%} {{ (_node_current_parachain_chainspec['content'] | b64decode 33 | | from_json).id }}{%- elif node_parachain_role != '' and node_parachain_chainspec == '' -%} {{ node_parachain_chain_backup_chain_path }}{%- else -%} {%- endif 34 | %}" 35 | 36 | - name: Get chain id | print _node_parachain_chain_id 37 | ansible.builtin.debug: 38 | var: _node_parachain_chain_id 39 | when: node_parachain_role != '' 40 | 
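40 | 41 | # Illustrative example (hypothetical chainspec values, not taken from the role docs): a chainspec whose JSON contains "id": "rococo_local_testnet" 42 | # would make the tasks above set _node_chain_id / _node_parachain_chain_id to "rococo_local_testnet"; 43 | # when no chainspec URL is configured, the values fall back to node_chain_backup_chain_path / node_parachain_chain_backup_chain_path instead.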
-------------------------------------------------------------------------------- /roles/node_backup/molecule/default/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Molecule 3 | ansible_user: root 4 | 5 | ## node 6 | node_role: "validator" 7 | node_user: polkadot 8 | node_chain: westend-local 9 | node_chain_backup_chain_path: "westend_local_testnet" 10 | node_chain_backup_restoring_type: none 11 | node_parachain_chain_backup_restoring_type: none 12 | node_p2p_bind_addr: "127.0.0.1" 13 | node_ansible_annotation_path: /tmp/substrate.prom 14 | node_binary_version: v1.12.0 15 | node_binary: "https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-{{ node_binary_version }}/polkadot" 16 | node_prepare_worker_binary: "https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-{{ node_binary_version }}/polkadot-prepare-worker" 17 | node_execute_worker_binary: "https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-{{ node_binary_version }}/polkadot-execute-worker" 18 | 19 | 20 | # node_backup 21 | _gcp_bucket: test-blockstore-backups 22 | node_backup_user: polkadot 23 | node_backup_r2_access_key_id: abc 24 | node_backup_r2_secret_access_key: cba 25 | node_backup_r2_api_url: https://a.b 26 | node_backup_targets: 27 | - service_name: alice-rocksdb-prune 28 | local_path: /opt/alice-rocksdb-prune/chains/{{ node_chain_backup_chain_path }}/db 29 | rpc_port: 9933 30 | bucket_name: "{{ _gcp_bucket }}" 31 | type: gcp-rclone 32 | - service_name: bob-paritydb-prune 33 | local_path: /opt/bob-paritydb-prune/chains/{{ node_chain_backup_chain_path }}/paritydb 34 | rpc_port: 9934 35 | bucket_name: "{{ _gcp_bucket }}" 36 | type: r2-rclone 37 | bucket_domain: c.d 38 | -------------------------------------------------------------------------------- /roles/node_backup/tasks/job.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: node-backup | job | set _node_backup_targets variable 1 3 | ansible.builtin.set_fact: 4 | _node_backup_targets: [] 5 | 6 | - name: node-backup | job | set _node_backup_targets variable 2 7 | ansible.builtin.set_fact: 8 | _node_backup_targets: "{{ _node_backup_targets + [item | combine({'id': _node_backup_id}, recursive=True)] }}" 9 | vars: 10 | _node_backup_id: "{{ (_node_backup_storages[item.type] + '-' + item.bucket_name + '-' + item.service_name) | regex_replace('[^0-9a-zA-Z]+', '-') }}" 11 | loop: "{{ node_backup_targets }}" 12 | 13 | - name: node-backup | job | copy single backup scripts 14 | ansible.builtin.template: 15 | src: single-backup.sh.j2 16 | dest: "{{ _node_backup_scripts_path }}/{{ item.id }}.sh" 17 | mode: "0755" 18 | owner: root 19 | group: root 20 | loop: "{{ _node_backup_targets }}" 21 | tags: [node-backup-test] 22 | 23 | - name: node-backup | job | copy common backup script 24 | ansible.builtin.template: 25 | src: common-backup.sh.j2 26 | dest: "{{ _node_backup_scripts_path }}/common.sh" 27 | mode: "0755" 28 | owner: root 29 | group: root 30 | tags: [node-backup-test] 31 | 32 | - name: node-backup | job | copy backup systemd unit files 33 | ansible.builtin.template: 34 | src: "{{ item }}.j2" 35 | dest: /etc/systemd/system/{{ item }} 36 | owner: root 37 | group: root 38 | mode: "0644" 39 | loop: 40 | - node-backup.service 41 | - node-backup.timer 42 | notify: restart node-backup timer 43 | 44 | - name: node-backup | job | enable timer 45 | ansible.builtin.systemd: 46 | name: node-backup.timer 47 | state: started 
48 | enabled: true 49 | daemon_reload: true 50 | -------------------------------------------------------------------------------- /roles/state_exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: state_exporter 3 | tags: [state-exporter] 4 | block: 5 | - name: state_exporter | Install apt packages 6 | ansible.builtin.package: 7 | name: "{{ packages }}" 8 | state: present 9 | update_cache: true 10 | vars: 11 | packages: 12 | - python3-prometheus-client 13 | - python3-schedule 14 | - python3-psutil 15 | 16 | - name: state_exporter | Create directory 17 | ansible.builtin.file: 18 | path: "{{ state_exporter_file | dirname }}" 19 | state: directory 20 | mode: "0755" 21 | owner: "{{ state_exporter_user }}" 22 | group: "{{ state_exporter_user }}" 23 | 24 | - name: state_exporter | Copy exporter 25 | ansible.builtin.copy: 26 | src: exporter.py 27 | dest: "{{ state_exporter_file }}" 28 | mode: "0755" 29 | owner: "{{ state_exporter_user }}" 30 | group: "{{ state_exporter_user }}" 31 | notify: restart state-exporter 32 | 33 | - name: state_exporter | Copy exporter systemd unit file 34 | ansible.builtin.template: 35 | src: .service.j2 36 | dest: /etc/systemd/system/{{ state_exporter_name }}.service 37 | owner: root 38 | group: root 39 | mode: "0600" 40 | notify: restart state-exporter 41 | 42 | # to avoid 2 restarts during the first deploy 43 | - name: state_exporter | Flush handlers 44 | ansible.builtin.meta: flush_handlers 45 | 46 | - name: state_exporter | Start exporter service 47 | ansible.builtin.systemd: 48 | name: "{{ state_exporter_name }}" 49 | state: started 50 | enabled: true 51 | daemon_reload: true 52 | -------------------------------------------------------------------------------- /roles/node/molecule/README.md: -------------------------------------------------------------------------------- 1 | ## Ansible molecule test 2 | [Molecule](https://molecule.readthedocs.io/en/latest/) allows us to apply and test roles. 3 | Molecule will create instances (platforms) using a selected driver (e.g. docker, vagrant, azure, gcp...). 4 | Then it will apply the role, test it (verify.yml) and clean everything up. 5 | 6 | ### Requirements 7 | - yamllint 8 | - ansible-lint 9 | - molecule 10 | 11 | **Install:** 12 | ```bash 13 | sudo su 14 | apt install -y yamllint 15 | pip3 install 'molecule[docker]' ansible-lint 16 | ``` 17 | **Check:** 18 | ```bash 19 | $ yamllint --version; ansible-lint --version; molecule --version; 20 | yamllint 1.20.0 21 | ansible-lint 5.3.0 using ansible 2.11.6 22 | molecule 3.5.2 using python 3.8 23 | ansible:2.11.6 24 | delegated:3.5.2 from molecule 25 | docker:1.1.0 from molecule_docker requiring collections: community.docker>=1.9.1 26 | ``` 27 | 28 | ### Test role 29 | #### Relaychain 30 | ```bash 31 | cd roles/node 32 | molecule test 33 | ``` 34 | #### Parachain 35 | ```bash 36 | cd roles/node 37 | molecule test -s parachain 38 | ``` 39 | 40 | 41 | ### Deploy locally 42 | You can deploy the role locally in a Docker container, e.g. to check node logs.
43 | #### Relaychain 44 | ```bash 45 | cd roles/node 46 | molecule lint 47 | molecule converge 48 | molecule verify 49 | molecule login 50 | > journalctl -f 51 | > exit 52 | molecule destroy # to clean everything 53 | ``` 54 | #### Parachain 55 | ```bash 56 | cd roles/node 57 | molecule lint 58 | molecule converge --scenario-name parachain 59 | molecule verify --scenario-name parachain 60 | molecule login --scenario-name parachain --host instance-parachain 61 | > journalctl -f 62 | > exit 63 | molecule destroy --scenario-name parachain # to clean everything 64 | ``` -------------------------------------------------------------------------------- /roles/node/tasks/900-systemd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Systemd | Get public IP 3 | community.general.ipify_facts: 4 | timeout: 30 5 | register: _node_ipify_result 6 | until: _node_ipify_result.failed is defined and not _node_ipify_result.failed 7 | retries: 3 8 | delay: 10 9 | when: node_enable_public_ip_detection | bool 10 | 11 | - name: Systemd | Create directories 12 | ansible.builtin.file: 13 | path: "{{ item }}" 14 | state: directory 15 | mode: "0755" 16 | owner: "{{ node_user }}" 17 | group: "{{ node_user }}" 18 | loop: 19 | - "{{ _node_data_root_path }}" 20 | 21 | - name: Systemd | Copy {{ node_app_name }} systemd unit file 22 | ansible.builtin.template: 23 | src: node.service.j2 24 | dest: "{{ _node_unit_file }}" 25 | owner: root 26 | group: root 27 | mode: "0600" 28 | notify: restart service {{ node_handler_id }} 29 | 30 | - name: Systemd | Copy {{ node_app_name }} environment variable file 31 | ansible.builtin.template: 32 | src: env.j2 33 | owner: "{{ node_user }}" 34 | group: "{{ node_user }}" 35 | dest: /etc/default/polkadot-{{ node_app_name }} 36 | mode: "0644" 37 | notify: restart service {{ node_handler_id }} 38 | tags: [node-memory-profiler] 39 | 40 | # to avoid 2 restarts during the first deploy 41 | - name: Systemd | Flush handlers 42 | ansible.builtin.meta: flush_handlers 43 | 44 | - name: Systemd | Start {{ node_app_name }} service 45 | ansible.builtin.systemd: 46 | name: "{{ node_app_name }}" 47 | state: "{{ 'started' if (node_start_service | bool) else 'stopped' }}" 48 | enabled: true 49 | daemon_reload: true 50 | notify: health check {{ node_handler_id }} 51 | ignore_errors: "{{ not _node_systemd_unit_file_stat.stat.exists }}" 52 | -------------------------------------------------------------------------------- /.github/workflows/branch-main.yml: -------------------------------------------------------------------------------- 1 | name: main branch 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | run-molecule-tests: 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | role-names: [node, ws_health_exporter, nginx, node_backup ] 14 | molecule-drivers: [docker] 15 | # We test the latest version and minimum supported version 16 | ansible-versions: [8.0.0, 9.0.1] 17 | uses: ./.github/workflows/reusable-molecule.yml 18 | with: 19 | role-name: ${{ matrix.role-names }} 20 | molecule-driver: ${{ matrix.molecule-drivers }} 21 | ansible-version: ${{ matrix.ansible-versions }} 22 | check-version: 23 | uses: ./.github/workflows/reusable-check-version.yml 24 | with: 25 | compare-versions: false 26 | deploy-galaxy: 27 | needs: [run-molecule-tests, check-version] 28 | uses: ./.github/workflows/reusable-galaxy-deploy.yml 29 | secrets: 30 | api-token: ${{ secrets.GALAXY_API_KEY }} 31 | create-git-tag: 32 | runs-on: ubuntu-22.04 33 | needs: 
[deploy-galaxy, check-version] 34 | env: 35 | CURRENT_GALAXY_VERSION: ${{ needs.check-version.outputs.current-galaxy-version }} 36 | steps: 37 | - name: Print tag version 38 | run: | 39 | echo "Tag version: ${CURRENT_GALAXY_VERSION}" 40 | - name: Create Tag 41 | uses: actions/github-script@v7 42 | with: 43 | script: | 44 | const {CURRENT_GALAXY_VERSION} = process.env 45 | github.rest.git.createRef({ 46 | owner: context.repo.owner, 47 | repo: context.repo.repo, 48 | ref: `refs/tags/${CURRENT_GALAXY_VERSION}`, 49 | sha: context.sha 50 | }) 51 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/files/pebble/key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAmxTFtw113RK70H9pQmdKs9AxhFmnQ6BdDtp3jOZlWlUO0Blt 3 | MXOUML5905etgtCbcC6RdKRtgSAiDfgx3VWiFMJH++4gUtnaB9SN8GhNSPBpFfSa 4 | 2JhWPo9HQNUsAZqlGTV4SzcGRqtWvdZxUiOfQ2TcvyXIqsaD19ivvqI1NhT6bl3t 5 | redTZlzLLM6Wvkw6hfyHrJAPQP8LOlCIeDM4YIce6Gstv6qo9iCD4wJiY4u95HVL 6 | 7RK8t8JpZAb7VR+dPhbHEvVpjwuYd5Q05OZ280gFyrhbrKLbqst104GOQT4kQMJG 7 | WxGONyTX6np0Dx6O5jU7dvYvjVVawbJwGuaL6wIDAQABAoIBAGW9W/S6lO+DIcoo 8 | PHL+9sg+tq2gb5ZzN3nOI45BfI6lrMEjXTqLG9ZasovFP2TJ3J/dPTnrwZdr8Et/ 9 | 357YViwORVFnKLeSCnMGpFPq6YEHj7mCrq+YSURjlRhYgbVPsi52oMOfhrOIJrEG 10 | ZXPAwPRi0Ftqu1omQEqz8qA7JHOkjB2p0i2Xc/uOSJccCmUDMlksRYz8zFe8wHuD 11 | XvUL2k23n2pBZ6wiez6Xjr0wUQ4ESI02x7PmYgA3aqF2Q6ECDwHhjVeQmAuypMF6 12 | IaTjIJkWdZCW96pPaK1t+5nTNZ+Mg7tpJ/PRE4BkJvqcfHEOOl6wAE8gSk5uVApY 13 | ZRKGmGkCgYEAzF9iRXYo7A/UphL11bR0gqxB6qnQl54iLhqS/E6CVNcmwJ2d9pF8 14 | 5HTfSo1/lOXT3hGV8gizN2S5RmWBrc9HBZ+dNrVo7FYeeBiHu+opbX1X/C1HC0m1 15 | wJNsyoXeqD1OFc1WbDpHz5iv4IOXzYdOdKiYEcTv5JkqE7jomqBLQk8CgYEAwkG/ 16 | rnwr4ThUo/DG5oH+l0LVnHkrJY+BUSI33g3eQ3eM0MSbfJXGT7snh5puJW0oXP7Z 17 | Gw88nK3Vnz2nTPesiwtO2OkUVgrIgWryIvKHaqrYnapZHuM+io30jbZOVaVTMR9c 18 | X/7/d5/evwXuP7p2DIdZKQKKFgROm1XnhNqVgaUCgYBD/ogHbCR5RVsOVciMbRlG 19 | UGEt3YmUp/vfMuAsKUKbT2mJM+dWHVlb+LZBa4pC06QFgfxNJi/aAhzSGvtmBEww 20 | xsXbaceauZwxgJfIIUPfNZCMSdQVIVTi2Smcx6UofBz6i/Jw14MEwlvhamaa7qVf 21 | kqflYYwelga1wRNCPopLaQKBgQCWsZqZKQqBNMm0Q9yIhN+TR+2d7QFjqeePoRPl 22 | 1qxNejhq25ojE607vNv1ff9kWUGuoqSZMUC76r6FQba/JoNbefI4otd7x/GzM9uS 23 | 8MHMJazU4okwROkHYwgLxxkNp6rZuJJYheB4VDTfyyH/ng5lubmY7rdgTQcNyZ5I 24 | majRYQKBgAMKJ3RlII0qvAfNFZr4Y2bNIq+60Z+Qu2W5xokIHCFNly3W1XDDKGFe 25 | CCPHSvQljinke3P9gPt2HVdXxcnku9VkTti+JygxuLkVg7E0/SWwrWfGsaMJs+84 26 | fK+mTZay2d3v24r9WKEKwLykngYPyZw5+BdWU0E+xx5lGUd3U4gG 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /roles/nginx/tasks/certs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nginx | certs | calculate list of custom certs 3 | ansible.builtin.set_fact: 4 | _nginx_custom_certs: "{{ nginx_sites | json_query(pattern) | map(attribute='ssl_manual_cert_file') | unique }}" 5 | vars: 6 | pattern: "[?ssl_issuer==`manual`]" 7 | 8 | - name: nginx | certs | print list of custom certs 9 | ansible.builtin.debug: 10 | var: _nginx_custom_certs 11 | 12 | - name: nginx | certs | manage cert directory 13 | ansible.builtin.file: 14 | name: "{{ _nginx_custom_certs_base_path }}" 15 | state: "{% if _nginx_custom_certs | length > 0 %}directory{% else %}absent{% endif %}" 16 | owner: root 17 | group: root 18 | mode: "0755" 19 | 20 | - name: nginx | certs | custom certs 21 | when: _nginx_custom_certs | length > 0 22 | block: 23 | - name: nginx | certs | custom 
certs | find unmanaged custom certs files 24 | ansible.builtin.find: 25 | paths: "{{ _nginx_custom_certs_base_path }}" 26 | patterns: ^((?!{{ _nginx_custom_certs | join('|') }}).)*$ 27 | use_regex: true 28 | register: _nginx_unmanaged_custom_certs_files 29 | 30 | - name: nginx | certs | custom certs | print list of unmanaged custom certs files 31 | ansible.builtin.debug: 32 | msg: "{{ _nginx_unmanaged_custom_certs_files.files | map(attribute='path') }}" 33 | 34 | - name: nginx | certs | custom certs | remove unmanaged custom certs files 35 | ansible.builtin.file: 36 | path: "{{ item.path }}" 37 | state: absent 38 | loop: "{{ _nginx_unmanaged_custom_certs_files.files }}" 39 | 40 | - name: nginx | certs | include tasks of sorts copying 41 | ansible.builtin.include_tasks: 42 | file: certs-loop.yml 43 | apply: 44 | tags: [nginx, nginx-custom-certs] 45 | loop: "{{ _nginx_custom_certs }}" 46 | -------------------------------------------------------------------------------- /roles/nginx/tasks/sites.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nginx | sites | build the list of site configs 1 3 | ansible.builtin.set_fact: 4 | _nginx_sites: [] 5 | 6 | - name: nginx | sites | build the list of site configs 2 7 | ansible.builtin.set_fact: 8 | _nginx_sites: "{{ _nginx_sites + [item | combine({'site_name': _nginx_site_name, 'site_id': _nginx_site_id, 'params': _nginx_site_params}, recursive=True)] }}" 9 | vars: 10 | _nginx_site_name: "{{ (item.template.split('.')[0] + '_' + item.domain) | regex_replace('[^0-9a-zA-Z]+', '_') }}" 11 | _nginx_site_id: "{{ (_nginx_site_name | hash('sha1'))[:6] }}" 12 | _nginx_site_params: "{{ item.params | default({}) }}" 13 | loop: "{{ nginx_sites }}" 14 | 15 | - name: nginx | sites | print the list of site configs 16 | ansible.builtin.debug: 17 | msg: "{{ _nginx_sites }}" 18 | 19 | - name: nginx | sites | find unmanaged site config files 20 | ansible.builtin.find: 21 | paths: /etc/nginx/sites-enabled 22 | patterns: ^((?!{{ _nginx_sites | map(attribute='site_name') | join('|') }}|default).)*$ 23 | use_regex: true 24 | register: _nginx_unmanaged_site_config_files 25 | 26 | - name: nginx | sites | print list of unmanaged site config files 27 | ansible.builtin.debug: 28 | msg: "{{ _nginx_unmanaged_site_config_files.files | map(attribute='path') }}" 29 | 30 | - name: nginx | sites | remove unmanaged site config files 31 | ansible.builtin.file: 32 | path: "{{ item.path }}" 33 | state: absent 34 | loop: "{{ _nginx_unmanaged_site_config_files.files }}" 35 | notify: reload nginx config 36 | 37 | - name: nginx | sites | copy site configs 38 | ansible.builtin.template: 39 | src: "{{ item.template }}" 40 | dest: /etc/nginx/sites-enabled/{{ item.site_name }} 41 | owner: root 42 | group: root 43 | mode: "0644" 44 | notify: reload nginx config 45 | loop: "{{ _nginx_sites }}" 46 | -------------------------------------------------------------------------------- /roles/node_backup/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | # R2 configuration 5 | node_backup_r2_access_key_id: "" 6 | node_backup_r2_secret_access_key: "" 7 | node_backup_r2_api_url: "" 8 | 9 | # S3 Compatible configuration (defaults to filling with r2 configuration for backward compat) 10 | node_backup_s3_access_key_id: "{{ node_backup_r2_access_key_id }}" 11 | node_backup_s3_secret_access_key: "{{ node_backup_r2_secret_access_key }}" 12 | node_backup_s3_endpoint: "{{ node_backup_r2_api_url 
}}" 13 | node_backup_s3_region: "" 14 | # The rclone provider to use for the backup 15 | node_backup_s3_provider: Cloudflare 16 | 17 | node_backup_max_concurrent_requests: 50 18 | 19 | node_backup_schedule: 20 | - "*-*-* 01:00:00" 21 | 22 | node_backup_user: polkadot 23 | 24 | node_backup_base_path: /opt/node_backup 25 | node_backup_tmp_path: /tmp 26 | 27 | # It wipes a local cash of the node-bakcup expoter. 28 | # It's useful if you rename or remove some backups from the 'node_backup_targets' variable 29 | node_backup_wipe_cache_enable: false 30 | 31 | # List of the nodes deployed to the host 32 | # service_name - is used to extract information about db type and should be following: 33 | # node_chain-<[paritydb|rocksdb]-[prune|archive] 34 | # where: `node_chain` is value of `node_chain` variable from `node` role. 35 | node_backup_targets: [] 36 | # - service_name: polkadot-rocksdb-prune 37 | # local_path: /opt/polkadot-rocksdb-prune/chains/polkadot/db 38 | # rpc_port: 9934 39 | # # old way of backups. It takes more time to restore and backup 40 | # # it's true by default 41 | # tar: false 42 | # # type of backup. can be 'gcp-native', 'gcp-rclone', 'r2-rclone' or 's3-rclone' 43 | # type: 'gcp-rclone' 44 | # # name of the bucket 45 | # bucket_name: "backup" 46 | # # the public domain name of the bucket 47 | # # it's empty by default 48 | # bucket_domain: "backup.polkadot.io" 49 | -------------------------------------------------------------------------------- /roles/nginx/templates/site-connect.j2: -------------------------------------------------------------------------------- 1 | upstream websocket_{{ item.site_id }} { 2 | server 127.0.0.1:{{ item.params.connect_port | default('30333') }}; 3 | } 4 | 5 | map $http_upgrade $endpoint_{{ item.site_id }} { 6 | default websocket_{{ item.site_id }}; 7 | '' close; 8 | } 9 | 10 | 11 | server { 12 | listen 443 ssl; 13 | server_name {{ item.domain }}; 14 | {% if item.ssl_issuer == 'letsencrypt' %} 15 | ssl_certificate /etc/letsencrypt/live/{{ item.domain }}/fullchain.pem; 16 | ssl_certificate_key /etc/letsencrypt/live/{{ item.domain }}/privkey.pem; 17 | {% elif item.ssl_issuer == 'manual' %} 18 | ssl_certificate {{ _nginx_custom_certs_base_path }}{{ item.ssl_manual_cert_file }}; 19 | ssl_certificate_key {{ _nginx_custom_certs_base_path }}{{ item.ssl_manual_cert_file }}; 20 | {% else %} 21 | {{ "the ssl_issuer parameter must be defined."/0 }} 22 | {% endif %} 23 | 24 | location / { 25 | limit_req zone=ipzone burst={{ nginx_burst_request_rate }}; 26 | proxy_buffers 16 4k; 27 | proxy_buffer_size 2k; 28 | proxy_pass http://$endpoint_{{ item.site_id }}; 29 | proxy_http_version 1.1; 30 | proxy_set_header Upgrade $http_upgrade; 31 | proxy_set_header Connection "Upgrade"; 32 | proxy_set_header Host $host; 33 | } 34 | } 35 | 36 | {% if item.params.no_host_external_port is defined %} 37 | # we need it to execute the cloud host healthcheck, we can't use TLS there 38 | server { 39 | listen {{ item.params.no_host_external_port }}; 40 | server_name _; 41 | location / { 42 | limit_req zone=ipzone burst={{ nginx_burst_request_rate }}; 43 | proxy_buffers 16 4k; 44 | proxy_buffer_size 2k; 45 | proxy_pass http://$endpoint_{{ item.site_id }}; 46 | proxy_http_version 1.1; 47 | proxy_set_header Upgrade $http_upgrade; 48 | proxy_set_header Connection "Upgrade"; 49 | proxy_set_header Host $host; 50 | } 51 | } 52 | {% endif %} 53 | -------------------------------------------------------------------------------- /roles/node_backup/tasks/tests.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: node-backup | test | check R2 configuration 3 | ansible.builtin.fail: 4 | msg: If the R2 backups are used, 'node_backup_r2_access_key_id', 'node_backup_r2_secret_access_key' and 'node_backup_r2_api_url' variables have to be specified 5 | when: node_backup_targets | json_query('[].type') | intersect(_node_backup_r2_types) | length > 0 and ( node_backup_r2_access_key_id == '' or node_backup_r2_secret_access_key 6 | == '' or node_backup_r2_api_url == '' ) 7 | 8 | - name: node-backup | test | check s3 configuration 9 | ansible.builtin.fail: 10 | msg: If the S3 backups are used, 'node_backup_s3_access_key_id', 'node_backup_s3_secret_access_key', 'node_backup_s3_endpoint' and 'node_backup_s3_provider' variables have to be specified 11 | when: node_backup_targets | json_query('[].type') | intersect(_node_backup_rclone_types) | length > 0 and ( node_backup_s3_access_key_id == '' or node_backup_s3_secret_access_key 12 | == '' or node_backup_s3_endpoint == '' or node_backup_s3_provider == '' ) 13 | 14 | - name: node-backup | test | check variables 15 | ansible.builtin.fail: 16 | msg: "'service_name', 'rpc_port', 'type' and 'bucket_name' fields have to be specified for each item in 'node_backup_targets'" 17 | when: item.service_name == '' or item.rpc_port == '' or item.type == '' or item.bucket_name == '' 18 | loop: "{{ node_backup_targets }}" 19 | 20 | - name: node-backup | test | check R2 backups 21 | ansible.builtin.fail: 22 | msg: the 'bucket_domain' field has to be specified for R2 backups 23 | when: item.type in _node_backup_r2_types and item.bucket_domain == '' 24 | loop: "{{ node_backup_targets }}" 25 | 26 | - name: node-backup | test | check backup types 27 | ansible.builtin.fail: 28 | msg: "{{ item.type }} is not a valid backup type" 29 | when: item.type not in (_node_backup_gcp_types + _node_backup_rclone_types) 30 | loop: "{{ node_backup_targets }}" 31 | -------------------------------------------------------------------------------- /.github/workflows/reusable-molecule.yml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_call: 3 | inputs: 4 | role-name: 5 | required: true 6 | type: string 7 | molecule-driver: 8 | required: true 9 | type: string 10 | ansible-version: 11 | required: false 12 | type: string 13 | default: 9.0.1 14 | jobs: 15 | molecule: 16 | runs-on: ubuntu-22.04 17 | env: 18 | DRIVER: ${{ inputs.molecule-driver }} 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v4 22 | with: 23 | path: "${{ github.repository }}" 24 | - name: Setup Python 25 | uses: actions/setup-python@v4 26 | with: 27 | python-version: '3.12' 28 | - name: Check molecule 29 | run: | 30 | if [ -d "molecule" ]; then 31 | echo "MOLECULE_IS_PRESENT=PRESENT" >> "${GITHUB_ENV}" 32 | fi 33 | working-directory: "${{ github.repository }}/roles/${{ inputs.role-name }}" 34 | - name: Install Python modules 35 | run: | 36 | set -e 37 | echo "ansible==${{ inputs.ansible-version }}" >> requirements-molecule.txt 38 | pip3 install --no-cache-dir -r requirements-molecule.txt 39 | working-directory: "${{ github.repository }}/.github/workflows" 40 | - name: Print versions 41 | run: | 42 | set -e 43 | ansible --version 44 | molecule --version 45 | yamllint --version 46 | ansible-lint --version 47 | - name: Run lint 48 | run: | 49 | set -e 50 | yamllint .
51 | ansible-lint --exclude .ansible/collections 52 | working-directory: "${{ github.repository }}/roles/${{ inputs.role-name }}" 53 | - name: Run molecule tests 54 | if: ${{ env.MOLECULE_IS_PRESENT }} 55 | run: molecule test --all 56 | working-directory: "${{ github.repository }}/roles/${{ inputs.role-name }}" 57 | -------------------------------------------------------------------------------- /roles/ws_health_exporter/molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: false 5 | pre_tasks: 6 | - name: Install Python3 7 | ansible.builtin.raw: apt -y update && apt install -y python3 8 | changed_when: false 9 | - name: Install required packages 10 | ansible.builtin.apt: 11 | name: 12 | - gpg 13 | update_cache: false 14 | changed_when: false 15 | tasks: 16 | - name: Include node alice 17 | # use include role to skip ansible-lint 18 | ansible.builtin.include_role: 19 | name: node 20 | vars: 21 | node_app_name: "alice" 22 | node_handler_id: "{{ node_app_name }}" 23 | node_data_root_path: "/opt/{{ node_app_name }}" 24 | # 12D3KooWHhB5LqXji1moEvbzCEq7HzJuMvqi5E3BkoceUcQFPX2f 25 | node_p2p_private_key: "4f1ae54a051e08161456b74a70b85e45e161fd4f614637f50a2a5f09ba7afb2e" 26 | node_custom_options: 27 | - "--alice" 28 | - "--bootnodes /ip4/127.0.0.1/tcp/30334/p2p/12D3KooWKvNLq5fFMcQvdZHejhUvRQcSxDpbWFBo1mXh4kGR949r" 29 | - "--no-hardware-benchmarks" 30 | - "--rpc-cors '*'" 31 | node_p2p_port: "30333" 32 | node_prometheus_port: "9615" 33 | node_rpc_port: "9933" 34 | - name: Include node bob 35 | ansible.builtin.include_role: 36 | name: node 37 | vars: 38 | node_app_name: bob 39 | node_handler_id: "{{ node_app_name }}" 40 | node_data_root_path: /opt/{{ node_app_name }} 41 | # 12D3KooWKvNLq5fFMcQvdZHejhUvRQcSxDpbWFBo1mXh4kGR949r 42 | node_p2p_private_key: "042cd72c647f27c6da663f15665b59f707bce7de4b771b098361ff756cde168e" 43 | node_custom_options: 44 | - "--bob" 45 | - "--bootnodes /ip4/127.0.0.1/tcp/30333/p2p/12D3KooWHhB5LqXji1moEvbzCEq7HzJuMvqi5E3BkoceUcQFPX2f" 46 | - "--no-hardware-benchmarks" 47 | - "--rpc-cors '*'" 48 | node_p2p_port: "30334" 49 | node_prometheus_port: "9616" 50 | node_rpc_port: "9934" 51 | -------------------------------------------------------------------------------- /roles/nginx/templates/site-rpc.j2: -------------------------------------------------------------------------------- 1 | upstream rpc_{{ item.site_id }} { 2 | server 127.0.0.1:{{ item.params.rpc_port | default('9944') }}; 3 | } 4 | 5 | upstream ws_{{ item.site_id }} { 6 | server 127.0.0.1:{{ item.params.rpc_ws_port | default('9944') }}; 7 | } 8 | 9 | # map to different upstream backends based on header 10 | map $http_upgrade $endpoint_{{ item.site_id }} { 11 | default ws_{{ item.site_id }}; 12 | '' rpc_{{ item.site_id }}; 13 | } 14 | 15 | server { 16 | listen 443 ssl; 17 | server_name {{ item.domain }}; 18 | {% if item.ssl_issuer == 'letsencrypt' %} 19 | ssl_certificate /etc/letsencrypt/live/{{ item.domain }}/fullchain.pem; 20 | ssl_certificate_key /etc/letsencrypt/live/{{ item.domain }}/privkey.pem; 21 | {% elif item.ssl_issuer == 'manual' %} 22 | ssl_certificate {{ _nginx_custom_certs_base_path }}{{ item.ssl_manual_cert_file }}; 23 | ssl_certificate_key {{ _nginx_custom_certs_base_path }}{{ item.ssl_manual_cert_file }}; 24 | {% else %} 25 | {{ "the ssl_issuer parameter must be defined."/0 }} 26 | {% endif %} 27 | location / { 28 | limit_req zone=zone burst={{ nginx_burst_request_rate }}; 29 | 
proxy_buffers 16 4k; 30 | proxy_buffer_size 2k; 31 | proxy_pass http://$endpoint_{{ item.site_id }}; 32 | proxy_http_version 1.1; 33 | proxy_set_header Upgrade $http_upgrade; 34 | proxy_set_header Connection "Upgrade"; 35 | proxy_set_header Host $host; 36 | } 37 | } 38 | 39 | {% if item.params.no_host_external_port is defined %} 40 | # we need it to execute the cloud host healthcheck; we can't use TLS there 41 | server { 42 | listen {{ item.params.no_host_external_port }}; 43 | server_name _; 44 | location / { 45 | limit_req zone=zone burst={{ nginx_burst_request_rate }}; 46 | proxy_buffers 16 4k; 47 | proxy_buffer_size 2k; 48 | proxy_pass http://$endpoint_{{ item.site_id }}; 49 | proxy_http_version 1.1; 50 | proxy_set_header Upgrade $http_upgrade; 51 | proxy_set_header Connection "Upgrade"; 52 | proxy_set_header Host $host; 53 | } 54 | } 55 | {% endif %} 56 | -------------------------------------------------------------------------------- /roles/nginx/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nginx_letsencrypt_email: devops-team@parity.io 3 | nginx_letsencrypt_mock: false 4 | nginx_dhparam_size: 4096 5 | nginx_worker_rlimit_nofile: 30000 6 | # requests per second 7 | nginx_max_request_rate: 2 8 | nginx_burst_request_rate: 5 9 | 10 | # print extended data about clients 11 | nginx_log_extended_enable: false 12 | 13 | nginx_http_context_directives: [] 14 | # - "server_names_hash_bucket_size 128" 15 | 16 | # flow 17 | ## Remove nginx and letsencrypt. Wipe all configs and certificates. 18 | nginx_remove_enable: false 19 | 20 | # 'nginx_sites': 21 | ## - 'template' - a name of a site template file, including '.j2'. 22 | ## - 'domain' - a real domain name as is, without placeholders etc. 23 | ## - 'ssl_issuer' - defines how TLS certificates are managed. Can be 'manual' or 'letsencrypt'. 24 | ## - 'ssl_manual_cert_file' - it must be specified if 'ssl_issuer'='manual'. 25 | ## It defines the name of a custom certificate file. 26 | ## Custom certificates have to be stored in the 'files' directories on the role or playbook levels. 27 | ## But it's better to store them on the playbook level. 28 | ## - 'params' - optional. But, it must be specified if the template of the site uses any custom variables inside. 29 | ## The dictionary contains user variables that are used in site templates. 30 | ## 31 | ## 'template', 'domain', 'ssl_manual_cert_file' variables can have the same values 32 | ## in more than one item of the 'nginx_sites' list, the role can manage it. 33 | ## But, a pair of 'template' and 'domain' variables must be unique for each item of the list. 34 | 35 | #nginx_sites: 36 | # - template: site-rpc.j2 37 | # domain: "a.r-test-2.parity-lab.parity.io" 38 | # ssl_issuer: letsencrypt 39 | # params: 40 | # rpc_port: 9933 41 | # rpc_ws_port: 9944 42 | # no_host_external_port: 8081 # it accepts any host in headers. It's useful for health checks.
43 | # - template: site-rpc.j2 44 | # domain: "b.r-test-2.parity-lab.parity.io" 45 | # ssl_issuer: letsencrypt 46 | # params: {} 47 | # - template: site-connect.j2 48 | # domain: "c.r-test-2.parity-lab.parity.io" 49 | # ssl_issuer: manual 50 | # ssl_manual_cert_file: "ws.polkadot.io.pem" 51 | -------------------------------------------------------------------------------- /roles/node_backup/molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: false 5 | pre_tasks: 6 | - name: Prepare | Install Python3 7 | ansible.builtin.raw: apt -y update && apt install -y python3 8 | changed_when: false 9 | - name: Prepare | Install required packages 10 | ansible.builtin.apt: 11 | name: 12 | - gpg 13 | - cron 14 | update_cache: false 15 | changed_when: false 16 | - name: Prepare | create user parity 17 | ansible.builtin.user: 18 | name: polkadot 19 | tasks: 20 | - name: Include node alice 21 | # use include role to skip ansible-lint 22 | ansible.builtin.include_role: 23 | name: node 24 | vars: 25 | node_app_name: "alice-rocksdb-prune" 26 | node_handler_id: "{{ node_app_name }}" 27 | node_data_root_path: "/opt/{{ node_app_name }}" 28 | # 12D3KooWHhB5LqXji1moEvbzCEq7HzJuMvqi5E3BkoceUcQFPX2f 29 | node_p2p_private_key: "4f1ae54a051e08161456b74a70b85e45e161fd4f614637f50a2a5f09ba7afb2e" 30 | node_custom_options: 31 | - "--alice" 32 | - "--bootnodes /ip4/127.0.0.1/tcp/30334/p2p/12D3KooWKvNLq5fFMcQvdZHejhUvRQcSxDpbWFBo1mXh4kGR949r" 33 | - "--no-hardware-benchmarks" 34 | - "--rpc-cors '*'" 35 | node_p2p_port: "30333" 36 | node_prometheus_port: "9615" 37 | node_rpc_port: "9933" 38 | - name: Include node bob 39 | ansible.builtin.include_role: 40 | name: node 41 | vars: 42 | node_app_name: "bob-paritydb-prune" 43 | node_handler_id: "{{ node_app_name }}" 44 | node_data_root_path: /opt/{{ node_app_name }} 45 | # 12D3KooWKvNLq5fFMcQvdZHejhUvRQcSxDpbWFBo1mXh4kGR949r 46 | node_p2p_private_key: "042cd72c647f27c6da663f15665b59f707bce7de4b771b098361ff756cde168e" 47 | node_custom_options: 48 | - "--bob" 49 | - "--bootnodes /ip4/127.0.0.1/tcp/30333/p2p/12D3KooWHhB5LqXji1moEvbzCEq7HzJuMvqi5E3BkoceUcQFPX2f" 50 | - "--no-hardware-benchmarks" 51 | - "--rpc-cors '*'" 52 | node_p2p_port: "30334" 53 | node_prometheus_port: "9616" 54 | node_rpc_port: "9934" 55 | node_paritydb_enable: true 56 | -------------------------------------------------------------------------------- /roles/node/tasks/801-restore-chain-tar.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restore {{ item.part }} | Tar restoring | Delete temp download folder 3 | ansible.builtin.file: 4 | path: "{{ item.chain_path }}/tmp" 5 | state: absent 6 | changed_when: false 7 | ignore_errors: "{{ not _node_data_chain_path_stat.stat.exists }}" 8 | 9 | - name: Restore {{ item.part }} | Tar restoring | Create temp download folder 10 | ansible.builtin.file: 11 | path: "{{ item.chain_path }}/tmp" 12 | state: directory 13 | owner: "{{ node_user }}" 14 | group: "{{ node_user }}" 15 | mode: "0755" 16 | changed_when: false 17 | ignore_errors: "{{ not _node_data_chain_path_stat.stat.exists }}" 18 | 19 | - name: Restore {{ item.part }} | Tar restoring | Check if temp download folder already exists 20 | ansible.builtin.stat: 21 | path: "{{ item.chain_path }}/tmp" 22 | get_checksum: false 23 | register: _node_data_chain_tmp_path_stat 24 | 25 | - name: Restore {{ item.part }} | Tar restoring | Download chain backup 
26 | ansible.builtin.get_url: 27 | url: "{{ item.tar_url }}" 28 | dest: "{{ item.chain_path }}/db.tar" 29 | tmp_dest: "{{ item.chain_path }}/tmp" 30 | owner: "{{ node_user }}" 31 | group: "{{ node_user }}" 32 | mode: "0644" 33 | timeout: 900 34 | ignore_errors: "{{ not _node_data_chain_path_stat.stat.exists or not _node_data_chain_tmp_path_stat.stat.exists }}" 35 | 36 | - name: Restore {{ item.part }} | Tar restoring | Check if backup file already exists 37 | ansible.builtin.stat: 38 | path: "{{ item.chain_path }}/db.tar" 39 | get_checksum: false 40 | register: _node_data_chain_backup_file_stat 41 | 42 | - name: Restore {{ item.part }} | Tar restoring | Extract chain backup 43 | ansible.builtin.unarchive: 44 | copy: false 45 | src: "{{ item.chain_path }}/db.tar" 46 | dest: "{{ item.chain_path }}/{{ item.db_folder }}" 47 | owner: "{{ node_user }}" 48 | group: "{{ node_user }}" 49 | ignore_errors: "{{ not _node_data_chain_path_stat.stat.exists or not _node_data_chain_backup_file_stat.stat.exists }}" 50 | notify: restart service {{ node_handler_id }} 51 | 52 | - name: Restore {{ item.part }} | Tar restoring | Delete backup archive 53 | ansible.builtin.file: 54 | path: "{{ item.chain_path }}/db.tar" 55 | state: absent 56 | ignore_errors: "{{ not _node_data_chain_path_stat.stat.exists }}" 57 | -------------------------------------------------------------------------------- /roles/key_inject/tasks/inject.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Inject keys 3 | tags: [key-inject, key_inject] 4 | block: 5 | - name: Inject | Setting {{ item.type }} pub keys 6 | ansible.builtin.set_fact: 7 | key_inject_pub_key: "{{ (item.priv_key | paritytech.chain.subkey_inspect(scheme=(item.scheme | default('sr25519')))).publicKey }}" 8 | 9 | - name: Inject | Check {{ item.type }} key 10 | ansible.builtin.uri: 11 | url: http://127.0.0.1:{{ item.rpc_port | default(key_inject_relay_chain_rpc_port) }} 12 | method: POST 13 | body: 14 | jsonrpc: "2.0" 15 | method: author_hasKey 16 | params: ["{{ key_inject_pub_key }}", "{{ item.type }}"] 17 | id: 1 18 | body_format: json 19 | headers: 20 | Content-Type: application/json 21 | use_proxy: false 22 | changed_when: false 23 | check_mode: false 24 | # retries 10 times, because this role can run after node role without pause, and the node is not up yet 25 | until: key_inject_uri.status is defined and key_inject_uri.status == 200 26 | retries: 12 27 | delay: 10 28 | register: key_inject_uri 29 | 30 | - name: Inject | Check {{ item.type }} key results 31 | ansible.builtin.debug: 32 | msg: Key {{ key_inject_pub_key }} ({{ item.type }}, {{ item.scheme | default('sr25519') }}) is {{ 'NOT ' if not key_inject_uri.json.result else '' }}present 33 | in keystore 34 | changed_when: not key_inject_uri.json.result 35 | 36 | - name: Inject | Inject {{ item.type }} keys 37 | ansible.builtin.uri: 38 | url: http://127.0.0.1:{{ item.rpc_port | default(key_inject_relay_chain_rpc_port) }} 39 | method: POST 40 | body: 41 | jsonrpc: "2.0" 42 | method: author_insertKey 43 | params: ["{{ item.type }}", "{{ item.priv_key }}", "{{ key_inject_pub_key }}"] 44 | id: 1 45 | body_format: json 46 | headers: 47 | Content-Type: application/json 48 | use_proxy: false 49 | changed_when: true 50 | notify: Restart service 51 | register: key_inject_uri 52 | when: not key_inject_uri.json.result 53 | 54 | - name: Inject | Inject {{ item.type }} keys results 55 | ansible.builtin.debug: 56 | var: key_inject_uri 57 | when: not ansible_check_mode 58 | 
-------------------------------------------------------------------------------- /roles/ws_health_exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ws_health_exporter 3 | tags: [ws_health_exporter, ws-health-exporter] 4 | block: 5 | - name: ws_health_exporter | install apt packages 6 | ansible.builtin.package: 7 | name: "{{ packages }}" 8 | state: present 9 | update_cache: true 10 | vars: 11 | packages: 12 | - python3-venv 13 | - python3-setuptools 14 | 15 | - name: ws_health_exporter | create base directory 16 | ansible.builtin.file: 17 | path: "{{ ws_health_exporter_base_path }}" 18 | state: directory 19 | mode: "0755" 20 | owner: root 21 | group: root 22 | 23 | - name: ws_health_exporter | download exporter file 24 | ansible.builtin.get_url: 25 | url: "{{ ws_health_exporter_url }}" 26 | dest: "{{ _ws_health_exporter_file }}" 27 | mode: "0755" 28 | owner: root 29 | group: root 30 | timeout: 30 31 | notify: restart ws-health-exporter 32 | 33 | - name: ws_health_exporter | install Python modules 34 | ansible.builtin.pip: 35 | name: 36 | - prometheus-client==0.16.0 37 | - websocket-client==1.5.1 38 | - apscheduler==3.10.1 39 | - flask==3.0.0 40 | - environs==9.5.0 41 | - waitress==2.1.2 42 | virtualenv: "{{ _ws_health_exporter_venv }}" 43 | virtualenv_command: python3 -m venv 44 | 45 | - name: ws_health_exporter | set root as owner of the venv directory 46 | ansible.builtin.file: 47 | path: "{{ _ws_health_exporter_venv }}" 48 | state: directory 49 | recurse: true 50 | owner: root 51 | group: root 52 | 53 | - name: ws_health_exporter | copy exporter systemd unit file 54 | ansible.builtin.template: 55 | src: .service.j2 56 | dest: /etc/systemd/system/{{ _ws_health_exporter_name }}.service 57 | owner: root 58 | group: root 59 | mode: "0600" 60 | notify: restart ws-health-exporter 61 | 62 | # to avoid 2 restarts during the first deploy 63 | - name: ws_health_exporter | Flush handlers 64 | ansible.builtin.meta: flush_handlers 65 | 66 | - name: ws_health_exporter | start exporter service 67 | ansible.builtin.systemd: 68 | name: "{{ _ws_health_exporter_name }}" 69 | state: started 70 | enabled: true 71 | daemon_reload: true 72 | -------------------------------------------------------------------------------- /.github/workflows/reusable-check-version.yml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_call: 3 | inputs: 4 | compare-versions: 5 | required: false 6 | type: boolean 7 | default: true 8 | outputs: 9 | current-galaxy-version: 10 | description: "Current Galaxy version" 11 | value: ${{ jobs.check-version.outputs.current-galaxy-version }} 12 | jobs: 13 | check-version: 14 | runs-on: ubuntu-22.04 15 | outputs: 16 | current-galaxy-version: ${{ steps.check-current-version.outputs.current-galaxy-version }} 17 | steps: 18 | - name: Setup Python modules 19 | run: pip3 install --no-cache-dir yq 20 | - name: Checkout current ref 21 | uses: actions/checkout@v4 22 | with: 23 | path: "${{ github.repository }}" 24 | - name: Checkout main ref 25 | if: ${{ inputs.compare-versions }} 26 | uses: actions/checkout@v4 27 | with: 28 | ref: 'main' 29 | path: "main/${{ github.repository }}" 30 | - name: Check the current version 31 | id: check-current-version 32 | run: | 33 | CURRENT_GALAXY_VERSION=$(cat ${GITHUB_REPOSITORY}/galaxy.yml | yq -r '.version' | tr -d '\n') 34 | echo "Current Galaxy version: ${CURRENT_GALAXY_VERSION}" 35 | if [ "$CURRENT_GALAXY_VERSION" != 'null' ] 36 | 
then 37 | echo "CURRENT_GALAXY_VERSION=${CURRENT_GALAXY_VERSION}" >> "${GITHUB_ENV}" 38 | echo "current-galaxy-version=${CURRENT_GALAXY_VERSION}" >> "${GITHUB_OUTPUT}" 39 | fi 40 | - name: Check the version in the main branch 41 | if: ${{ inputs.compare-versions }} 42 | run: | 43 | MAIN_GALAXY_VERSION=$(cat main/${GITHUB_REPOSITORY}/galaxy.yml | yq -r '.version' | tr -d '\n') 44 | echo "Galaxy version in the main branch: ${MAIN_GALAXY_VERSION}" 45 | echo "MAIN_GALAXY_VERSION=${MAIN_GALAXY_VERSION}" >> "${GITHUB_ENV}" 46 | - name: Validate the current version 47 | if: ${{ ! env.CURRENT_GALAXY_VERSION }} 48 | uses: actions/github-script@v7 49 | with: 50 | script: | 51 | core.setFailed('Your Galaxy version is absent or empty!') 52 | - name: Compare versions 53 | if: ${{ inputs.compare-versions }} 54 | uses: jackbilestech/semver-compare@1.0.4 55 | with: 56 | base: ${{ env.MAIN_GALAXY_VERSION }} 57 | head: ${{ env.CURRENT_GALAXY_VERSION }} 58 | operator: '>=' 59 | -------------------------------------------------------------------------------- /roles/node/tasks/001-health-check.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Health check | Collect service facts 3 | ansible.builtin.service_facts: 4 | tags: [node-restore-chain] 5 | 6 | - name: Health check | Fail if service is not running 7 | ansible.builtin.fail: 8 | msg: Service {{ node_app_name }} is not running 9 | when: 10 | - ansible_facts.services[node_app_name+'.service'] is defined 11 | - ansible_facts.services[node_app_name+'.service'].state != 'running' 12 | - not (_node_pre_check | default(false)) 13 | 14 | - name: Health check | Show service state 15 | ansible.builtin.debug: 16 | var: ansible_facts.services[node_app_name+'.service'].state 17 | 18 | - name: Health check | Block 19 | when: ansible_facts.services[node_app_name+'.service'] is defined and ansible_facts.services[node_app_name+'.service'].state not in ['stopped', 'inactive'] 20 | block: 21 | - name: Health check | Run health check 22 | ansible.builtin.uri: 23 | url: http://127.0.0.1:{{ (node_parachain_relay_chain_rpc_urls | length != 0) | ternary(node_parachain_rpc_port, node_rpc_port) }} 24 | method: POST 25 | body_format: json 26 | body: 27 | id: 1 28 | jsonrpc: "2.0" 29 | method: system_health 30 | params: [] 31 | return_content: true 32 | use_proxy: false 33 | register: _node_health_check_register 34 | until: _node_health_check_register.status is defined and _node_health_check_register.status == 200 35 | retries: 12 36 | delay: 10 37 | check_mode: false 38 | changed_when: false 39 | 40 | - name: Health check | Print health check result 41 | ansible.builtin.debug: 42 | msg: | 43 | {{ node_app_name }} is healthy 44 | peers: {{ _node_health_check_register.json.result.peers }} 45 | 46 | - name: Health check | Check the current version using API 47 | ansible.builtin.uri: 48 | url: http://127.0.0.1:{{ (node_parachain_relay_chain_rpc_urls | length != 0) | ternary(node_parachain_rpc_port, node_rpc_port) }} 49 | method: POST 50 | body_format: json 51 | body: 52 | id: 1 53 | jsonrpc: "2.0" 54 | method: system_version 55 | params: [] 56 | return_content: true 57 | use_proxy: false 58 | register: _node_version_check_register 59 | until: _node_version_check_register.status is defined and _node_version_check_register.status == 200 60 | retries: 2 61 | delay: 10 62 | check_mode: false 63 | changed_when: false 64 | 65 | - name: Health check | Print the current version according to API 66 | ansible.builtin.debug: 67 | msg: The
current version is {{ _node_version_check_register.json.result }} 68 | -------------------------------------------------------------------------------- /galaxy.yml: -------------------------------------------------------------------------------- 1 | ### REQUIRED 2 | # The namespace of the collection. This can be a company/brand/organization or product namespace under which all 3 | # content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with 4 | # underscores or numbers and cannot contain consecutive underscores 5 | namespace: paritytech 6 | 7 | # The name of the collection. Has the same character restrictions as 'namespace' 8 | name: chain 9 | 10 | # The version of the collection. Must be compatible with semantic versioning 11 | version: 1.10.10 12 | 13 | # The path to the Markdown (.md) readme file. This path is relative to the root of the collection 14 | readme: README.md 15 | 16 | # A list of the collection's content authors. Can be just the name or in the format 'Full Name (url) 17 | # @nicks:irc/im.site#channel' 18 | authors: 19 | - Devops Team 20 | 21 | ### OPTIONAL but strongly recommended 22 | # A short summary description of the collection 23 | description: parity chain operations 24 | 25 | # Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only 26 | # accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file' 27 | license: 28 | - GPL-2.0-or-later 29 | 30 | # The path to the license file for the collection. This path is relative to the root of the collection. This key is 31 | # mutually exclusive with 'license' 32 | license_file: '' 33 | 34 | # A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character 35 | # requirements as 'namespace' and 'name' 36 | tags: 37 | - parity 38 | - substrate 39 | - polkadot 40 | - kusama 41 | - validator 42 | - parachain 43 | 44 | # Collections that this collection requires to be installed for it to be usable. The key of the dict is the 45 | # collection label 'namespace.name'. The value is a version range 46 | # L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version 47 | # range specifiers can be set and are separated by ',' 48 | dependencies: {} 49 | 50 | # The URL of the originating SCM repository 51 | repository: https://github.com/paritytech/ansible-polkadot.git 52 | 53 | # The URL to any online docs 54 | documentation: https://github.com/paritytech/ansible-polkadot 55 | 56 | # The URL to the homepage of the collection/project 57 | homepage: https://parity.io 58 | 59 | # The URL to the collection issue tracker 60 | issues: https://github.com/paritytech/ansible-polkadot/issues 61 | 62 | # A list of file glob-like patterns used to filter any files or directories that should not be included in the build 63 | # artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This 64 | # uses 'fnmatch' to match the files or directories. 
Some directories and files like 'galaxy.yml', '*.pyc', '*.retry', 65 | # and '.git' are always filtered 66 | build_ignore: [] 67 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/files/test1.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEWjCCA7ugAwIBAgIUFi6dt6Ljwt7usmsFraZQkHbBR+4wCgYIKoZIzj0EAwIw 3 | XTELMAkGA1UEBhMCRVUxDTALBgNVBAgMBFRFU1QxDTALBgNVBAcMBFRFU1QxDTAL 4 | BgNVBAoMBFRFU1QxDTALBgNVBAsMBFRFU1QxEjAQBgNVBAMMCSoucnBjLmxhbjAe 5 | Fw0yMjA0MjUxODExMzZaFw0zMjA0MjIxODExMzZaMF0xCzAJBgNVBAYTAkVVMQ0w 6 | CwYDVQQIDARURVNUMQ0wCwYDVQQHDARURVNUMQ0wCwYDVQQKDARURVNUMQ0wCwYD 7 | VQQLDARURVNUMRIwEAYDVQQDDAkqLnJwYy5sYW4wggJdMIIB0AYHKoZIzj0CATCC 8 | AcMCAQEwTQYHKoZIzj0BAQJCAf////////////////////////////////////// 9 | ////////////////////////////////////////////////MIGfBEIB//////// 10 | //////////////////////////////////////////////////////////////// 11 | //////////////wEQgBRlT65YY4cmh+SmiGgtoVA7qLacluZsxXzuLSJkY7xCeFW 12 | GTlR7H6TexZSwL07sb8HNXPfiD0sNPHvRR/Ua1A/AAMVANCeiAApHLhTlsxnFzky 13 | hKqg2mS6BIGFBADGhY4GtwQE6c2ePstmI5W0QpxkgTkFP7Uh+CivYGtNPbqhS153 14 | 7+dZKP4dwSei/6jeM0izwYVqQpv5fn4xwuW9ZgEYOSlqeJo7wARcil+0LH0b2Zj1 15 | RElXm0RoF6+9Fyc+ZiyX7nKZXvQmQMVQuQE/rQdhNTxwhqJywkCIvpR2n9FmUAJC 16 | Af//////////////////////////////////////////+lGGh4O/L5Zrf8wBSPcJ 17 | pdA7tcm4iZxHrrtvtx6ROGQJAgEBA4GGAAQBexiUvRext+wPweeabtxEBGRhrUHY 18 | AVVOVGJNZajY4cs1fFj6ibC7hRDx/zoe1K8/8JfPjIErpGGUDddwDPGO9BsAaxnw 19 | 9Zjsqcg7eVZ80FCcV7NkidCWowW+2vNGJVz+H7Uvni/nP8EV/rirPkikJ7GLZo+i 20 | 2GFlbyd5aiXjpXmiuBejUzBRMB0GA1UdDgQWBBTDRA1ZGMG/Mug+UA/FpXbxKhIt 21 | +TAfBgNVHSMEGDAWgBTDRA1ZGMG/Mug+UA/FpXbxKhIt+TAPBgNVHRMBAf8EBTAD 22 | AQH/MAoGCCqGSM49BAMCA4GMADCBiAJCARjvVk6dGYDkBNzwyGOOP9MzWnsNgaDg 23 | HPnpBFMM5Ut/P/P9ApWcc4HDOv+zp22KkgVfoIyF1J3S6djA/3qHMV5XAkIBjlfr 24 | LznARSA19eeU69LHSd4+kfZ/45eE48bfC/pxkgmRMdIFqb5r72z5quj/W/SM4F8s 25 | l3LHQFWzwMex/DVwU/I= 26 | -----END CERTIFICATE----- 27 | -----BEGIN EC PARAMETERS----- 28 | MIIBwwIBATBNBgcqhkjOPQEBAkIB//////////////////////////////////// 29 | //////////////////////////////////////////////////8wgZ8EQgH///// 30 | //////////////////////////////////////////////////////////////// 31 | /////////////////ARCAFGVPrlhjhyaH5KaIaC2hUDuotpyW5mzFfO4tImRjvEJ 32 | 4VYZOVHsfpN7FlLAvTuxvwc1c9+IPSw08e9FH9RrUD8AAxUA0J6IACkcuFOWzGcX 33 | OTKEqqDaZLoEgYUEAMaFjga3BATpzZ4+y2YjlbRCnGSBOQU/tSH4KK9ga009uqFL 34 | Xnfv51ko/h3BJ6L/qN4zSLPBhWpCm/l+fjHC5b1mARg5KWp4mjvABFyKX7QsfRvZ 35 | mPVESVebRGgXr70XJz5mLJfucple9CZAxVC5AT+tB2E1PHCGonLCQIi+lHaf0WZQ 36 | AkIB///////////////////////////////////////////6UYaHg78vlmt/zAFI 37 | 9wml0Du1ybiJnEeuu2+3HpE4ZAkCAQE= 38 | -----END EC PARAMETERS----- 39 | -----BEGIN EC PRIVATE KEY----- 40 | MIICngIBAQRCAdg/rpMq12SkpeFecWlMaOjp1Xrd7TKCyHyrz5m5W7MEDowo80h3 41 | wBFdrCtauwzSz6UBzBvk4QMdaAhVlCngel/woIIBxzCCAcMCAQEwTQYHKoZIzj0B 42 | AQJCAf////////////////////////////////////////////////////////// 43 | ////////////////////////////MIGfBEIB//////////////////////////// 44 | //////////////////////////////////////////////////////////wEQgBR 45 | lT65YY4cmh+SmiGgtoVA7qLacluZsxXzuLSJkY7xCeFWGTlR7H6TexZSwL07sb8H 46 | NXPfiD0sNPHvRR/Ua1A/AAMVANCeiAApHLhTlsxnFzkyhKqg2mS6BIGFBADGhY4G 47 | twQE6c2ePstmI5W0QpxkgTkFP7Uh+CivYGtNPbqhS1537+dZKP4dwSei/6jeM0iz 48 | wYVqQpv5fn4xwuW9ZgEYOSlqeJo7wARcil+0LH0b2Zj1RElXm0RoF6+9Fyc+ZiyX 49 | 7nKZXvQmQMVQuQE/rQdhNTxwhqJywkCIvpR2n9FmUAJCAf////////////////// 50 | 
////////////////////////+lGGh4O/L5Zrf8wBSPcJpdA7tcm4iZxHrrtvtx6R 51 | OGQJAgEBoYGJA4GGAAQBexiUvRext+wPweeabtxEBGRhrUHYAVVOVGJNZajY4cs1 52 | fFj6ibC7hRDx/zoe1K8/8JfPjIErpGGUDddwDPGO9BsAaxnw9Zjsqcg7eVZ80FCc 53 | V7NkidCWowW+2vNGJVz+H7Uvni/nP8EV/rirPkikJ7GLZo+i2GFlbyd5aiXjpXmi 54 | uBc= 55 | -----END EC PRIVATE KEY----- -------------------------------------------------------------------------------- /roles/nginx/molecule/default/files/test2.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEWjCCA7ugAwIBAgIUFi6dt6Ljwt7usmsFraZQkHbBR+4wCgYIKoZIzj0EAwIw 3 | XTELMAkGA1UEBhMCRVUxDTALBgNVBAgMBFRFU1QxDTALBgNVBAcMBFRFU1QxDTAL 4 | BgNVBAoMBFRFU1QxDTALBgNVBAsMBFRFU1QxEjAQBgNVBAMMCSoucnBjLmxhbjAe 5 | Fw0yMjA0MjUxODExMzZaFw0zMjA0MjIxODExMzZaMF0xCzAJBgNVBAYTAkVVMQ0w 6 | CwYDVQQIDARURVNUMQ0wCwYDVQQHDARURVNUMQ0wCwYDVQQKDARURVNUMQ0wCwYD 7 | VQQLDARURVNUMRIwEAYDVQQDDAkqLnJwYy5sYW4wggJdMIIB0AYHKoZIzj0CATCC 8 | AcMCAQEwTQYHKoZIzj0BAQJCAf////////////////////////////////////// 9 | ////////////////////////////////////////////////MIGfBEIB//////// 10 | //////////////////////////////////////////////////////////////// 11 | //////////////wEQgBRlT65YY4cmh+SmiGgtoVA7qLacluZsxXzuLSJkY7xCeFW 12 | GTlR7H6TexZSwL07sb8HNXPfiD0sNPHvRR/Ua1A/AAMVANCeiAApHLhTlsxnFzky 13 | hKqg2mS6BIGFBADGhY4GtwQE6c2ePstmI5W0QpxkgTkFP7Uh+CivYGtNPbqhS153 14 | 7+dZKP4dwSei/6jeM0izwYVqQpv5fn4xwuW9ZgEYOSlqeJo7wARcil+0LH0b2Zj1 15 | RElXm0RoF6+9Fyc+ZiyX7nKZXvQmQMVQuQE/rQdhNTxwhqJywkCIvpR2n9FmUAJC 16 | Af//////////////////////////////////////////+lGGh4O/L5Zrf8wBSPcJ 17 | pdA7tcm4iZxHrrtvtx6ROGQJAgEBA4GGAAQBexiUvRext+wPweeabtxEBGRhrUHY 18 | AVVOVGJNZajY4cs1fFj6ibC7hRDx/zoe1K8/8JfPjIErpGGUDddwDPGO9BsAaxnw 19 | 9Zjsqcg7eVZ80FCcV7NkidCWowW+2vNGJVz+H7Uvni/nP8EV/rirPkikJ7GLZo+i 20 | 2GFlbyd5aiXjpXmiuBejUzBRMB0GA1UdDgQWBBTDRA1ZGMG/Mug+UA/FpXbxKhIt 21 | +TAfBgNVHSMEGDAWgBTDRA1ZGMG/Mug+UA/FpXbxKhIt+TAPBgNVHRMBAf8EBTAD 22 | AQH/MAoGCCqGSM49BAMCA4GMADCBiAJCARjvVk6dGYDkBNzwyGOOP9MzWnsNgaDg 23 | HPnpBFMM5Ut/P/P9ApWcc4HDOv+zp22KkgVfoIyF1J3S6djA/3qHMV5XAkIBjlfr 24 | LznARSA19eeU69LHSd4+kfZ/45eE48bfC/pxkgmRMdIFqb5r72z5quj/W/SM4F8s 25 | l3LHQFWzwMex/DVwU/I= 26 | -----END CERTIFICATE----- 27 | -----BEGIN EC PARAMETERS----- 28 | MIIBwwIBATBNBgcqhkjOPQEBAkIB//////////////////////////////////// 29 | //////////////////////////////////////////////////8wgZ8EQgH///// 30 | //////////////////////////////////////////////////////////////// 31 | /////////////////ARCAFGVPrlhjhyaH5KaIaC2hUDuotpyW5mzFfO4tImRjvEJ 32 | 4VYZOVHsfpN7FlLAvTuxvwc1c9+IPSw08e9FH9RrUD8AAxUA0J6IACkcuFOWzGcX 33 | OTKEqqDaZLoEgYUEAMaFjga3BATpzZ4+y2YjlbRCnGSBOQU/tSH4KK9ga009uqFL 34 | Xnfv51ko/h3BJ6L/qN4zSLPBhWpCm/l+fjHC5b1mARg5KWp4mjvABFyKX7QsfRvZ 35 | mPVESVebRGgXr70XJz5mLJfucple9CZAxVC5AT+tB2E1PHCGonLCQIi+lHaf0WZQ 36 | AkIB///////////////////////////////////////////6UYaHg78vlmt/zAFI 37 | 9wml0Du1ybiJnEeuu2+3HpE4ZAkCAQE= 38 | -----END EC PARAMETERS----- 39 | -----BEGIN EC PRIVATE KEY----- 40 | MIICngIBAQRCAdg/rpMq12SkpeFecWlMaOjp1Xrd7TKCyHyrz5m5W7MEDowo80h3 41 | wBFdrCtauwzSz6UBzBvk4QMdaAhVlCngel/woIIBxzCCAcMCAQEwTQYHKoZIzj0B 42 | AQJCAf////////////////////////////////////////////////////////// 43 | ////////////////////////////MIGfBEIB//////////////////////////// 44 | //////////////////////////////////////////////////////////wEQgBR 45 | lT65YY4cmh+SmiGgtoVA7qLacluZsxXzuLSJkY7xCeFWGTlR7H6TexZSwL07sb8H 46 | NXPfiD0sNPHvRR/Ua1A/AAMVANCeiAApHLhTlsxnFzkyhKqg2mS6BIGFBADGhY4G 47 | twQE6c2ePstmI5W0QpxkgTkFP7Uh+CivYGtNPbqhS1537+dZKP4dwSei/6jeM0iz 48 
| wYVqQpv5fn4xwuW9ZgEYOSlqeJo7wARcil+0LH0b2Zj1RElXm0RoF6+9Fyc+ZiyX 49 | 7nKZXvQmQMVQuQE/rQdhNTxwhqJywkCIvpR2n9FmUAJCAf////////////////// 50 | ////////////////////////+lGGh4O/L5Zrf8wBSPcJpdA7tcm4iZxHrrtvtx6R 51 | OGQJAgEBoYGJA4GGAAQBexiUvRext+wPweeabtxEBGRhrUHYAVVOVGJNZajY4cs1 52 | fFj6ibC7hRDx/zoe1K8/8JfPjIErpGGUDddwDPGO9BsAaxnw9Zjsqcg7eVZ80FCc 53 | V7NkidCWowW+2vNGJVz+H7Uvni/nP8EV/rirPkikJ7GLZo+i2GFlbyd5aiXjpXmi 54 | uBc= 55 | -----END EC PRIVATE KEY----- -------------------------------------------------------------------------------- /roles/node/tasks/100-tests.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Test | Check node_binary 3 | ansible.builtin.fail: 4 | msg: The 'node_binary' variable can't be empty! 5 | when: node_binary == '' 6 | 7 | - name: Test | Check if node_binary_signature is a URL 8 | ansible.builtin.fail: 9 | msg: The 'node_binary_signature' variable must be a URL! 10 | when: 11 | - node_binary_signature != '' 12 | - not node_binary_signature.startswith('http') 13 | 14 | - name: Test | Check node_app_name 15 | ansible.builtin.fail: 16 | msg: The 'node_app_name' variable can't be empty! 17 | when: node_app_name == '' 18 | 19 | - name: Test | Check node_chain 20 | ansible.builtin.fail: 21 | msg: The 'node_chain' variable can't be empty! 22 | when: 23 | - node_chain == '' 24 | - node_chainspec == '' 25 | - node_parachain_relay_chain_rpc_urls == [] 26 | 27 | - name: Test | Check node_parachain_chain 28 | ansible.builtin.fail: 29 | msg: The 'node_parachain_chain' variable can't be empty! 30 | when: node_parachain_role != '' and node_parachain_chain == '' and node_parachain_chainspec == '' 31 | 32 | - name: Test | Check node_role 33 | ansible.builtin.fail: 34 | msg: The 'node_role' variable can contain only 'validator', 'boot', 'full' or 'rpc' values! 35 | when: node_role == '' or node_role not in ["validator", "boot", "rpc", "full"] 36 | 37 | - name: Test | Check node_parachain_role 38 | ansible.builtin.fail: 39 | msg: The 'node_parachain_role' variable can contain only 'collator', 'validator' 'rpc' or 'full' values! 40 | when: node_parachain_role != '' and node_parachain_role not in ["collator", "rpc", "full", "validator"] 41 | 42 | - name: Test | Check correctness of role variables 43 | ansible.builtin.fail: 44 | msg: You use the wrong combination of 'node_role' and 'node_parachain_role' variables! 45 | when: node_role == 'validator' and node_parachain_role == 'collator' 46 | 47 | - name: Test | Check node_app_name 48 | ansible.builtin.fail: 49 | msg: The 'node_app_name' variable can contain only '0-9a-zA-Z_-' symbols! 50 | when: node_app_name is regex('[^0-9a-zA-Z_-]+') 51 | 52 | - name: Test | Check node_chain_backup_restoring_type 53 | ansible.builtin.fail: 54 | msg: The 'node_chain_backup_restoring_type' variable can contain only 'http', 'tar' or 'none' values! 55 | when: node_chain_backup_restoring_type not in ["http", "tar", "none"] 56 | 57 | - name: Test | Check node_parachain_chain_backup_restoring_type 58 | ansible.builtin.fail: 59 | msg: The 'node_parachain_chain_backup_restoring_type' variable can contain only 'http', 'tar' or 'none' values! 
60 | when: node_parachain_chain_backup_restoring_type not in ["http", "tar", "none"] 61 | 62 | - name: Test | Check node_chain_backup_url 63 | ansible.builtin.fail: 64 | msg: If you use tar backups, you have to set the node_chain_backup_url variable 65 | when: node_chain_backup_restoring_type == 'tar' and node_chain_backup_url == '' 66 | 67 | - name: Test | Check node_parachain_chain_backup_url 68 | ansible.builtin.fail: 69 | msg: If you use tar backups, you have to set the node_parachain_chain_backup_url variable 70 | when: node_parachain_chain_backup_restoring_type == 'tar' and node_parachain_chain_backup_url == '' 71 | 72 | - name: Test | Check number of executions in play 73 | ansible.builtin.fail: 74 | msg: "If you execute {{ ansible_role_name }} role multiple time in one play, please set node_handler_id " 75 | when: ansible_play_role_names | regex_findall(ansible_role_name) | length > 1 and node_handler_id == '' 76 | -------------------------------------------------------------------------------- /roles/node/molecule/default/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | gather_facts: false 5 | tasks: 6 | - name: Collect service facts 7 | ansible.builtin.service_facts: 8 | 9 | - name: Print service facts 10 | ansible.builtin.debug: 11 | var: ansible_facts.services['polkadot.service'] 12 | 13 | - name: check service 14 | ansible.builtin.assert: 15 | that: ansible_facts.services['polkadot.service'].state == 'running' 16 | 17 | - name: Get system_health 18 | ansible.builtin.uri: 19 | url: http://127.0.0.1:{{ node_rpc_port }} 20 | method: POST 21 | body: { id: 1, jsonrpc: "2.0", method: system_health, params: [] } 22 | body_format: json 23 | headers: 24 | Content-Type: application/json 25 | use_proxy: false 26 | until: _system_health_result.status is defined and _system_health_result.status == 200 27 | retries: 3 28 | delay: 10 29 | register: _system_health_result 30 | 31 | - name: Print system_health 32 | ansible.builtin.debug: 33 | msg: "{{ _system_health_result.json }}" 34 | 35 | - name: Re-deploy node with additional parameters 36 | ansible.builtin.include_role: 37 | name: node 38 | vars: 39 | node_database_wipe: true 40 | node_parachain_database_wipe: true 41 | node_start_service: false 42 | 43 | - name: Collect service facts 1 44 | ansible.builtin.service_facts: 45 | 46 | - name: Print service facts 1 47 | ansible.builtin.debug: 48 | var: ansible_facts.services['polkadot.service'] 49 | 50 | - name: Check service 1 51 | ansible.builtin.assert: 52 | that: ansible_facts.services['polkadot.service'].state == 'stopped' 53 | 54 | - name: Start {{ node_app_name }} service 55 | ansible.builtin.systemd: 56 | name: "{{ node_app_name }}" 57 | state: started 58 | 59 | - name: Collect service facts 2 60 | ansible.builtin.service_facts: 61 | 62 | - name: Print service facts 2 63 | ansible.builtin.debug: 64 | var: ansible_facts.services['polkadot.service'] 65 | 66 | - name: Check service 2 67 | ansible.builtin.assert: 68 | that: ansible_facts.services['polkadot.service'].state == 'running' 69 | 70 | - name: Get system_health 71 | ansible.builtin.uri: 72 | url: http://127.0.0.1:{{ node_rpc_port }} 73 | method: POST 74 | body: { id: 1, jsonrpc: "2.0", method: system_health, params: [] } 75 | body_format: json 76 | headers: 77 | Content-Type: application/json 78 | use_proxy: false 79 | until: _system_health_result.status is defined and _system_health_result.status == 200 80 | retries: 3 81 | delay: 10 82 | 
register: _system_health_result 83 | 84 | - name: Print system_health 85 | ansible.builtin.debug: 86 | msg: "{{ _system_health_result.json }}" 87 | 88 | - name: Get system_syncState 89 | ansible.builtin.uri: 90 | url: http://127.0.0.1:{{ node_rpc_port }} 91 | method: POST 92 | body: { id: 1, jsonrpc: "2.0", method: system_syncState, params: [] } 93 | body_format: json 94 | headers: 95 | Content-Type: application/json 96 | use_proxy: false 97 | register: _system_syncstate_result 98 | 99 | - name: Print system_syncState 100 | ansible.builtin.debug: 101 | msg: "{{ _system_syncstate_result.json }}" 102 | -------------------------------------------------------------------------------- /roles/nginx/molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: prepare 3 | hosts: all 4 | gather_facts: false 5 | pre_tasks: 6 | - name: prepare | install Python3 7 | ansible.builtin.raw: apt -y update && apt install -y python3 8 | changed_when: false 9 | vars: 10 | websocat_dist_binary: https://github.com/vi/websocat/releases/download/v1.9.0/websocat_linux64 11 | websocat_binary: /usr/local/bin/websocat 12 | pebble_dist_binary: https://github.com/letsencrypt/pebble/releases/download/v2.3.1/pebble_linux-amd64 13 | pebble_binary: /usr/local/bin/pebble 14 | pebble_conf_dir: /usr/local/etc/pebble/ 15 | tasks: 16 | - name: prepare | install packeges 17 | ansible.builtin.apt: 18 | name: "{{ packeges }}" 19 | state: present 20 | update_cache: false 21 | vars: 22 | packeges: 23 | - ca-certificates 24 | - bash 25 | - netcat-openbsd 26 | - name: prepare | build hosts file 27 | ansible.builtin.lineinfile: 28 | dest: /etc/hosts 29 | line: 127.0.0.1 {{ item.domain }} 30 | state: present 31 | loop: "{{ nginx_sites }}" 32 | when: molecule_yml.driver.name == 'lxd' 33 | # websocat provides mock for WebSocket 34 | - name: prepare | download websocat binary 35 | ansible.builtin.get_url: 36 | url: "{{ websocat_dist_binary }}" 37 | dest: "{{ websocat_binary }}" 38 | mode: "0755" 39 | owner: root 40 | group: root 41 | # pebble provides mock for ACME (letsencrypt) 42 | - name: prepare | create pebble config directory 43 | ansible.builtin.file: 44 | name: "{{ pebble_conf_dir }}" 45 | state: directory 46 | owner: root 47 | group: root 48 | mode: "0755" 49 | - name: prepare | download pebble binary 50 | ansible.builtin.get_url: 51 | url: "{{ pebble_dist_binary }}" 52 | dest: "{{ pebble_binary }}" 53 | mode: "0755" 54 | owner: root 55 | group: root 56 | - name: prepare | copy pebble config files 57 | ansible.builtin.copy: 58 | src: "{{ item.src }}" 59 | dest: "{{ item.dst }}" 60 | owner: root 61 | group: root 62 | mode: "0644" 63 | loop: 64 | - { src: pebble/cert.pem, dst: "{{ pebble_conf_dir }}" } # fake 127.0.0.1 certificate 65 | - { src: pebble/key.pem, dst: "{{ pebble_conf_dir }}" } # fake 127.0.0.1 certificate 66 | - name: prepare | copy config templates 67 | ansible.builtin.template: 68 | src: "{{ item.src }}" 69 | dest: "{{ item.dst }}" 70 | owner: root 71 | group: root 72 | mode: "0644" 73 | loop: 74 | - { src: pebble.service.j2, dst: /etc/systemd/system/pebble.service } 75 | - { src: pebble-config.json.j2, dst: "{{ pebble_conf_dir }}pebble-config.json" } 76 | - { src: websocat.service.j2, dst: /etc/systemd/system/websocat.service } 77 | - { src: http-stub.service.j2, dst: /etc/systemd/system/http-stub.service } 78 | - name: prepare | run services 79 | ansible.builtin.systemd: 80 | name: "{{ item }}" 81 | state: started 82 | enabled: true 83 | 
daemon_reload: true 84 | loop: 85 | - pebble.service 86 | - http-stub.service 87 | - websocat.service 88 | - name: prepare | collect service facts 89 | ansible.builtin.service_facts: 90 | - name: prepare | check services 91 | ansible.builtin.assert: 92 | that: ansible_facts.services[item].state == 'running' 93 | loop: 94 | - pebble.service 95 | - http-stub.service 96 | - websocat.service 97 | -------------------------------------------------------------------------------- /roles/nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nginx 3 | tags: [nginx] 4 | block: 5 | - name: nginx | include test tasks 6 | ansible.builtin.include_tasks: 7 | file: tests.yml 8 | apply: 9 | tags: [nginx, nginx-tests] 10 | tags: [nginx-tests] 11 | 12 | - name: nginx | include remove tasks 13 | ansible.builtin.include_tasks: 14 | file: remove.yml 15 | apply: 16 | tags: [nginx, nginx-remove] 17 | when: nginx_remove_enable | bool 18 | tags: [nginx-remove] 19 | 20 | - name: nginx | install packeges 21 | ansible.builtin.apt: 22 | name: "{{ packeges }}" 23 | state: present 24 | update_cache: true 25 | vars: 26 | packeges: 27 | - nginx 28 | - certbot 29 | 30 | - name: nginx | create directories 31 | ansible.builtin.file: 32 | name: "{{ item }}" 33 | state: directory 34 | owner: root 35 | group: root 36 | mode: "0755" 37 | loop: 38 | - /var/www/letsencrypt 39 | - /etc/letsencrypt/renewal-hooks/deploy 40 | - /etc/systemd/system/nginx.service.d 41 | 42 | - name: nginx | copy letsencrypt renewal-hook reload script 43 | ansible.builtin.copy: 44 | src: reload-nginx-config 45 | dest: /etc/letsencrypt/renewal-hooks/deploy 46 | owner: root 47 | group: root 48 | mode: "0744" 49 | 50 | - name: nginx | stat dhparams 51 | ansible.builtin.stat: 52 | path: /etc/nginx/dhparams.pem 53 | register: stat_dhparams 54 | 55 | - name: nginx | generate dhparams 56 | community.crypto.openssl_dhparam: 57 | path: /etc/nginx/dhparams.pem 58 | size: "{{ nginx_dhparam_size }}" 59 | owner: root 60 | group: root 61 | mode: "0600" 62 | notify: reload nginx config 63 | when: not stat_dhparams.stat.exists 64 | ignore_errors: "{{ ansible_check_mode }}" 65 | # molecule skip test 66 | tags: molecule-notest 67 | 68 | - name: nginx | copy config templates 69 | ansible.builtin.template: 70 | src: "{{ item.src }}" 71 | dest: "{{ item.dst }}" 72 | owner: root 73 | group: root 74 | mode: "0644" 75 | notify: reload nginx config 76 | loop: 77 | - { src: nginx.conf.j2, dst: /etc/nginx/nginx.conf } 78 | - { src: site-default.j2, dst: /etc/nginx/sites-enabled/default } 79 | - { src: override.conf.j2, dst: /etc/systemd/system/nginx.service.d/override.conf } 80 | 81 | - name: nginx | flush handlers 82 | ansible.builtin.meta: flush_handlers 83 | 84 | - name: nginx | include custom certs tasks 85 | ansible.builtin.include_tasks: 86 | file: certs.yml 87 | apply: 88 | tags: [nginx, nginx-custom-certs] 89 | tags: [nginx-custom-certs] 90 | 91 | - name: nginx | include letsencrypt tasks 92 | ansible.builtin.include_tasks: 93 | file: letsencrypt.yml 94 | apply: 95 | tags: [nginx, nginx-letsencrypt] 96 | tags: [nginx-letsencrypt] 97 | 98 | - name: nginx | include site tasks 99 | ansible.builtin.include_tasks: 100 | file: sites.yml 101 | apply: 102 | tags: [nginx, nginx-sites] 103 | tags: [nginx-sites] 104 | 105 | # to avoid 2 restarts during the first deploy 106 | - name: nginx | flush handlers 107 | ansible.builtin.meta: flush_handlers 108 | 109 | - name: nginx | start nginx 110 | ansible.builtin.systemd: 
111 | name: nginx 112 | state: started 113 | enabled: true 114 | daemon_reload: true 115 | -------------------------------------------------------------------------------- /roles/node/molecule/parachain/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | gather_facts: false 5 | tasks: 6 | - name: re-deploy node with wipe 7 | ansible.builtin.include_role: 8 | name: node 9 | vars: 10 | node_database_wipe: true 11 | node_parachain_database_wipe: true 12 | 13 | - name: Collect service facts 14 | ansible.builtin.service_facts: 15 | 16 | - name: Print service facts 17 | ansible.builtin.debug: 18 | var: ansible_facts.services['shell.service'] 19 | 20 | - name: check service 21 | ansible.builtin.assert: 22 | that: ansible_facts.services['shell.service'].state == 'running' 23 | 24 | - name: Get relaychain system_health 25 | ansible.builtin.uri: 26 | url: http://127.0.0.1:{{ node_rpc_port }} 27 | method: POST 28 | body: { id: 1, jsonrpc: "2.0", method: system_health, params: [] } 29 | body_format: json 30 | headers: 31 | Content-Type: application/json 32 | use_proxy: false 33 | until: _relaychain_system_health_result.status is defined and _relaychain_system_health_result.status == 200 34 | retries: 3 35 | delay: 10 36 | register: _relaychain_system_health_result 37 | 38 | - name: Get parachain system_health 39 | ansible.builtin.uri: 40 | url: http://127.0.0.1:{{ node_parachain_rpc_port }} 41 | method: POST 42 | body: { id: 1, jsonrpc: "2.0", method: system_health, params: [] } 43 | body_format: json 44 | headers: 45 | Content-Type: application/json 46 | use_proxy: false 47 | until: _parachain_system_health_result.status is defined and _parachain_system_health_result.status == 200 48 | retries: 3 49 | delay: 10 50 | register: _parachain_system_health_result 51 | 52 | - name: Print system_health 53 | ansible.builtin.debug: 54 | msg: 55 | - "Relaychain: {{ _relaychain_system_health_result.json }}" 56 | - "Parachain: {{ _parachain_system_health_result.json }}" 57 | 58 | - name: relay chain syncing 59 | ansible.builtin.assert: 60 | that: _relaychain_system_health_result.json['result']['isSyncing'] 61 | 62 | - name: parachain is not syncing (it is not onboarded) 63 | ansible.builtin.assert: 64 | that: not _parachain_system_health_result.json['result']['isSyncing'] 65 | 66 | - name: Get relaychain system_syncState 67 | ansible.builtin.uri: 68 | url: http://127.0.0.1:{{ node_rpc_port }} 69 | method: POST 70 | body: { id: 1, jsonrpc: "2.0", method: system_syncState, params: [] } 71 | body_format: json 72 | headers: 73 | Content-Type: application/json 74 | use_proxy: false 75 | until: _relaychain_system_syncstate_result.status is defined and _relaychain_system_syncstate_result.status == 200 76 | retries: 3 77 | delay: 10 78 | register: _relaychain_system_syncstate_result 79 | 80 | - name: Get parachain system_syncState 81 | ansible.builtin.uri: 82 | url: http://127.0.0.1:{{ node_parachain_rpc_port }} 83 | method: POST 84 | body: { id: 1, jsonrpc: "2.0", method: system_syncState, params: [] } 85 | body_format: json 86 | headers: 87 | Content-Type: application/json 88 | use_proxy: false 89 | until: _parachain_system_syncstate_result.status is defined and _parachain_system_syncstate_result.status == 200 90 | retries: 3 91 | delay: 10 92 | register: _parachain_system_syncstate_result 93 | 94 | - name: Print system_syncState 95 | ansible.builtin.debug: 96 | msg: 97 | - "Relaychain: {{ _relaychain_system_syncstate_result.json }}" 
98 | - "Parachain: {{ _parachain_system_syncstate_result.json }}" 99 | -------------------------------------------------------------------------------- /roles/node/README.md: -------------------------------------------------------------------------------- 1 | # Substrate/Polkadot node deployment ansible role 2 | 3 | The role can deploy a Substrate node. 4 | 5 | Available relaychain roles: `validator`, `boot`, `full` and `rpc`. 6 | Available parachain roles: `collator`, `validator`, `rpc` and `full`. 7 | 8 | The role can work in `check mode` regardless of the current state of the infrastructure. 9 | Use the `--diff --check` CLI parameters to test changes before applying them. 10 | 11 | ## Role preferences 12 | 13 | 14 | You can find all user-configurable variables and their comments in the `defaults/main.yml` file. 15 | Almost all default values of variables do not need to be changed. 16 | You can override common or very specific variables in your playbooks 17 | or inventory files if you need to. 18 | 19 | Internal variables used by the role are defined in the `vars/main.yml` file. 20 | 21 | ## Requirements 22 | 23 | * You have to be able to use `become` 24 | * The role can't be run with only the default variable values. You have to specify at least 25 | the `node_chain` variable. 26 | 27 | ## Examples 28 | 29 | ### Wipe block storage before deploying 30 | 31 | `ansible-playbook --tags "node" -e "node_database_wipe=true" -e "node_parachain_database_wipe=true" playbook.yml` 32 | 33 | ```yaml 34 | - hosts: host1 35 | become: yes 36 | roles: 37 | - node 38 | vars: 39 | node_database_wipe: true 40 | node_parachain_database_wipe: true 41 | ``` 42 | 43 | ### Restart nodes only 44 | 45 | `ansible-playbook --tags "node" -e "node_binary_deployment=False" 46 | -e "node_systemd_deployment=False" -e "node_force_restart=True" playbook.yml` 47 | 48 | ```yaml 49 | - hosts: host1 50 | become: yes 51 | roles: 52 | - node 53 | vars: 54 | node_binary_deployment: false 55 | node_systemd_deployment: false 56 | node_force_restart: true 57 | ``` 58 | 59 | ## Contributing 60 | 61 | If you want to add functionality or change something, please try to preserve backward compatibility. 62 | A lot of playbooks may depend on this role. Breaking changes should 63 | be discussed in a common review. 64 | 65 | ## Chain IDs 66 | 67 | The list doesn't contain all possible IDs and can be outdated.
68 | 69 | Relaychain chain IDs: 70 | ```yaml 71 | polkadot: "polkadot" 72 | kusama: "ksmcc3" 73 | westend: "westend2" 74 | paseo: "paseo" 75 | rococo-local: "rococo_local_testnet" 76 | ``` 77 | 78 | Parachain chain IDs: 79 | ```yaml 80 | statemine: "statemine" 81 | statemint: "statemint" 82 | westmint: "westmint" 83 | ``` 84 | 85 | ## Example basic inventory 86 | 87 | ``` 88 | all: 89 | vars: 90 | node_app_name: company-chain 91 | node_binary_version: v0.9.29 92 | node_chain: rococo-local 93 | node_user: polkadot 94 | node_binary: https://github.com/paritytech/polkadot/releases/download/{{ node_binary_version }}/polkadot 95 | node_binary_signature: https://github.com/paritytech/polkadot/releases/download/{{ node_binary_version }}/polkadot.asc 96 | children: 97 | validators: 98 | vars: 99 | hosts: 100 | validator1: 101 | node_custom_options: ["--alice"] 102 | ansible_host: validator1.company.com 103 | node_role: validator 104 | validator2: 105 | node_custom_options: ["--bob"] 106 | ansible_host: validator2.company.com 107 | node_role: validator 108 | rpcs: 109 | rpc1: 110 | ansible_host: rpc1.company.com 111 | node_role: rpc 112 | collators: 113 | vars: 114 | node_binary: https://github.com/paritytech/cumulus/releases/download/{{ node_binary_version }}0/polkadot-parachain 115 | node_binary_signature: https://github.com/paritytech/cumulus/releases/download/{{ node_binary_version }}0/polkadot-parachain.asc 116 | node_parachain_chain: shell 117 | hosts: 118 | collator1: 119 | ansible_host: collator1.company.com 120 | node_parachain_role: collator 121 | ``` 122 | -------------------------------------------------------------------------------- /roles/key_inject/README.md: -------------------------------------------------------------------------------- 1 | # key_inject ansible role 2 | 3 | This Ansible role is designed to facilitate the injection of cryptographic keys 4 | into Polkadot nodes, a crucial step for setting up a node for active 5 | participation in network operations like consensus and block authoring. This is 6 | meant only for development and testing purposes and use is not recommended in 7 | production. 8 | 9 | ## Key Role Functionality 10 | The `key_inject` role consists of several tasks organized into specific YAML 11 | files to streamline the process of key management on a Polkadot node: 12 | 13 | ### Key Tasks and Files 14 | - **check_session_key.yml**: Checks whether session keys are present in the 15 | node’s keystore. 16 | - **inject.yml**: Manages the injection of keys that are not found in the 17 | keystore. 18 | - **main.yml**: Coordinates the flow between checking and injecting keys. 19 | 20 | ### Detailed Process 21 | 1. **Key Generation**: 22 | - Generates session keys from specified private keys using 23 | `paritytech.chain.subkey_inspect`, defaulting to the `sr25519` cryptographic 24 | scheme. 25 | 26 | 2. **Key Verification**: 27 | - An RPC call checks for the presence of keys in the keystore. If absent, 28 | the process retries up to 12 times, with a 10-second pause between each try. 29 | 30 | 3. **Key Injection**: 31 | - If keys are missing in the keystore, they are injected via an RPC call. 32 | This includes handling for errors and notifications for service restarts 33 | after successful injections. 34 | 35 | 4. **Results Reporting**: 36 | - Outcomes of the injection process are logged, indicating the success or 37 | failure of the key injections. 
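For debugging, the two RPC calls that the process above relies on can be reproduced by hand. The snippet below is a minimal illustration only (it is not part of the role); it assumes a node listening on `127.0.0.1:9944`, and the key values are placeholders.

```bash
# 1. Check whether a public key of a given type (e.g. 'gran') is already in the keystore.
curl -s -H 'Content-Type: application/json' \
  -d '{"id":1,"jsonrpc":"2.0","method":"author_hasKey","params":["0xPUBLIC_KEY","gran"]}' \
  http://127.0.0.1:9944

# 2. Inject the key only if the check above returned {"result":false}.
#    Params are: key type, private key (secret seed / URI), public key.
curl -s -H 'Content-Type: application/json' \
  -d '{"id":1,"jsonrpc":"2.0","method":"author_insertKey","params":["gran","SECRET SEED","0xPUBLIC_KEY"]}' \
  http://127.0.0.1:9944
```

The role issues the same calls through `ansible.builtin.uri`, retrying until the node's RPC endpoint responds.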
38 | 39 | ### Security and Risk Considerations 40 | Using Ansible for key management is feasible but must be approached with caution, 41 | particularly on networks with real economic value: 42 | - **Secure Storage**: Keys should be encrypted and securely stored within 43 | Ansible variables. Use `ansible-vault encrypt` for sensitive data. 44 | - **Unique Keys**: Ensure no key sharing across nodes to avoid risks like 45 | slashing. 46 | 47 | **Risk of Slashing**: There's a high risk of slashing in production if keys are 48 | mismanaged, particularly from issues like double-signing due to key reuse. 49 | 50 | **Best Practice**: In production environments, the use of the `author_rotateKeys` 51 | RPC method is strongly recommended over manual methods to mitigate risks. 52 | This method ensures keys are managed securely, preventing equivocation. 53 | If `author_rotateKeys` is not utilized, consider implementing robust key 54 | management server software that provides safeguards against key misuse and 55 | equivocation. 56 | 57 | ## Usage Instructions 58 | ```yaml 59 | # playbooks/inject_keys.yaml 60 | --- 61 | - name: Inject keys into Polkadot nodes 62 | hosts: polkadot 63 | gather_facts: false 64 | tasks: 65 | - name: Inject keys 66 | ansible.builtin.include_role: 67 | name: paritytech.chain.key_inject 68 | ``` 69 | ```yaml 70 | # group_vars/polkadot.yaml 71 | subkey_path: "https://releases.parity.io/substrate/x86_64-debian:stretch/v3.0.0/subkey/subkey" 72 | key_inject_relay_chain_rpc_port: 9944 73 | key_inject_relay_chain_key_list: 74 | - scheme: "sr25519" 75 | type: "gran" 76 | priv_key: "0xcc...9123//1//grandpa" 77 | - type: "babe" 78 | priv_key: "SECRET SEED" 79 | - type: "imon" 80 | priv_key: "SECRET SEED" 81 | - type: "para" 82 | priv_key: "SECRET SEED" 83 | - type: "asgn" 84 | priv_key: "SECRET SEED" 85 | - type: "audi" 86 | priv_key: "SECRET SEED" 87 | key_inject_check_session_key: true 88 | ``` 89 | 90 | ## Additional Resources 91 | This role supports a structured approach to key management but should only be 92 | used with a clear understanding of the security requirements and potential 93 | consequences like [slashing for equivocation](https://wiki.polkadot.network/docs/maintain-guides-avoid-slashing#equivocation). 94 | For further details on cryptographic practices in Polkadot, visit 95 | [Cryptography on Polkadot](https://wiki.polkadot.network/docs/learn-cryptography).
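As a concrete illustration of the `author_rotateKeys` approach recommended above: it is a single RPC call made against the node's own (local, unsafe) RPC endpoint, which generates a fresh set of session keys in the node's keystore and returns the concatenated public keys for on-chain registration. A minimal sketch, assuming the RPC endpoint is on `127.0.0.1:9944` and unsafe RPC methods are allowed locally:

```bash
# Ask the node to rotate its session keys and print the returned public keys.
curl -s -H 'Content-Type: application/json' \
  -d '{"id":1,"jsonrpc":"2.0","method":"author_rotateKeys","params":[]}' \
  http://127.0.0.1:9944
```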
96 | -------------------------------------------------------------------------------- /plugins/filter/subkey.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from ansible.errors import AnsibleFilterError 5 | from ansible.module_utils._text import to_text 6 | import subprocess 7 | import json 8 | 9 | DOCUMENTATION = ''' 10 | name: subkey_inspect 11 | author: Parity Technologies 12 | version_added: "1.10.8" 13 | short_description: Inspects crypto keys using the Subkey utility 14 | description: 15 | - Filter that runs the Subkey inspect command to analyze cryptographic keys 16 | - Supports various networks and schemes 17 | - Can output public key information 18 | - Returns JSON formatted data about the key 19 | options: 20 | uri: 21 | description: The URI or key to inspect 22 | type: str 23 | required: true 24 | network: 25 | description: The network to use for the inspection 26 | type: str 27 | required: false 28 | default: '' 29 | scheme: 30 | description: The cryptographic scheme to use 31 | type: str 32 | required: false 33 | default: '' 34 | public: 35 | description: Whether to only show public key information 36 | type: bool 37 | required: false 38 | default: false 39 | ''' 40 | 41 | EXAMPLES = ''' 42 | # Basic key inspection 43 | - debug: 44 | msg: "{{ 'key_uri' | subkey_inspect }}" 45 | 46 | # Inspect with specific network 47 | - debug: 48 | msg: "{{ 'key_uri' | subkey_inspect(network='kusama') }}" 49 | 50 | # Inspect with specific scheme and show only public info 51 | - debug: 52 | msg: "{{ 'key_uri' | subkey_inspect(scheme='sr25519', public=true) }}" 53 | ''' 54 | 55 | RETURN = ''' 56 | ss58: 57 | description: The SS58 address of the key 58 | type: str 59 | sample: "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" 60 | public_key: 61 | description: The public key in hex format 62 | type: str 63 | sample: "0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d" 64 | account_id: 65 | description: The account ID 66 | type: str 67 | sample: "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d" 68 | ''' 69 | 70 | 71 | def subkey_inspect(uri, network='', scheme='', public=False): 72 | """Run subkey inspect command and return output.""" 73 | uri = to_text(uri, errors='surrogate_or_strict', nonstring='simplerepr') 74 | 75 | args = [] 76 | log_uri = '' 77 | if scheme: 78 | args.extend(['--scheme', scheme]) 79 | if network: 80 | args.extend(['--network', network]) 81 | if public: 82 | args.extend(['--public']) 83 | log_uri = uri 84 | 85 | try: 86 | process = subprocess.Popen(['subkey', 'inspect', uri, "--output-type=json", *args], 87 | stdout=subprocess.PIPE, 88 | stderr=subprocess.PIPE, 89 | universal_newlines=True) 90 | except FileNotFoundError: 91 | raise AnsibleFilterError( 92 | "subkey binary is required for this filter. Please, install it on local machine: sudo curl -fSL -o " 93 | "/usr/local/bin/subkey 'https://releases.parity.io/substrate/x86_64-debian%3Astretch/v3.0.0/subkey/subkey' " 94 | "&& chmod +x /usr/local/bin/subkey" 95 | ) 96 | except Exception as e: 97 | raise AnsibleFilterError( 98 | 'Error running subkey command. \nCommand: subkey inspect %s %s \nError: %s' 99 | % (log_uri, ' '.join(args), e)) 100 | stdout, stderr = process.communicate() 101 | if process.returncode != 0: 102 | raise AnsibleFilterError('Error running subkey command. 
\nCommand: subkey inspect %s %s \nstdout: %s \nstderr: %s' 103 | % (log_uri, ' '.join(args), stdout, stderr)) 104 | try: 105 | output = json.loads(stdout) 106 | except Exception as e: 107 | raise AnsibleFilterError('Error parsing json:\n%s \nError: %s' % (stdout, e)) 108 | 109 | return output 110 | 111 | 112 | class FilterModule(object): 113 | """ Subkey filter """ 114 | 115 | def filters(self): 116 | return { 117 | 'subkey_inspect': subkey_inspect 118 | } 119 | -------------------------------------------------------------------------------- /roles/node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: node | Tests 3 | ansible.builtin.include_tasks: 4 | file: 100-tests.yml 5 | apply: 6 | tags: [node, node-tests] 7 | tags: [node, node-tests] 8 | 9 | - name: node | Check the systemd unit file exists 10 | ansible.builtin.stat: 11 | path: "{{ _node_unit_file }}" 12 | register: _node_systemd_unit_file_stat 13 | tags: 14 | - node 15 | - node-wipe 16 | - node-health-check 17 | - node-binary 18 | - node-memory-profiler 19 | - node-chain 20 | - node-restore-chain 21 | - node-systemd 22 | - node-restart 23 | - node-post-tasks 24 | 25 | - name: node | Prepare 26 | ansible.builtin.include_tasks: 27 | file: 200-prepare.yml 28 | apply: 29 | tags: 30 | - node 31 | - node-prepare 32 | - node-health-check 33 | - node-binary 34 | - node-memory-profiler 35 | - node-chain 36 | - node-restore-chain 37 | - node-systemd 38 | - node-restart 39 | - node-post-tasks 40 | tags: 41 | - node 42 | - node-prepare 43 | - node-health-check 44 | - node-binary 45 | - node-memory-profiler 46 | - node-chain 47 | - node-restore-chain 48 | - node-systemd 49 | - node-restart 50 | - node-post-tasks 51 | 52 | - name: node | Wipe 53 | ansible.builtin.include_tasks: 54 | file: 300-wipe.yml 55 | apply: 56 | tags: [node, node-wipe] 57 | when: node_database_wipe | bool or node_parachain_database_wipe | bool 58 | tags: [node, node-wipe] 59 | 60 | # It will only be run if the systemd service state is 'running'. 61 | # It should help to avoid the situation when we try to update a broken node. 
62 | - name: node | Check the node state before deploying 63 | ansible.builtin.include_tasks: 64 | file: "001-health-check.yml" 65 | apply: 66 | tags: [node, node-health-check] 67 | vars: 68 | _node_pre_check: true 69 | tags: [node, node-health-check, node-restore-chain] 70 | 71 | - name: node | Binary 72 | ansible.builtin.include_tasks: 73 | file: 400-binary.yml 74 | apply: 75 | tags: [node, node-binary] 76 | when: node_binary_deployment | bool 77 | tags: [node, node-binary] 78 | 79 | - name: node | Memory profiler 80 | ansible.builtin.include_tasks: 81 | file: 500-memory-profiler.yml 82 | apply: 83 | tags: [node, node-memory-profiler] 84 | when: node_memory_profiler_enable | bool 85 | tags: [node, node-memory-profiler] 86 | 87 | - name: node | Chain 88 | ansible.builtin.include_tasks: 89 | file: 600-chain.yml 90 | apply: 91 | tags: [node, node-chain] 92 | when: node_chain_deployment | bool 93 | tags: [node, node-chain] 94 | 95 | - name: node | Get chain IDs 96 | ansible.builtin.include_tasks: 97 | file: 700-get-chainid.yml 98 | apply: 99 | tags: [node, node-restore-chain] 100 | when: _node_restore_relaychain or _node_restore_parachain 101 | tags: [node, node-restore-chain] 102 | 103 | - name: node | Restore chain 104 | ansible.builtin.include_tasks: 105 | file: 800-restore-chain.yml 106 | apply: 107 | tags: [node, node-restore-chain] 108 | loop: "{{ _node_restore_list }}" 109 | when: _node_restore_relaychain or _node_restore_parachain 110 | tags: [node, node-restore-chain] 111 | 112 | - name: node | Systemd 113 | ansible.builtin.include_tasks: 114 | file: 900-systemd.yml 115 | apply: 116 | tags: [node, node-systemd] 117 | when: node_systemd_deployment | bool 118 | tags: [node, node-systemd] 119 | 120 | - name: node | Restart 121 | ansible.builtin.include_tasks: 122 | file: "002-restart.yml" 123 | apply: 124 | tags: [node, node-restart] 125 | when: node_start_service | bool and node_force_restart | bool 126 | tags: [node, node-restart] 127 | 128 | - name: node | Post tasks 129 | ansible.builtin.include_tasks: 130 | file: 1000-post-tasks.yml 131 | apply: 132 | tags: 133 | - node 134 | - node-prepare 135 | - node-health-check 136 | - node-binary 137 | - node-memory-profiler 138 | - node-chain 139 | - node-restore-chain 140 | - node-systemd 141 | - node-restart 142 | - node-post-tasks 143 | tags: 144 | - node 145 | - node-prepare 146 | - node-health-check 147 | - node-binary 148 | - node-memory-profiler 149 | - node-chain 150 | - node-restore-chain 151 | - node-systemd 152 | - node-restart 153 | - node-post-tasks 154 | -------------------------------------------------------------------------------- /plugins/README.md: -------------------------------------------------------------------------------- 1 | # Collections Plugins Directory 2 | 3 | This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that 4 | is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that 5 | would contain module utils and modules respectively. 
6 | 7 | Here is an example directory of the majority of plugins currently supported by Ansible: 8 | 9 | ``` 10 | └── plugins 11 | ├── action 12 | ├── become 13 | ├── cache 14 | ├── callback 15 | ├── cliconf 16 | ├── connection 17 | ├── filter 18 | ├── httpapi 19 | ├── inventory 20 | ├── lookup 21 | ├── module_utils 22 | ├── modules 23 | ├── netconf 24 | ├── shell 25 | ├── strategy 26 | ├── terminal 27 | ├── test 28 | └── vars 29 | ``` 30 | 31 | A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible/2.10/plugins/plugins.html). 32 | 33 | # Filters 34 | ## subkey_inspect 35 | Small wrapper around subkey inspect command. 36 | ### Requirements 37 | Subkey binary should be installed on host machine. 38 | 39 | ```bash 40 | curl -fSL -o subkey 'https://releases.parity.io/substrate/x86_64-debian%3Astretch/v3.0.0/subkey/subkey' 41 | chmod +x subkey 42 | sudo mv subkey /usr/local/bin/subkey 43 | subkey -V 44 | ``` 45 | 46 | ### Usage 47 | ``` 48 | {{ var|infrastructure.chain_operations.subkey_inspect() }} 49 | ``` 50 | 51 | Example: 52 | ``` 53 | # ./test.yml 54 | - name: test 55 | hosts: localhost 56 | gather_facts: false 57 | vars: 58 | secretKey: "0xa021a8ab1f9a1b5dd293f56978b64531ec68db5b028197c2577417a24d4fa383//one" 59 | pubKey: "0x1e3a41ed0424929e949c531654b82baee9869bcea16d1115ca8344b637a44b10" 60 | tasks: 61 | - debug: 62 | msg: 63 | - "Print all keys: {{ secretKey | infrastructure.chain_operations.subkey_inspect }}" 64 | - "Print accountId: {{ (secretKey | infrastructure.chain_operations.subkey_inspect).accountId }}" 65 | - "Print publicKey: {{ (secretKey | infrastructure.chain_operations.subkey_inspect).publicKey }}" 66 | - "Print secretKeyUri: {{ (secretKey | infrastructure.chain_operations.subkey_inspect).secretKeyUri }}" 67 | - "Print secretSeed: {{ (secretKey | infrastructure.chain_operations.subkey_inspect).secretSeed }}" 68 | - "Print ss58Address: {{ (secretKey | infrastructure.chain_operations.subkey_inspect).ss58Address }}" 69 | # call filter with options 70 | - "Print kusama ss58Address: {{ (secretKey | infrastructure.chain_operations.subkey_inspect(network='kusama')).ss58Address }}" 71 | - "Print scheme=Ecdsa ss58Address: {{ (secretKey | infrastructure.chain_operations.subkey_inspect(scheme='Ecdsa')).ss58Address }}" 72 | - "Print public key ss58Address: {{ (pubKey | infrastructure.chain_operations.subkey_inspect(public=True)).ss58Address }}" 73 | - "Print public kusama ss58Address: {{ (pubKey | infrastructure.chain_operations.subkey_inspect(public=True,network='kusama')).ss58Address }}" 74 | 75 | 76 | # ansible-playbook ./test.yml --check 77 | **TASK [debug] ****************************************************************************************** 78 | ok: [localhost] => { 79 | "msg": [ 80 | "Print all keys: {'accountId': '0x1e3a41ed0424929e949c531654b82baee9869bcea16d1115ca8344b637a44b10', 'publicKey': '0x1e3a41ed0424929e949c531654b82baee9869bcea16d1115ca8344b637a44b10', 'secretKeyUri': '0xa021a8ab1f9a1b5dd293f56978b64531ec68db5b028197c2577417a24d4fa383//one', 'secretSeed': '0xf8e0c1c9b22a4e595c0893245c836e9ef235dfea5292e60f85e5c09f823df4cf', 'ss58Address': '5CkLbjyxrLAs8GJNwhaDLtGdhvsFWyS3N6MqSANsz4y37Moi'}", 81 | "Print accountId: 0x1e3a41ed0424929e949c531654b82baee9869bcea16d1115ca8344b637a44b10", 82 | "Print publicKey: 0x1e3a41ed0424929e949c531654b82baee9869bcea16d1115ca8344b637a44b10", 83 | "Print secretKeyUri: 0xa021a8ab1f9a1b5dd293f56978b64531ec68db5b028197c2577417a24d4fa383//one", 84 | "Print secretSeed: 
0xf8e0c1c9b22a4e595c0893245c836e9ef235dfea5292e60f85e5c09f823df4cf", 85 | "Print ss58Address: 5CkLbjyxrLAs8GJNwhaDLtGdhvsFWyS3N6MqSANsz4y37Moi", 86 | "Print kusama ss58Address: DFxG4KqUhBnsv7piQPGEqddrX9VKeFDpUCappeqTsBXrN2H", 87 | "Print scheme=Ecdsa ss58Address: 5GQUR8Hx2u2KbbUvegJDnyEQGSeLZc6cPK5WWbZ814VsmNV7", 88 | "Print public key ss58Address: 5CkLbjyxrLAs8GJNwhaDLtGdhvsFWyS3N6MqSANsz4y37Moi", 89 | "Print public kusama ss58Address: DFxG4KqUhBnsv7piQPGEqddrX9VKeFDpUCappeqTsBXrN2H" 90 | ] 91 | } 92 | ``` 93 | -------------------------------------------------------------------------------- /roles/node/tasks/800-restore-chain.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restore {{ item.part }} | Check if chain folder already exists 3 | ansible.builtin.stat: 4 | path: "{{ item.chain_path }}" 5 | get_checksum: false 6 | register: _node_data_chain_path_stat 7 | 8 | - name: Restore {{ item.part }} | Check if db folder already exists 9 | ansible.builtin.stat: 10 | path: "{{ item.chain_path }}/{{ item.db_folder }}" 11 | get_checksum: false 12 | register: _node_data_chain_path_db_stat 13 | 14 | - name: Restore {{ item.part }} | Get size of db folder 15 | ansible.builtin.shell: du -cd 1 {{ item.chain_path }}/{{ item.db_folder }} | head -1 | awk '{print $1}' 16 | check_mode: false 17 | changed_when: false 18 | register: _node_data_chain_path_db_size 19 | 20 | - name: Restore {{ item.part }} | Set custom facts 1 21 | ansible.builtin.set_fact: 22 | _node_run_restore: "{{ not _node_data_chain_path_db_stat.stat.exists or _node_data_chain_path_db_size.stdout == '0' }}" 23 | # we only use the tmp_restore_path directory if we really need it (when a service is run) 24 | _node_use_tmp_restore_path: "{{ ansible_facts.services[node_app_name + '.service'] is defined and ansible_facts.services[node_app_name + '.service'].state == 25 | 'running' and node_chain_backup_tmp_restore_path != '' }}" 26 | #https://docs.ansible.com/ansible/latest/user_guide/complex_data_manipulation.html#find-mount-point 27 | # List can be empty if a mounted device doesn't start with `/` (exotic FS like docker overlay, bug https://github.com/ansible/ansible/issues/24644). 28 | # Be careful, if you use these FSs, the free space will not be checked. 29 | _node_run_check_size_mounts: "{{ ansible_mounts | selectattr('mount', 'in', _node_data_root_path) | list | sort(attribute='mount') }}" 30 | 31 | - name: Restore {{ item.part }} | Set custom facts 2 32 | ansible.builtin.set_fact: 33 | # We don't need to calculate free space if we sync a backup to an existing DB 34 | # Because we can't know the required amount of free space before syncing 35 | _node_run_check_size: "{{ not (_node_data_chain_path_db_stat.stat.exists and _node_data_chain_path_db_size.stdout != '0' and not _node_use_tmp_restore_path) and 36 | _node_run_check_size_mounts | length > 0 }}" 37 | _node_backup_dl_path: "{{ node_chain_backup_tmp_restore_path if _node_use_tmp_restore_path else item.chain_path + '/' + item.db_folder }}" 38 | 39 | - name: Run {{ item.part }} restoring 40 | when: _node_run_restore 41 | block: 42 | # A previous run can be stopped unexpectedly. 43 | # We have to remove the temp directory to calculate the right amount of free space. 
44 | - name: Restore {{ item.part }} | Delete temporary folder 45 | ansible.builtin.file: 46 | path: "{{ node_chain_backup_tmp_restore_path }}" 47 | state: absent 48 | when: _node_use_tmp_restore_path 49 | 50 | # It doesn't really matter what directory it is, temporary or not. 51 | # Anyway, we really need an existing directory to allow sync utilities to be run 52 | - name: Restore {{ item.part }} | Make sure download path exists 53 | ansible.builtin.file: 54 | path: "{{ _node_backup_dl_path }}" 55 | state: directory 56 | owner: "{{ node_user }}" 57 | group: "{{ node_user }}" 58 | mode: "0755" 59 | 60 | - name: Restore {{ item.part }} | Check free space in '_node_data_root_path' 61 | ansible.builtin.set_fact: 62 | _node_restore_free_space: "{{ _node_run_check_size_mounts[-1]['size_available'] }}" 63 | when: _node_run_check_size 64 | 65 | - name: Restore {{ item.part }} | Print free space in '_node_data_root_path' 66 | ansible.builtin.debug: 67 | msg: Free space at destination = {{ _node_restore_free_space | filesizeformat(true) }} 68 | when: _node_run_check_size 69 | 70 | - name: Restore {{ item.part }} | Tar restoring 71 | ansible.builtin.include_tasks: 72 | file: 801-restore-chain-tar.yml 73 | apply: 74 | tags: [node, node-restore-chain] 75 | when: item.restoring_type == 'tar' 76 | 77 | - name: Restore {{ item.part }} | HTTP restoring 78 | ansible.builtin.include_tasks: 79 | file: 803-restore-chain-http.yml 80 | apply: 81 | tags: [node, node-restore-chain] 82 | when: item.restoring_type == 'http' 83 | 84 | - name: Restore {{ item.part }} | Check if data_root folder already exists 85 | ansible.builtin.stat: 86 | path: "{{ _node_data_root_path }}" 87 | get_checksum: false 88 | register: _node_data_root_path_stat 89 | when: ansible_check_mode 90 | 91 | - name: Restore {{ item.part }} | Recursively change ownership of the '_node_data_root_path' directory 92 | ansible.builtin.file: 93 | path: "{{ _node_data_root_path }}" 94 | state: directory 95 | recurse: true 96 | owner: "{{ node_user }}" 97 | group: "{{ node_user }}" 98 | ignore_errors: "{{ ansible_check_mode and not _node_data_root_path_stat.stat.exists }}" 99 | -------------------------------------------------------------------------------- /roles/node/tasks/600-chain.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Systemd | Create directories 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | mode: "0755" 7 | owner: "{{ node_user }}" 8 | group: "{{ node_user }}" 9 | loop: 10 | - "{{ _node_chainspec_file | dirname }}" 11 | - "{{ _node_wasm_runtime_base_path }}" 12 | - "{{ _node_p2p_key_file | dirname }}" 13 | register: _node_create_directories_register 14 | 15 | - name: Chain | Copy {{ node_app_name }} chainspec files 16 | ansible.builtin.template: 17 | src: "{{ node_chainspec }}" 18 | owner: "{{ node_user }}" 19 | group: "{{ node_user }}" 20 | dest: "{{ _node_chainspec_file }}" 21 | mode: "0644" 22 | notify: restart service {{ node_handler_id }} 23 | ignore_errors: "{{ _node_create_directories_register.changed }}" 24 | when: node_chainspec != '' and not node_chainspec.startswith('http') 25 | 26 | - name: Chain | Download {{ node_app_name }} chainspec files 27 | ansible.builtin.get_url: 28 | url: "{{ node_chainspec }}" 29 | owner: "{{ node_user }}" 30 | group: "{{ node_user }}" 31 | dest: "{{ _node_chainspec_file }}" 32 | mode: "0644" 33 | force: true 34 | notify: restart service {{ node_handler_id }} 35 | when: 36 | - node_chainspec != '' 37 | - 
node_chainspec.startswith('http') 38 | - not ansible_check_mode # (Bug ansible/ansible#65687) 39 | 40 | - name: Chain | Copy {{ node_app_name }} parachain chainspec file 41 | ansible.builtin.template: 42 | src: "{{ node_parachain_chainspec }}" 43 | owner: "{{ node_user }}" 44 | group: "{{ node_user }}" 45 | dest: "{{ _node_parachain_chainspec_file }}" 46 | mode: "0644" 47 | notify: restart service {{ node_handler_id }} 48 | ignore_errors: "{{ _node_create_directories_register.changed }}" 49 | when: node_parachain_role != '' and node_parachain_chainspec != '' and not node_parachain_chainspec.startswith('http') 50 | 51 | - name: Chain | Download {{ node_app_name }} parachain chainspec file 52 | ansible.builtin.get_url: 53 | url: "{{ node_parachain_chainspec }}" 54 | owner: "{{ node_user }}" 55 | group: "{{ node_user }}" 56 | dest: "{{ _node_parachain_chainspec_file }}" 57 | mode: "0644" 58 | force: true 59 | notify: restart service {{ node_handler_id }} 60 | when: 61 | - node_parachain_role != '' 62 | - node_parachain_chainspec != '' 63 | - node_parachain_chainspec.startswith('http') 64 | - not ansible_check_mode # (Bug ansible/ansible#65687) 65 | 66 | - name: Chain | Download {{ node_app_name }} wasm runtime file 67 | ansible.builtin.get_url: 68 | url: "{{ node_wasm_runtime }}" 69 | owner: "{{ node_user }}" 70 | group: "{{ node_user }}" 71 | dest: "{{ _node_wasm_runtime_base_path }}/relaychain.wasm" 72 | mode: "0644" 73 | notify: restart service {{ node_handler_id }} 74 | ignore_errors: "{{ _node_create_directories_register.changed }}" 75 | when: node_wasm_runtime != '' 76 | 77 | - name: Chain | Download {{ node_app_name }} parachain wasm runtime file 78 | ansible.builtin.get_url: 79 | url: "{{ node_parachain_wasm_runtime }}" 80 | owner: "{{ node_user }}" 81 | group: "{{ node_user }}" 82 | dest: "{{ _node_wasm_runtime_base_path }}/parachain.wasm" 83 | mode: "0644" 84 | notify: restart service {{ node_handler_id }} 85 | ignore_errors: "{{ _node_create_directories_register.changed }}" 86 | when: node_parachain_role != '' and node_parachain_wasm_runtime != '' 87 | 88 | - name: Chain | Find unmanaged {{ node_app_name }} wasm runtime file 89 | ansible.builtin.find: 90 | paths: "{{ _node_wasm_runtime_base_path }}" 91 | patterns: ^((?!relaychain.wasm|parachain.wasm).)*$ 92 | use_regex: true 93 | register: _node_unmanaged_wasm_runtime_files 94 | 95 | - name: Chain | Delete unmanaged {{ node_app_name }} wasm runtime file 96 | ansible.builtin.file: 97 | path: "{{ item.path }}" 98 | state: absent 99 | loop: "{{ _node_unmanaged_wasm_runtime_files.files }}" 100 | notify: restart service {{ node_handler_id }} 101 | 102 | - name: Chain | Copy {{ node_app_name }} p2p key file 103 | ansible.builtin.copy: 104 | content: "{{ node_p2p_private_key }}" 105 | owner: "{{ node_user }}" 106 | group: "{{ node_user }}" 107 | dest: "{{ _node_p2p_key_file }}" 108 | mode: "0600" 109 | notify: restart service {{ node_handler_id }} 110 | ignore_errors: "{{ _node_create_directories_register.changed }}" 111 | when: node_p2p_private_key != '' 112 | 113 | - name: Chain | Copy {{ node_app_name }} parachain p2p key file 114 | ansible.builtin.copy: 115 | content: "{{ node_parachain_p2p_private_key }}" 116 | owner: "{{ node_user }}" 117 | group: "{{ node_user }}" 118 | dest: "{{ _node_parachain_p2p_key_file }}" 119 | mode: "0600" 120 | notify: restart service {{ node_handler_id }} 121 | ignore_errors: "{{ _node_create_directories_register.changed }}" 122 | when: node_parachain_role != '' and node_parachain_p2p_private_key != '' 
123 | -------------------------------------------------------------------------------- /roles/node/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ##################################################################################### 4 | # Common 5 | ##################################################################################### 6 | 7 | # https://semver.org/ 8 | _node_semver_regex: ^.*((?:0|(?:[1-9]\d*))\.(?:0|(?:[1-9]\d*))\.(?:0|(?:[1-9]\d*))(?:-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?).*$ 9 | _node_binary_version_from_url: "{{ (node_binary | regex_search(_node_semver_regex, '\\1'))[0] }}" 10 | 11 | # Name of the binary or absolute path to it 12 | _node_binary_gpg_binary: gpg 13 | 14 | _node_data_root_path: "{% if node_data_root_path != '' -%} {{ node_data_root_path }} {%- else -%} {{ _node_user_home_path }}/.local/share/polkadot {%- endif %}" 15 | _node_memory_profiler_log_path: "{% if node_memory_profiler_log_path != '' -%} {{ node_memory_profiler_log_path }} {%- else -%} {{ _node_user_home_path }}/logs {%- 16 | endif %}" 17 | _node_binary_path: "{{ _node_user_home_path }}/bin/{{ node_app_name }}" 18 | _node_main_binary_file_name: node 19 | _node_memory_profiler_binary_file: "{{ _node_binary_path }}/libbytehound.so" 20 | _node_wasm_runtime_base_path: "{{ _node_user_home_path }}/wasm_runtime/{{ node_app_name }}" 21 | _node_unit_file: /etc/systemd/system/{{ node_app_name }}.service 22 | 23 | _node_chain_backup_http_rclone_deb: https://downloads.rclone.org/v1.63.1/rclone-v1.63.1-linux-amd64.deb 24 | 25 | _node_profiles: 26 | validator: 27 | in_peers: "25" 28 | out_peers: "25" 29 | memory_high: 7900M 30 | memory_max: 8000M 31 | boot: 32 | in_peers: "25" 33 | out_peers: "25" 34 | memory_high: 10400M 35 | memory_max: 10500M 36 | rpc: 37 | in_peers: "25" 38 | out_peers: "25" 39 | memory_high: 5400M 40 | memory_max: 5500M 41 | full: 42 | in_peers: "25" 43 | out_peers: "25" 44 | memory_high: 5400M 45 | memory_max: 5500M 46 | 47 | _node_restore_list: " {%- if _node_restore_relaychain and not _node_restore_parachain -%} {{ [_node_chain_backup_data] }} {%- elif not _node_restore_relaychain and 48 | _node_restore_parachain -%} {{ [_node_parachain_chain_backup_data] }} {%- elif _node_restore_relaychain and _node_restore_parachain -%} {{ [_node_chain_backup_data, 49 | _node_parachain_chain_backup_data] }} {%- else -%} {{ [] }} {%- endif %}" 50 | 51 | ##################################################################################### 52 | # Relaychain 53 | ##################################################################################### 54 | 55 | _node_data_chain_path: "{{ _node_data_root_path }}{% if node_parachain_role != '' %}/polkadot{% endif %}/chains/{{ _node_chain_id }}" 56 | _node_p2p_key_file: "{{ _node_user_home_path }}/keys/{{ node_app_name }}_relaychain_p2p_key" 57 | _node_chainspec_file: "{{ _node_user_home_path }}/chainspecs/{{ node_app_name }}_relaychain_chainspec.json" 58 | 59 | _node_restore_relaychain: "{{ (node_chain_backup_restoring_type == 'http' and ( node_chain_backup_http_base_url != '' or node_chain_backup_http_url != '')) or (node_chain_backup_restoring_type 60 | == 'tar' and node_chain_backup_url != '') }}" 61 | _node_chain_backup_data: 62 | part: relaychain 63 | restoring_type: "{{ node_chain_backup_restoring_type }}" 64 | chain_path: "{{ _node_data_chain_path }}" 65 | db_folder: "{{ 'paritydb' if node_paritydb_enable else 'db' }}" 66 | tar_url: "{{ node_chain_backup_url 
}}" 67 | http_url: "{{ node_chain_backup_http_base_url + '/' + node_chain + ('-paritydb' if node_paritydb_enable else '-rocksdb') + ('-prune' if node_pruning > 0 else '-archive') 68 | }}" 69 | custom_http_url: "{{ node_chain_backup_http_url }}" 70 | 71 | ##################################################################################### 72 | # Parachain 73 | ##################################################################################### 74 | 75 | _node_parachain_data_chain_path: "{{ _node_data_root_path }}/chains/{{ _node_parachain_chain_id }}" 76 | _node_parachain_p2p_key_file: "{{ _node_user_home_path }}/keys/{{ node_app_name }}_parachain_p2p_key" 77 | _node_parachain_chainspec_file: "{{ _node_user_home_path }}/chainspecs/{{ node_app_name }}_parachain_chainspec.json" 78 | 79 | _node_restore_parachain: "{{ node_parachain_role != '' and ((node_parachain_chain_backup_restoring_type == 'http' and (node_parachain_chain_backup_http_base_url != 80 | '' or node_parachain_chain_backup_http_url != '' )) or (node_parachain_chain_backup_restoring_type == 'tar' and node_parachain_chain_backup_url != '')) }}" 81 | _node_parachain_chain_backup_data: 82 | part: parachain 83 | restoring_type: "{{ node_parachain_chain_backup_restoring_type }}" 84 | chain_path: "{{ _node_parachain_data_chain_path }}" 85 | db_folder: "{{ 'paritydb' if node_parachain_paritydb_enable else 'db' }}" 86 | tar_url: "{{ node_parachain_chain_backup_url }}" 87 | http_url: "{{ node_parachain_chain_backup_http_base_url + '/' + node_parachain_chain + ('-paritydb' if node_parachain_paritydb_enable else '-rocksdb') + ('-prune' 88 | if node_parachain_pruning > 0 else '-archive') }}" 89 | custom_http_url: "{{ node_parachain_chain_backup_http_url }}" 90 | -------------------------------------------------------------------------------- /roles/node/tasks/803-restore-chain-http.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restore {{ item.part }} | HTTP restoring | Install rclone 3 | ansible.builtin.apt: 4 | deb: "{{ _node_chain_backup_http_rclone_deb }}" 5 | when: node_chain_backup_http_install_rclone | bool 6 | 7 | - name: Restore {{ item.part }} | HTTP restoring | Check last version 8 | ansible.builtin.uri: 9 | url: "{{ item.http_url | trim | trim('/') }}/latest_version.meta.txt" 10 | method: GET 11 | return_content: true 12 | use_proxy: false 13 | register: _node_chain_backup_last_version_register 14 | until: _node_chain_backup_last_version_register.status is defined and _node_chain_backup_last_version_register.status == 200 15 | retries: 3 16 | delay: 10 17 | check_mode: false 18 | changed_when: false 19 | when: item.custom_http_url == '' 20 | 21 | - name: Restore {{ item.part }} | HTTP restoring | Setup _node_chain_backup_http_full_url 1 22 | ansible.builtin.set_fact: 23 | _node_chain_backup_http_full_url: "{% if item.custom_http_url == '' %} {{ item.http_url }}/{{ _node_chain_backup_last_version_register.content }} {% else %}{{ 24 | item.custom_http_url }}{% endif %}" 25 | 26 | - name: Restore {{ item.part }} | HTTP restoring | Setup _node_chain_backup_http_full_url 2 27 | ansible.builtin.set_fact: 28 | _node_chain_backup_http_full_url: "{{ _node_chain_backup_http_full_url | regex_replace('[\\s]+', '') | trim('/') }}" 29 | 30 | - name: Restore {{ item.part }} | HTTP restoring | Print backup url 31 | ansible.builtin.debug: 32 | msg: "{{ _node_chain_backup_http_full_url }}" 33 | 34 | - name: Restore {{ item.part }} | HTTP restoring | Print backup meta url 35 | 
ansible.builtin.debug: 36 | msg: "{{ _node_chain_backup_http_full_url }}.meta.txt" 37 | 38 | - name: Restore {{ item.part }} | HTTP restoring | Check the size of the backup 39 | ansible.builtin.uri: 40 | url: "{{ _node_chain_backup_http_full_url }}.meta.txt" 41 | method: GET 42 | return_content: true 43 | use_proxy: false 44 | register: _node_chain_restore_backup_size_register 45 | until: _node_chain_restore_backup_size_register.status is defined and _node_chain_restore_backup_size_register.status == 200 46 | retries: 3 47 | delay: 10 48 | check_mode: false 49 | changed_when: false 50 | when: _node_run_check_size 51 | 52 | - name: Restore {{ item.part }} | HTTP restoring | Print backup size 53 | ansible.builtin.debug: 54 | msg: Backup size = {{ (_node_chain_restore_backup_size_register.content | from_yaml).size | int | filesizeformat(true) }} 55 | when: _node_run_check_size 56 | 57 | - name: Restore {{ item.part }} | HTTP restoring | Fail if free space <500MB 58 | ansible.builtin.fail: 59 | msg: | 60 | Not enough free space to perform the restore, you should set the node_data_root_path variable 61 | to a path on a different drive with enough free space 62 | when: 63 | - _node_run_check_size 64 | - _node_restore_free_space | int - (_node_chain_restore_backup_size_register.content | from_yaml).size | int < 500 * 1024 * 1024 65 | # Skipped during check mode because it doesn't actually delete the db, making the result of this task wrong 66 | - not ansible_check_mode 67 | 68 | - name: Restore {{ item.part }} | HTTP restoring | Download the files.txt meta file 69 | ansible.builtin.get_url: 70 | url: "{{ _node_chain_backup_http_full_url }}/files.txt" 71 | dest: "{{ _node_temp_dir.path }}/{{ item.part }}-files.txt" 72 | mode: "0655" 73 | owner: root 74 | group: root 75 | timeout: 30 76 | check_mode: false 77 | changed_when: false 78 | 79 | - name: Restore {{ item.part }} | HTTP restoring | Stop service 80 | ansible.builtin.systemd: 81 | name: "{{ node_app_name }}" 82 | state: stopped 83 | notify: restart service {{ node_handler_id }} 84 | ignore_errors: "{{ not _node_systemd_unit_file_stat.stat.exists }}" 85 | when: not _node_use_tmp_restore_path 86 | 87 | - name: Restore {{ item.part }} | HTTP restoring | Download chain backup 88 | ansible.builtin.command: | 89 | rclone copy -v --contimeout=1m --retries 6 --retries-sleep 10 --error-on-no-transfer --inplace --no-gzip-encoding 90 | --disable-http2 --http-no-head --no-traverse --size-only --transfers={{ ansible_processor_vcpus * 5 }} 91 | --http-url {{ _node_chain_backup_http_full_url }} :http: 92 | {{ _node_backup_dl_path | quote }} --files-from-raw {{ _node_temp_dir.path }}/{{ item.part }}-files.txt 93 | changed_when: true 94 | notify: restart service {{ node_handler_id }} 95 | 96 | - name: Restore {{ item.part }} | HTTP restoring | Manage node_chain_backup_tmp_restore_path 97 | when: _node_use_tmp_restore_path 98 | block: 99 | - name: Restore {{ item.part }} | GCP restoring | Stop service and cleanup DB 100 | ansible.builtin.include_tasks: includes/_delete_db_folder.yml 101 | 102 | - name: Restore {{ item.part }} | HTTP restoring | Copy backup from temporary folder 103 | ansible.builtin.copy: 104 | src: "{{ node_chain_backup_tmp_restore_path }}/" 105 | dest: "{{ item.chain_path }}/{{ item.db_folder }}" 106 | owner: "{{ node_user }}" 107 | group: "{{ node_user }}" 108 | remote_src: true 109 | mode: "0755" 110 | 111 | - name: Restore {{ item.part }} | HTTP restoring | Delete temporary folder 112 | ansible.builtin.file: 113 | path: "{{ 
node_chain_backup_tmp_restore_path }}" 114 | state: absent 115 | -------------------------------------------------------------------------------- /roles/node/templates/env.j2: -------------------------------------------------------------------------------- 1 | NAME="{{ node_app_name }}" 2 | 3 | COMMON="\ 4 | --base-path {{ _node_data_root_path }} 5 | {%- if node_enable_detailed_log_output %} \ 6 | --detailed-log-output 7 | {%- endif %}" 8 | 9 | {% if ( node_parachain_relay_chain_rpc_urls | length ) == 0 %} 10 | RC_NAME="{% if node_parachain_has_name_fix %}--name '{{ node_public_name }}'{% endif %}" 11 | 12 | RC_KEY=" 13 | {%- if node_p2p_private_key != '' %} 14 | --node-key-file {{ _node_p2p_key_file }} 15 | {%- endif %}" 16 | 17 | RC_ROLE_SPECIFIC="\ 18 | {% if node_role == 'validator' %} 19 | --validator 20 | {%- elif node_role == 'rpc' %} 21 | {% if node_legacy_rpc_flags %} 22 | --unsafe-ws-external \ 23 | {% else %} 24 | --unsafe-rpc-external \ 25 | {% endif %} 26 | --rpc-methods Safe \ 27 | --rpc-cors '*' 28 | {%- elif node_role == 'boot' %} 29 | --listen-addr=/ip4/{{ node_p2p_bind_addr }}/tcp/{{ node_p2p_ws_port }}/ws 30 | {%- elif node_role == 'full' %} 31 | {%- endif %}" 32 | 33 | RC_CHAIN=" 34 | {%- if node_chainspec != '' %} 35 | --chain {{ _node_chainspec_file }} 36 | {%- else %} 37 | --chain {{ node_chain }} 38 | {%- endif %}" 39 | 40 | RC_ADDR="\ 41 | --listen-addr=/ip4/{{ node_p2p_bind_addr }}/tcp/{{ node_p2p_port }} 42 | {%- if node_enable_public_ip_detection %} \ 43 | --public-addr=/ip4/{{ ipify_public_ip }}/tcp/{{ node_p2p_public_port }} 44 | {%- endif %}" 45 | 46 | RC_CONNECTIONS="--in-peers {{ node_in_peers }} --out-peers {{ node_out_peers }}" 47 | 48 | RC_DB="\ 49 | {% if node_paritydb_enable %} 50 | --database paritydb \ 51 | {% endif %} 52 | {% if node_db_cache != '' %} 53 | --db-cache {{ node_db_cache }} 54 | {% endif %}" 55 | 56 | RC_TELEMETRY=" 57 | {%- if not node_telemetry_enable %} 58 | --no-telemetry 59 | {%- else %} 60 | {%- if node_telemetry_url != '' %} 61 | --telemetry-url '{{ node_telemetry_url }}' 62 | {%- endif %} 63 | {%- endif %}" 64 | 65 | RC_PRUNING=" 66 | {%- if node_pruning > 0 %} 67 | --state-pruning={{ node_pruning }} 68 | {%- else %} 69 | --state-pruning=archive 70 | {%- endif %}" 71 | 72 | RC_LOGS=" 73 | {%- if node_log_trace_enable %} 74 | -l{{ node_log_trace_config }} 75 | {%- elif node_log_debug_enable %} 76 | -l{{ node_log_debug_config }} 77 | {%- endif %}" 78 | 79 | RC_METRICS="\ 80 | {% if node_prometheus_external_enable %} 81 | --prometheus-external \ 82 | {% endif %} 83 | --prometheus-port {{ node_prometheus_port }}" 84 | 85 | RC_WS="\ 86 | {% if node_legacy_rpc_flags %} 87 | --ws-port {{ node_rpc_ws_port }} \ 88 | --ws-max-connections {{ node_ws_max_connections }} 89 | {%- else %} 90 | --rpc-max-connections {{ node_ws_max_connections }} 91 | {%- endif %}" 92 | 93 | RC_RPC="--rpc-port {{ node_rpc_port }}" 94 | 95 | RC_WASM_RUNTIME=" 96 | {%- if node_wasm_runtime != '' %} 97 | --wasm-runtime-overrides {{ _node_wasm_runtime_base_path }} 98 | {%- endif %}" 99 | 100 | RC_CUSTOM_OPTIONS="\ 101 | {% for option in node_custom_options %} 102 | {{ option }}{% if not loop.last %} \ 103 | {% endif %} 104 | {%- endfor %}" 105 | {%- endif %} 106 | 107 | {% if node_log_trace_enable or node_parachain_log_trace_enable %} 108 | RUST_BACKTRACE=1 109 | {%- endif %} 110 | 111 | {% if node_parachain_role != '' %} 112 | PC_NAME="--name {{ node_parachain_public_name }}" 113 | 114 | PC_KEY=" 115 | {%- if node_parachain_p2p_private_key != '' %} 116 | 
--node-key-file {{ _node_parachain_p2p_key_file }} 117 | {%- endif %}" 118 | 119 | PC_ROLE_SPECIFIC="\ 120 | {% if node_parachain_role == 'collator' %} 121 | --collator 122 | {%- elif node_parachain_role == 'validator' %} 123 | --validator 124 | {%- elif node_parachain_role == 'rpc' %} 125 | {% if node_legacy_rpc_flags %} 126 | --unsafe-ws-external \ 127 | {% else %} 128 | --unsafe-rpc-external \ 129 | {% endif %} 130 | --rpc-methods Safe \ 131 | --rpc-cors '*' 132 | {%- elif node_parachain_role == 'full' %} 133 | {%- endif %}" 134 | 135 | PC_CHAIN=" 136 | {%- if node_parachain_chainspec != '' %} 137 | --chain {{ _node_parachain_chainspec_file }} 138 | {%- else %} 139 | --chain {{ node_parachain_chain }} 140 | {%- endif %}" 141 | 142 | PC_REMOTE_RC_URLS=" 143 | {%- if ( node_parachain_relay_chain_rpc_urls | length ) != 0 %} 144 | --relay-chain-rpc-urls {% for url in node_parachain_relay_chain_rpc_urls %}'{{ url }}'{% if not loop.last %} {% endif %}{% endfor %} 145 | {%- endif %}" 146 | 147 | 148 | PC_ADDR="\ 149 | --listen-addr=/ip4/{{ node_parachain_p2p_bind_addr }}/tcp/{{ node_parachain_p2p_port }} 150 | {%- if node_enable_public_ip_detection %} \ 151 | --public-addr=/ip4/{{ ipify_public_ip }}/tcp/{{ node_parachain_p2p_public_port }} 152 | {%- endif %}" 153 | 154 | PC_CONNECTIONS="--in-peers {{ node_parachain_in_peers }} --out-peers {{ node_parachain_out_peers }}" 155 | 156 | PC_DB="\ 157 | {% if node_parachain_paritydb_enable %} 158 | --database paritydb \ 159 | {% endif %} 160 | {% if node_parachain_db_cache != '' %} 161 | --db-cache {{ node_parachain_db_cache }} 162 | {% endif %}" 163 | 164 | PC_TELEMETRY=" 165 | {%- if not node_parachain_telemetry_enable %} 166 | --no-telemetry 167 | {%- else %} 168 | {%- if node_parachain_telemetry_url != '' %} 169 | --telemetry-url '{{ node_parachain_telemetry_url }}' 170 | {%- endif %} 171 | {%- endif %}" 172 | 173 | PC_PRUNING=" 174 | {%- if node_parachain_pruning > 0 %} 175 | --state-pruning={{ node_parachain_pruning }} 176 | {%- else %} 177 | --state-pruning=archive 178 | {%- endif %}" 179 | 180 | PC_LOGS=" 181 | {%- if node_parachain_log_trace_enable %} 182 | -l{{ node_parachain_log_trace_config }} 183 | {%- endif %}" 184 | 185 | PC_METRICS="\ 186 | {% if node_parachain_prometheus_external_enable %} 187 | --prometheus-external \ 188 | {% endif %} 189 | --prometheus-port {{ node_parachain_prometheus_port }}" 190 | 191 | PC_WS="\ 192 | {% if node_legacy_rpc_flags %} 193 | --ws-port {{ node_parachain_rpc_ws_port }} \ 194 | --ws-max-connections {{ node_parachain_ws_max_connections }} 195 | {%- else %} 196 | --rpc-max-connections {{ node_parachain_ws_max_connections }} 197 | {%- endif %}" 198 | 199 | PC_RPC="--rpc-port {{ node_parachain_rpc_port }}" 200 | 201 | PC_WASM_RUNTIME=" 202 | {%- if node_parachain_wasm_runtime != '' %} 203 | --wasm-runtime-overrides {{ _node_wasm_runtime_base_path }} 204 | {%- endif %}" 205 | 206 | PC_CUSTOM_OPTIONS="\ 207 | {% for option in node_parachain_custom_options %} 208 | {{ option }}{% if not loop.last %} \ 209 | {% endif %} 210 | {%- endfor %}" 211 | {% endif %} 212 | 213 | {% if node_memory_profiler_enable %} 214 | MEMORY_PROFILER_OUTPUT="{{ _node_memory_profiler_log_path }}/profiling_%e_%t_%p.dat" 215 | MEMORY_PROFILER_LOGFILE="{{ _node_memory_profiler_log_path }}/profiling_%e_%t_%p.txt" 216 | MEMORY_PROFILER_LOG="{{ node_memory_profiler_log_level }}" 217 | MEMORY_PROFILER_CULL_TEMPORARY_ALLOCATIONS="1" 218 | MEMORY_PROFILER_TEMPORARY_ALLOCATION_LIFETIME_THRESHOLD="{{ 
node_memory_profiler_temporary_allocation_lifetime_threshold }}" 219 | LD_PRELOAD="{{ _node_memory_profiler_binary_file }}" 220 | {% endif %} 221 | -------------------------------------------------------------------------------- /roles/state_exporter/files/exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import schedule 5 | import time 6 | import sys 7 | import os 8 | import logging 9 | import traceback 10 | from prometheus_client import start_http_server, Gauge 11 | import psutil 12 | 13 | LOGGING_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' 14 | 15 | node_chain_folders = { 16 | 'polkadot': 'polkadot', 17 | 'kusama': 'ksmcc3', 18 | 'westend': 'westend2', 19 | 'rococo': 'rococo_v1_12' 20 | } 21 | 22 | process_metrics = { 23 | 'polkadot_state_process_cmdline': Gauge( 24 | 'polkadot_state_process_cmdline', 25 | 'cmdline of a node process', 26 | ['name', 'pid', 'cmd_line']), 27 | 'polkadot_state_process_threads': Gauge( 28 | 'polkadot_state_process_threads', 29 | 'number of threads of a node process', 30 | ['name', 'pid']), 31 | 'polkadot_state_process_memory': Gauge( 32 | 'polkadot_state_process_memory', 33 | 'memory used by a node process', 34 | ['name', 'pid']), 35 | 'polkadot_state_process_cpu_percent': Gauge( 36 | 'polkadot_state_process_cpu_percent', 37 | 'cpu usage (percent) of a node process', 38 | ['name', 'pid']) 39 | } 40 | 41 | node_metrics = { 42 | 'polkadot_state_node_session_key': Gauge( 43 | 'polkadot_state_node_session_key', 44 | 'session key of a node', 45 | ['name', 'pid', 'session_key']) 46 | } 47 | 48 | PORT = 9110 49 | 50 | 51 | def update_metrics(): 52 | processes = {} 53 | 54 | for proc in psutil.process_iter(): 55 | try: 56 | process_cmdline = proc.cmdline() 57 | if not (len(process_cmdline) > 1 and '--name' in process_cmdline and '--chain' in process_cmdline): 58 | continue 59 | process_chain = process_cmdline[::-1][process_cmdline[::-1].index('--chain') - 1] 60 | process_name = process_cmdline[::-1][process_cmdline[::-1].index('--name') - 1] 61 | process_pid = proc.pid 62 | process_base_path = process_cmdline[::-1][process_cmdline[::-1].index('--base-path') - 1]\ 63 | if '--base-path' in process_cmdline else None 64 | # It will delete the previous process if 65 | # it's the parent of the current process (it can be docker, bash, etc.)
66 | if process_name in processes and processes[process_name]['pid'] < process_pid: 67 | del processes[process_name] 68 | processes[process_name] = {'pid': process_pid, 69 | 'chain': process_chain, 70 | 'cmd_line': ' '.join(process_cmdline[1:]), 71 | 'threads': proc.num_threads(), 72 | 'memory': proc.memory_info().rss, 73 | 'cpu_percent': proc.cpu_percent(), 74 | 'base_path': process_base_path 75 | } 76 | except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): 77 | pass 78 | except Exception as e: 79 | logger.error(e) 80 | logger.error(traceback.print_tb(e.__traceback__)) 81 | return 82 | logger.debug('processes were found: ' + str(processes)) 83 | 84 | try: 85 | # wipe metrics 86 | for metric in {**process_metrics, **node_metrics}.items(): 87 | for sample in metric[1].collect()[0].samples: 88 | metric[1].remove(*list(sample.labels.values())) 89 | 90 | for proc in processes: 91 | process_metrics['polkadot_state_process_cmdline'].labels( 92 | name=proc, 93 | pid=processes[proc]['pid'], 94 | cmd_line=processes[proc]['cmd_line']).set(1) 95 | process_metrics['polkadot_state_process_threads'].labels( 96 | name=proc, 97 | pid=processes[proc]['pid']).set(processes[proc]['threads']) 98 | process_metrics['polkadot_state_process_memory'].labels( 99 | name=proc, 100 | pid=processes[proc]['pid']).set(processes[proc]['memory']) 101 | process_metrics['polkadot_state_process_cpu_percent'].labels( 102 | name=proc, 103 | pid=processes[proc]['pid']).set(processes[proc]['cpu_percent']) 104 | if processes[proc]['base_path']: 105 | keystore_path = os.path.join( 106 | processes[proc]['base_path'], 107 | 'chains', 108 | node_chain_folders[processes[proc]['chain']], 109 | 'keystore') 110 | node_session_key = parse_session_key(keystore_path) 111 | if node_session_key: 112 | node_metrics['polkadot_state_node_session_key'].labels( 113 | name=proc, 114 | pid=processes[proc]['pid'], 115 | session_key=node_session_key).set(1) 116 | except Exception as e: 117 | logger.error(e) 118 | logger.error(traceback.print_tb(e.__traceback__)) 119 | return 120 | 121 | 122 | def parse_session_key(dir): 123 | # variants of key prefixes in the right order 124 | key_formats = ( 125 | ['6772616e', '62616265', '696d6f6e', '70617261', '61756469'], # v1 validator keys (gran,babe,imon,para,audi) 126 | ['6772616e', '62616265', '696d6f6e', '70617261', '6173676e', '61756469'], # v2 validator keys (gran,babe,imon,para,asgn,audi) 127 | ['6772616e', '62616265', '696d6f6e', '70617261', '6173676e', '61756469', '62656566'], # v3 validator keys (gran,babe,imon,para,asgn,audi,beef) 128 | ['6772616e', '62616265', '70617261', '6173676e', '61756469', '62656566'], # v4 validator keys (gran,babe,para,asgn,audi,beef) 129 | ['61757261'] # collator keys (aura) 130 | ) 131 | possible_prefixes = list(set([j for i in key_formats for j in i])) 132 | if os.path.isdir(dir): 133 | os.chdir(dir) 134 | files = os.listdir('.') 135 | files = [i for i in files if len(i) in [72, 74] and i[0:8] in possible_prefixes] 136 | if not files: 137 | return None 138 | # find creation time of the newest key 139 | time_of_last_key = sorted(list(set([int(os.path.getmtime(i)) for i in files])))[-1] 140 | # parse the newest public keys and prefix them with the names of files. 
141 | # make sure to only pick up the keys created within 60 seconds interval 142 | keys = {i[0:8]: i[8:] for i in files if int(os.path.getmtime(i)) <= time_of_last_key and int(os.path.getmtime(i)) > time_of_last_key - 60} 143 | logger.debug('keys were found: ' + str(keys) + ' in the keystore path: ' + dir) 144 | for key_format in key_formats: 145 | if set(keys.keys()) == set(key_format): 146 | # build the session key 147 | session_key = '0x' + ''.join([keys[i] for i in key_format]) 148 | logger.debug('the session key was parsed: ' + session_key + ' in the keystore path: ' + dir) 149 | return(session_key) 150 | logger.error('Error parsing the session key') 151 | return None 152 | 153 | 154 | if __name__ == '__main__': 155 | global logger 156 | logger = logging.getLogger('state_exporter') 157 | 158 | # console handler 159 | ch = logging.StreamHandler() 160 | if len(sys.argv) > 1 and sys.argv[1] == 'debug': 161 | logger.setLevel(logging.DEBUG) 162 | else: 163 | logger.setLevel(logging.INFO) 164 | formatter = logging.Formatter(LOGGING_FORMAT) 165 | ch.setFormatter(formatter) 166 | logger.addHandler(ch) 167 | 168 | # Start up the server to expose the metrics 169 | start_http_server(PORT) # Metrics server 170 | schedule.every(10).seconds.do(update_metrics) 171 | while True: 172 | schedule.run_pending() 173 | time.sleep(1) 174 | -------------------------------------------------------------------------------- /roles/node/tasks/400-binary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Binary | Set variables 3 | ansible.builtin.set_fact: 4 | _node_binaries: "{{ [{'url': node_binary, 'signature_url': node_binary_signature, 'dst': _node_main_binary_file_name}] }}" 5 | _node_temp_binary_path: "{{ _node_temp_dir.path }}/bin" 6 | 7 | - name: Binary | Check current binary path exists 8 | ansible.builtin.stat: 9 | path: "{{ _node_binary_path }}" 10 | register: _node_binary_path_stat 11 | 12 | - name: Binary | Create temp bin directory 13 | ansible.builtin.file: 14 | path: "{{ _node_temp_binary_path }}" 15 | state: directory 16 | mode: "0755" 17 | owner: "{{ node_user }}" 18 | group: "{{ node_user }}" 19 | check_mode: false 20 | changed_when: false 21 | 22 | - name: Binary | Add prepare_worker to the '_node_binaries' variable 23 | ansible.builtin.set_fact: 24 | _node_binaries: "{{ _node_binaries + [{'url': node_prepare_worker_binary, 'signature_url': node_prepare_worker_binary_signature, 'dst': 'polkadot-prepare-worker'}] 25 | }}" 26 | when: node_prepare_worker_binary != '' 27 | 28 | - name: Binary | Add execute_worker to the '_node_binaries' variable 29 | ansible.builtin.set_fact: 30 | _node_binaries: "{{ _node_binaries + [{'url': node_execute_worker_binary, 'signature_url': node_execute_worker_binary_signature, 'dst': 'polkadot-execute-worker'}] 31 | }}" 32 | when: node_execute_worker_binary != '' 33 | 34 | - name: Binary | Download binaries to temp bin directory 35 | ansible.builtin.get_url: 36 | url: "{{ item.url }}" 37 | dest: "{{ _node_temp_binary_path }}/{{ item.dst }}" 38 | mode: "0755" 39 | owner: root 40 | group: root 41 | timeout: 30 42 | headers: 43 | PRIVATE-TOKEN: "{{ node_binary_download_private_token }}" 44 | loop: "{{ _node_binaries }}" 45 | check_mode: false 46 | changed_when: false 47 | 48 | - name: Binary | GPG signature verification 49 | when: node_binary_signature != '' or node_prepare_worker_binary_signature != '' or node_execute_worker_binary_signature != '' 50 | 51 | block: 52 | - name: Binary | Download GPG signatures for 
binaries 53 | ansible.builtin.get_url: 54 | url: "{{ item.signature_url }}" 55 | dest: "{{ _node_temp_binary_path }}/{{ item.dst }}.asc" 56 | mode: "0644" 57 | check_mode: false 58 | changed_when: false 59 | loop: "{{ _node_binaries }}" 60 | when: item.signature_url != '' 61 | 62 | - name: Binary | Import release GPG public key 63 | ansible.builtin.command: | 64 | {{ _node_binary_gpg_binary }} --keyserver hkps://keyserver.ubuntu.com --receive-keys {{ node_binary_release_key_id }} 65 | register: _node_keyout 66 | changed_when: "'imported' in _node_keyout.stderr" 67 | failed_when: _node_keyout.rc != 0 68 | check_mode: false 69 | 70 | - name: Binary | Verify GPG signatures for binaries 71 | ansible.builtin.command: | 72 | {{ _node_binary_gpg_binary }} --verify {{ _node_temp_binary_path }}/{{ item.dst }}.asc 73 | register: _node_verifyout 74 | check_mode: false 75 | changed_when: false 76 | loop: "{{ _node_binaries }}" 77 | when: item.signature_url != '' 78 | failed_when: _node_verifyout.rc != 0 79 | 80 | - name: Binary | Check new version 81 | ansible.builtin.command: "{{ _node_temp_binary_path }}/{{ _node_main_binary_file_name }} --version" 82 | register: _node_new_version 83 | check_mode: false 84 | changed_when: false 85 | 86 | - name: Binary | Check current binary file exists 87 | ansible.builtin.stat: 88 | path: "{{ _node_binary_path }}/{{ _node_main_binary_file_name }}" 89 | register: _node_binary_file_stat 90 | when: _node_binary_path_stat.stat.exists and _node_binary_path_stat.stat.isdir 91 | 92 | - name: Binary | Check current version 93 | ansible.builtin.command: "{{ _node_binary_path }}/{{ _node_main_binary_file_name }} --version" 94 | register: _node_current_version 95 | when: _node_binary_file_stat.stat is defined and _node_binary_file_stat.stat.exists and _node_binary_file_stat.stat.isreg 96 | check_mode: false 97 | changed_when: false 98 | 99 | - name: Binary | Setup version variables 100 | ansible.builtin.set_fact: 101 | _node_version_msg: | 102 | Current version: {{ current_version }} 103 | New version: {{ new_version }} 104 | _node_commit_hash: "{{ new_version.split('-')[1] }}" 105 | _node_version_equal: "{{ current_version == new_version }}" 106 | vars: 107 | current_version: "{% if _node_current_version.stdout is defined -%} {{ _node_current_version.stdout.split(' ')[1] }}{%- else -%}absent{%- endif %}" 108 | new_version: "{{ _node_new_version.stdout.split(' ')[1] }}" 109 | 110 | - name: Binary | Print versions according to the '--version' flag 111 | ansible.builtin.debug: 112 | msg: "{% if _node_version_equal -%}Versions are equal!{%- else -%}{{ _node_version_msg }}{%- endif %}" 113 | 114 | - name: Binary | Migration between versions 115 | block: 116 | - name: Binary | Check new version 117 | ansible.builtin.command: "{{ _node_temp_binary_path }}/{{ _node_main_binary_file_name }} --help" 118 | register: _node_new_help 119 | check_mode: false 120 | changed_when: false 121 | 122 | - name: Binary | Setup supported flags 123 | ansible.builtin.set_fact: 124 | _node_legacy_rpc_flags_supported: "{{ '--ws-port' in _node_new_help.stdout }}" 125 | _node_separate_binary_supported: "{{ '--workers-path' in _node_new_help.stdout }}" 126 | 127 | - name: Binary | Check new rpc flags 128 | ansible.builtin.fail: 129 | msg: "ERROR: RPC flag --ws-port {{ 'IS' if _node_legacy_rpc_flags_supported else 'NOT' }} supported. 
'node_legacy_rpc_flags' should be set to {{ _node_legacy_rpc_flags_supported 130 | }}" 131 | when: 132 | # XOR (skip fail if both true or both false) 133 | - node_legacy_rpc_flags or _node_legacy_rpc_flags_supported 134 | - not (node_legacy_rpc_flags and _node_legacy_rpc_flags_supported) 135 | 136 | - name: Binary | Check new worker flags 137 | ansible.builtin.fail: 138 | msg: > 139 | ERROR: node flag --workers-path {{ 'IS' if _node_separate_binary_supported else 'NOT' }} supported. 'node_prepare_worker_binary' and 'node_execute_worker_binary' 140 | variables should be set 141 | when: 142 | - _node_separate_binary_supported 143 | - node_role == 'validator' 144 | - node_prepare_worker_binary == '' or node_execute_worker_binary == '' 145 | 146 | - name: Binary | Remove old binary file 147 | ansible.builtin.file: 148 | path: "{{ _node_binary_path }}" 149 | state: absent 150 | when: _node_binary_path_stat.stat.exists and not _node_binary_path_stat.stat.isdir 151 | 152 | - name: Binary | Create bin directory 153 | ansible.builtin.file: 154 | path: "{{ _node_binary_path }}" 155 | state: directory 156 | mode: "0755" 157 | owner: "{{ node_user }}" 158 | group: "{{ node_user }}" 159 | ignore_errors: "{{ ansible_check_mode and _node_binary_path_stat.stat.exists and not _node_binary_path_stat.stat.isdir }}" 160 | 161 | # We don't need checking of hashes. The copy module does it itself. 162 | # If you have the same binary file you will see a green check in the check mode. 163 | - name: Binary | Copy new binaries 164 | ansible.builtin.copy: 165 | src: "{{ _node_temp_binary_path }}/{{ item.dst }}" 166 | dest: "{{ _node_binary_path }}/{{ item.dst }}" 167 | remote_src: true 168 | mode: "0755" 169 | owner: "{{ node_user }}" 170 | group: "{{ node_user }}" 171 | loop: "{{ _node_binaries }}" 172 | notify: restart service {{ node_handler_id }} 173 | ignore_errors: "{{ ansible_check_mode and (not _node_binary_path_stat.stat.exists or (_node_binary_path_stat.stat.exists and not _node_binary_path_stat.stat.isdir)) 174 | }}" 175 | 176 | - name: Binary | Block 177 | when: node_prometheus_file_exporter_path != '' 178 | block: 179 | - name: Binary | Print _node_binary_version_from_url 180 | ansible.builtin.debug: 181 | var: _node_binary_version_from_url 182 | 183 | - name: Binary | Create annotation in Prometheus 184 | ansible.builtin.template: 185 | src: annotation.j2 186 | dest: "{{ node_prometheus_file_exporter_path }}" 187 | owner: root 188 | group: root 189 | mode: "0644" 190 | --------------------------------------------------------------------------------
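For orientation only: a minimal sketch of inventory variables that would drive the binary deployment tasks in `roles/node/tasks/400-binary.yml` above. The URLs and the GPG key ID are placeholders introduced for this example, not real release artifacts; only the variable names are taken from the tasks themselves.

```yaml
# group_vars/polkadot.yaml (illustrative sketch; URLs and key ID are placeholders)
node_app_name: polkadot
node_binary: "https://example.com/releases/v1.0.0/polkadot"                                    # placeholder URL
node_binary_signature: "https://example.com/releases/v1.0.0/polkadot.asc"                      # placeholder URL
node_prepare_worker_binary: "https://example.com/releases/v1.0.0/polkadot-prepare-worker"
node_prepare_worker_binary_signature: "https://example.com/releases/v1.0.0/polkadot-prepare-worker.asc"
node_execute_worker_binary: "https://example.com/releases/v1.0.0/polkadot-execute-worker"
node_execute_worker_binary_signature: "https://example.com/releases/v1.0.0/polkadot-execute-worker.asc"
node_binary_release_key_id: "REPLACE_WITH_RELEASE_SIGNING_KEY_ID"   # placeholder GPG key ID
node_legacy_rpc_flags: false   # must match the binary: true only if it still supports --ws-port
```

With signature URLs set, the role downloads the `.asc` files, imports `node_binary_release_key_id` from the Ubuntu keyserver, and verifies each binary before copying it into place; leaving a signature variable empty skips verification for that file.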