├── .github └── workflows │ └── master.yml ├── .gitignore ├── .gitlab-ci.yml ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── debian ├── postinst ├── prerm ├── validator-exporter.service ├── validator-manager.service └── validator.service ├── src ├── cli │ ├── contract.rs │ ├── exporter.rs │ ├── init │ │ ├── contracts.rs │ │ ├── mod.rs │ │ ├── node.rs │ │ └── systemd.rs │ ├── mod.rs │ ├── node.rs │ ├── seed.rs │ └── validator.rs ├── config │ ├── app_config.rs │ ├── global_config │ │ ├── mainnet.json │ │ ├── mod.rs │ │ └── testnet.json │ ├── mod.rs │ ├── node_config │ │ ├── default_config.json │ │ ├── log_cfg.yml │ │ └── mod.rs │ └── stored_keys.rs ├── contracts │ ├── cluster.rs │ ├── depool │ │ ├── mod.rs │ │ ├── stever │ │ │ ├── DePoolProxy.code │ │ │ ├── DePoolV1.tvc │ │ │ └── DePoolV2.tvc │ │ └── v3 │ │ │ ├── DePool.tvc │ │ │ └── DePoolProxy.code │ ├── elector.rs │ ├── mod.rs │ ├── strategy.rs │ └── wallet │ │ ├── EverWallet.code │ │ └── mod.rs ├── crypto │ ├── bip39.rs │ ├── legacy.rs │ └── mod.rs ├── defaults.rs ├── dirs.rs ├── exporter │ ├── file_target.rs │ ├── http_target.rs │ ├── mod.rs │ └── stdout_target.rs ├── main.rs ├── network │ ├── mod.rs │ ├── node_tcp_rpc │ │ ├── mod.rs │ │ ├── proto.rs │ │ ├── stats.rs │ │ └── tcp_adnl │ │ │ ├── mod.rs │ │ │ └── queries_cache.rs │ ├── node_udp_rpc │ │ ├── mod.rs │ │ └── proto.rs │ └── subscription.rs ├── proto.tl ├── util │ ├── block_stuff.rs │ ├── cli.rs │ ├── mod.rs │ ├── serde.rs │ ├── system.rs │ └── transaction.rs └── validator │ └── mod.rs └── templates ├── depool.toml ├── example.toml ├── single.toml └── stever.toml /.github/workflows/master.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | name: master 4 | 5 | jobs: 6 | check: 7 | name: Check 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout sources 11 | uses: actions/checkout@v2 12 | 13 | - name: Install stable toolchain 14 | uses: 
actions-rs/toolchain@v1 15 | with: 16 | profile: minimal 17 | toolchain: stable 18 | override: true 19 | 20 | - name: Run cargo check 21 | uses: actions-rs/cargo@v1 22 | with: 23 | command: check 24 | 25 | test: 26 | name: Test Suite 27 | runs-on: ubuntu-latest 28 | steps: 29 | - name: Checkout sources 30 | uses: actions/checkout@v2 31 | 32 | - name: Install stable toolchain 33 | uses: actions-rs/toolchain@v1 34 | with: 35 | profile: minimal 36 | toolchain: stable 37 | override: true 38 | 39 | - name: Run cargo test 40 | uses: actions-rs/cargo@v1 41 | with: 42 | command: test 43 | 44 | lints: 45 | name: Lints 46 | runs-on: ubuntu-latest 47 | steps: 48 | - name: Checkout sources 49 | uses: actions/checkout@v2 50 | 51 | - name: Install stable toolchain 52 | uses: actions-rs/toolchain@v1 53 | with: 54 | profile: minimal 55 | toolchain: stable 56 | override: true 57 | components: rustfmt, clippy 58 | 59 | - name: Run cargo fmt 60 | uses: actions-rs/cargo@v1 61 | with: 62 | command: fmt 63 | args: --all -- --check 64 | 65 | - name: Run cargo clippy 66 | uses: actions-rs/cargo@v1 67 | with: 68 | command: clippy 69 | args: -- -D warnings 70 | 71 | build-deb: 72 | name: Build .deb package 73 | runs-on: ubuntu-20.04 74 | steps: 75 | - name: Checkout sources 76 | uses: actions/checkout@v2 77 | 78 | - name: Install stable toolchain 79 | uses: actions-rs/toolchain@v1 80 | with: 81 | profile: minimal 82 | toolchain: stable 83 | override: true 84 | 85 | - name: Install `cargo-deb` 86 | uses: actions-rs/cargo@v1 87 | with: 88 | command: install 89 | args: cargo-deb 90 | 91 | - name: Run cargo deb 92 | uses: actions-rs/cargo@v1 93 | with: 94 | command: deb 95 | args: --output nodekeeper.deb 96 | 97 | - uses: actions/upload-artifact@v3 98 | with: 99 | name: nodekeeper.deb 100 | path: nodekeeper.deb 101 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 
/.idea 2 | /target 3 | 4 | /config.yaml 5 | /config.json 6 | /config.toml 7 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | include: 2 | - project: "infrastructure/gitlab-ci" 3 | ref: main 4 | file: "/templates/cloudbuild-common.gitlab-ci.yaml" 5 | 6 | stages: 7 | - cloudbuild 8 | 9 | cloudbuild:package: 10 | extends: .cloudbuild:deb-package-nodekeeper 11 | only: 12 | - tags 13 | except: 14 | - branches 15 | variables: 16 | REPO_NAME: broxus-deb-pub 17 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # 0.2.18 (2024-05-27) 2 | 3 | ### Fixed 4 | 5 | - Fixed control query error message response. 6 | 7 | # 0.2.17 (2024-04-22) 8 | 9 | ### Fixed 10 | 11 | - Fixed incorrect node version parsing. 12 | 13 | # 0.2.16 (2024-04-22) 14 | 15 | ### Added 16 | 17 | - Added support for the new default repo and renamed node binary. 18 | - Added `getrawstats` subcommand to get validation stats as is. 19 | 20 | # 0.2.15 (2024-02-19) 21 | 22 | ### Fixed 23 | 24 | - Fixed incorrect memory info parsing (https://github.com/GuillaumeGomez/sysinfo/pull/1058). 25 | 26 | # 0.2.14 (2024-02-13) 27 | 28 | ### Changed 29 | 30 | - Updated the default node config. 31 | 32 | # 0.2.12 / 0.2.13 (2024-02-10) 33 | 34 | ### Changed 35 | 36 | - Build deb package with the lowest version of glibc (2.27). 37 | 38 | # 0.2.11 (2024-01-28) 39 | 40 | ### Fixed 41 | 42 | - Added additional retries for `GetCapabilities` query to avoid false negative node availability checks. 43 | 44 | # 0.2.10 (2023-10-17) 45 | 46 | ### Changed 47 | 48 | - Updated default global config for Everscale. 49 | 50 | # 0.2.9 (2023-08-29) 51 | 52 | ### Fixed 53 | 54 | - Fixed potential units mismatch in storage fee computation. 
55 | 56 | # 0.2.8 (2023-08-29) 57 | 58 | ### Added 59 | 60 | - Added `storage_fee` field for `nodekeeper validator balance` output entries. 61 | 62 | ### Changed 63 | 64 | - When maintaining DePool balances, the accumulated storage fee on proxies 65 | is now taken into account. 66 | 67 | # 0.2.7 (2023-08-24) 68 | 69 | ### Added 70 | 71 | - Added `nodekeeper validator balance` subcommand which outputs a structured info about 72 | validator wallet(s) and address(es). 73 | - Added `nodekeeper validator withdraw ` subcommand which allows to easily 74 | withdraw tokens from the validator wallet. 75 | 76 | ### Changed 77 | 78 | - Validator manager subcommand moved from `nodekeeper validator` to `nodekeeper validator run`. 79 | 80 | ### Fixed 81 | 82 | - Fixed hidden cursor state after `ctrl+C` interruption in prompts. 83 | 84 | # 0.2.6 (2023-07-09) 85 | 86 | ### Changed 87 | 88 | - The `validator` service will not be restarted during an update. 89 | 90 | ### Fixed 91 | 92 | - Fixed file path autocompletion and `~/` now works as in shell. 93 | 94 | # 0.2.5 (2023-07-06) 95 | 96 | ### Added 97 | 98 | - Added `validator-exporter` systemd service for metrics. 99 | 100 | It listens on port `10000` by default. You can override it with: 101 | ```bash 102 | sudo systemctl edit validator-exporter 103 | ``` 104 | ```ini 105 | [Service] 106 | Environment=PORT=10000 107 | Environment=INTERVAL=10 108 | ``` 109 | 110 | # 0.2.4 (2023-04-05) 111 | 112 | ### Added 113 | 114 | - Added support for a new Venom update. 115 | - Added a `force` flag to the `validator` command. Used to force elect without checking the network config. 116 | 117 | ### Fixed 118 | 119 | - Double check election id before adding validator keys. 120 | 121 | # 0.2.3 (2023-04-05) 122 | 123 | ### Added 124 | 125 | - JSON output for template initialization from non-tty environment. 126 | 127 | ### Changed 128 | 129 | - Intermediate messages are now printed to the `stderr`. 
130 | 131 | # 0.2.2 (2023-03-27) 132 | 133 | ### Fixed 134 | 135 | - Fixed `statvfs` for the newly created node. 136 | 137 | # 0.2.1 (2023-03-27) 138 | 139 | ### Added 140 | 141 | - Show system info during `init`. 142 | - Detect default node repo and features based on global config. 143 | 144 | # 0.2.0 (2023-03-24) 145 | 146 | ### Added 147 | 148 | - Added support for the new stEVER flow. 149 | - Detect currency based on global config. 150 | 151 | ### Changed 152 | 153 | - Renamed tool to `nodekeeper`. 154 | - Renamed `systemd` services to `validator` and `validator-manager`. 155 | 156 | # 0.1.5 (2023-03-16) 157 | 158 | ### Added 159 | 160 | - `.deb` package build. 161 | - You can now specify file path for a global config during `stever node init` 162 | (it used to be only URL). 163 | - Added check for the Rust installation. 164 | 165 | # 0.1.4 (2023-02-15) 166 | 167 | ### Added 168 | 169 | - Extended exported metrics. 170 | * Added `sync_status` label to the `node_ready` metric. 171 | * `validation_enabled`: `0`/`1`. 172 | * if validation is enabled 173 | 174 | `validator_type`: `0` - single / `1` - depool. 175 | * if validation is enabled and `validator_type=0` 176 | 177 | `validator_single_stake_per_round`: stake in nano EVERs. 178 | 179 | Labels: `validator` - validator wallet address. 180 | * if validation is enabled and `validator_type=1` 181 | 182 | `validator_depool_type`: `0` - default_v3, `1` - stever_v1, `2` - stever_v2. 183 | 184 | Labels: `validator` - validator wallet address, `depool` - depool address. 185 | 186 | ### Changed 187 | 188 | - Refactored project structure. 189 | 190 | # 0.1.3 (2023-02-06) 191 | 192 | ### Added 193 | 194 | - Added support for initialization templates. Templates can be specified for `stever init` command or 195 | its subcommands (except `systemd`). They are mostly used for running stever from scripts (i.e. from ansible). 196 | 197 | See [example.toml](/templates/example.toml) for more details. 
198 | 199 | - Added `--user`,`--enable` and `--start` params to the `stever init systemd` to allow using it from scripts. 200 | 201 | - Added `stever node gendht` to export signed global config entries. 202 | 203 | ### Changed 204 | 205 | - Separate `stever init systemd` is now always required after the first initialization. 206 | 207 | # 0.1.2 (2023-02-03) 208 | 209 | ### Added 210 | 211 | - Added support for signature id. Signature for networks with this capability enabled will now be 212 | calculated differently to prevent security issues. 213 | - Added support for cloning the specific branch in repo and build the node with specified features. 214 | While initializing the node with `stever init`, add these flags after the repo url: 215 | - `-b,--branch ` - branch name; 216 | - `-f,--features ...` - list of features for `cargo build`; 217 | 218 | # 0.1.1 (2023-01-27) 219 | 220 | ### Added 221 | 222 | - Added support for the new version of the stEver DePool contract (`depool_type = "stever_v2"`). 223 | - Added `--version/-v` flag to get application version. 224 | - Added random offset from the beginning of the elections to spread the load (`0..1/4` of elections range). 225 | > Could be disabled by adding a flag `--disable-random-shift` 226 | - DePool and proxy balances are now replenished if there are not enough funds on them. 227 | 228 | ### Changed 229 | 230 | - `stever init --rebuild` now always replaces the existing node binary even if it is running (behavior is similar to `cp -f`). 231 | - Updated the default node config (added the `"gc": { .. }` section). 232 | 233 | ### Fixed 234 | 235 | - Fixed races in the blocks subscription loop. 236 | 237 | # 0.1.0 (2022-12-20) 238 | 239 | Initial release. 
240 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nodekeeper" 3 | description = "All-in-one node management tool." 4 | version = "0.2.18" 5 | authors = ["Ivan Kalinin "] 6 | repository = "https://github.com/broxus/nodekeeper" 7 | edition = "2021" 8 | rust-version = "1.75" 9 | license-file = "./LICENSE" 10 | 11 | [dependencies] 12 | aes = "0.8" 13 | anyhow = "1.0.65" 14 | arc-swap = "1.5" 15 | argh = "0.1.9" 16 | async-trait = "0.1.57" 17 | base64 = "0.13.0" 18 | broxus-util = { version = "0.2", default-features = false, features = ["serde", "signal"] } 19 | bytes = "1.2" 20 | ctr = "0.9" 21 | ctrlc = "3.4" 22 | dashmap = "5.4.0" 23 | dialoguer = { version = "0.10", features = ["completion"] } 24 | everscale-crypto = { version = "0.2", features = ["tl-proto"] } 25 | everscale-network = { version = "0.5", default-features = false, features = ["rldp", "dht"] } 26 | futures-util = "0.3" 27 | generic-array = "0.14" 28 | hex = "0.4" 29 | hmac = "0.11.0" 30 | home = "0.5" 31 | indicatif = "0.17" 32 | libc = "0.2" 33 | num = "0.4" 34 | once_cell = "1.15" 35 | parking_lot = "0.12.1" 36 | pbkdf2 = "0.9.0" 37 | pomfrit = "0.1.8" 38 | public-ip = "0.2" 39 | rand = "0.8.5" 40 | reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] } 41 | rustc-hash = "1.1.0" 42 | serde = { version = "1", features = ["derive"] } 43 | serde_json = "1" 44 | serde_path_to_error = "0.1" 45 | sha2 = "0.9" 46 | sysinfo = "0.30" 47 | thiserror = "1.0.37" 48 | tl-proto = "0.4" 49 | tokio = { version = "1", features = [ 50 | "macros", 51 | "time", 52 | "sync", 53 | "rt-multi-thread", 54 | "net", 55 | "io-util", 56 | "process", 57 | ] } 58 | tokio-util = "0.7" 59 | toml = { version = "0.5", features = ["preserve_order"] } 60 | tracing = "0.1" 61 | tracing-subscriber = "0.3" 62 | url = { version = "2", features = ["serde"] } 
63 | 64 | ed25519-dalek = { git = "https://github.com/broxus/ed25519-dalek.git" } 65 | tiny-bip39 = { git = "https://github.com/broxus/tiny-bip39.git", default-features = false } 66 | tiny-hderive = { git = "https://github.com/broxus/tiny-hderive.git" } 67 | 68 | ton_abi = { git = "https://github.com/broxus/ton-labs-abi.git" } 69 | ton_block = { git = "https://github.com/broxus/ton-labs-block.git", features = ["venom"] } 70 | ton_types = { git = "https://github.com/broxus/ton-labs-types.git" } 71 | ton_executor = { git = "https://github.com/broxus/ton-labs-executor.git" } 72 | 73 | nekoton-abi = { git = "https://github.com/broxus/nekoton.git", features = ["derive"] } 74 | nekoton-utils = { git = "https://github.com/broxus/nekoton.git" } 75 | 76 | [features] 77 | default = [] 78 | packaged = [] 79 | 80 | [package.metadata.deb] 81 | features = ["packaged"] 82 | depends = "build-essential, libssl-dev, pkg-config, libzstd-dev, libclang-dev, libgoogle-perftools-dev, git" 83 | section = "utility" 84 | assets = [ 85 | [ 86 | "target/release/nodekeeper", 87 | "/usr/local/bin/nodekeeper", 88 | "755", 89 | ], 90 | [ 91 | "README.md", 92 | "usr/share/doc/nodekeeper/README", 93 | "644", 94 | ], 95 | ] 96 | conf-files = ["/etc/nodekeeper/"] 97 | maintainer-scripts = "debian/" 98 | systemd-units = [ 99 | { unit-name = "validator", enable = true, start = false, stop-on-upgrade = false }, 100 | { unit-name = "validator-manager", enable = true, start = false }, 101 | { unit-name = "validator-exporter", enable = true, start = false }, 102 | ] 103 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 | Logo 4 | 5 |

6 | 7 | # nodekeeper   [![rust-version-badge]][rust-version-link] [![workflow-badge]][workflow-link] 8 | 9 | [rust-version-badge]: https://img.shields.io/badge/rustc-1.67+-lightgray.svg 10 | [rust-version-link]: https://blog.rust-lang.org/2023/01/26/Rust-1.67.0.html 11 | [workflow-badge]: https://img.shields.io/github/actions/workflow/status/broxus/nodekeeper/master.yml?branch=master 12 | [workflow-link]: https://github.com/broxus/nodekeeper/actions?query=workflow%3Amaster 13 | 14 | All-in-one node management tool. 15 | 16 | ## How to install 17 | 18 | * Using Debian package 19 | ```bash 20 | sudo apt install curl gnupg 21 | 22 | # Add custom ppa repo 23 | curl https://europe-west1-apt.pkg.dev/doc/repo-signing-key.gpg \ 24 | | sudo apt-key add - 25 | echo 'deb [arch=amd64] https://europe-west1-apt.pkg.dev/projects/broxus-infrastructure broxus-deb-pub main' \ 26 | | sudo tee -a /etc/apt/sources.list.d/broxus.list 27 | 28 | # Install the tool 29 | sudo apt update 30 | sudo apt install nodekeeper 31 | 32 | # Add current user to the nodekeeper group 33 | sudo usermod -a -G nodekeeper $USER 34 | # Update groups cache (you can just relogin instead) 35 | newgrp nodekeeper 36 | ``` 37 | 38 | * Using `cargo install` 39 | ```bash 40 | # Install deps for the node 41 | sudo apt install curl pkg-config libssl-dev libzstd-dev libclang-dev libtcmalloc-minimal4 libprotobuf-dev libgoogle-perftools-dev 42 | 43 | # Install the app 44 | cargo install --locked --git https://github.com/broxus/nodekeeper 45 | ``` 46 | 47 | > NOTE: `systemd` configuration is different for cargo installation, 48 | see **Validation** section for more info. 
49 | 50 | ## How to use 51 | 52 | ### Validation 53 | 54 | For Debian installation: 55 | ```bash 56 | # Install Rust 57 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 58 | source "$HOME/.cargo/env" 59 | # Or update to the latest stable version: 60 | # rustup update stable 61 | 62 | # Configure node 63 | nodekeeper init 64 | 65 | # Start services 66 | sudo systemctl restart validator 67 | sudo systemctl restart validator-manager 68 | sudo systemctl restart validator-exporter 69 | ``` 70 | 71 |
For cargo installation: 72 |

73 | 74 | ```bash 75 | # Optionally configure root directory: 76 | # export NODEKEEPER_ROOT=/var/nodekeeper 77 | # 78 | # Or explicitly specify it as a param, e.g.: 79 | # nodekeeper --root /var/nodekeeper init 80 | 81 | # Configure node 82 | nodekeeper init 83 | 84 | sudo $(which nodekeeper) init systemd 85 | ``` 86 | 87 |

88 |
89 |
90 | 91 | > NOTE: Make sure you back up your keys after initial configuration! 92 | > 93 | > All keys are stored at `/var/nodekeeper/keys/` (or `$HOME/.nodekeeper/keys/` by default for the cargo installation). 94 | 95 | You can also configure different steps separately: 96 | 97 | ```bash 98 | # Initialize only node configs 99 | nodekeeper init node 100 | 101 | # Initialize only contracts 102 | nodekeeper init contracts 103 | ``` 104 | 105 | Updating the node: 106 | 107 | ```bash 108 | nodekeeper init node --rebuild 109 | sudo systemctl restart validator 110 | ``` 111 | 112 | ### Metrics exporter 113 | 114 | ```bash 115 | # Metrics exporter as a server 116 | nodekeeper exporter --addr 0.0.0.0:10100 117 | 118 | # Metrics exporter to the file 119 | nodekeeper exporter --file /var/www/node_metrics.txt 120 | ``` 121 | 122 |
Example metrics 123 |

124 | 125 | ``` 126 | collected_at 1669042606 127 | node_ready 1 128 | node_version_major 0 129 | node_version_minor 51 130 | node_version_patch 1 131 | mc_seqno 155886 132 | mc_time 1669042601 133 | mc_time_diff 5 134 | sc_time_diff 5 135 | in_current_vset{adnl="d5af8f62c027774831aea3fe00d78fc78ed69f233d885382e72f9adefd8c4f05"} 1 136 | in_next_vset 0 137 | ``` 138 | 139 |

140 |
141 | 142 | ### Seed generator 143 | 144 | ```bash 145 | # Generate new seed 146 | nodekeeper seed generate 147 | #decline weapon swift luggage gorilla odor clown million leaf royal object movie 148 | 149 | # Derive keypair from the seed 150 | nodekeeper seed generate | nodekeeper seed derive 151 | #{ 152 | # "public": "72e8cb80621c41a95da3a004139ceefa39e8709e7a8183ed9ad601ce9a13714d", 153 | # "secret": "435726770e17089f6c0b647f5ce7418ba6d07ca6b8c15d0c42e2379d1a09b6cc" 154 | #} 155 | 156 | # Derive keypair from the secret 157 | nodekeeper seed pubkey 435726770e17089f6c0b647f5ce7418ba6d07ca6b8c15d0c42e2379d1a09b6cc 158 | #{ 159 | # "public": "72e8cb80621c41a95da3a004139ceefa39e8709e7a8183ed9ad601ce9a13714d", 160 | # "secret": "435726770e17089f6c0b647f5ce7418ba6d07ca6b8c15d0c42e2379d1a09b6cc" 161 | #} 162 | ``` 163 | 164 | ### Contract interaction 165 | 166 | ```bash 167 | # Compute account address and stateinit 168 | nodekeeper contract stateinit < ./path/to/Contract.tvc 169 | #{ 170 | # "address": "0:1df86a0f06aec400d04719052e6a17dffadc09f915c5e35e959d37d59beb7ac3", 171 | # "tvc": "te6ccgICAQAAA...some long base64 encoded BOC...AxWw==" 172 | #} 173 | 174 | # Execute getters locally 175 | nodekeeper contract call \ 176 | getParticipantInfo \ 177 | '{"addr":"0:2f61300e70e2cdb5f96d3d7a0d60c70dfa515f89c3d4926e958b5eb147977469"}' \ 178 | --addr '0:5325f4965e6388f97ae2578c19e8ffbc080f29d2357c5712d2a21d640dc10fb7' \ 179 | --abi ./path/to/Contract.abi.json 180 | #{ 181 | # "code": 0, 182 | # "output": { 183 | # "lockDonor": "0:0000000000000000000000000000000000000000000000000000000000000000", 184 | # "locks": [], 185 | # "reinvest": true, 186 | # "reward": "0", 187 | # "stakes": [], 188 | # "total": "0", 189 | # "vestingDonor": "0:0000000000000000000000000000000000000000000000000000000000000000", 190 | # "vestings": [], 191 | # "withdrawValue": "0" 192 | # } 193 | #} 194 | 195 | # and others 196 | ``` 197 | 198 | ### Execute node commands 199 | 200 | ```bash 201 | # Get 
config params 202 | nodekeeper node getparam 14 203 | #{ 204 | # "block_id": "-1:8000000000000000:156446:e6a099e43ba0e2a9b7b0d1e9b5207cef4e0e54c1dc2ea8811f0877ad78516bc0:fdca14025ba3b16b4286a561b7ade73f3e26a0224e9492cefc77b83ed649f37d", 205 | # "value": { 206 | # "basechain_block_fee": "073b9aca00", 207 | # "basechain_block_fee_dec": "1000000000", 208 | # "masterchain_block_fee": "076553f100", 209 | # "masterchain_block_fee_dec": "1700000000" 210 | # } 211 | #} 212 | 213 | # Send message 214 | nodekeeper node sendmessage < ./path/to/message.boc 215 | 216 | # and others 217 | ``` 218 | 219 | --- 220 | 221 |
All options 222 |

223 | 224 | ``` 225 | Usage: nodekeeper [--root <dir>] [<command>] 226 | 227 | All-in-one node management tool. 228 | 229 | Options: 230 | --root path to the root directory 231 | --help display usage information 232 | 233 | Commands: 234 | init Prepares configs and binaries 235 | validator Validation manager service 236 | contract Contract interaction stuff 237 | exporter Prometheus metrics exporter 238 | node Raw node tools operations 239 | seed Seed utils 240 | ``` 241 | 

243 |
244 | 245 | ## FAQ 246 | 247 | - **I'm trying to participate in elections, but the node fails to generate keys with `Error: Permission denied (os error 13)`** 248 | 249 | **Answer:** Ensure that the configs folder has correct permissions. This might help: 250 | ```bash 251 | sudo chown -hR nodekeeper /var/nodekeeper 252 | ``` 253 | 254 | ## How it works 255 | 256 | This tool is a replacement of `ever-node-tools` and contains all the necessary functionality to manage a node. 257 | During initialization steps it prepares configs (at `$HOME/.nodekeeper` by default), downloads and builds the node, 258 | and deploys necessary contracts (all this through a CLI with convenient choices!). 259 | 260 | After contracts configuration this tool manages the validator wallet (which is an [EVER Wallet contract](https://github.com/broxus/ever-wallet-contract)) 261 | and optionally a DePool (default v3 or stEVER variant). 262 | 263 | The update logic is based on two `systemd` services: 264 | 265 | - `validator` - the node itself; 266 | - `validator-manager` - service wrapper around the `nodekeeper validator` command; 267 | 268 | It uses two protocols to communicate with the node - the first one is for the control server (`TCP ADNL`), 269 | and the second is for other stuff (`UDP ADNL`, same as the protocol used by all nodes in the network). 270 | 271 | ## Contributing 272 | 273 | We welcome contributions to the project! If you notice any issues or errors, feel free to open an issue or submit a pull request. 274 | 275 | ## License 276 | 277 | This project is licensed under the [Apache License](https://opensource.org/licenses/Apache-2.0). 
278 | -------------------------------------------------------------------------------- /debian/postinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] || [ "$1" = "abort-deconfigure" ] || [ "$1" = "abort-remove" ]; then 5 | useradd nodekeeper -r || echo "User already exists." 6 | path="/var/nodekeeper" 7 | 8 | mkdir -p $path 9 | 10 | chown nodekeeper:nodekeeper -R $path 11 | chmod 770 -R $path 12 | chmod g+s -R $path 13 | fi 14 | #DEBHELPER# 15 | -------------------------------------------------------------------------------- /debian/prerm: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | deluser nodekeeper || echo "User is currently used by process" 5 | groupdel nodekeeper || echo "Group is currently used" 6 | 7 | #DEBHELPER# 8 | -------------------------------------------------------------------------------- /debian/validator-exporter.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Validator Metrics Exporter 3 | After=network.target 4 | StartLimitIntervalSec=0 5 | 6 | [Service] 7 | Type=simple 8 | Restart=always 9 | RestartSec=1 10 | User=nodekeeper 11 | Environment=PORT=10000 12 | Environment=INTERVAL=10 13 | ExecStart=/usr/local/bin/nodekeeper exporter \ 14 | --addr 0.0.0.0:${PORT} \ 15 | --interval ${INTERVAL} 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /debian/validator-manager.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Validator Manager 3 | After=network.target 4 | StartLimitIntervalSec=0 5 | 6 | [Service] 7 | Type=simple 8 | Restart=always 9 | RestartSec=1 10 | User=nodekeeper 11 | ExecStart=/usr/local/bin/nodekeeper --root /var/nodekeeper 
validator run 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /debian/validator.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Validator Node 3 | After=network.target 4 | StartLimitIntervalSec=0 5 | 6 | [Service] 7 | Type=simple 8 | Restart=always 9 | RestartSec=1 10 | User=nodekeeper 11 | LimitNOFILE=2048000 12 | ExecStart=/var/nodekeeper/bin/node --configs /var/nodekeeper/node 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /src/cli/exporter.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use std::path::PathBuf; 3 | use std::time::Duration; 4 | 5 | use anyhow::Result; 6 | use argh::FromArgs; 7 | 8 | use super::CliContext; 9 | use crate::exporter::{ 10 | Exporter, ExporterTarget, FileExporterTarget, HttpExporterTarget, StdoutExporterTarget, 11 | }; 12 | 13 | #[derive(FromArgs)] 14 | /// Prometheus metrics exporter 15 | #[argh(subcommand, name = "exporter")] 16 | pub struct Cmd { 17 | /// socket addr to host the exporter 18 | #[argh(option, short = 'a')] 19 | addr: Option, 20 | 21 | /// path to the file where the metrics are written 22 | #[argh(option, short = 'f')] 23 | file: Option, 24 | 25 | /// whether to run exporter once 26 | #[argh(switch)] 27 | once: bool, 28 | 29 | /// metrics collection interval (in seconds). 
10 seconds default 30 | #[argh(option, short = 'i', default = "10")] 31 | interval: u32, 32 | } 33 | 34 | impl Cmd { 35 | pub async fn run(self, ctx: CliContext) -> Result<()> { 36 | let mut targets = Vec::>::new(); 37 | 38 | // Add file exporter if path specified 39 | if let Some(file) = self.file { 40 | targets.push(Box::new(FileExporterTarget::new(file))); 41 | } 42 | 43 | // Add network exporter 44 | if let Some(addr) = self.addr { 45 | if self.once { 46 | return Err(ExporterError::OnceNotSupported.into()); 47 | } 48 | targets.push(Box::new(HttpExporterTarget::new(addr).await?)); 49 | } 50 | 51 | // Fallback to stdout exporter 52 | if targets.is_empty() { 53 | targets.push(Box::new(StdoutExporterTarget)); 54 | } 55 | 56 | let exporter = Exporter::new(ctx.dirs, targets); 57 | if self.once { 58 | exporter.once().await 59 | } else { 60 | let interval = Duration::from_secs(self.interval as u64); 61 | exporter.serve(interval).await; 62 | Ok(()) 63 | } 64 | } 65 | } 66 | 67 | #[derive(thiserror::Error, Debug)] 68 | enum ExporterError { 69 | #[error("once flag is not supported by http exporter")] 70 | OnceNotSupported, 71 | } 72 | -------------------------------------------------------------------------------- /src/cli/init/mod.rs: -------------------------------------------------------------------------------- 1 | use std::net::Ipv4Addr; 2 | use std::path::PathBuf; 3 | 4 | use anyhow::{Context, Result}; 5 | use argh::FromArgs; 6 | use broxus_util::{const_bool, serde_optional_string, serde_string_or_number}; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use super::{CliContext, ProjectDirs}; 10 | use crate::config::{AppConfig, AppConfigDePoolDeploymentParams, DePoolType, NodeConfig}; 11 | use crate::defaults; 12 | use crate::util::{is_terminal, print_output}; 13 | 14 | mod contracts; 15 | mod node; 16 | #[cfg(not(feature = "packaged"))] 17 | mod systemd; 18 | 19 | #[derive(FromArgs)] 20 | /// Prepares configs and binaries 21 | #[argh(subcommand, name = "init")] 22 | 
pub struct Cmd { 23 | #[argh(subcommand)] 24 | subcommand: Option, 25 | /// path to the params template 26 | #[argh(option)] 27 | template: Option, 28 | /// force download and build the latest node 29 | #[argh(switch)] 30 | rebuild: bool, 31 | } 32 | 33 | impl Cmd { 34 | pub async fn run(self, ctx: CliContext) -> Result<()> { 35 | fn load_template(template: Option) -> Result> { 36 | let Some(path) = &template else { 37 | return Ok(None); 38 | }; 39 | 40 | let data = std::fs::read_to_string(path).context("failed to read template")?; 41 | let template = if matches!(path.extension(), Some(ext) if ext == "toml") { 42 | let mut td = toml::Deserializer::new(&data); 43 | serde_path_to_error::deserialize(&mut td) 44 | .context("failed to parse template as TOML")? 45 | } else { 46 | let mut jd = serde_json::Deserializer::from_str(&data); 47 | serde_path_to_error::deserialize(&mut jd) 48 | .context("failed to parse template as JSON")? 49 | }; 50 | 51 | Ok(Some(template)) 52 | } 53 | 54 | let theme = &dialoguer::theme::ColorfulTheme::default(); 55 | match self.subcommand { 56 | None => { 57 | let template = load_template(self.template)?; 58 | 59 | let node = node::Cmd { 60 | rebuild: self.rebuild, 61 | } 62 | .run(theme, &ctx, &template) 63 | .await?; 64 | 65 | let contracts = contracts::Cmd {}.run(theme, &ctx, &template).await?; 66 | 67 | if template.is_some() && !is_terminal() { 68 | print_output(serde_json::json!({ 69 | "node": node, 70 | "contracts": contracts, 71 | })); 72 | } 73 | 74 | Ok(()) 75 | } 76 | Some(SubCmd::Node(cmd)) => { 77 | let template = load_template(self.template)?; 78 | 79 | let node = cmd.run(theme, &ctx, &template).await?; 80 | 81 | if template.is_some() && !is_terminal() { 82 | print_output(serde_json::to_value(node).unwrap()); 83 | } 84 | 85 | Ok(()) 86 | } 87 | Some(SubCmd::Contracts(cmd)) => { 88 | let template = load_template(self.template)?; 89 | 90 | let contracts = cmd.run(theme, &ctx, &template).await?; 91 | 92 | if template.is_some() && 
!is_terminal() { 93 | print_output(serde_json::to_value(contracts).unwrap()); 94 | } 95 | 96 | Ok(()) 97 | } 98 | #[cfg(not(feature = "packaged"))] 99 | Some(SubCmd::Systemd(cmd)) => { 100 | anyhow::ensure!( 101 | self.template.is_none(), 102 | "Template is not supported for `systemd` command" 103 | ); 104 | cmd.run(theme, &ctx).await 105 | } 106 | } 107 | } 108 | } 109 | 110 | #[derive(FromArgs)] 111 | #[argh(subcommand)] 112 | enum SubCmd { 113 | Node(node::Cmd), 114 | Contracts(contracts::Cmd), 115 | #[cfg(not(feature = "packaged"))] 116 | Systemd(systemd::Cmd), 117 | } 118 | 119 | impl ProjectDirs { 120 | fn store_app_config(&self, app_config: &AppConfig) -> Result<()> { 121 | app_config.store(&self.app_config) 122 | } 123 | 124 | fn store_node_config(&self, node_config: &NodeConfig) -> Result<()> { 125 | node_config.store(&self.node_config) 126 | } 127 | } 128 | 129 | #[derive(Deserialize, Serialize)] 130 | #[serde(deny_unknown_fields)] 131 | pub struct Template { 132 | /// General settings 133 | general: TemplateGeneral, 134 | 135 | /// Control server settings. 136 | #[serde(default)] 137 | control: TemplateControl, 138 | 139 | /// ADNL settings. 140 | #[serde(default)] 141 | adnl: TemplateAdnl, 142 | 143 | /// Optional validation params. 144 | #[serde(default)] 145 | validator: Option, 146 | } 147 | 148 | fn default_global_config() -> Option { 149 | Some("ever_mainnet".to_owned()) 150 | } 151 | 152 | #[derive(Deserialize, Serialize)] 153 | struct TemplateGeneral { 154 | /// Whether to create a root directory. Default: `true`. 155 | #[serde(default = "const_bool::")] 156 | create_root_dir: bool, 157 | 158 | /// Path, url or name of the global config. Default: `ever_mainnet`. 159 | /// 160 | /// NOTE: Tries to use the existing one if None. 161 | #[serde(default = "default_global_config")] 162 | global_config: Option, 163 | 164 | /// Whether to reset node logger settings. Default: `false`. 
165 | #[serde(default)] 166 | reset_logger_config: bool, 167 | 168 | /// Whether to overwrite the existing node config. Default: `false`. 169 | #[serde(default)] 170 | reset_node_config: bool, 171 | 172 | /// Whether to reset the existing app config. Default: `false`. 173 | #[serde(default)] 174 | reset_app_config: bool, 175 | 176 | /// Path to the root directory for the node DB. 177 | node_db_path: PathBuf, 178 | 179 | /// Node repository info. 180 | #[serde(default)] 181 | node_repo: TemplateNodeRepo, 182 | } 183 | 184 | #[derive(Deserialize, Serialize)] 185 | #[serde(deny_unknown_fields, rename_all = "lowercase", tag = "type")] 186 | enum TemplateValidator { 187 | Single(TemplateValidatorSingle), 188 | DePool(Box), 189 | } 190 | 191 | #[derive(Deserialize, Serialize)] 192 | #[serde(deny_unknown_fields)] 193 | struct TemplateValidatorSingle { 194 | /// Whether to overwrite existing validation config. Default: `false`. 195 | #[serde(default)] 196 | overwrite: bool, 197 | 198 | /// Whether to overwrite existing validator keys. Default: `false`. 199 | #[serde(default)] 200 | overwrite_validator_keys: bool, 201 | 202 | /// Stake per round in nano EVERs. 203 | #[serde(with = "serde_string_or_number")] 204 | stake_per_round: u64, 205 | 206 | /// Optional stake factor. 207 | #[serde(default, skip_serializing_if = "Option::is_none")] 208 | stake_factor: Option, 209 | } 210 | 211 | #[derive(Deserialize, Serialize)] 212 | struct TemplateValidatorDePool { 213 | /// Whether to overwrite existing validation config. Default: `false`. 214 | #[serde(default)] 215 | overwrite: bool, 216 | 217 | /// Whether to overwrite existing validator keys. Default: `false`. 218 | #[serde(default)] 219 | overwrite_validator_keys: bool, 220 | 221 | /// Whether to overwrite existing DePool keys. Default: `false`. 222 | #[serde(default)] 223 | overwrite_depool_keys: bool, 224 | 225 | /// DePool type. 226 | depool_type: DePoolType, 227 | 228 | /// Optional stake factor. 
229 | #[serde(default, skip_serializing_if = "Option::is_none")] 230 | stake_factor: Option, 231 | 232 | /// stEVER cluster address. 233 | #[serde( 234 | default, 235 | with = "serde_optional_string", 236 | skip_serializing_if = "Option::is_none" 237 | )] 238 | cluster: Option, 239 | 240 | /// DePool deployment params. 241 | #[serde(flatten)] 242 | deploy: AppConfigDePoolDeploymentParams, 243 | } 244 | 245 | #[derive(Deserialize, Serialize)] 246 | #[serde(default, deny_unknown_fields)] 247 | struct TemplateControl { 248 | /// Control server TCP port. Default: `None`. 249 | /// 250 | /// NOTE: Tries to use the default one or an existing from the config. 251 | port: Option, 252 | 253 | /// What to do with new node keys: `append` or `replace`. Default: `replace`. 254 | node_key_behavior: TemplateNodeKeyBehavior, 255 | 256 | /// On which address control server will be listening for requests. 257 | /// Default: `127.0.0.1`. 258 | listen_addr: Ipv4Addr, 259 | } 260 | 261 | impl Default for TemplateControl { 262 | fn default() -> Self { 263 | Self { 264 | port: None, 265 | node_key_behavior: TemplateNodeKeyBehavior::default(), 266 | listen_addr: Ipv4Addr::LOCALHOST, 267 | } 268 | } 269 | } 270 | 271 | #[derive(Default, Deserialize, Serialize)] 272 | #[serde(deny_unknown_fields)] 273 | struct TemplateAdnl { 274 | /// ADNL UDP port. Default: `None`. 275 | /// 276 | /// NOTE: Tries to use the default one or an existing from the config. 277 | #[serde(default)] 278 | port: Option, 279 | 280 | /// Public IP. Default: `None`. 281 | /// 282 | /// NOTE: Tries to resolve public ip if not specified. 283 | #[serde(default)] 284 | public_ip: Option, 285 | } 286 | 287 | struct TemplateNodeRepo { 288 | /// Node repository URL. 289 | url: reqwest::Url, 290 | /// Optional branch. 291 | branch: Option, 292 | /// Features which will be used during node build. 
293 | features: Vec, 294 | } 295 | 296 | impl Default for TemplateNodeRepo { 297 | fn default() -> Self { 298 | Self { 299 | url: defaults::DEFAULT_NODE_REPO.parse().unwrap(), 300 | branch: None, 301 | features: Vec::new(), 302 | } 303 | } 304 | } 305 | 306 | impl Serialize for TemplateNodeRepo { 307 | fn serialize(&self, serializer: S) -> std::result::Result 308 | where 309 | S: serde::Serializer, 310 | { 311 | #[derive(Serialize)] 312 | struct Helper<'a> { 313 | url: &'a reqwest::Url, 314 | branch: &'a Option, 315 | features: &'a Vec, 316 | } 317 | 318 | if self.branch.is_none() && self.features.is_empty() { 319 | self.url.serialize(serializer) 320 | } else { 321 | Helper { 322 | url: &self.url, 323 | branch: &self.branch, 324 | features: &self.features, 325 | } 326 | .serialize(serializer) 327 | } 328 | } 329 | } 330 | 331 | impl<'de> Deserialize<'de> for TemplateNodeRepo { 332 | fn deserialize(deserializer: D) -> std::result::Result 333 | where 334 | D: serde::Deserializer<'de>, 335 | { 336 | #[derive(Deserialize)] 337 | #[serde(untagged, deny_unknown_fields)] 338 | enum Helper { 339 | Url(reqwest::Url), 340 | Full { 341 | url: reqwest::Url, 342 | branch: Option, 343 | features: Vec, 344 | }, 345 | } 346 | 347 | Ok(match Helper::deserialize(deserializer)? 
{ 348 | Helper::Url(git) => Self { 349 | url: git, 350 | branch: None, 351 | features: Vec::new(), 352 | }, 353 | Helper::Full { 354 | url, 355 | branch, 356 | features, 357 | } => Self { 358 | url, 359 | branch, 360 | features, 361 | }, 362 | }) 363 | } 364 | } 365 | 366 | #[derive(Default, Deserialize, Serialize)] 367 | #[serde(rename_all = "lowercase")] 368 | enum TemplateNodeKeyBehavior { 369 | Append, 370 | #[default] 371 | Replace, 372 | } 373 | 374 | impl TemplateNodeKeyBehavior { 375 | pub fn is_append(&self) -> bool { 376 | matches!(self, Self::Append) 377 | } 378 | } 379 | -------------------------------------------------------------------------------- /src/cli/init/systemd.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::path::Path; 3 | use std::process::Stdio; 4 | 5 | use anyhow::{Context, Result}; 6 | use argh::FromArgs; 7 | use dialoguer::console::style; 8 | use dialoguer::theme::Theme; 9 | use dialoguer::Select; 10 | use tokio::process::Command; 11 | 12 | use crate::cli::{CliContext, ProjectDirs}; 13 | use crate::dirs::{VALIDATOR_EXPORTER_SERVICE, VALIDATOR_MANAGER_SERVICE, VALIDATOR_SERVICE}; 14 | use crate::util::*; 15 | 16 | #[derive(FromArgs)] 17 | /// Creates systemd services 18 | #[argh(subcommand, name = "systemd")] 19 | pub struct Cmd { 20 | /// which user to use for systemd services. 21 | #[argh(option)] 22 | user: Option, 23 | /// whether to enable services for auto-start. 24 | #[argh(switch)] 25 | enable: Option, 26 | /// whether to immediately start services. 
27 | #[argh(switch)] 28 | start: Option, 29 | } 30 | 31 | impl Cmd { 32 | pub async fn run(self, theme: &dyn Theme, ctx: &CliContext) -> Result<()> { 33 | if self.user.is_none() && !is_terminal() { 34 | anyhow::bail!("`user` param is required when running without tty"); 35 | } 36 | 37 | let dirs = ctx.dirs(); 38 | let mut steps = Steps::new(2); 39 | 40 | // Ensure all services are created 41 | steps.next("Preparing services"); 42 | prepare_services(theme, dirs, &self.user)?; 43 | 44 | // Reload sysetmd 45 | steps.next("Reloading systemd configs"); 46 | systemd_daemon_reload().await?; 47 | 48 | // Optionally start services 49 | steps.next("Systemd services are configured now. Great!"); 50 | start_services(theme, self.enable, self.start).await?; 51 | 52 | Ok(()) 53 | } 54 | } 55 | 56 | pub fn prepare_services( 57 | theme: &dyn Theme, 58 | dirs: &ProjectDirs, 59 | user: &Option, 60 | ) -> Result<()> { 61 | const ROOT_USER: &str = "root"; 62 | 63 | let user = match user { 64 | Some(user) => Cow::Borrowed(user.as_str()), 65 | None => { 66 | // Determine current user id 67 | let uid = system::user_id(); 68 | // Determine "real" user id (if he runs this app under sudo) 69 | let other_user = match uid { 70 | // If current user is root 71 | 0 => match system::get_sudo_uid()? { 72 | // Root user is running this app under sudo 73 | Some(0) => None, 74 | // All other cases (no sudo or real user id) 75 | uid => uid, 76 | }, 77 | // Current user is not root 78 | uid => Some(uid), 79 | }; 80 | 81 | if let Some(uid) = other_user { 82 | // If there is an option of running services under non-root user, 83 | // ask user about it 84 | let other_user = system::user_name(uid).context("failed to get user name")?; 85 | match Select::with_theme(theme) 86 | .with_prompt("Select the user from which the service will work") 87 | .item(&other_user) 88 | .item("root") 89 | .default(0) 90 | .interact()? 
91 | { 92 | // Running as non-root user 93 | 0 => Cow::Owned(other_user), 94 | // Running as root 95 | _ => Cow::Borrowed(ROOT_USER), 96 | } 97 | } else { 98 | // No options available 99 | system::user_name(uid) 100 | .map(Cow::Owned) 101 | .unwrap_or(Cow::Borrowed(ROOT_USER)) 102 | } 103 | } 104 | }; 105 | 106 | let print_service = |path: &Path| { 107 | eprintln!( 108 | "{}", 109 | style(format!("Created validator service at {}", path.display())).dim() 110 | ); 111 | }; 112 | 113 | // Create validator node service 114 | dirs.create_systemd_validator_service(&user)?; 115 | print_service(&dirs.validator_service); 116 | 117 | // Create validator manager service 118 | dirs.create_systemd_validator_manager_service(&user)?; 119 | print_service(&dirs.validator_manager_service); 120 | 121 | // Create validator exporter service 122 | dirs.create_systemd_validator_exporter_service(&user)?; 123 | print_service(&dirs.validator_exporter_service); 124 | 125 | Ok(()) 126 | } 127 | 128 | pub async fn start_services( 129 | theme: &dyn Theme, 130 | enable: Option, 131 | start: Option, 132 | ) -> Result<()> { 133 | let services = [ 134 | VALIDATOR_SERVICE, 135 | VALIDATOR_MANAGER_SERVICE, 136 | VALIDATOR_EXPORTER_SERVICE, 137 | ]; 138 | 139 | let enabled = match enable { 140 | Some(enable) => enable, 141 | None => confirm(theme, true, "Enable autostart services at system startup?")?, 142 | }; 143 | systemd_set_services_enabled(services, enabled).await?; 144 | 145 | let start = match start { 146 | Some(start) => start, 147 | None => confirm(theme, true, "Restart systemd services?")?, 148 | }; 149 | if start { 150 | for service in services { 151 | systemd_restart_service(service).await?; 152 | } 153 | } 154 | 155 | Ok(()) 156 | } 157 | 158 | macro_rules! 
validator_service { 159 | () => { 160 | r#"[Unit] 161 | Description=Validator Node 162 | After=network.target 163 | StartLimitIntervalSec=0 164 | 165 | [Service] 166 | Type=simple 167 | Restart=always 168 | RestartSec=1 169 | User={user} 170 | LimitNOFILE=2048000 171 | ExecStart={node_binary} --configs {configs_dir} 172 | 173 | [Install] 174 | WantedBy=multi-user.target 175 | "# 176 | }; 177 | } 178 | 179 | macro_rules! validator_manager_service { 180 | () => { 181 | r#"[Unit] 182 | Description=Validator Manager 183 | After=network.target 184 | StartLimitIntervalSec=0 185 | 186 | [Service] 187 | Type=simple 188 | Restart=always 189 | RestartSec=1 190 | User={user} 191 | ExecStart={nodekeeper_binary} --root {root_dir} validator run 192 | 193 | [Install] 194 | WantedBy=multi-user.target 195 | "# 196 | }; 197 | } 198 | 199 | macro_rules! validator_exporter_service { 200 | () => { 201 | r#"[Unit] 202 | Description=Validator Metrics Exporter 203 | After=network.target 204 | StartLimitIntervalSec=0 205 | 206 | [Service] 207 | Type=simple 208 | Restart=always 209 | RestartSec=1 210 | User={user} 211 | Environment=PORT=10000 212 | Environment=INTERVAL=10 213 | ExecStart={nodekeeper_binary} exporter \ 214 | --addr 0.0.0.0:${{PORT}} \ 215 | --interval ${{INTERVAL}} 216 | 217 | [Install] 218 | WantedBy=multi-user.target 219 | "# 220 | }; 221 | } 222 | 223 | impl ProjectDirs { 224 | fn create_systemd_validator_service(&self, user: &str) -> Result<()> { 225 | let node = std::fs::canonicalize(&self.node_binary) 226 | .context("failed to canonicalize node binary path")?; 227 | let node_configs_dir = std::fs::canonicalize(&self.node_configs_dir) 228 | .context("failed to canonicalize node configs path")?; 229 | 230 | let validator_service = format!( 231 | validator_service!(), 232 | user = user, 233 | node_binary = node.display(), 234 | configs_dir = node_configs_dir.display() 235 | ); 236 | std::fs::write(&self.validator_service, validator_service) 237 | .context("failed to 
create systemd validator service")?; 238 | 239 | Ok(()) 240 | } 241 | 242 | fn create_systemd_validator_manager_service(&self, user: &str) -> Result<()> { 243 | let current_exe = std::env::current_exe()?; 244 | let root_dir = std::fs::canonicalize(&self.root) 245 | .context("failed to canonicalize root directory path")?; 246 | 247 | let validator_manager_service = format!( 248 | validator_manager_service!(), 249 | user = user, 250 | nodekeeper_binary = current_exe.display(), 251 | root_dir = root_dir.display(), 252 | ); 253 | std::fs::write(&self.validator_manager_service, validator_manager_service) 254 | .context("failed to create systemd validator manager service")?; 255 | 256 | Ok(()) 257 | } 258 | 259 | fn create_systemd_validator_exporter_service(&self, user: &str) -> Result<()> { 260 | let current_exe = std::env::current_exe()?; 261 | 262 | let validator_exporter_service = format!( 263 | validator_exporter_service!(), 264 | user = user, 265 | nodekeeper_binary = current_exe.display(), 266 | ); 267 | std::fs::write(&self.validator_exporter_service, validator_exporter_service) 268 | .context("failed to create systemd validator manager service")?; 269 | 270 | Ok(()) 271 | } 272 | } 273 | 274 | async fn systemd_restart_service(service: &str) -> Result<()> { 275 | exec( 276 | Command::new("systemctl") 277 | .stdout(Stdio::piped()) 278 | .arg("restart") 279 | .arg(service), 280 | ) 281 | .await 282 | .with_context(|| format!("failed to restart service {service}")) 283 | } 284 | 285 | async fn systemd_set_services_enabled<'a, I: IntoIterator>( 286 | services: I, 287 | enabled: bool, 288 | ) -> Result<()> { 289 | let mut command = Command::new("systemctl"); 290 | command 291 | .stdout(Stdio::piped()) 292 | .arg(if enabled { "enable" } else { "disable" }); 293 | 294 | for service in services { 295 | command.arg(service); 296 | } 297 | 298 | exec(&mut command) 299 | .await 300 | .context("failed to enable services") 301 | } 302 | 303 | pub async fn 
systemd_daemon_reload() -> Result<()> { 304 | exec( 305 | Command::new("systemctl") 306 | .stdout(Stdio::piped()) 307 | .arg("daemon-reload"), 308 | ) 309 | .await 310 | .context("failed to reload systemd configs") 311 | } 312 | -------------------------------------------------------------------------------- /src/cli/mod.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use anyhow::Result; 4 | use argh::FromArgs; 5 | 6 | use crate::config::*; 7 | use crate::dirs::*; 8 | use crate::util::*; 9 | 10 | pub mod contract; 11 | pub mod exporter; 12 | pub mod init; 13 | pub mod node; 14 | pub mod seed; 15 | pub mod validator; 16 | 17 | /// All-in-one node management tool 18 | #[derive(FromArgs)] 19 | pub struct App { 20 | #[argh(subcommand)] 21 | command: Command, 22 | 23 | /// path to the root directory 24 | #[argh(option, default = "ProjectDirs::default_root_dir()")] 25 | root: PathBuf, 26 | } 27 | 28 | impl App { 29 | pub async fn run(self) -> Result<()> { 30 | tracing::debug!("root dir {:?}", self.root); 31 | 32 | let ctx = CliContext { 33 | dirs: ProjectDirs::new(self.root), 34 | }; 35 | 36 | match self.command { 37 | Command::Init(cmd) => invoke_as_cli(cmd.run(ctx)).await, 38 | Command::Validator(cmd) => cmd.run(ctx).await, 39 | Command::Contract(cmd) => invoke_as_cli(cmd.run(ctx)).await, 40 | Command::Exporter(cmd) => cmd.run(ctx).await, 41 | Command::Node(cmd) => cmd.run(ctx).await, 42 | Command::Seed(cmd) => cmd.run(), 43 | } 44 | } 45 | } 46 | 47 | #[derive(FromArgs)] 48 | #[argh(subcommand)] 49 | enum Command { 50 | Init(init::Cmd), 51 | Validator(validator::Cmd), 52 | Contract(contract::Cmd), 53 | Exporter(exporter::Cmd), 54 | Node(node::Cmd), 55 | Seed(seed::Cmd), 56 | } 57 | 58 | pub struct CliContext { 59 | dirs: ProjectDirs, 60 | } 61 | 62 | impl CliContext { 63 | pub fn load_config(&self) -> Result { 64 | AppConfig::load(&self.dirs.app_config) 65 | } 66 | 67 | pub fn dirs(&self) -> 
&ProjectDirs { 68 | &self.dirs 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/cli/seed.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use argh::FromArgs; 3 | 4 | use crate::crypto::{self, MnemonicType}; 5 | use crate::util::*; 6 | 7 | #[derive(FromArgs)] 8 | /// Seed utils 9 | #[argh(subcommand, name = "seed")] 10 | pub struct Cmd { 11 | #[argh(subcommand)] 12 | subcommand: SubCmd, 13 | } 14 | 15 | impl Cmd { 16 | pub fn run(self) -> Result<()> { 17 | match self.subcommand { 18 | SubCmd::Generate(cmd) => cmd.run(), 19 | SubCmd::Derive(cmd) => cmd.run(), 20 | SubCmd::Pubkey(cmd) => cmd.run(), 21 | } 22 | } 23 | } 24 | 25 | #[derive(FromArgs)] 26 | #[argh(subcommand)] 27 | enum SubCmd { 28 | Generate(CmdGenerate), 29 | Derive(CmdDerive), 30 | Pubkey(CmdPubkey), 31 | } 32 | 33 | #[derive(Debug, PartialEq, FromArgs)] 34 | /// Generates new seed 35 | #[argh(subcommand, name = "generate")] 36 | struct CmdGenerate { 37 | /// mnemonic type 38 | #[argh(option, long = "type", short = 't', default = "MnemonicType::Bip39")] 39 | ty: MnemonicType, 40 | } 41 | 42 | impl CmdGenerate { 43 | fn run(self) -> Result<()> { 44 | let seed = crypto::generate_seed(self.ty); 45 | print_output(seed); 46 | Ok(()) 47 | } 48 | } 49 | 50 | #[derive(Debug, PartialEq, FromArgs)] 51 | /// Derives key from seed 52 | #[argh(subcommand, name = "derive")] 53 | struct CmdDerive { 54 | /// mnemonic type 55 | #[argh(option, long = "type", short = 't', default = "MnemonicType::Bip39")] 56 | ty: MnemonicType, 57 | 58 | /// seed phrase or empty for input from stdin 59 | #[argh(positional)] 60 | seed: Option, 61 | 62 | /// derivation path for bip39 mnemonic 63 | #[argh(option, short = 'p')] 64 | path: Option, 65 | 66 | /// encode keys in base64 (hex by default) 67 | #[argh(switch)] 68 | base64: bool, 69 | } 70 | 71 | impl CmdDerive { 72 | fn run(self) -> Result<()> { 73 | let seed = 
parse_optional_input(self.seed, true)?; 74 | let seed = String::from_utf8(seed)?; 75 | 76 | let path = if let Some(path) = &self.path { 77 | path.as_str() 78 | } else { 79 | crypto::DEFAULT_PATH 80 | }; 81 | 82 | let keys = crypto::derive_from_phrase(seed.trim(), self.ty, path)?; 83 | 84 | print_output(encode_key_pair(keys.secret, keys.public, self.base64)); 85 | Ok(()) 86 | } 87 | } 88 | 89 | #[derive(Debug, PartialEq, FromArgs)] 90 | /// Computes public key from secret key 91 | #[argh(subcommand, name = "pubkey")] 92 | struct CmdPubkey { 93 | /// secret key in hex or empty for input from stdin 94 | #[argh(positional)] 95 | secret: Option, 96 | 97 | /// encode keys in base64 (hex by default) 98 | #[argh(switch)] 99 | base64: bool, 100 | } 101 | 102 | impl CmdPubkey { 103 | fn run(self) -> Result<()> { 104 | let secret = parse_optional_input(self.secret, false)?; 105 | 106 | let secret = ed25519_dalek::SecretKey::from_bytes(&secret)?; 107 | let public = ed25519_dalek::PublicKey::from(&secret); 108 | 109 | print_output(encode_key_pair(secret, public, self.base64)); 110 | Ok(()) 111 | } 112 | } 113 | 114 | fn encode_key_pair( 115 | secret: ed25519_dalek::SecretKey, 116 | public: ed25519_dalek::PublicKey, 117 | base64: bool, 118 | ) -> serde_json::Value { 119 | let encode = |bytes: &[u8; 32]| -> String { 120 | if base64 { 121 | base64::encode(bytes) 122 | } else { 123 | hex::encode(bytes) 124 | } 125 | }; 126 | 127 | serde_json::json!({ 128 | "secret": encode(secret.as_bytes()), 129 | "public": encode(public.as_bytes()), 130 | }) 131 | } 132 | -------------------------------------------------------------------------------- /src/config/app_config.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddrV4; 2 | use std::path::Path; 3 | use std::time::Duration; 4 | 5 | use anyhow::{Context, Result}; 6 | use broxus_util::{ 7 | const_duration_ms, serde_duration_ms, serde_hex_array, serde_optional_string, serde_string, 
8 | serde_string_or_number, 9 | }; 10 | use everscale_crypto::ed25519; 11 | use serde::{Deserialize, Serialize}; 12 | 13 | use crate::defaults; 14 | use crate::util::{serde_mc_address, serde_public_key, serde_secret_key}; 15 | 16 | /// Tool config 17 | #[derive(Default, Clone, Serialize, Deserialize)] 18 | #[serde(default, deny_unknown_fields)] 19 | pub struct AppConfig { 20 | /// Control config 21 | pub control: Option, 22 | /// ADNL config 23 | pub adnl: Option, 24 | /// Validation config 25 | pub validator: Option, 26 | } 27 | 28 | impl AppConfig { 29 | pub fn load>(path: P) -> Result { 30 | let content = std::fs::read_to_string(path).context("failed to read app config")?; 31 | toml::from_str(&content).context("failed to deserialize app config") 32 | } 33 | 34 | pub fn store>(&self, path: P) -> Result<()> { 35 | let data = toml::to_string_pretty(self).context("failed to serialize config")?; 36 | std::fs::write(path, data).context("failed to save config") 37 | } 38 | 39 | pub fn currency(&self) -> &'static str { 40 | if let Some(currency) = defaults::currency_from_env() { 41 | return currency; 42 | } 43 | 44 | if let Some(adnl) = &self.adnl { 45 | if let Some(defaults) = defaults::detect_custom_defaults(&adnl.zerostate_file_hash) { 46 | return defaults.currency; 47 | } 48 | } 49 | 50 | defaults::DEFAULT_CURRENCY 51 | } 52 | 53 | pub fn node_repo(&self) -> &str { 54 | if let Some(node_repo) = defaults::node_repo_from_env() { 55 | return node_repo; 56 | } 57 | 58 | if let Some(adnl) = &self.adnl { 59 | if let Some(defaults) = defaults::detect_custom_defaults(&adnl.zerostate_file_hash) { 60 | return defaults.node_repo; 61 | } 62 | } 63 | 64 | defaults::DEFAULT_NODE_REPO 65 | } 66 | 67 | pub fn control(&self) -> Result<&AppConfigControl> { 68 | self.control.as_ref().context("control config is empty") 69 | } 70 | 71 | pub fn adnl(&self) -> Result<&AppConfigAdnl> { 72 | self.adnl.as_ref().context("adnl config is empty") 73 | } 74 | } 75 | 76 | #[derive(Clone, 
Serialize, Deserialize)] 77 | #[serde(deny_unknown_fields)] 78 | pub struct AppConfigControl { 79 | /// Control server socket address 80 | pub server_address: SocketAddrV4, 81 | 82 | /// Control server pubkey 83 | #[serde(with = "serde_public_key")] 84 | pub server_pubkey: ed25519::PublicKey, 85 | 86 | /// Control client pubkey 87 | #[serde(with = "serde_secret_key")] 88 | pub client_secret: ed25519::SecretKey, 89 | 90 | /// Control server connection timeout 91 | #[serde(with = "serde_duration_ms", default = "const_duration_ms::<2000>")] 92 | pub connection_timeout: Duration, 93 | 94 | /// Control server query timeout 95 | #[serde(with = "serde_duration_ms", default = "const_duration_ms::<10000>")] 96 | pub query_timeout: Duration, 97 | } 98 | 99 | impl AppConfigControl { 100 | pub fn from_addr_and_keys( 101 | addr: SocketAddrV4, 102 | server_key: ed25519::PublicKey, 103 | client_key: ed25519::SecretKey, 104 | ) -> Self { 105 | Self { 106 | server_address: addr, 107 | server_pubkey: server_key, 108 | client_secret: client_key, 109 | connection_timeout: Duration::from_millis(2000), 110 | query_timeout: Duration::from_millis(10000), 111 | } 112 | } 113 | } 114 | 115 | #[derive(Clone, Serialize, Deserialize)] 116 | #[serde(deny_unknown_fields)] 117 | pub struct AppConfigAdnl { 118 | /// Local ADNL port 119 | pub client_port: u16, 120 | 121 | /// Server ADNL address 122 | pub server_address: SocketAddrV4, 123 | 124 | /// Server overlay pubkey 125 | #[serde(with = "serde_public_key")] 126 | pub server_pubkey: ed25519::PublicKey, 127 | 128 | /// Zerostate file hash from the global config 129 | #[serde(with = "serde_hex_array")] 130 | pub zerostate_file_hash: [u8; 32], 131 | } 132 | 133 | #[derive(Clone, Eq, PartialEq, Serialize, Deserialize)] 134 | #[serde(deny_unknown_fields, rename_all = "lowercase", tag = "type")] 135 | pub enum AppConfigValidator { 136 | Single(AppConfigValidatorSingle), 137 | DePool(Box), 138 | } 139 | 140 | impl AppConfigValidator { 141 | pub fn 
is_single(&self) -> bool { 142 | matches!(self, Self::Single(_)) 143 | } 144 | } 145 | 146 | #[derive(Clone, Eq, PartialEq, Serialize, Deserialize)] 147 | #[serde(deny_unknown_fields)] 148 | pub struct AppConfigValidatorSingle { 149 | #[serde(with = "serde_mc_address")] 150 | pub address: ton_block::MsgAddressInt, 151 | #[serde(with = "serde_string_or_number")] 152 | pub stake_per_round: u64, 153 | #[serde(default, skip_serializing_if = "Option::is_none")] 154 | pub stake_factor: Option, 155 | } 156 | 157 | #[derive(Clone, Eq, PartialEq, Serialize, Deserialize)] 158 | #[serde(deny_unknown_fields)] 159 | pub struct AppConfigValidatorDePool { 160 | #[serde(with = "serde_string")] 161 | pub owner: ton_block::MsgAddressInt, 162 | #[serde(with = "serde_string")] 163 | pub depool: ton_block::MsgAddressInt, 164 | pub depool_type: DePoolType, 165 | #[serde(default, skip_serializing_if = "Option::is_none")] 166 | pub stake_factor: Option, 167 | #[serde( 168 | default, 169 | with = "serde_optional_string", 170 | skip_serializing_if = "Option::is_none" 171 | )] 172 | pub cluster: Option, 173 | #[serde(default, skip_serializing_if = "Option::is_none")] 174 | pub deploy: Option, 175 | } 176 | 177 | #[derive(Clone, Eq, PartialEq, Serialize, Deserialize)] 178 | pub struct AppConfigDePoolDeploymentParams { 179 | #[serde(with = "serde_string_or_number")] 180 | pub min_stake: u64, 181 | #[serde(with = "serde_string_or_number")] 182 | pub validator_assurance: u64, 183 | pub participant_reward_fraction: u8, 184 | } 185 | 186 | #[derive(Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] 187 | pub enum DePoolType { 188 | #[serde(rename = "default_v3")] 189 | DefaultV3, 190 | #[serde(rename = "stever_v1")] 191 | StEverV1, 192 | #[serde(rename = "stever_v2")] 193 | StEverV2, 194 | } 195 | 196 | impl DePoolType { 197 | pub const LATEST_STEVER: Self = Self::StEverV2; 198 | 199 | pub fn is_default(&self) -> bool { 200 | matches!(self, Self::DefaultV3) 201 | } 202 | 203 | pub fn 
is_stever(&self) -> bool { 204 | !self.is_default() 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /src/config/global_config/mod.rs: -------------------------------------------------------------------------------- 1 | use std::convert::{TryFrom, TryInto}; 2 | use std::path::Path; 3 | 4 | use anyhow::{anyhow, Context, Result}; 5 | use broxus_util::serde_base64_array; 6 | use everscale_network::proto; 7 | use serde::{Deserialize, Deserializer}; 8 | 9 | #[derive(Clone)] 10 | pub struct GlobalConfig { 11 | pub dht_nodes: Vec, 12 | pub zero_state: ton_block::BlockIdExt, 13 | } 14 | 15 | impl GlobalConfig { 16 | pub const MAINNET: &'static str = include_str!("mainnet.json"); 17 | pub const TESTNET: &'static str = include_str!("testnet.json"); 18 | 19 | pub fn load>(path: P) -> Result { 20 | let file = std::fs::File::open(path).context("failed to open global config")?; 21 | let config = serde_json::from_reader(std::io::BufReader::new(file)) 22 | .context("failed to deserialize global config")?; 23 | Ok(config) 24 | } 25 | } 26 | 27 | impl<'de> Deserialize<'de> for GlobalConfig { 28 | fn deserialize(deserializer: D) -> Result 29 | where 30 | D: Deserializer<'de>, 31 | { 32 | use serde::de::Error; 33 | 34 | GlobalConfigJson::deserialize(deserializer)? 
35 | .try_into() 36 | .map_err(Error::custom) 37 | } 38 | } 39 | 40 | impl TryFrom for GlobalConfig { 41 | type Error = anyhow::Error; 42 | 43 | fn try_from(value: GlobalConfigJson) -> Result { 44 | require_type(value.ty, "config.global")?; 45 | require_type(value.validator.ty, "validator.config.global")?; 46 | 47 | Ok(Self { 48 | dht_nodes: value.dht.try_into()?, 49 | zero_state: value.validator.zero_state.try_into()?, 50 | }) 51 | } 52 | } 53 | 54 | impl TryFrom for Vec { 55 | type Error = anyhow::Error; 56 | 57 | fn try_from(value: DhtJson) -> Result { 58 | require_type(value.ty, "dht.config.global")?; 59 | require_type(value.static_nodes.ty, "dht.nodes")?; 60 | value 61 | .static_nodes 62 | .nodes 63 | .into_iter() 64 | .map(TryFrom::try_from) 65 | .collect() 66 | } 67 | } 68 | 69 | impl TryFrom for proto::dht::NodeOwned { 70 | type Error = anyhow::Error; 71 | 72 | fn try_from(value: DhtNodeJson) -> Result { 73 | require_type(value.ty, "dht.node")?; 74 | require_type(value.id.ty, "pub.ed25519")?; 75 | 76 | Ok(Self { 77 | id: everscale_crypto::tl::PublicKeyOwned::Ed25519 { key: value.id.key }, 78 | addr_list: value.addr_list.try_into()?, 79 | version: value.version as u32, 80 | signature: value.signature.to_vec().into(), 81 | }) 82 | } 83 | } 84 | 85 | impl TryFrom for proto::adnl::AddressList { 86 | type Error = anyhow::Error; 87 | 88 | fn try_from(value: AddressListJson) -> Result { 89 | require_type(value.ty, "adnl.addressList")?; 90 | 91 | Ok(Self { 92 | address: value 93 | .addrs 94 | .into_iter() 95 | .next() 96 | .map(TryFrom::try_from) 97 | .transpose()?, 98 | version: value.version as u32, 99 | reinit_date: value.reinit_date as u32, 100 | expire_at: value.expire_at as u32, 101 | }) 102 | } 103 | } 104 | 105 | impl TryFrom for proto::adnl::Address { 106 | type Error = anyhow::Error; 107 | 108 | fn try_from(value: AddressJson) -> Result { 109 | require_type(value.ty, "adnl.address.udp")?; 110 | 111 | Ok(proto::adnl::Address { 112 | ip: value.ip as u32, 
113 | port: value.port as u32, 114 | }) 115 | } 116 | } 117 | 118 | impl TryFrom for ton_block::BlockIdExt { 119 | type Error = anyhow::Error; 120 | 121 | fn try_from(value: BlockIdJson) -> Result { 122 | Ok(ton_block::BlockIdExt { 123 | shard_id: ton_block::ShardIdent::with_tagged_prefix( 124 | value.workchain, 125 | value.shard as u64, 126 | )?, 127 | seq_no: value.seqno as u32, 128 | root_hash: value.root_hash.into(), 129 | file_hash: value.file_hash.into(), 130 | }) 131 | } 132 | } 133 | 134 | fn require_type(ty: String, required: &'static str) -> Result<()> { 135 | if ty == required { 136 | Ok(()) 137 | } else { 138 | Err(anyhow!("Invalid type {ty}, expected {required}")) 139 | } 140 | } 141 | 142 | #[derive(Deserialize)] 143 | struct GlobalConfigJson { 144 | #[serde(rename = "@type")] 145 | ty: String, 146 | dht: DhtJson, 147 | validator: ValidatorJson, 148 | } 149 | 150 | #[derive(Deserialize)] 151 | struct DhtJson { 152 | #[serde(rename = "@type")] 153 | ty: String, 154 | static_nodes: StaticNodesJson, 155 | } 156 | 157 | #[derive(Deserialize)] 158 | struct StaticNodesJson { 159 | #[serde(rename = "@type")] 160 | ty: String, 161 | nodes: Vec, 162 | } 163 | 164 | #[derive(Deserialize)] 165 | struct DhtNodeJson { 166 | #[serde(rename = "@type")] 167 | ty: String, 168 | id: IdJson, 169 | addr_list: AddressListJson, 170 | version: i32, 171 | #[serde(with = "serde_base64_array")] 172 | signature: [u8; 64], 173 | } 174 | 175 | #[derive(Deserialize)] 176 | struct IdJson { 177 | #[serde(rename = "@type")] 178 | ty: String, 179 | #[serde(with = "serde_base64_array")] 180 | key: [u8; 32], 181 | } 182 | 183 | #[derive(Deserialize)] 184 | struct AddressListJson { 185 | #[serde(rename = "@type")] 186 | ty: String, 187 | addrs: Vec, 188 | version: i32, 189 | reinit_date: i32, 190 | expire_at: i32, 191 | } 192 | 193 | #[derive(Deserialize)] 194 | struct AddressJson { 195 | #[serde(rename = "@type")] 196 | ty: String, 197 | ip: i32, 198 | port: i32, 199 | } 200 | 201 | 
/// `validator` section of the global config JSON.
#[derive(Deserialize)]
struct ValidatorJson {
    /// TL constructor tag; checked against `validator.config.global`.
    #[serde(rename = "@type")]
    ty: String,
    /// Zerostate block id of the network.
    zero_state: BlockIdJson,
}

/// Block id as represented in the global config JSON
/// (converted into `ton_block::BlockIdExt`).
#[derive(Deserialize)]
struct BlockIdJson {
    workchain: i32,
    // Stored signed in JSON; reinterpreted as u64 shard prefix on conversion.
    shard: i64,
    seqno: i32,
    #[serde(with = "serde_base64_array")]
    root_hash: [u8; 32],
    #[serde(with = "serde_base64_array")]
    file_hash: [u8; 32],
}
"message_timeout_ms": 0, 25 | "topic": "", 26 | "attempt_timeout_ms": 0, 27 | "message_max_size": 0, 28 | "big_messages_storage": "" 29 | }, 30 | "block_producer": { 31 | "enabled": false, 32 | "brokers": "kafka", 33 | "message_timeout_ms": 100, 34 | "topic": "blocks", 35 | "attempt_timeout_ms": 100, 36 | "message_max_size": 1000000, 37 | "big_messages_storage": "big-blocks" 38 | }, 39 | "message_producer": { 40 | "enabled": false, 41 | "brokers": "kafka", 42 | "message_timeout_ms": 100, 43 | "topic": "messages", 44 | "attempt_timeout_ms": 100, 45 | "message_max_size": 1000000, 46 | "big_messages_storage": "big-messages" 47 | }, 48 | "transaction_producer": { 49 | "enabled": true, 50 | "brokers": "kafka", 51 | "message_timeout_ms": 100, 52 | "topic": "transactions", 53 | "attempt_timeout_ms": 100, 54 | "message_max_size": 1000000, 55 | "big_messages_storage": "big-transactions" 56 | }, 57 | "account_producer": { 58 | "enabled": false, 59 | "brokers": "kafka", 60 | "message_timeout_ms": 100, 61 | "topic": "accounts", 62 | "attempt_timeout_ms": 100, 63 | "message_max_size": 2000000, 64 | "big_messages_storage": "big-accounts" 65 | }, 66 | "block_proof_producer": { 67 | "enabled": false, 68 | "brokers": "kafka", 69 | "message_timeout_ms": 100, 70 | "topic": "blocks_signatures", 71 | "attempt_timeout_ms": 100, 72 | "message_max_size": 2000000, 73 | "big_messages_storage": "big-block-proofs" 74 | }, 75 | "bad_blocks_storage": "bad-blocks" 76 | }, 77 | "gc": { 78 | "enable_for_archives": true, 79 | "archives_life_time_hours": null, 80 | "enable_for_shard_state_persistent": true, 81 | "cells_gc_config": { 82 | "gc_interval_sec": 900, 83 | "cells_lifetime_sec": 1800 84 | } 85 | }, 86 | "cells_db_config": { 87 | "states_db_queue_len": 1000, 88 | "max_pss_slowdown_mcs": 750, 89 | "prefill_cells_counters": false, 90 | "cache_cells_counters": true, 91 | "cache_size_bytes": 4294967296 92 | }, 93 | "collator_config": { 94 | "cutoff_timeout_ms": 1000, 95 | "stop_timeout_ms": 
1500, 96 | "clean_timeout_percentage_points": 150, 97 | "optimistic_clean_percentage_points": 1000, 98 | "max_secondary_clean_timeout_percentage_points": 350, 99 | "max_collate_threads": 10, 100 | "retry_if_empty": false, 101 | "finalize_empty_after_ms": 800, 102 | "empty_collation_sleep_ms": 100, 103 | "external_messages_timeout_percentage_points": 100 104 | }, 105 | "restore_db": false, 106 | "low_memory_mode": true, 107 | "skip_saving_persistent_states": false, 108 | "states_cache_mode": "Moderate", 109 | "states_cache_cleanup_diff": 1000 110 | } 111 | -------------------------------------------------------------------------------- /src/config/node_config/log_cfg.yml: -------------------------------------------------------------------------------- 1 | refresh_rate: 30 seconds 2 | 3 | appenders: 4 | stdout: 5 | kind: console 6 | encoder: 7 | pattern: "{l} [{h({t})}] {I}: {m}{n}" 8 | 9 | root: 10 | level: error 11 | appenders: 12 | - stdout 13 | 14 | loggers: 15 | # node messages 16 | ton_node: 17 | level: info 18 | ever_node: 19 | level: info 20 | boot: 21 | level: info 22 | sync: 23 | level: info 24 | storage: 25 | level: info 26 | 27 | # adnl messages 28 | adnl: 29 | level: error 30 | 31 | overlay: 32 | level: error 33 | 34 | rldp: 35 | level: error 36 | 37 | dht: 38 | level: error 39 | 40 | # block messages 41 | ton_block: 42 | level: off 43 | ever_block: 44 | level: off 45 | 46 | # block messages 47 | executor: 48 | level: off 49 | 50 | # tvm messages 51 | tvm: 52 | level: off 53 | 54 | librdkafka: 55 | level: error 56 | 57 | validator: 58 | level: info 59 | 60 | catchain: 61 | level: info 62 | 63 | validator_session: 64 | level: info 65 | 66 | telemetry: 67 | level: off 68 | -------------------------------------------------------------------------------- /src/config/stored_keys.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use anyhow::{Context, Result}; 4 | use 
broxus_util::{serde_hex_array, serde_optional_hex_array}; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | use crate::crypto::*; 8 | 9 | #[derive(Serialize)] 10 | pub struct StoredKeys { 11 | #[serde(with = "serde_hex_array")] 12 | pub secret: [u8; 32], 13 | #[serde( 14 | with = "serde_optional_hex_array", 15 | skip_serializing_if = "Option::is_none" 16 | )] 17 | pub public: Option<[u8; 32]>, 18 | #[serde(skip_serializing_if = "Option::is_none")] 19 | pub seed: Option, 20 | } 21 | 22 | impl StoredKeys { 23 | pub const DEFAULT_MNEMONIC_TYPE: MnemonicType = MnemonicType::Bip39; 24 | 25 | pub fn generate() -> Result { 26 | Self::from_seed(generate_seed(Self::DEFAULT_MNEMONIC_TYPE)) 27 | } 28 | 29 | pub fn from_seed>(seed: T) -> Result { 30 | fn inner(seed: &str) -> Result { 31 | let seed = seed.trim().to_owned(); 32 | let keypair = 33 | derive_from_phrase(&seed, StoredKeys::DEFAULT_MNEMONIC_TYPE, DEFAULT_PATH)?; 34 | Ok(StoredKeys { 35 | secret: keypair.secret.to_bytes(), 36 | public: Some(keypair.public.to_bytes()), 37 | seed: Some(seed), 38 | }) 39 | } 40 | 41 | inner(seed.as_ref()) 42 | } 43 | 44 | pub fn from_secret>(secret: T) -> Result { 45 | fn inner(secret: &[u8]) -> Result { 46 | let secret = ed25519_dalek::SecretKey::from_bytes(secret)?; 47 | let public = ed25519_dalek::PublicKey::from(&secret); 48 | Ok(StoredKeys { 49 | secret: secret.to_bytes(), 50 | public: Some(public.to_bytes()), 51 | seed: None, 52 | }) 53 | } 54 | 55 | inner(secret.as_ref()) 56 | } 57 | 58 | pub fn load_as_keypair>(path: P) -> Result { 59 | Ok(Self::load(path)?.as_keypair()) 60 | } 61 | 62 | pub fn load>(path: P) -> Result { 63 | fn inner(path: &Path) -> Result { 64 | #[derive(Deserialize)] 65 | #[serde(deny_unknown_fields)] 66 | pub struct StoredKeysHelper { 67 | #[serde(with = "serde_optional_hex_array")] 68 | pub secret: Option<[u8; 32]>, 69 | #[serde(default, with = "serde_optional_hex_array")] 70 | pub public: Option<[u8; 32]>, 71 | #[serde(default)] 72 | pub seed: Option, 73 | } 
74 | 75 | let file = std::fs::File::open(path).context("failed to open keys file")?; 76 | let mut deserializer = 77 | serde_json::Deserializer::from_reader(std::io::BufReader::new(file)); 78 | let data: StoredKeysHelper = serde_path_to_error::deserialize(&mut deserializer) 79 | .context("failed to parse keys")?; 80 | 81 | if let Some(secret) = data.secret { 82 | Ok(StoredKeys { 83 | secret, 84 | public: data.public, 85 | seed: data.seed, 86 | }) 87 | } else if let Some(seed) = data.seed { 88 | StoredKeys::from_seed(seed) 89 | } else { 90 | anyhow::bail!("invalid keys file") 91 | } 92 | } 93 | 94 | inner(path.as_ref()) 95 | } 96 | 97 | pub fn store>(&self, path: P) -> Result<()> { 98 | let data = serde_json::to_string_pretty(self).context("failed to serialize keys")?; 99 | std::fs::write(path, data).context("failed to save keys") 100 | } 101 | 102 | pub fn as_secret(&self) -> ed25519_dalek::SecretKey { 103 | ed25519_dalek::SecretKey::from_bytes(&self.secret).unwrap() 104 | } 105 | 106 | pub fn as_keypair(&self) -> ed25519_dalek::Keypair { 107 | let secret = self.as_secret(); 108 | let public = ed25519_dalek::PublicKey::from(&secret); 109 | ed25519_dalek::Keypair { secret, public } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/contracts/cluster.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::sync::Arc; 3 | 4 | use anyhow::{Context, Result}; 5 | use futures_util::stream::{FuturesUnordered, StreamExt}; 6 | use nekoton_abi::{FunctionBuilder, KnownParamType, UnpackFirst}; 7 | 8 | use crate::contracts::Strategy; 9 | use crate::network::Subscription; 10 | 11 | pub struct Cluster { 12 | pub address: ton_block::MsgAddressInt, 13 | pub subscription: Arc, 14 | } 15 | 16 | impl Cluster { 17 | pub fn new(address: ton_block::MsgAddressInt, subscription: Arc) -> Self { 18 | Self { 19 | address, 20 | subscription, 21 | } 22 | } 23 | 24 
| pub async fn find_deployed_strategy_for_depool( 25 | &self, 26 | depool: &ton_block::MsgAddressInt, 27 | ) -> Result> { 28 | const CHUNK_LEN: usize = 10; 29 | 30 | let all_strategies = self 31 | .get_deployed_strategies() 32 | .await 33 | .context("failed to get all deployed strategies")?; 34 | 35 | for chunk in all_strategies.chunks(CHUNK_LEN) { 36 | let mut futures = FuturesUnordered::new(); 37 | for strategy in chunk { 38 | let strategy = Strategy::new(strategy.clone(), self.subscription.clone()); 39 | futures.push(async move { 40 | let details = strategy.get_details().await; 41 | (strategy.address, details) 42 | }); 43 | } 44 | 45 | while let Some((strategy, details)) = futures.next().await { 46 | match details { 47 | Ok(details) => { 48 | if &details.depool == depool { 49 | return Ok(Some(strategy)); 50 | } 51 | } 52 | Err(e) => { 53 | tracing::warn!(%strategy, "failed to get strategy details: {e:?}"); 54 | } 55 | } 56 | } 57 | } 58 | 59 | Ok(None) 60 | } 61 | 62 | pub async fn get_deployed_strategies(&self) -> Result> { 63 | let details: StrategiesMap = self 64 | .subscription 65 | .run_local(&self.address, methods::deployed_strategies(), &[]) 66 | .await? 
67 | .unpack_first()?; 68 | 69 | Ok(details.into_keys().collect()) 70 | } 71 | } 72 | 73 | type StrategiesMap = BTreeMap; 74 | 75 | mod methods { 76 | use super::*; 77 | 78 | pub fn deployed_strategies() -> &'static ton_abi::Function { 79 | once!(ton_abi::Function, || { 80 | FunctionBuilder::new("deployedStrategies") 81 | .abi_version(ABI_VERSION) 82 | .default_headers() 83 | .output("details", StrategiesMap::param_type()) 84 | .build() 85 | }) 86 | } 87 | } 88 | 89 | const ABI_VERSION: ton_abi::contract::AbiVersion = ton_abi::contract::ABI_VERSION_2_2; 90 | -------------------------------------------------------------------------------- /src/contracts/depool/stever/DePoolProxy.code: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/broxus/nodekeeper/2b9661b60d6cca6cd53375269905880173ee0615/src/contracts/depool/stever/DePoolProxy.code -------------------------------------------------------------------------------- /src/contracts/depool/stever/DePoolV1.tvc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/broxus/nodekeeper/2b9661b60d6cca6cd53375269905880173ee0615/src/contracts/depool/stever/DePoolV1.tvc -------------------------------------------------------------------------------- /src/contracts/depool/stever/DePoolV2.tvc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/broxus/nodekeeper/2b9661b60d6cca6cd53375269905880173ee0615/src/contracts/depool/stever/DePoolV2.tvc -------------------------------------------------------------------------------- /src/contracts/depool/v3/DePool.tvc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/broxus/nodekeeper/2b9661b60d6cca6cd53375269905880173ee0615/src/contracts/depool/v3/DePool.tvc -------------------------------------------------------------------------------- 
/src/contracts/depool/v3/DePoolProxy.code: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/broxus/nodekeeper/2b9661b60d6cca6cd53375269905880173ee0615/src/contracts/depool/v3/DePoolProxy.code -------------------------------------------------------------------------------- /src/contracts/elector.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::sync::Arc; 3 | 4 | use anyhow::{Context, Result}; 5 | use broxus_util::now; 6 | use nekoton_abi::{ 7 | BuildTokenValue, FunctionBuilder, KnownParamType, KnownParamTypePlain, MaybeRef, PackAbiPlain, 8 | TokenValueExt, UnpackAbi, UnpackAbiPlain, 9 | }; 10 | 11 | use super::{InternalMessage, ONE_EVER}; 12 | use crate::network::Subscription; 13 | use crate::util::split_address; 14 | 15 | pub struct Elector { 16 | address: ton_block::MsgAddressInt, 17 | subscription: Arc, 18 | } 19 | 20 | impl Elector { 21 | pub fn new(address: ton_types::UInt256, subscription: Arc) -> Self { 22 | let address = ton_block::MsgAddressInt::AddrStd(ton_block::MsgAddrStd { 23 | anycast: None, 24 | workchain_id: -1, 25 | address: address.into(), 26 | }); 27 | 28 | Self { 29 | address, 30 | subscription, 31 | } 32 | } 33 | 34 | pub fn address(&self) -> &ton_block::MsgAddressInt { 35 | &self.address 36 | } 37 | 38 | pub fn recover_stake(&self) -> Result { 39 | let now = now() as u64; 40 | Ok(InternalMessage { 41 | amount: ONE_EVER, 42 | dst: self.address.clone(), 43 | payload: methods::recover_stake() 44 | .encode_internal_input(&[now.token_value().named("query_id")]) 45 | .and_then(ton_types::BuilderData::into_cell)?, 46 | bounce: false, 47 | }) 48 | } 49 | 50 | /// Prepares validator node and generates elector payload 51 | pub async fn participate_in_elections( 52 | &self, 53 | election_id: u32, 54 | address: &ton_block::MsgAddressInt, 55 | stake_factor: u32, 56 | timings: &ton_block::ConfigParam15, 57 | 
signature_id: Option, 58 | ) -> Result { 59 | const TTL_OFFSET: u32 = 1000; 60 | 61 | anyhow::ensure!( 62 | address.is_masterchain(), 63 | "participant address not in masterchain" 64 | ); 65 | 66 | let (_, address) = split_address(address)?; 67 | 68 | let rpc = self.subscription.tcp_rpc(); 69 | 70 | // Generate new key 71 | let permanent_key_hash = rpc 72 | .generate_key_pair() 73 | .await 74 | .context("failed to generate validator keys")?; 75 | 76 | // Export its public key 77 | let perm_pubkey = rpc 78 | .export_public_key(&permanent_key_hash) 79 | .await 80 | .context("failed to export validator public key")?; 81 | 82 | // Add this key as a validator key 83 | let ttl = election_id 84 | + timings.validators_elected_for 85 | + timings.elections_start_before 86 | + timings.elections_end_before 87 | + timings.stake_held_for 88 | + TTL_OFFSET; 89 | rpc.add_validator_permanent_key(&permanent_key_hash, election_id, ttl) 90 | .await 91 | .context("failed to add validator permanent key")?; 92 | 93 | // Generate adnl key (key hash is equal to adnl addr) 94 | let adnl_addr = rpc 95 | .generate_key_pair() 96 | .await 97 | .context("failed to generate validator adnl keys")?; 98 | 99 | // Assign adnl address to the validator key 100 | // NOTE: ttl is 0 here because it is unused in the node 101 | rpc.add_validator_adnl_address(&permanent_key_hash, &adnl_addr, 0) 102 | .await 103 | .context("failed to add validator adnl address")?; 104 | 105 | // Sign data 106 | let unsigned = UnsignedParticipantData { 107 | election_id, 108 | address, 109 | max_factor: stake_factor, 110 | public_key: ton_types::UInt256::from(perm_pubkey.to_bytes()), 111 | adnl_addr: ton_types::UInt256::from(adnl_addr), 112 | }; 113 | 114 | let data_to_sign = unsigned.build_data_to_sign(); 115 | let data_to_sign = ton_abi::extend_signature_with_id(&data_to_sign, signature_id); 116 | 117 | let signature = rpc 118 | .sign(&permanent_key_hash, &data_to_sign) 119 | .await 120 | .context("failed to sign election 
data")?; 121 | 122 | // Generate internal message payload 123 | unsigned 124 | .sign(signature) 125 | .context("failed to insert signature") 126 | } 127 | 128 | pub async fn get_data(&self) -> Result { 129 | let state = self.get_state().await?; 130 | 131 | let ton_block::AccountState::AccountActive { state_init } = state.storage.state else { 132 | anyhow::bail!("elector account is not active"); 133 | }; 134 | 135 | let data = state_init.data.context("elector data is empty")?; 136 | let inner: data::PartialElectorData = ton_abi::TokenValue::decode_params( 137 | data::layout(), 138 | ton_types::SliceData::load_cell(data)?, 139 | &ton_abi::contract::ABI_VERSION_2_1, 140 | true, 141 | ) 142 | .context("failed to parse elector data")? 143 | .unpack()?; 144 | 145 | Ok(ElectorData { inner }) 146 | } 147 | 148 | async fn get_state(&self) -> Result { 149 | self.subscription 150 | .get_account_state(&self.address) 151 | .await 152 | .context("failed to get elector state")? 153 | .context("elector not found") 154 | } 155 | } 156 | 157 | pub struct ElectorData { 158 | inner: data::PartialElectorData, 159 | } 160 | 161 | impl ElectorData { 162 | pub fn election_id(&self) -> Option { 163 | let election_id = self.inner.current_election.0.as_ref()?.elect_at; 164 | Some(election_id) 165 | } 166 | 167 | pub fn nearest_unfreeze_at(&self, election_id: u32) -> Option { 168 | self.inner 169 | .past_elections 170 | .values() 171 | .map(|election| election.unfreeze_at) 172 | .find(|&unfreeze_at| unfreeze_at < election_id) 173 | } 174 | 175 | pub fn has_unfrozen_stake( 176 | &self, 177 | address: &ton_block::MsgAddressInt, 178 | ) -> Option { 179 | if !address.is_masterchain() { 180 | // Elector has rewards only for masterchain accounts 181 | return None; 182 | } 183 | 184 | let (_, address) = split_address(address).ok()?; 185 | self.inner.credits.get(&address).copied() 186 | } 187 | 188 | pub fn elected(&self, address: &ton_block::MsgAddressInt) -> bool { 189 | if 
!address.is_masterchain() { 190 | return false; 191 | } 192 | 193 | let Some(current_election) = &self.inner.current_election.0 else { 194 | return false; 195 | }; 196 | let Ok((_, address)) = split_address(address) else { 197 | return false; 198 | }; 199 | 200 | current_election 201 | .members 202 | .values() 203 | .any(|entry| entry.src_addr == address) 204 | } 205 | } 206 | 207 | struct UnsignedParticipantData { 208 | election_id: u32, 209 | address: ton_types::UInt256, 210 | max_factor: u32, 211 | public_key: ton_types::UInt256, 212 | adnl_addr: ton_types::UInt256, 213 | } 214 | 215 | impl UnsignedParticipantData { 216 | fn build_data_to_sign(&self) -> Vec { 217 | const TL_ID: u32 = 0x654C5074; 218 | 219 | let mut data = Vec::with_capacity(4 + 4 + 4 + 32 + 32); 220 | data.extend_from_slice(&TL_ID.to_be_bytes()); 221 | data.extend_from_slice(&self.election_id.to_be_bytes()); 222 | data.extend_from_slice(&self.max_factor.to_be_bytes()); 223 | data.extend_from_slice(self.address.as_array()); 224 | data.extend_from_slice(self.adnl_addr.as_array()); 225 | data 226 | } 227 | 228 | fn sign(self, signature: [u8; 64]) -> Result { 229 | methods::participate_in_elections() 230 | .encode_internal_input( 231 | &methods::ParticipateInElectionsInputs { 232 | query_id: now() as u64, 233 | validator_key: self.public_key, 234 | stake_at: self.election_id, 235 | max_factor: self.max_factor, 236 | adnl_addr: self.adnl_addr, 237 | signature: signature.to_vec(), 238 | } 239 | .pack(), 240 | ) 241 | .and_then(ton_types::BuilderData::into_cell) 242 | } 243 | } 244 | 245 | mod data { 246 | use super::*; 247 | 248 | pub fn layout() -> &'static [ton_abi::Param] { 249 | once!(Vec, || PartialElectorData::param_type()) 250 | } 251 | 252 | #[derive(Debug, UnpackAbiPlain, KnownParamTypePlain)] 253 | pub struct PartialElectorData { 254 | #[abi] 255 | pub current_election: MaybeRef, 256 | #[abi] 257 | pub credits: BTreeMap, 258 | #[abi] 259 | pub past_elections: BTreeMap, 260 | } 261 | 262 | 
#[derive(Debug, UnpackAbi, KnownParamType)] 263 | pub struct CurrentElectionData { 264 | #[abi(uint32)] 265 | pub elect_at: u32, 266 | #[abi(uint32)] 267 | pub elect_close: u32, 268 | #[abi(gram)] 269 | pub min_stake: u128, 270 | #[abi(gram)] 271 | pub total_stake: u128, 272 | #[abi] 273 | pub members: BTreeMap, 274 | #[abi(bool)] 275 | pub failed: bool, 276 | #[abi(bool)] 277 | pub finished: bool, 278 | } 279 | 280 | #[derive(Debug, UnpackAbi, KnownParamType)] 281 | pub struct ElectionMember { 282 | #[abi(gram)] 283 | pub msg_value: u64, 284 | #[abi(uint32)] 285 | pub created_at: u32, 286 | #[abi(uint32)] 287 | pub max_factor: u32, 288 | #[abi(uint256)] 289 | pub src_addr: ton_types::UInt256, 290 | #[abi(uint256)] 291 | pub adnl_addr: ton_types::UInt256, 292 | } 293 | 294 | #[derive(Debug, UnpackAbi, KnownParamType)] 295 | pub struct PastElectionData { 296 | #[abi(uint32)] 297 | pub unfreeze_at: u32, 298 | } 299 | } 300 | 301 | mod methods { 302 | use super::*; 303 | 304 | pub fn recover_stake() -> &'static ton_abi::Function { 305 | once!(ton_abi::Function, || FunctionBuilder::new("recover_stake") 306 | .id(0x47657424) 307 | .input("query_id", u64::param_type()) 308 | .build()) 309 | } 310 | 311 | #[derive(Clone, PackAbiPlain, KnownParamTypePlain)] 312 | pub struct ParticipateInElectionsInputs { 313 | #[abi(uint64)] 314 | pub query_id: u64, 315 | #[abi(uint256)] 316 | pub validator_key: ton_types::UInt256, 317 | #[abi(uint32)] 318 | pub stake_at: u32, 319 | #[abi(uint32)] 320 | pub max_factor: u32, 321 | #[abi(uint256)] 322 | pub adnl_addr: ton_types::UInt256, 323 | #[abi(bytes)] 324 | pub signature: Vec, 325 | } 326 | 327 | pub fn participate_in_elections() -> &'static ton_abi::Function { 328 | once!(ton_abi::Function, || { 329 | FunctionBuilder::new("participate_in_elections") 330 | .id(0x4E73744B) 331 | .inputs(ParticipateInElectionsInputs::param_type()) 332 | .build() 333 | }) 334 | } 335 | } 336 | 
-------------------------------------------------------------------------------- /src/contracts/mod.rs: -------------------------------------------------------------------------------- 1 | pub use cluster::Cluster; 2 | pub use depool::DePool; 3 | pub use elector::Elector; 4 | pub use strategy::Strategy; 5 | pub use wallet::Wallet; 6 | 7 | pub mod cluster; 8 | pub mod depool; 9 | pub mod elector; 10 | pub mod strategy; 11 | pub mod wallet; 12 | 13 | #[derive(Clone)] 14 | pub struct InternalMessage { 15 | pub dst: ton_block::MsgAddressInt, 16 | pub amount: u128, 17 | pub payload: ton_types::Cell, 18 | pub bounce: bool, 19 | } 20 | 21 | impl InternalMessage { 22 | pub fn empty(dst: ton_block::MsgAddressInt, amount: u128, bounce: bool) -> Self { 23 | Self { 24 | dst, 25 | amount, 26 | payload: Default::default(), 27 | bounce, 28 | } 29 | } 30 | } 31 | 32 | pub const ONE_EVER: u128 = 1_000_000_000; 33 | -------------------------------------------------------------------------------- /src/contracts/strategy.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use anyhow::Result; 4 | use nekoton_abi::{ 5 | BuildTokenValue, FunctionBuilder, KnownParamType, TokenValueExt, UnpackAbi, UnpackFirst, 6 | }; 7 | 8 | use crate::network::Subscription; 9 | 10 | pub struct Strategy { 11 | pub address: ton_block::MsgAddressInt, 12 | pub subscription: Arc, 13 | } 14 | 15 | impl Strategy { 16 | pub fn new(address: ton_block::MsgAddressInt, subscription: Arc) -> Self { 17 | Self { 18 | address, 19 | subscription, 20 | } 21 | } 22 | 23 | pub async fn get_details(&self) -> Result
{ 24 | let details = self 25 | .subscription 26 | .run_local( 27 | &self.address, 28 | methods::get_details(), 29 | &[0u32.token_value().named("answerId")], 30 | ) 31 | .await? 32 | .unpack_first()?; 33 | Ok(details) 34 | } 35 | } 36 | 37 | #[derive(Clone, UnpackAbi, KnownParamType)] 38 | pub struct Details { 39 | #[abi(address)] 40 | pub vault: ton_block::MsgAddressInt, 41 | #[abi(address)] 42 | pub depool: ton_block::MsgAddressInt, 43 | #[abi(uint32)] 44 | pub strategy_version: u32, 45 | #[abi(uint8)] 46 | pub state: u8, 47 | } 48 | 49 | mod methods { 50 | use super::*; 51 | 52 | pub fn get_details() -> &'static ton_abi::Function { 53 | once!(ton_abi::Function, || { 54 | FunctionBuilder::new("getDetails") 55 | .abi_version(ABI_VERSION) 56 | .time_header() 57 | .expire_header() 58 | .input("answerId", u32::param_type()) 59 | .output("details", Details::param_type()) 60 | .build() 61 | }) 62 | } 63 | } 64 | 65 | const ABI_VERSION: ton_abi::contract::AbiVersion = ton_abi::contract::ABI_VERSION_2_2; 66 | -------------------------------------------------------------------------------- /src/contracts/wallet/EverWallet.code: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/broxus/nodekeeper/2b9661b60d6cca6cd53375269905880173ee0615/src/contracts/wallet/EverWallet.code -------------------------------------------------------------------------------- /src/contracts/wallet/mod.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use anyhow::{Context, Result}; 4 | use nekoton_abi::{FunctionBuilder, KnownParamTypePlain, PackAbiPlain}; 5 | use ton_abi::contract::ABI_VERSION_2_3; 6 | use ton_block::{Deserializable, GetRepresentationHash}; 7 | 8 | use super::{InternalMessage, ONE_EVER}; 9 | use crate::network::Subscription; 10 | use crate::util::{make_default_headers, TransactionWithHash}; 11 | 12 | pub struct Wallet { 13 | keypair: 
ed25519_dalek::Keypair, 14 | address: ton_block::MsgAddressInt, 15 | subscription: Arc, 16 | } 17 | 18 | impl Wallet { 19 | pub const INITIAL_BALANCE: u128 = 10 * ONE_EVER; 20 | 21 | pub fn new( 22 | workchain_id: i8, 23 | keypair: ed25519_dalek::Keypair, 24 | subscription: Arc, 25 | ) -> Self { 26 | Self { 27 | address: compute_wallet_address(workchain_id, &keypair.public), 28 | keypair, 29 | subscription, 30 | } 31 | } 32 | 33 | pub fn address(&self) -> &ton_block::MsgAddressInt { 34 | &self.address 35 | } 36 | 37 | pub async fn get_balance(&self) -> Result> { 38 | let account = self.get_account_state().await?; 39 | Ok(account.map(|state| state.storage.balance.grams.as_u128())) 40 | } 41 | 42 | /// Sends the internal message to the recipient, returns the destination transaction 43 | pub async fn call(&self, internal_message: InternalMessage) -> Result { 44 | let dst = internal_message.dst.clone(); 45 | let mut dst_transactions = self.subscription.subscribe(&dst); 46 | 47 | let src_tx = self.transfer(internal_message).await?; 48 | tracing::debug!(source_tx_hash = ?src_tx.hash, "message sent from wallet"); 49 | 50 | let mut out_msg_hash = None; 51 | src_tx 52 | .data 53 | .out_msgs 54 | .iterate_slices(|msg| { 55 | let Some(msg) = msg.reference_opt(0) else { 56 | return Ok(true); 57 | }; 58 | 59 | let msg_hash = msg.repr_hash(); 60 | let msg = ton_block::Message::construct_from_cell(msg)?; 61 | let Some(header) = msg.int_header() else { 62 | return Ok(true); 63 | }; 64 | 65 | if header.dst == dst { 66 | out_msg_hash = Some(msg_hash); 67 | Ok(false) 68 | } else { 69 | Ok(true) 70 | } 71 | }) 72 | .context("failed to find outgoing message")?; 73 | let out_msg_hash = out_msg_hash.context("outgoing message not found")?; 74 | 75 | while let Some(tx) = dst_transactions.recv().await { 76 | tracing::debug!(source_tx_hash = ?src_tx.hash, tx_hash = ?tx.hash, "new transaction found"); 77 | let Some(msg) = tx.data.in_msg_cell() else { 78 | continue; 79 | }; 80 | if 
msg.repr_hash() == out_msg_hash { 81 | return Ok(tx); 82 | } 83 | } 84 | anyhow::bail!("destination transaction was not found") 85 | } 86 | 87 | /// Sends the internal message to the recipient, returns the source transaction 88 | pub async fn transfer(&self, internal_message: InternalMessage) -> Result { 89 | let account = self.get_account_state().await?; 90 | 91 | let state_init = match account { 92 | Some(account) => match account.storage.state { 93 | ton_block::AccountState::AccountActive { .. } => None, 94 | ton_block::AccountState::AccountFrozen { .. } => { 95 | anyhow::bail!("account frozen"); 96 | } 97 | ton_block::AccountState::AccountUninit => Some( 98 | make_state_init(&self.keypair.public).context("failed to make state init")?, 99 | ), 100 | }, 101 | None => anyhow::bail!("account not deployed"), 102 | }; 103 | 104 | let inputs = ever_wallet::SendTransactionInputs { 105 | dest: internal_message.dst, 106 | value: internal_message.amount, 107 | bounce: internal_message.bounce, 108 | flags: 3, 109 | payload: internal_message.payload, 110 | } 111 | .pack(); 112 | 113 | let tx = self 114 | .subscription 115 | .send_message_with_retires(|timeout, signature_id| { 116 | let (expire_at, headers) = make_default_headers(Some(self.keypair.public), timeout); 117 | 118 | let mut message = ton_block::Message::with_ext_in_header( 119 | ton_block::ExternalInboundMessageHeader { 120 | dst: self.address.clone(), 121 | ..Default::default() 122 | }, 123 | ); 124 | 125 | message.set_body( 126 | ever_wallet::send_transaction() 127 | .encode_input( 128 | &headers, 129 | &inputs, 130 | false, 131 | Some((&self.keypair, signature_id)), 132 | Some(self.address.clone()), 133 | ) 134 | .and_then(ton_types::SliceData::load_builder)?, 135 | ); 136 | 137 | if let Some(state_init) = state_init.clone() { 138 | message.set_state_init(state_init); 139 | } 140 | 141 | Ok((message, expire_at)) 142 | }) 143 | .await?; 144 | 145 | Ok(tx) 146 | } 147 | 148 | async fn get_account_state(&self) -> 
Result> { 149 | self.subscription 150 | .get_account_state(&self.address) 151 | .await 152 | .context("failed to get account state") 153 | } 154 | } 155 | 156 | pub fn compute_wallet_address( 157 | workchain_id: i8, 158 | pubkey: &ed25519_dalek::PublicKey, 159 | ) -> ton_block::MsgAddressInt { 160 | let hash = make_state_init(pubkey) 161 | .and_then(|state| state.hash()) 162 | .unwrap(); 163 | ton_block::MsgAddressInt::AddrStd(ton_block::MsgAddrStd::with_address( 164 | None, 165 | workchain_id, 166 | hash.into(), 167 | )) 168 | } 169 | 170 | fn make_state_init(public_key: &ed25519_dalek::PublicKey) -> Result { 171 | use ton_types::IBitstring; 172 | 173 | let mut data = ton_types::BuilderData::new(); 174 | data.append_raw(public_key.as_bytes(), 256)?.append_u64(0)?; 175 | let data = data.into_cell()?; 176 | 177 | Ok(ton_block::StateInit { 178 | code: Some(ever_wallet_code().clone()), 179 | data: Some(data), 180 | ..Default::default() 181 | }) 182 | } 183 | 184 | fn ever_wallet_code() -> &'static ton_types::Cell { 185 | once!(ton_types::Cell, || { 186 | let mut data = include_bytes!("./EverWallet.code").as_ref(); 187 | ton_types::deserialize_tree_of_cells(&mut data).unwrap() 188 | }) 189 | } 190 | 191 | mod ever_wallet { 192 | use super::*; 193 | 194 | #[derive(Clone, PackAbiPlain, KnownParamTypePlain)] 195 | pub struct SendTransactionInputs { 196 | #[abi(address)] 197 | pub dest: ton_block::MsgAddressInt, 198 | #[abi(uint128)] 199 | pub value: u128, 200 | #[abi(bool)] 201 | pub bounce: bool, 202 | #[abi(uint8)] 203 | pub flags: u8, 204 | #[abi(cell)] 205 | pub payload: ton_types::Cell, 206 | } 207 | 208 | pub fn send_transaction() -> &'static ton_abi::Function { 209 | once!(ton_abi::Function, || { 210 | FunctionBuilder::new("sendTransaction") 211 | .abi_version(ABI_VERSION_2_3) 212 | .pubkey_header() 213 | .time_header() 214 | .expire_header() 215 | .inputs(SendTransactionInputs::param_type()) 216 | .build() 217 | }) 218 | } 219 | } 220 | 
-------------------------------------------------------------------------------- /src/crypto/bip39.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use tiny_hderive::bip32::ExtendedPrivKey; 3 | 4 | use super::LANGUAGE; 5 | 6 | pub fn validate_phrase(phrase: &str) -> Result<()> { 7 | bip39::Mnemonic::from_phrase(phrase, LANGUAGE)?; 8 | Ok(()) 9 | } 10 | 11 | pub fn derive_from_phrase(phrase: &str, path: &str) -> Result { 12 | let mnemonic = bip39::Mnemonic::from_phrase(phrase, LANGUAGE)?; 13 | let hd = bip39::Seed::new(&mnemonic, ""); 14 | let seed_bytes = hd.as_bytes(); 15 | 16 | let derived = ExtendedPrivKey::derive(seed_bytes, path) 17 | .map_err(|_| anyhow::anyhow!("Invalid derivation path"))?; 18 | 19 | let secret = ed25519_dalek::SecretKey::from_bytes(&derived.secret())?; 20 | let public = ed25519_dalek::PublicKey::from(&secret); 21 | Ok(ed25519_dalek::Keypair { secret, public }) 22 | } 23 | -------------------------------------------------------------------------------- /src/crypto/legacy.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use ed25519_dalek::Keypair; 3 | use hmac::{Mac, NewMac}; 4 | use pbkdf2::pbkdf2; 5 | 6 | use super::LANGUAGE; 7 | 8 | pub fn validate_phrase(phrase: &str) -> Result<()> { 9 | let wordmap = LANGUAGE.wordmap(); 10 | let mut word_count = 0; 11 | for word in phrase.split_whitespace() { 12 | word_count += 1; 13 | anyhow::ensure!(word_count <= 24, "Expected 24 words"); 14 | wordmap.get_bits(word)?; 15 | } 16 | 17 | anyhow::ensure!(word_count == 24, "Expected 24 words"); 18 | Ok(()) 19 | } 20 | 21 | pub fn derive_from_phrase(phrase: &str) -> Result { 22 | const PBKDF_ITERATIONS: u32 = 100_000; 23 | const SALT: &[u8] = b"TON default seed"; 24 | 25 | validate_phrase(phrase)?; 26 | 27 | let password = hmac::Hmac::::new_from_slice(phrase.as_bytes()) 28 | .unwrap() 29 | .finalize() 30 | .into_bytes(); 31 | 32 | 
let mut res = [0; 512 / 8]; 33 | pbkdf2::>(&password, SALT, PBKDF_ITERATIONS, &mut res); 34 | 35 | let secret = ed25519_dalek::SecretKey::from_bytes(&res[0..32])?; 36 | let public = ed25519_dalek::PublicKey::from(&secret); 37 | Ok(Keypair { secret, public }) 38 | } 39 | -------------------------------------------------------------------------------- /src/crypto/mod.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use anyhow::Result; 4 | use hmac::digest::Digest; 5 | use rand::Rng; 6 | 7 | mod bip39; 8 | mod legacy; 9 | 10 | const LANGUAGE: ::bip39::Language = ::bip39::Language::English; 11 | 12 | pub const DEFAULT_PATH: &str = "m/44'/396'/0'/0/0"; 13 | 14 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 15 | pub enum MnemonicType { 16 | /// Phrase with 24 words, used in Crystal Wallet 17 | Legacy, 18 | /// Phrase with 12 words, used everywhere else. The additional parameter is used in 19 | /// derivation path to create multiple keys from one mnemonic 20 | Bip39, 21 | } 22 | 23 | impl FromStr for MnemonicType { 24 | type Err = anyhow::Error; 25 | 26 | fn from_str(s: &str) -> Result { 27 | match s { 28 | "legacy" => Ok(Self::Legacy), 29 | "bip39" => Ok(Self::Bip39), 30 | _ => Err(anyhow::anyhow!( 31 | "unknown mnemonic type (neither `legacy` nor `bip39`)" 32 | )), 33 | } 34 | } 35 | } 36 | 37 | pub fn validate_phrase(phrase: &str, mnemonic_type: MnemonicType) -> Result<()> { 38 | match mnemonic_type { 39 | MnemonicType::Legacy => legacy::validate_phrase(phrase), 40 | MnemonicType::Bip39 => bip39::validate_phrase(phrase), 41 | } 42 | } 43 | 44 | pub fn derive_from_phrase( 45 | phrase: &str, 46 | mnemonic_type: MnemonicType, 47 | path: &str, 48 | ) -> Result { 49 | match mnemonic_type { 50 | MnemonicType::Legacy => legacy::derive_from_phrase(phrase), 51 | MnemonicType::Bip39 => bip39::derive_from_phrase(phrase, path), 52 | } 53 | } 54 | 55 | /// Generates seed phrase 56 | pub fn 
generate_seed(mnemonic_type: MnemonicType) -> String { 57 | use ::bip39::util::{Bits11, IterExt}; 58 | 59 | let rng = &mut rand::thread_rng(); 60 | 61 | pub fn generate_words(entropy: &[u8]) -> Vec<&'static str> { 62 | let wordlist = LANGUAGE.wordlist(); 63 | 64 | let checksum_byte = sha2::Sha256::digest(entropy)[0]; 65 | 66 | entropy 67 | .iter() 68 | .chain(Some(&checksum_byte)) 69 | .bits() 70 | .map(|bits: Bits11| wordlist.get_word(bits)) 71 | .collect() 72 | } 73 | 74 | match mnemonic_type { 75 | MnemonicType::Legacy => { 76 | let entropy: [u8; 32] = rng.gen(); 77 | generate_words(&entropy) 78 | } 79 | MnemonicType::Bip39 => { 80 | let entropy: [u8; 16] = rng.gen(); 81 | generate_words(&entropy) 82 | } 83 | } 84 | .join(" ") 85 | } 86 | -------------------------------------------------------------------------------- /src/defaults.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use once_cell::race::OnceBox; 4 | 5 | use crate::util::parse_hex_or_base64; 6 | 7 | pub const DEFAULT_CURRENCY: &str = "EVER"; 8 | 9 | pub const DEFAULT_NODE_REPO: &str = "https://github.com/everx-labs/ever-node.git"; 10 | 11 | pub const DEFAULT_CONTROL_PORT: u16 = 5031; 12 | pub const DEFAULT_LOCAL_ADNL_PORT: u16 = 0; 13 | pub const DEFAULT_ADNL_PORT: u16 = 30100; 14 | 15 | const ENV_CURRENCY: &str = "NODEKEEPER_CURRENCY"; 16 | const ENV_NODE_REPO: &str = "NODEKEEPER_NODE_REPO"; 17 | 18 | #[derive(Copy, Clone)] 19 | pub struct Values { 20 | pub currency: &'static str, 21 | pub node_repo: &'static str, 22 | } 23 | 24 | pub fn currency_from_env() -> Option<&'static str> { 25 | static ENV_VALUE: OnceBox> = OnceBox::new(); 26 | ENV_VALUE 27 | .get_or_init(|| Box::new(std::env::var(ENV_CURRENCY).ok())) 28 | .as_deref() 29 | } 30 | 31 | pub fn node_repo_from_env() -> Option<&'static str> { 32 | static ENV_VALUE: OnceBox> = OnceBox::new(); 33 | ENV_VALUE 34 | .get_or_init(|| 
Box::new(std::env::var(ENV_NODE_REPO).ok())) 35 | .as_deref() 36 | } 37 | 38 | macro_rules! decl_known_networks { 39 | ($ident:ident, { $($file_hash:literal => { currency: $currency:expr, node_repo: $node_repo:expr, }),*$(,)? }) => { 40 | pub fn $ident(zerostate_file_hash: &[u8; 32]) -> Option { 41 | static KNOWN_NETWORKS: OnceBox> = OnceBox::new(); 42 | KNOWN_NETWORKS.get_or_init(|| Box::new(HashMap::from([ 43 | $((parse_hex_or_base64($file_hash).unwrap().try_into().unwrap(), Values { 44 | currency: $currency, 45 | node_repo: $node_repo, 46 | })),* 47 | ]))) 48 | .get(zerostate_file_hash) 49 | .copied() 50 | } 51 | } 52 | } 53 | 54 | decl_known_networks! { 55 | detect_custom_defaults, { 56 | "ywj7H75tJ3PgbEeX+UNP3j0iR1x9imIIJJuQgrlCr8s=" => { 57 | currency: "VENOM", 58 | node_repo: "https://github.com/everx-labs/ever-node.git -f with_signature_id", 59 | }, 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/dirs.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | const ENV: &str = "NODEKEEPER_ROOT"; 4 | 5 | pub const VALIDATOR_SERVICE: &str = "validator"; 6 | pub const VALIDATOR_MANAGER_SERVICE: &str = "validator-manager"; 7 | pub const VALIDATOR_EXPORTER_SERVICE: &str = "validator-exporter"; 8 | 9 | pub struct ProjectDirs { 10 | pub app_config: PathBuf, 11 | pub node_config: PathBuf, 12 | pub node_log_config: PathBuf, 13 | pub global_config: PathBuf, 14 | pub node_configs_dir: PathBuf, 15 | pub binaries_dir: PathBuf, 16 | pub node_binary: PathBuf, 17 | pub default_node_db_dir: PathBuf, 18 | pub git_cache_dir: PathBuf, 19 | pub keys_dir: PathBuf, 20 | pub validator_keys: PathBuf, 21 | pub depool_keys: PathBuf, 22 | pub root: PathBuf, 23 | pub validator_service: PathBuf, 24 | pub validator_manager_service: PathBuf, 25 | pub validator_exporter_service: PathBuf, 26 | } 27 | 28 | impl ProjectDirs { 29 | pub fn new>(root_dir: P) -> Self { 30 
| let root = root_dir.as_ref().to_path_buf(); 31 | let node_configs_dir = root.join("node"); 32 | let binaries_dir = root.join("bin"); 33 | let git_cache_dir = root.join("git"); 34 | 35 | let node_binary = binaries_dir.join("node"); 36 | 37 | let systemd_root = PathBuf::from("/etc/systemd/system"); 38 | let validator_service = systemd_root.join(format!("{VALIDATOR_SERVICE}.service")); 39 | let validator_manager_service = 40 | systemd_root.join(format!("{VALIDATOR_MANAGER_SERVICE}.service")); 41 | let validator_exporter_service = 42 | systemd_root.join(format!("{VALIDATOR_EXPORTER_SERVICE}.service")); 43 | 44 | let keys_dir = root.join("keys"); 45 | let validator_keys = keys_dir.join("vld.keys.json"); 46 | let depool_keys = keys_dir.join("depool.keys.json"); 47 | 48 | #[cfg(feature = "packaged")] 49 | let default_node_db_dir = root.join("db"); 50 | #[cfg(not(feature = "packaged"))] 51 | let default_node_db_dir = PathBuf::from("/var/ever/rnode"); 52 | 53 | Self { 54 | app_config: root.join("config.toml"), 55 | node_config: node_configs_dir.join("config.json"), 56 | node_log_config: node_configs_dir.join("log_cfg.yml"), 57 | global_config: node_configs_dir.join("global-config.json"), 58 | node_configs_dir, 59 | binaries_dir, 60 | node_binary, 61 | default_node_db_dir, 62 | git_cache_dir, 63 | keys_dir, 64 | validator_keys, 65 | depool_keys, 66 | root, 67 | validator_service, 68 | validator_manager_service, 69 | validator_exporter_service, 70 | } 71 | } 72 | 73 | pub fn default_root_dir() -> PathBuf { 74 | if let Ok(path) = std::env::var(ENV) { 75 | PathBuf::from(path) 76 | } else { 77 | default_root_dir() 78 | } 79 | } 80 | } 81 | 82 | #[cfg(feature = "packaged")] 83 | fn default_root_dir() -> PathBuf { 84 | PathBuf::from("/var/nodekeeper") 85 | } 86 | 87 | #[cfg(not(feature = "packaged"))] 88 | fn default_root_dir() -> PathBuf { 89 | use crate::util::system; 90 | 91 | const DEFAULT_ROOT_DIR: &str = ".nodekeeper"; 92 | 93 | let home_dir = if let Some(uid) = 
system::get_sudo_uid().unwrap() { 94 | system::home_dir(uid) 95 | } else { 96 | home::home_dir() 97 | }; 98 | 99 | match home_dir { 100 | Some(home) => home.join(DEFAULT_ROOT_DIR), 101 | None => { 102 | panic!("No valid home directory path could be retrieved from the operating system") 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/exporter/file_target.rs: -------------------------------------------------------------------------------- 1 | use std::io::Write; 2 | use std::os::unix::prelude::OpenOptionsExt; 3 | use std::path::PathBuf; 4 | 5 | use anyhow::Result; 6 | 7 | use super::ExporterTarget; 8 | 9 | pub struct FileExporterTarget { 10 | file_path: PathBuf, 11 | temp_file_path: PathBuf, 12 | } 13 | 14 | impl FileExporterTarget { 15 | pub fn new(path: PathBuf) -> Self { 16 | let mut temp_extension = path.extension().unwrap_or_default().to_os_string(); 17 | temp_extension.push(std::ffi::OsString::from("temp")); 18 | 19 | let mut temp_file_path = path.clone(); 20 | temp_file_path.set_extension(temp_extension); 21 | 22 | Self { 23 | file_path: path, 24 | temp_file_path, 25 | } 26 | } 27 | } 28 | 29 | impl ExporterTarget for FileExporterTarget { 30 | fn target_name(&self) -> &'static str { 31 | "file_exporter" 32 | } 33 | 34 | fn write(&self, metrics: &dyn std::fmt::Display) -> Result<()> { 35 | let mut temp_file = std::fs::OpenOptions::new() 36 | .write(true) 37 | .truncate(true) 38 | .create(true) 39 | .mode(0o644) 40 | .open(&self.temp_file_path)?; 41 | 42 | write!(temp_file, "{metrics}")?; 43 | drop(temp_file); 44 | 45 | std::fs::rename(&self.temp_file_path, &self.file_path)?; 46 | Ok(()) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/exporter/http_target.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use std::sync::Arc; 3 | 4 | use anyhow::Result; 5 | 6 | use 
super::ExporterTarget; 7 | 8 | pub struct HttpExporterTarget { 9 | state: Arc>>, 10 | _exporter: Arc, 11 | } 12 | 13 | impl HttpExporterTarget { 14 | pub async fn new(addr: SocketAddr) -> Result { 15 | let (exporter, writer) = pomfrit::create_exporter(Some(pomfrit::Config { 16 | collection_interval_sec: 1, 17 | listen_address: addr, 18 | metrics_path: None, 19 | })) 20 | .await?; 21 | 22 | let state = Arc::new(parking_lot::RwLock::default()); 23 | writer.spawn({ 24 | let state = state.clone(); 25 | move |writer| { 26 | let metrics = state.read(); 27 | if let Some(metrics) = &*metrics { 28 | writer.write_str(metrics); 29 | } 30 | } 31 | }); 32 | 33 | Ok(Self { 34 | state, 35 | _exporter: exporter, 36 | }) 37 | } 38 | } 39 | 40 | impl ExporterTarget for HttpExporterTarget { 41 | fn target_name(&self) -> &'static str { 42 | "http_exporter" 43 | } 44 | 45 | fn write(&self, metrics: &dyn std::fmt::Display) -> Result<()> { 46 | let mut state = self.state.write(); 47 | *state = Some(metrics.to_string()); 48 | Ok(()) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/exporter/mod.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use anyhow::Result; 4 | use pomfrit::formatter::DisplayPrometheusExt; 5 | 6 | pub use self::file_target::FileExporterTarget; 7 | pub use self::http_target::HttpExporterTarget; 8 | pub use self::stdout_target::StdoutExporterTarget; 9 | use crate::config::{AppConfig, AppConfigValidator, DePoolType}; 10 | use crate::dirs::ProjectDirs; 11 | use crate::network::{NodeStats, NodeTcpRpc, ValidatorSetEntry}; 12 | 13 | mod file_target; 14 | mod http_target; 15 | mod stdout_target; 16 | 17 | pub struct Exporter { 18 | dirs: ProjectDirs, 19 | targets: Vec>, 20 | } 21 | 22 | impl Exporter { 23 | pub fn new(dirs: ProjectDirs, targets: Vec>) -> Self { 24 | Self { dirs, targets } 25 | } 26 | 27 | pub async fn serve(self, interval: Duration) 
{
        // Nothing to do if no exporter targets were configured.
        if self.targets.is_empty() {
            return;
        }

        let mut interval = tokio::time::interval(interval);
        loop {
            interval.tick().await;

            // (Re)create the node RPC client each tick; on failure still export
            // a fallback snapshot so consumers can observe exporter liveness.
            let (config, node_rpc) = match self.init_node_rpc().await {
                Ok(value) => value,
                Err((e, fallback)) => {
                    tracing::error!("failed to prepare exporter: {e:?}");
                    self.export(&fallback);
                    continue;
                }
            };

            if let Err(e) = self.collect(&config, &node_rpc).await {
                tracing::error!("failed to collect metrics: {e:?}");
            }
        }
    }

    /// Collects and exports metrics exactly once.
    pub async fn once(self) -> Result<()> {
        match self.init_node_rpc().await {
            Ok((config, node_rpc)) => self.collect(&config, &node_rpc).await,
            Err((e, _)) => Err(e),
        }
    }

    /// Queries node stats and renders them to all configured targets.
    async fn collect(&self, config: &AppConfig, node_rpc: &NodeTcpRpc) -> Result<()> {
        let stats = node_rpc.get_stats().await?;
        let collected_at = broxus_util::now();

        tracing::debug!("collected node stats");

        let metrics = Metrics {
            collected_at,
            config,
            stats: &stats,
        };
        self.export(&metrics);

        Ok(())
    }

    /// Writes the rendered metrics to every target. A failing target is logged
    /// and skipped so the remaining targets still receive the snapshot.
    fn export(&self, metrics: &dyn std::fmt::Display) {
        for target in &self.targets {
            if let Err(e) = target.write(metrics) {
                tracing::warn!(
                    "failed to write metrics to the {}: {e:?}",
                    target.target_name()
                );
            }
        }
    }

    /// Loads the app config and connects to the node control interface.
    ///
    /// On error, returns the error together with a `MetricsFallback` whose
    /// `config_is_valid` flag tells whether the config itself was loadable.
    async fn init_node_rpc(
        &self,
    ) -> Result<(AppConfig, NodeTcpRpc), (anyhow::Error, MetricsFallback)> {
        // NOTE(review): the const-generic parameter was stripped in the dump
        // ("config_is_valid: V"); restored here — the flag is chosen per call
        // site below (false only when the config file itself failed to load).
        fn fallback<const V: bool>(e: anyhow::Error) -> (anyhow::Error, MetricsFallback) {
            (e, MetricsFallback { config_is_valid: V })
        }

        let config = AppConfig::load(&self.dirs.app_config).map_err(fallback::<false>)?;
        let control = config.control().map_err(fallback::<true>)?;
        let node_rpc = NodeTcpRpc::new(control).await.map_err(fallback::<true>)?;
        Ok((config, node_rpc))
    }
}

/// A sink for rendered metrics (stdout, file, HTTP, ...).
pub trait ExporterTarget {
    /// Human-readable target name used in log messages.
    fn target_name(&self) -> &'static str;

    /// Writes one rendered metrics snapshot.
    fn write(&self, metrics: &dyn std::fmt::Display) -> Result<()>;
}

/// Minimal metrics emitted when the node RPC could not be prepared.
struct MetricsFallback {
    config_is_valid: bool,
}

impl std::fmt::Display for MetricsFallback {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.begin_metric(CONFIG_IS_VALID)
            .value(self.config_is_valid as u8)?;
        // Exporter is explicitly reported as not ready in the fallback case.
        f.begin_metric(EXPORTER_READY).value(0)
    }
}

/// Full metrics snapshot rendered from a successful stats query.
#[derive(Copy, Clone)]
struct Metrics<'a> {
    collected_at: u32,
    config: &'a AppConfig,
    stats: &'a NodeStats,
}

impl std::fmt::Display for Metrics<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        const NODE_READY: &str = "node_ready";
        const SYNC_STATUS: &str = "sync_status";

        f.begin_metric(CONFIG_IS_VALID).value(1)?;
        f.begin_metric(EXPORTER_READY).value(1)?;

        f.begin_metric("collected_at").value(self.collected_at)?;

        // A node that is not ready produces only the readiness metric.
        let stats = match self.stats {
            NodeStats::NotReady(sync_status) => {
                return f
                    .begin_metric(NODE_READY)
                    .label(SYNC_STATUS, sync_status)
                    .value(0)
            }
            NodeStats::Running(stats) => {
                f.begin_metric(NODE_READY)
                    .label(SYNC_STATUS, stats.sync_status)
                    .value(1)?;
                stats
            }
        };

        let node_version = &stats.node_version;

        f.begin_metric("node_version")
            .label(
                "version",
                format!(
                    "{}.{}.{}",
                    node_version.major, node_version.minor, node_version.patch
                ),
            )
            .value(0)?;

        f.begin_metric("node_version_major")
            .value(node_version.major)?;
        f.begin_metric("node_version_minor")
            .value(node_version.minor)?;
        f.begin_metric("node_version_patch")
            .value(node_version.patch)?;

        f.begin_metric("mc_seqno")
            .value(stats.last_mc_block.seq_no)?;

        f.begin_metric("mc_time").value(stats.mc_time)?;
        f.begin_metric("mc_time_diff").value(stats.mc_time_diff)?;
        f.begin_metric("sc_time_diff").value(stats.sc_time_diff)?;

        const ADNL_LABEL: &str = "adnl";

        const IN_CURRENT_VSET: &str = "in_current_vset";
        match &stats.in_current_vset {
            ValidatorSetEntry::None => {
                f.begin_metric(IN_CURRENT_VSET).value(0)?;
            }
            ValidatorSetEntry::Validator(adnl) => f
                .begin_metric(IN_CURRENT_VSET)
                .label(ADNL_LABEL, hex::encode(adnl))
                .value(1)?,
        };

        const IN_NEXT_VSET: &str = "in_next_vset";
        match &stats.in_next_vset {
            ValidatorSetEntry::None => {
                f.begin_metric(IN_NEXT_VSET).value(0)?;
            }
            ValidatorSetEntry::Validator(adnl) => f
                .begin_metric(IN_NEXT_VSET)
                .label(ADNL_LABEL, hex::encode(adnl))
                .value(1)?,
        };

        const VALIDATION_ENABLED: &str = "validation_enabled";
        const VALIDATOR_TYPE: &str = "validator_type";

        if let Some(validator) = &self.config.validator {
            f.begin_metric(VALIDATION_ENABLED).value(1)?;
            match validator {
                AppConfigValidator::Single(single) => {
                    f.begin_metric(VALIDATOR_TYPE).value(0)?;
                    f.begin_metric("validator_single_stake_per_round")
                        .label("validator", &single.address)
                        .value(single.stake_per_round)?;
                }
                AppConfigValidator::DePool(depool) => {
                    f.begin_metric(VALIDATOR_TYPE).value(1)?;
                    f.begin_metric("validator_depool_type")
                        .label("validator", &depool.owner)
                        .label("depool", &depool.depool)
                        .value(depool.depool_type.into_u8())?;
                }
            }
        } else {
            f.begin_metric(VALIDATION_ENABLED).value(0)?;
        }

        Ok(())
    }
}

impl DePoolType {
    /// Stable numeric encoding of the depool type for metrics consumers.
    fn into_u8(self) -> u8 {
        match self {
            Self::DefaultV3 => 0,
            Self::StEverV1 => 1,
            Self::StEverV2 => 2,
        }
    }
}

const CONFIG_IS_VALID: &str = "config_is_valid";
const EXPORTER_READY: &str = "exporter_ready";
--------------------------------------------------------------------------------
/src/exporter/stdout_target.rs:
--------------------------------------------------------------------------------
use std::io::Write;

use super::ExporterTarget;

/// Exporter target that prints metrics to standard output.
pub struct StdoutExporterTarget;

impl ExporterTarget for StdoutExporterTarget {
    fn target_name(&self) -> &'static str {
        "stdout_exporter"
    }

    fn write(&self, metrics: &dyn std::fmt::Display) -> anyhow::Result<()> {
        write!(std::io::stdout(), "{metrics}")?;
        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
use anyhow::Result;
use dialoguer::console;

/// Lazily-initialized process-wide singleton of the given type.
#[macro_export]
macro_rules! once {
    ($ty:path, || $expr:expr) => {{
        static ONCE: once_cell::race::OnceBox<$ty> = once_cell::race::OnceBox::new();
        ONCE.get_or_init(|| Box::new($expr))
    }};
}

/// Declares a small enum with a `Display` impl for interactive selectors.
macro_rules! selector_variant {
    ($ty:ident, { $($name:ident => $text:literal),*$(,)?
}) => {
        #[derive(Copy, Clone, Eq, PartialEq)]
        enum $ty {
            $($name),*,
        }

        impl $ty {
            #[allow(unused)]
            // NOTE(review): the generic argument was stripped in the dump
            // ("-> Vec {"); restored as `Vec<Self>`.
            fn all() -> Vec<Self> {
                vec![$(Self::$name),*]
            }
        }

        impl ::std::fmt::Display for $ty {
            fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
                f.write_str(match self {
                    $(Self::$name => $text),*,
                })
            }
        }
    };
}

mod cli;
mod config;
mod contracts;
mod crypto;
mod defaults;
mod dirs;
mod exporter;
mod network;
mod util;
mod validator;

#[tokio::main]
async fn main() -> Result<()> {
    // Use a more compact log format when running non-interactively
    // (e.g. under systemd, where the journal adds its own timestamps).
    if console::user_attended() {
        tracing_subscriber::fmt::init();
    } else {
        tracing_subscriber::fmt::fmt().without_time().init();
    }

    // NOTE(review): the turbofish type was stripped in the dump
    // ("from_env::>()"); `cli::App` is the presumed argument type — confirm
    // against src/cli/mod.rs.
    argh::from_env::<ArgsOrVersion<cli::App>>().0.run().await
}

/// Wrapper that adds `-v`/`--version` handling on top of any argh command.
struct ArgsOrVersion<T: argh::FromArgs>(T);

impl<T: argh::FromArgs> argh::TopLevelCommand for ArgsOrVersion<T> {}

impl<T: argh::FromArgs> argh::FromArgs for ArgsOrVersion<T> {
    fn from_args(command_name: &[&str], args: &[&str]) -> Result<Self, argh::EarlyExit> {
        /// Also use argh for catching `--version`-only invocations
        #[derive(argh::FromArgs)]
        struct Version {
            /// print version information and exit
            #[argh(switch, short = 'v')]
            pub version: bool,
        }

        match Version::from_args(command_name, args) {
            // `--version` was the only argument: print version and exit early.
            Ok(v) if v.version => Err(argh::EarlyExit {
                output: format!(
                    "{} {}",
                    command_name.first().unwrap_or(&""),
                    env!("CARGO_PKG_VERSION")
                ),
                status: Ok(()),
            }),
            // `--help` (or empty) path: append the version switch to the
            // inner command's generated help text.
            Err(exit) if exit.status.is_ok() => {
                let help = match T::from_args(command_name, &["--help"]) {
                    Ok(_) => unreachable!(),
                    Err(exit) => exit.output,
                };
                Err(argh::EarlyExit {
                    output: format!("{help}  -v, --version     print version information and exit"),
                    status: Ok(()),
                })
            }
            // Anything else: delegate to the wrapped command as usual.
            _ => T::from_args(command_name, args).map(Self),
        }
    }
}
-------------------------------------------------------------------------------- /src/network/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::node_tcp_rpc::*; 2 | pub use self::node_udp_rpc::NodeUdpRpc; 3 | pub use self::subscription::Subscription; 4 | 5 | mod node_tcp_rpc; 6 | mod node_udp_rpc; 7 | mod subscription; 8 | -------------------------------------------------------------------------------- /src/network/node_tcp_rpc/mod.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use anyhow::Result; 4 | use everscale_crypto::ed25519; 5 | use tl_proto::{IntermediateBytes, TlRead, TlWrite}; 6 | 7 | use self::stats::StatsError; 8 | pub use self::stats::{NodeStats, ValidatorSetEntry}; 9 | use self::tcp_adnl::{TcpAdnl, TcpAdnlConfig, TcpAdnlError}; 10 | use crate::config::AppConfigControl; 11 | 12 | mod proto; 13 | mod stats; 14 | mod tcp_adnl; 15 | 16 | #[derive(Clone)] 17 | pub struct NodeTcpRpc { 18 | tcp_adnl: TcpAdnl, 19 | query_timeout: Duration, 20 | } 21 | 22 | impl NodeTcpRpc { 23 | pub async fn new(config: &AppConfigControl) -> Result { 24 | let tcp_adnl = TcpAdnl::connect(TcpAdnlConfig { 25 | server_address: config.server_address.into(), 26 | server_pubkey: config.server_pubkey, 27 | client_secret: config.client_secret, 28 | connection_timeout: config.connection_timeout, 29 | }) 30 | .await 31 | .map_err(NodeRpcError::ConnectionFailed)?; 32 | 33 | let query_timeout = config.query_timeout; 34 | 35 | Ok(Self { 36 | tcp_adnl, 37 | query_timeout, 38 | }) 39 | } 40 | 41 | pub async fn generate_key_pair(&self) -> Result<[u8; 32]> { 42 | let proto::KeyHash { key_hash } = self.query(proto::GenerateKeyPair).await?; 43 | Ok(key_hash) 44 | } 45 | 46 | pub async fn export_public_key(&self, key_hash: &[u8; 32]) -> Result { 47 | let pubkey: everscale_crypto::tl::PublicKeyOwned = 48 | self.query(proto::ExportPublicKey { key_hash }).await?; 49 | 
ed25519::PublicKey::from_tl(pubkey.as_equivalent_ref()) 50 | .ok_or_else(|| NodeRpcError::InvalidPubkey.into()) 51 | } 52 | 53 | pub async fn sign(&self, key_hash: &[u8; 32], data: &[u8]) -> Result<[u8; 64]> { 54 | let proto::Signature { signature } = self.query(proto::Sign { key_hash, data }).await?; 55 | signature 56 | .try_into() 57 | .map_err(|_| NodeRpcError::InvalidSignature.into()) 58 | } 59 | 60 | pub async fn add_validator_permanent_key( 61 | &self, 62 | key_hash: &[u8; 32], 63 | election_date: u32, 64 | ttl: u32, 65 | ) -> Result<()> { 66 | self.query(proto::AddValidatorPermanentKey { 67 | key_hash, 68 | election_date, 69 | ttl, 70 | }) 71 | .await 72 | .map(expect_success) 73 | } 74 | 75 | pub async fn add_validator_adnl_address( 76 | &self, 77 | permanent_key_hash: &[u8; 32], 78 | key_hash: &[u8; 32], 79 | ttl: u32, 80 | ) -> Result<()> { 81 | self.query(proto::AddValidatorAdnlAddress { 82 | permanent_key_hash, 83 | key_hash, 84 | ttl, 85 | }) 86 | .await 87 | .map(expect_success) 88 | } 89 | 90 | pub async fn get_stats(&self) -> Result { 91 | let stats = self.query::<_, proto::Stats>(proto::GetStats).await?; 92 | NodeStats::try_from(stats).map_err(|e| NodeRpcError::InvalidStats(e).into()) 93 | } 94 | 95 | pub async fn get_raw_stats(&self) -> Result { 96 | let stats = self.query::<_, proto::Stats>(proto::GetStats).await?; 97 | 98 | let mut result = serde_json::Map::new(); 99 | for stat in stats.items { 100 | let key = String::from_utf8_lossy(&stat.key).into_owned(); 101 | let Ok(value) = serde_json::from_slice(&stat.value) else { 102 | continue; 103 | }; 104 | result.insert(key, value); 105 | } 106 | 107 | Ok(serde_json::Value::Object(result)) 108 | } 109 | 110 | pub async fn set_states_gc_interval(&self, interval_ms: u32) -> Result<()> { 111 | self.query(proto::SetStatesGcInterval { interval_ms }) 112 | .await 113 | .map(expect_success) 114 | } 115 | 116 | pub async fn send_message>(&self, message: T) -> Result<()> { 117 | // NOTE: proto::Success is 
used here on purpose instead of SendMsgStatus 118 | self.query(proto::SendMessage { 119 | body: message.as_ref(), 120 | }) 121 | .await 122 | .map(expect_success) 123 | } 124 | 125 | pub async fn get_config_all(&self) -> Result { 126 | use ton_block::Deserializable; 127 | 128 | let proto::ConfigInfo { 129 | id, config_proof, .. 130 | } = self 131 | .query(proto::GetConfigAll { 132 | mode: 0, 133 | id: proto::BlockIdExt::default(), 134 | }) 135 | .await?; 136 | 137 | Ok(ConfigWithId { 138 | block_id: convert_proto_to_block_id(id)?, 139 | config: ton_block::ConfigParams::construct_from_bytes(&config_proof) 140 | .map_err(|_| NodeRpcError::InvalidBlockchainConfig)?, 141 | }) 142 | } 143 | 144 | pub async fn get_config_param(&self, param: u32) -> Result { 145 | let proto::ConfigInfo { 146 | id, config_proof, .. 147 | } = self 148 | .query(proto::GetConfigParams { 149 | mode: 0, 150 | id: proto::BlockIdExt::default(), 151 | param_list: std::slice::from_ref(¶m), 152 | }) 153 | .await?; 154 | 155 | Ok(ConfigParamWithId { 156 | block_id: convert_proto_to_block_id(id)?, 157 | param: String::from_utf8(config_proof).map_err(|_| NodeRpcError::InvalidString)?, 158 | }) 159 | } 160 | 161 | pub async fn get_shard_account_state( 162 | &self, 163 | address: &ton_block::MsgAddressInt, 164 | ) -> Result { 165 | use ton_block::Deserializable; 166 | 167 | let shard_account = self 168 | .query::<_, proto::ShardAccount>(proto::GetShardAccountState { 169 | address: address.to_string().as_bytes(), 170 | }) 171 | .await?; 172 | 173 | match shard_account { 174 | proto::ShardAccount::State(data) => { 175 | ton_block::ShardAccount::construct_from_bytes(&data) 176 | .map_err(|_| NodeRpcError::InvalidAccountState.into()) 177 | } 178 | proto::ShardAccount::Empty => Ok(ton_block::ShardAccount::default()), 179 | } 180 | } 181 | 182 | async fn query(&self, query: Q) -> Result 183 | where 184 | Q: TlWrite, 185 | for<'a> R: TlRead<'a>, 186 | { 187 | enum QueryResponse { 188 | Ok(T), 189 | Err(String), 
190 | } 191 | 192 | impl<'a, R> tl_proto::TlRead<'a> for QueryResponse 193 | where 194 | R: TlRead<'a>, 195 | { 196 | type Repr = tl_proto::Boxed; 197 | 198 | fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { 199 | let constructor = { 200 | let mut offset: usize = *offset; 201 | ::read_from(packet, &mut offset)? 202 | }; 203 | if constructor == proto::ControlQueryError::TL_ID { 204 | let proto::ControlQueryError { message, .. } = <_>::read_from(packet, offset)?; 205 | Ok(QueryResponse::Err(message)) 206 | } else { 207 | ::read_from(packet, offset).map(QueryResponse::Ok) 208 | } 209 | } 210 | } 211 | 212 | match self 213 | .tcp_adnl 214 | .query( 215 | proto::ControlQuery(IntermediateBytes(query)), 216 | self.query_timeout, 217 | ) 218 | .await 219 | { 220 | Ok(Some(QueryResponse::Ok(data))) => Ok(data), 221 | Ok(Some(QueryResponse::Err(message))) => Err(anyhow::Error::msg(message)), 222 | Ok(None) => Err(NodeRpcError::QueryTimeout.into()), 223 | Err(e) => Err(NodeRpcError::QueryFailed(e).into()), 224 | } 225 | } 226 | } 227 | 228 | fn convert_proto_to_block_id( 229 | id: proto::BlockIdExtOwned, 230 | ) -> Result { 231 | Ok(ton_block::BlockIdExt { 232 | shard_id: ton_block::ShardIdent::with_tagged_prefix(id.workchain, id.shard) 233 | .map_err(|_| NodeRpcError::InvalidBlockId)?, 234 | seq_no: id.seqno, 235 | root_hash: id.root_hash.into(), 236 | file_hash: id.file_hash.into(), 237 | }) 238 | } 239 | 240 | pub struct ConfigWithId { 241 | pub block_id: ton_block::BlockIdExt, 242 | pub config: ton_block::ConfigParams, 243 | } 244 | 245 | pub struct ConfigParamWithId { 246 | pub block_id: ton_block::BlockIdExt, 247 | pub param: String, 248 | } 249 | 250 | fn expect_success(_: proto::Success) {} 251 | 252 | #[derive(thiserror::Error, Debug)] 253 | pub enum NodeRpcError { 254 | #[error("connection failed")] 255 | ConnectionFailed(#[source] TcpAdnlError), 256 | #[error("query failed")] 257 | QueryFailed(#[source] TcpAdnlError), 258 | #[error("query 
timeout")] 259 | QueryTimeout, 260 | #[error("invalid stats")] 261 | InvalidStats(#[source] StatsError), 262 | #[error("invalid pubkey")] 263 | InvalidPubkey, 264 | #[error("invalid signature")] 265 | InvalidSignature, 266 | #[error("invalid string")] 267 | InvalidString, 268 | #[error("invalid account state")] 269 | InvalidAccountState, 270 | #[error("invalid block id")] 271 | InvalidBlockId, 272 | #[error("invalid blockchain config")] 273 | InvalidBlockchainConfig, 274 | } 275 | -------------------------------------------------------------------------------- /src/network/node_tcp_rpc/proto.rs: -------------------------------------------------------------------------------- 1 | use tl_proto::{IntermediateBytes, TlRead, TlWrite}; 2 | 3 | #[derive(Debug, TlWrite)] 4 | #[tl(boxed, id = "engine.validator.controlQuery", scheme = "proto.tl")] 5 | pub struct ControlQuery(pub IntermediateBytes); 6 | 7 | #[derive(Debug, TlRead)] 8 | #[tl(boxed, id = "engine.validator.controlQueryError", scheme = "proto.tl")] 9 | pub struct ControlQueryError { 10 | pub code: i32, 11 | #[tl(with = "tl_string")] 12 | pub message: String, 13 | } 14 | 15 | #[derive(Copy, Clone, TlWrite)] 16 | #[tl(boxed, id = "engine.validator.generateKeyPair", scheme = "proto.tl")] 17 | pub struct GenerateKeyPair; 18 | 19 | #[derive(Copy, Clone, TlWrite)] 20 | #[tl(boxed, id = "engine.validator.exportPublicKey", scheme = "proto.tl")] 21 | pub struct ExportPublicKey<'tl> { 22 | #[tl(size_hint = 32)] 23 | pub key_hash: HashRef<'tl>, 24 | } 25 | 26 | #[derive(Copy, Clone, TlWrite)] 27 | #[tl(boxed, id = "engine.validator.sign", scheme = "proto.tl")] 28 | pub struct Sign<'tl> { 29 | #[tl(size_hint = 32)] 30 | pub key_hash: HashRef<'tl>, 31 | pub data: &'tl [u8], 32 | } 33 | 34 | #[derive(Copy, Clone, TlWrite)] 35 | #[tl( 36 | boxed, 37 | id = "engine.validator.addValidatorPermanentKey", 38 | scheme = "proto.tl" 39 | )] 40 | pub struct AddValidatorPermanentKey<'tl> { 41 | #[tl(size_hint = 32)] 42 | pub key_hash: 
HashRef<'tl>,
    #[tl(size_hint = 4)]
    pub election_date: u32,
    #[tl(size_hint = 4)]
    pub ttl: u32,
}

#[derive(Copy, Clone, TlWrite)]
#[tl(
    boxed,
    id = "engine.validator.addValidatorAdnlAddress",
    scheme = "proto.tl"
)]
pub struct AddValidatorAdnlAddress<'tl> {
    #[tl(size_hint = 32)]
    pub permanent_key_hash: HashRef<'tl>,
    #[tl(size_hint = 32)]
    pub key_hash: HashRef<'tl>,
    #[tl(size_hint = 4)]
    pub ttl: u32,
}

#[derive(Copy, Clone, TlWrite)]
#[tl(boxed, id = "engine.validator.getStats", scheme = "proto.tl")]
pub struct GetStats;

#[derive(Copy, Clone, TlWrite)]
#[tl(
    boxed,
    id = "engine.validator.setStatesGcInterval",
    scheme = "proto.tl"
)]
pub struct SetStatesGcInterval {
    pub interval_ms: u32,
}

#[derive(Copy, Clone, TlWrite)]
#[tl(boxed, id = "liteServer.sendMessage", scheme = "proto.tl")]
pub struct SendMessage<'tl> {
    pub body: &'tl [u8],
}

#[derive(Copy, Clone, TlWrite)]
#[tl(boxed, id = "liteServer.getConfigAll", scheme = "proto.tl")]
pub struct GetConfigAll<'tl> {
    pub mode: u32,
    pub id: BlockIdExt<'tl>,
}

#[derive(Clone, TlWrite)]
#[tl(boxed, id = "liteServer.getConfigParams", scheme = "proto.tl")]
pub struct GetConfigParams<'tl> {
    pub mode: u32,
    pub id: BlockIdExt<'tl>,
    pub param_list: &'tl [u32],
}

#[derive(Copy, Clone, TlWrite)]
#[tl(boxed, id = "raw.getShardAccountState", scheme = "proto.tl")]
pub struct GetShardAccountState<'tl> {
    pub address: &'tl [u8],
}

/// Borrowed block id in wire layout (workchain, shard, seqno, two hashes).
#[derive(Copy, Clone, TlRead, TlWrite)]
#[tl(size_hint = 80)]
pub struct BlockIdExt<'tl> {
    pub workchain: i32,
    pub shard: u64,
    pub seqno: u32,
    pub root_hash: HashRef<'tl>,
    pub file_hash: HashRef<'tl>,
}

impl Default for BlockIdExt<'static> {
    fn default() -> Self {
        Self {
            workchain: 0,
            shard: 0x8000000000000000,
            seqno: 0,
            root_hash: &[0; 32],
            file_hash: &[0; 32],
        }
    }
}

/// Owned counterpart of [`BlockIdExt`] used in responses.
#[derive(Copy, Clone, Debug, TlRead, TlWrite)]
#[tl(size_hint = 80)]
pub struct BlockIdExtOwned {
    pub workchain: i32,
    pub shard: u64,
    pub seqno: u32,
    pub root_hash: [u8; 32],
    pub file_hash: [u8; 32],
}

#[derive(Copy, Clone, TlRead)]
#[tl(boxed, id = "engine.validator.success", scheme = "proto.tl")]
pub struct Success;

#[derive(Copy, Clone, TlRead)]
#[tl(boxed, id = "engine.validator.keyHash", scheme = "proto.tl")]
pub struct KeyHash {
    pub key_hash: [u8; 32],
}

#[derive(Clone, TlRead)]
#[tl(boxed, id = "engine.validator.signature", scheme = "proto.tl")]
pub struct Signature {
    pub signature: Vec<u8>,
}

#[derive(Clone, TlRead)]
#[tl(boxed, id = "engine.validator.stats", scheme = "proto.tl")]
pub struct Stats {
    pub items: Vec<OneState>,
}

/// One raw stats entry: key and value are uninterpreted byte strings.
#[derive(Clone, TlRead)]
pub struct OneState {
    pub key: Vec<u8>,
    pub value: Vec<u8>,
}

#[derive(Clone, Debug, TlRead)]
#[tl(boxed, id = "liteServer.configInfo", scheme = "proto.tl")]
pub struct ConfigInfo {
    pub mode: u32,
    pub id: BlockIdExtOwned,
    pub state_proof: Vec<u8>,
    pub config_proof: Vec<u8>,
}

#[derive(Clone, TlRead)]
#[tl(boxed, scheme = "proto.tl")]
pub enum ShardAccount {
    #[tl(id = "raw.shardAccountState")]
    State(Vec<u8>),
    #[tl(id = "raw.shardAccountNone")]
    Empty,
}

pub type HashRef<'tl> = &'tl [u8; 32];

/// Reads a TL byte string as UTF-8 (lossy) for the `#[tl(with = ...)]` hook.
mod tl_string {
    use tl_proto::{TlRead, TlResult};

    pub fn read(packet: &[u8], offset: &mut usize) -> TlResult<String> {
        let bytes = <&[u8]>::read_from(packet, offset)?;
Ok(String::from_utf8_lossy(bytes).into_owned())
    }
}
--------------------------------------------------------------------------------
/src/network/node_tcp_rpc/stats.rs:
--------------------------------------------------------------------------------
use std::str::FromStr;

use broxus_util::{serde_base64_array, serde_hex_array};
use serde::{Deserialize, Serialize};

use super::proto;
use crate::util::serde_block_id;

/// Node stats parsed from the raw control-interface key/value pairs.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "snake_case", tag = "state")]
pub enum NodeStats {
    Running(RunningStats),
    NotReady(SyncStatus),
}

impl NodeStats {
    /// Unwraps the running stats, failing if the node is not ready.
    pub fn try_into_running(self) -> Result<RunningStats, StatsError> {
        match self {
            Self::Running(stats) => Ok(stats),
            Self::NotReady(_) => Err(StatsError::NotReady),
        }
    }
}

/// Stats available once the node has finished synchronization.
#[derive(Clone, Debug, Serialize)]
pub struct RunningStats {
    pub sync_status: SyncStatus,
    pub node_version: NodeVersion,
    #[serde(with = "serde_hex_array")]
    pub overlay_adnl_id: [u8; 32],
    pub mc_time: u32,
    pub mc_time_diff: i32,
    pub sc_time_diff: i32,
    #[serde(with = "serde_block_id")]
    pub last_mc_block: ton_block::BlockIdExt,
    pub in_current_vset: ValidatorSetEntry,
    pub in_next_vset: ValidatorSetEntry,
}

impl TryFrom<proto::Stats> for NodeStats {
    type Error = StatsError;

    fn try_from(stats: proto::Stats) -> Result<Self, Self::Error> {
        let mut sync_status = None;
        let mut mc_time = None;
        let mut mc_time_diff = None;
        let mut sc_time_diff = None;
        let mut node_version = None;
        let mut overlay_adnl_id = None;
        let mut in_current_vset = None;
        let mut current_vset_adnl = None;
        let mut in_next_vset = None;
        let mut next_vset_adnl = None;
        let mut last_mc_block = None;

        // Each stats value is a standalone JSON document.
        #[inline]
        fn parse_stat<'de, T: Deserialize<'de>>(value: &'de [u8]) -> Result<T, StatsError> {
            serde_json::from_slice::<T>(value).map_err(|_| StatsError::InvalidValue)
        }

        #[derive(Debug, Deserialize)]
        struct KeyHash(#[serde(with = "serde_base64_array")] [u8; 32]);

        for item in stats.items {
            match item.key.as_slice() {
                STATS_SYNC_STATUS | STATS_NODE_STATUS => {
                    sync_status = Some(parse_stat::<SyncStatus>(&item.value)?);
                }
                STATS_MC_BLOCK_TIME => {
                    mc_time = Some(parse_stat::<u32>(&item.value)?);
                }
                STATS_NODE_VERSION => {
                    let str = parse_stat::<String>(&item.value)?;
                    node_version = Some(NodeVersion::from_str(&str)?);
                }
                STATS_PUBLIC_OVERLAY_ADNL_ID => {
                    let KeyHash(id) = parse_stat::<KeyHash>(&item.value)?;
                    overlay_adnl_id = Some(id);
                }
                STATS_TIMEDIFF => {
                    mc_time_diff = Some(parse_stat::<i32>(&item.value)?);
                }
                STATS_SHARDS_TIMEDIFF if item.value != VALUE_UNKNOWN => {
                    sc_time_diff = Some(parse_stat::<i32>(&item.value)?);
                }
                STATS_IN_CURRENT_VSET if item.value != VALUE_UNKNOWN => {
                    in_current_vset = Some(parse_stat::<bool>(&item.value)?);
                }
                STATS_CURRENT_VSET_ADNL => {
                    current_vset_adnl = Some(parse_stat(&item.value)?);
                }
                STATS_IN_NEXT_VSET if item.value != VALUE_UNKNOWN => {
                    in_next_vset = Some(parse_stat::<bool>(&item.value)?);
                }
                STATS_NEXT_VSET_ADNL => {
                    next_vset_adnl = Some(parse_stat(&item.value)?);
                }
                STATS_LAST_APPLIED_MC_BLOCK => {
                    #[derive(Debug, Deserialize)]
                    struct Block<'a> {
                        shard: &'a str,
                        seq_no: u32,
                        #[serde(with = "serde_hex_array")]
                        rh: [u8; 32],
                        #[serde(with = "serde_hex_array")]
                        fh: [u8; 32],
                    }
                    // Best-effort: skip this key on parse failure rather than
                    // failing the whole stats conversion.
                    let Ok(block) = parse_stat::<Block>(&item.value) else {
                        continue;
                    };

                    // Shard is encoded as "<workchain>:<shard-hex>".
                    let mut shard_parts = block.shard.split(':');
                    let shard_id = match (shard_parts.next(), shard_parts.next()) {
                        (Some(wc), Some(shard)) => {
                            let wc = i32::from_str(wc).map_err(|_| StatsError::InvalidValue)?;
                            let shard = u64::from_str_radix(shard, 16)
                                .map_err(|_| StatsError::InvalidValue)?;
                            ton_block::ShardIdent::with_tagged_prefix(wc, shard)
                                .map_err(|_| StatsError::InvalidValue)?
                        }
                        _ => return Err(StatsError::InvalidValue),
                    };

                    last_mc_block = Some(ton_block::BlockIdExt {
                        shard_id,
                        seq_no: block.seq_no,
                        root_hash: block.rh.into(),
                        file_hash: block.fh.into(),
                    });
                }
                _ => {}
            }
        }

        let sync_status = sync_status.unwrap_or(SyncStatus::NoSetStatus);
        if sync_status != SyncStatus::SynchronizationFinished {
            return Ok(Self::NotReady(sync_status));
        }

        match (
            mc_time,
            mc_time_diff,
            sc_time_diff,
            last_mc_block,
            node_version,
            overlay_adnl_id,
        ) {
            (
                Some(mc_time),
                Some(mc_time_diff),
                Some(sc_time_diff),
                Some(last_mc_block),
                Some(node_version),
                Some(overlay_adnl_id),
            ) => {
                // Being in a vset requires the matching ADNL id to be present.
                let in_current_vset = match (in_current_vset, current_vset_adnl) {
                    (Some(true), Some(KeyHash(adnl))) => ValidatorSetEntry::Validator(adnl),
                    (Some(true), None) => return Err(StatsError::FieldsMissing),
                    _ => ValidatorSetEntry::None,
                };

                let in_next_vset = match (in_next_vset, next_vset_adnl) {
                    (Some(true), Some(KeyHash(adnl))) => ValidatorSetEntry::Validator(adnl),
                    (Some(true), None) => return Err(StatsError::FieldsMissing),
                    _ => ValidatorSetEntry::None,
                };

                Ok(Self::Running(RunningStats {
                    sync_status,
                    node_version,
                    overlay_adnl_id,
                    mc_time,
                    mc_time_diff,
                    sc_time_diff,
                    last_mc_block,
                    in_current_vset,
                    in_next_vset,
                }))
            }
            _ => Err(StatsError::FieldsMissing),
        }
    }
}

/// Semantic node version parsed from a "major.minor.patch" string.
#[derive(Copy, Clone, Debug, Serialize)]
pub struct NodeVersion {
    pub major: u32,
    pub minor: u32,
    pub patch: u32,
}

impl FromStr for NodeVersion {
    type Err = StatsError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut parts = s.split('.');

        fn parse_part(part: &str) -> Result<u32, StatsError> {
            u32::from_str(part).map_err(|_| StatsError::InvalidValue)
        }

        match (parts.next(), parts.next(), parts.next()) {
            (Some(major), Some(minor), Some(patch)) => Ok(Self {
                major: parse_part(major)?,
                minor: parse_part(minor)?,
                patch: parse_part(patch)?,
            }),
            _ => Err(StatsError::InvalidValue),
        }
    }
}

/// Whether this node participates in a validator set, and with which ADNL id.
#[derive(Copy, Clone, Debug)]
pub enum ValidatorSetEntry {
    None,
    Validator([u8; 32]),
}

impl Serialize for ValidatorSetEntry {
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        match self {
            Self::None => serializer.serialize_none(),
            Self::Validator(adnl) => serializer.serialize_some(&hex::encode(adnl)),
        }
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SyncStatus {
    StartBoot,
    LoadMasterState,
    LoadShardStates,
    FinishBoot,
    SynchronizationByBlocks,
    SynchronizationFinished,
    CheckingDb,
    DbBroken,
    NoSetStatus,
}

impl std::fmt::Display for SyncStatus {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        std::fmt::Debug::fmt(self, f)
    }
}

#[derive(thiserror::Error, Debug)]
pub enum StatsError {
    #[error("node is not ready")]
    NotReady,
    #[error("invalid value")]
    InvalidValue,
    #[error("fields missing")]
    FieldsMissing,
}

const STATS_SYNC_STATUS: &[u8] = b"sync_status";
const STATS_NODE_STATUS: &[u8] = b"node_status";
const STATS_MC_BLOCK_TIME: &[u8] = b"masterchainblocktime";
const STATS_NODE_VERSION: &[u8] = b"node_version";
const STATS_PUBLIC_OVERLAY_ADNL_ID: &[u8] = b"public_overlay_key_id";
const STATS_TIMEDIFF: &[u8] = b"timediff";
const STATS_SHARDS_TIMEDIFF: &[u8] = b"shards_timediff";
const STATS_IN_CURRENT_VSET: &[u8] = b"in_current_vset_p34";
const STATS_CURRENT_VSET_ADNL: &[u8] = b"current_vset_p34_adnl_id";
const STATS_IN_NEXT_VSET: &[u8] = b"in_next_vset_p36";
const STATS_NEXT_VSET_ADNL: &[u8] = b"next_vset_p36_adnl_id";
const STATS_LAST_APPLIED_MC_BLOCK: &[u8] = b"last_applied_masterchain_block_id";

const VALUE_UNKNOWN: &[u8] = b"\"unknown\"";
--------------------------------------------------------------------------------
/src/network/node_tcp_rpc/tcp_adnl/mod.rs:
--------------------------------------------------------------------------------
use std::net::SocketAddr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;

use ctr::cipher::{KeyIvInit, StreamCipher};
use everscale_crypto::ed25519;
use rand::{Rng, RngCore};
use sha2::Digest;
use tl_proto::{IntermediateBytes, TlRead, TlWrite};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

use self::queries_cache::QueriesCache;

mod queries_cache;

/// Connection parameters for the ADNL-over-TCP control channel.
pub struct TcpAdnlConfig {
    pub server_address: SocketAddr,
    pub server_pubkey: ed25519::PublicKey,
    pub client_secret: ed25519::SecretKey,
    pub connection_timeout: Duration,
}

#[derive(Clone)]
pub struct TcpAdnl {
    // NOTE(review): the generic argument of `Arc` was lost in transit;
    // `SharedState` (defined later in this file) is the presumed payload
    // — TODO confirm against the rest of tcp_adnl/mod.rs.
    state: Arc<SharedState>,
}

impl TcpAdnl {
    /// Establishes the TCP connection within the configured timeout.
    pub async fn connect(config: TcpAdnlConfig) -> Result<Self, TcpAdnlError> {
        let (socket_rx, socket_tx) = match tokio::time::timeout(
            config.connection_timeout,
            TcpStream::connect(config.server_address),
        )
        .await
        {
            Ok(connection) => connection
                .map_err(TcpAdnlError::ConnectionError)?
42 | .into_split(), 43 | Err(_) => return Err(TcpAdnlError::ConnectionTimeout), 44 | }; 45 | 46 | let mut initial_buffer = vec![0; 160]; 47 | rand::thread_rng().fill_bytes(&mut initial_buffer); 48 | 49 | let cipher_receive = Aes256Ctr::new( 50 | generic_array::GenericArray::from_slice(&initial_buffer[0..32]), 51 | generic_array::GenericArray::from_slice(&initial_buffer[64..80]), 52 | ); 53 | let cipher_send = Aes256Ctr::new( 54 | generic_array::GenericArray::from_slice(&initial_buffer[32..64]), 55 | generic_array::GenericArray::from_slice(&initial_buffer[80..96]), 56 | ); 57 | 58 | let (tx, rx) = mpsc::unbounded_channel::(); 59 | 60 | let state = Arc::new(SharedState { 61 | queries_cache: Arc::new(Default::default()), 62 | cancellation_token: Default::default(), 63 | packets_tx: tx, 64 | query_id: Default::default(), 65 | }); 66 | 67 | tokio::spawn(socket_writer( 68 | socket_tx, 69 | cipher_send, 70 | rx, 71 | state.cancellation_token.clone(), 72 | )); 73 | tokio::spawn(socket_reader( 74 | socket_rx, 75 | cipher_receive, 76 | state.queries_cache.clone(), 77 | state.cancellation_token.clone(), 78 | )); 79 | 80 | build_handshake_packet( 81 | &config.server_pubkey, 82 | &config.client_secret, 83 | &mut initial_buffer, 84 | ); 85 | state 86 | .packets_tx 87 | .send(Packet::unencrypted(initial_buffer)) 88 | .ok() 89 | .unwrap(); 90 | 91 | Ok(Self { state }) 92 | } 93 | 94 | pub async fn query(&self, query: Q, timeout: Duration) -> Result, TcpAdnlError> 95 | where 96 | Q: TlWrite, 97 | for<'a> R: TlRead<'a>, 98 | { 99 | let cancelled = self.state.cancellation_token.cancelled(); 100 | if self.state.cancellation_token.is_cancelled() { 101 | return Err(TcpAdnlError::SocketClosed); 102 | } 103 | 104 | let mut query_id = [0; 32]; 105 | query_id[..std::mem::size_of::()].copy_from_slice( 106 | &self 107 | .state 108 | .query_id 109 | .fetch_add(1, Ordering::AcqRel) 110 | .to_le_bytes(), 111 | ); 112 | 113 | let data = tl_proto::serialize(AdnlMessageQuery { 114 | query_id: 
&query_id, 115 | query: IntermediateBytes(query), 116 | }); 117 | 118 | let pending_query = self.state.queries_cache.add_query(query_id); 119 | if self.state.packets_tx.send(Packet::encrypted(data)).is_err() { 120 | return Err(TcpAdnlError::SocketClosed); 121 | } 122 | 123 | let answer = tokio::select! { 124 | res = tokio::time::timeout(timeout, pending_query.wait()) => { 125 | res.ok().flatten() 126 | } 127 | _ = cancelled => return Err(TcpAdnlError::SocketClosed), 128 | }; 129 | 130 | Ok(match answer { 131 | Some(query) => { 132 | Some(tl_proto::deserialize(&query).map_err(TcpAdnlError::InvalidAnswer)?) 133 | } 134 | None => None, 135 | }) 136 | } 137 | } 138 | 139 | struct SharedState { 140 | queries_cache: Arc, 141 | cancellation_token: CancellationToken, 142 | packets_tx: PacketsTx, 143 | query_id: AtomicUsize, 144 | } 145 | 146 | impl Drop for SharedState { 147 | fn drop(&mut self) { 148 | self.cancellation_token.cancel(); 149 | } 150 | } 151 | 152 | async fn socket_writer( 153 | mut socket: T, 154 | mut cipher: Aes256Ctr, 155 | mut rx: PacketsRx, 156 | cancellation_token: CancellationToken, 157 | ) where 158 | T: AsyncWrite + Unpin, 159 | { 160 | tokio::pin!(let cancelled = cancellation_token.cancelled();); 161 | 162 | while let Some(mut packet) = rx.recv().await { 163 | let data = &mut packet.data; 164 | 165 | if packet.encrypt { 166 | let len = data.len(); 167 | 168 | data.reserve(len + 68); 169 | data.resize(len + 36, 0); 170 | data.copy_within(..len, 36); 171 | data[..4].copy_from_slice(&((len + 64) as u32).to_le_bytes()); 172 | 173 | let nonce: [u8; 32] = rand::thread_rng().gen(); 174 | data[4..36].copy_from_slice(&nonce); 175 | 176 | data.extend_from_slice(sha2::Sha256::digest(&data[4..]).as_slice()); 177 | 178 | cipher.apply_keystream(data); 179 | } 180 | 181 | tokio::select! 
{ 182 | res = socket.write_all(data) => match res { 183 | Ok(_) => continue, 184 | Err(e) => { 185 | if !cancellation_token.is_cancelled() { 186 | cancellation_token.cancel(); 187 | tracing::error!("failed to write data to the socket: {e:?}"); 188 | } 189 | break; 190 | } 191 | }, 192 | _ = &mut cancelled => break, 193 | } 194 | } 195 | 196 | tracing::debug!("sender loop finished"); 197 | } 198 | 199 | async fn socket_reader( 200 | mut socket: T, 201 | mut cipher: Aes256Ctr, 202 | queries_cache: Arc, 203 | cancellation_token: CancellationToken, 204 | ) where 205 | T: AsyncRead + Unpin, 206 | { 207 | tokio::pin!(let cancelled = cancellation_token.cancelled();); 208 | 209 | loop { 210 | let mut length = [0; 4]; 211 | tokio::select! { 212 | res = socket.read_exact(&mut length) => match res { 213 | Ok(_) => cipher.apply_keystream(&mut length), 214 | Err(e) => { 215 | if !cancellation_token.is_cancelled() { 216 | cancellation_token.cancel(); 217 | tracing::error!("failed to read length from the socket: {e:?}"); 218 | } 219 | break; 220 | } 221 | }, 222 | _ = &mut cancelled => break, 223 | } 224 | 225 | let length = u32::from_le_bytes(length) as usize; 226 | if length < 64 { 227 | continue; 228 | } 229 | 230 | let mut buffer = vec![0; length]; 231 | tokio::select! 
{ 232 | res = socket.read_exact(&mut buffer) => match res { 233 | Ok(_) => cipher.apply_keystream(&mut buffer), 234 | Err(e) => { 235 | if !cancellation_token.is_cancelled() { 236 | cancellation_token.cancel(); 237 | tracing::error!("failed to read data from the socket: {e:?}"); 238 | } 239 | break; 240 | } 241 | }, 242 | _ = &mut cancelled => break, 243 | } 244 | 245 | if !sha2::Sha256::digest(&buffer[..length - 32]) 246 | .as_slice() 247 | .eq(&buffer[length - 32..length]) 248 | { 249 | tracing::warn!("packet checksum mismatch"); 250 | continue; 251 | } 252 | 253 | buffer.truncate(length - 32); 254 | buffer.drain(..32); 255 | 256 | if buffer.is_empty() { 257 | continue; 258 | } 259 | 260 | match tl_proto::deserialize::(&buffer) { 261 | Ok(AdnlMessageAnswer { query_id, data }) => { 262 | queries_cache.update_query(query_id, data); 263 | } 264 | Err(e) => tracing::warn!("invalid response: {e:?}"), 265 | }; 266 | } 267 | 268 | tracing::debug!("receiver loop finished"); 269 | } 270 | 271 | struct Packet { 272 | data: Vec, 273 | encrypt: bool, 274 | } 275 | 276 | impl Packet { 277 | fn encrypted(data: Vec) -> Self { 278 | Self { 279 | data, 280 | encrypt: true, 281 | } 282 | } 283 | 284 | fn unencrypted(data: Vec) -> Self { 285 | Self { 286 | data, 287 | encrypt: false, 288 | } 289 | } 290 | } 291 | 292 | type PacketsTx = mpsc::UnboundedSender; 293 | type PacketsRx = mpsc::UnboundedReceiver; 294 | 295 | pub fn build_handshake_packet( 296 | server_pubkey: &ed25519::PublicKey, 297 | client_secret: &ed25519::SecretKey, 298 | buffer: &mut Vec, 299 | ) { 300 | let server_short_id = tl_proto::hash(server_pubkey.as_tl()); 301 | let client_public_key = ed25519::PublicKey::from(client_secret); 302 | 303 | let shared_secret = client_secret.expand().compute_shared_secret(server_pubkey); 304 | 305 | // Prepare packet 306 | let checksum: [u8; 32] = sha2::Sha256::digest(buffer.as_slice()).into(); 307 | 308 | let length = buffer.len(); 309 | buffer.resize(length + 96, 0); 310 | 
buffer.copy_within(..length, 96); 311 | 312 | buffer[..32].copy_from_slice(server_short_id.as_slice()); 313 | buffer[32..64].copy_from_slice(client_public_key.as_bytes()); 314 | buffer[64..96].copy_from_slice(&checksum); 315 | 316 | // Encrypt packet data 317 | build_packet_cipher(&shared_secret, &checksum).apply_keystream(&mut buffer[96..]); 318 | } 319 | 320 | pub fn build_packet_cipher(shared_secret: &[u8; 32], checksum: &[u8; 32]) -> Aes256Ctr { 321 | let mut aes_key_bytes: [u8; 32] = *shared_secret; 322 | aes_key_bytes[16..32].copy_from_slice(&checksum[16..32]); 323 | let mut aes_ctr_bytes: [u8; 16] = checksum[0..16].try_into().unwrap(); 324 | aes_ctr_bytes[4..16].copy_from_slice(&shared_secret[20..32]); 325 | 326 | Aes256Ctr::new( 327 | &generic_array::GenericArray::from(aes_key_bytes), 328 | &generic_array::GenericArray::from(aes_ctr_bytes), 329 | ) 330 | } 331 | 332 | #[derive(Clone, TlWrite)] 333 | #[tl(boxed, id = "adnl.message.query", scheme = "proto.tl")] 334 | struct AdnlMessageQuery<'tl, T> { 335 | #[tl(size_hint = 32)] 336 | query_id: &'tl [u8; 32], 337 | query: IntermediateBytes, 338 | } 339 | 340 | #[derive(Copy, Clone, TlRead)] 341 | #[tl(boxed, id = "adnl.message.answer", scheme = "proto.tl")] 342 | struct AdnlMessageAnswer<'tl> { 343 | #[tl(size_hint = 32)] 344 | query_id: &'tl [u8; 32], 345 | data: &'tl [u8], 346 | } 347 | 348 | #[derive(thiserror::Error, Debug)] 349 | pub enum TcpAdnlError { 350 | #[error("connection timeout")] 351 | ConnectionTimeout, 352 | #[error("failed to open connection")] 353 | ConnectionError(#[source] std::io::Error), 354 | #[error("socket closed")] 355 | SocketClosed, 356 | #[error("invalid answer")] 357 | InvalidAnswer(#[source] tl_proto::TlError), 358 | } 359 | 360 | pub type Aes256Ctr = ctr::Ctr64BE; 361 | -------------------------------------------------------------------------------- /src/network/node_tcp_rpc/tcp_adnl/queries_cache.rs: 
-------------------------------------------------------------------------------- 1 | use std::sync::{Arc, Weak}; 2 | 3 | use tokio::sync::oneshot; 4 | 5 | use crate::util::FxDashMap; 6 | 7 | #[derive(Default)] 8 | pub struct QueriesCache { 9 | queries: FxDashMap<[u8; 32], DataTx>, 10 | } 11 | 12 | impl QueriesCache { 13 | pub fn add_query(self: &Arc, query_id: [u8; 32]) -> PendingAdnlQuery { 14 | let (tx, rx) = oneshot::channel(); 15 | 16 | self.queries.insert(query_id, tx); 17 | 18 | PendingAdnlQuery { 19 | query_id, 20 | data_rx: Some(rx), 21 | cache: Arc::downgrade(self), 22 | finished: false, 23 | } 24 | } 25 | 26 | pub fn update_query(&self, query_id: &[u8; 32], answer: &[u8]) { 27 | if let Some((_, tx)) = self.queries.remove(query_id) { 28 | tx.send(answer.to_vec()).ok(); 29 | } 30 | } 31 | } 32 | 33 | pub struct PendingAdnlQuery { 34 | query_id: [u8; 32], 35 | data_rx: Option, 36 | cache: Weak, 37 | finished: bool, 38 | } 39 | 40 | impl PendingAdnlQuery { 41 | pub async fn wait(mut self) -> Option> { 42 | // SAFETY: `data_rx` is guaranteed to be `Some` 43 | let data_rx = unsafe { self.data_rx.take().unwrap_unchecked() }; 44 | let data = data_rx.await.ok(); 45 | self.finished = true; 46 | data 47 | } 48 | } 49 | 50 | impl Drop for PendingAdnlQuery { 51 | fn drop(&mut self) { 52 | if self.finished { 53 | return; 54 | } 55 | 56 | if let Some(cache) = self.cache.upgrade() { 57 | cache.queries.remove(&self.query_id); 58 | } 59 | } 60 | } 61 | 62 | type DataTx = oneshot::Sender>; 63 | type DataRx = oneshot::Receiver>; 64 | -------------------------------------------------------------------------------- /src/network/node_udp_rpc/mod.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddrV4; 2 | use std::sync::Arc; 3 | use std::time::Duration; 4 | 5 | use anyhow::{Context, Result}; 6 | use everscale_network::{adnl, overlay, rldp, NetworkBuilder}; 7 | use parking_lot::Mutex; 8 | use rand::Rng; 9 | use 
tl_proto::{TlRead, TlWrite}; 10 | 11 | use crate::config::AppConfigAdnl; 12 | use crate::util::BlockStuff; 13 | 14 | mod proto; 15 | #[derive(Clone)] 16 | pub struct NodeUdpRpc { 17 | inner: Arc, 18 | } 19 | 20 | impl NodeUdpRpc { 21 | pub async fn new(config: &AppConfigAdnl) -> Result { 22 | // Resolve public ip 23 | let ip_addr = public_ip::addr_v4() 24 | .await 25 | .context("failed to resolve public ip")?; 26 | 27 | // Build keystore 28 | let keystore = adnl::Keystore::builder() 29 | .with_tagged_key(*session_keys(), KEY_TAG)? 30 | .build(); 31 | 32 | // Build network 33 | let rldp_options = rldp::NodeOptions { 34 | force_compression: true, 35 | ..Default::default() 36 | }; 37 | 38 | let (adnl, rldp) = NetworkBuilder::with_adnl( 39 | SocketAddrV4::new(ip_addr, config.client_port), 40 | keystore, 41 | adnl::NodeOptions { 42 | use_loopback_for_neighbours: true, 43 | ..Default::default() 44 | }, 45 | ) 46 | .with_rldp(rldp_options) 47 | .build() 48 | .context("failed to build network stack")?; 49 | 50 | // Prepare overlay prefix 51 | let overlay_id_full = overlay::IdFull::for_workchain_overlay( 52 | ton_block::MASTERCHAIN_ID, 53 | &config.zerostate_file_hash, 54 | ); 55 | let overlay_id = overlay_id_full.compute_short_id(); 56 | 57 | let query_prefix = tl_proto::serialize(everscale_network::proto::rpc::OverlayQuery { 58 | overlay: overlay_id.as_slice(), 59 | }); 60 | 61 | // Add server as peer 62 | let peer_id_full = adnl::NodeIdFull::new(config.server_pubkey); 63 | let peer_id = peer_id_full.compute_short_id(); 64 | 65 | let local_id = *adnl.key_by_tag(KEY_TAG)?.id(); 66 | adnl.add_peer( 67 | adnl::NewPeerContext::Dht, 68 | &local_id, 69 | &peer_id, 70 | config.server_address, 71 | peer_id_full, 72 | ) 73 | .context("failed to add server as a peer")?; 74 | 75 | // Done 76 | Ok(NodeUdpRpc { 77 | inner: Arc::new(NodeInner { 78 | local_id, 79 | peer_id, 80 | query_prefix, 81 | adnl, 82 | rldp, 83 | roundtrip: Default::default(), 84 | }), 85 | }) 86 | } 87 | 88 | pub 
async fn get_capabilities(&self) -> Result { 89 | const MAX_ATTEMPTS: usize = 5; 90 | 91 | let mut attempt = 0; 92 | loop { 93 | let res = self.inner.adnl_query(proto::GetCapabilities, 1000).await; 94 | attempt += 1; 95 | if res.is_ok() || attempt >= MAX_ATTEMPTS { 96 | break res; 97 | } 98 | } 99 | } 100 | 101 | /// Waits for the next block 102 | pub async fn get_next_block( 103 | &self, 104 | prev_block_id: &ton_block::BlockIdExt, 105 | ) -> Result { 106 | let mut timeouts = BLOCK_TIMEOUTS; 107 | 108 | let mut attempt = 0; 109 | loop { 110 | let data = self 111 | .inner 112 | .rldp_query(proto::DownloadNextBlockFull { prev_block_id }, attempt) 113 | .await 114 | .context("rldp query failed")?; 115 | 116 | match data.as_deref().map(tl_proto::deserialize) { 117 | // Received valid block 118 | Some(Ok(proto::DataFull::Found { 119 | block_id, block, .. 120 | })) => break BlockStuff::new(block, block_id), 121 | // Received invalid response 122 | Some(Err(e)) => break Err(e.into()), 123 | // Received empty response or nothing (due to timeout) 124 | Some(Ok(proto::DataFull::Empty)) | None => { 125 | tracing::debug!("next block not found"); 126 | timeouts.sleep_and_update().await; 127 | attempt += 1; 128 | } 129 | } 130 | } 131 | } 132 | 133 | /// Polls the server for the specified block 134 | pub async fn get_block(&self, block_id: &ton_block::BlockIdExt) -> Result { 135 | let mut timeouts = BLOCK_TIMEOUTS; 136 | loop { 137 | match self 138 | .inner 139 | .adnl_query(proto::PrepareBlock { block_id }, 1000) 140 | .await? 
141 | { 142 | proto::Prepared::Found => break, 143 | proto::Prepared::NotFound => { 144 | tracing::debug!("block not found"); 145 | timeouts.sleep_and_update().await; 146 | } 147 | } 148 | } 149 | 150 | timeouts = BLOCK_TIMEOUTS; 151 | let mut attempt = 0; 152 | loop { 153 | let data = self 154 | .inner 155 | .rldp_query(proto::RpcDownloadBlock { block_id }, attempt) 156 | .await?; 157 | 158 | match data { 159 | Some(block) => break BlockStuff::new(&block, block_id.clone()), 160 | None => { 161 | tracing::debug!("block receiver timeout"); 162 | timeouts.sleep_and_update().await; 163 | attempt += 1; 164 | } 165 | } 166 | } 167 | } 168 | } 169 | 170 | struct NodeInner { 171 | local_id: adnl::NodeIdShort, 172 | peer_id: adnl::NodeIdShort, 173 | query_prefix: Vec, 174 | adnl: Arc, 175 | rldp: Arc, 176 | roundtrip: Mutex, 177 | } 178 | 179 | impl NodeInner { 180 | async fn adnl_query(&self, query: Q, timeout: u64) -> Result 181 | where 182 | Q: TlWrite, 183 | for<'a> R: TlRead<'a, Repr = tl_proto::Boxed> + 'static, 184 | { 185 | self.adnl 186 | .query_with_prefix( 187 | &self.local_id, 188 | &self.peer_id, 189 | &self.query_prefix, 190 | query, 191 | Some(timeout), 192 | ) 193 | .await? 
194 | .context("timeout") 195 | } 196 | 197 | async fn rldp_query(&self, query: Q, attempt: u64) -> Result>> 198 | where 199 | Q: TlWrite, 200 | { 201 | const ATTEMPT_INTERVAL: u64 = 50; // milliseconds 202 | 203 | let prefix = &self.query_prefix; 204 | let mut query_data = Vec::with_capacity(prefix.len() + query.max_size_hint()); 205 | query_data.extend_from_slice(prefix); 206 | query.write_to(&mut query_data); 207 | 208 | let roundtrip = { 209 | let roundtrip = *self.roundtrip.lock(); 210 | if roundtrip > 0 { 211 | Some(roundtrip + attempt * ATTEMPT_INTERVAL) 212 | } else { 213 | None 214 | } 215 | }; 216 | 217 | let (answer, roundtrip) = self 218 | .rldp 219 | .query(&self.local_id, &self.peer_id, query_data, roundtrip) 220 | .await?; 221 | 222 | if answer.is_some() { 223 | let mut current_roundtrip = self.roundtrip.lock(); 224 | if *current_roundtrip > 0 { 225 | *current_roundtrip = (*current_roundtrip + roundtrip) / 2; 226 | } else { 227 | *current_roundtrip = roundtrip; 228 | } 229 | } 230 | 231 | Ok(answer) 232 | } 233 | } 234 | 235 | impl Drop for NodeInner { 236 | fn drop(&mut self) { 237 | self.adnl.shutdown(); 238 | } 239 | } 240 | 241 | const BLOCK_TIMEOUTS: DownloaderTimeouts = DownloaderTimeouts { 242 | initial: 200, 243 | max: 1000, 244 | multiplier: 1.2, 245 | }; 246 | 247 | #[derive(Debug, Copy, Clone)] 248 | struct DownloaderTimeouts { 249 | /// Milliseconds 250 | initial: u64, 251 | /// Milliseconds 252 | max: u64, 253 | 254 | multiplier: f64, 255 | } 256 | 257 | impl DownloaderTimeouts { 258 | async fn sleep_and_update(&mut self) { 259 | tokio::time::sleep(Duration::from_millis(self.initial)).await; 260 | self.update(); 261 | } 262 | 263 | fn update(&mut self) -> u64 { 264 | self.initial = std::cmp::min(self.max, (self.initial as f64 * self.multiplier) as u64); 265 | self.initial 266 | } 267 | } 268 | 269 | fn session_keys() -> &'static [u8; 32] { 270 | use once_cell::sync::OnceCell; 271 | 272 | static KEYS: OnceCell<[u8; 32]> = OnceCell::new(); 
273 | KEYS.get_or_init(|| rand::thread_rng().gen()) 274 | } 275 | 276 | const KEY_TAG: usize = 0; 277 | -------------------------------------------------------------------------------- /src/network/node_udp_rpc/proto.rs: -------------------------------------------------------------------------------- 1 | use tl_proto::{TlError, TlPacket, TlRead, TlResult, TlWrite}; 2 | 3 | #[derive(Copy, Clone, TlWrite)] 4 | #[tl(boxed, id = "tonNode.prepareBlock", scheme = "proto.tl")] 5 | pub struct PrepareBlock<'tl> { 6 | #[tl(with = "tl_block_id")] 7 | pub block_id: &'tl ton_block::BlockIdExt, 8 | } 9 | 10 | #[derive(Clone, TlWrite)] 11 | #[tl(boxed, id = "tonNode.downloadBlock", scheme = "proto.tl")] 12 | pub struct RpcDownloadBlock<'tl> { 13 | #[tl(with = "tl_block_id")] 14 | pub block_id: &'tl ton_block::BlockIdExt, 15 | } 16 | 17 | #[derive(Copy, Clone, TlWrite)] 18 | #[tl(boxed, id = "tonNode.downloadNextBlockFull", scheme = "proto.tl")] 19 | pub struct DownloadNextBlockFull<'tl> { 20 | #[tl(with = "tl_block_id")] 21 | pub prev_block_id: &'tl ton_block::BlockIdExt, 22 | } 23 | 24 | #[derive(Clone, TlRead)] 25 | #[tl(boxed, scheme = "proto.tl")] 26 | pub enum DataFull<'tl> { 27 | #[tl(id = "tonNode.dataFull")] 28 | Found { 29 | #[tl(with = "tl_block_id")] 30 | block_id: ton_block::BlockIdExt, 31 | proof: &'tl [u8], 32 | block: &'tl [u8], 33 | is_link: bool, 34 | }, 35 | #[tl(id = "tonNode.dataFullEmpty")] 36 | Empty, 37 | } 38 | 39 | #[derive(Debug, Copy, Clone, Eq, PartialEq, TlRead)] 40 | #[tl(boxed, scheme = "proto.tl")] 41 | pub enum Prepared { 42 | #[tl(id = "tonNode.notFound")] 43 | NotFound, 44 | #[tl(id = "tonNode.prepared")] 45 | Found, 46 | } 47 | 48 | #[derive(TlWrite, TlRead)] 49 | #[tl(boxed, id = "tonNode.getCapabilities", scheme = "proto.tl")] 50 | pub struct GetCapabilities; 51 | 52 | #[derive(Debug, Copy, Clone, Eq, PartialEq, TlRead)] 53 | #[tl( 54 | boxed, 55 | id = "tonNode.capabilities", 56 | size_hint = 12, 57 | scheme = "proto.tl" 58 | )] 59 | pub 
struct Capabilities { 60 | pub version: u32, 61 | pub capabilities: u64, 62 | } 63 | 64 | mod tl_block_id { 65 | use super::*; 66 | 67 | pub const SIZE_HINT: usize = 80; 68 | 69 | pub const fn size_hint(_: &ton_block::BlockIdExt) -> usize { 70 | SIZE_HINT 71 | } 72 | 73 | pub fn write(block: &ton_block::BlockIdExt, packet: &mut P) { 74 | packet.write_i32(block.shard_id.workchain_id()); 75 | packet.write_u64(block.shard_id.shard_prefix_with_tag()); 76 | packet.write_u32(block.seq_no); 77 | packet.write_raw_slice(block.root_hash.as_slice()); 78 | packet.write_raw_slice(block.file_hash.as_slice()); 79 | } 80 | 81 | pub fn read(packet: &[u8], offset: &mut usize) -> TlResult { 82 | let shard_id = ton_block::ShardIdent::with_tagged_prefix( 83 | i32::read_from(packet, offset)?, 84 | u64::read_from(packet, offset)?, 85 | ) 86 | .map_err(|_| TlError::InvalidData)?; 87 | let seq_no = u32::read_from(packet, offset)?; 88 | let root_hash = <[u8; 32]>::read_from(packet, offset)?; 89 | let file_hash = <[u8; 32]>::read_from(packet, offset)?; 90 | 91 | Ok(ton_block::BlockIdExt { 92 | shard_id, 93 | seq_no, 94 | root_hash: root_hash.into(), 95 | file_hash: file_hash.into(), 96 | }) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /src/proto.tl: -------------------------------------------------------------------------------- 1 | // Generic stuff 2 | //////////////////////////////////////////////////////////////////////////////// 3 | 4 | ---types--- 5 | 6 | int ? = Int; 7 | long ? = Long; 8 | string ? = String; 9 | object ? 
= Object; 10 | int256 8*[ int ] = Int256; 11 | bytes data:string = Bytes; 12 | 13 | pub.unenc data:bytes = PublicKey; 14 | pub.ed25519 key:int256 = PublicKey; 15 | pub.aes key:int256 = PublicKey; 16 | pub.overlay name:bytes = PublicKey; 17 | 18 | 19 | // Node TCP ADNL stuff 20 | //////////////////////////////////////////////////////////////////////////////// 21 | 22 | ---types--- 23 | 24 | tcp.pong random_id:long = tcp.Pong; 25 | 26 | adnl.message.query query_id:int256 query:bytes = adnl.Message; 27 | adnl.message.answer query_id:int256 answer:bytes = adnl.Message; 28 | 29 | tonNode.blockIdExt workchain:int shard:long seqno:int root_hash:int256 file_hash:int256 = tonNode.BlockIdExt; 30 | 31 | accountAddress account_address:string = AccountAddress; 32 | 33 | engine.validator.success = engine.validator.Success; 34 | 35 | engine.validator.keyHash key_hash:int256 = engine.validator.KeyHash; 36 | engine.validator.signature signature:bytes = engine.validator.Signature; 37 | 38 | engine.validator.oneStat key:string value:string = engine.validator.OneStat; 39 | engine.validator.stats stats:(vector engine.validator.oneStat) = engine.validator.Stats; 40 | 41 | liteServer.sendMsgStatus status:int = liteServer.SendMsgStatus; 42 | liteServer.configInfo mode:# id:tonNode.blockIdExt state_proof:bytes config_proof:bytes = liteServer.ConfigInfo; 43 | 44 | raw.shardAccountState shard_account:bytes = raw.ShardAccountState; 45 | raw.shardAccountNone = raw.ShardAccountState; 46 | 47 | ---functions--- 48 | 49 | tcp.ping random_id:long = tcp.Pong; 50 | 51 | engine.validator.controlQuery data:bytes = Object; 52 | engine.validator.controlQueryError code:int message:string = engine.validator.ControlQueryError; 53 | 54 | engine.validator.generateKeyPair = engine.validator.KeyHash; 55 | engine.validator.exportPublicKey key_hash:int256 = PublicKey; 56 | engine.validator.sign key_hash:int256 data:bytes = engine.validator.Signature; 57 | engine.validator.addValidatorPermanentKey key_hash:int256 
election_date:int ttl:int = engine.validator.Success; 58 | engine.validator.addValidatorAdnlAddress permanent_key_hash:int256 key_hash:int256 ttl:int = engine.validator.Success; 59 | engine.validator.getStats = engine.validator.Stats; 60 | engine.validator.setStatesGcInterval interval_ms:int = engine.validator.Success; 61 | 62 | liteServer.sendMessage body:bytes = liteServer.SendMsgStatus; 63 | liteServer.getConfigAll mode:# id:tonNode.blockIdExt = liteServer.ConfigInfo; 64 | liteServer.getConfigParams mode:# id:tonNode.blockIdExt param_list:(vector int) = liteServer.ConfigInfo; 65 | 66 | raw.getShardAccountState account_address:accountAddress = raw.ShardAccountState; 67 | 68 | 69 | // Node UDP ADNL stuff 70 | //////////////////////////////////////////////////////////////////////////////// 71 | 72 | ---types--- 73 | 74 | tonNode.blockDescriptionEmpty = tonNode.BlockDescription; 75 | tonNode.blockDescription id:tonNode.blockIdExt = tonNode.BlockDescription; 76 | 77 | tonNode.preparedProofEmpty = tonNode.PreparedProof; 78 | tonNode.preparedProof = tonNode.PreparedProof; 79 | tonNode.preparedProofLink = tonNode.PreparedProof; 80 | 81 | tonNode.prepared = tonNode.Prepared; 82 | tonNode.notFound = tonNode.Prepared; 83 | 84 | tonNode.preparedState = tonNode.PreparedState; 85 | tonNode.notFoundState = tonNode.PreparedState; 86 | 87 | tonNode.keyBlocks blocks:(vector tonNode.blockIdExt) incomplete:Bool error:Bool = tonNode.KeyBlocks; 88 | 89 | tonNode.dataFull id:tonNode.blockIdExt proof:bytes block:bytes is_link:Bool = tonNode.DataFull; 90 | tonNode.dataFullEmpty = tonNode.DataFull; 91 | 92 | tonNode.data data:bytes = tonNode.Data; 93 | 94 | tonNode.archiveNotFound = tonNode.ArchiveInfo; 95 | tonNode.archiveInfo id:long = tonNode.ArchiveInfo; 96 | 97 | tonNode.capabilities version:int capabilities:long = tonNode.Capabilities; 98 | 99 | ---functions--- 100 | 101 | tonNode.getNextBlockDescription prev_block:tonNode.blockIdExt = tonNode.BlockDescription; 102 | 
tonNode.prepareBlockProof block:tonNode.blockIdExt allow_partial:Bool = tonNode.PreparedProof;
tonNode.prepareKeyBlockProof block:tonNode.blockIdExt allow_partial:Bool = tonNode.PreparedProof;
tonNode.prepareBlock block:tonNode.blockIdExt = tonNode.Prepared;
tonNode.getNextKeyBlockIds block:tonNode.blockIdExt max_size:int = tonNode.KeyBlocks;
tonNode.downloadNextBlockFull prev_block:tonNode.blockIdExt = tonNode.DataFull;
tonNode.downloadBlockFull block:tonNode.blockIdExt = tonNode.DataFull;
tonNode.downloadBlock block:tonNode.blockIdExt = tonNode.Data;

tonNode.downloadBlockProof block:tonNode.blockIdExt = tonNode.Data;
tonNode.downloadKeyBlockProof block:tonNode.blockIdExt = tonNode.Data;
tonNode.downloadBlockProofLink block:tonNode.blockIdExt = tonNode.Data;
tonNode.downloadKeyBlockProofLink block:tonNode.blockIdExt = tonNode.Data;

tonNode.getCapabilities = tonNode.Capabilities;
--------------------------------------------------------------------------------
/src/util/block_stuff.rs:
--------------------------------------------------------------------------------
use anyhow::{Context, Result};
use rustc_hash::FxHashMap;
use ton_block::Deserializable;

// NOTE(review): all generic parameters in this file were stripped by the
// extraction (`Result` instead of `Result<Self>` etc.); they are restored
// below from the surrounding usage — confirm against VCS.

/// A deserialized block together with its verified id.
pub struct BlockStuff {
    id: ton_block::BlockIdExt,
    block: ton_block::Block,
}

impl BlockStuff {
    /// Parses a block from raw BOC bytes, checking that both the file hash
    /// and the root hash match the provided `id`.
    pub fn new(mut data: &[u8], id: ton_block::BlockIdExt) -> Result<Self> {
        let file_hash = ton_types::UInt256::calc_file_hash(data);
        anyhow::ensure!(id.file_hash() == file_hash, "wrong file_hash for {id}");

        let root = ton_types::deserialize_tree_of_cells(&mut data)?;
        anyhow::ensure!(
            id.root_hash() == root.repr_hash(),
            "wrong root hash for {id}"
        );

        let block = ton_block::Block::construct_from(&mut ton_types::SliceData::load_cell(root)?)?;
        Ok(Self { id, block })
    }

    #[inline(always)]
    pub fn id(&self) -> &ton_block::BlockIdExt {
        &self.id
    }

    #[inline(always)]
    pub fn block(&self) -> &ton_block::Block {
        &self.block
    }

    /// Reads generation time and the previous block id(s) from the block info.
    pub fn read_brief_info(&self) -> Result<BriefBlockInfo> {
        let info = self.block.read_info()?;

        let (prev1, prev2) = match info.read_prev_ref()? {
            ton_block::BlkPrevInfo::Block { prev } => {
                // After a split the single previous block lived in the merged shard.
                let shard_id = if info.after_split() {
                    info.shard().merge()?
                } else {
                    *info.shard()
                };

                let id = ton_block::BlockIdExt {
                    shard_id,
                    seq_no: prev.seq_no,
                    root_hash: prev.root_hash,
                    file_hash: prev.file_hash,
                };

                (id, None)
            }
            ton_block::BlkPrevInfo::Blocks { prev1, prev2 } => {
                // After a merge there are two previous blocks, one per split shard.
                let prev1 = prev1.read_struct()?;
                let prev2 = prev2.read_struct()?;
                let (shard1, shard2) = info.shard().split()?;

                let id1 = ton_block::BlockIdExt {
                    shard_id: shard1,
                    seq_no: prev1.seq_no,
                    root_hash: prev1.root_hash,
                    file_hash: prev1.file_hash,
                };

                let id2 = ton_block::BlockIdExt {
                    shard_id: shard2,
                    seq_no: prev2.seq_no,
                    root_hash: prev2.root_hash,
                    file_hash: prev2.file_hash,
                };

                (id1, Some(id2))
            }
        };

        Ok(BriefBlockInfo {
            gen_utime: info.gen_utime().as_u32(),
            prev1,
            prev2,
        })
    }

    /// Returns the latest shard block id for each shard.
    /// Fails if this block is not a masterchain block.
    pub fn shard_blocks(&self) -> Result<FxHashMap<ton_block::ShardIdent, ton_block::BlockIdExt>> {
        let mut shards = FxHashMap::default();
        self.block()
            .read_extra()?
            .read_custom()?
            .context("given block is not a masterchain block")?
            .hashes()
            .iterate_shards(|ident, descr| {
                let last_shard_block = ton_block::BlockIdExt {
                    shard_id: ident,
                    seq_no: descr.seq_no,
                    root_hash: descr.root_hash,
                    file_hash: descr.file_hash,
                };
                shards.insert(ident, last_shard_block);
                Ok(true)
            })?;

        Ok(shards)
    }

    /// Returns the latest shard block seqno for each shard.
    /// Fails if this block is not a masterchain block.
    pub fn shard_blocks_seq_no(&self) -> Result<FxHashMap<ton_block::ShardIdent, u32>> {
        let mut shards = FxHashMap::default();
        self.block()
            .read_extra()?
            .read_custom()?
            .context("given block is not a masterchain block")?
            .hashes()
            .iterate_shards(|ident, descr| {
                shards.insert(ident, descr.seq_no);
                Ok(true)
            })?;

        Ok(shards)
    }
}

/// Brief info extracted by [`BlockStuff::read_brief_info`].
#[derive(Clone)]
pub struct BriefBlockInfo {
    pub gen_utime: u32,
    pub prev1: ton_block::BlockIdExt,
    pub prev2: Option<ton_block::BlockIdExt>,
}

/// Wrapper around the storage-price config used to compute storage fees.
#[derive(Clone)]
pub struct StoragePrices {
    inner: ton_executor::AccStoragePrices,
}

impl StoragePrices {
    pub fn new(config: &ton_block::ConfigParams) -> Result<Self> {
        Ok(Self {
            inner: ton_executor::AccStoragePrices::with_config(&config.storage_prices()?)?,
        })
    }

    /// Computes the storage fee accumulated by an account since `last_paid`.
    pub fn compute_fee(
        &self,
        storage: &ton_block::StorageInfo,
        is_masterchain: bool,
        now: u32,
    ) -> u128 {
        self.inner.calc_storage_fee(
            storage.used().cells().into(),
            storage.used().bits().into(),
            storage.last_paid(),
            now,
            is_masterchain,
        )
    }
}
--------------------------------------------------------------------------------
/src/util/cli.rs:
--------------------------------------------------------------------------------
use std::future::Future;
use std::io::{Read, Write};
use std::path::Path;
use std::str::FromStr;

use anyhow::{Context, Result};
use dialoguer::console;
use dialoguer::theme::Theme;
use tokio::process::Command;
use tokio_util::sync::CancellationToken;
use ton_block::Deserializable;

/// Spawns `command` and waits for it to finish, failing on a non-zero exit code.
pub async fn exec(command: &mut Command) -> Result<()> {
    let mut child = command.spawn()?;

    let status = child
        .wait()
        .await
        .context("child process encountered an error")?;

    anyhow::ensure!(
        status.success(),
        "child process failed with exit code {status}"
    );
    Ok(())
}

/// Thin wrapper that displays a nano-token amount as whole tokens.
pub struct Tokens<T>(pub T);

impl<T: Into<u128> + Copy> std::fmt::Display for Tokens<T> {
31 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 32 | let num: u128 = self.0.into(); 33 | let int = num / 1000000000; 34 | let mut frac = num % 1000000000; 35 | 36 | int.fmt(f)?; 37 | if frac > 0 { 38 | while frac % 10 == 0 && frac > 0 { 39 | frac /= 10; 40 | } 41 | f.write_fmt(format_args!(".{frac}"))?; 42 | } 43 | Ok(()) 44 | } 45 | } 46 | 47 | #[derive(Clone)] 48 | pub struct AddressInput(pub ton_block::MsgAddressInt); 49 | 50 | impl std::fmt::Display for AddressInput { 51 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 52 | self.0.fmt(f) 53 | } 54 | } 55 | 56 | impl FromStr for AddressInput { 57 | type Err = anyhow::Error; 58 | 59 | fn from_str(s: &str) -> Result { 60 | ton_block::MsgAddressInt::from_str(s.trim()) 61 | .map(Self) 62 | .map_err(|_| anyhow::Error::msg("invalid address")) 63 | } 64 | } 65 | 66 | #[derive(Clone)] 67 | pub struct OptionalAddressInput(pub Option); 68 | 69 | impl std::fmt::Display for OptionalAddressInput { 70 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 71 | match &self.0 { 72 | Some(addr) => addr.fmt(f), 73 | None => Ok(()), 74 | } 75 | } 76 | } 77 | 78 | impl FromStr for OptionalAddressInput { 79 | type Err = anyhow::Error; 80 | 81 | fn from_str(s: &str) -> Result { 82 | if s.is_empty() { 83 | return Ok(Self(None)); 84 | } 85 | 86 | let AddressInput(addr) = s.parse()?; 87 | Ok(Self(Some(addr))) 88 | } 89 | } 90 | 91 | pub fn parse_contract_abi

(path: P) -> Result 92 | where 93 | P: AsRef, 94 | { 95 | let data = std::fs::read(path.as_ref()).context("failed to read JSON ABI")?; 96 | let mut jd = serde_json::Deserializer::from_slice(&data); 97 | let contract: ton_abi::contract::SerdeContract = 98 | serde_path_to_error::deserialize(&mut jd).context("failed to parse JSON ABI")?; 99 | ton_abi::Contract::try_from(contract) 100 | } 101 | 102 | pub fn parse_address(address: &str) -> Result { 103 | ton_block::MsgAddressInt::from_str(address).map_err(From::from) 104 | } 105 | 106 | pub fn parse_optional_pubkey(pubkey: Option) -> Result> { 107 | match pubkey { 108 | Some(pubkey) => { 109 | let pubkey = parse_hex_or_base64(&pubkey)?; 110 | Ok(Some(ed25519_dalek::PublicKey::from_bytes(&pubkey)?)) 111 | } 112 | None => Ok(None), 113 | } 114 | } 115 | 116 | pub fn parse_optional_input(data: Option, raw: bool) -> Result> { 117 | match data { 118 | Some(data) if raw => Ok(data.into()), 119 | Some(data) => parse_hex_or_base64(&data), 120 | None => { 121 | let mut data = Vec::new(); 122 | std::io::stdin() 123 | .read_to_end(&mut data) 124 | .context("failed to read from stdin")?; 125 | Ok(data) 126 | } 127 | } 128 | } 129 | 130 | pub fn parse_key_hash(hash: &str) -> Result<[u8; 32]> { 131 | let data = parse_hex_or_base64(hash).context("invalid key hash")?; 132 | data.try_into() 133 | .map_err(|_| anyhow::Error::msg("invalid key hash length")) 134 | } 135 | 136 | pub fn parse_optional_state_init(data: Option) -> Result> { 137 | data.as_deref().map(parse_state_init).transpose() 138 | } 139 | 140 | pub fn parse_state_init(data: &str) -> Result { 141 | ton_block::StateInit::construct_from_base64(data) 142 | } 143 | 144 | pub fn parse_hex_or_base64(data: &str) -> Result> { 145 | if let Some(hash) = data.strip_prefix("0x") { 146 | hex::decode(hash).map_err(From::from) 147 | } else { 148 | match hex::decode(data) { 149 | Ok(bytes) => Ok(bytes), 150 | Err(e) => match base64::decode(data) { 151 | Ok(bytes) => Ok(bytes), 152 | _ => 
Err(e.into()), 153 | }, 154 | } 155 | } 156 | } 157 | 158 | pub fn confirm(theme: &dyn Theme, default: bool, text: T) -> std::io::Result 159 | where 160 | T: Into, 161 | { 162 | dialoguer::Confirm::with_theme(theme) 163 | .with_prompt(text) 164 | .default(default) 165 | .interact() 166 | } 167 | 168 | pub fn print_output(arg: T) { 169 | if is_terminal() { 170 | writeln!(std::io::stdout(), "{arg:#}") 171 | } else { 172 | write!(std::io::stdout(), "{arg}") 173 | } 174 | .unwrap() 175 | } 176 | 177 | pub fn print_error(text: impl std::fmt::Display) { 178 | if is_terminal() { 179 | eprintln!("{}", console::style(format!("✘ {text}")).red().bold()); 180 | } else { 181 | eprintln!("Error: {text}"); 182 | } 183 | } 184 | 185 | pub fn note(text: impl std::fmt::Display) -> impl std::fmt::Display { 186 | console::style(format!("({text})")).dim() 187 | } 188 | 189 | pub struct Steps { 190 | total: usize, 191 | current: usize, 192 | } 193 | 194 | impl Steps { 195 | pub fn new(total: usize) -> Self { 196 | Self { total, current: 0 } 197 | } 198 | 199 | pub fn next(&mut self, text: impl std::fmt::Display) { 200 | if is_terminal() { 201 | eprintln!( 202 | "{} {text}", 203 | console::style(format!("[{}/{}]", self.current, self.total)) 204 | .bold() 205 | .dim() 206 | ); 207 | } else { 208 | eprintln!("[{}/{}] {text}", self.current, self.total); 209 | } 210 | self.current += 1; 211 | } 212 | } 213 | 214 | pub fn is_terminal() -> bool { 215 | use once_cell::race::OnceBox; 216 | 217 | static IS_TERMINAL: OnceBox = OnceBox::new(); 218 | *IS_TERMINAL.get_or_init(|| Box::new(console::user_attended())) 219 | } 220 | 221 | pub async fn invoke_as_cli(f: F) -> Result<()> 222 | where 223 | F: Future>, 224 | { 225 | let token = setup_handlers(); 226 | tokio::select! 
{ 227 | res = f => res.or_else(ignore_interrupt), 228 | _ = token.cancelled() => { 229 | eprintln!(); 230 | Ok(()) 231 | } 232 | } 233 | } 234 | 235 | fn setup_handlers() -> CancellationToken { 236 | let token = CancellationToken::new(); 237 | 238 | if is_terminal() { 239 | ctrlc::set_handler({ 240 | let cancellation_token = token.clone(); 241 | move || { 242 | cancellation_token.cancel(); 243 | let term = dialoguer::console::Term::stdout(); 244 | let _ = term.show_cursor(); 245 | } 246 | }) 247 | .expect("Error setting Ctrl-C handler"); 248 | } 249 | 250 | token 251 | } 252 | 253 | fn ignore_interrupt(e: anyhow::Error) -> Result<()> { 254 | if !is_terminal() { 255 | return Err(e); 256 | } 257 | 258 | if let Some(e) = e.downcast_ref::() { 259 | if e.kind() == std::io::ErrorKind::Interrupted { 260 | eprintln!(); 261 | return Ok(()); 262 | } 263 | } 264 | 265 | Err(e) 266 | } 267 | -------------------------------------------------------------------------------- /src/util/mod.rs: -------------------------------------------------------------------------------- 1 | use std::hash::BuildHasherDefault; 2 | 3 | use dashmap::DashMap; 4 | 5 | pub use self::block_stuff::*; 6 | pub use self::cli::*; 7 | pub use self::serde::*; 8 | pub use self::transaction::*; 9 | 10 | mod block_stuff; 11 | mod cli; 12 | mod serde; 13 | pub mod system; 14 | mod transaction; 15 | 16 | pub type FxDashMap = DashMap>; 17 | -------------------------------------------------------------------------------- /src/util/serde.rs: -------------------------------------------------------------------------------- 1 | use everscale_crypto::ed25519; 2 | use serde::{Deserialize, Deserializer, Serializer}; 3 | 4 | pub mod serde_mc_address { 5 | use broxus_util::serde_string; 6 | use ton_block::MsgAddressInt; 7 | 8 | use super::*; 9 | 10 | pub use serde_string::serialize; 11 | 12 | pub fn deserialize<'de, D: Deserializer<'de>>( 13 | deserializer: D, 14 | ) -> Result { 15 | use serde::de::Error; 16 | 17 | match 
serde_string::deserialize(deserializer)? { 18 | MsgAddressInt::AddrStd(addr) => { 19 | if addr.workchain_id as i32 == ton_block::MASTERCHAIN_ID { 20 | Ok(MsgAddressInt::AddrStd(addr)) 21 | } else { 22 | Err(Error::custom("expected masterchain address")) 23 | } 24 | } 25 | MsgAddressInt::AddrVar(_) => Err(Error::custom("unsupported address")), 26 | } 27 | } 28 | } 29 | 30 | pub mod serde_public_key { 31 | use super::*; 32 | 33 | pub fn serialize( 34 | public: &ed25519::PublicKey, 35 | serializer: S, 36 | ) -> Result { 37 | serializer.serialize_str(&hex::encode(public.as_bytes())) 38 | } 39 | 40 | pub fn deserialize<'de, D: Deserializer<'de>>( 41 | deserializer: D, 42 | ) -> Result { 43 | use serde::de::Error; 44 | 45 | let str = String::deserialize(deserializer)?; 46 | let bytes = match hex::decode(&str) { 47 | Ok(bytes) if bytes.len() == 32 => bytes, 48 | _ => match base64::decode(&str) { 49 | Ok(bytes) => bytes, 50 | Err(_) => return Err(Error::custom("invalid pubkey string")), 51 | }, 52 | }; 53 | 54 | let bytes = bytes 55 | .try_into() 56 | .map_err(|_| Error::custom("invalid pubkey length"))?; 57 | 58 | ed25519::PublicKey::from_bytes(bytes).ok_or_else(|| Error::custom("invalid pubkey")) 59 | } 60 | } 61 | 62 | pub mod serde_secret_key { 63 | use super::*; 64 | 65 | pub fn serialize( 66 | secret: &ed25519::SecretKey, 67 | serializer: S, 68 | ) -> Result { 69 | serializer.serialize_str(&hex::encode(secret.as_bytes())) 70 | } 71 | 72 | pub fn deserialize<'de, D: Deserializer<'de>>( 73 | deserializer: D, 74 | ) -> Result { 75 | use serde::de::Error; 76 | 77 | let str = String::deserialize(deserializer)?; 78 | let bytes = match hex::decode(&str) { 79 | Ok(bytes) if bytes.len() == 32 => bytes, 80 | _ => match base64::decode(&str) { 81 | Ok(bytes) => bytes, 82 | Err(_) => return Err(Error::custom("invalid secret key string")), 83 | }, 84 | }; 85 | 86 | let bytes = bytes 87 | .try_into() 88 | .map_err(|_| Error::custom("invalid secret key length"))?; 89 | 90 | 
Ok(ed25519::SecretKey::from_bytes(bytes)) 91 | } 92 | } 93 | 94 | pub mod serde_block_id { 95 | use super::*; 96 | 97 | pub fn serialize( 98 | block_id: &ton_block::BlockIdExt, 99 | serializer: S, 100 | ) -> Result { 101 | serializer.serialize_str(&format!( 102 | "{}:{:016x}:{}:{:x}:{:x}", 103 | block_id.shard_id.workchain_id(), 104 | block_id.shard_id.shard_prefix_with_tag(), 105 | block_id.seq_no, 106 | block_id.root_hash, 107 | block_id.file_hash 108 | )) 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /src/util/system.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::{CStr, CString, OsString}; 2 | use std::mem::MaybeUninit; 3 | use std::os::unix::ffi::OsStringExt; 4 | use std::os::unix::prelude::OsStrExt; 5 | use std::path::{Path, PathBuf}; 6 | use std::ptr; 7 | 8 | use anyhow::{Context, Result}; 9 | 10 | #[allow(unused)] 11 | pub fn get_sudo_uid() -> Result> { 12 | match std::env::var("SUDO_UID") { 13 | Ok(uid) => Ok(Some(uid.parse().context("invalid SUDO_UID")?)), 14 | Err(_) => Ok(None), 15 | } 16 | } 17 | 18 | #[allow(unused)] 19 | pub fn user_id() -> u32 { 20 | // SAFETY: no errors are defined 21 | unsafe { libc::getuid() } 22 | } 23 | 24 | #[allow(unused)] 25 | pub fn user_name(uid: u32) -> Option { 26 | // SAFETY: `buf` outlives `pwd.pw_name` 27 | unsafe { 28 | let mut buf = make_buffer(); 29 | let pwd = get_passwd(uid, &mut buf)?; 30 | Some(CStr::from_ptr(pwd.pw_name).to_string_lossy().into_owned()) 31 | } 32 | } 33 | 34 | #[allow(unused)] 35 | pub fn home_dir(uid: u32) -> Option { 36 | // SAFETY: `buf` outlives `pwd.pw_dir` 37 | unsafe { 38 | let mut buf = make_buffer(); 39 | let pwd = get_passwd(uid, &mut buf)?; 40 | 41 | let bytes = CStr::from_ptr(pwd.pw_dir).to_bytes().to_vec(); 42 | let pw_dir = OsString::from_vec(bytes); 43 | 44 | Some(PathBuf::from(pw_dir)) 45 | } 46 | } 47 | 48 | pub fn make_shell_path(path: &str) -> PathBuf { 49 | // 
Replace `~` with a path to the home directory 50 | if let Some(path_after_tilde) = path.strip_prefix('~') { 51 | if path_after_tilde.is_empty() || path_after_tilde.starts_with('/') { 52 | if let Some(home) = home::home_dir() { 53 | return home.join(path_after_tilde.trim_start_matches('/')); 54 | } 55 | } 56 | } 57 | 58 | PathBuf::from(path) 59 | } 60 | 61 | #[derive(Debug, Clone, Copy)] 62 | pub struct FsStats { 63 | pub free_space: u64, 64 | pub available_space: u64, 65 | pub total_space: u64, 66 | pub allocation_granularity: u64, 67 | } 68 | 69 | pub fn statvfs>(path: P) -> Result { 70 | let path = 71 | CString::new(path.as_ref().as_os_str().as_bytes()).context("invalid path for statvfs")?; 72 | 73 | let mut stat: libc::statvfs = unsafe { std::mem::zeroed() }; 74 | 75 | let res = unsafe { libc::statvfs(path.as_ptr() as *const _, &mut stat) }; 76 | anyhow::ensure!(res == 0, std::io::Error::last_os_error()); 77 | 78 | Ok(FsStats { 79 | free_space: stat.f_frsize * stat.f_bfree, 80 | available_space: stat.f_frsize * stat.f_bavail, 81 | total_space: stat.f_frsize * stat.f_blocks, 82 | allocation_granularity: stat.f_frsize, 83 | }) 84 | } 85 | 86 | unsafe fn get_passwd(uid: u32, buf: &mut Buffer) -> Option { 87 | let mut pwd: MaybeUninit = MaybeUninit::uninit(); 88 | let mut pwdp = ptr::null_mut(); 89 | match libc::getpwuid_r( 90 | uid, 91 | pwd.as_mut_ptr(), 92 | buf.as_mut_ptr(), 93 | buf.capacity(), 94 | &mut pwdp, 95 | ) { 96 | 0 if !pwdp.is_null() => Some(pwd.assume_init()), 97 | _ => None, 98 | } 99 | } 100 | 101 | fn make_buffer() -> Buffer { 102 | // SAFETY: `name` arg is valid 103 | let init_size = match unsafe { libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) } { 104 | -1 => 1024, 105 | n => n as usize, 106 | }; 107 | Buffer::with_capacity(init_size) 108 | } 109 | 110 | type Buffer = Vec; 111 | -------------------------------------------------------------------------------- /src/util/transaction.rs: 
-------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use anyhow::Result; 4 | 5 | pub fn split_address(address: &ton_block::MsgAddressInt) -> Result<(i32, ton_types::UInt256)> { 6 | match address { 7 | ton_block::MsgAddressInt::AddrStd(ton_block::MsgAddrStd { 8 | workchain_id, 9 | address, 10 | .. 11 | }) => Ok(( 12 | *workchain_id as _, 13 | ton_types::UInt256::from_slice(&address.get_bytestring_on_stack(0)), 14 | )), 15 | ton_block::MsgAddressInt::AddrVar(_) => Err(anyhow::anyhow!("unsupported address")), 16 | } 17 | } 18 | 19 | pub fn make_default_headers( 20 | pubkey: Option, 21 | timeout: u32, 22 | ) -> (u32, HashMap) { 23 | let time = broxus_util::now_ms_u64(); 24 | let expire_at = (time / 1000) as u32 + timeout; 25 | 26 | let headers = HashMap::from([ 27 | ("time".to_owned(), ton_abi::TokenValue::Time(time)), 28 | ("expire".to_owned(), ton_abi::TokenValue::Expire(expire_at)), 29 | ("pubkey".to_owned(), ton_abi::TokenValue::PublicKey(pubkey)), 30 | ]); 31 | 32 | (expire_at, headers) 33 | } 34 | 35 | #[derive(Debug, Clone)] 36 | pub struct TransactionWithHash { 37 | pub hash: ton_types::UInt256, 38 | pub data: ton_block::Transaction, 39 | } 40 | -------------------------------------------------------------------------------- /templates/depool.toml: -------------------------------------------------------------------------------- 1 | [general] 2 | node_db_path = "/var/ever/db" 3 | global_config = "ever_mainnet" 4 | node_repo = "https://github.com/everx-labs/ever-node" 5 | 6 | [adnl] 7 | port = 30100 8 | 9 | [validator] 10 | type = "depool" 11 | # DePool type: `default_v3`, `stever_v1` or `stever_v2` 12 | depool_type = "default_v3" 13 | # Minimal participant stake in nano EVERs 14 | min_stake = "10000000000" 15 | # Validator assurance in nano EVERs 16 | validator_assurance = "50000000000000" 17 | # Participant reward fraction, 1..=95 18 | participant_reward_fraction = 95 19 | 
-------------------------------------------------------------------------------- /templates/example.toml: -------------------------------------------------------------------------------- 1 | #### General settings 2 | [general] 3 | 4 | ## REQUIRED: 5 | 6 | # Path to the root directory for the node DB. 7 | node_db_path = "/var/ever/db" 8 | 9 | ## OPTIONAL: 10 | 11 | # Whether to create a root directory. Default: `true`. 12 | create_root_dir = true 13 | # Url or name of the global config. Default: `ever_mainnet`. 14 | global_config = "ever_mainnet" 15 | # Whether to reset node logger settings. Default: `false`. 16 | reset_logger_config = false 17 | # Whether to overwrite the existing node config. Default: `false`. 18 | reset_node_config = false 19 | # Whether to reset the existing app config. Default: `false`. 20 | reset_app_config = false 21 | # Repo url. Default: `https://github.com/everx-labs/ever-node` 22 | node_repo = "https://github.com/everx-labs/ever-node" 23 | 24 | ## Alternative repo url: 25 | # [general.node_repo] 26 | # url = "https://github.com/everx-labs/ever-node" 27 | # branch = "signature_with_id" 28 | # features = ["signature_with_id"] 29 | 30 | 31 | #### Optional control server settings 32 | [control] 33 | # Control server TCP port. Default: `5031`. 34 | port = 5031 35 | # What to do with new node keys: `append` or `replace`. Default: `replace`. 36 | node_key_behavior = "replace" 37 | # On which address control server will be listening for requests. Default: `127.0.0.1`. 38 | listen_addr = "127.0.0.1" 39 | 40 | 41 | #### Optional ADNL settings 42 | [adnl] 43 | # ADNL UDP port. Default: `30100`. 44 | port = 30100 45 | # # Explicit public IP. Resolved by default. 46 | # public_ip = "123.123.123.123" 47 | 48 | 49 | #### Optional validator settings 50 | 51 | # # 1. 
Validate as single 52 | # [validator] 53 | 54 | # ## REQUIRED: 55 | 56 | # # Validator type 57 | # type = "single" 58 | # # Stake per round in nano EVERs 59 | # stake_per_round = "100000000000000" 60 | 61 | # ## OPTIONAL: 62 | 63 | # # Whether to overwrite existing validation config. Default: `false`. 64 | # overwrite = false 65 | # # Whether to overwrite existing validator keys. Default: `false`. 66 | # overwrite_validator_keys = false 67 | 68 | # 2. Validate as DePool 69 | [validator] 70 | 71 | ## REQUIRED: 72 | 73 | # Validation type 74 | type = "depool" 75 | # DePool type: `default_v3`, `stever_v1` or `stever_v2` 76 | depool_type = "default_v3" 77 | # Minimal participant stake in nano EVERs 78 | min_stake = "10000000000" 79 | # Validator assurance in nano EVERs 80 | validator_assurance = "50000000000000" 81 | # Participant reward fraction, 1..=95 82 | participant_reward_fraction = 95 83 | # stEVER cluster address 84 | cluster = "0:86ea048f599734f266d3267a66941cd218dfb8120e4eca8cc055fdba8413fade" 85 | 86 | ## OPTIONAL: 87 | 88 | # Whether to overwrite existing validation config. Default: `false`. 89 | overwrite = false 90 | # Whether to overwrite existing validator keys. Default: `false`. 91 | overwrite_validator_keys = false 92 | # Whether to overwrite existing DePool keys. Default: `false`. 
93 | overwrite_depool_keys = false 94 | -------------------------------------------------------------------------------- /templates/single.toml: -------------------------------------------------------------------------------- 1 | [general] 2 | node_db_path = "/var/ever/db" 3 | global_config = "ever_mainnet" 4 | node_repo = "https://github.com/everx-labs/ever-node" 5 | 6 | [adnl] 7 | port = 30100 8 | 9 | [validator] 10 | type = "single" 11 | stake_per_round = "100000000000000" 12 | -------------------------------------------------------------------------------- /templates/stever.toml: -------------------------------------------------------------------------------- 1 | [general] 2 | node_db_path = "/var/ever/db" 3 | global_config = "ever_mainnet" 4 | node_repo = "https://github.com/everx-labs/ever-node" 5 | 6 | [adnl] 7 | port = 30100 8 | 9 | [validator] 10 | type = "depool" 11 | depool_type = "stever_v2" 12 | min_stake = "10000000000" # 10 EVER 13 | validator_assurance = "50000000000000" # 50000 EVER 14 | participant_reward_fraction = 95 15 | 16 | # Main cluster, must be replaced with required 17 | cluster = "0:86ea048f599734f266d3267a66941cd218dfb8120e4eca8cc055fdba8413fade" 18 | --------------------------------------------------------------------------------