├── .github └── ISSUE_TEMPLATE │ └── tip--themelio-improvement-proposal-.md ├── .gitignore ├── .vscode └── settings.json ├── Cargo.lock ├── Cargo.toml ├── GENESIS.yaml ├── LICENSE-MPL-2.0 ├── Metrics.md ├── README.md ├── diagram.drawio ├── diagram.png ├── rust-toolchain ├── src ├── README.md ├── args.rs ├── autoretry.rs ├── bin │ ├── melnode-db-migrate.rs │ └── melsimnet.rs ├── lib.rs ├── main.rs ├── node.rs ├── node │ ├── blksync.rs │ └── indexer.rs ├── staker.rs └── storage │ ├── mempool.rs │ ├── mod.rs │ ├── smt.rs │ └── storage.rs └── staker-config.yaml /.github/ISSUE_TEMPLATE/tip--themelio-improvement-proposal-.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: MIP (Mel Improvement Proposal) 3 | about: A formalized proposed change to the Mel protocol or tooling 4 | title: "" 5 | labels: "" 6 | assignees: "" 7 | --- 8 | 9 | | | | 10 | | -------- | --- | 11 | | TIP | | 12 | | Category | | 13 | | Author | | 14 | 15 | ## Summary 16 | 17 | ## Motivation 18 | 19 | ## Proposed changes 20 | 21 | ## Deployment 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/.idea/** 3 | .DS_Store 4 | sql.db 5 | perf.data 6 | perf.data* 7 | /results 8 | *.output -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "spellright.language": [ 3 | "en_CA" 4 | ], 5 | "spellright.documentTypes": [ 6 | "markdown", 7 | "latex", 8 | "plaintext" 9 | ] 10 | } -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "melnode" 3 | version = "0.20.8" 4 | repository ="https://github.com/mel-project/melnode" 5 | license = "MPL-2.0" 6 | description = "Reference implementation of Mel" 7 | edition = "2021" 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | [profile.dev] 11 | panic = "abort" 12 | opt-level = 1 13 | overflow-checks = true 14 | 15 | [profile.release] 16 | panic = "abort" 17 | opt-level = 3 18 | overflow-checks = true 19 | debug = 0 20 | # lto = 'thin' 21 | # codegen-units = 1 22 | 23 | [patch.crates-io] 24 | # melstf={path="../melstf"} 25 | # tip911-stakeset={path="../melstf/lib/tip911-stakeset"} 26 | # # melnet={path="../melnet"} 27 | # melprot={path="../melprot"} 28 | 29 | [profile.release-dbg] 30 | inherits = "release" 31 | debug = 2 32 | panic = "abort" 33 | lto = false 34 | codegen-units = 32 35 | 36 | [dependencies.gethostname] 37 | version = "0.2.3" 38 | optional = true 39 | 40 | [dependencies.prometheus] 41 | version = "0.13.3" 42 | optional = true 43 | 44 | [dependencies.public-ip] 45 | version = "0.2.2" 46 | optional = true 47 | 48 | [dependencies.reqwest] 49 | version = "0.11.16" 50 | features = ["json", "rustls-tls"] 51 | default-features = false 52 | optional = true 53 | 54 | 55 | 56 | [dependencies] 57 | # melprot = { git = "https://github.com/themeliolabs/melnodeprot", branch = "use-themelio-structs-0.3-beta" } 58 | # melprot = { git = "https://github.com/themeliolabs/melprot", branch = "master" } 59 | 60 | anyhow = "1.0.70" 61 | # base64 = "0.13.0" 62 | arc-swap = "1.6.0" 63 | 64 | dashmap = "4.0.2" 65 | default-net = { version = "0.6.0", optional = true } 66 | 
env_logger = "0.9.3" 67 | ethnum = "1.3.2" 68 | fastrand = "1.9.0" 69 | futures-util = "0.3.27" 70 | hex = "0.4.3" 71 | imbl = { version = "1.0.1", features = ["serde"] } 72 | log = "0.4.17" 73 | lru = "0.7.8" 74 | 75 | # meshanina = {path="../meshanina"} 76 | meshanina = "0.4.2" 77 | novasmt = "0.2.20" 78 | # novasymph = "0.3.1" 79 | # novasymph={path="../novasymph"} 80 | once_cell = "1.17.1" 81 | parking_lot = "0.12.1" 82 | serde = "1.0.158" 83 | serde_json = { version = "1.0.95", features = ["arbitrary_precision"] } 84 | defmac = "0.2.1" 85 | smol = "1.3.0" 86 | smolscale = "0.3.52" 87 | smol-timeout = "0.6.0" 88 | stdcode = "0.1.14" 89 | 90 | tap = "1.0.1" 91 | # tracing = "0.1.34" 92 | #themelio-stf = { path = "../themelio-stf" } 93 | #tip911-stakeset = { path = "../themelio-stf/lib/tip911-stakeset" } 94 | # tip911-stakeset = { git = "https://github.com/themeliolabs/themelio-stf", branch = "applytx-refactors" } 95 | # themelio-stf = { git = "https://github.com/themeliolabs/themelio-stf", branch = "applytx-refactors" } 96 | # melvm = { git = "https://github.com/themeliolabs/themelio-stf", branch = "applytx-refactors" } 97 | 98 | tmelcrypt = "0.2.7" 99 | 100 | dhat = { version = "0.3.2", optional = true } 101 | async-trait = "0.1.68" 102 | clone-macro = "0.1.0" 103 | lz4_flex = "0.9.5" 104 | serde_with = "1.14.0" 105 | serde_yaml = "0.8.26" 106 | dirs = "4.0.0" 107 | jemallocator-global = "0.3.2" 108 | melnet2 = "0.3.1" 109 | nanorpc = "0.1.12" 110 | moka = "0.9.7" 111 | event-listener = "2.5.3" 112 | crossbeam-queue = "0.3.8" 113 | libc = "0.2.140" 114 | streamlette = "0.2.7" 115 | thiserror = "1.0.40" 116 | bytes = "1.4.0" 117 | async-oneshot = "0.5.0" 118 | rusqlite = { version = "0.28.0", features = ["bundled"] } 119 | scopeguard = "1.1.0" 120 | base64 = "0.21.0" 121 | 122 | clap = { version = "4.1.14", features = ["derive", "cargo", "unicode"] } 123 | melbootstrap = "0.8.3" 124 | melstructs = "0.3.2" 125 | melblkidx = "0.7.4" 126 | melstf = "0.12.3" 127 | tip911-stakeset = "0.0.2" 128 | melvm = "0.1.0" 129 | melprot = "0.13.4" 130 | #lz4_flex = "0.9.3" 131 | 132 | # [target.'cfg(unix)'.dependencies] 133 | # mimalloc = "0.1.28" 134 | -------------------------------------------------------------------------------- /GENESIS.yaml: -------------------------------------------------------------------------------- 1 | network: custom02 # anything from custom02..custom08 2 | # specifies the "initial stash" of money in the genesis block 3 | init_coindata: 4 | # what address gets the initial supply of money 5 | covhash: t5xw3qvzvfezkb748d3zt929zkbt7szgt6jr3zfxxnewj1rtajpjx0 6 | # how many units (in millionths) 7 | value: 1000000 8 | # denomination 9 | denom: MEL 10 | # additional data in the UTXO 11 | additional_data: "" 12 | # specifies all the stakers with consensus power. 
13 | # we need to specify ourselves in order to produce any blocks; "themelio-crypttool generate-ed25519" (install via cargo) can generate a keypair for us 14 | stakes: 15 | deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef: 16 | pubkey: 4ce983d241f1d40b0e5b65e0bd1a6877a35acaec5182f110810f1276103c829e 17 | e_start: 0 18 | e_post_end: 100000 # essentially never end the stake 19 | syms_staked: 10000 # does not matter 20 | # Initial fee pool 21 | init_fee_pool: 10000 22 | # Initial fee multiplier 23 | init_fee_multiplier: 1 24 | -------------------------------------------------------------------------------- /LICENSE-MPL-2.0: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | 1. Definitions 5 | -------------- 6 | 7 | 1.1. "Contributor" 8 | means each individual or legal entity that creates, contributes to 9 | the creation of, or owns Covered Software. 10 | 11 | 1.2. "Contributor Version" 12 | means the combination of the Contributions of others (if any) used 13 | by a Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | means Covered Software of a particular Contributor. 17 | 18 | 1.4. "Covered Software" 19 | means Source Code Form to which the initial Contributor has attached 20 | the notice in Exhibit A, the Executable Form of such Source Code 21 | Form, and Modifications of such Source Code Form, in each case 22 | including portions thereof. 23 | 24 | 1.5. "Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. "Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. "Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. "Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. "Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. "Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. 
"You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. Limitations on Grant Scope 111 | 112 | The licenses granted in this Section 2 are the only rights granted under 113 | this License. No additional rights or licenses will be implied from the 114 | distribution or licensing of Covered Software under this License. 115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 116 | Contributor: 117 | 118 | (a) for any code that a Contributor has removed from Covered Software; 119 | or 120 | 121 | (b) for infringements caused by: (i) Your and any other third party's 122 | modifications of Covered Software, or (ii) the combination of its 123 | Contributions with other software (except as part of its Contributor 124 | Version); or 125 | 126 | (c) under Patent Claims infringed by Covered Software in the absence of 127 | its Contributions. 128 | 129 | This License does not grant any rights in the trademarks, service marks, 130 | or logos of any Contributor (except as may be necessary to comply with 131 | the notice requirements in Section 3.4). 132 | 133 | 2.4. Subsequent Licenses 134 | 135 | No Contributor makes additional grants as a result of Your choice to 136 | distribute the Covered Software under a subsequent version of this 137 | License (see Section 10.2) or under the terms of a Secondary License (if 138 | permitted under the terms of Section 3.3). 139 | 140 | 2.5. Representation 141 | 142 | Each Contributor represents that the Contributor believes its 143 | Contributions are its original creation(s) or it has sufficient rights 144 | to grant the rights to its Contributions conveyed by this License. 145 | 146 | 2.6. Fair Use 147 | 148 | This License is not intended to limit any rights You have under 149 | applicable copyright doctrines of fair use, fair dealing, or other 150 | equivalents. 151 | 152 | 2.7. Conditions 153 | 154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 155 | in Section 2.1. 156 | 157 | 3. Responsibilities 158 | ------------------- 159 | 160 | 3.1. 
Distribution of Source Form 161 | 162 | All distribution of Covered Software in Source Code Form, including any 163 | Modifications that You create or to which You contribute, must be under 164 | the terms of this License. You must inform recipients that the Source 165 | Code Form of the Covered Software is governed by the terms of this 166 | License, and how they can obtain a copy of this License. You may not 167 | attempt to alter or restrict the recipients' rights in the Source Code 168 | Form. 169 | 170 | 3.2. Distribution of Executable Form 171 | 172 | If You distribute Covered Software in Executable Form then: 173 | 174 | (a) such Covered Software must also be made available in Source Code 175 | Form, as described in Section 3.1, and You must inform recipients of 176 | the Executable Form how they can obtain a copy of such Source Code 177 | Form by reasonable means in a timely manner, at a charge no more 178 | than the cost of distribution to the recipient; and 179 | 180 | (b) You may distribute such Executable Form under the terms of this 181 | License, or sublicense it under different terms, provided that the 182 | license for the Executable Form does not attempt to limit or alter 183 | the recipients' rights in the Source Code Form under this License. 184 | 185 | 3.3. Distribution of a Larger Work 186 | 187 | You may create and distribute a Larger Work under terms of Your choice, 188 | provided that You also comply with the requirements of this License for 189 | the Covered Software. If the Larger Work is a combination of Covered 190 | Software with a work governed by one or more Secondary Licenses, and the 191 | Covered Software is not Incompatible With Secondary Licenses, this 192 | License permits You to additionally distribute such Covered Software 193 | under the terms of such Secondary License(s), so that the recipient of 194 | the Larger Work may, at their option, further distribute the Covered 195 | Software under the terms of either this License or such Secondary 196 | License(s). 197 | 198 | 3.4. Notices 199 | 200 | You may not remove or alter the substance of any license notices 201 | (including copyright notices, patent notices, disclaimers of warranty, 202 | or limitations of liability) contained within the Source Code Form of 203 | the Covered Software, except that You may alter any license notices to 204 | the extent required to remedy known factual inaccuracies. 205 | 206 | 3.5. Application of Additional Terms 207 | 208 | You may choose to offer, and to charge a fee for, warranty, support, 209 | indemnity or liability obligations to one or more recipients of Covered 210 | Software. However, You may do so only on Your own behalf, and not on 211 | behalf of any Contributor. You must make it absolutely clear that any 212 | such warranty, support, indemnity, or liability obligation is offered by 213 | You alone, and You hereby agree to indemnify every Contributor for any 214 | liability incurred by such Contributor as a result of warranty, support, 215 | indemnity or liability terms You offer. You may include additional 216 | disclaimers of warranty and limitations of liability specific to any 217 | jurisdiction. 218 | 219 | 4. 
Inability to Comply Due to Statute or Regulation 220 | --------------------------------------------------- 221 | 222 | If it is impossible for You to comply with any of the terms of this 223 | License with respect to some or all of the Covered Software due to 224 | statute, judicial order, or regulation then You must: (a) comply with 225 | the terms of this License to the maximum extent possible; and (b) 226 | describe the limitations and the code they affect. Such description must 227 | be placed in a text file included with all distributions of the Covered 228 | Software under this License. Except to the extent prohibited by statute 229 | or regulation, such description must be sufficiently detailed for a 230 | recipient of ordinary skill to be able to understand it. 231 | 232 | 5. Termination 233 | -------------- 234 | 235 | 5.1. The rights granted under this License will terminate automatically 236 | if You fail to comply with any of its terms. However, if You become 237 | compliant, then the rights granted under this License from a particular 238 | Contributor are reinstated (a) provisionally, unless and until such 239 | Contributor explicitly and finally terminates Your grants, and (b) on an 240 | ongoing basis, if such Contributor fails to notify You of the 241 | non-compliance by some reasonable means prior to 60 days after You have 242 | come back into compliance. Moreover, Your grants from a particular 243 | Contributor are reinstated on an ongoing basis if such Contributor 244 | notifies You of the non-compliance by some reasonable means, this is the 245 | first time You have received notice of non-compliance with this License 246 | from such Contributor, and You become compliant prior to 30 days after 247 | Your receipt of the notice. 248 | 249 | 5.2. If You initiate litigation against any entity by asserting a patent 250 | infringement claim (excluding declaratory judgment actions, 251 | counter-claims, and cross-claims) alleging that a Contributor Version 252 | directly or indirectly infringes any patent, then the rights granted to 253 | You by any and all Contributors for the Covered Software under Section 254 | 2.1 of this License shall terminate. 255 | 256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 257 | end user license agreements (excluding distributors and resellers) which 258 | have been validly granted by You or Your distributors under this License 259 | prior to termination shall survive termination. 260 | 261 | ************************************************************************ 262 | * * 263 | * 6. Disclaimer of Warranty * 264 | * ------------------------- * 265 | * * 266 | * Covered Software is provided under this License on an "as is" * 267 | * basis, without warranty of any kind, either expressed, implied, or * 268 | * statutory, including, without limitation, warranties that the * 269 | * Covered Software is free of defects, merchantable, fit for a * 270 | * particular purpose or non-infringing. The entire risk as to the * 271 | * quality and performance of the Covered Software is with You. * 272 | * Should any Covered Software prove defective in any respect, You * 273 | * (not any Contributor) assume the cost of any necessary servicing, * 274 | * repair, or correction. This disclaimer of warranty constitutes an * 275 | * essential part of this License. No use of any Covered Software is * 276 | * authorized under this License except under this disclaimer. 
* 277 | * * 278 | ************************************************************************ 279 | 280 | ************************************************************************ 281 | * * 282 | * 7. Limitation of Liability * 283 | * -------------------------- * 284 | * * 285 | * Under no circumstances and under no legal theory, whether tort * 286 | * (including negligence), contract, or otherwise, shall any * 287 | * Contributor, or anyone who distributes Covered Software as * 288 | * permitted above, be liable to You for any direct, indirect, * 289 | * special, incidental, or consequential damages of any character * 290 | * including, without limitation, damages for lost profits, loss of * 291 | * goodwill, work stoppage, computer failure or malfunction, or any * 292 | * and all other commercial damages or losses, even if such party * 293 | * shall have been informed of the possibility of such damages. This * 294 | * limitation of liability shall not apply to liability for death or * 295 | * personal injury resulting from such party's negligence to the * 296 | * extent applicable law prohibits such limitation. Some * 297 | * jurisdictions do not allow the exclusion or limitation of * 298 | * incidental or consequential damages, so this exclusion and * 299 | * limitation may not apply to You. * 300 | * * 301 | ************************************************************************ 302 | 303 | 8. Litigation 304 | ------------- 305 | 306 | Any litigation relating to this License may be brought only in the 307 | courts of a jurisdiction where the defendant maintains its principal 308 | place of business and such litigation shall be governed by laws of that 309 | jurisdiction, without reference to its conflict-of-law provisions. 310 | Nothing in this Section shall prevent a party's ability to bring 311 | cross-claims or counter-claims. 312 | 313 | 9. Miscellaneous 314 | ---------------- 315 | 316 | This License represents the complete agreement concerning the subject 317 | matter hereof. If any provision of this License is held to be 318 | unenforceable, such provision shall be reformed only to the extent 319 | necessary to make it enforceable. Any law or regulation which provides 320 | that the language of a contract shall be construed against the drafter 321 | shall not be used to construe this License against a Contributor. 322 | 323 | 10. Versions of the License 324 | --------------------------- 325 | 326 | 10.1. New Versions 327 | 328 | Mozilla Foundation is the license steward. Except as provided in Section 329 | 10.3, no one other than the license steward has the right to modify or 330 | publish new versions of this License. Each version will be given a 331 | distinguishing version number. 332 | 333 | 10.2. Effect of New Versions 334 | 335 | You may distribute the Covered Software under the terms of the version 336 | of the License under which You originally received the Covered Software, 337 | or under the terms of any subsequent version published by the license 338 | steward. 339 | 340 | 10.3. Modified Versions 341 | 342 | If you create software not governed by this License, and you want to 343 | create a new license for such software, you may create and use a 344 | modified version of this License if you rename the license and remove 345 | any references to the name of the license steward (except to note that 346 | such modified license differs from this License). 347 | 348 | 10.4. 
Distributing Source Code Form that is Incompatible With Secondary 349 | Licenses 350 | 351 | If You choose to distribute Source Code Form that is Incompatible With 352 | Secondary Licenses under the terms of this version of the License, the 353 | notice described in Exhibit B of this License must be attached. 354 | 355 | Exhibit A - Source Code Form License Notice 356 | ------------------------------------------- 357 | 358 | This Source Code Form is subject to the terms of the Mozilla Public 359 | License, v. 2.0. If a copy of the MPL was not distributed with this 360 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 361 | 362 | If it is not possible or desirable to put the notice in a particular 363 | file, then You may include the notice in a location (such as a LICENSE 364 | file in a relevant directory) where a recipient would be likely to look 365 | for such a notice. 366 | 367 | You may add additional accurate notices of copyright ownership. 368 | 369 | Exhibit B - "Incompatible With Secondary Licenses" Notice 370 | --------------------------------------------------------- 371 | 372 | This Source Code Form is "Incompatible With Secondary Licenses", as 373 | defined by the Mozilla Public License, v. 2.0. 374 | -------------------------------------------------------------------------------- /Metrics.md: -------------------------------------------------------------------------------- 1 | # Metrics 2 | 3 | To enable metrics, you will need to include the `metrics` feature: 4 | ``` 5 | $ cargo build --locked --release --features metrics 6 | ``` 7 | 8 | You can also use the pre-built docker image, which includes the metrics webserver. 9 | 10 | 11 | ## Prometheus Details 12 | 13 | The `metrics` feature enables a webserver that runs on port `8080`, with the default prometheus endpoint of `/metrics`. 
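Once a node built with the `metrics` feature is running, you can sanity-check the endpoint by hand (assuming the default port on the local machine):
```
$ curl http://localhost:8080/metrics
```
A minimal Prometheus scrape configuration pointing at it might look like the following sketch; the job name and target host below are placeholders, not something this repository ships:
```yaml
scrape_configs:
  - job_name: "melnode"
    static_configs:
      - targets: ["localhost:8080"]
```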
14 | 15 | 16 | Example output is as follows: 17 | ``` 18 | # HELP themelio_node_cpu_load_idle_percentage Idle CPU Load (Percentage) 19 | # TYPE themelio_node_cpu_load_idle_percentage gauge 20 | themelio_node_cpu_load_idle_percentage{hostname="hostname-goes-here",network="mainnet"} 98.23587036132813 21 | # HELP themelio_node_cpu_load_system_percentage System CPU Load (Percentage) 22 | # TYPE themelio_node_cpu_load_system_percentage gauge 23 | themelio_node_cpu_load_system_percentage{hostname="hostname-goes-here",network="mainnet"} 0.009037130512297153 24 | # HELP themelio_node_cpu_load_user_percentage User CPU Load (Percentage) 25 | # TYPE themelio_node_cpu_load_user_percentage gauge 26 | themelio_node_cpu_load_user_percentage{hostname="hostname-goes-here",network="mainnet"} 1.7550925016403198 27 | # HELP themelio_node_highest_block Highest Block 28 | # TYPE themelio_node_highest_block gauge 29 | themelio_node_highest_block{hostname="hostname-goes-here",network="mainnet"} 108518 30 | # HELP themelio_node_memory_free_bytes Free Memory (In Bytes) 31 | # TYPE themelio_node_memory_free_bytes gauge 32 | themelio_node_memory_free_bytes{hostname="hostname-goes-here",network="mainnet"} 19658530816 33 | # HELP themelio_node_memory_total_bytes Total Memory (In Bytes) 34 | # TYPE themelio_node_memory_total_bytes gauge 35 | themelio_node_memory_total_bytes{hostname="hostname-goes-here",network="mainnet"} 33531518976 36 | # HELP themelio_node_network_received_bytes Network Data Received (In Bytes) 37 | # TYPE themelio_node_network_received_bytes gauge 38 | themelio_node_network_received_bytes{hostname="hostname-goes-here",network="mainnet"} 13112110718 39 | # HELP themelio_node_network_transmitted_bytes Network Data Transmitted (In Bytes) 40 | # TYPE themelio_node_network_transmitted_bytes gauge 41 | themelio_node_network_transmitted_bytes{hostname="hostname-goes-here",network="mainnet"} 9058541586 42 | # HELP themelio_node_root_filesystem_free_bytes Root Filesystem Free Space (In Bytes) 43 | # TYPE themelio_node_root_filesystem_free_bytes gauge 44 | themelio_node_root_filesystem_free_bytes{hostname="hostname-goes-here",network="mainnet"} 211633070080 45 | # HELP themelio_node_root_filesystem_total_bytes Root Filesystem Total Space (In Bytes) 46 | # TYPE themelio_node_root_filesystem_total_bytes gauge 47 | themelio_node_root_filesystem_total_bytes{hostname="hostname-goes-here",network="mainnet"} 315993423872 48 | # HELP themelio_node_uptime_seconds Uptime (In Seconds) 49 | # TYPE themelio_node_uptime_seconds gauge 50 | themelio_node_uptime_seconds{hostname="hostname-goes-here",network="mainnet"} 1637959 51 | ``` -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # melnode: Mel's reference implementation 2 | 3 | [![](https://img.shields.io/crates/v/melnode)](https://crates.io/crates/melnode) 4 | ![](https://img.shields.io/crates/l/melnode) 5 | 6 | [Mel](https://melproject.org) is a new public blockchain focused on security, performance, and long-term stability. `melnode` is Mel's reference implementation in Rust. 7 | 8 | ## Installation 9 | 10 | For security reasons, until we have reliable reproducible build infrastructure, the only official distribution channel for `melnode` is its source code package on [crates.io](https://crates.io). 11 | 12 | Fortunately, installing `melnode` from source is extremely easy.
First, make sure [cargo](https://doc.rust-lang.org/cargo/getting-started/installation.html) is installed on your machine. Then, simply run 13 | 14 | ``` 15 | $ cargo install --locked melnode 16 | ``` 17 | 18 | This produces an executable `melnode` in `~/.cargo/bin/`, which should already be in your `$PATH`. 19 | 20 | ## Hardware Requirements 21 | 22 | ### Minimum: 23 | 24 | - 1-core CPU 25 | - 4GB of RAM 26 | - at least 200GB of free storage (SSD not necessary) 27 | - 10 Mbit/sec download Internet service 28 | 29 | ### Recommended: 30 | 31 | - 4+ core CPU 32 | - 16GB of RAM 33 | - 200+GB of free storage on a fast device (SSD, RAID array, etc) 34 | - 50+ Mbit/sec up/download Internet service 35 | 36 | Full nodes replicate every consensus-confirmed block, validating its contents and ensuring network security while providing a local cache of the entire blockchain state. Running a full node helps contribute to the security and (read) performance of the network. 37 | 38 | There are two kinds of full nodes: 39 | 40 | - **Replica nodes** comprise the vast majority of full nodes. They replicate and verify blocks but do not vote in consensus. 41 | - **Staker nodes**, the ultimate guardians of Mel security, have SYM locked up and participate in consensus. They are analogous to miners in proof-of-work blockchains like Bitcoin. 42 | 43 | ## Replica Full Node 44 | 45 | ### On the Mel Mainnet: 46 | 47 | To run a replica on the “mainnet” (which at the moment is far from stable, but does have a persistent history), just run: 48 | 49 | ``` 50 | $ melnode 51 | ``` 52 | 53 | `melnode` will then begin synchronizing all the blocks in the blockchain. This will take quite a while (a day or so) and store a bunch of data in `~/.melnode/`. 54 | 55 | ### On the Mel Testnet: 56 | 57 | To run the replica on the non-persistent testnet, where most covenant development and testing will happen during the betanet period, run instead: 58 | 59 | ``` 60 | $ melnode --bootstrap tm-1.themelio.org:11814 --testnet 61 | ``` 62 | 63 | Note that two things are needed to connect to the testnet: 64 | 65 | - Connecting to a testnet bootstrap node (this is the first server your full node talks to) 66 | - Specifying --testnet, to use testnet validation rules 67 | 68 | ### Configurations 69 | 70 | You can change the configuration of a replica node with the following flags: 71 | 72 | ``` 73 | --bootstrap ... 74 | Bootstrap addresses. May be given as a DNS name [default: mainnet-bootstrap.themelio.org:11814] 75 | 76 | --database Database path [default: ~/.melnode/] 77 | 78 | --listen Listen address 79 | 80 | --testnet Use testnet validation rules 81 | 82 | --override-genesis 83 | If given, uses this YAML file to configure the network genesis rather than following the known 84 | testnet/mainnet genesis 85 | ``` 86 | 87 | ### Local simnet support 88 | 89 | **Note**: there will soon be a tool to automatically generate these configurations.
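In the meantime, this repository ships a rough generator: `src/bin/melsimnet.rs` writes a `genesis.yaml`, per-staker `staker-N.yaml` files, and `run-staker-N.sh`/`run-all.sh` scripts. A hypothetical invocation (the exact stake-amount format is an assumption) would be something like:
```
$ cargo run --bin melsimnet -- create --stake 10000 --stake 10000
```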
90 | 91 | We can configure a simnet --- a "fake" network local to our computer --- by combining three options: 92 | 93 | - `--bootstrap 127.0.0.1:11814` to bootstrap only with ourselves instead of any remote node 94 | - `--override-genesis network-config.yaml`, where `network-config.yaml` contains configuration for a _custom network_ of the following form: 95 | 96 | ```yaml 97 | network: custom02 # anything from custom02..custom08 98 | # specifies the "initial stash" of money in the genesis block 99 | init_coindata: 100 | # what address gets the initial supply of money 101 | covhash: t5xw3qvzvfezkb748d3zt929zkbt7szgt6jr3zfxxnewj1rtajpjx0 102 | # how many units (in millionths) 103 | value: 1000000 104 | # denomination 105 | denom: MEL 106 | # additional data in the UTXO, as a hex string 107 | additional_data: "" 108 | # specifies all the stakers with consensus power. 109 | # we need to specify ourselves in order to produce any blocks; "themelio-crypttool generate-ed25519" (install via cargo) can generate a keypair for us 110 | stakes: 111 | deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef: 112 | pubkey: 4ce983d241f1d40b0e5b65e0bd1a6877a35acaec5182f110810f1276103c829e 113 | e_start: 0 114 | e_post_end: 100000 # essentially never end the stake 115 | syms_staked: 10000 # does not matter 116 | # Initial fee pool 117 | init_fee_pool: 10000 118 | # Initial fee multiplier 119 | init_fee_multiplier: 1 120 | ``` 121 | 122 | - `--staker-cfg staker-config.yaml`, where `staker-config.yaml` contains a _staker_ configuration like this: 123 | 124 | ```yaml 125 | # secret key; must correspond to "stakes.dead[...]beef.pubkey" in the network config 126 | signing_secret: 5b4c8873cbdb089439d025e9fa817b1df1128231699131c245c0027be880d4d44ce983d241f1d40b0e5b65e0bd1a6877a35acaec5182f110810f1276103c829e 127 | # address for staker-network communication; this can be arbitrary 128 | listen: 127.0.0.1:20000 129 | # must be same as "listen" 130 | bootstrap: 127.0.0.1:20000 131 | # where block rewards are sent 132 | payout_addr: t5xw3qvzvfezkb748d3zt929zkbt7szgt6jr3zfxxnewj1rtajpjx0 133 | # vote for this fee multiplier (higher values charge more fees) 134 | target_fee_multiplier: 10000 135 | ``` 136 | 137 | A more detailed explanation of `--staker-cfg` requires some background on the staker and consensus system, covered in the next sections: 138 | 139 | ## Staker Full Node 140 | 141 | ### On the Mel Mainnet: 142 | 143 | WIP 144 | 145 | ### On the Mel Testnet: 146 | 147 | WIP 148 | 149 | ### Configuration 150 | 151 | WIP 152 | 153 | ## Contributing 154 | 155 | Thank you for considering contributing to the Mel source code! We welcome all contributions from absolutely anyone on the internet. 156 | 157 | For minor changes, simply fork, fix, and submit a pull request. If you want to propose a larger change, be sure to get in touch with our dev team on [Discord]() to make sure your change aligns with Mel's overarching philosophy. This will likely save you a significant amount of time and effort, as well as speed up the review process of your change.
158 | 159 | Finally, make sure your code adheres to the following guidelines: 160 | 161 | - Your code must adhere to the official Rust [style guide](https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/guide.md) 162 | - Your code must be documented per the official Rust [documentation guidelines](https://rust-lang.github.io/api-guidelines/documentation.html) 163 | - Pull requests must be based on and opened against the `master` branch 164 | - Commit messages must be descriptive 165 | - Any protocol changes (whether consensus-breaking or not) must start as a TIP (e.g. [TIP-101](https://github.com/themeliolabs/melnode/issues/87)) 166 | 167 | ## License 168 | 169 | The license of the project is the Mozilla Public License, version 2. 170 | 171 | --- 172 | 173 | 194 | 195 | 200 | -------------------------------------------------------------------------------- /diagram.drawio: -------------------------------------------------------------------------------- 1 | 7Vxbc5s4FP41nuk+2MMd/JikSbe7bTedzPTy1JFBttlgxIKc2PvrVwKEJSQbTMB2tnUeCkcg4Fy+850j6Mi8WW3epSBZfkQBjEaGFmxG5tuRYejm1CD/UMm2kEynWiFYpGFQHrQTPIT/wlLIDluHAcyEAzFCEQ4TUeijOIY+FmQgTdGzeNgcReJVE7CAkuDBB5Es/RoGeMnuTtvJf4fhYolrAyvAji0F2RIE6JkTmbcj8yZFCBdbq80NjKjumFqK8+72jFb3lcIYtznBTPCnz1H01fMev0XGzN582/wYm3YxzROI1uUDl3eLt0wDKVrHAaSz6CPz+nkZYviQAJ+OPhObE9kSr6JyeI5ifAdWYUTN/R5l8OkRkOmuQjrb9TyMohsUoZQMxiimogBky2ryDKfosdKzSSSLCGTUghodZRrUqkPZZCPDnDr0j4zIiil19QRTDDecqFTUO4hWEKdbckg5OmZOWjrtWDdLwTPnA3YpW3L2N9mZoPS7RTX5zjZkozTPEaaSLfWJBNt9ijDyUSRZjSgroZtJinxIVdhkuRnwHxe5rf9a4yikxjnSosQIgQ29wFKZxzNmptOTeXRDNI+jMI6uMI41lG0cyTYPGKUUWfaZxd8SFQcwNVsYpoi/D7MmS2UFeOr2sVab2/RPZTUn/5XzcfLi1481TUu0ZhVXnDkdhTXdoazpSta8jpD/+J5YYQPTnyrSTOfCIm3anK9gHFzRxE+jjGaP0BfVv8tnNI/ATYi/cdvfue23G36HqsCiUULUmW7zcyZutf+9nD3f2Z2Y722rnAVSzO5tCaL5TZj6uXHzkbuQ6qI8Lyj39PLy9zANiQphyo4ICE0pHxqleIkWKAbR7U7a4E57nSND69SHzWBH7ngB8YHjyoRFb/Sgq3G+pMqoTJbCCODwSaRlKv8qr3CPQvJku5wxNSa2iDN1ACmevDyPZ1K1qQyncapCOdJUuctXj949Clj+ExIOwCTd3C3DjKSerRwVxAwfwIywcyEYQBQuYhopxA2ob11TbAgJ/b0qB1ZhEOQOk0KSX8Asn486YEIfLn9c+3pkv+3gcAcDvA5RFcMvb2HEs2gVdGkT3TNcwUxGL55ki5OOaxOg+TyDg1hdl63eE/jpncDP0QwB/SaabV0UAubXrp92blh0LwoWLcetYZmh2d1g0dIkWDRPDIuGmrllrxYNq5DvAw4NT0Qu62UedALAk+35Ea4SdKDcTalK4kWu40FouAigCQcu1aU5xJFYO4De3FdWWr4HZ/Nh6mND0b3wbBlTBmPteos2E5e5ghCsUBzU80XZMdqbzQ4bjktcR5F2ZRLZn61aZqZolj8Sw5cXZR4WJo2px7mo1KNLNLqqNo9m5Lo0lX7a1KPLTYOeuJlQmBauq2RnGu/jOu/hnMP3TcvOVoS6LV3+sopQo9ZXtoyJZ3VzedN2JszHWCViTCdT/qfXmNzQMSA3Z27ItYgkIkUpIWFOhGneTcnWgm69SWg7TQNBQOhU9tvrZWks+HtgaYZuub34mieeMBxLq/gJtzIBn2Vr4xTEGfBxiGIFId9h44xS9hfkeTm1VlA6MWwOTfUGLD0upXfHsrYYtfMR03QlQndC2DK0jnlatzw1MW1AqFMWHYYpufPVOggxolB1b9zLrhtFYZLlyZIt7kRoHTQXIMd4dX3FZjp3YaCqI0zHnJpBT3WELtp97Mp1RFVr8H7nDVVIsJqVM86bp5DqKUYBTFKEFVlERbsExtOCjR2POVy5IdQb2mTaRMeUJccOxHgM29+tOxFqNTIwFk8XSsHqCNSaf9VWLaue/Kl6XXJJ/YDBI0x/yjcCxq5Y/qnWKVVANVjHw7iIerBrXTdQJ2VANGrdAzEuqyKsLSbp047Nd1dEI73O0IZGI09y909wg8cFl5dKAR/EQRgADF9vZ97osebre6FyeJJsGpK974jilxF9rtk6jOhtzPasvQxa6u3BJq0Jm5SU69iubkXR3PZ1ZnfMM9pSsLZ9X5ZMKWW1HSmDDrkMOZ14Ot+6EtFMJzHidANGy51o/M8RA02vt8xaweZJQ02uRwui99OVo2Pv0spR80A5+gSy7SpZ/qpHT1SPtkZD87IKUt0TS5eub6XpniMgnS5CaA0+B+aDpuqNaApa4yyBfjgnzl0PCwICWPR4EUzK1V/FJwTtuaEKAcW4Er4x4D8+EL5TaABHMsq8nB39wL2b3QcUWqLTjKtWXcNL0/V2RxcoHP8JH56+3Gqbd5/d2J/+/cf8x5exTAjzRYDzkMAdNFkuT8Qmmt60gHo845NZY4m0hEdRviFgLSmUOmCtErCGbds5Z8VEu/5Wml5fz28Ni3ZtyaHe8ukPCdVx0eLVF9pkS/ZAwL5yUmH9vWhhSmhhyOtHSuZkDwUXikUWSS0NfBXQVELvcx5ucvLZApULFJ5WIaVQN6/WAxY9GBoqVdb7w72pUuag169dlWwaTU5qJ1Xtca+t/XqpZ793HMxdB4GTz2iH/J/PaPsh51wZzRRLV9Pp2Ocd6xpJjTzNF3m+VD4MnN5knn+/jiLC9zS+68sFjPPPmn7xnHvJuPhikSC/Nk02ua
+wYdYpDgCmrjVP0UoOvXO3itviYJsMvh9/Ttg4LicWu2+M2vfa6lKHvPyBy6tnBbVl5nNlMsVLWv87lnApqpYJGcNCqpKXoOH9OlvCrFdkxaif6dgsRGXFUzLxOQp/4Xu2Rnp1ZMlfNVKPo2OnpkXMvxt50Xnbn2P5M4DOLdB8rv09UKvlong/qztkd/efuxSH7/6HHPP2Pw== -------------------------------------------------------------------------------- /diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mel-project/melnode/22e2239a7421e9f7e4e1909abad01a95d28b8c65/diagram.png -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | stable 2 | -------------------------------------------------------------------------------- /src/README.md: -------------------------------------------------------------------------------- 1 | The code is organized using the Actor pattern, using `Actor` defined in `common.rs`. 2 | 3 | There are the following actors: 4 | 5 | - `Auditor` handles network requests on the replica network. It periodicially syncs state with neighbors. It handles internal requests to forward transactions and consume new blocks. 6 | 7 | - `Stakeholder` holds keys and participates in symphonia consensus. It uses a timer to wait 30 seconds, grab the current state from `Auditor` to form a block proposal, and run the consensus algorithm. When a block is decided, it is pushed to `Storage` with its consensus proof. `Stakeholder` doesn't run at all if the node is not a stakeholder. 8 | 9 | As well as `Storage`, which is not an actor and does the following functions: 10 | 11 | - Save every new block with its consensus proof, pruning way-too-old blocks 12 | 13 | - Keep track of the current state 14 | 15 | - Sync to disk atomically periodically 16 | 17 | - Load from disk on creation 18 | 19 | ### RPC verbs 20 | 21 | - `newtx` takes in a Transaction and returns a bool 22 | 23 | - `gettx` takes in a txhash and returns a transaction 24 | 25 | - `newblk` takes in a NewBlockReq and returns a NewBlockResp. Req includes a consensus proof, a header, and a list of TXIDs. Resp is either a confirmation, or a list of transactions that are missing. Req should be repeated with a list of missing transactions attached. 26 | -------------------------------------------------------------------------------- /src/args.rs: -------------------------------------------------------------------------------- 1 | use crate::storage::Storage; 2 | 3 | use std::{net::SocketAddr, path::PathBuf}; 4 | 5 | use anyhow::Context; 6 | use clap::Parser; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use melstf::GenesisConfig; 10 | use melstructs::Address; 11 | use tap::Tap; 12 | use tmelcrypt::Ed25519SK; 13 | 14 | #[derive(Debug, Parser)] 15 | #[command(author, version, about, long_about = None)] 16 | /// Command-line arguments. 17 | pub struct MainArgs { 18 | /// Listen address 19 | #[arg(long, default_value = "0.0.0.0:41814")] 20 | listen: SocketAddr, 21 | 22 | /// Optional listen address for nodes using the legacy melnet protocol. 23 | #[arg(long)] 24 | legacy_listen: Option, 25 | 26 | /// Advertise address. Put your public IP address here. 27 | #[arg(long)] 28 | advertise: Option, 29 | 30 | /// Override bootstrap addresses. May be given as a DNS name. 
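/// Use the special value "auto" (the default) to pull bootstrap routes from melbootstrap for the configured network.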
31 | #[arg(long, default_value = "auto")] 32 | bootstrap: Vec<String>, 33 | 34 | /// Database path 35 | #[arg(long)] 36 | database: Option<PathBuf>, 37 | 38 | /// Path to a YAML staker configuration 39 | #[arg(long)] 40 | staker_cfg: Option<PathBuf>, 41 | 42 | /// If given, uses this YAML file to configure the network genesis rather than following the known testnet/mainnet genesis. 43 | #[arg(long)] 44 | override_genesis: Option<PathBuf>, 45 | 46 | /// If set to true, defaults to the testnet. Otherwise, mainnet validation rules are used. 47 | #[arg(long)] 48 | testnet: bool, 49 | 50 | /// If set to true, runs a self-test by replaying the history from genesis, ensuring that everything is correct 51 | #[arg(long)] 52 | pub self_test: bool, 53 | 54 | /// Create an in-memory coin index. **RPC endpoints that rely on this will be disabled if this is not set!** 55 | #[arg(long)] 56 | pub index_coins: bool, 57 | } 58 | 59 | /// Staker configuration, YAML-deserializable. 60 | #[derive(Clone, Debug, Deserialize, Serialize)] 61 | pub struct StakerConfig { 62 | /// ed25519 secret key of the staker 63 | #[serde(with = "serde_with::rust::display_fromstr")] 64 | pub signing_secret: Ed25519SK, 65 | /// Listen address for the staker. 66 | #[serde(with = "serde_with::rust::display_fromstr")] 67 | pub listen: SocketAddr, 68 | /// Bootstrap address into the staker network. 69 | #[serde(with = "serde_with::rust::display_fromstr")] 70 | pub bootstrap: SocketAddr, 71 | /// Payout address 72 | #[serde(with = "serde_with::rust::display_fromstr")] 73 | pub payout_addr: Address, 74 | /// Target fee multiplier 75 | pub target_fee_multiplier: u128, 76 | } 77 | 78 | impl MainArgs { 79 | /// Gets the advertised IP. 80 | pub fn advertise_addr(&self) -> Option<SocketAddr> { 81 | self.advertise 82 | } 83 | 84 | /// Derives the genesis configuration from the arguments 85 | pub async fn genesis_config(&self) -> anyhow::Result<GenesisConfig> { 86 | if let Some(path) = &self.override_genesis { 87 | let genesis_yaml: Vec<u8> = smol::fs::read(&path) 88 | .await 89 | .context("cannot read genesis config")?; 90 | Ok(serde_yaml::from_slice(&genesis_yaml) 91 | .context("error while parsing genesis config")?) 92 | } else if self.testnet { 93 | Ok(GenesisConfig::std_testnet()) 94 | } else { 95 | Ok(GenesisConfig::std_mainnet()) 96 | } 97 | } 98 | 99 | pub async fn storage(&self) -> anyhow::Result<Storage> { 100 | let genesis = self.genesis_config().await?; 101 | 102 | let database_default_path = dirs::home_dir().expect("no home dir?!").tap_mut(|p| { 103 | p.push(".melnode/"); 104 | }); 105 | let database_base_path = self.database.clone().unwrap_or(database_default_path); 106 | let _history_path = database_base_path 107 | .clone() 108 | .tap_mut(|path| path.push("history")); 109 | let _smt_path = database_base_path 110 | .clone() 111 | .tap_mut(|path| path.push("smt.db")); 112 | 113 | std::fs::create_dir_all(&database_base_path)?; 114 | let storage = Storage::open(database_base_path, genesis) 115 | .await 116 | .context("cannot make storage")?; 117 | 118 | log::debug!("node storage opened"); 119 | 120 | Ok(storage) 121 | } 122 | 123 | /// Derives a list of bootstrap addresses 124 | pub async fn bootstrap(&self) -> anyhow::Result<Vec<SocketAddr>> { 125 | if !self.bootstrap.is_empty() { 126 | let mut bootstrap = vec![]; 127 | for name in self.bootstrap.iter() { 128 | let addrs = if name == "auto" { 129 | melbootstrap::bootstrap_routes(self.genesis_config().await?.network) 130 | } else { 131 | smol::net::resolve(&name) 132 | .await 133 | .context("cannot resolve DNS bootstrap")?
134 | }; 135 | bootstrap.extend(addrs); 136 | } 137 | Ok(bootstrap) 138 | } else { 139 | Ok(melbootstrap::bootstrap_routes( 140 | self.genesis_config().await?.network, 141 | )) 142 | } 143 | } 144 | 145 | /// Listening address 146 | pub fn listen_addr(&self) -> SocketAddr { 147 | self.listen 148 | } 149 | 150 | /// Legacy listening address 151 | pub fn legacy_listen_addr(&self) -> Option { 152 | self.legacy_listen 153 | } 154 | 155 | /// Staker secret key 156 | pub async fn staker_cfg(&self) -> anyhow::Result> { 157 | if let Some(path) = self.staker_cfg.as_ref() { 158 | let s = std::fs::read_to_string(path)?; 159 | let lele: StakerConfig = serde_yaml::from_str(&s)?; 160 | Ok(Some(lele)) 161 | } else { 162 | Ok(None) 163 | } 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /src/autoretry.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Debug, time::Duration}; 2 | 3 | use futures_util::Future; 4 | 5 | /// Retries a function until it works, internally doing exponential backoff 6 | pub async fn autoretry>, Fun: FnMut() -> Fut>( 7 | mut f: Fun, 8 | ) -> T { 9 | let mut sleep_interval = Duration::from_millis(50); 10 | loop { 11 | match f().await { 12 | Ok(val) => return val, 13 | Err(err) => { 14 | log::warn!("autoretrying due to {:?}", err); 15 | smol::Timer::after(sleep_interval).await; 16 | sleep_interval *= 2; 17 | } 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/bin/melnode-db-migrate.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use clap::Parser; 4 | use melnode::storage::Storage; 5 | use melstf::GenesisConfig; 6 | use melstructs::{Block, ConsensusProof}; 7 | 8 | #[derive(Debug, Parser)] 9 | struct Args { 10 | #[arg(long)] 11 | old: PathBuf, 12 | 13 | #[arg(long)] 14 | new: PathBuf, 15 | } 16 | 17 | fn main() -> anyhow::Result<()> { 18 | smolscale::block_on(async move { 19 | let args = Args::parse(); 20 | let storage = Storage::open(args.new, GenesisConfig::std_mainnet()).await?; 21 | let directory = std::fs::read_dir(args.old)?; 22 | let mut paths = vec![]; 23 | for file in directory { 24 | let file = file?; 25 | paths.push(file.path()); 26 | eprintln!("touching {:?}", file.path()); 27 | } 28 | paths.sort_unstable(); 29 | let total = paths.len(); 30 | for file in paths { 31 | let raw_block = smol::fs::read(file).await?; 32 | let (blk, cproof): (Block, ConsensusProof) = stdcode::deserialize(&raw_block)?; 33 | eprintln!( 34 | "[{:.2}%] applying {}/{total}", 35 | 100.0 * blk.header.height.0 as f64 / total as f64, 36 | blk.header.height 37 | ); 38 | storage.apply_block(blk, cproof).await?; 39 | } 40 | Ok(()) 41 | }) 42 | } 43 | -------------------------------------------------------------------------------- /src/bin/melsimnet.rs: -------------------------------------------------------------------------------- 1 | use clap::{Args, Parser, Subcommand}; 2 | use melnode::args::StakerConfig; 3 | use std::fmt::Write; 4 | use std::net::{Ipv4Addr, SocketAddr}; 5 | 6 | use melstf::GenesisConfig; 7 | use melstructs::{Address, CoinData, CoinValue, Denom, NetID, StakeDoc, TxHash}; 8 | use stdcode::StdcodeSerializeExt; 9 | use tmelcrypt::{Ed25519SK, Hashable}; 10 | 11 | #[derive(Parser)] 12 | struct Command { 13 | #[command(subcommand)] 14 | command: Sub, 15 | } 16 | 17 | #[derive(Subcommand)] 18 | enum Sub { 19 | Create(CreateArgs), 20 | } 21 | 22 | 
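/// Arguments for the "create" subcommand: pass --stake once per simulated staker; each value becomes that staker's syms_staked in the generated genesis.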
#[derive(Args)] 23 | struct CreateArgs { 24 | #[arg(short, long)] 25 | stake: Vec, 26 | } 27 | 28 | fn main() -> anyhow::Result<()> { 29 | match Command::parse().command { 30 | Sub::Create(create) => main_create(create), 31 | } 32 | } 33 | 34 | fn main_create(create: CreateArgs) -> anyhow::Result<()> { 35 | let secrets: Vec = create.stake.iter().map(|_| Ed25519SK::generate()).collect(); 36 | let genesis_config = GenesisConfig { 37 | network: NetID::Custom02, 38 | init_coindata: CoinData { 39 | covhash: Address(Default::default()), 40 | value: Default::default(), 41 | denom: Denom::Mel, 42 | additional_data: Default::default(), 43 | }, 44 | stakes: create 45 | .stake 46 | .iter() 47 | .zip(secrets.iter()) 48 | .map(|(amount, key)| { 49 | ( 50 | TxHash(key.to_public().stdcode().hash()), 51 | StakeDoc { 52 | pubkey: key.to_public(), 53 | e_start: 0, 54 | e_post_end: 1000000, 55 | syms_staked: *amount, 56 | }, 57 | ) 58 | }) 59 | .collect(), 60 | init_fee_pool: 0.into(), 61 | init_fee_multiplier: 100, 62 | }; 63 | let staker_configs = 64 | create 65 | .stake 66 | .iter() 67 | .zip(secrets.iter()) 68 | .enumerate() 69 | .map(|(i, (_amount, key))| { 70 | let addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), (i + 5000) as u16); 71 | StakerConfig { 72 | signing_secret: *key, 73 | listen: addr, 74 | bootstrap: SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 5000), 75 | payout_addr: Address(Default::default()), 76 | target_fee_multiplier: 10000, 77 | } 78 | }); 79 | 80 | let mut run_all_script = 81 | "#!/bin/sh\ntrap \"trap - SIGTERM && kill -- -$$\" SIGINT SIGTERM EXIT\n".to_string(); 82 | for (i, config) in staker_configs.enumerate() { 83 | let yaml = serde_yaml::to_string(&config)?; 84 | std::fs::write(format!("staker-{i}.yaml"), yaml.as_bytes())?; 85 | let run_cmd = format!("#!/bin/sh\nmelnode --bootstrap 127.0.0.1:2000 --listen 127.0.0.1:{} --advertise 127.0.0.1:{} --override-genesis genesis.yaml --staker-cfg staker-{i}.yaml --database .database-{}", 2000+i, 2000+i, i); 86 | std::fs::write(format!("run-staker-{i}.sh"), run_cmd.as_bytes())?; 87 | 88 | writeln!(&mut run_all_script, "sh run-staker-{i}.sh &")?; 89 | } 90 | writeln!(&mut run_all_script, "wait")?; 91 | std::fs::write("run-all.sh", run_all_script.as_bytes())?; 92 | std::fs::write("genesis.yaml", &serde_yaml::to_vec(&genesis_config)?)?; 93 | Ok(()) 94 | } 95 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod args; 2 | 3 | pub mod node; 4 | 5 | pub mod autoretry; 6 | pub mod staker; 7 | pub mod storage; 8 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use melnode::{args::MainArgs, node::Node, staker::Staker, storage::Storage}; 2 | 3 | use anyhow::Context; 4 | 5 | use clap::Parser; 6 | use melnet2::{wire::http::HttpBackhaul, Swarm}; 7 | use melprot::{Client, CoinChange, NodeRpcClient}; 8 | use melstructs::{BlockHeight, CoinID}; 9 | 10 | #[cfg(feature = "dhat-heap")] 11 | #[global_allocator] 12 | static ALLOC: dhat::Alloc = dhat::Alloc; 13 | 14 | fn main() -> anyhow::Result<()> { 15 | if std::env::var("RUST_LOG").is_err() { 16 | std::env::set_var("RUST_LOG", "melnode=debug,warn"); 17 | } 18 | 19 | let mut builder = env_logger::Builder::from_env("RUST_LOG"); 20 | 21 | builder.init(); 22 | let opts = MainArgs::parse(); 23 | 24 | 
smolscale::block_on(main_async(opts)) 25 | } 26 | 27 | pub const VERSION: &str = env!("CARGO_PKG_VERSION"); 28 | 29 | /// Runs the main function for a node. 30 | pub async fn main_async(opt: MainArgs) -> anyhow::Result<()> { 31 | #[cfg(feature = "dhat-heap")] 32 | let _profiler = dhat::Profiler::new_heap(); 33 | 34 | log::info!("melnode v{} initializing...", VERSION); 35 | 36 | let genesis = opt.genesis_config().await?; 37 | let netid = genesis.network; 38 | let storage: Storage = opt.storage().await?; 39 | let bootstrap = opt.bootstrap().await?; 40 | 41 | log::info!("bootstrapping with {:?}", bootstrap); 42 | 43 | let swarm: Swarm = 44 | Swarm::new(HttpBackhaul::new(), NodeRpcClient, "melnode"); 45 | 46 | // we add the bootstrap routes as "sticky" routes that never expire 47 | for addr in bootstrap.iter() { 48 | swarm.add_route(addr.to_string().into(), true).await; 49 | } 50 | 51 | let _node_prot = Node::start( 52 | netid, 53 | opt.listen_addr(), 54 | opt.advertise_addr(), 55 | storage.clone(), 56 | opt.index_coins, 57 | swarm.clone(), 58 | ) 59 | .await?; 60 | 61 | let _staker_prot = opt 62 | .staker_cfg() 63 | .await? 64 | .map(|cfg| Staker::new(storage.clone(), cfg)); 65 | 66 | if opt.self_test { 67 | let storage = storage.clone(); 68 | 69 | let rpc_client = swarm 70 | .connect(opt.listen_addr().to_string().into()) 71 | .await 72 | .unwrap(); 73 | let client = Client::new(netid, rpc_client); 74 | 75 | client.dangerously_trust_latest().await.unwrap(); 76 | let snapshot = client.latest_snapshot().await.unwrap(); 77 | smolscale::spawn::>(async move { 78 | loop { 79 | log::info!("*** SELF TEST STARTED! ***"); 80 | let mut state = storage 81 | .get_state(BlockHeight(9)) 82 | .await 83 | .context("no block 1")?; 84 | let last_height = storage.highest_height().await.0; 85 | for bh in 10..=last_height { 86 | let bh = BlockHeight(bh); 87 | // let blk = storage.get_state(bh).await.context("no block")?.to_block(); 88 | let blk = storage.get_block(bh).await.context("no block")?; 89 | state = state.apply_block(&blk).expect("block application failed"); 90 | smol::future::yield_now().await; 91 | log::debug!( 92 | "{}/{} replayed correctly ({:.2}%)", 93 | bh, 94 | last_height, 95 | bh.0 as f64 / last_height as f64 * 100.0 96 | ); 97 | 98 | // indexer test 99 | if opt.index_coins { 100 | if let Some(tx_0) = blk.transactions.iter().next() { 101 | let recipient = tx_0.outputs[0].covhash; 102 | let coin_changes = snapshot 103 | .get_raw() 104 | .get_coin_changes(bh, recipient) 105 | .await 106 | .unwrap() 107 | .unwrap(); 108 | 109 | log::debug!("testing transaction recipient {recipient}"); 110 | 111 | assert!(coin_changes 112 | .contains(&CoinChange::Add(CoinID::new(tx_0.hash_nosigs(), 0)))); 113 | } 114 | if let Some(proposer_action) = blk.proposer_action { 115 | let reward_dest = proposer_action.reward_dest; 116 | let coin_changes = snapshot 117 | .get_raw() 118 | .get_coin_changes(bh, reward_dest) 119 | .await 120 | .unwrap() 121 | .unwrap(); 122 | 123 | log::debug!("testing proposer {reward_dest}"); 124 | assert!(coin_changes 125 | .contains(&CoinChange::Add(CoinID::proposer_reward(bh)))); 126 | } 127 | } 128 | } 129 | } 130 | }) 131 | .detach(); 132 | } 133 | 134 | // #[cfg(feature = "dhat-heap")] 135 | // for i in 0..300 { 136 | // smol::Timer::after(Duration::from_secs(1)).await; 137 | // dbg!(i); 138 | // } 139 | 140 | #[cfg(not(feature = "dhat-heap"))] 141 | let _: u64 = smol::future::pending().await; 142 | 143 | Ok(()) 144 | } 145 | 
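For context, the self-test above already exercises the whole client-side flow. A rough, hypothetical sketch of how an external tool could talk to a locally running node over the same stack — reusing only calls that appear elsewhere in this repository, and assuming the default listen port, mainnet rules, and a node started with `--index-coins` — might look like:
```rust
use melnet2::{wire::http::HttpBackhaul, Backhaul};
use melprot::{Client, NodeRpcClient};
use melstructs::{Address, BlockHeight, NetID};

async fn peek_coin_changes() {
    // Connect to the local node over the same HTTP backhaul melnode itself uses.
    let conn = HttpBackhaul::new()
        .connect("127.0.0.1:41814".to_string().into())
        .await
        .unwrap();
    let client = Client::new(NetID::Mainnet, NodeRpcClient(conn));
    // Trust whatever the node currently reports as the latest block (fine for local tooling).
    client.dangerously_trust_latest().await.unwrap();
    let snapshot = client.latest_snapshot().await.unwrap();
    // Ask the coin indexer what changed for an arbitrary all-zero address at height 10;
    // this comes back as None unless the node was started with --index-coins.
    let changes = snapshot
        .get_raw()
        .get_coin_changes(BlockHeight(10), Address(Default::default()))
        .await
        .unwrap();
    println!("coin changes available: {}", changes.is_some());
}
```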
-------------------------------------------------------------------------------- /src/node.rs: -------------------------------------------------------------------------------- 1 | mod blksync; 2 | mod indexer; 3 | 4 | use crate::{node::blksync::attempt_blksync, storage::Storage}; 5 | 6 | use anyhow::Context; 7 | use async_trait::async_trait; 8 | use base64::Engine; 9 | use lru::LruCache; 10 | use melblkidx::{CoinInfo, Indexer}; 11 | use melnet2::{wire::http::HttpBackhaul, Backhaul, Swarm}; 12 | use novasmt::{CompressedProof, Database, InMemoryCas, Tree}; 13 | use once_cell::sync::Lazy; 14 | use parking_lot::Mutex; 15 | 16 | use melstf::SmtMapping; 17 | use melstructs::{ 18 | AbbrBlock, Address, Block, BlockHeight, CoinID, ConsensusProof, NetID, Transaction, TxHash, 19 | }; 20 | use std::{ 21 | collections::BTreeMap, 22 | net::SocketAddr, 23 | time::{Duration, Instant}, 24 | }; 25 | use stdcode::StdcodeSerializeExt; 26 | 27 | use melprot::{ 28 | CoinChange, CoinSpendStatus, NodeRpcClient, NodeRpcProtocol, NodeRpcService, StateSummary, 29 | Substate, TransactionError, 30 | }; 31 | 32 | use smol_timeout::TimeoutExt; 33 | use tmelcrypt::{HashVal, Hashable}; 34 | 35 | use self::indexer::WrappedIndexer; 36 | 37 | /// An actor implementing the node P2P protocol, common for both replicas and stakers.. 38 | pub struct Node { 39 | _blksync_task: smol::Task<()>, 40 | } 41 | 42 | impl Node { 43 | /// Creates a new Node. 44 | pub async fn start( 45 | netid: NetID, 46 | listen_addr: SocketAddr, 47 | 48 | advertise_addr: Option, 49 | storage: Storage, 50 | index_coins: bool, 51 | swarm: Swarm, 52 | ) -> anyhow::Result { 53 | // This is all we need to do since start_listen does not block. 54 | log::debug!("starting to listen at {}", listen_addr); 55 | swarm 56 | .start_listen( 57 | listen_addr.to_string().into(), 58 | advertise_addr.map(|addr| addr.to_string().into()), 59 | NodeRpcService( 60 | NodeRpcImpl::start( 61 | swarm.clone(), 62 | listen_addr, 63 | netid, 64 | storage.clone(), 65 | index_coins, 66 | ) 67 | .await?, 68 | ), 69 | ) 70 | .await?; 71 | 72 | let _blksync_task = smolscale::spawn(blksync_loop(netid, swarm, storage)); 73 | Ok(Self { _blksync_task }) 74 | } 75 | } 76 | 77 | async fn blksync_loop(_netid: NetID, swarm: Swarm, storage: Storage) { 78 | loop { 79 | let gap_time: Duration = Duration::from_secs_f64(fastrand::f64() * 1.0); 80 | let routes = swarm.routes().await; 81 | let random_peer = routes.first().cloned(); 82 | if let Some(peer) = random_peer { 83 | log::trace!("picking peer {} out of {} peers", &peer, routes.len()); 84 | let fallible_part = async { 85 | let client = swarm.connect(peer.clone()).await?; 86 | let addr: SocketAddr = peer.clone().to_string().parse()?; 87 | let res = attempt_blksync(addr, &client, &storage).await?; 88 | anyhow::Ok(res) 89 | }; 90 | match fallible_part.await { 91 | Err(e) => { 92 | log::warn!("failed to blksync with {}: {:?}", peer, e); 93 | log::warn!("last state: {:?}", storage.highest_state().await.header()); 94 | } 95 | Ok(blklen) => { 96 | if blklen > 0 { 97 | log::debug!("synced to height {:?}", storage.highest_height().await); 98 | } 99 | } 100 | } 101 | } 102 | smol::Timer::after(gap_time).await; 103 | } 104 | } 105 | 106 | // This struct is responsible for obtaining any "state" needed for the implementation of the RPC business logic. 
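// It wraps the node's Storage together with a few in-memory caches (recently seen transaction hashes, per-height transaction SMTs, state summaries, and abbreviated blocks with their consensus proofs) so hot RPC paths avoid rebuilding state on every request, plus an optional coin indexer when --index-coins is enabled.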
107 | pub struct NodeRpcImpl { 108 | network: NetID, 109 | storage: Storage, 110 | recent: Mutex>, 111 | summary: Mutex>, 112 | coin_smts: Mutex>>, 113 | abbr_block_cache: moka::sync::Cache, 114 | swarm: Swarm, 115 | indexer: Option, 116 | } 117 | 118 | impl NodeRpcImpl { 119 | async fn start( 120 | swarm: Swarm, 121 | listen_addr: SocketAddr, 122 | network: NetID, 123 | storage: Storage, 124 | index_coins: bool, 125 | ) -> anyhow::Result { 126 | let indexer = if index_coins { 127 | Some(WrappedIndexer::start(network, storage.clone(), listen_addr).await?) 128 | } else { 129 | None 130 | }; 131 | Ok(Self { 132 | network, 133 | storage, 134 | recent: LruCache::new(1000).into(), 135 | coin_smts: LruCache::new(100).into(), 136 | summary: LruCache::new(10).into(), 137 | swarm, 138 | abbr_block_cache: moka::sync::Cache::new(1000), 139 | indexer, 140 | }) 141 | } 142 | 143 | async fn get_coin_tree(&self, height: BlockHeight) -> anyhow::Result> { 144 | let otree = self.coin_smts.lock().get(&height).cloned(); 145 | if let Some(v) = otree { 146 | Ok(v) 147 | } else { 148 | let state = self 149 | .storage 150 | .get_state(height) 151 | .await 152 | .context(format!("block {} not confirmed yet", height))?; 153 | let mut mm = SmtMapping::new( 154 | Database::new(InMemoryCas::default()) 155 | .get_tree(Default::default()) 156 | .unwrap(), 157 | ); 158 | 159 | let transactions = state.to_block().transactions; 160 | for tx in transactions.iter() { 161 | mm.insert(tx.hash_nosigs(), tx.clone()); 162 | } 163 | self.coin_smts.lock().put(height, mm.mapping.clone()); 164 | Ok(mm.mapping) 165 | } 166 | } 167 | 168 | async fn get_indexer(&self) -> Option<&Indexer> { 169 | if let Some(indexer) = self.indexer.as_ref() { 170 | let indexer = indexer.inner(); 171 | let height = self.storage.highest_height().await; 172 | while indexer.max_height() < height { 173 | log::warn!("waiting for {height} to be available at the indexer..."); 174 | smol::Timer::after(Duration::from_secs(1)).await; 175 | } 176 | Some(indexer) 177 | } else { 178 | None 179 | } 180 | } 181 | } 182 | 183 | /// Global TCP backhaul for node connections 184 | static TCP_BACKHAUL: Lazy = Lazy::new(HttpBackhaul::new); 185 | 186 | #[async_trait] 187 | impl NodeRpcProtocol for NodeRpcImpl { 188 | async fn send_tx(&self, tx: Transaction) -> Result<(), TransactionError> { 189 | if let Some(val) = self.recent.lock().peek(&tx.hash_nosigs()) { 190 | if val.elapsed().as_secs_f64() < 10.0 { 191 | return Err(TransactionError::RecentlySeen); 192 | } 193 | } 194 | self.recent.lock().put(tx.hash_nosigs(), Instant::now()); 195 | log::trace!("handling send_tx"); 196 | let start = Instant::now(); 197 | 198 | self.storage 199 | .mempool_mut() 200 | .apply_transaction(&tx) 201 | .map_err(|e| { 202 | if !e.to_string().contains("duplicate") { 203 | log::warn!("cannot apply tx: {:?}", e) 204 | } 205 | TransactionError::Invalid(e.to_string()) 206 | })?; 207 | 208 | log::debug!( 209 | "txhash {}.. 
inserted ({:?} applying)", 210 | &tx.hash_nosigs().to_string()[..10], 211 | start.elapsed(), 212 | ); 213 | 214 | let routes = self.swarm.routes().await; 215 | for neigh in routes.iter().take(16).cloned() { 216 | log::debug!("about to broadcast txhash {} to {neigh}", tx.hash_nosigs()); 217 | let tx = tx.clone(); 218 | smolscale::spawn(async move { 219 | let conn = TCP_BACKHAUL.connect(neigh).await?; 220 | NodeRpcClient(conn) 221 | .send_tx(tx) 222 | .timeout(Duration::from_secs(10)) 223 | .await 224 | .context("oh no")???; 225 | anyhow::Ok(()) 226 | }) 227 | .detach(); 228 | } 229 | 230 | Ok(()) 231 | } 232 | 233 | async fn get_abbr_block(&self, height: BlockHeight) -> Option<(AbbrBlock, ConsensusProof)> { 234 | if let Some(c) = self.abbr_block_cache.get(&height) { 235 | return Some(c); 236 | } 237 | log::trace!("handling get_abbr_block({})", height); 238 | let block = self.storage.get_block(height).await?; 239 | let proof = self.storage.get_consensus(height).await?; 240 | let summ = (block.abbreviate(), proof); 241 | self.abbr_block_cache.insert(height, summ.clone()); 242 | Some(summ) 243 | } 244 | 245 | async fn get_summary(&self) -> StateSummary { 246 | log::trace!("handling get_summary()"); 247 | let highest = self.storage.highest_state().await; 248 | let header = highest.header(); 249 | let res = self.summary.lock().get(&header.height).cloned(); 250 | if let Some(res) = res { 251 | res 252 | } else { 253 | let proof = self 254 | .storage 255 | .get_consensus(header.height) 256 | .await 257 | .unwrap_or_default(); 258 | let summary = StateSummary { 259 | netid: self.network, 260 | height: header.height, 261 | header, 262 | proof, 263 | }; 264 | self.summary.lock().push(header.height, summary.clone()); 265 | summary 266 | } 267 | } 268 | 269 | async fn get_block(&self, height: BlockHeight) -> Option { 270 | log::trace!("handling get_state({})", height); 271 | self.storage.get_block(height).await 272 | } 273 | 274 | async fn get_lz4_blocks(&self, height: BlockHeight, size_limit: usize) -> Option { 275 | log::debug!("get_lz4_blocks({height}, {size_limit})"); 276 | let size_limit = size_limit.min(10_000_000); 277 | // TODO: limit the *compressed* size. 
But this is fine because compression makes stuff smoller 278 | let mut total_count = 0; 279 | let mut accum = vec![]; 280 | let mut proof_accum = vec![]; 281 | 282 | let mut height = height; 283 | while total_count <= size_limit { 284 | if let Some(block) = self.get_block(height).await { 285 | match self.storage.get_consensus(height).await { 286 | Some(proof) => { 287 | total_count += block.stdcode().len(); 288 | total_count += proof.stdcode().len(); 289 | 290 | accum.push(block); 291 | proof_accum.push(proof); 292 | 293 | if total_count > size_limit { 294 | log::info!("BATCH IS DONE"); 295 | if accum.len() > 1 { 296 | accum.pop(); 297 | } 298 | } 299 | 300 | height += BlockHeight(1); 301 | } 302 | _ => { 303 | log::warn!("no proof stored for height {}", height); 304 | } 305 | } 306 | } else if accum.is_empty() { 307 | log::warn!("no stored block for height: {:?}", height); 308 | return None; 309 | } else { 310 | break; 311 | } 312 | } 313 | 314 | let compressed = lz4_flex::compress_prepend_size(&(accum, proof_accum).stdcode()); 315 | Some(base64::engine::general_purpose::STANDARD_NO_PAD.encode(compressed)) 316 | } 317 | 318 | async fn get_smt_branch( 319 | &self, 320 | height: BlockHeight, 321 | elem: Substate, 322 | key: HashVal, 323 | ) -> Option<(Vec, CompressedProof)> { 324 | log::trace!("handling get_smt_branch({}, {:?})", height, elem); 325 | let state = self.storage.get_state(height).await?; 326 | let ctree = self.get_coin_tree(height).await.ok()?; 327 | let coins_smt = state.raw_coins_smt(); 328 | let history_smt = state.raw_history_smt(); 329 | let pools_smt = state.raw_pools_smt(); 330 | 331 | let (v, proof) = match elem { 332 | Substate::Coins => coins_smt.get_with_proof(key.0), 333 | Substate::History => history_smt.get_with_proof(key.0), 334 | Substate::Pools => pools_smt.get_with_proof(key.0), 335 | Substate::Stakes => todo!("no longer relevant"), 336 | Substate::Transactions => ctree.get_with_proof(key.0), 337 | }; 338 | Some((v.to_vec(), proof.compress())) 339 | } 340 | 341 | async fn get_stakers_raw(&self, height: BlockHeight) -> Option>> { 342 | let state = self.storage.get_state(height).await?; 343 | // Note, the returned HashVal is >> HASHED AGAIN << because this is supposed to be compatible with the old SmtMapping encoding, where the key to the `stakes` SMT is the *hash of the transaction hash* due to a quirk. 
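// Concretely: for a stake created by a transaction with hash `h`, the legacy key is the hash of
// `h` itself (a hash of a hash), which is exactly what `k.0.hash()` computes below.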
344 | Some( 345 | state 346 | .raw_stakes() 347 | .iter() 348 | .map(|(k, v)| (k.0.hash(), v.stdcode())) 349 | .collect(), 350 | ) 351 | } 352 | 353 | async fn get_some_coins(&self, height: BlockHeight, covhash: Address) -> Option> { 354 | let indexer = self.get_indexer().await?; 355 | let coins: Vec = indexer 356 | .query_coins() 357 | .covhash(covhash) 358 | .unspent_by(height) 359 | .iter() 360 | .map(|c| CoinID { 361 | txhash: c.create_txhash, 362 | index: c.create_index, 363 | }) 364 | .collect(); 365 | Some(coins) 366 | } 367 | 368 | async fn get_coin_changes( 369 | &self, 370 | height: BlockHeight, 371 | covhash: Address, 372 | ) -> Option> { 373 | log::debug!("get_coin_changes({height}, {covhash})"); 374 | self.storage.get_block(height).await?; 375 | let indexer = self.get_indexer().await?; 376 | // get coins 1 block below the given height 377 | let deleted_coins: Vec = indexer 378 | .query_coins() 379 | .covhash(covhash) 380 | .spend_height_range(height.0..=height.0) 381 | .iter() 382 | .collect(); 383 | 384 | // get coins at the given height 385 | let added_coins: Vec = indexer 386 | .query_coins() 387 | .covhash(covhash) 388 | .create_height_range(height.0..=height.0) 389 | .iter() 390 | .collect(); 391 | 392 | if !added_coins.is_empty() || !deleted_coins.is_empty() { 393 | log::debug!( 394 | "{} added, {} deleted", 395 | added_coins.len(), 396 | deleted_coins.len() 397 | ); 398 | } 399 | 400 | // which coins got added in after_coins? 401 | let added: Vec = added_coins 402 | .iter() 403 | .map(|coin| CoinChange::Add(CoinID::new(coin.create_txhash, coin.create_index))) 404 | .collect(); 405 | 406 | // which coins got deleted in before coins? 407 | let deleted: Vec = deleted_coins 408 | .iter() 409 | .map(|coin| { 410 | CoinChange::Delete( 411 | CoinID::new(coin.create_txhash, coin.create_index), 412 | coin.spend_info.unwrap().spend_txhash, 413 | ) 414 | }) 415 | .collect(); 416 | 417 | Some([added, deleted].concat()) 418 | } 419 | 420 | async fn get_coin_spend(&self, coin: CoinID) -> Option { 421 | let indexer = self.get_indexer().await?; 422 | 423 | let spend_info = indexer 424 | .query_coins() 425 | .create_txhash(coin.txhash) 426 | .create_index(coin.index) 427 | .iter() 428 | .next()? 429 | .spend_info; 430 | match spend_info { 431 | Some(info) => Some(CoinSpendStatus::Spent(( 432 | info.spend_txhash, 433 | info.spend_height, 434 | ))), 435 | None => Some(CoinSpendStatus::NotSpent), 436 | } 437 | } 438 | } 439 | -------------------------------------------------------------------------------- /src/node/blksync.rs: -------------------------------------------------------------------------------- 1 | use crate::storage::Storage; 2 | use anyhow::Context; 3 | use base64::Engine; 4 | use futures_util::stream::{StreamExt, TryStreamExt}; 5 | use melprot::NodeRpcClient; 6 | use melstructs::{Block, BlockHeight, ConsensusProof}; 7 | use smol_timeout::TimeoutExt; 8 | use std::{ 9 | net::SocketAddr, 10 | time::{Duration, Instant}, 11 | }; 12 | 13 | /// Attempts a sync using the given given node client. 14 | pub async fn attempt_blksync( 15 | addr: SocketAddr, 16 | client: &NodeRpcClient, 17 | storage: &Storage, 18 | ) -> anyhow::Result { 19 | if std::env::var("MELNODE_OLD_BLKSYNC").is_ok() { 20 | return attempt_blksync_legacy(addr, client, storage).await; 21 | } 22 | 23 | let their_highest = client 24 | .get_summary() 25 | .timeout(Duration::from_secs(5)) 26 | .await 27 | .context("timed out getting summary")? 28 | .context("cannot get their highest block")? 
29 | .height; 30 | 31 | let my_highest = storage.highest_height().await; 32 | if their_highest <= my_highest { 33 | return Ok(0); 34 | } 35 | 36 | let mut num_blocks_applied: usize = 0; 37 | let my_highest: u64 = my_highest.0 + 1; 38 | 39 | let mut height = BlockHeight(my_highest); 40 | while height <= their_highest { 41 | let start = Instant::now(); 42 | 43 | log::debug!("gonna get compressed blocks from {addr}..."); 44 | let compressed_blocks = client 45 | .get_lz4_blocks(height, 500_000) 46 | .timeout(Duration::from_secs(30)) 47 | .await 48 | .context("timeout while getting compressed blocks")? 49 | .context("failed to get compressed blocks")?; 50 | log::debug!("got compressed blocks!"); 51 | 52 | let (blocks, cproofs): (Vec, Vec) = match compressed_blocks { 53 | Some(compressed) => { 54 | // decode base64 first 55 | let compressed_base64 = base64::engine::general_purpose::STANDARD_NO_PAD 56 | .decode(compressed.as_bytes())?; 57 | 58 | // decompress 59 | let decompressed = lz4_flex::decompress_size_prepended(&compressed_base64)?; 60 | 61 | stdcode::deserialize::<(Vec, Vec)>(&decompressed)? 62 | } 63 | _ => anyhow::bail!("missing block {height}"), 64 | }; 65 | 66 | let mut last_applied_height = height; 67 | log::info!( 68 | "fully resolved blocks {}..{} from peer {} in {:.2}ms", 69 | blocks.first().map(|b| b.header.height).unwrap_or_default(), 70 | blocks.last().map(|b| b.header.height).unwrap_or_default(), 71 | addr, 72 | start.elapsed().as_secs_f64() * 1000.0 73 | ); 74 | for (block, cproof) in blocks.iter().zip(cproofs) { 75 | // validate before applying 76 | if block.header.height != last_applied_height { 77 | anyhow::bail!("wanted block {}, but got {}", height, block.header.height); 78 | } 79 | 80 | storage 81 | .apply_block(block.clone(), cproof) 82 | .await 83 | .context("could not apply a resolved block")?; 84 | num_blocks_applied += 1; 85 | 86 | last_applied_height += BlockHeight(1); 87 | } 88 | 89 | height += BlockHeight(blocks.len() as u64); 90 | } 91 | 92 | Ok(num_blocks_applied) 93 | } 94 | 95 | /// Attempts a sync using the given given node client, in a legacy fashion. 96 | pub async fn attempt_blksync_legacy( 97 | addr: SocketAddr, 98 | client: &NodeRpcClient, 99 | storage: &Storage, 100 | ) -> anyhow::Result { 101 | let their_highest = client 102 | .get_summary() 103 | .timeout(Duration::from_secs(5)) 104 | .await 105 | .context("timed out getting summary")? 106 | .context("cannot get their highest block")? 107 | .height; 108 | let my_highest = storage.highest_height().await; 109 | if their_highest <= my_highest { 110 | return Ok(0); 111 | } 112 | let height_stream = futures_util::stream::iter((my_highest.0..=their_highest.0).skip(1)) 113 | .map(BlockHeight) 114 | .take( 115 | std::env::var("THEMELIO_BLKSYNC_BATCH") 116 | .ok() 117 | .and_then(|d| d.parse().ok()) 118 | .unwrap_or(1000), 119 | ); 120 | let lookup_tx = |tx| storage.mempool().lookup_recent_tx(tx); 121 | let mut result_stream = height_stream 122 | .map(Ok::<_, anyhow::Error>) 123 | .try_filter_map(|height| async move { 124 | Ok(Some(async move { 125 | let start = Instant::now(); 126 | 127 | let (block, cproof): (Block, ConsensusProof) = match client 128 | .get_full_block(height, &lookup_tx) 129 | .timeout(Duration::from_secs(15)) 130 | .await 131 | .context("timeout")?? 
132 | { 133 | Some(v) => v, 134 | _ => anyhow::bail!("mysteriously missing block {}", height), 135 | }; 136 | 137 | if block.header.height != height { 138 | anyhow::bail!("WANTED BLK {}, got {}", height, block.header.height); 139 | } 140 | log::trace!( 141 | "fully resolved block {} from peer {} in {:.2}ms", 142 | block.header.height, 143 | addr, 144 | start.elapsed().as_secs_f64() * 1000.0 145 | ); 146 | Ok((block, cproof)) 147 | })) 148 | }) 149 | .try_buffered(64) 150 | .boxed(); 151 | let mut toret = 0; 152 | while let Some(res) = result_stream.try_next().await? { 153 | let (block, proof): (Block, ConsensusProof) = res; 154 | 155 | storage 156 | .apply_block(block, proof) 157 | .await 158 | .context("could not apply a resolved block")?; 159 | toret += 1; 160 | } 161 | Ok(toret) 162 | } 163 | -------------------------------------------------------------------------------- /src/node/indexer.rs: -------------------------------------------------------------------------------- 1 | use std::{net::SocketAddr, time::Duration}; 2 | 3 | use anyhow::Context; 4 | use melblkidx::Indexer; 5 | use melprot::Client; 6 | 7 | use melstructs::{Checkpoint, NetID}; 8 | 9 | use crate::storage::Storage; 10 | 11 | pub struct WrappedIndexer { 12 | indexer: Indexer, 13 | _task: smol::Task<()>, 14 | } 15 | 16 | impl WrappedIndexer { 17 | /// Creates a new CoinIndexer. 18 | pub async fn start( 19 | network: NetID, 20 | storage: Storage, 21 | connect_addr: SocketAddr, 22 | ) -> anyhow::Result { 23 | let mut localhost_listen_addr = connect_addr; 24 | localhost_listen_addr.set_ip("127.0.0.1".parse().unwrap()); 25 | // TODO: connect_lazy shouldn't return a Result, since backhaul.connect_lazy is "infallible"? 26 | let client = Client::connect_http(network, localhost_listen_addr).await?; 27 | let _task = smolscale::spawn(indexer_loop(storage.clone(), client.clone())); 28 | Ok(Self { 29 | indexer: Indexer::new(storage.get_indexer_path(), client) 30 | .context("indexer failed to be created")?, 31 | _task, 32 | }) 33 | } 34 | 35 | /// Gets a reference to the indexer within. 
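/// The background task spawned in `start` repeatedly marks the node's own highest header as
/// trusted on the indexer's client (see `indexer_loop` below).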
36 | pub fn inner(&self) -> &Indexer { 37 | &self.indexer 38 | } 39 | } 40 | 41 | async fn indexer_loop(storage: Storage, client: Client) { 42 | loop { 43 | let trusted_height = storage.highest_state().await; 44 | client.trust(Checkpoint { 45 | height: trusted_height.header().height, 46 | header_hash: trusted_height.header().hash(), 47 | }); 48 | smol::Timer::after(Duration::from_secs(1)).await; 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/staker.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | args::StakerConfig, 3 | storage::{MeshaCas, Storage}, 4 | }; 5 | 6 | use anyhow::Context; 7 | use async_trait::async_trait; 8 | use bytes::Bytes; 9 | 10 | use dashmap::DashMap; 11 | use melnet2::{wire::http::HttpBackhaul, Swarm}; 12 | 13 | use nanorpc::{nanorpc_derive, DynRpcTransport}; 14 | 15 | use melstf::SealedState; 16 | use melstructs::{Block, BlockHeight, ConsensusProof, NetID, ProposerAction, StakeDoc}; 17 | use once_cell::sync::Lazy; 18 | use parking_lot::RwLock; 19 | use smol::{ 20 | channel::{Receiver, Sender}, 21 | prelude::*, 22 | }; 23 | use smol_timeout::TimeoutExt; 24 | use std::{collections::BTreeMap, time::Instant}; 25 | use std::{ 26 | collections::HashMap, 27 | sync::Arc, 28 | time::{Duration, SystemTime}, 29 | }; 30 | use stdcode::StdcodeSerializeExt; 31 | use streamlette::{DeciderConfig, DiffMessage}; 32 | use tap::Tap; 33 | use tmelcrypt::{Ed25519PK, Ed25519SK, HashVal}; 34 | 35 | static MAINNET_START_TIME: Lazy = Lazy::new(|| { 36 | std::time::UNIX_EPOCH + Duration::from_secs(1618365600) + Duration::from_secs(30 * 6000) 37 | }); // Apr 14 2021 38 | 39 | static TESTNET_START_TIME: Lazy = 40 | Lazy::new(|| std::time::UNIX_EPOCH + Duration::from_secs(1665123000)); 41 | 42 | fn height_to_time(netid: NetID, height: BlockHeight) -> SystemTime { 43 | match netid { 44 | NetID::Testnet => *TESTNET_START_TIME + Duration::from_secs(height.0 * 30), 45 | NetID::Mainnet => *MAINNET_START_TIME + Duration::from_secs(height.0 * 30), 46 | _ => SystemTime::now(), 47 | } 48 | } 49 | 50 | /// An actor that represents the background process that runs staker logic. 51 | /// 52 | /// Talks to other stakers over the staker P2P network, decides on blocks using the Streamlette consensus algorithm, and stuffs decided blocks into [Storage]. 53 | pub struct Staker { 54 | _network_task: smol::Task<()>, 55 | } 56 | 57 | impl Staker { 58 | /// Creates a new instance of the staker protocol. 59 | pub fn new(storage: Storage, cfg: StakerConfig) -> Self { 60 | Self { 61 | _network_task: smolscale::spawn(network_task(storage, cfg)), 62 | } 63 | } 64 | } 65 | 66 | type DiffReq = ( 67 | u128, 68 | HashMap, 69 | async_oneshot::Sender>, 70 | ); 71 | 72 | async fn network_task(storage: Storage, cfg: StakerConfig) { 73 | loop { 74 | if let Err(err) = network_task_inner(storage.clone(), cfg.clone()).await { 75 | log::warn!("staker failed: {:?}", err); 76 | } 77 | } 78 | } 79 | 80 | // TODO: the current consensus has a very small chance of not reaching completion, especially when the number of nodes is small compared to the network latency (e.g. a localhost simnet). 81 | // This is because one could enter a situation where streamlette does decide, but less than 2/3 of the streamlette instances terminate properly, since the rest get stuck as 2/3 exit streamlette and no longer relay messages. 82 | // This *seems* kinda fundamental to "oneshot" consensus that clean everything up after decision. 
I wonder whether there's a "standard solution" to this. 83 | 84 | async fn network_task_inner(storage: Storage, cfg: StakerConfig) -> anyhow::Result<()> { 85 | // A channel for sending requests for diffs 86 | let (send_diff_req, recv_diff_req) = smol::channel::bounded::(100); 87 | // The melnet2 swarm for the staker 88 | let swarm: Swarm> = Swarm::new( 89 | HttpBackhaul::new(), 90 | |conn| StakerNetClient(DynRpcTransport::new(conn)), 91 | "melstaker-2", 92 | ); 93 | swarm 94 | .add_route(cfg.bootstrap.to_string().into(), true) 95 | .await; 96 | // a "consensus proof gatherer" (see description) 97 | let sig_gather: Arc = Arc::new(DashMap::new()); 98 | swarm 99 | .start_listen( 100 | cfg.listen.to_string().into(), 101 | Some(cfg.listen.to_string().into()), 102 | StakerNetService(StakerNetProtocolImpl { 103 | send_diff_req, 104 | sig_gather: sig_gather.clone(), 105 | storage: storage.clone(), 106 | }), 107 | ) 108 | .await 109 | .context("cannot start listen")?; 110 | // TODO better time calcs 111 | loop { 112 | let base_state = storage.highest_state().await; 113 | let next_height: BlockHeight = base_state.header().height + BlockHeight(1); 114 | let skip_round = async { 115 | storage.get_state_or_wait(next_height).await; 116 | log::warn!("skipping consensus for {next_height} since we already got it"); 117 | anyhow::Ok(()) 118 | }; 119 | let next_time = height_to_time(base_state.header().network, next_height); 120 | if next_height.0 > 10 { 121 | sig_gather.remove(&BlockHeight(next_height.0 - 10)); 122 | } 123 | smol::Timer::after(Duration::from_secs(10)).await; // wait AT LEAST for a while 124 | while SystemTime::now() < next_time { 125 | smol::Timer::after(Duration::from_millis(100)).await; 126 | } 127 | 128 | log::debug!("starting consensus for {next_height}..."); 129 | let consensus_start_time = Instant::now(); 130 | 131 | let log_key = format!("{next_height}/{}", cfg.listen); 132 | 133 | let decide_round = async { 134 | let proposed_state = storage.mempool().to_state(); 135 | let sealed_proposed_state = proposed_state.clone().seal(None); 136 | if sealed_proposed_state.header().height != next_height { 137 | log::warn!("mempool not at the right height, trying again"); 138 | storage.mempool_mut().rebase(base_state); 139 | } else { 140 | let action = ProposerAction { 141 | fee_multiplier_delta: if base_state.header().fee_multiplier 142 | > cfg.target_fee_multiplier 143 | { 144 | -100 145 | } else { 146 | 100 147 | }, 148 | reward_dest: cfg.payout_addr, 149 | }; 150 | // create the config 151 | let proposed_state = proposed_state.seal(Some(action)); 152 | log::debug!( 153 | "proposed state has {} transactions", 154 | proposed_state.to_block().transactions.len() 155 | ); 156 | let config = StakerInner { 157 | base_state: base_state.clone(), 158 | my_proposal: proposed_state.to_block(), 159 | // TODO: THIS MUST BE REPLACED WITH A PROPER MAJORITY BEACON FOR MANIPULATION RESISTANCE 160 | nonce: base_state.header().height.0 as _, 161 | my_sk: cfg.signing_secret, 162 | 163 | recv_diff_req: recv_diff_req.clone(), 164 | swarm: swarm.clone(), 165 | }; 166 | let mut decider = streamlette::Decider::new(config); 167 | let decision = decider.tick_to_end().await; 168 | log::debug!( 169 | "{log_key} DECIDED on a block with {} bytes within {:?}", 170 | decision.len(), 171 | consensus_start_time.elapsed() 172 | ); 173 | let decision: Block = stdcode::deserialize(&decision) 174 | .expect("decision reached on an INVALID block?!?!?!?!?!?!"); 175 | 176 | // now we must assemble the consensus proof separately. 
177 | // everybody has already decided on the block, we're just sharing signatures of it. 178 | 179 | // we start by inserting our own decision into the map. 180 | sig_gather.insert( 181 | decision.header.height, 182 | imbl::HashMap::new().tap_mut(|map| { 183 | map.insert( 184 | cfg.signing_secret.to_public(), 185 | cfg.signing_secret.sign(&decision.header.hash()).into(), 186 | ); 187 | }), 188 | ); 189 | 190 | let _spammer = smolscale::spawn(async move { decider.sync_state(None).await }); 191 | 192 | // then, until we finally have enough signatures, we spam our neighbors incessantly. 193 | let stakes = base_state.raw_stakes(); 194 | let epoch = base_state.header().height.epoch(); 195 | let vote_threshold = stakes.total_votes(epoch) * 2 / 3; 196 | let get_proof = || { 197 | let map = sig_gather.entry(decision.header.height).or_default(); 198 | if map.keys().map(|pk| stakes.votes(epoch, *pk)).sum::() > vote_threshold 199 | { 200 | Some(map) 201 | } else { 202 | None 203 | } 204 | }; 205 | loop { 206 | if let Some(result) = get_proof() { 207 | let cproof: ConsensusProof = 208 | result.clone().into_iter().map(|(k, v)| (k, v)).collect(); 209 | if let Err(err) = storage.apply_block(decision.clone(), cproof).await { 210 | log::error!("cannot commit newly decided block: {:?}", err) 211 | } 212 | log::debug!( 213 | "{log_key} COMMITTED the newly decided block within {:?}", 214 | consensus_start_time.elapsed() 215 | ); 216 | break; 217 | } 218 | let random_neigh = swarm.routes().await.first().cloned(); 219 | if let Some(neigh) = random_neigh { 220 | log::trace!("syncing with {} for consensus proof", neigh); 221 | let fallible = async { 222 | let connection = swarm 223 | .connect(neigh.clone()) 224 | .timeout(Duration::from_secs(1)) 225 | .await 226 | .context("timed out for connection")??; 227 | let result = connection 228 | .get_sigs(next_height) 229 | .timeout(Duration::from_secs(1)) 230 | .await 231 | .context("timed out for getting")??; 232 | anyhow::Ok(result) 233 | }; 234 | match fallible.await { 235 | Err(err) => log::warn!("cannot sync with {neigh}: {:?}", err), 236 | Ok(map) => { 237 | let mut existing = sig_gather.entry(next_height).or_default(); 238 | for (k, v) in map { 239 | existing.insert(k, v); 240 | } 241 | log::debug!( 242 | "{log_key} now have {} votes in consensus proof after talking to {neigh}", 243 | existing.len() 244 | ); 245 | } 246 | } 247 | } 248 | smallsleep().await; 249 | } 250 | } 251 | anyhow::Ok(()) 252 | }; 253 | skip_round.or(decide_round).await?; 254 | } 255 | } 256 | 257 | struct StakerInner { 258 | base_state: SealedState, 259 | my_proposal: Block, 260 | nonce: u128, 261 | my_sk: Ed25519SK, 262 | 263 | recv_diff_req: Receiver, 264 | swarm: Swarm>, 265 | } 266 | 267 | #[async_trait] 268 | impl DeciderConfig for StakerInner { 269 | fn generate_proposal(&self) -> Bytes { 270 | self.my_proposal.stdcode().into() 271 | } 272 | 273 | fn verify_proposal(&self, prop: &[u8]) -> bool { 274 | if let Ok(blk) = stdcode::deserialize::(prop) { 275 | self.base_state.apply_block(&blk).is_ok() 276 | } else { 277 | false 278 | } 279 | } 280 | 281 | async fn sync_core(&self, core: &mut streamlette::Core) { 282 | let core = RwLock::new(core); 283 | let main_loop = async { 284 | loop { 285 | let routes = self.swarm.routes().await; 286 | log::trace!("syncing core with {:?}", routes); 287 | for route in routes { 288 | let our_summary = core.read().summary(); 289 | let fallible = async { 290 | let conn = self 291 | .swarm 292 | .connect(route.clone()) 293 | 
.timeout(Duration::from_secs(1)) 294 | .await 295 | .context("timed out connecting")??; 296 | let diff: Vec = conn 297 | .get_diff(self.nonce, our_summary.clone()) 298 | .timeout(Duration::from_secs(5)) 299 | .await 300 | .context("timed out receiving diff")??; 301 | anyhow::Ok(diff) 302 | }; 303 | match fallible.await { 304 | Ok(diff) => { 305 | // apply the diffs 306 | for diff in diff { 307 | if let Err(err) = core.write().apply_one_diff(diff.clone()) { 308 | log::warn!("invalid diff from {route} ({:?}): {:?}", err, diff); 309 | } 310 | } 311 | } 312 | Err(err) => { 313 | log::trace!("could not sync with {route}: {:?}", err) 314 | } 315 | } 316 | } 317 | smallsleep().await; 318 | } 319 | }; 320 | let respond_loop = async { 321 | loop { 322 | if let Ok((nonce, their_summary, mut send_resp)) = self.recv_diff_req.recv().await { 323 | if nonce == self.nonce { 324 | let diff = core.read().get_diff(&their_summary); 325 | let _ = send_resp.send(diff); 326 | } else { 327 | let _ = send_resp.send(vec![]); 328 | } 329 | } else { 330 | smol::future::pending::<()>().await; 331 | } 332 | } 333 | }; 334 | main_loop.race(respond_loop).await 335 | } 336 | 337 | fn vote_weights(&self) -> BTreeMap { 338 | let height: BlockHeight = self.base_state.header().height + BlockHeight(1); 339 | self.base_state 340 | .raw_stakes() 341 | .pre_tip911() 342 | .iter() 343 | .fold(BTreeMap::new(), |mut map, val| { 344 | let stake_doc: StakeDoc = stdcode::deserialize(&val.1).unwrap(); 345 | if height.epoch() >= stake_doc.e_start && height.epoch() < stake_doc.e_post_end { 346 | *map.entry(stake_doc.pubkey).or_default() += stake_doc.syms_staked.0 as u64; 347 | } 348 | map 349 | }) 350 | } 351 | 352 | fn seed(&self) -> u128 { 353 | self.nonce 354 | } 355 | 356 | fn my_secret(&self) -> Ed25519SK { 357 | self.my_sk 358 | } 359 | } 360 | 361 | async fn smallsleep() { 362 | smol::Timer::after(Duration::from_millis(fastrand::u64(200..500))).await; 363 | } 364 | 365 | #[nanorpc_derive] 366 | #[async_trait] 367 | pub trait StakerNetProtocol { 368 | /// Obtains a diff from the node, given a summary of the client's state. 369 | async fn get_diff(&self, nonce: u128, summary: HashMap) -> Vec; 370 | /// Obtains all known signatures for the given confirmed height. Used to assemble [ConsensusProof]s after streamlette finishes deciding. 
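/// A [ConsensusProof] is just a map from staker public key to that staker's signature over the
/// decided block header; `network_task_inner` keeps merging these maps until the collected
/// signatures cover more than 2/3 of the epoch's total votes.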
371 | async fn get_sigs(&self, height: BlockHeight) -> HashMap; 372 | } 373 | 374 | struct StakerNetProtocolImpl { 375 | send_diff_req: Sender, 376 | sig_gather: Arc, 377 | storage: Storage, 378 | } 379 | 380 | #[async_trait] 381 | impl StakerNetProtocol for StakerNetProtocolImpl { 382 | async fn get_diff(&self, nonce: u128, summary: HashMap) -> Vec { 383 | let (send_resp, recv_resp) = async_oneshot::oneshot(); 384 | let _ = self.send_diff_req.try_send((nonce, summary, send_resp)); 385 | 386 | if let Ok(val) = recv_resp.await { 387 | val 388 | } else { 389 | vec![] 390 | } 391 | } 392 | 393 | async fn get_sigs(&self, height: BlockHeight) -> HashMap { 394 | if let Some(val) = self.storage.get_consensus(height).await { 395 | val.into_iter().collect() 396 | } else { 397 | self.sig_gather 398 | .get(&height) 399 | .map(|s| s.clone()) 400 | .unwrap_or_default() 401 | .into_iter() 402 | .collect() // convert from immutable to std 403 | } 404 | } 405 | } 406 | 407 | type ConsensusProofGatherer = DashMap>; 408 | -------------------------------------------------------------------------------- /src/storage/mempool.rs: -------------------------------------------------------------------------------- 1 | use crate::storage::MeshaCas; 2 | 3 | use std::collections::HashSet; 4 | 5 | use melstf::{SealedState, StateError, UnsealedState}; 6 | use melstructs::{Transaction, TxHash}; 7 | use melvm::covenant_weight_from_bytes; 8 | 9 | const WEIGHT_LIMIT: u128 = 10_000_000; 10 | 11 | /// Mempool encapsulates a "mempool" --- a provisional state that is used to form new blocks by stakers, or provisionally validate transactions by replicas. 12 | pub struct Mempool { 13 | provisional_state: UnsealedState, 14 | last_rebase: UnsealedState, 15 | txx_in_state: HashSet, 16 | next_weight: u128, // seen: LruCache, 17 | } 18 | 19 | impl Mempool { 20 | /// Create sa new mempool based on a provisional state. 21 | pub fn new(state: UnsealedState) -> Self { 22 | Self { 23 | provisional_state: state.clone(), 24 | last_rebase: state, 25 | txx_in_state: Default::default(), 26 | next_weight: 0, 27 | // seen: LruCache::new(10000), 28 | } 29 | } 30 | /// Creates a State based on the present state of the mempool. 31 | pub fn to_state(&self) -> UnsealedState { 32 | self.provisional_state.clone() 33 | } 34 | 35 | /// Tries to add a transaction to the mempool. 36 | pub fn apply_transaction(&mut self, tx: &Transaction) -> anyhow::Result<()> { 37 | if self.next_weight < WEIGHT_LIMIT { 38 | if !self.txx_in_state.insert(tx.hash_nosigs()) { 39 | return Err(StateError::DuplicateTx.into()); 40 | } 41 | self.provisional_state.apply_tx(tx)?; 42 | self.next_weight += tx.weight(covenant_weight_from_bytes); 43 | // self.seen.put(tx.hash_nosigs(), ()); 44 | Ok(()) 45 | } else { 46 | anyhow::bail!("mempool is full, try again later") 47 | } 48 | } 49 | 50 | /// Forcibly replaces the internal state of the mempool with the given state. 
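/// Any transactions accumulated in the current provisional state are discarded (with a loud
/// warning), and both the provisional state and the rebase point are reset to
/// `state.next_unsealed()`.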
51 | pub fn rebase(&mut self, state: SealedState) { 52 | let current_sealed = self.provisional_state.clone().seal(None); 53 | log::trace!( 54 | "forcibly rebasing mempool {} => {}", 55 | current_sealed.header().height, 56 | state.header().height 57 | ); 58 | if !current_sealed.is_empty() { 59 | let transactions = current_sealed.to_block().transactions; 60 | log::warn!("*** THROWING AWAY {} MEMPOOL TXX ***", transactions.len()); 61 | } 62 | 63 | let next_state = state.next_unsealed(); 64 | self.provisional_state = next_state.clone(); 65 | self.last_rebase = next_state; 66 | self.txx_in_state.clear(); 67 | self.next_weight = 0; 68 | } 69 | 70 | /// Lookups a recent transaction. 71 | pub fn lookup_recent_tx(&self, _hash: TxHash) -> Option { 72 | None 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/storage/mod.rs: -------------------------------------------------------------------------------- 1 | mod mempool; 2 | mod smt; 3 | 4 | #[allow(clippy::module_inception)] 5 | mod storage; 6 | 7 | pub use smt::*; 8 | pub use storage::*; 9 | -------------------------------------------------------------------------------- /src/storage/smt.rs: -------------------------------------------------------------------------------- 1 | use novasmt::ContentAddrStore; 2 | 3 | /// A meshanina-backed autosmt backend 4 | pub struct MeshaCas { 5 | inner: meshanina::Mapping, 6 | } 7 | 8 | impl MeshaCas { 9 | /// Takes exclusively ownership of a Meshanina database and creates an autosmt backend. 10 | pub fn new(db: meshanina::Mapping) -> Self { 11 | Self { inner: db } 12 | } 13 | 14 | /// Syncs to disk. 15 | pub fn flush(&self) { 16 | self.inner.flush() 17 | } 18 | } 19 | 20 | impl ContentAddrStore for MeshaCas { 21 | fn get<'a>(&'a self, key: &[u8]) -> Option> { 22 | Some(std::borrow::Cow::Owned( 23 | self.inner.get(tmelcrypt::hash_single(key).0)?.to_vec(), 24 | )) 25 | } 26 | 27 | fn insert(&self, key: &[u8], value: &[u8]) { 28 | self.inner.insert(tmelcrypt::hash_single(key).0, value); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/storage/storage.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use event_listener::Event; 3 | use rusqlite::{params, OptionalExtension}; 4 | use smol::channel::{Receiver, Sender}; 5 | use std::{ 6 | ops::{Deref, DerefMut}, 7 | path::PathBuf, 8 | sync::Arc, 9 | time::Instant, 10 | }; 11 | use stdcode::StdcodeSerializeExt; 12 | use tap::Tap; 13 | use tip911_stakeset::StakeSet; 14 | use tmelcrypt::HashVal; 15 | 16 | use moka::sync::Cache; 17 | use parking_lot::RwLock; 18 | 19 | use melstf::{GenesisConfig, SealedState}; 20 | use melstructs::{Block, BlockHeight, CoinValue, ConsensusProof, NetID, StakeDoc, TxHash, TxKind}; 21 | 22 | use crate::autoretry::autoretry; 23 | 24 | use super::{mempool::Mempool, MeshaCas}; 25 | 26 | /// Storage encapsulates all storage used by a Mel full node (replica or staker). 27 | #[derive(Clone)] 28 | pub struct Storage { 29 | send_pool: Sender, 30 | recv_pool: Receiver, 31 | old_cache: Arc>, 32 | forest: Arc>, 33 | 34 | genesis: GenesisConfig, 35 | 36 | mempool: Arc>, 37 | 38 | /// A notifier for a new block happening. 39 | new_block_notify: Arc, 40 | 41 | /// SQLite path 42 | sqlite_path: PathBuf, 43 | 44 | lock: Arc>, 45 | } 46 | 47 | impl Storage { 48 | /// Gets an immutable reference to the mempool. 
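/// Suitable for read-only queries such as `lookup_recent_tx`; anything that modifies the
/// provisional state should go through `mempool_mut` instead.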
49 | pub fn mempool(&self) -> impl Deref + '_ { 50 | self.mempool.read() 51 | } 52 | 53 | /// Gets a mutable reference to the mempool. 54 | pub fn mempool_mut(&self) -> impl DerefMut + '_ { 55 | self.mempool.write() 56 | } 57 | 58 | pub fn get_indexer_path(&self) -> PathBuf { 59 | let path = self.sqlite_path.clone(); 60 | format!("{}.coinindex.db", path.to_string_lossy()).into() 61 | } 62 | 63 | /// Opens a NodeStorage, given a meshanina and boringdb database. 64 | pub async fn open(mut db_folder: PathBuf, genesis: GenesisConfig) -> anyhow::Result { 65 | let genesis_id = tmelcrypt::hash_single(stdcode::serialize(&genesis).unwrap()); 66 | db_folder.push(format!("{}/", hex::encode(genesis_id.0))); 67 | std::fs::create_dir_all(&db_folder).context("cannot make folder")?; 68 | let sqlite_path = db_folder.clone().tap_mut(|path| path.push("storage.db")); 69 | let mesha_path = db_folder.clone().tap_mut(|path| path.push("merkle.db")); 70 | log::debug!("about to sqlite"); 71 | let conn = rusqlite::Connection::open(&sqlite_path).context("cannot make sqlite")?; 72 | conn.execute("create table if not exists history (height primary key not null, header not null, block not null)", params![])?; 73 | conn.execute("create table if not exists consensus_proofs (height primary key not null, proof not null)", params![])?; 74 | conn.execute( 75 | "create table if not exists stakes (txhash primary key not null, height not null, stake_doc not null)", 76 | params![], 77 | )?; 78 | conn.execute( 79 | "create table if not exists misc (key primary key not null, value not null)", 80 | params![], 81 | )?; 82 | 83 | log::debug!("sqlite initted"); 84 | 85 | // initialize the stakes 86 | for (txhash, stake) in genesis.stakes.iter() { 87 | conn.execute( 88 | "insert into stakes values ($1, $2, $3) on conflict do nothing", 89 | params![txhash.to_string(), 0, stake.stdcode()], 90 | )?; 91 | } 92 | 93 | let (send_pool, recv_pool) = smol::channel::unbounded(); 94 | for _ in 0..16 { 95 | let conn = rusqlite::Connection::open(&sqlite_path)?; 96 | conn.query_row("pragma journal_mode=WAL", params![], |_| Ok(()))?; 97 | conn.execute("pragma synchronous=normal", params![])?; 98 | send_pool.send(conn).await.unwrap(); 99 | } 100 | 101 | log::debug!("about to mesha"); 102 | let forest = novasmt::Database::new(MeshaCas::new( 103 | meshanina::Mapping::open(&mesha_path).context("cannot open mesha")?, 104 | )); 105 | let mempool = Arc::new(Mempool::new(genesis.clone().realize(&forest)).into()); 106 | Ok(Self { 107 | send_pool, 108 | recv_pool, 109 | old_cache: Arc::new(Cache::new(100)), 110 | forest: Arc::new(forest), 111 | 112 | genesis, 113 | 114 | new_block_notify: Arc::new(Event::new()), 115 | mempool, 116 | sqlite_path, 117 | 118 | lock: Default::default(), 119 | }) 120 | } 121 | 122 | /// Obtain the highest state. 123 | pub async fn highest_state(&self) -> SealedState { 124 | // TODO this may be a bit stale 125 | let height = self.highest_height().await; 126 | if height.0 > 0 { 127 | self.get_state(height).await.expect("highest not available") 128 | } else { 129 | self.genesis.clone().realize(self.forest()).seal(None) 130 | } 131 | } 132 | 133 | /// Obtain the highest height. 
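/// Like the other read paths below, this checks a connection out of the channel-based pool,
/// runs the blocking rusqlite query inside `smol::unblock`, and relies on a scopeguard to
/// return the connection to the pool afterwards.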
134 | pub async fn highest_height(&self) -> BlockHeight { 135 | autoretry(|| async { 136 | let conn = self.recv_pool.recv().await?; 137 | let send_pool = self.send_pool.clone(); 138 | smol::unblock(move || { 139 | let conn = scopeguard::guard(conn, |conn| send_pool.try_send(conn).unwrap()); 140 | let val: Option = 141 | conn.query_row("select max(height) from history", params![], |r| r.get(0))?; 142 | anyhow::Ok(val.map(BlockHeight)) 143 | }) 144 | .await 145 | }) 146 | .await 147 | .unwrap_or_default() 148 | } 149 | 150 | /// Waits until a certain height is available, then returns it. 151 | pub async fn get_state_or_wait(&self, height: BlockHeight) -> SealedState { 152 | loop { 153 | let notify = self.new_block_notify.listen(); 154 | match self.get_state(height).await { 155 | Some(val) => return val, 156 | _ => notify.await, 157 | } 158 | } 159 | } 160 | 161 | /// Reconstruct the stakeset at a given height. 162 | async fn get_stakeset(&self, height: BlockHeight) -> StakeSet { 163 | autoretry(|| async { 164 | let conn = self.recv_pool.recv().await?; 165 | let send_pool = self.send_pool.clone(); 166 | let genesis = self.genesis.clone(); 167 | smol::unblock(move || { 168 | let conn = scopeguard::guard(conn, |conn| send_pool.try_send(conn).unwrap()); 169 | let mut stmt = conn 170 | .prepare("select txhash, height, stake_doc from stakes where height <= $1")?; 171 | let mut stakes = StakeSet::new(vec![].into_iter()); 172 | // TODO this is dumb! 173 | for (txhash, stake) in genesis.stakes { 174 | stakes.add_stake(txhash, stake); 175 | } 176 | for row in stmt.query_map(params![height.0], |row| { 177 | Ok((row.get(0)?, row.get(1)?, row.get(2)?)) 178 | })? { 179 | let row: (String, u64, Vec) = row?; 180 | let t: TxHash = row.0.parse()?; 181 | let sd: StakeDoc = stdcode::deserialize(&row.2)?; 182 | stakes.add_stake(t, sd); 183 | } 184 | stakes.unlock_old(height.epoch()); 185 | anyhow::Ok(stakes) 186 | }) 187 | .await 188 | }) 189 | .await 190 | } 191 | 192 | /// Obtain just one particular Block. 193 | pub async fn get_block(&self, height: BlockHeight) -> Option { 194 | autoretry(|| async { 195 | if let Some(val) = self.old_cache.get(&height) { 196 | return anyhow::Ok(Some(val)); 197 | } 198 | let conn = self.recv_pool.recv().await?; 199 | let send_pool = self.send_pool.clone(); 200 | let res = smol::unblock(move || { 201 | let conn = scopeguard::guard(conn, |conn| send_pool.try_send(conn).unwrap()); 202 | let block_blob: Option> = conn 203 | .query_row( 204 | "select block from history where height = $1", 205 | params![height.0], 206 | |row| row.get(0), 207 | ) 208 | .optional()?; 209 | if let Some(block_blob) = block_blob { 210 | let block: Block = stdcode::deserialize(&block_blob)?; 211 | Ok(Some(block)) 212 | } else { 213 | Ok(None) 214 | } 215 | }) 216 | .await; 217 | if let Ok(Some(res)) = &res { 218 | self.old_cache.insert(height, res.clone()); 219 | } 220 | res 221 | }) 222 | .await 223 | } 224 | 225 | /// Obtain a historical SealedState. 226 | pub async fn get_state(&self, height: BlockHeight) -> Option> { 227 | let block: Block = self.get_block(height).await?; 228 | let stakeset = self.get_stakeset(height).await; 229 | assert_eq!( 230 | HashVal(stakeset.pre_tip911().root_hash()), 231 | block.header.stakes_hash 232 | ); 233 | Some(SealedState::from_block(&block, &stakeset, &self.forest)) 234 | } 235 | 236 | /// Obtain a historical ConsensusProof. 
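/// Returns `None` if no proof row was stored for that height.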
237 | pub async fn get_consensus(&self, height: BlockHeight) -> Option { 238 | autoretry(|| async { 239 | let conn = self.recv_pool.recv().await?; 240 | let send_pool = self.send_pool.clone(); 241 | smol::unblock(move || { 242 | let conn = scopeguard::guard(conn, |conn| send_pool.try_send(conn).unwrap()); 243 | let vec: Option> = conn 244 | .query_row( 245 | "select proof from consensus_proofs where height = $1", 246 | params![height.0], 247 | |r| r.get(0), 248 | ) 249 | .optional()?; 250 | if let Some(vec) = vec { 251 | anyhow::Ok(Some(stdcode::deserialize(&vec)?)) 252 | } else { 253 | Ok(None) 254 | } 255 | }) 256 | .await 257 | }) 258 | .await 259 | } 260 | 261 | /// Consumes a block, applying it to the current state. 262 | pub async fn apply_block(&self, blk: Block, cproof: ConsensusProof) -> anyhow::Result<()> { 263 | let _guard = self.lock.lock().await; 264 | if blk.header.height.0 == 531 { 265 | eprintln!("APPLY BLOCK: {:#?}", blk); 266 | } 267 | let highest_state = self.highest_state().await; 268 | let header = blk.header; 269 | if header.height != highest_state.header().height + 1.into() { 270 | anyhow::bail!( 271 | "cannot apply block {} to height {}", 272 | header.height, 273 | highest_state.header().height 274 | ); 275 | } 276 | 277 | // Check the consensus proof 278 | let mut total_votes = CoinValue(0); 279 | let mut present_votes = CoinValue(0); 280 | for stake_doc_bytes in highest_state.raw_stakes().pre_tip911().iter() { 281 | let stake_doc: StakeDoc = stdcode::deserialize(&stake_doc_bytes.1)?; 282 | if blk.header.height.epoch() >= stake_doc.e_start 283 | && blk.header.height.epoch() < stake_doc.e_post_end 284 | { 285 | total_votes += stake_doc.syms_staked; 286 | if let Some(v) = cproof.get(&stake_doc.pubkey) { 287 | if stake_doc.pubkey.verify(&blk.header.hash(), v) { 288 | present_votes += total_votes; 289 | } 290 | } 291 | } 292 | } 293 | if present_votes.0 <= 2 * total_votes.0 / 3 { 294 | anyhow::bail!( 295 | "rejecting putative block {} due to insufficient votes ({}/{})", 296 | blk.header.height, 297 | present_votes, 298 | total_votes 299 | ) 300 | } 301 | 302 | let start = Instant::now(); 303 | let new_state = highest_state.apply_block(&blk)?; 304 | // we flush the merkle stuff first, because the sqlite points to merkle 305 | self.forest.storage().flush(); 306 | let apply_time = start.elapsed(); 307 | let start = Instant::now(); 308 | 309 | // now transactionally save to sqlite 310 | { 311 | let conn = self.recv_pool.recv().await?; 312 | let send_pool = self.send_pool.clone(); 313 | let _forest = self.forest.clone(); 314 | smol::unblock(move || { 315 | let mut conn = scopeguard::guard(conn, |conn| send_pool.try_send(conn).unwrap()); 316 | let conn = conn.transaction()?; 317 | conn.execute( 318 | "insert into history (height, header, block) values ($1, $2, $3)", 319 | params![blk.header.height.0, blk.header.stdcode(), blk.stdcode()], 320 | )?; 321 | 322 | conn.execute( 323 | "insert into consensus_proofs (height, proof) values ($1, $2)", 324 | params![blk.header.height.0, stdcode::serialize(&cproof).unwrap()], 325 | )?; 326 | 327 | for txn in blk.transactions { 328 | if txn.kind == TxKind::Stake { 329 | if let Ok(doc) = stdcode::deserialize::(&txn.data) { 330 | // TODO BUG BUG this poorly replicates the validation logic. 
Make a method SealedState::new_stakes() 331 | if blk.header.height.0 >= 500000 || blk.header.network != NetID::Mainnet { 332 | conn.execute("insert into stakes (txhash, height, stake_doc) values ($1, $2, $3)", params![txn.hash_nosigs().to_string(), blk.header.height.0, doc.stdcode()])?; 333 | } 334 | } 335 | } 336 | } 337 | conn.commit()?; 338 | anyhow::Ok(()) 339 | }) 340 | .await? 341 | } 342 | log::debug!( 343 | "applied block {} / {} in {:.2}ms (history insertion {:.2}ms)", 344 | new_state.header().height, 345 | new_state.header().hash(), 346 | apply_time.as_secs_f64() * 1000.0, 347 | start.elapsed().as_secs_f64() * 1000.0 348 | ); 349 | let next = self.highest_state().await; 350 | self.mempool_mut().rebase(next); 351 | self.new_block_notify.notify(usize::MAX); 352 | 353 | Ok(()) 354 | } 355 | 356 | /// Gets the forest. 357 | pub fn forest(&self) -> &novasmt::Database { 358 | &self.forest 359 | } 360 | } 361 | -------------------------------------------------------------------------------- /staker-config.yaml: -------------------------------------------------------------------------------- 1 | # secret key; must correspond to "stakes.dead[...]beef.pubkey" in the network config 2 | signing_secret: 5b4c8873cbdb089439d025e9fa817b1df1128231699131c245c0027be880d4d44ce983d241f1d40b0e5b65e0bd1a6877a35acaec5182f110810f1276103c829e 3 | # address for staker-network communication, this can be arbitrary 4 | listen: 127.0.0.1:20000 5 | # must be same as "listen" 6 | bootstrap: 127.0.0.1:20000 7 | # where block rewards are sent 8 | payout_addr: t5xw3qvzvfezkb748d3zt929zkbt7szgt6jr3zfxxnewj1rtajpjx0 9 | # vote for this fee multiplier (higher values charge more fees) 10 | target_fee_multiplier: 10000 11 | --------------------------------------------------------------------------------