├── .github ├── dependabot.yml └── workflows │ └── ci.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md └── src ├── disc.rs ├── disc ├── dns │ ├── backend │ │ ├── memory.rs │ │ ├── mod.rs │ │ └── trust_dns.rs │ └── mod.rs └── v4 │ ├── kad.rs │ ├── message.rs │ ├── mod.rs │ ├── node.rs │ ├── proto.rs │ └── util.rs ├── ecies.rs ├── ecies ├── algorithm.rs └── proto.rs ├── errors.rs ├── lib.rs ├── mac.rs ├── node_filter.rs ├── peer.rs ├── rlpx.rs ├── transport.rs ├── types.rs └── util.rs /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "03:00" 8 | open-pull-requests-limit: 10 9 | commit-message: 10 | prefix: chore 11 | include: scope 12 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | release: 9 | types: [published] 10 | 11 | env: 12 | CARGO_TERM_COLOR: always 13 | 14 | jobs: 15 | build: 16 | name: build and test ${{ matrix.job.target }} (${{ matrix.job.os }}) 17 | runs-on: ${{ matrix.job.os }} 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | job: 22 | - os: ubuntu-latest 23 | target: x86_64-unknown-linux-gnu 24 | - os: ubuntu-latest 25 | target: aarch64-unknown-linux-gnu 26 | - os: macos-latest 27 | target: x86_64-apple-darwin 28 | - os: macos-latest 29 | target: aarch64-apple-darwin 30 | - os: windows-latest 31 | target: x86_64-pc-windows-msvc 32 | 33 | steps: 34 | - uses: actions/checkout@v2 35 | - name: Install latest stable toolchain 36 | uses: actions-rs/toolchain@v1 37 | with: 38 | profile: minimal 39 | toolchain: stable 40 | target: ${{ matrix.job.target }} 41 | override: true 42 | components: rustfmt, clippy 43 | - name: 
Build 44 | run: cargo build --verbose 45 | - name: Run tests 46 | run: cargo test --verbose 47 | 48 | lint: 49 | name: lints 50 | runs-on: ubuntu-latest 51 | steps: 52 | - name: Checkout sources 53 | uses: actions/checkout@v2 54 | - name: Install stable toolchain 55 | uses: actions-rs/toolchain@v1 56 | with: 57 | toolchain: stable 58 | profile: minimal 59 | components: rustfmt, clippy 60 | override: true 61 | - name: Install nightly toolchain 62 | uses: actions-rs/toolchain@v1 63 | with: 64 | toolchain: nightly 65 | profile: minimal 66 | components: rustfmt, clippy 67 | override: true 68 | - uses: Swatinem/rust-cache@v1 69 | with: 70 | cache-on-failure: true 71 | - name: cargo fmt 72 | run: cargo +nightly fmt --all -- --check 73 | - name: cargo clippy 74 | run: cargo +stable clippy --all-features -- -D warnings 75 | 76 | publish: 77 | # Only do this job if publishing a release 78 | needs: 79 | [ 80 | build, 81 | lint, 82 | ] 83 | if: github.event_name == 'release' && github.event.action == 'published' 84 | runs-on: ubuntu-latest 85 | 86 | steps: 87 | - name: Checkout repository 88 | uses: actions/checkout@v2 89 | 90 | - name: Install toolchain 91 | uses: actions-rs/toolchain@v1 92 | with: 93 | toolchain: stable 94 | override: true 95 | 96 | - name: Publish crate 97 | uses: katyo/publish-crates@v1 98 | with: 99 | publish-delay: 30000 100 | registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }} 101 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "devp2p-rs" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | 
[dependencies] 9 | aes = "0.8.1" 10 | anyhow = { version = "1.0.58", features = ["std"] } 11 | array-init = "2.0.1" 12 | arrayvec = "0.7.2" 13 | async-stream = "0.3.3" 14 | async-trait = "0.1.56" 15 | auto_impl = "1.0.1" 16 | block-padding = "0.3.2" 17 | byteorder = "1.4.3" 18 | bytes = "1.2.0" 19 | chrono = "0.4.19" 20 | cidr = "0.2.1" 21 | cipher = { version = "0.4", features = ["block-padding"] } 22 | ctr = "0.9.1" 23 | data-encoding = "2.3.2" 24 | derive_more = "0.99.17" 25 | digest = "0.10.3" 26 | educe = "0.4.19" 27 | enr = { git = "https://github.com/sigp/enr", default-features = false, features = [ 28 | "rust-secp256k1", 29 | ] } 30 | enum-primitive-derive = "0.2.2" 31 | ethereum-types = "0.14.0" 32 | fastrlp = { version = "0.2.0", features = [ "derive", "ethereum-types", "std" ] } 33 | futures = "0.3.21" 34 | generic-array = "0.14.5" 35 | hex = "0.4.3" 36 | hmac = "0.12.1" 37 | igd = { version = "0.12.0", features = [ "aio" ] } 38 | lru = "0.7.8" 39 | maplit = "1.0.2" 40 | num-traits = "0.2.15" 41 | parking_lot = "0.12.1" 42 | rand = "0.8.5" 43 | secp256k1 = { version = "0.24.0", features = [ "global-context", "rand-std", "recovery" ] } 44 | sha2 = "0.10.2" 45 | sha3 = "0.10.1" 46 | snap = "1.0.5" 47 | # discovery uses these structured concurrency primitives 48 | task-group = { git = "https://github.com/vorot93/task-group" } 49 | thiserror = "1.0.31" 50 | tokio = { version = "1.20.1", features = [ "full" ] } 51 | tokio-stream = "0.1.9" 52 | tokio-util = { version = "0.7.3", features = ["codec"] } 53 | tracing = "0.1.35" 54 | trust-dns-resolver = "0.22.0" 55 | typenum = "1.15.0" 56 | url = "2.2.2" 57 | uuid = { version = "1.1.2", features = [ "v4" ] } 58 | 59 | [dev-dependencies] 60 | hex-literal = "0.3.4" 61 | proptest = "1.0.0" 62 | 63 | [lib] 64 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | 
Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 
39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 
76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 
113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. 
For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 
174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 
209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. 
Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. 
If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 
336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. 
If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 
428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # devp2p-rs 2 | 3 | 4 | 5 | [![GPL3 License](https://img.shields.io/github/license/rjected/devp2p-rs)](https://github.com/rjected/devp2p-rs/blob/main/LICENSE) 6 | [![CI](https://github.com/rjected/devp2p-rs/actions/workflows/ci.yml/badge.svg)](https://github.com/rjected/devp2p-rs/actions/workflows/ci.yml) 7 | 8 | 9 | 10 | 11 | 12 | A devp2p networking stack from [`akula`](https://github.com/akula-bft/akula). 
13 | 14 | *Compiler support: requires rustc 1.62+* 15 | -------------------------------------------------------------------------------- /src/disc.rs: -------------------------------------------------------------------------------- 1 | use crate::{NodeRecord, PeerId}; 2 | use async_stream::stream; 3 | use futures::{stream::BoxStream, StreamExt}; 4 | use std::{collections::HashMap, net::SocketAddr, pin::Pin, task::Poll, time::Duration}; 5 | use tokio::time::sleep; 6 | use tokio_stream::Stream; 7 | 8 | pub mod v4; 9 | pub use self::v4::{Discv4, Discv4Builder}; 10 | 11 | pub mod dns; 12 | 13 | pub use self::dns::DnsDiscovery; 14 | 15 | pub type Discovery = BoxStream<'static, anyhow::Result>; 16 | 17 | pub struct StaticNodes(Pin> + Send + 'static>>); 18 | 19 | impl StaticNodes { 20 | pub fn new(nodes: HashMap, delay: Duration) -> Self { 21 | Self(Box::pin(stream! { 22 | loop { 23 | for (&addr, &id) in &nodes { 24 | yield Ok(NodeRecord { id, addr }); 25 | sleep(delay).await; 26 | } 27 | } 28 | })) 29 | } 30 | } 31 | 32 | impl Stream for StaticNodes { 33 | type Item = anyhow::Result; 34 | 35 | fn poll_next( 36 | mut self: Pin<&mut Self>, 37 | cx: &mut std::task::Context<'_>, 38 | ) -> Poll> { 39 | self.0.poll_next_unpin(cx) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/disc/dns/backend/memory.rs: -------------------------------------------------------------------------------- 1 | use super::Backend; 2 | use async_trait::async_trait; 3 | use std::collections::HashMap; 4 | use tracing::*; 5 | 6 | #[async_trait] 7 | impl Backend for HashMap { 8 | async fn get_record(&self, fqdn: String) -> anyhow::Result> { 9 | debug!("resolving {}", fqdn); 10 | if let Some(v) = self.get(&fqdn) { 11 | debug!("resolved {} to {}", fqdn, v); 12 | return Ok(Some(v.clone())); 13 | } 14 | 15 | Ok(None) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/disc/dns/backend/mod.rs: 
-------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use auto_impl::auto_impl; 3 | 4 | pub mod memory; 5 | pub mod trust_dns; 6 | 7 | #[async_trait] 8 | #[auto_impl(&, Box, Arc)] 9 | pub trait Backend: Send + Sync + 'static { 10 | async fn get_record(&self, fqdn: String) -> anyhow::Result>; 11 | } 12 | -------------------------------------------------------------------------------- /src/disc/dns/backend/trust_dns.rs: -------------------------------------------------------------------------------- 1 | use super::Backend; 2 | use async_trait::async_trait; 3 | use tracing::*; 4 | use trust_dns_resolver::{ 5 | error::{ResolveError, ResolveErrorKind}, 6 | proto::DnsHandle, 7 | AsyncResolver, ConnectionProvider, 8 | }; 9 | 10 | #[async_trait] 11 | impl Backend for AsyncResolver 12 | where 13 | C: DnsHandle, 14 | P: ConnectionProvider, 15 | { 16 | async fn get_record(&self, fqdn: String) -> anyhow::Result> { 17 | trace!("Resolving FQDN {}", fqdn); 18 | match self.txt_lookup(format!("{}.", fqdn)).await { 19 | Err(e) => { 20 | if !matches!(e.kind(), ResolveErrorKind::NoRecordsFound { .. 
}) { 21 | return Err(e.into()); 22 | } 23 | } 24 | Ok(v) => { 25 | if let Some(txt) = v.into_iter().next() { 26 | if let Some(txt_entry) = txt.iter().next() { 27 | return Ok(Some(String::from_utf8(txt_entry.to_vec())?)); 28 | } 29 | } 30 | } 31 | } 32 | 33 | Ok(None) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/disc/dns/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{util::pk2id, NodeRecord}; 2 | use anyhow::{anyhow, bail}; 3 | use arrayvec::ArrayString; 4 | use async_stream::{stream, try_stream}; 5 | use bytes::Bytes; 6 | use data_encoding::*; 7 | use derive_more::{Deref, Display}; 8 | use educe::Educe; 9 | use enr::{Enr, EnrKeyUnambiguous, EnrPublicKey}; 10 | use maplit::hashset; 11 | use secp256k1::{PublicKey, SecretKey}; 12 | use std::{ 13 | collections::{HashMap, HashSet}, 14 | fmt, 15 | fmt::{Display, Formatter}, 16 | pin::Pin, 17 | str::FromStr, 18 | sync::Arc, 19 | time::Duration, 20 | }; 21 | use task_group::TaskGroup; 22 | use thiserror::Error; 23 | use tokio::sync::mpsc::{channel, Receiver}; 24 | use tokio_stream::{Stream, StreamExt}; 25 | use tracing::*; 26 | 27 | mod backend; 28 | pub use self::backend::Backend; 29 | 30 | type Base32Hash = ArrayString; 31 | 32 | pub type QueryStream = Pin>> + Send + 'static>>; 33 | 34 | pub const BASE32_HASH_LEN: usize = 26; 35 | pub const ROOT_PREFIX: &str = "enrtree-root:v1"; 36 | pub const LINK_PREFIX: &str = "enrtree://"; 37 | pub const BRANCH_PREFIX: &str = "enrtree-branch:"; 38 | pub const ENR_PREFIX: &str = "enr:"; 39 | 40 | const MAX_SINGLE_RESOLUTION: u64 = 10; 41 | const MAX_RESOLUTION_DURATION: u64 = 1800; 42 | 43 | pub struct DnsDiscovery { 44 | #[allow(unused)] 45 | tasks: TaskGroup, 46 | receiver: Receiver>, 47 | } 48 | 49 | impl DnsDiscovery { 50 | #[must_use] 51 | pub fn new( 52 | discovery: Arc>, 53 | domain: String, 54 | public_key: Option, 55 | ) -> Self { 56 | let tasks = 
impl DnsDiscovery {
    /// Spawns a background "pump" task that repeatedly resolves the ENR tree at
    /// `domain` through `discovery` and forwards every node that advertises a
    /// TCP/IPv4 endpoint into the channel drained by this stream.
    ///
    /// * `discovery` - EIP-1459 DNS tree resolver.
    /// * `public_key` - if set, the tree root's signature is verified against it.
    ///
    /// NOTE(review): generic parameters were stripped by extraction; reconstructed
    /// here as `<B: Backend>` with `Resolver<B, SecretKey>` / `Option<PublicKey>`
    /// based on the secp256k1 imports and the `pk2id(&v.public_key())` call —
    /// confirm against the original source.
    #[must_use]
    pub fn new<B: Backend>(
        discovery: Arc<Resolver<B, SecretKey>>,
        domain: String,
        public_key: Option<PublicKey>,
    ) -> Self {
        let tasks = TaskGroup::default();

        // Capacity-1 channel: the pump blocks on `send` until the consumer
        // polls, so resolution proceeds no faster than records are consumed.
        let (tx, receiver) = channel(1);
        tasks.spawn_with_name("DNS discovery pump", async move {
            loop {
                let mut query = discovery.query(domain.clone(), public_key);
                // Hard deadline after which the whole tree is re-resolved from
                // scratch, picking up any updates to the DNS records.
                let restart_at =
                    std::time::Instant::now() + Duration::from_secs(MAX_RESOLUTION_DURATION);

                loop {
                    match tokio::time::timeout(
                        Duration::from_secs(MAX_SINGLE_RESOLUTION),
                        query.next(),
                    )
                    .await
                    {
                        // Resolution error: forward it, then restart the query.
                        Ok(Some(Err(e))) => {
                            if tx.send(Err(e)).await.is_err() {
                                // Receiver dropped - shut the pump down.
                                return;
                            }
                            break;
                        }
                        // Got an ENR; nodes without a TCP/IPv4 socket are skipped.
                        Ok(Some(Ok(v))) => {
                            if let Some(addr) = v.tcp4_socket() {
                                if tx
                                    .send(Ok(NodeRecord {
                                        addr: addr.into(),
                                        id: pk2id(&v.public_key()),
                                    }))
                                    .await
                                    .is_err()
                                {
                                    return;
                                }
                            }
                        }
                        // Query stream exhausted: restart from the root.
                        Ok(None) => {
                            break;
                        }
                        // One resolution step timed out; keep polling the same query.
                        Err(_) => {}
                    }

                    if std::time::Instant::now() > restart_at {
                        trace!("Restarting DNS resolution");
                        break;
                    }
                }
            }
        });

        Self { tasks, receiver }
    }
}

impl Stream for DnsDiscovery {
    type Item = anyhow::Result<NodeRecord>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Simply drain the channel fed by the background pump task.
        Pin::new(&mut self.receiver).poll_recv(cx)
    }
}
impl RootRecord {
    /// Verifies this root record's signature against `pk`.
    ///
    /// The signed payload is the textual form of the unsigned root
    /// (`enrtree-root:v1 e=... l=... seq=...`), as produced by
    /// `UnsignedRoot`'s `Display` impl.
    ///
    /// # Errors
    /// Fails when the signature does not verify under `pk`.
    ///
    /// NOTE(review): the generic parameter was stripped by extraction;
    /// reconstructed as `<K: EnrKeyUnambiguous>` from the `verify::<K>(&pk)`
    /// call site in `resolve_tree` — confirm against the original source.
    fn verify<K: EnrKeyUnambiguous>(&self, pk: &K::PublicKey) -> anyhow::Result<()> {
        let mut sig = self.signature.clone();

        // TODO: find way to unify with ed25519 sigs
        // Truncate to 64 bytes — presumably dropping a trailing secp256k1
        // recovery byte before compact-signature verification; confirm.
        sig.truncate(64);
        if !pk.verify_v4(self.base.to_string().as_bytes(), &sig) {
            bail!("Public key does not match");
        }

        Ok(())
    }
}
// NOTE(review): generic parameters in this impl were stripped by extraction;
// `<K: EnrKeyUnambiguous>`, `Base32Hash`, `Enr<K>` turbofishes are reconstructed
// from the surrounding type definitions — confirm against the original source.
impl<K: EnrKeyUnambiguous> FromStr for DnsRecord<K> {
    type Err = anyhow::Error;

    /// Parses one DNS TXT entry into an EIP-1459 record, dispatching on its
    /// prefix: `enrtree-root:v1` (root), `enrtree://` (link),
    /// `enrtree-branch:` (branch) or `enr:` (leaf ENR).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        trace!("Parsing record {}", s);
        // Root record: whitespace-separated `e=`, `l=`, `seq=`, `sig=` fields,
        // all of which are mandatory.
        if let Some(root) = s.strip_prefix(ROOT_PREFIX) {
            let mut e = None;
            let mut l = None;
            let mut seq = None;
            let mut sig = None;
            for entry in root.split_whitespace() {
                if let Some(v) = entry.strip_prefix("e=") {
                    trace!("Extracting ENR root: {:?}", v);
                    e = Some(v.parse()?);
                } else if let Some(v) = entry.strip_prefix("l=") {
                    trace!("Extracting link root: {:?}", v);
                    l = Some(v.parse()?);
                } else if let Some(v) = entry.strip_prefix("seq=") {
                    trace!("Extracting sequence: {:?}", v);
                    seq = Some(v.parse()?);
                } else if let Some(v) = entry.strip_prefix("sig=") {
                    trace!("Extracting signature: {:?}", v);
                    // Signature is URL-safe base64 without padding.
                    let v = BASE64URL_NOPAD.decode(v.as_bytes())?.into();
                    sig = Some(v);
                } else {
                    bail!("Invalid string: {}", entry);
                }
            }

            let v = RootRecord {
                base: UnsignedRoot {
                    enr_root: e.ok_or_else(|| anyhow!("ENR root absent"))?,
                    link_root: l.ok_or_else(|| anyhow!("Link root absent"))?,
                    sequence: seq.ok_or_else(|| anyhow!("Sequence not found"))?,
                },
                signature: sig.ok_or_else(|| anyhow!("Signature not found"))?,
            };

            trace!("Successfully parsed {:?}", v);

            return Ok(DnsRecord::Root(v));
        }

        // Link record: `enrtree://<base32 pubkey>@<domain>`.
        if let Some(link) = s.strip_prefix(LINK_PREFIX) {
            let mut it = link.split('@');
            let public_key = K::decode_public(
                &BASE32_NOPAD.decode(
                    it.next()
                        .ok_or_else(|| anyhow!("Public key not found"))?
                        .as_bytes(),
                )?,
            )?;
            let domain = it
                .next()
                .ok_or_else(|| anyhow!("Domain not found"))?
                .to_string();

            return Ok(DnsRecord::Link { public_key, domain });
        }

        // Branch record: comma-separated child subdomain hashes; empty
        // entries are silently dropped, malformed ones abort the parse.
        if let Some(branch) = s.strip_prefix(BRANCH_PREFIX) {
            let children = branch
                .trim()
                .split(',')
                .filter_map(|h| match h.parse::<Base32Hash>() {
                    Ok(v) => {
                        if v.is_empty() {
                            None
                        } else {
                            Some(Ok(v))
                        }
                    }
                    Err(e) => Some(Err(anyhow::Error::new(e))),
                })
                .collect::<Result<HashSet<_>, _>>()?;

            return Ok(DnsRecord::Branch { children });
        }

        // Leaf record: a base64 ENR, delegated to the `enr` crate's parser.
        if s.starts_with(ENR_PREFIX) {
            let record = s.parse::<Enr<K>>().map_err(InvalidEnr)?;

            return Ok(DnsRecord::Enr { record });
        }

        bail!("Invalid string: {}", s)
    }
}
/// Resolves all `children` of a branch record concurrently.
///
/// One task per child is spawned on `task_group`; each task fetches the
/// child's TXT record, recurses into nested branches/linked trees, and feeds
/// results (or the first error) into a capacity-1 channel that the returned
/// stream drains. `kind` decides whether leaf ENRs or link records are legal
/// in this subtree.
///
/// NOTE(review): generic parameters were stripped by extraction; reconstructed
/// as `<B: Backend, K: EnrKeyUnambiguous>` from the surrounding signatures —
/// confirm against the original source.
fn resolve_branch<B: Backend, K: EnrKeyUnambiguous>(
    task_group: Arc<TaskGroup>,
    backend: Arc<B>,
    host: String,
    children: HashSet<Base32Hash>,
    kind: BranchKind<K>,
) -> QueryStream<K> {
    let (tx, mut branches_res) = tokio::sync::mpsc::channel(1);
    for subdomain in &children {
        let fqdn = format!("{}.{}", subdomain, host);
        task_group.spawn_with_name(format!("DNS discovery: {}", fqdn), {
            let subdomain = *subdomain;
            let tx = tx.clone();
            let backend = backend.clone();
            let host = host.clone();
            let kind = kind.clone();
            let fqdn = fqdn.clone();
            let task_group = task_group.clone();
            async move {
                // Inner async block so any `?`-propagated error can be caught
                // below and forwarded to the consumer instead of being lost.
                if let Err(e) = {
                    let tx = tx.clone();
                    async move {
                        let record = backend.get_record(fqdn).await?;
                        if let Some(record) = record {
                            trace!("Resolved record {}: {:?}", subdomain, record);
                            let record = record.parse()?;
                            match record {
                                // Nested branch: recurse and relay its items.
                                DnsRecord::Branch { children } => {
                                    let mut t =
                                        resolve_branch(task_group, backend, host, children, kind);
                                    while let Some(item) = t.try_next().await? {
                                        let _ = tx.send(Ok(item)).await;
                                    }

                                    return Ok(());
                                }
                                // Link to another tree: only legal in link
                                // subtrees, and gated by the whitelist.
                                DnsRecord::Link { public_key, domain } => {
                                    if let BranchKind::Link { remote_whitelist } = &kind {
                                        if domain_is_allowed::<K>(
                                            remote_whitelist,
                                            &domain,
                                            &public_key,
                                        ) {
                                            let mut t = resolve_tree(
                                                Some(task_group),
                                                backend,
                                                domain,
                                                Some(public_key),
                                                None,
                                                remote_whitelist.clone(),
                                            );
                                            while let Some(item) = t.try_next().await? {
                                                let _ = tx.send(Ok(item)).await;
                                            }
                                        } else {
                                            trace!(
                                                "Skipping subtree for forbidden domain: {}",
                                                domain
                                            );
                                        }
                                        return Ok(());
                                    } else {
                                        return Err(anyhow!(
                                            "Unexpected link record in ENR tree: {}",
                                            subdomain
                                        ));
                                    }
                                }
                                // Leaf ENR: only legal in ENR subtrees.
                                DnsRecord::Enr { record } => {
                                    if let BranchKind::Enr = &kind {
                                        let _ = tx.send(Ok(record)).await;

                                        return Ok(());
                                    } else {
                                        return Err(anyhow!(
                                            "Unexpected ENR record in link tree: {}",
                                            subdomain
                                        ));
                                    }
                                }
                                // Roots never appear below the tree root.
                                DnsRecord::Root { .. } => {
                                    return Err(anyhow!("Unexpected root record: {}", subdomain));
                                }
                            }
                        } else {
                            debug!("Child {} is empty", subdomain);
                        }

                        Ok(())
                    }
                }
                .await
                {
                    // Best-effort: if the consumer is gone, drop the error.
                    let _ = tx.send(Err(e)).await;
                }
            }
        });
    }

    // The stream ends once every child task has dropped its `tx` clone.
    Box::pin(stream! {
        trace!("Resolving branch {:?}", children);
        while let Some(v) = branches_res.recv().await {
            yield v;
        }
        trace!("Branch {:?} resolution complete", children);
    })
}
/// Resolves an entire ENR tree rooted at `host`.
///
/// Fetches and (optionally) signature-verifies the root record, skips it if
/// its sequence number is not newer than `seen_sequence`, then streams first
/// the link subtree and afterwards the ENR subtree.
///
/// NOTE(review): generic parameters were stripped by extraction; reconstructed
/// as `<B: Backend, K: EnrKeyUnambiguous>` from the surrounding signatures —
/// confirm against the original source.
fn resolve_tree<B: Backend, K: EnrKeyUnambiguous>(
    task_group: Option<Arc<TaskGroup>>,
    backend: Arc<B>,
    host: String,
    public_key: Option<K::PublicKey>,
    seen_sequence: Option<usize>,
    remote_whitelist: Option<Arc<HashMap<String, K::PublicKey>>>,
) -> QueryStream<K> {
    Box::pin(try_stream! {
        // Fall back to a private task group when the caller supplied none.
        let task_group = task_group.unwrap_or_default();
        let record = backend.get_record(host.clone()).await?;
        if let Some(record) = &record {
            let record = DnsRecord::<K>::from_str(record)?;
            if let DnsRecord::Root(record) = &record {
                // Only verify when a trusted key was provided.
                if let Some(pk) = public_key {
                    record.verify::<K>(&pk)?;
                }

                let UnsignedRoot { enr_root, link_root, sequence } = &record.base;

                if let Some(seen) = seen_sequence {
                    if *sequence <= seen {
                        // We have already seen this record.
                        return;
                    }
                }

                // Link subtree first (other trees), then the ENR subtree.
                let mut s = resolve_branch(task_group.clone(), backend.clone(), host.clone(), hashset![ *link_root ], BranchKind::Link { remote_whitelist });
                while let Some(record) = s.try_next().await? {
                    yield record;
                }

                let mut s = resolve_branch(task_group.clone(), backend.clone(), host.clone(), hashset![ *enr_root ], BranchKind::Enr);
                while let Some(record) = s.try_next().await? {
                    yield record;
                }
            } else {
                // The record at `host` must be a root.
                Err(anyhow!("Expected root, got {:?}", record))?
            }
            trace!("Resolution of tree at {} complete", host);
        } else {
            warn!("No records found for tree {}", host);
        }
    })
}
474 | } 475 | trace!("Resolution of tree at {} complete", host); 476 | } else { 477 | warn!("No records found for tree {}", host); 478 | } 479 | }) 480 | } 481 | 482 | pub struct Resolver { 483 | backend: Arc, 484 | task_group: Option>, 485 | seen_sequence: Option, 486 | remote_whitelist: Option>>, 487 | } 488 | 489 | impl Resolver { 490 | pub fn new(backend: Arc) -> Self { 491 | Self { 492 | backend, 493 | task_group: None, 494 | seen_sequence: None, 495 | remote_whitelist: None, 496 | } 497 | } 498 | 499 | pub fn with_task_group(&mut self, task_group: Arc) -> &mut Self { 500 | self.task_group = Some(task_group); 501 | self 502 | } 503 | 504 | pub fn with_seen_sequence(&mut self, seen_sequence: usize) -> &mut Self { 505 | self.seen_sequence = Some(seen_sequence); 506 | self 507 | } 508 | 509 | pub fn with_remote_whitelist( 510 | &mut self, 511 | remote_whitelist: Arc>, 512 | ) -> &mut Self { 513 | self.remote_whitelist = Some(remote_whitelist); 514 | self 515 | } 516 | 517 | pub fn query(&self, host: impl Display, public_key: Option) -> QueryStream { 518 | resolve_tree( 519 | self.task_group.clone(), 520 | self.backend.clone(), 521 | host.to_string(), 522 | public_key, 523 | self.seen_sequence, 524 | self.remote_whitelist.clone(), 525 | ) 526 | } 527 | 528 | pub fn query_tree(&self, tree_link: impl AsRef) -> QueryStream { 529 | match DnsRecord::::from_str(tree_link.as_ref()).and_then(|link| { 530 | if let DnsRecord::Link { public_key, domain } = link { 531 | info!("{}/{}", domain, hex::encode(public_key.encode())); 532 | Ok((public_key, domain)) 533 | } else { 534 | bail!("Unexpected record type") 535 | } 536 | }) { 537 | Ok((public_key, domain)) => self.query(domain, Some(public_key)), 538 | Err(e) => Box::pin(tokio_stream::once(Err(e))), 539 | } 540 | } 541 | } 542 | 543 | #[cfg(test)] 544 | mod tests { 545 | use super::*; 546 | use hex_literal::hex; 547 | use maplit::hashmap; 548 | use secp256k1::{PublicKey, SecretKey}; 549 | use std::collections::{HashMap, 
HashSet}; 550 | 551 | fn test_records_to_hashmap( 552 | domain: &str, 553 | records: &[(Option<&str>, &str)], 554 | ) -> HashMap { 555 | records 556 | .iter() 557 | .map(|(sub, entry)| { 558 | ( 559 | format!( 560 | "{}{}", 561 | sub.map(|s| format!("{}.", s)).unwrap_or_default(), 562 | domain 563 | ), 564 | entry.to_string(), 565 | ) 566 | }) 567 | .collect() 568 | } 569 | 570 | fn test_records_to_hashmap_geth(records: &[(&str, &str)]) -> HashMap { 571 | records 572 | .iter() 573 | .map(|(domain, entry)| (domain.to_string(), entry.to_string())) 574 | .collect() 575 | } 576 | 577 | #[tokio::test] 578 | async fn eip_example() { 579 | const DOMAIN: &str = "mynodes.org"; 580 | const TEST_RECORDS: &[(Option<&str>, &str)] = &[ 581 | ( 582 | None, 583 | "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA" 584 | ), ( 585 | Some("C7HRFPF3BLGF3YR4DY5KX3SMBE"), 586 | "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org" 587 | ), ( 588 | Some("JWXYDBPXYWG6FX3GMDIBFA6CJ4"), 589 | "enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24", 590 | ), ( 591 | Some("2XS2367YHAXJFGLZHVAWLQD4ZY"), 592 | "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA" 593 | ), ( 594 | Some("H4FHT4B454P6UXFD7JCYQ5PWDY"), 595 | "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI" 596 | ), ( 597 | Some("MHTDO6TMUBRIA2XWG5LUDACK24"), 598 | "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o" 599 | ) 600 | ]; 601 | 602 | let data = test_records_to_hashmap(DOMAIN, TEST_RECORDS); 
603 | 604 | let mut s = Resolver::<_, SecretKey>::new(Arc::new(data)) 605 | .with_remote_whitelist(Arc::new(hashmap!{ 606 | "morenodes.example.org".to_string() => PublicKey::from_slice(&hex!("049f88229042fef9200246f49f94d9b77c4e954721442714e85850cb6d9e5daf2d880ea0e53cb3ac1a75f9923c2726a4f941f7d326781baa6380754a360de5c2b6")).unwrap() 607 | })) 608 | .query(DOMAIN.to_string(), None); 609 | let mut out = HashSet::new(); 610 | while let Some(record) = s.try_next().await.unwrap() { 611 | assert!(out.insert(record.to_base64())); 612 | } 613 | assert_eq!( 614 | out, 615 | hashset![ 616 | "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA", 617 | "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI", 618 | "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o", 619 | ].into_iter().map(ToString::to_string).collect() 620 | ); 621 | } 622 | 623 | #[tokio::test] 624 | async fn bad_node() { 625 | const TEST_RECORDS: &[(&str, &str)] = &[ 626 | ("n", "enrtree-root:v1 e=INDMVBZEEQ4ESVYAKGIYU74EAA l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=3 sig=Vl3AmunLur0JZ3sIyJPSH6A3Vvdp4F40jWQeCmkIhmcgwE4VC5U9wpK8C_uL_CMY29fd6FAhspRvq2z_VysTLAA"), 627 | ("C7HRFPF3BLGF3YR4DY5KX3SMBE.n", "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"), 628 | ("INDMVBZEEQ4ESVYAKGIYU74EAA.n", "enr:-----"), 629 | ]; 630 | 631 | let data = test_records_to_hashmap_geth(TEST_RECORDS); 632 | 633 | let err = Resolver::<_, SecretKey>::new(Arc::new(data)) 634 | .query_tree("enrtree://AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@n") 635 | .collect::, _>>() 636 | .await 637 | .unwrap_err(); 638 | if !err.chain().any(|e| e.is::()) { 639 | 
//! Kademlia-style routing table used by the discv4 node discovery protocol.

use super::{message::*, util::*, NodeId, NodeRecord};
use array_init::array_init;
use arrayvec::ArrayVec;
use ethereum_types::H256;
use std::{
    collections::{BTreeMap, VecDeque},
    convert::TryFrom,
};
use tracing::*;

/// Maximum number of live peers per k-bucket (Kademlia `k`).
pub const BUCKET_SIZE: usize = 16;
/// Maximum number of standby replacement peers per k-bucket.
pub const REPLACEMENTS_SIZE: usize = 16;

/// Node addresses are Keccak-256 hashes: 32 bytes.
const ADDRESS_BYTES_SIZE: usize = 32;
/// Bit length of a node address hash; also the number of k-buckets (256).
pub const ADDRESS_BITS: usize = 8 * ADDRESS_BYTES_SIZE;

/// XOR distance between the Keccak-256 hashes of two node IDs.
pub fn distance(n1: NodeId, n2: NodeId) -> H256 {
    keccak256(n1) ^ keccak256(n2)
}

pub type NodeBucket = ArrayVec<NodeRecord, BUCKET_SIZE>;

/// One k-bucket: up to [`BUCKET_SIZE`] live peers plus a standby
/// replacement list used when a live peer is evicted.
#[derive(Debug, Default)]
pub struct KBucket {
    // Live peers; front = most recently verified (see `Table::add_verified`).
    bucket: VecDeque<NodeRecord>,
    // Standby peers promoted by `Table::remove`.
    replacements: VecDeque<NodeRecord>,
}

impl KBucket {
    /// Index of `peer` in the live bucket, or `None` if absent.
    pub fn find_peer_pos(&self, peer: NodeId) -> Option<usize> {
        (0..self.bucket.len()).find(|&i| self.bucket[i].id == peer)
    }

    /// Queue `peer` as a replacement; silently dropped when the
    /// replacement list is already at [`REPLACEMENTS_SIZE`].
    pub fn push_replacement(&mut self, peer: NodeRecord) {
        if self.replacements.len() < REPLACEMENTS_SIZE {
            self.replacements.push_back(peer)
        }
    }
}

/// Kademlia routing table keyed by XOR distance from the local node.
#[derive(Debug)]
pub struct Table {
    // Keccak-256 hash of the local node ID; all distances are computed from it.
    id_hash: H256,
    // One bucket per possible log2 distance (0..256).
    kbuckets: [KBucket; ADDRESS_BITS],
}

impl Table {
    /// Construct a new [`Table`]
    pub fn new(id: NodeId) -> Self {
        Self {
            id_hash: keccak256(id),
            kbuckets: array_init(|_| Default::default()),
        }
    }

    /// log2 of the XOR distance between us and `peer`: the index of the
    /// highest differing bit (0..=255), or `None` when the hashes are equal.
    fn logdistance(&self, peer: NodeId) -> Option<usize> {
        let remote_hash = keccak256(peer);
        for i in (0..ADDRESS_BYTES_SIZE).rev() {
            // `i` counts from the least significant byte; translate to the
            // big-endian byte index within the hash.
            let byte_index = ADDRESS_BYTES_SIZE - i - 1;
            let d = self.id_hash[byte_index] ^ remote_hash[byte_index];
            if d != 0 {
                let high_bit_index = 7 - d.leading_zeros() as usize;
                return Some(i * 8 + high_bit_index);
            }
        }
        None // n1 and n2 are equal, so logdistance is -inf
    }

    /// Bucket responsible for `peer`, unless `peer` is the local node.
    fn bucket(&self, peer: NodeId) -> Option<(usize, &KBucket)> {
        self.logdistance(peer)
            .map(|bucket_idx| (bucket_idx, &self.kbuckets[bucket_idx]))
    }

    /// Mutable variant of [`Table::bucket`].
    fn bucket_mut(&mut self, peer: NodeId) -> Option<(usize, &mut KBucket)> {
        if let Some(bucket_idx) = self.logdistance(peer) {
            return Some((bucket_idx, &mut self.kbuckets[bucket_idx]));
        }

        None
    }

    /// Get the [`Endpoint`] for the requested peer.
    pub fn get(&self, peer: NodeId) -> Option<Endpoint> {
        self.bucket(peer).and_then(|(_, bucket)| {
            bucket
                .bucket
                .iter()
                .find(|entry| entry.id == peer)
                .copied()
                .map(From::from)
        })
    }

    /// Indices of all buckets currently holding [`BUCKET_SIZE`] peers.
    pub fn filled_buckets(&self) -> Vec<u8> {
        self.kbuckets
            .iter()
            .enumerate()
            .filter_map(|(i, kbucket)| {
                if kbucket.bucket.len() >= BUCKET_SIZE {
                    // NB: there are 256 buckets; indices 0..=255 always fit in
                    // u8, so the conversion cannot fail (message is imprecise).
                    Some(u8::try_from(i).expect("there are only 255 kbuckets"))
                } else {
                    None
                }
            })
            .collect()
    }

    /// Peer at the back of `bucket_no`'s deque (the least recently
    /// verified/refreshed entry), if any.
    pub fn oldest(&self, bucket_no: u8) -> Option<NodeRecord> {
        self.kbuckets[bucket_no as usize]
            .bucket
            .iter()
            .next_back()
            .copied()
    }

    /// Add verified node if there is space.
    #[instrument(skip_all, fields(node = &*node.id.to_string()))]
    pub fn add_verified(&mut self, node: NodeRecord) {
        trace!("Adding peer");
        // IPv6 peers are not tracked.
        if node.address.is_ipv6() {
            return;
        }

        if let Some((bucket_idx, bucket)) = self.bucket_mut(node.id) {
            trace!("Adding to bucket: {bucket_idx}");
            if let Some(pos) = bucket.find_peer_pos(node.id) {
                bucket.bucket.remove(pos);
            }

            // Push to front of bucket if we have less than BUCKET_SIZE peers, or we are shuffling existing peer...
            if bucket.bucket.len() < BUCKET_SIZE {
                bucket.bucket.push_front(node);
            } else {
                // ...add to replacements otherwise
                bucket.push_replacement(node);
            }
        }
    }

    /// Add seen node if there is space.
    #[instrument(skip_all, fields(node = &*node.id.to_string()))]
    pub fn add_seen(&mut self, node: NodeRecord) {
        trace!("Adding peer");
        // IPv6 peers are not tracked.
        if node.address.is_ipv6() {
            return;
        }

        if let Some((bucket_idx, bucket)) = self.bucket_mut(node.id) {
            trace!("Adding peer to bucket {bucket_idx}");
            if bucket.find_peer_pos(node.id).is_some() {
                // Peer exists already, do nothing
                return;
            }

            // Push to back of bucket if we have less than BUCKET_SIZE peers...
            if bucket.bucket.len() < BUCKET_SIZE {
                bucket.bucket.push_back(node);
            } else {
                // ...add to replacements otherwise
                bucket.push_replacement(node);
            }
        }
    }

    /// Remove node from the bucket
    ///
    /// The node is only removed when a replacement is available; otherwise
    /// it is deliberately kept so the bucket does not shrink.
    #[instrument(skip_all, fields(node = &*node.to_string()))]
    pub fn remove(&mut self, node: NodeId) {
        if let Some((bucket_idx, bucket)) = self.bucket_mut(node) {
            if bucket.replacements.is_empty() {
                trace!("Not removing from bucket {bucket_idx}: no replacements");
                return;
            }

            for i in 0..bucket.bucket.len() {
                if bucket.bucket[i].id == node {
                    let replacement = bucket
                        .replacements
                        .pop_front()
                        .expect("already returned if no replacement");
                    trace!("Replacing in bucket {bucket_idx} with {:?}", replacement);
                    bucket.bucket.remove(i);
                    bucket.bucket.push_back(replacement);

                    return;
                }
            }
        }
    }

    /// All peers sharing `peer`'s bucket, excluding `peer` itself.
    pub fn neighbours(&self, peer: NodeId) -> Option<NodeBucket> {
        self.bucket(peer).map(|(_, bucket)| {
            bucket
                .bucket
                .iter()
                .filter_map(|neighbour| {
                    if peer == neighbour.id {
                        None
                    } else {
                        Some(*neighbour)
                    }
                })
                .collect()
        })
    }

    /// All known peers keyed (and therefore ordered) by XOR distance to `target`.
    pub fn nearest_node_entries(&self, target: NodeId) -> BTreeMap<H256, NodeRecord> {
        self.kbuckets
            .iter()
            .flat_map(|bucket| &bucket.bucket)
            .map(|n| (distance(n.id, target), *n))
            .collect()
    }

    /// Returns the number of peers in all buckets in the table
    pub fn len(&self) -> usize {
        self.kbuckets
            .iter()
            .fold(0, |total, bucket| total + bucket.bucket.len())
    }

    /// `true` when no bucket holds any peer.
    pub fn is_empty(&self) -> bool {
        self.kbuckets.iter().all(|bucket| bucket.bucket.is_empty())
    }
}
/// UDP endpoint advertised by a discovery peer.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable)]
pub struct Endpoint {
    pub address: Ip,
    pub udp_port: u16,
    pub tcp_port: u16,
}

impl From<NodeRecord> for Endpoint {
    fn from(
        NodeRecord {
            address,
            tcp_port,
            udp_port,
            ..
        }: NodeRecord,
    ) -> Self {
        Self {
            address,
            udp_port,
            tcp_port,
        }
    }
}

/// FINDNODE packet: ask a peer for nodes close to `id`.
#[derive(Clone, Copy, Debug, RlpEncodable, RlpDecodable)]
pub struct FindNodeMessage {
    pub id: NodeId,
    // Absolute UNIX timestamp after which the packet is considered stale.
    pub expire: u64,
}

/// NEIGHBOURS packet: reply to FINDNODE carrying known nodes.
#[derive(Clone, Debug, RlpEncodable, RlpDecodable)]
pub struct NeighboursMessage {
    pub nodes: Vec<NodeRecord>,
    pub expire: u64,
}

/// PING packet payload. The on-wire `version` field (always 4) is not
/// stored here; it is added/skipped by the encode/decode shims below.
#[derive(Debug, Clone)]
pub struct PingMessage {
    pub from: Endpoint,
    pub to: Endpoint,
    pub expire: u64,
}

// Encoding shim: prepends the fixed protocol `version` field without
// storing it in `PingMessage`.
#[derive(RlpEncodable)]
struct PingMessageE<'s> {
    version: u64,
    from: &'s Endpoint,
    to: &'s Endpoint,
    expire: &'s u64,
}

impl Encodable for PingMessage {
    fn encode(&self, out: &mut dyn BufMut) {
        let Self { from, to, expire } = self;

        PingMessageE {
            version: 4,
            from,
            to,
            expire,
        }
        .encode(out)
    }
    fn length(&self) -> usize {
        let Self { from, to, expire } = self;

        PingMessageE {
            version: 4,
            from,
            to,
            expire,
        }
        .length()
    }
}

// Decoding shim for the 4-field (pre-ENR) PING layout.
#[derive(RlpDecodable)]
struct PingMessageD {
    _version: u64,
    from: Endpoint,
    to: Endpoint,
    expire: u64,
}

// Decoding shim for the 5-field PING layout with a trailing ENR sequence
// number (presumably the EIP-868 extension — confirm against spec).
#[derive(RlpDecodable)]
struct PingMessageDEnr {
    _version: u64,
    from: Endpoint,
    to: Endpoint,
    expire: u64,
    _enr_seq: u64,
}
/// PONG packet: reply to PING; `echo` carries the hash of the PING packet.
#[derive(Debug, Clone, RlpEncodable)]
pub struct PongMessage {
    pub to: Endpoint,
    pub echo: H256,
    pub expire: u64,
}

// Decoding shim for the 3-field (pre-ENR) PONG layout.
#[derive(RlpDecodable)]
struct PongMessageD {
    to: Endpoint,
    echo: H256,
    expire: u64,
}

// Decoding shim for the 4-field PONG layout with a trailing ENR sequence
// number (presumably the EIP-868 extension — confirm against spec).
#[derive(RlpDecodable)]
struct PongMessageDEnr {
    to: Endpoint,
    echo: H256,
    expire: u64,
    _enr_seq: u64,
}

impl Decodable for PongMessage {
    /// Decode either PONG layout: the plain 3-field form is tried first; on
    /// a list length mismatch the ENR-extended form is retried.
    ///
    /// NOTE(review): the retry assumes the failed decode left `buf`
    /// unconsumed (fastrlp's derived decoder committing `buf` only on
    /// success) — confirm against the fastrlp version in use.
    fn decode(buf: &mut &[u8]) -> Result<Self, DecodeError> {
        let (to, echo, expire) = {
            PongMessageD::decode(buf)
                .map(
                    |PongMessageD {
                         to, echo, expire, ..
                     }| (to, echo, expire),
                )
                .or_else(|e| {
                    if let DecodeError::ListLengthMismatch { .. } = e {
                        PongMessageDEnr::decode(buf).map(
                            |PongMessageDEnr {
                                 to, echo, expire, ..
                             }| (to, echo, expire),
                        )
                    } else {
                        Err(e)
                    }
                })?
        };

        Ok(Self { to, echo, expire })
    }
}
#![allow(clippy::type_complexity)]

pub mod kad;
pub mod message;
pub mod node;
pub mod proto;
pub mod util;

use educe::Educe;
use ethereum_types::H512;
use std::{pin::Pin, sync::Arc};
use task_group::TaskGroup;
use tokio::sync::mpsc::{channel, Receiver};
use tokio_stream::Stream;

pub type NodeId = H512;
pub use self::node::{Node, NodeRecord};

/// Builder for [`Discv4`]. Defaults: 1 concurrent lookup, cache of 20.
#[derive(Educe)]
#[educe(Default)]
pub struct Discv4Builder {
    // Number of parallel random-target lookup tasks to spawn.
    #[educe(Default(1))]
    concurrent_lookups: usize,
    // Capacity of the channel buffering discovered peers.
    #[educe(Default(20))]
    cache: usize,
}

impl Discv4Builder {
    /// Set the number of lookup tasks run in parallel.
    pub fn with_concurrent_lookups(mut self, concurrent_lookups: usize) -> Self {
        self.concurrent_lookups = concurrent_lookups;
        self
    }

    /// Set the capacity of the discovered-peer channel.
    pub fn with_cache(mut self, cache: usize) -> Self {
        self.cache = cache;
        self
    }

    /// Build a [`Discv4`] stream on top of `node`.
    pub fn build(self, node: Arc<Node>) -> Discv4 {
        Discv4::new(node, self.concurrent_lookups, self.cache)
    }
}

/// Stream of peers discovered through discv4 random-target lookups.
pub struct Discv4 {
    // Owns the lookup tasks; presumably cancels them on drop (TaskGroup
    // semantics — confirm).
    #[allow(unused)]
    tasks: TaskGroup,
    // Receiving side of the discovered-peer channel.
    receiver: Receiver<crate::NodeRecord>,
}

impl Discv4 {
    /// Spawn `concurrent_lookups` background tasks, each looping forever:
    /// run a lookup for a random target and forward every discovered peer
    /// into a channel of capacity `cache`.
    #[must_use]
    fn new(node: Arc<Node>, concurrent_lookups: usize, cache: usize) -> Self {
        let tasks = TaskGroup::default();

        let (tx, receiver) = channel(cache);

        for i in 0..concurrent_lookups {
            let node = node.clone();
            let tx = tx.clone();
            tasks.spawn_with_name(format!("discv4 lookup #{}", i), {
                async move {
                    // NOTE(review): these inner clones are redundant — the
                    // `async move` block already owns `node` and `tx`.
                    let node = node.clone();
                    let tx = tx.clone();
                    loop {
                        for record in node.lookup(rand::random()).await {
                            // A send error means the receiver is gone; the
                            // record is simply dropped.
                            let _ = tx
                                .send(crate::NodeRecord {
                                    addr: record.tcp_addr(),
                                    id: record.id,
                                })
                                .await;
                        }
                    }
                }
            });
        }

        Self { tasks, receiver }
    }
}
std::task::Context<'_>, 90 | ) -> std::task::Poll> { 91 | Pin::new(&mut self.receiver) 92 | .poll_recv(cx) 93 | .map(|opt| opt.map(Ok)) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/disc/v4/proto.rs: -------------------------------------------------------------------------------- 1 | use super::message::*; 2 | use enum_primitive_derive::Primitive; 3 | use tokio::sync::oneshot::Sender as OneshotSender; 4 | 5 | #[derive(Primitive)] 6 | pub enum MessageId { 7 | Ping = 1, 8 | Pong = 2, 9 | FindNode = 3, 10 | Neighbours = 4, 11 | } 12 | 13 | #[derive(Debug)] 14 | pub enum EgressMessage { 15 | Ping(PingMessage, Option>), 16 | Pong(PongMessage), 17 | FindNode(FindNodeMessage), 18 | Neighbours(NeighboursMessage), 19 | } 20 | -------------------------------------------------------------------------------- /src/disc/v4/util.rs: -------------------------------------------------------------------------------- 1 | use super::NodeId; 2 | use ethereum_types::H256; 3 | use secp256k1::{Message, PublicKey}; 4 | use sha3::{Digest, Keccak256}; 5 | 6 | pub fn keccak256>(data: T) -> H256 { 7 | H256::from_slice(Keccak256::digest(data.as_ref()).as_slice()) 8 | } 9 | 10 | pub fn keccak256_message>(data: T) -> Message { 11 | Message::from_slice(Keccak256::digest(data.as_ref()).as_slice()).unwrap() 12 | } 13 | 14 | pub fn pk2id(pk: &PublicKey) -> NodeId { 15 | NodeId::from_slice(&pk.serialize_uncompressed()[1..]) 16 | } 17 | -------------------------------------------------------------------------------- /src/ecies.rs: -------------------------------------------------------------------------------- 1 | //! 
ECIES protocol implementation 2 | 3 | mod algorithm; 4 | mod proto; 5 | 6 | pub use self::proto::{ECIESCodec, ECIESState, ECIESStream, EgressECIESValue, IngressECIESValue}; 7 | -------------------------------------------------------------------------------- /src/ecies/algorithm.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | errors::ECIESError, 3 | mac::{HeaderBytes, MAC}, 4 | util::{hmac_sha256, id2pk, pk2id, sha256}, 5 | PeerId, 6 | }; 7 | use aes::{cipher::StreamCipher, Aes128, Aes256}; 8 | use anyhow::{format_err, Context}; 9 | use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; 10 | use bytes::{BufMut, Bytes, BytesMut}; 11 | use ctr::Ctr64BE; 12 | use digest::{crypto_common::KeyIvInit, Digest}; 13 | use educe::Educe; 14 | use ethereum_types::{H128, H256}; 15 | use fastrlp::{Encodable, Rlp, RlpEncodable, RlpMaxEncodedLen}; 16 | use rand::{thread_rng, Rng}; 17 | use secp256k1::{ 18 | ecdsa::{RecoverableSignature, RecoveryId}, 19 | PublicKey, SecretKey, SECP256K1, 20 | }; 21 | use sha2::Sha256; 22 | use sha3::Keccak256; 23 | use std::{convert::TryFrom, io}; 24 | 25 | const PROTOCOL_VERSION: usize = 4; 26 | 27 | pub(crate) const MAX_BODY_SIZE: usize = 19_573_451; 28 | 29 | fn ecdh_x(public_key: &PublicKey, secret_key: &SecretKey) -> H256 { 30 | H256::from_slice(&secp256k1::ecdh::shared_secret_point(public_key, secret_key)[..32]) 31 | } 32 | 33 | fn kdf(secret: H256, s1: &[u8], dest: &mut [u8]) { 34 | // SEC/ISO/Shoup specify counter size SHOULD be equivalent 35 | // to size of hash output, however, it also notes that 36 | // the 4 bytes is okay. NIST specifies 4 bytes. 
37 | let mut ctr = 1_u32; 38 | let mut written = 0_usize; 39 | while written < dest.len() { 40 | let mut hasher = Sha256::default(); 41 | let ctrs = [ 42 | (ctr >> 24) as u8, 43 | (ctr >> 16) as u8, 44 | (ctr >> 8) as u8, 45 | ctr as u8, 46 | ]; 47 | hasher.update(&ctrs); 48 | hasher.update(secret.as_bytes()); 49 | hasher.update(s1); 50 | let d = hasher.finalize(); 51 | dest[written..(written + 32)].copy_from_slice(&d); 52 | written += 32; 53 | ctr += 1; 54 | } 55 | } 56 | 57 | #[derive(Educe)] 58 | #[educe(Debug)] 59 | pub struct ECIES { 60 | #[educe(Debug(ignore))] 61 | secret_key: SecretKey, 62 | public_key: PublicKey, 63 | remote_public_key: Option, 64 | 65 | pub(crate) remote_id: Option, 66 | 67 | #[educe(Debug(ignore))] 68 | ephemeral_secret_key: SecretKey, 69 | ephemeral_public_key: PublicKey, 70 | ephemeral_shared_secret: Option, 71 | remote_ephemeral_public_key: Option, 72 | 73 | nonce: H256, 74 | remote_nonce: Option, 75 | 76 | #[educe(Debug(ignore))] 77 | ingress_aes: Option>, 78 | #[educe(Debug(ignore))] 79 | egress_aes: Option>, 80 | ingress_mac: Option, 81 | egress_mac: Option, 82 | 83 | init_msg: Option, 84 | remote_init_msg: Option, 85 | 86 | body_size: Option, 87 | } 88 | 89 | fn split_at_mut(arr: &mut [T], mid: usize) -> Result<(&mut [T], &mut [T]), ECIESError> { 90 | if mid > arr.len() { 91 | return Err(ECIESError::Other(format_err!( 92 | "too short: {mid} > {}", 93 | arr.len() 94 | ))); 95 | } 96 | Ok(arr.split_at_mut(mid)) 97 | } 98 | 99 | impl ECIES { 100 | fn new_static_client( 101 | secret_key: SecretKey, 102 | remote_id: PeerId, 103 | nonce: H256, 104 | ephemeral_secret_key: SecretKey, 105 | ) -> Result { 106 | let public_key = PublicKey::from_secret_key(SECP256K1, &secret_key); 107 | let remote_public_key = id2pk(remote_id)?; 108 | let ephemeral_public_key = PublicKey::from_secret_key(SECP256K1, &ephemeral_secret_key); 109 | 110 | Ok(Self { 111 | secret_key, 112 | public_key, 113 | ephemeral_secret_key, 114 | ephemeral_public_key, 115 | 
    /// Construct initiator-side state targeting `remote_id`, with a fresh
    /// random nonce and ephemeral key.
    pub fn new_client(secret_key: SecretKey, remote_id: PeerId) -> Result<Self, ECIESError> {
        let nonce = H256::random();
        let ephemeral_secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng());

        Self::new_static_client(secret_key, remote_id, nonce, ephemeral_secret_key)
    }

    /// Construct recipient-side state with a caller-supplied nonce and
    /// ephemeral key (deterministic; useful for tests).
    pub fn new_static_server(
        secret_key: SecretKey,
        nonce: H256,
        ephemeral_secret_key: SecretKey,
    ) -> Result<Self, ECIESError> {
        let public_key = PublicKey::from_secret_key(SECP256K1, &secret_key);
        let ephemeral_public_key = PublicKey::from_secret_key(SECP256K1, &ephemeral_secret_key);

        Ok(Self {
            secret_key,
            public_key,
            ephemeral_secret_key,
            ephemeral_public_key,
            nonce,

            remote_public_key: None,
            remote_ephemeral_public_key: None,
            remote_nonce: None,
            ephemeral_shared_secret: None,
            init_msg: None,
            remote_init_msg: None,

            remote_id: None,

            body_size: None,
            egress_aes: None,
            ingress_aes: None,
            egress_mac: None,
            ingress_mac: None,
        })
    }

    /// Construct recipient-side state with a fresh random nonce and
    /// ephemeral key.
    pub fn new_server(secret_key: SecretKey) -> Result<Self, ECIESError> {
        let nonce = H256::random();
        let ephemeral_secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng());

        Self::new_static_server(secret_key, nonce, ephemeral_secret_key)
    }

    /// Remote peer ID.
    ///
    /// # Panics
    /// Panics if the remote ID is not yet known (e.g. server side before
    /// `read_auth`).
    pub fn remote_id(&self) -> PeerId {
        self.remote_id.unwrap()
    }

    /// ECIES-encrypt `data` and append the message to `out`, laid out as
    /// `ephemeral pubkey (65) || IV (16) || AES-CTR ciphertext || HMAC tag (32)`.
    fn encrypt_message(&self, data: &[u8], out: &mut BytesMut) {
        out.reserve(secp256k1::constants::UNCOMPRESSED_PUBLIC_KEY_SIZE + 16 + data.len() + 32);

        // Fresh ephemeral key per message; its public half travels in clear.
        let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng());
        out.extend_from_slice(
            &PublicKey::from_secret_key(SECP256K1, &secret_key).serialize_uncompressed(),
        );

        // Derive a 16-byte AES key and a 32-byte HMAC key from the ECDH secret.
        let x = ecdh_x(&self.remote_public_key.unwrap(), &secret_key);
        let mut key = [0_u8; 32];
        kdf(x, &[], &mut key);

        let enc_key = H128::from_slice(&key[0..16]);
        let mac_key = sha256(&key[16..32]);

        let iv = H128::random();
        let mut encryptor = Ctr64BE::<Aes128>::new(enc_key.as_ref().into(), iv.as_ref().into());

        let mut encrypted = data.to_vec();
        encryptor.apply_keystream(&mut encrypted);

        // The total message length (big-endian u16) is authenticated data.
        let total_size: u16 = u16::try_from(65 + 16 + data.len() + 32).unwrap();

        let tag = hmac_sha256(
            mac_key.as_ref(),
            &[iv.as_bytes(), &encrypted],
            &total_size.to_be_bytes(),
        );

        out.extend_from_slice(iv.as_bytes());
        out.extend_from_slice(&encrypted);
        out.extend_from_slice(tag.as_ref());
    }
    /// Build the plaintext `auth` body: a recoverable signature over
    /// `ecdh(remote_static, our_static) ^ nonce` made with the ephemeral
    /// key, plus our public key, nonce and protocol version, RLP-encoded.
    fn create_auth_unencrypted(&self) -> BytesMut {
        let x = ecdh_x(&self.remote_public_key.unwrap(), &self.secret_key);
        let msg = x ^ self.nonce;
        let (rec_id, sig) = SECP256K1
            .sign_ecdsa_recoverable(
                &secp256k1::Message::from_slice(msg.as_bytes()).unwrap(),
                &self.ephemeral_secret_key,
            )
            .serialize_compact();

        // 64-byte compact signature followed by the recovery ID.
        let mut sig_bytes = [0_u8; 65];
        sig_bytes[..64].copy_from_slice(&sig);
        sig_bytes[64] = rec_id.to_i32() as u8;

        let id = pk2id(&self.public_key);

        #[derive(RlpEncodable)]
        struct S<'a> {
            sig_bytes: &'a [u8; 65],
            id: &'a PeerId,
            nonce: &'a H256,
            protocol_version: u8,
        }

        let mut out = BytesMut::new();
        S {
            sig_bytes: &sig_bytes,
            id: &id,
            nonce: &self.nonce,
            protocol_version: PROTOCOL_VERSION as u8,
        }
        .encode(&mut out);

        // Random-length zero padding obscures the true message length.
        out.resize(out.len() + thread_rng().gen_range(100..=300), 0);
        out
    }

    /// Test helper: encrypt the auth message into a fresh buffer.
    #[cfg(test)]
    fn create_auth(&mut self) -> BytesMut {
        let mut buf = BytesMut::new();
        self.write_auth(&mut buf);
        buf
    }
    /// Parse a decrypted `auth` body: record the remote static key and
    /// nonce, recover the remote ephemeral public key from the signature,
    /// and derive the ephemeral shared secret.
    fn parse_auth_unencrypted(&mut self, data: &[u8]) -> Result<(), ECIESError> {
        let mut data = Rlp::new(data)?;

        let sigdata = data
            .get_next::<[u8; 65]>()?
            .ok_or(ECIESError::InvalidAuthData)?;
        let signature = RecoverableSignature::from_compact(
            &sigdata[0..64],
            RecoveryId::from_i32(sigdata[64] as i32)?,
        )?;
        let remote_id = data.get_next()?.ok_or(ECIESError::InvalidAuthData)?;
        self.remote_id = Some(remote_id);
        self.remote_public_key = Some(id2pk(remote_id).context("failed to parse peer id")?);
        self.remote_nonce = Some(data.get_next()?.ok_or(ECIESError::InvalidAuthData)?);

        // The auth signature was made over ecdh(static) ^ nonce with the
        // remote ephemeral key, so recovering the signer yields that key.
        let x = ecdh_x(&self.remote_public_key.unwrap(), &self.secret_key);
        self.remote_ephemeral_public_key = Some(SECP256K1.recover_ecdsa(
            &secp256k1::Message::from_slice((x ^ self.remote_nonce.unwrap()).as_ref()).unwrap(),
            &signature,
        )?);
        self.ephemeral_shared_secret = Some(ecdh_x(
            &self.remote_ephemeral_public_key.unwrap(),
            &self.ephemeral_secret_key,
        ));

        Ok(())
    }

    /// Decrypt and parse an incoming `auth` message (server side).
    pub fn read_auth(&mut self, data: &mut [u8]) -> Result<(), ECIESError> {
        // Keep the raw bytes: they seed the ingress MAC in `setup_frame`.
        self.remote_init_msg = Some(Bytes::copy_from_slice(data));
        let unencrypted = self.decrypt_message(data)?;
        self.parse_auth_unencrypted(unencrypted)
    }

    /// Build the plaintext `ack` body: our ephemeral public key, nonce and
    /// protocol version, RLP-encoded into a fixed-size buffer.
    fn create_ack_unencrypted(&self) -> impl AsRef<[u8]> {
        #[derive(RlpEncodable, RlpMaxEncodedLen)]
        struct S {
            id: PeerId,
            nonce: H256,
            protocol_version: u8,
        }

        fastrlp::encode_fixed_size(&S {
            id: pk2id(&self.ephemeral_public_key),
            nonce: self.nonce,
            protocol_version: PROTOCOL_VERSION as u8,
        })
    }

    /// Test helper: encrypt the ack message into a fresh buffer.
    #[cfg(test)]
    pub fn create_ack(&mut self) -> BytesMut {
        let mut buf = BytesMut::new();
        self.write_ack(&mut buf);
        buf
    }

    /// Encrypt the `ack` message, append it (u16 length-prefixed) to `out`,
    /// and switch to framed-transport mode (server side).
    pub fn write_ack(&mut self, out: &mut BytesMut) {
        let unencrypted = self.create_ack_unencrypted();

        let mut buf = out.split_off(out.len());

        // reserve space for length
        buf.put_u16(0);

        // encrypt and append
        let mut encrypted = buf.split_off(buf.len());
        self.encrypt_message(unencrypted.as_ref(), &mut encrypted);
        let len_bytes = u16::try_from(encrypted.len()).unwrap().to_be_bytes();
        buf.unsplit(encrypted);

        // write length
        buf[..len_bytes.len()].copy_from_slice(&len_bytes[..]);

        // Keep the raw bytes: they seed the egress MAC in `setup_frame`.
        self.init_msg = Some(buf.clone().freeze());
        out.unsplit(buf);

        self.setup_frame(true);
    }

    /// Parse a decrypted `ack` body: record the remote ephemeral key and
    /// nonce, then derive the ephemeral shared secret.
    fn parse_ack_unencrypted(&mut self, data: &[u8]) -> Result<(), ECIESError> {
        let mut data = Rlp::new(data)?;
        self.remote_ephemeral_public_key =
            Some(id2pk(data.get_next()?.ok_or(ECIESError::InvalidAckData)?)?);
        self.remote_nonce = Some(data.get_next()?.ok_or(ECIESError::InvalidAckData)?);

        self.ephemeral_shared_secret = Some(ecdh_x(
            &self.remote_ephemeral_public_key.unwrap(),
            &self.ephemeral_secret_key,
        ));
        Ok(())
    }

    /// Decrypt and parse an incoming `ack`, then switch to framed-transport
    /// mode (client side).
    pub fn read_ack(&mut self, data: &mut [u8]) -> Result<(), ECIESError> {
        self.remote_init_msg = Some(Bytes::copy_from_slice(data));
        let unencrypted = self.decrypt_message(data)?;
        self.parse_ack_unencrypted(unencrypted)?;
        self.setup_frame(false);
        Ok(())
    }

    /// Derive the frame AES keys and the ingress/egress MACs from the
    /// ephemeral shared secret and both nonces. `incoming` selects the
    /// nonce hashing order (true on the server side).
    fn setup_frame(&mut self, incoming: bool) {
        let mut hasher = Keccak256::new();
        for el in &if incoming {
            [self.nonce, self.remote_nonce.unwrap()]
        } else {
            [self.remote_nonce.unwrap(), self.nonce]
        } {
            hasher.update(el);
        }
        let h_nonce = H256::from(hasher.finalize().as_ref());

        // Chained Keccak-256 derivations: shared -> aes -> mac secrets.
        let iv = H128::default();
        let shared_secret: H256 = {
            let mut hasher = Keccak256::new();
            hasher.update(self.ephemeral_shared_secret.unwrap().as_ref());
            hasher.update(h_nonce.as_ref());
            H256::from(hasher.finalize().as_ref())
        };

        let aes_secret: H256 = {
            let mut hasher = Keccak256::new();
            hasher.update(self.ephemeral_shared_secret.unwrap().as_ref());
            hasher.update(shared_secret.as_ref());
            H256::from(hasher.finalize().as_ref())
        };
        self.ingress_aes = Some(Ctr64BE::<Aes256>::new(
            aes_secret.as_ref().into(),
            iv.as_ref().into(),
        ));
        self.egress_aes = Some(Ctr64BE::<Aes256>::new(
            aes_secret.as_ref().into(),
            iv.as_ref().into(),
        ));

        let mac_secret: H256 = {
            let mut hasher = Keccak256::new();
            hasher.update(self.ephemeral_shared_secret.unwrap().as_ref());
            hasher.update(aes_secret.as_ref());
            H256::from(hasher.finalize().as_ref())
        };
        // Each direction's MAC is seeded with (secret ^ other side's nonce)
        // followed by the raw init message of that direction.
        self.ingress_mac = Some(MAC::new(mac_secret));
        self.ingress_mac
            .as_mut()
            .unwrap()
            .update((mac_secret ^ self.nonce).as_ref());
        self.ingress_mac
            .as_mut()
            .unwrap()
            .update(self.remote_init_msg.as_ref().unwrap());
        self.egress_mac = Some(MAC::new(mac_secret));
        self.egress_mac
            .as_mut()
            .unwrap()
            .update((mac_secret ^ self.remote_nonce.unwrap()).as_ref());
        self.egress_mac
            .as_mut()
            .unwrap()
            .update(self.init_msg.as_ref().unwrap());
    }

    /// Test helper: write a frame header into a fresh buffer.
    #[cfg(test)]
    fn create_header(&mut self, size: usize) -> BytesMut {
        let mut out = BytesMut::new();
        self.write_header(&mut out, size);
        out
    }

    /// Encrypt and MAC a 16-byte frame header announcing a `size`-byte body.
    ///
    /// NOTE(review): `write_uint(.., 3)` panics when `size >= 2^24`, which
    /// is below `MAX_BODY_SIZE` accepted by `read_header` — confirm callers
    /// never exceed it.
    pub fn write_header(&mut self, out: &mut BytesMut, size: usize) {
        let mut buf = [0; 8];
        BigEndian::write_uint(&mut buf, size as u64, 3);
        let mut header = [0_u8; 16];
        header[0..3].copy_from_slice(&buf[0..3]);
        // Constant RLP filler 0xC28080 — presumably the unused header-data
        // list; confirm against the framing spec.
        header[3..6].copy_from_slice(&[194, 128, 128]);

        let mut header = HeaderBytes::from(header);
        self.egress_aes
            .as_mut()
            .unwrap()
            .apply_keystream(&mut header);
        self.egress_mac.as_mut().unwrap().update_header(&header);
        let tag = self.egress_mac.as_mut().unwrap().digest();

        out.reserve(ECIES::header_len());
        out.extend_from_slice(&header);
        out.extend_from_slice(tag.as_bytes());
    }

    /// Verify, decrypt and parse a frame header; returns the body size and
    /// remembers it for the subsequent `read_body`.
    pub fn read_header(&mut self, data: &mut [u8]) -> Result<usize, ECIESError> {
        let (header_bytes, mac_bytes) = split_at_mut(data, 16)?;
        let header = HeaderBytes::from_mut_slice(header_bytes);
        let mac = H128::from_slice(&mac_bytes[..16]);

        // The MAC is checked over the ciphertext, before decryption.
        self.ingress_mac.as_mut().unwrap().update_header(header);
        let check_mac = self.ingress_mac.as_mut().unwrap().digest();
        if check_mac != mac {
            return Err(ECIESError::TagCheckFailed);
        }

        self.ingress_aes.as_mut().unwrap().apply_keystream(header);
        if header.as_slice().len() < 3 {
            return Err(ECIESError::InvalidHeader);
        }
        let body_size = usize::try_from(header.as_slice().read_uint::<BigEndian>(3)?)
            .context("excessive body len")?;

        if body_size > MAX_BODY_SIZE {
            return Err(ECIESError::IO(io::Error::new(
                io::ErrorKind::InvalidInput,
                format!(
                    "body size ({}) exceeds limit ({} bytes)",
                    body_size, MAX_BODY_SIZE
                ),
            )));
        }

        self.body_size = Some(body_size);

        // NOTE(review): re-unwrapping the just-set Option is redundant;
        // could simply return `Ok(body_size)`.
        Ok(self.body_size.unwrap())
    }

    /// Size of an encrypted frame header: 16-byte header + 16-byte MAC.
    pub const fn header_len() -> usize {
        32
    }

    /// On-wire size of the pending body: padded up to a multiple of 16,
    /// plus the 16-byte MAC. Panics if no header has been read.
    pub fn body_len(&self) -> usize {
        let len = self.body_size.unwrap();
        (if len % 16 == 0 {
            len
        } else {
            (len / 16 + 1) * 16
        }) + 16
    }

    /// Test helper: write a frame body into a fresh buffer.
    #[cfg(test)]
    fn create_body(&mut self, data: &[u8]) -> BytesMut {
        let mut out = BytesMut::new();
        self.write_body(&mut out, data);
        out
    }
out.resize(old_len + len, 0); 557 | 558 | let encrypted = &mut out[old_len..old_len + len]; 559 | encrypted[..data.len()].copy_from_slice(data); 560 | 561 | self.egress_aes.as_mut().unwrap().apply_keystream(encrypted); 562 | self.egress_mac.as_mut().unwrap().update_body(encrypted); 563 | let tag = self.egress_mac.as_mut().unwrap().digest(); 564 | 565 | out.extend_from_slice(tag.as_bytes()); 566 | } 567 | 568 | pub fn read_body<'a>(&mut self, data: &'a mut [u8]) -> Result<&'a mut [u8], ECIESError> { 569 | let (body, mac_bytes) = split_at_mut(data, data.len() - 16)?; 570 | let mac = H128::from_slice(mac_bytes); 571 | self.ingress_mac.as_mut().unwrap().update_body(body); 572 | let check_mac = self.ingress_mac.as_mut().unwrap().digest(); 573 | if check_mac != mac { 574 | return Err(ECIESError::TagCheckFailed); 575 | } 576 | 577 | let size = self.body_size.unwrap(); 578 | self.body_size = None; 579 | let ret = body; 580 | self.ingress_aes.as_mut().unwrap().apply_keystream(ret); 581 | Ok(split_at_mut(ret, size)?.0) 582 | } 583 | } 584 | 585 | #[cfg(test)] 586 | mod tests { 587 | use super::*; 588 | use hex_literal::hex; 589 | 590 | #[test] 591 | fn ecdh() { 592 | let our_secret_key = SecretKey::from_slice(&hex!( 593 | "202a36e24c3eb39513335ec99a7619bad0e7dc68d69401b016253c7d26dc92f8" 594 | )) 595 | .unwrap(); 596 | let remote_public_key = id2pk(hex!("d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666").into()).unwrap(); 597 | 598 | assert_eq!( 599 | ecdh_x(&remote_public_key, &our_secret_key), 600 | hex!("821ce7e01ea11b111a52b2dafae8a3031a372d83bdf1a78109fa0783c2b9d5d3").into() 601 | ) 602 | } 603 | 604 | #[test] 605 | fn communicate() { 606 | let server_secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); 607 | let server_public_key = PublicKey::from_secret_key(SECP256K1, &server_secret_key); 608 | let client_secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); 609 | 
610 | let mut server_ecies = ECIES::new_server(server_secret_key).unwrap(); 611 | let mut client_ecies = 612 | ECIES::new_client(client_secret_key, pk2id(&server_public_key)).unwrap(); 613 | 614 | // Handshake 615 | let mut auth = client_ecies.create_auth(); 616 | server_ecies.read_auth(&mut auth).unwrap(); 617 | let mut ack = server_ecies.create_ack(); 618 | client_ecies.read_ack(&mut ack).unwrap(); 619 | 620 | let server_to_client_data = [0_u8, 1_u8, 2_u8, 3_u8, 4_u8]; 621 | let client_to_server_data = [5_u8, 6_u8, 7_u8]; 622 | 623 | // Test server to client 1 624 | let mut header = server_ecies.create_header(server_to_client_data.len()); 625 | assert_eq!(header.len(), ECIES::header_len()); 626 | client_ecies.read_header(&mut *header).unwrap(); 627 | let mut body = server_ecies.create_body(&server_to_client_data); 628 | assert_eq!(body.len(), client_ecies.body_len()); 629 | let ret = client_ecies.read_body(&mut *body).unwrap(); 630 | assert_eq!(ret, server_to_client_data); 631 | 632 | // Test client to server 1 633 | server_ecies 634 | .read_header(&mut *client_ecies.create_header(client_to_server_data.len())) 635 | .unwrap(); 636 | let mut b = client_ecies.create_body(&client_to_server_data); 637 | let ret = server_ecies.read_body(&mut b).unwrap(); 638 | assert_eq!(ret, client_to_server_data); 639 | 640 | // Test server to client 2 641 | client_ecies 642 | .read_header(&mut *server_ecies.create_header(server_to_client_data.len())) 643 | .unwrap(); 644 | let mut b = server_ecies.create_body(&server_to_client_data); 645 | let ret = client_ecies.read_body(&mut b).unwrap(); 646 | assert_eq!(ret, server_to_client_data); 647 | 648 | // Test server to client 3 649 | client_ecies 650 | .read_header(&mut *server_ecies.create_header(server_to_client_data.len())) 651 | .unwrap(); 652 | let mut b = server_ecies.create_body(&server_to_client_data); 653 | let ret = client_ecies.read_body(&mut b).unwrap(); 654 | assert_eq!(ret, server_to_client_data); 655 | 656 | // Test 
    /// Static server secret key from the EIP-8 handshake test vectors.
    fn eip8_test_server_key() -> SecretKey {
        SecretKey::from_slice(&hex!(
            "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"
        ))
        .unwrap()
    }

    /// Client-side ECIES state pinned to the EIP-8 test vectors: fixed static
    /// key, ephemeral key and nonce, so handshakes are fully deterministic.
    fn eip8_test_client() -> ECIES {
        let client_static_key = SecretKey::from_slice(&hex!(
            "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee"
        ))
        .unwrap();

        let client_ephemeral_key = SecretKey::from_slice(&hex!(
            "869d6ecf5211f1cc60418a13b9d870b22959d0c16f02bec714c960dd2298a32d"
        ))
        .unwrap();

        let client_nonce = H256(hex!(
            "7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6"
        ));

        let server_id = pk2id(&PublicKey::from_secret_key(
            SECP256K1,
            &eip8_test_server_key(),
        ));

        ECIES::new_static_client(
            client_static_key,
            server_id,
            client_nonce,
            client_ephemeral_key,
        )
        .unwrap()
    }

    /// Server-side ECIES state pinned to the EIP-8 test vectors.
    fn eip8_test_server() -> ECIES {
        let server_ephemeral_key = SecretKey::from_slice(&hex!(
            "e238eb8e04fee6511ab04c6dd3c89ce097b11f25d584863ac2b6d5b35b1847e4"
        ))
        .unwrap();

        let server_nonce = H256(hex!(
            "559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd"
        ));

        ECIES::new_static_server(eip8_test_server_key(), server_nonce, server_ephemeral_key)
            .unwrap()
    }
6641f6d2810ce493f51bee9c966b15c5043505350392b57645385a18c78f14669cc4d960446c1757 753 | 1b7c5d725021babbcd786957f3d17089c084907bda22c2b2675b4378b114c601d858802a55345a15 754 | 116bc61da4193996187ed70d16730e9ae6b3bb8787ebcaea1871d850997ddc08b4f4ea668fbf3740 755 | 7ac044b55be0908ecb94d4ed172ece66fd31bfdadf2b97a8bc690163ee11f5b575a4b44e36e2bfb2 756 | f0fce91676fd64c7773bac6a003f481fddd0bae0a1f31aa27504e2a533af4cef3b623f4791b2cca6 757 | d490 758 | " 759 | ); 760 | 761 | // EIP-8 format with version 4 and no additional list elements (sent from B to A) 762 | let ack2 = hex!( 763 | " 764 | 01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217c9b917788989470 765 | b0e330cc6e4fb383c0340ed85fab836ec9fb8a49672712aeabbdfd1e837c1ff4cace34311cd7f4de 766 | 05d59279e3524ab26ef753a0095637ac88f2b499b9914b5f64e143eae548a1066e14cd2f4bd7f814 767 | c4652f11b254f8a2d0191e2f5546fae6055694aed14d906df79ad3b407d94692694e259191cde171 768 | ad542fc588fa2b7333313d82a9f887332f1dfc36cea03f831cb9a23fea05b33deb999e85489e645f 769 | 6aab1872475d488d7bd6c7c120caf28dbfc5d6833888155ed69d34dbdc39c1f299be1057810f34fb 770 | e754d021bfca14dc989753d61c413d261934e1a9c67ee060a25eefb54e81a4d14baff922180c395d 771 | 3f998d70f46f6b58306f969627ae364497e73fc27f6d17ae45a413d322cb8814276be6ddd13b885b 772 | 201b943213656cde498fa0e9ddc8e0b8f8a53824fbd82254f3e2c17e8eaea009c38b4aa0a3f306e8 773 | 797db43c25d68e86f262e564086f59a2fc60511c42abfb3057c247a8a8fe4fb3ccbadde17514b7ac 774 | 8000cdb6a912778426260c47f38919a91f25f4b5ffb455d6aaaf150f7e5529c100ce62d6d92826a7 775 | 1778d809bdf60232ae21ce8a437eca8223f45ac37f6487452ce626f549b3b5fdee26afd2072e4bc7 776 | 5833c2464c805246155289f4 777 | " 778 | ); 779 | 780 | // EIP-8 format with version 57 and 3 additional list elements (sent from B to A) 781 | let ack3 = hex!( 782 | " 783 | 01f004076e58aae772bb101ab1a8e64e01ee96e64857ce82b1113817c6cdd52c09d26f7b90981cd7 784 | ae835aeac72e1573b8a0225dd56d157a010846d888dac7464baf53f2ad4e3d584531fa203658fab0 785 | 
3a06c9fd5e35737e417bc28c1cbf5e5dfc666de7090f69c3b29754725f84f75382891c561040ea1d 786 | dc0d8f381ed1b9d0d4ad2a0ec021421d847820d6fa0ba66eaf58175f1b235e851c7e2124069fbc20 787 | 2888ddb3ac4d56bcbd1b9b7eab59e78f2e2d400905050f4a92dec1c4bdf797b3fc9b2f8e84a482f3 788 | d800386186712dae00d5c386ec9387a5e9c9a1aca5a573ca91082c7d68421f388e79127a5177d4f8 789 | 590237364fd348c9611fa39f78dcdceee3f390f07991b7b47e1daa3ebcb6ccc9607811cb17ce51f1 790 | c8c2c5098dbdd28fca547b3f58c01a424ac05f869f49c6a34672ea2cbbc558428aa1fe48bbfd6115 791 | 8b1b735a65d99f21e70dbc020bfdface9f724a0d1fb5895db971cc81aa7608baa0920abb0a565c9c 792 | 436e2fd13323428296c86385f2384e408a31e104670df0791d93e743a3a5194ee6b076fb6323ca59 793 | 3011b7348c16cf58f66b9633906ba54a2ee803187344b394f75dd2e663a57b956cb830dd7a908d4f 794 | 39a2336a61ef9fda549180d4ccde21514d117b6c6fd07a9102b5efe710a32af4eeacae2cb3b1dec0 795 | 35b9593b48b9d3ca4c13d245d5f04169b0b1 796 | " 797 | ); 798 | 799 | eip8_test_server().read_auth(&mut auth2.to_vec()).unwrap(); 800 | eip8_test_server().read_auth(&mut auth3.to_vec()).unwrap(); 801 | 802 | let mut test_client = eip8_test_client(); 803 | let mut test_server = eip8_test_server(); 804 | 805 | test_server 806 | .read_auth(&mut test_client.create_auth()) 807 | .unwrap(); 808 | 809 | test_client.read_ack(&mut test_server.create_ack()).unwrap(); 810 | 811 | test_client.read_ack(&mut ack2.to_vec()).unwrap(); 812 | test_client.read_ack(&mut ack3.to_vec()).unwrap(); 813 | } 814 | } 815 | -------------------------------------------------------------------------------- /src/ecies/proto.rs: -------------------------------------------------------------------------------- 1 | use super::algorithm::{ECIES, MAX_BODY_SIZE}; 2 | use crate::{errors::ECIESError, transport::Transport, types::PeerId}; 3 | use anyhow::{bail, Context as _}; 4 | use bytes::{Bytes, BytesMut}; 5 | use futures::{ready, Sink, SinkExt}; 6 | use secp256k1::SecretKey; 7 | use std::{ 8 | fmt::Debug, 9 | io, 10 | pin::Pin, 11 | task::{Context, 
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Current ECIES state of a connection
pub enum ECIESState {
    /// Waiting for the peer's `auth` handshake message (server side).
    Auth,
    /// Waiting for the peer's `ack` handshake reply (client side).
    Ack,
    /// Waiting for a 32-byte frame header.
    Header,
    /// Waiting for the frame body announced by the last decoded header.
    Body,
}

#[derive(Clone, Debug, PartialEq, Eq)]
/// Raw egress values for an ECIES protocol
pub enum EgressECIESValue {
    Auth,
    Ack,
    Message(Bytes),
}

#[derive(Clone, Debug, PartialEq, Eq)]
/// Raw ingress values for an ECIES protocol
pub enum IngressECIESValue {
    /// `auth` received and decoded; carries the remote peer's public id.
    AuthReceive(PeerId),
    Ack,
    Message(Bytes),
}

/// Tokio codec for ECIES
#[derive(Debug)]
pub struct ECIESCodec {
    ecies: ECIES,
    state: ECIESState,
}

impl ECIESCodec {
    /// Create a new server codec using the given secret key
    pub fn new_server(secret_key: SecretKey) -> Result<Self, ECIESError> {
        Ok(Self {
            ecies: ECIES::new_server(secret_key)?,
            state: ECIESState::Auth,
        })
    }

    /// Create a new client codec using the given secret key and the server's public id
    pub fn new_client(secret_key: SecretKey, remote_id: PeerId) -> Result<Self, ECIESError> {
        Ok(Self {
            ecies: ECIES::new_client(secret_key, remote_id)?,
            state: ECIESState::Auth,
        })
    }
}

impl Decoder for ECIESCodec {
    type Item = IngressECIESValue;
    type Error = io::Error;

    #[instrument(level = "trace", skip_all, fields(peer=&*format!("{:?}", self.ecies.remote_id.map(|s| s.to_string())), state=&*format!("{:?}", self.state)))]
    fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // Loop so that decoding a header can immediately attempt the body
        // from the same buffer. Returning Ok(None) asks the framer for more bytes.
        loop {
            match self.state {
                ECIESState::Auth => {
                    trace!("parsing auth");
                    // Handshake messages are prefixed with a 2-byte big-endian size.
                    if buf.len() < 2 {
                        return Ok(None);
                    }

                    let payload_size = u16::from_be_bytes([buf[0], buf[1]]) as usize;
                    let total_size = payload_size + 2;

                    if buf.len() < total_size {
                        trace!("current len {}, need {}", buf.len(), total_size);
                        return Ok(None);
                    }

                    self.ecies.read_auth(&mut *buf.split_to(total_size))?;

                    self.state = ECIESState::Header;
                    return Ok(Some(IngressECIESValue::AuthReceive(self.ecies.remote_id())));
                }
                ECIESState::Ack => {
                    trace!("parsing ack with len {}", buf.len());
                    if buf.len() < 2 {
                        return Ok(None);
                    }

                    let payload_size = u16::from_be_bytes([buf[0], buf[1]]) as usize;
                    let total_size = payload_size + 2;

                    if buf.len() < total_size {
                        trace!("current len {}, need {}", buf.len(), total_size);
                        return Ok(None);
                    }

                    self.ecies.read_ack(&mut *buf.split_to(total_size))?;

                    self.state = ECIESState::Header;
                    return Ok(Some(IngressECIESValue::Ack));
                }
                ECIESState::Header => {
                    if buf.len() < ECIES::header_len() {
                        trace!("current len {}, need {}", buf.len(), ECIES::header_len());
                        return Ok(None);
                    }

                    self.ecies
                        .read_header(&mut *buf.split_to(ECIES::header_len()))?;

                    // Header decoded: fall through to try the body on this pass.
                    self.state = ECIESState::Body;
                }
                ECIESState::Body => {
                    if buf.len() < self.ecies.body_len() {
                        return Ok(None);
                    }

                    let mut data = buf.split_to(self.ecies.body_len());
                    let ret = Bytes::copy_from_slice(self.ecies.read_body(&mut *data)?);

                    self.state = ECIESState::Header;
                    return Ok(Some(IngressECIESValue::Message(ret)));
                }
            }
        }
    }
}
/// `ECIES` stream over TCP exchanging raw bytes
#[derive(Debug)]
pub struct ECIESStream<Io> {
    stream: Framed<Io, ECIESCodec>,
    remote_id: PeerId,
}

impl<Io> ECIESStream<Io>
where
    Io: Transport,
{
    /// Connect to an `ECIES` server
    ///
    /// Drives the client side of the handshake: send `auth`, then expect a
    /// single `ack` before yielding the established stream.
    #[instrument(skip(transport, secret_key), fields(peer=&*format!("{:?}", transport.remote_addr())))]
    pub async fn connect(
        transport: Io,
        secret_key: SecretKey,
        remote_id: PeerId,
    ) -> anyhow::Result<Self> {
        let ecies = ECIESCodec::new_client(secret_key, remote_id)
            .map_err(|_| io::Error::new(io::ErrorKind::Other, "invalid handshake"))?;

        let mut transport = ecies.framed(transport);

        trace!("sending ecies auth ...");
        transport.send(EgressECIESValue::Auth).await?;

        trace!("waiting for ecies ack ...");
        let ack = transport.try_next().await?;

        trace!("parsing ecies ack ...");
        if matches!(ack, Some(IngressECIESValue::Ack)) {
            Ok(Self {
                stream: transport,
                remote_id,
            })
        } else {
            bail!("invalid handshake: expected ack, got {:?} instead", ack)
        }
    }

    /// Listen on a just connected ECIES client
    ///
    /// Drives the server side of the handshake: expect `auth` (which reveals
    /// the remote peer id), then reply with `ack`.
    #[instrument(skip_all, fields(peer=&*format!("{:?}", transport.remote_addr())))]
    pub async fn incoming(transport: Io, secret_key: SecretKey) -> anyhow::Result<Self> {
        let ecies = ECIESCodec::new_server(secret_key).context("handshake error")?;

        debug!("incoming ecies stream ...");
        let mut transport = ecies.framed(transport);
        let ack = transport.try_next().await?;

        debug!("receiving ecies auth");
        let remote_id = match ack {
            Some(IngressECIESValue::AuthReceive(remote_id)) => remote_id,
            other => {
                debug!("expected auth, got {:?} instead", other);
                bail!("invalid handshake");
            }
        };

        debug!("sending ecies ack ...");
        transport
            .send(EgressECIESValue::Ack)
            .await
            .context("failed to send ECIES auth")?;

        Ok(Self {
            stream: transport,
            remote_id,
        })
    }

    /// Get the remote id
    pub fn remote_id(&self) -> PeerId {
        self.remote_id
    }
}

impl<Io> Stream for ECIESStream<Io>
where
    Io: Transport,
{
    type Item = Result<Bytes, io::Error>;

    /// Yields decrypted message bodies; any non-`Message` frame after the
    /// handshake is a protocol error.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match ready!(Pin::new(&mut self.get_mut().stream).poll_next(cx)) {
            Some(Ok(IngressECIESValue::Message(body))) => Poll::Ready(Some(Ok(body))),
            Some(other) => Poll::Ready(Some(Err(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "ECIES stream protocol error: expected message, received {:?}",
                    other
                ),
            )))),
            None => Poll::Ready(None),
        }
    }
}
Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 291 | Pin::new(&mut self.get_mut().stream).poll_flush(cx) 292 | } 293 | 294 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 295 | Pin::new(&mut self.get_mut().stream).poll_close(cx) 296 | } 297 | } 298 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use thiserror::Error; 3 | 4 | #[derive(Debug, Error)] 5 | pub enum ECIESError { 6 | #[error("IO error")] 7 | IO(#[from] io::Error), 8 | #[error("tag check failure")] 9 | TagCheckFailed, 10 | #[error("invalid auth data")] 11 | InvalidAuthData, 12 | #[error("invalid ack data")] 13 | InvalidAckData, 14 | #[error("invalid body data")] 15 | InvalidHeader, 16 | #[error("other")] 17 | Other(#[from] anyhow::Error), 18 | } 19 | 20 | impl From for io::Error { 21 | fn from(error: ECIESError) -> Self { 22 | Self::new(io::ErrorKind::Other, format!("ECIES error: {:?}", error)) 23 | } 24 | } 25 | 26 | impl From for ECIESError { 27 | fn from(error: secp256k1::Error) -> Self { 28 | Self::Other(error.into()) 29 | } 30 | } 31 | 32 | impl From for ECIESError { 33 | fn from(error: fastrlp::DecodeError) -> Self { 34 | Self::Other(error.into()) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Ethereum devp2p protocol implementation 2 | //! 3 | //! It is layered in the following way: 4 | //! * `RLPxNode` which represents the whole pool of connected peers. It handles message routing and peer management. 5 | //! * `MuxServer` which provides a request-response API to otherwise stateless P2P protocol. 6 | //! * `EthIngressServer` which `MuxServer` calls into when new requests and gossip messages arrive. 7 | //! 
* `MuxServer` itself implements `EthProtocol` which is a simple gateway to abstract Ethereum network. 8 | 9 | #![allow(clippy::large_enum_variant, clippy::upper_case_acronyms)] 10 | 11 | pub mod disc; 12 | pub mod ecies; 13 | pub mod errors; 14 | pub mod mac; 15 | pub mod node_filter; 16 | pub mod peer; 17 | pub mod rlpx; 18 | pub mod transport; 19 | pub mod types; 20 | pub mod util; 21 | 22 | pub use disc::*; 23 | pub use peer::{DisconnectReason, PeerStream}; 24 | pub use rlpx::{ListenOptions, Swarm, SwarmBuilder}; 25 | pub use types::{ 26 | CapabilityId, CapabilityInfo, CapabilityName, CapabilityServer, CapabilityVersion, 27 | InboundEvent, Message, NodeRecord, OutboundEvent, PeerId, 28 | }; 29 | -------------------------------------------------------------------------------- /src/mac.rs: -------------------------------------------------------------------------------- 1 | use aes::Aes256Enc; 2 | use block_padding::NoPadding; 3 | use cipher::BlockEncrypt; 4 | use digest::KeyInit; 5 | use ethereum_types::{H128, H256}; 6 | use generic_array::GenericArray; 7 | use sha3::{Digest, Keccak256}; 8 | use typenum::U16; 9 | 10 | pub type HeaderBytes = GenericArray; 11 | 12 | #[derive(Debug)] 13 | pub struct MAC { 14 | secret: H256, 15 | hasher: Keccak256, 16 | } 17 | 18 | impl MAC { 19 | pub fn new(secret: H256) -> Self { 20 | Self { 21 | secret, 22 | hasher: Keccak256::new(), 23 | } 24 | } 25 | 26 | pub fn update(&mut self, data: &[u8]) { 27 | self.hasher.update(data) 28 | } 29 | 30 | pub fn update_header(&mut self, data: &HeaderBytes) { 31 | let aes = Aes256Enc::new_from_slice(self.secret.as_ref()).unwrap(); 32 | let mut encrypted = self.digest().to_fixed_bytes(); 33 | aes.encrypt_padded::(&mut encrypted, H128::len_bytes()) 34 | .unwrap(); 35 | for i in 0..data.len() { 36 | encrypted[i] ^= data[i]; 37 | } 38 | self.hasher.update(encrypted); 39 | } 40 | 41 | pub fn update_body(&mut self, data: &[u8]) { 42 | self.hasher.update(data); 43 | let prev = self.digest(); 44 | let 
aes = Aes256Enc::new_from_slice(self.secret.as_ref()).unwrap(); 45 | let mut encrypted = self.digest().to_fixed_bytes(); 46 | aes.encrypt_padded::(&mut encrypted, H128::len_bytes()) 47 | .unwrap(); 48 | for i in 0..16 { 49 | encrypted[i] ^= prev[i]; 50 | } 51 | self.hasher.update(encrypted); 52 | } 53 | 54 | pub fn digest(&self) -> H128 { 55 | H128::from_slice(&self.hasher.clone().finalize()[0..16]) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/node_filter.rs: -------------------------------------------------------------------------------- 1 | use super::types::PeerId; 2 | use std::{ 3 | collections::HashSet, 4 | fmt::Debug, 5 | sync::{ 6 | atomic::{AtomicUsize, Ordering}, 7 | Arc, 8 | }, 9 | }; 10 | 11 | pub trait NodeFilter: Debug + Send + 'static { 12 | fn max_peers(&self) -> usize; 13 | fn is_banned(&self, id: PeerId) -> bool; 14 | fn is_allowed(&self, pool_size: usize, id: PeerId) -> bool { 15 | pool_size < self.max_peers() && !self.is_banned(id) 16 | } 17 | fn ban(&mut self, id: PeerId); 18 | } 19 | 20 | #[derive(Debug)] 21 | pub struct MemoryNodeFilter { 22 | peer_limiter: Arc, 23 | ban_list: HashSet, 24 | } 25 | 26 | impl MemoryNodeFilter { 27 | pub fn new(peer_limiter: Arc) -> Self { 28 | Self { 29 | peer_limiter, 30 | ban_list: Default::default(), 31 | } 32 | } 33 | } 34 | 35 | impl NodeFilter for MemoryNodeFilter { 36 | fn max_peers(&self) -> usize { 37 | self.peer_limiter.load(Ordering::Relaxed) 38 | } 39 | 40 | fn is_banned(&self, id: PeerId) -> bool { 41 | self.ban_list.contains(&id) 42 | } 43 | 44 | fn ban(&mut self, id: PeerId) { 45 | self.ban_list.insert(id); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/peer.rs: -------------------------------------------------------------------------------- 1 | use super::{ecies::ECIESStream, transport::Transport, types::*, util::pk2id}; 2 | use anyhow::{anyhow, bail, Context as _}; 3 | use 
bytes::{Bytes, BytesMut}; 4 | use derive_more::Display; 5 | use enum_primitive_derive::Primitive; 6 | use fastrlp::*; 7 | use futures::{ready, Sink, SinkExt}; 8 | use num_traits::*; 9 | use secp256k1::{PublicKey, SecretKey, SECP256K1}; 10 | use std::{ 11 | fmt::Debug, 12 | io, 13 | pin::Pin, 14 | task::{Context, Poll}, 15 | }; 16 | use tokio_stream::{Stream, StreamExt}; 17 | use tracing::*; 18 | 19 | const MAX_PAYLOAD_SIZE: usize = 16 * 1024 * 1024; 20 | 21 | /// RLPx disconnect reason. 22 | #[derive(Clone, Copy, Debug, Display, Primitive)] 23 | pub enum DisconnectReason { 24 | #[display(fmt = "disconnect requested")] 25 | DisconnectRequested = 0x00, 26 | #[display(fmt = "TCP sub-system error")] 27 | TcpSubsystemError = 0x01, 28 | #[display(fmt = "breach of protocol, e.g. a malformed message, bad RLP, ...")] 29 | ProtocolBreach = 0x02, 30 | #[display(fmt = "useless peer")] 31 | UselessPeer = 0x03, 32 | #[display(fmt = "too many peers")] 33 | TooManyPeers = 0x04, 34 | #[display(fmt = "already connected")] 35 | AlreadyConnected = 0x05, 36 | #[display(fmt = "incompatible P2P protocol version")] 37 | IncompatibleP2PProtocolVersion = 0x06, 38 | #[display(fmt = "null node identity received - this is automatically invalid")] 39 | NullNodeIdentity = 0x07, 40 | #[display(fmt = "client quitting")] 41 | ClientQuitting = 0x08, 42 | #[display(fmt = "unexpected identity in handshake")] 43 | UnexpectedHandshakeIdentity = 0x09, 44 | #[display(fmt = "identity is the same as this node (i.e. connected to itself)")] 45 | ConnectedToSelf = 0x0a, 46 | #[display(fmt = "ping timeout")] 47 | PingTimeout = 0x0b, 48 | #[display(fmt = "some other reason specific to a subprotocol")] 49 | SubprotocolSpecific = 0x10, 50 | } 51 | 52 | /// RLPx protocol version. 
/// RLPx protocol version.
#[derive(Copy, Clone, Debug, Primitive)]
pub enum ProtocolVersion {
    /// p2p capability version 5 — the only version this crate advertises.
    V5 = 5,
}

/// Capability advertisement carried inside the Hello message.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable)]
pub struct CapabilityMessage {
    pub name: CapabilityName,
    pub version: usize,
}

/// devp2p Hello message (message id 0x00), exchanged right after the ECIES
/// handshake to negotiate shared capabilities.
#[derive(Clone, Debug, RlpEncodable, RlpDecodable)]
pub struct HelloMessage {
    pub protocol_version: usize,
    pub client_version: String,
    pub capabilities: Vec<CapabilityMessage>,
    pub port: u16,
    pub id: PeerId,
}

// Paired snappy compressor/decompressor for RLPx frame payloads.
#[derive(Debug)]
struct Snappy {
    encoder: snap::raw::Encoder,
    decoder: snap::raw::Decoder,
}

impl Default for Snappy {
    fn default() -> Self {
        Self {
            encoder: snap::raw::Encoder::new(),
            decoder: snap::raw::Decoder::new(),
        }
    }
}

/// RLPx transport peer stream
#[allow(unused)]
#[derive(Debug)]
pub struct PeerStream<Io> {
    stream: ECIESStream<Io>, // framed, encrypted transport
    client_version: String,
    shared_capabilities: Vec<CapabilityInfo>, // negotiated during Hello, sorted by name
    port: u16,
    id: PeerId,        // our public id
    remote_id: PeerId, // the peer's public id

    snappy: Snappy,

    disconnected: bool, // set once a Disconnect was sent or received
}
    /// Create a new peer stream
    ///
    /// Drives the RLPx Hello exchange over an established ECIES transport:
    /// sends our Hello, parses the peer's reply (which may be an explicit
    /// Disconnect), and negotiates the shared capability set. Sends a
    /// `UselessPeer` disconnect and fails if nothing is shared.
    #[instrument(skip_all, fields(id=&*transport.remote_id().to_string()))]
    pub async fn new(
        mut transport: ECIESStream<Io>,
        secret_key: SecretKey,
        client_version: String,
        capabilities: Vec<CapabilityInfo>,
        port: u16,
    ) -> anyhow::Result<Self> {
        let public_key = PublicKey::from_secret_key(SECP256K1, &secret_key);
        let id = pk2id(&public_key);
        // Clones kept aside because the originals are moved into the Hello.
        let nonhello_capabilities = capabilities.clone();
        let nonhello_client_version = client_version.clone();

        debug!("Connecting to RLPx peer {:02x}", transport.remote_id());

        let hello = HelloMessage {
            port,
            id,
            protocol_version: ProtocolVersion::V5.to_usize().unwrap(),
            client_version,
            capabilities: {
                let mut caps = Vec::new();
                for cap in &capabilities {
                    caps.push(CapabilityMessage {
                        name: cap.name,
                        version: cap.version,
                    });
                }
                caps
            },
        };
        trace!("Sending hello message: {:?}", hello);

        // Wire format: message id 0x00 followed by the RLP-encoded Hello.
        let mut outbound_hello = BytesMut::new();
        0_u8.encode(&mut outbound_hello);
        hello.encode(&mut outbound_hello);

        trace!("Outbound hello: {}", hex::encode(&outbound_hello));
        transport.send(outbound_hello.freeze()).await?;

        let hello = transport.try_next().await?;

        let hello = hello.ok_or_else(|| {
            debug!("Hello failed because of no value");
            anyhow!("hello failed (no value)")
        })?;
        trace!("Receiving hello message: {:02x?}", hello);

        let message_id = u8::decode(&mut &hello[..1])?;
        let payload = &mut &hello[1..];
        match message_id {
            0 => {}
            // 0x01: the peer sent an explicit Disconnect instead of a Hello.
            1 => {
                let reason = u8::decode(payload).map(DisconnectReason::from_u8)?;
                bail!(
                    "explicit disconnect: {}",
                    reason
                        .map(|r| r.to_string())
                        .unwrap_or_else(|| "(unknown)".to_string())
                );
            }
            _ => {
                bail!(
                    "Hello failed because message id is not 0 but {}: {:02x?}",
                    message_id,
                    payload
                );
            }
        }

        let val = HelloMessage::decode(payload).context("hello failed (rlp)")?;
        debug!("hello message: {:?}", val);
        let mut shared_capabilities: Vec<CapabilityInfo> = Vec::new();

        // Keep only capabilities (name + exact version) advertised by both sides.
        for cap_info in nonhello_capabilities {
            let cap_match = val
                .capabilities
                .iter()
                .any(|v| v.name == cap_info.name && v.version == cap_info.version);

            if cap_match {
                shared_capabilities.push(cap_info);
            }
        }

        let shared_caps_original = shared_capabilities.clone();

        // For capabilities shared at several versions, retain only the highest.
        for cap_info in shared_caps_original {
            shared_capabilities
                .retain(|v| v.name != cap_info.name || v.version >= cap_info.version);
        }

        // Sorted by name: subprotocol message-id multiplexing relies on this order.
        shared_capabilities.sort_by_key(|v| v.name);

        let no_shared_caps = shared_capabilities.is_empty();

        let mut this = Self {
            remote_id: transport.remote_id(),
            stream: transport,
            client_version: nonhello_client_version,
            port,
            id,
            shared_capabilities,
            snappy: Snappy::default(),
            disconnected: false,
        };

        if no_shared_caps {
            debug!("No shared capabilities, disconnecting.");
            // Best effort: ignore send errors, we are bailing out regardless.
            let _ = this
                .send(PeerMessage::Disconnect(DisconnectReason::UselessPeer))
                .await;

            bail!(
                "Handshake failed - no shared capabilities (our: {:?}, their: {:?})",
                capabilities,
                val.capabilities
            );
        }

        Ok(this)
    }
Message, 285 | } 286 | 287 | #[derive(Clone, Debug)] 288 | pub enum PeerMessage { 289 | Disconnect(DisconnectReason), 290 | Ping, 291 | Pong, 292 | Subprotocol(SubprotocolMessage), 293 | } 294 | 295 | impl Stream for PeerStream 296 | where 297 | Io: Transport, 298 | { 299 | type Item = Result; 300 | 301 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 302 | let mut s = self.get_mut(); 303 | 304 | if s.disconnected { 305 | return Poll::Ready(None); 306 | } 307 | 308 | match ready!(Pin::new(&mut s.stream).poll_next(cx)) { 309 | Some(Ok(val)) => { 310 | trace!("Received peer message: {}", hex::encode(&val)); 311 | 312 | let (cap, id, data) = match u8::decode(&mut &val[..1]) { 313 | Ok(message_id) => { 314 | let input = &val[1..]; 315 | let payload_len = snap::raw::decompress_len(input)?; 316 | if payload_len > MAX_PAYLOAD_SIZE { 317 | return Poll::Ready(Some(Err(io::Error::new( 318 | io::ErrorKind::InvalidInput, 319 | format!( 320 | "payload size ({}) exceeds limit ({} bytes)", 321 | payload_len, MAX_PAYLOAD_SIZE 322 | ), 323 | )))); 324 | } 325 | let data = Bytes::from(s.snappy.decoder.decompress_vec(input)?); 326 | trace!("Decompressed raw message data: {}", hex::encode(&data)); 327 | 328 | if message_id < 0x10 { 329 | match message_id { 330 | 0x01 => { 331 | s.disconnected = true; 332 | if let Some(reason) = u8::decode(&mut &*data) 333 | .ok() 334 | .and_then(DisconnectReason::from_u8) 335 | { 336 | return Poll::Ready(Some(Ok(PeerMessage::Disconnect( 337 | reason, 338 | )))); 339 | } else { 340 | return Poll::Ready(Some(Err(io::Error::new( 341 | io::ErrorKind::Other, 342 | format!( 343 | "peer disconnected with malformed message: {}", 344 | hex::encode(data) 345 | ), 346 | )))); 347 | } 348 | } 349 | 0x02 => { 350 | debug!("received ping message data {:?}", data); 351 | return Poll::Ready(Some(Ok(PeerMessage::Ping))); 352 | } 353 | 0x03 => { 354 | debug!("received pong message"); 355 | return Poll::Ready(Some(Ok(PeerMessage::Pong))); 356 | } 
357 | _ => { 358 | debug!("received unknown reserved message"); 359 | return Poll::Ready(Some(Err(io::Error::new( 360 | io::ErrorKind::Other, 361 | "unhandled reserved message", 362 | )))); 363 | } 364 | } 365 | } 366 | 367 | let mut message_id = message_id as usize - 0x10; 368 | let mut index = 0; 369 | for cap in &s.shared_capabilities { 370 | if message_id > cap.length { 371 | message_id -= cap.length; 372 | index += 1; 373 | } 374 | } 375 | if index >= s.shared_capabilities.len() { 376 | return Poll::Ready(Some(Err(io::Error::new( 377 | io::ErrorKind::Other, 378 | "invalid message id (out of cap range)", 379 | )))); 380 | } 381 | (s.shared_capabilities[index], message_id, data) 382 | } 383 | Err(e) => { 384 | return Poll::Ready(Some(Err(io::Error::new( 385 | io::ErrorKind::Other, 386 | format!("message id parsing failed (invalid): {}", e), 387 | )))); 388 | } 389 | }; 390 | 391 | trace!( 392 | "Cap: {}, id: {}, data: {}", 393 | CapabilityId::from(cap), 394 | id, 395 | hex::encode(&data) 396 | ); 397 | 398 | Poll::Ready(Some(Ok(PeerMessage::Subprotocol(SubprotocolMessage { 399 | cap_name: cap.name, 400 | message: Message { id, data }, 401 | })))) 402 | } 403 | Some(Err(e)) => Poll::Ready(Some(Err(e))), 404 | None => Poll::Ready(None), 405 | } 406 | } 407 | } 408 | 409 | impl Sink for PeerStream 410 | where 411 | Io: Transport, 412 | { 413 | type Error = io::Error; 414 | 415 | fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 416 | Pin::new(&mut self.get_mut().stream).poll_ready(cx) 417 | } 418 | 419 | fn start_send(self: Pin<&mut Self>, message: PeerMessage) -> Result<(), Self::Error> { 420 | let this = self.get_mut(); 421 | 422 | if this.disconnected { 423 | return Err(io::Error::new( 424 | io::ErrorKind::BrokenPipe, 425 | "disconnection requested", 426 | )); 427 | } 428 | 429 | let (message_id, payload) = match message { 430 | PeerMessage::Disconnect(reason) => { 431 | this.disconnected = true; 432 | ( 433 | 0x01, 434 | 
fastrlp::encode_fixed_size(&reason.to_u8().unwrap()) 435 | .to_vec() 436 | .into(), 437 | ) 438 | } 439 | PeerMessage::Ping => { 440 | debug!("sending ping message"); 441 | (0x02, Bytes::from_static(&[EMPTY_LIST_CODE])) 442 | } 443 | PeerMessage::Pong => { 444 | debug!("sending pong message"); 445 | (0x03, Bytes::from_static(&[EMPTY_LIST_CODE])) 446 | } 447 | PeerMessage::Subprotocol(SubprotocolMessage { cap_name, message }) => { 448 | let Message { id, data } = message; 449 | let cap = *this 450 | .shared_capabilities 451 | .iter() 452 | .find(|cap| cap.name == cap_name) 453 | .unwrap_or_else(|| { 454 | panic!( 455 | "attempted to send payload of unsupported capability ({}/{}/{})", 456 | cap_name.0, 457 | id, 458 | this.remote_id(), 459 | ) 460 | }); 461 | 462 | assert!( 463 | id < cap.length, 464 | "attempted to send payload with message id too big ({}/{}/{})", 465 | cap_name.0, 466 | id, 467 | this.remote_id() 468 | ); 469 | 470 | let mut message_id = 0x10; 471 | for scap in &this.shared_capabilities { 472 | if scap == &cap { 473 | break; 474 | } 475 | 476 | message_id += scap.length; 477 | } 478 | message_id += id; 479 | 480 | (message_id, data) 481 | } 482 | }; 483 | 484 | let mut msg = BytesMut::with_capacity(2 + payload.len()); 485 | message_id.encode(&mut msg); 486 | 487 | let mut buf = msg.split_off(msg.len()); 488 | 489 | if payload.len() > MAX_PAYLOAD_SIZE { 490 | return Err(io::Error::new( 491 | io::ErrorKind::InvalidInput, 492 | format!( 493 | "payload size ({}) exceeds limit ({} bytes)", 494 | payload.len(), 495 | MAX_PAYLOAD_SIZE 496 | ), 497 | )); 498 | } 499 | 500 | buf.resize(snap::raw::max_compress_len(payload.len()), 0); 501 | 502 | let compressed_len = this.snappy.encoder.compress(&*payload, &mut buf).unwrap(); 503 | buf.truncate(compressed_len); 504 | 505 | msg.unsplit(buf); 506 | 507 | Pin::new(&mut this.stream).start_send(msg.freeze())?; 508 | 509 | Ok(()) 510 | } 511 | 512 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> 
{ 513 | Pin::new(&mut self.get_mut().stream).poll_flush(cx) 514 | } 515 | 516 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 517 | Pin::new(&mut self.get_mut().stream).poll_close(cx) 518 | } 519 | } 520 | -------------------------------------------------------------------------------- /src/rlpx.rs: -------------------------------------------------------------------------------- 1 | //! RLPx protocol implementation in Rust 2 | 3 | use super::{ 4 | disc::Discovery, 5 | node_filter::{MemoryNodeFilter, NodeFilter}, 6 | peer::*, 7 | transport::{TcpServer, TokioCidrListener, Transport}, 8 | types::*, 9 | }; 10 | use anyhow::{anyhow, bail, Context}; 11 | use cidr::IpCidr; 12 | use educe::Educe; 13 | use futures::sink::SinkExt; 14 | use lru::LruCache; 15 | use parking_lot::Mutex; 16 | use secp256k1::SecretKey; 17 | use std::{ 18 | collections::{hash_map::Entry, BTreeMap, HashMap}, 19 | fmt::Debug, 20 | future::Future, 21 | net::SocketAddr, 22 | num::NonZeroUsize, 23 | ops::Deref, 24 | sync::{ 25 | atomic::{AtomicBool, AtomicUsize, Ordering}, 26 | Arc, Weak, 27 | }, 28 | time::{Duration, Instant}, 29 | }; 30 | use task_group::TaskGroup; 31 | use tokio::{ 32 | net::{TcpListener, TcpStream}, 33 | sync::{ 34 | mpsc::{channel, unbounded_channel}, 35 | oneshot::{channel as oneshot, Sender as OneshotSender}, 36 | Mutex as AsyncMutex, OwnedSemaphorePermit, Semaphore, 37 | }, 38 | time::sleep, 39 | }; 40 | use tokio_stream::{StreamExt, StreamMap}; 41 | use tracing::*; 42 | use uuid::Uuid; 43 | 44 | const GRACE_PERIOD_SECS: u64 = 2; 45 | const HANDSHAKE_TIMEOUT_SECS: u64 = 10; 46 | const PING_TIMEOUT: Duration = Duration::from_secs(15); 47 | const PING_INTERVAL: Duration = Duration::from_secs(60); 48 | const MAX_FAILED_PINGS: usize = 3; 49 | const _DISCOVERY_TIMEOUT_SECS: u64 = 90; 50 | const _DISCOVERY_CONNECT_TIMEOUT_SECS: u64 = 5; 51 | 52 | #[derive(Clone, Copy, Debug)] 53 | enum DisconnectInitiator { 54 | Local, 55 | LocalForceful, 56 | Remote, 57 | } 58 
| 59 | #[derive(Debug)] 60 | struct DisconnectSignal { 61 | initiator: DisconnectInitiator, 62 | reason: DisconnectReason, 63 | } 64 | 65 | #[derive(Debug)] 66 | struct ConnectedPeerState { 67 | _tasks: TaskGroup, 68 | } 69 | 70 | #[derive(Debug)] 71 | enum PeerConnectionState { 72 | Connecting { connection_id: Uuid }, 73 | Connected(ConnectedPeerState), 74 | } 75 | 76 | impl PeerConnectionState { 77 | const fn is_connected(&self) -> bool { 78 | matches!(self, Self::Connected(_)) 79 | } 80 | } 81 | 82 | #[derive(Debug)] 83 | struct PeerState { 84 | connection_state: PeerConnectionState, 85 | _sem_permit: OwnedSemaphorePermit, 86 | } 87 | 88 | #[derive(Debug)] 89 | struct PeerStreams { 90 | /// Mapping of remote IDs to streams in `StreamMap` 91 | mapping: HashMap, 92 | semaphore: Arc, 93 | } 94 | 95 | impl PeerStreams { 96 | fn new(max_peers: usize) -> Self { 97 | Self { 98 | mapping: Default::default(), 99 | semaphore: Arc::new(Semaphore::new(max_peers)), 100 | } 101 | } 102 | 103 | fn disconnect_peer(&mut self, remote_id: PeerId) -> bool { 104 | self.mapping.remove(&remote_id).is_some() 105 | } 106 | } 107 | 108 | #[derive(Educe)] 109 | #[educe(Clone)] 110 | struct PeerStreamHandshakeData { 111 | port: u16, 112 | secret_key: SecretKey, 113 | client_version: String, 114 | capabilities: Arc, 115 | capability_server: Arc, 116 | } 117 | 118 | async fn handle_incoming( 119 | task_group: Weak, 120 | streams: Arc>, 121 | node_filter: Arc>, 122 | tcp_incoming: TS, 123 | handshake_data: PeerStreamHandshakeData, 124 | ) where 125 | TS: TcpServer, 126 | C: CapabilityServer, 127 | { 128 | let _: anyhow::Result<()> = async { 129 | loop { 130 | match tcp_incoming.accept().await { 131 | Err(e) => { 132 | bail!("failed to accept peer: {:?}, shutting down", e); 133 | } 134 | Ok(stream) => { 135 | let tasks = task_group 136 | .upgrade() 137 | .ok_or_else(|| anyhow!("task group is down"))?; 138 | 139 | let task_name = format!("Incoming connection setup: {:?}", stream); 140 | 141 | 
let f = handle_incoming_request( 142 | streams.clone(), 143 | node_filter.clone(), 144 | stream, 145 | handshake_data.clone(), 146 | ); 147 | tasks.spawn_with_name(task_name, f); 148 | } 149 | } 150 | } 151 | } 152 | .await; 153 | } 154 | 155 | /// Set up newly connected peer's state, start its tasks 156 | fn setup_peer_state( 157 | streams: Weak>, 158 | capability_server: Arc, 159 | remote_id: PeerId, 160 | peer: PeerStream, 161 | ) -> ConnectedPeerState 162 | where 163 | C: CapabilityServer, 164 | Io: Transport, 165 | { 166 | let capability_set = peer 167 | .capabilities() 168 | .iter() 169 | .copied() 170 | .map(|cap_info| (cap_info.name, cap_info.version)) 171 | .collect::>(); 172 | let (mut sink, mut stream) = futures::StreamExt::split(peer); 173 | let (peer_disconnect_tx, mut peer_disconnect_rx) = unbounded_channel(); 174 | let tasks = TaskGroup::default(); 175 | 176 | capability_server.on_peer_connect(remote_id, capability_set); 177 | 178 | let pinged = Arc::new(AtomicBool::default()); 179 | let (pings_tx, mut pings) = channel(1); 180 | let (pongs_tx, mut pongs) = channel(1); 181 | 182 | // This will handle incoming packets from peer. 
183 | tasks.spawn_with_name(format!("peer {} ingress router", remote_id), { 184 | let peer_disconnect_tx = peer_disconnect_tx.clone(); 185 | let capability_server = capability_server.clone(); 186 | let pinged = pinged.clone(); 187 | async move { 188 | let disconnect_signal = { 189 | async move { 190 | while let Some(message) = stream.next().await { 191 | match message { 192 | Err(e) => { 193 | debug!("Peer incoming error: {}", e); 194 | break; 195 | } 196 | Ok(PeerMessage::Subprotocol(SubprotocolMessage { 197 | cap_name, 198 | message, 199 | })) => { 200 | // Actually handle the message 201 | capability_server 202 | .on_peer_event( 203 | remote_id, 204 | InboundEvent::Message { 205 | capability_name: cap_name, 206 | message, 207 | }, 208 | ) 209 | .await 210 | } 211 | Ok(PeerMessage::Disconnect(reason)) => { 212 | // Peer has requested disconnection. 213 | return DisconnectSignal { 214 | initiator: DisconnectInitiator::Remote, 215 | reason, 216 | }; 217 | } 218 | Ok(PeerMessage::Ping) => { 219 | let _ = pongs_tx.send(()).await; 220 | } 221 | Ok(PeerMessage::Pong) => { 222 | // Pong received, peer is off the hook 223 | pinged.store(false, Ordering::SeqCst); 224 | } 225 | } 226 | } 227 | 228 | // Ingress stream is closed, force disconnect the peer. 229 | DisconnectSignal { 230 | initiator: DisconnectInitiator::Remote, 231 | reason: DisconnectReason::DisconnectRequested, 232 | } 233 | } 234 | } 235 | .await; 236 | 237 | let _ = peer_disconnect_tx.send(disconnect_signal); 238 | } 239 | .instrument(span!(Level::DEBUG, "IN", "peer={}", remote_id.to_string(),)) 240 | }); 241 | 242 | // This will send our packets to peer. 
243 | tasks.spawn_with_name( 244 | format!("peer {} egress router & disconnector", remote_id), 245 | async move { 246 | let mut event_fut = capability_server.next(remote_id); 247 | loop { 248 | let mut disconnecting = None; 249 | 250 | // Egress message and trigger to execute _after_ it is sent 251 | let mut egress = Option::<(PeerMessage, Option>)>::None; 252 | tokio::select! { 253 | // Handle event from capability server. 254 | msg = &mut event_fut => { 255 | // Invariant: CapabilityServer::next() will never be called after disconnect event 256 | match msg { 257 | OutboundEvent::Message { 258 | capability_name, message 259 | } => { 260 | event_fut = capability_server.next(remote_id); 261 | egress = Some((PeerMessage::Subprotocol(SubprotocolMessage { 262 | cap_name: capability_name, message 263 | }), None)); 264 | } 265 | OutboundEvent::Disconnect { 266 | reason 267 | } => { 268 | egress = Some((PeerMessage::Disconnect(reason), None)); 269 | disconnecting = Some(DisconnectSignal { 270 | initiator: DisconnectInitiator::Local, reason 271 | }); 272 | } 273 | }; 274 | }, 275 | // We ping the peer. 276 | Some(tx) = pings.recv() => { 277 | egress = Some((PeerMessage::Ping, Some(tx))); 278 | } 279 | // Peer has pinged us. 280 | Some(_) = pongs.recv() => { 281 | egress = Some((PeerMessage::Pong, None)); 282 | } 283 | // Ping timeout or signal from ingress router. 284 | Some(DisconnectSignal { initiator, reason }) = peer_disconnect_rx.recv() => { 285 | if let DisconnectInitiator::Local = initiator { 286 | egress = Some((PeerMessage::Disconnect(reason), None)); 287 | } 288 | disconnecting = Some(DisconnectSignal { initiator, reason }) 289 | } 290 | }; 291 | 292 | if let Some((message, trigger)) = egress { 293 | trace!("Sending message: {:?}", message); 294 | 295 | // Send egress message, force disconnect on error. 
296 | if let Err(e) = sink.send(message).await { 297 | debug!("peer disconnected with error {:?}", e); 298 | disconnecting.get_or_insert(DisconnectSignal { 299 | initiator: DisconnectInitiator::LocalForceful, 300 | reason: DisconnectReason::TcpSubsystemError, 301 | }); 302 | } else if let Some(trigger) = trigger { 303 | // Reason for signal in trigger: 304 | // We don't want to timeout peer if our TCP socket is too slow 305 | let _ = trigger.send(()); 306 | } 307 | } 308 | 309 | if let Some(DisconnectSignal { initiator, reason }) = disconnecting { 310 | debug!("Disconnecting, initiated by {initiator:?} for reason {reason:?}"); 311 | if let DisconnectInitiator::Local = initiator { 312 | // We have sent disconnect message, wait for grace period. 313 | sleep(Duration::from_secs(GRACE_PERIOD_SECS)).await; 314 | } 315 | capability_server 316 | .on_peer_event( 317 | remote_id, 318 | InboundEvent::Disconnect { 319 | reason: Some(reason), 320 | }, 321 | ) 322 | .await; 323 | break; 324 | } 325 | } 326 | 327 | // We are done, drop the peer state. 328 | if let Some(streams) = streams.upgrade() { 329 | // This is the last line guaranteed to be executed. 330 | // After this the peer's task group is dropped and any alive tasks are forcibly cancelled. 331 | streams.lock().disconnect_peer(remote_id); 332 | } 333 | } 334 | .instrument(span!( 335 | Level::DEBUG, 336 | "OUT/DISC", 337 | "peer={}", 338 | remote_id.to_string(), 339 | )), 340 | ); 341 | 342 | // This will ping the peer and disconnect if they don't respond. 
343 | tasks.spawn_with_name(format!("peer {} pinger", remote_id), async move { 344 | let mut failed_pings = 0; 345 | loop { 346 | pinged.store(true, Ordering::SeqCst); 347 | 348 | let (cb_tx, ping_sent_rx) = oneshot(); 349 | 350 | // Pipes went down, pinger must exit 351 | if pings_tx.send(cb_tx).await.is_err() || ping_sent_rx.await.is_err() { 352 | return; 353 | }; 354 | 355 | sleep(PING_TIMEOUT).await; 356 | 357 | if pinged.load(Ordering::SeqCst) { 358 | failed_pings += 1; 359 | 360 | if failed_pings >= MAX_FAILED_PINGS { 361 | let _ = peer_disconnect_tx.send(DisconnectSignal { 362 | initiator: DisconnectInitiator::Local, 363 | reason: DisconnectReason::PingTimeout, 364 | }); 365 | 366 | return; 367 | } 368 | } else { 369 | failed_pings = 0; 370 | 371 | sleep(PING_INTERVAL).await; 372 | } 373 | } 374 | }); 375 | ConnectedPeerState { _tasks: tasks } 376 | } 377 | 378 | /// Establishes the connection with peer and adds them to internal state. 379 | async fn handle_incoming_request( 380 | streams: Arc>, 381 | node_filter: Arc>, 382 | stream: Io, 383 | handshake_data: PeerStreamHandshakeData, 384 | ) where 385 | C: CapabilityServer, 386 | Io: Transport, 387 | { 388 | let PeerStreamHandshakeData { 389 | secret_key, 390 | client_version, 391 | capabilities, 392 | capability_server, 393 | port, 394 | } = handshake_data; 395 | // Do handshake and convert incoming connection into stream. 
396 | let peer_res = tokio::time::timeout( 397 | Duration::from_secs(HANDSHAKE_TIMEOUT_SECS), 398 | PeerStream::incoming( 399 | stream, 400 | secret_key, 401 | client_version, 402 | capabilities.get_capabilities().to_vec(), 403 | port, 404 | ), 405 | ) 406 | .await 407 | .unwrap_or_else(|_| Err(anyhow!("incoming connection timeout"))); 408 | 409 | match peer_res { 410 | Ok(peer) => { 411 | let remote_id = peer.remote_id(); 412 | let s = streams.clone(); 413 | let mut s = s.lock(); 414 | let node_filter = node_filter.clone(); 415 | let PeerStreams { mapping, semaphore } = &mut *s; 416 | let total_connections = mapping.len(); 417 | 418 | match mapping.entry(remote_id) { 419 | Entry::Occupied(entry) => { 420 | debug!( 421 | "We are already {} to remote peer {}!", 422 | if entry.get().connection_state.is_connected() { 423 | "connected" 424 | } else { 425 | "connecting" 426 | }, 427 | remote_id 428 | ); 429 | } 430 | Entry::Vacant(entry) => { 431 | if let Ok(sem_permit) = semaphore.clone().try_acquire_owned() { 432 | if node_filter.lock().is_allowed(total_connections, remote_id) { 433 | debug!("New incoming peer connected: {}", remote_id); 434 | entry.insert(PeerState { 435 | connection_state: PeerConnectionState::Connected(setup_peer_state( 436 | Arc::downgrade(&streams), 437 | capability_server, 438 | remote_id, 439 | peer, 440 | )), 441 | _sem_permit: sem_permit, 442 | }); 443 | } else { 444 | trace!("Node filter rejected peer {}, disconnecting", remote_id); 445 | } 446 | } 447 | } 448 | } 449 | } 450 | Err(e) => { 451 | debug!("Peer disconnected with error {}", e); 452 | } 453 | } 454 | } 455 | 456 | #[derive(Debug, Default)] 457 | struct CapabilitySet { 458 | capability_cache: Vec, 459 | } 460 | 461 | impl CapabilitySet { 462 | fn get_capabilities(&self) -> &[CapabilityInfo] { 463 | &self.capability_cache 464 | } 465 | } 466 | 467 | impl From> for CapabilitySet { 468 | fn from(inner: BTreeMap) -> Self { 469 | let capability_cache = inner 470 | .iter() 471 | .map( 
472 | |(&CapabilityId { name, version }, &length)| CapabilityInfo { 473 | name, 474 | version, 475 | length, 476 | }, 477 | ) 478 | .collect(); 479 | 480 | Self { capability_cache } 481 | } 482 | } 483 | 484 | /// This is an asynchronous RLPx server implementation. 485 | /// 486 | /// `Swarm` is the representation of swarm of connected RLPx peers that 487 | /// supports registration for capability servers. 488 | /// 489 | /// This implementation is based on the concept of structured concurrency. 490 | /// Internal state is managed by a multitude of workers that run in separate runtime tasks 491 | /// spawned on the running executor during the server creation and addition of new peers. 492 | /// All continuously running workers are inside the task scope owned by the server struct. 493 | #[derive(Educe)] 494 | #[educe(Debug)] 495 | pub struct Swarm { 496 | #[allow(unused)] 497 | tasks: Arc, 498 | 499 | streams: Arc>, 500 | 501 | currently_connecting: Arc, 502 | 503 | node_filter: Arc>, 504 | 505 | capabilities: Arc, 506 | #[educe(Debug(ignore))] 507 | capability_server: Arc, 508 | 509 | #[educe(Debug(ignore))] 510 | secret_key: SecretKey, 511 | client_version: String, 512 | port: u16, 513 | } 514 | 515 | /// Builder for ergonomically creating a new `Server`. 
516 | #[derive(Debug)] 517 | pub struct SwarmBuilder { 518 | task_group: Option>, 519 | listen_options: Option, 520 | client_version: String, 521 | } 522 | 523 | impl SwarmBuilder { 524 | pub fn with_task_group(mut self, task_group: Arc) -> Self { 525 | self.task_group = Some(task_group); 526 | self 527 | } 528 | 529 | pub fn with_listen_options(mut self, options: ListenOptions) -> Self { 530 | self.listen_options = Some(options); 531 | self 532 | } 533 | 534 | pub fn with_client_version(mut self, version: String) -> Self { 535 | self.client_version = version; 536 | self 537 | } 538 | 539 | /// Create a new RLPx node 540 | pub async fn build( 541 | self, 542 | capability_mask: BTreeMap, 543 | capability_server: Arc, 544 | secret_key: SecretKey, 545 | ) -> anyhow::Result>> { 546 | Swarm::new_inner( 547 | secret_key, 548 | self.client_version, 549 | self.task_group, 550 | capability_mask.into(), 551 | capability_server, 552 | self.listen_options, 553 | ) 554 | .await 555 | } 556 | } 557 | 558 | #[derive(Educe)] 559 | #[educe(Debug)] 560 | pub struct ListenOptions { 561 | #[educe(Debug(ignore))] 562 | discovery_tasks: Arc>>, 563 | min_peers: usize, 564 | max_peers: NonZeroUsize, 565 | addr: SocketAddr, 566 | cidr: Option, 567 | no_new_peers: Arc, 568 | } 569 | 570 | impl ListenOptions { 571 | pub fn new( 572 | discovery_tasks: StreamMap, 573 | min_peers: usize, 574 | max_peers: NonZeroUsize, 575 | addr: SocketAddr, 576 | cidr: Option, 577 | no_new_peers: Arc, 578 | ) -> Self { 579 | Self { 580 | discovery_tasks: Arc::new(AsyncMutex::new(discovery_tasks)), 581 | min_peers, 582 | max_peers, 583 | addr, 584 | cidr, 585 | no_new_peers, 586 | } 587 | } 588 | } 589 | 590 | impl Swarm<()> { 591 | pub fn builder() -> SwarmBuilder { 592 | SwarmBuilder { 593 | task_group: None, 594 | listen_options: None, 595 | client_version: format!("rust-devp2p/{}", env!("CARGO_PKG_VERSION")), 596 | } 597 | } 598 | } 599 | 600 | impl Swarm { 601 | pub async fn new( 602 | capability_mask: 
BTreeMap, 603 | capability_server: Arc, 604 | secret_key: SecretKey, 605 | ) -> anyhow::Result> { 606 | Swarm::builder() 607 | .build(capability_mask, capability_server, secret_key) 608 | .await 609 | } 610 | 611 | #[allow(unreachable_code)] 612 | async fn new_inner( 613 | secret_key: SecretKey, 614 | client_version: String, 615 | task_group: Option>, 616 | capabilities: CapabilitySet, 617 | capability_server: Arc, 618 | listen_options: Option, 619 | ) -> anyhow::Result> { 620 | let tasks = task_group.unwrap_or_default(); 621 | 622 | let port = listen_options 623 | .as_ref() 624 | .map_or(0, |options| options.addr.port()); 625 | 626 | let max_peers = listen_options 627 | .as_ref() 628 | .map_or(usize::MAX, |options| options.max_peers.get()); 629 | let streams = Arc::new(Mutex::new(PeerStreams::new(max_peers))); 630 | let node_filter = Arc::new(Mutex::new(MemoryNodeFilter::new(Arc::new( 631 | max_peers.into(), 632 | )))); 633 | 634 | let capabilities = Arc::new(capabilities); 635 | 636 | if let Some(options) = &listen_options { 637 | let tcp_incoming = TcpListener::bind(options.addr) 638 | .await 639 | .context("Failed to bind RLPx node to socket")?; 640 | let cidr = options.cidr; 641 | tasks.spawn_with_name("incoming handler", { 642 | let handshake_data = PeerStreamHandshakeData { 643 | port, 644 | secret_key, 645 | client_version: client_version.clone(), 646 | capabilities: capabilities.clone(), 647 | capability_server: capability_server.clone(), 648 | }; 649 | 650 | handle_incoming( 651 | Arc::downgrade(&tasks), 652 | streams.clone(), 653 | node_filter.clone(), 654 | TokioCidrListener::new(tcp_incoming, cidr), 655 | handshake_data, 656 | ) 657 | }); 658 | } 659 | 660 | let server = Arc::new(Self { 661 | tasks: tasks.clone(), 662 | streams, 663 | currently_connecting: Default::default(), 664 | node_filter, 665 | capabilities, 666 | capability_server, 667 | secret_key, 668 | client_version, 669 | port, 670 | }); 671 | 672 | if let Some(options) = listen_options { 
673 | tasks.spawn_with_name("dialer", { 674 | let server = Arc::downgrade(&server); 675 | let tasks = tasks.clone(); 676 | async move { 677 | let banlist = Arc::new(Mutex::new(LruCache::new(10_000))); 678 | 679 | for worker in 0..options.max_peers.get() { 680 | tasks.spawn_with_name(format!("dialer #{worker}"), { 681 | let banlist = banlist.clone(); 682 | let server = server.clone(); 683 | let discovery_tasks = options.discovery_tasks.clone(); 684 | let no_new_peers = options.no_new_peers.clone(); 685 | async move { 686 | loop { 687 | while let Some(num_peers) = server.upgrade().map(|server| server.num_peers()) { 688 | if !no_new_peers.load(Ordering::SeqCst) && (num_peers < options.min_peers || worker == 1) && num_peers < max_peers { 689 | let next_peer = discovery_tasks.lock().await.next().await; 690 | match next_peer { 691 | None => (), 692 | Some((disc_id, Err(e))) => { 693 | debug!("Failed to get new peer: {e} ({disc_id})") 694 | } 695 | Some((disc_id, Ok(NodeRecord { id, addr }))) => { 696 | let now = Instant::now(); 697 | if let Some(banned_timestamp) = 698 | banlist.lock().get_mut(&id).copied() 699 | { 700 | let time_since_ban: Duration = 701 | now - banned_timestamp; 702 | if time_since_ban <= Duration::from_secs(300) { 703 | let secs_since_ban = time_since_ban.as_secs(); 704 | debug!( 705 | "Skipping failed peer ({id}, failed {secs_since_ban}s ago)", 706 | ); 707 | continue; 708 | } 709 | } 710 | 711 | if let Some(server) = server.upgrade() { 712 | debug!("Dialing peer {id:?}@{addr} ({disc_id})"); 713 | if server 714 | .add_peer_inner(addr, id, true) 715 | .await 716 | .is_err() 717 | { 718 | banlist.lock().put(id, Instant::now()); 719 | } 720 | } else { 721 | break; 722 | } 723 | } 724 | } 725 | } else { 726 | let delay = 2000; 727 | debug!("Not accepting peers, delaying dial for {delay}ms"); 728 | sleep(Duration::from_millis(delay)).await; 729 | } 730 | } 731 | } 732 | 733 | debug!("Quitting"); 734 | } 735 | .instrument(span!( 736 | Level::DEBUG, 737 | 
"dialer", 738 | worker 739 | )) 740 | }); 741 | } 742 | } 743 | }); 744 | } 745 | 746 | Ok(server) 747 | } 748 | 749 | /// Add a new peer to this RLPx node. Returns `true` if it was added successfully (did not exist before, accepted by node filter). 750 | pub fn add_peer( 751 | &self, 752 | node_record: NodeRecord, 753 | ) -> impl Future> + Send + 'static { 754 | self.add_peer_inner(node_record.addr, node_record.id, false) 755 | } 756 | 757 | fn add_peer_inner( 758 | &self, 759 | addr: SocketAddr, 760 | remote_id: PeerId, 761 | untrusted_peer: bool, 762 | ) -> impl Future> + Send + 'static { 763 | let tasks = self.tasks.clone(); 764 | let streams = self.streams.clone(); 765 | let node_filter = self.node_filter.clone(); 766 | 767 | let capability_set = self.capabilities.get_capabilities().to_vec(); 768 | let capability_server = self.capability_server.clone(); 769 | 770 | let secret_key = self.secret_key; 771 | let client_version = self.client_version.clone(); 772 | let port = self.port; 773 | 774 | let (tx, rx) = tokio::sync::oneshot::channel(); 775 | let connection_id = Uuid::new_v4(); 776 | let currently_connecting = self.currently_connecting.clone(); 777 | 778 | // Start reaper task that will terminate this connection if connection future gets dropped. 779 | tasks.spawn_with_name(format!("connection {} reaper", connection_id), { 780 | let cid = connection_id; 781 | let streams = streams.clone(); 782 | let currently_connecting = currently_connecting.clone(); 783 | async move { 784 | if rx.await.is_err() { 785 | let mut s = streams.lock(); 786 | if let Entry::Occupied(entry) = s.mapping.entry(remote_id) { 787 | // If this is the same connection attempt, then remove. 
788 | if let PeerConnectionState::Connecting { connection_id } = 789 | entry.get().connection_state 790 | { 791 | if connection_id == cid { 792 | trace!("Reaping failed outbound connection: {}/{}", remote_id, cid); 793 | 794 | entry.remove(); 795 | } 796 | } 797 | } 798 | } 799 | currently_connecting.fetch_sub(1, Ordering::SeqCst); 800 | } 801 | }); 802 | 803 | async move { 804 | let mut inserted = false; 805 | 806 | { 807 | let semaphore = streams.lock().semaphore.clone(); 808 | trace!("Awaiting semaphore permit"); 809 | let sem_permit = match semaphore.acquire_owned().await { 810 | Ok(v) => v, 811 | Err(_) => return Ok(false), 812 | }; 813 | trace!("Semaphore permit acquired"); 814 | 815 | currently_connecting.fetch_add(1, Ordering::SeqCst); 816 | 817 | let mut streams = streams.lock(); 818 | let node_filter = node_filter.lock(); 819 | 820 | let connection_num = streams.mapping.len(); 821 | 822 | match streams.mapping.entry(remote_id) { 823 | Entry::Occupied(key) => { 824 | debug!( 825 | "We are already {} to remote peer {}!", 826 | if key.get().connection_state.is_connected() { 827 | "connected" 828 | } else { 829 | "connecting" 830 | }, 831 | remote_id 832 | ); 833 | } 834 | Entry::Vacant(vacant) => { 835 | if untrusted_peer && !node_filter.is_allowed(connection_num, remote_id) { 836 | trace!("rejecting peer {}", remote_id); 837 | } else { 838 | debug!("connecting to peer {} at {}", remote_id, addr); 839 | 840 | vacant.insert(PeerState { 841 | connection_state: PeerConnectionState::Connecting { connection_id }, 842 | _sem_permit: sem_permit, 843 | }); 844 | inserted = true; 845 | } 846 | } 847 | } 848 | } 849 | 850 | if !inserted { 851 | return Ok(false); 852 | } 853 | 854 | // Connecting to peer is a long running operation so we have to break the mutex lock. 
855 | let peer_res = async { 856 | let transport = TcpStream::connect(addr).await?; 857 | PeerStream::connect( 858 | transport, 859 | secret_key, 860 | remote_id, 861 | client_version, 862 | capability_set, 863 | port, 864 | ) 865 | .await 866 | } 867 | .await; 868 | 869 | let streams = streams.clone(); 870 | let mut streams_guard = streams.lock(); 871 | let PeerStreams { mapping, .. } = &mut *streams_guard; 872 | 873 | // Adopt the new connection if the peer has not been dropped or superseded by incoming connection. 874 | if let Entry::Occupied(mut peer_state) = mapping.entry(remote_id) { 875 | if !peer_state.get().connection_state.is_connected() { 876 | match peer_res { 877 | Ok(peer) => { 878 | assert_eq!(peer.remote_id(), remote_id); 879 | debug!("New peer connected: {}", remote_id); 880 | 881 | peer_state.get_mut().connection_state = 882 | PeerConnectionState::Connected(setup_peer_state( 883 | Arc::downgrade(&streams), 884 | capability_server, 885 | remote_id, 886 | peer, 887 | )); 888 | 889 | let _ = tx.send(()); 890 | return Ok(true); 891 | } 892 | Err(e) => { 893 | debug!("Peer {:?} disconnected with error: {}", remote_id, e); 894 | peer_state.remove(); 895 | return Err(e); 896 | } 897 | } 898 | } 899 | } 900 | 901 | Ok(false) 902 | } 903 | .instrument(span!( 904 | Level::DEBUG, 905 | "add peer", 906 | "remote_id={}", 907 | &*remote_id.to_string() 908 | )) 909 | } 910 | 911 | /// Returns the number of peers we're currently dialing 912 | pub fn dialing(&self) -> usize { 913 | self.currently_connecting.load(Ordering::SeqCst) 914 | } 915 | 916 | /// All peers 917 | pub fn num_peers(&self) -> usize { 918 | self.streams.lock().mapping.len() 919 | } 920 | } 921 | 922 | impl Deref for Swarm { 923 | type Target = C; 924 | 925 | fn deref(&self) -> &Self::Target { 926 | &*self.capability_server 927 | } 928 | } 929 | -------------------------------------------------------------------------------- /src/transport.rs: 
--------------------------------------------------------------------------------
use async_trait::async_trait;
use cidr::IpCidr;
use std::{fmt::Debug, net::SocketAddr};
use tokio::{
    io::{AsyncRead, AsyncWrite},
    net::{TcpListener, TcpStream},
};
use tracing::debug;

/// Bidirectional byte stream suitable as the wire for an RLPx session.
pub trait Transport: AsyncRead + AsyncWrite + Debug + Send + Unpin + 'static {
    /// Address of the remote endpoint, if known.
    // NOTE(review): the `<SocketAddr>` parameter was stripped by extraction;
    // restored from the `TcpStream::peer_addr` impl below.
    fn remote_addr(&self) -> Option<SocketAddr>;
}

impl Transport for TcpStream {
    fn remote_addr(&self) -> Option<SocketAddr> {
        // `peer_addr` can fail (e.g. socket already closed); map that to `None`.
        self.peer_addr().ok()
    }
}

/// Source of inbound transport connections.
#[async_trait]
pub trait TcpServer {
    /// Concrete connection type produced by this server.
    type Conn: Transport;

    /// Wait for and return the next inbound connection.
    async fn accept(&self) -> anyhow::Result<Self::Conn>;
}

#[async_trait]
impl TcpServer for TcpListener {
    type Conn = TcpStream;

    async fn accept(&self) -> anyhow::Result<Self::Conn> {
        // Drop the peer address; `Transport::remote_addr` can recover it.
        Ok(TcpListener::accept(self).await?.0)
    }
}

/// TCP listener that only admits peers whose address falls inside an
/// optional CIDR range; with no mask configured it accepts everyone.
pub struct TokioCidrListener {
    tcp_server: TcpListener,
    cidr_mask: Option<IpCidr>,
}

impl TokioCidrListener {
    pub fn new(tcp_server: TcpListener, cidr_mask: Option<IpCidr>) -> Self {
        Self {
            tcp_server,
            cidr_mask,
        }
    }
}

#[async_trait]
impl TcpServer for TokioCidrListener {
    type Conn = TcpStream;

    async fn accept(&self) -> anyhow::Result<Self::Conn> {
        // Loop until a connection inside the allowed range arrives.
        loop {
            let (node, remote_addr) = self.tcp_server.accept().await?;

            if let Some(cidr) = &self.cidr_mask {
                if !cidr.contains(&remote_addr.ip()) {
                    debug!(
                        "Ignoring connection request: {} is not in range {}",
                        remote_addr, cidr
                    );
                    continue;
                }
            }
            return Ok(node);
        }
    }
}
--------------------------------------------------------------------------------
/src/types.rs:
--------------------------------------------------------------------------------
use crate::{peer::DisconnectReason, util::hex_debug};
use arrayvec::ArrayString;
use async_trait::async_trait;
use auto_impl::auto_impl;
use
bytes::{Bytes, BytesMut}; 6 | use derive_more::Display; 7 | use educe::Educe; 8 | pub use ethereum_types::H512 as PeerId; 9 | use fastrlp::*; 10 | use std::{collections::HashMap, fmt::Debug, future::pending, net::SocketAddr, str::FromStr}; 11 | 12 | /// Record that specifies information necessary to connect to RLPx node 13 | #[derive(Clone, Copy, Debug)] 14 | pub struct NodeRecord { 15 | /// Node ID. 16 | pub id: PeerId, 17 | /// Address of RLPx TCP server. 18 | pub addr: SocketAddr, 19 | } 20 | 21 | impl FromStr for NodeRecord { 22 | type Err = Box; 23 | 24 | fn from_str(s: &str) -> Result { 25 | const PREFIX: &str = "enode://"; 26 | 27 | let (prefix, data) = s.split_at(PREFIX.len()); 28 | if prefix != PREFIX { 29 | return Err("Not an enode".into()); 30 | } 31 | 32 | let mut parts = data.split('@'); 33 | let id = parts.next().ok_or("Failed to read remote ID")?.parse()?; 34 | let addr = parts.next().ok_or("Failed to read address")?.parse()?; 35 | 36 | Ok(Self { id, addr }) 37 | } 38 | } 39 | 40 | #[derive(Clone, Copy, Debug, Display, PartialEq, Eq, Hash, PartialOrd, Ord)] 41 | pub struct CapabilityName(pub ArrayString<4>); 42 | 43 | impl Encodable for CapabilityName { 44 | fn encode(&self, out: &mut dyn BufMut) { 45 | self.0.as_bytes().encode(out) 46 | } 47 | fn length(&self) -> usize { 48 | self.0.as_bytes().length() 49 | } 50 | } 51 | 52 | impl Decodable for CapabilityName { 53 | fn decode(buf: &mut &[u8]) -> Result { 54 | Ok(Self( 55 | ArrayString::from( 56 | std::str::from_utf8(&BytesMut::decode(buf)?) 
57 | .map_err(|_| DecodeError::Custom("should be a UTF-8 string"))?, 58 | ) 59 | .map_err(|_| DecodeError::Custom("capability name is too long"))?, 60 | )) 61 | } 62 | } 63 | 64 | pub type CapabilityLength = usize; 65 | pub type CapabilityVersion = usize; 66 | 67 | #[derive(Clone, Debug, Copy, PartialEq, Eq)] 68 | /// Capability information 69 | pub struct CapabilityInfo { 70 | pub name: CapabilityName, 71 | pub version: CapabilityVersion, 72 | pub length: CapabilityLength, 73 | } 74 | 75 | impl CapabilityInfo { 76 | pub fn new(CapabilityId { name, version }: CapabilityId, length: CapabilityLength) -> Self { 77 | Self { 78 | name, 79 | version, 80 | length, 81 | } 82 | } 83 | } 84 | 85 | #[derive(Clone, Debug, Display, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] 86 | #[display(fmt = "{}/{}", name, version)] 87 | pub struct CapabilityId { 88 | pub name: CapabilityName, 89 | pub version: CapabilityVersion, 90 | } 91 | 92 | impl From for CapabilityId { 93 | fn from(CapabilityInfo { name, version, .. }: CapabilityInfo) -> Self { 94 | Self { name, version } 95 | } 96 | } 97 | 98 | #[derive(Clone, Debug, Display)] 99 | pub enum InboundEvent { 100 | #[display( 101 | fmt = "disconnect/{}", 102 | "reason.map(|r| r.to_string()).unwrap_or_else(|| \"(no reason)\".to_string())" 103 | )] 104 | Disconnect { reason: Option }, 105 | #[display(fmt = "message/{}/{}", capability_name, "message.id")] 106 | Message { 107 | capability_name: CapabilityName, 108 | message: Message, 109 | }, 110 | } 111 | 112 | #[derive(Clone, Debug)] 113 | pub enum OutboundEvent { 114 | Disconnect { 115 | reason: DisconnectReason, 116 | }, 117 | Message { 118 | capability_name: CapabilityName, 119 | message: Message, 120 | }, 121 | } 122 | 123 | #[async_trait] 124 | #[auto_impl(&, Box, Arc)] 125 | pub trait CapabilityServer: Send + Sync + 'static { 126 | /// Should be used to set up relevant state for the peer. 
127 | fn on_peer_connect(&self, peer: PeerId, caps: HashMap); 128 | /// Called on the next event for peer. 129 | async fn on_peer_event(&self, peer: PeerId, event: InboundEvent); 130 | /// Get the next event for peer. 131 | async fn next(&self, peer: PeerId) -> OutboundEvent; 132 | } 133 | 134 | #[async_trait] 135 | impl CapabilityServer for () { 136 | fn on_peer_connect(&self, _: PeerId, _: HashMap) {} 137 | 138 | async fn on_peer_event(&self, _: PeerId, _: InboundEvent) {} 139 | 140 | async fn next(&self, _: PeerId) -> OutboundEvent { 141 | pending().await 142 | } 143 | } 144 | 145 | #[derive(Clone, Educe)] 146 | #[educe(Debug)] 147 | pub struct Message { 148 | pub id: usize, 149 | #[educe(Debug(method = "hex_debug"))] 150 | pub data: Bytes, 151 | } 152 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | use super::types::*; 2 | use ethereum_types::H256; 3 | use hmac::{Hmac, Mac}; 4 | use secp256k1::PublicKey; 5 | use sha2::Sha256; 6 | use sha3::{Digest, Keccak256}; 7 | use std::fmt::{self, Formatter}; 8 | 9 | pub fn keccak256(data: &[u8]) -> H256 { 10 | H256::from(Keccak256::digest(data).as_ref()) 11 | } 12 | 13 | pub fn sha256(data: &[u8]) -> H256 { 14 | H256::from(Sha256::digest(data).as_ref()) 15 | } 16 | 17 | pub fn hmac_sha256(key: &[u8], input: &[&[u8]], auth_data: &[u8]) -> H256 { 18 | let mut hmac = Hmac::::new_from_slice(key).unwrap(); 19 | for input in input { 20 | hmac.update(input); 21 | } 22 | hmac.update(auth_data); 23 | H256::from_slice(&*hmac.finalize().into_bytes()) 24 | } 25 | 26 | pub fn pk2id(pk: &PublicKey) -> PeerId { 27 | PeerId::from_slice(&pk.serialize_uncompressed()[1..]) 28 | } 29 | 30 | pub fn id2pk(id: PeerId) -> Result { 31 | let mut s = [0_u8; 65]; 32 | s[0] = 4; 33 | s[1..].copy_from_slice(id.as_bytes()); 34 | PublicKey::from_slice(&s) 35 | } 36 | 37 | pub fn hex_debug>(s: &T, f: &mut Formatter) -> 
fmt::Result { 38 | f.write_str(&hex::encode(&s)) 39 | } 40 | 41 | #[cfg(test)] 42 | mod tests { 43 | use super::*; 44 | use secp256k1::{SecretKey, SECP256K1}; 45 | 46 | #[test] 47 | fn pk2id2pk() { 48 | let prikey = SecretKey::new(&mut secp256k1::rand::thread_rng()); 49 | let pubkey = PublicKey::from_secret_key(SECP256K1, &prikey); 50 | assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap()); 51 | } 52 | } 53 | --------------------------------------------------------------------------------