├── .github └── workflows │ └── rust.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md └── src ├── builder.rs ├── bundle ├── mod.rs └── pool.rs ├── lib.rs ├── main.rs └── rpc.rs /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | RUSTFLAGS: "-Dwarnings" 12 | 13 | jobs: 14 | build: 15 | 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: fmt 21 | run: cargo fmt --all --check 22 | - name: build 23 | run: cargo build --verbose --all-targets --all-features --all 24 | - name: clippy 25 | run: cargo clippy --verbose --all --all-features 26 | - name: test 27 | run: cargo test --verbose --all 28 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "evangelion" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | beacon-api-client = { git = "https://github.com/ralexstokes/beacon-api-client", rev = "d838d93" } 8 | clap = "4.4.0" 9 | ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "2bcb975" } 10 | ethers = "2.0.8" 11 | eyre = "0.6.8" 12 | futures-util = "0.3.28" 13 | jsonrpsee = { version = "0.20.0", features = ["macros", "server"] } 14 | mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "db54c2d", features = ["serde"] } 15 | rand = "0.8.5" 16 | reth = { git = "https://github.com/paradigmxyz/reth.git", package = "reth", version = "0.1.0-alpha.8" } 17 | reth-interfaces = { git 
= "https://github.com/paradigmxyz/reth.git", package = "reth-interfaces", version = "0.1.0-alpha.8" } 18 | reth-payload-builder = { git = "https://github.com/paradigmxyz/reth.git", package = "reth-payload-builder", version = "0.1.0-alpha.8" } 19 | reth-primitives = { git = "https://github.com/paradigmxyz/reth.git", package = "reth-primitives", version = "0.1.0-alpha.8" } 20 | reth-provider = { git = "https://github.com/paradigmxyz/reth.git", package = "reth-provider", version = "0.1.0-alpha.8" } 21 | reth-revm = { git = "https://github.com/paradigmxyz/reth.git", package = "reth-revm", version = "0.1.0-alpha.8" } 22 | reth-revm-primitives = { git = "https://github.com/paradigmxyz/reth.git", package = "reth-revm-primitives", version = "0.1.0-alpha.8" } 23 | reth-rpc = { git = "https://github.com/paradigmxyz/reth.git", package = "reth-rpc", version = "0.1.0-alpha.8" } 24 | reth-rpc-types = { git = "https://github.com/paradigmxyz/reth.git", package = "reth-rpc-types", version = "0.1.0-alpha.8" } 25 | reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth.git", package = "reth-transaction-pool", version = "0.1.0-alpha.8" } 26 | serde = { version = "1.0.187", features = ["std", "derive"] } 27 | ssz_rs = "0.9.0" 28 | tokio = "1.29.1" 29 | tokio-stream = { version = "0.1.14", features = ["sync"] } 30 | tokio-util = { version = "0.7.8", features = ["time"] } 31 | tracing = "0.1.37" 32 | url = "2.4.0" 33 | 34 | [patch.crates-io] 35 | revm = { git = "https://github.com/bluealloy/revm/", branch = "release/v25" } 36 | revm-primitives = { git = "https://github.com/bluealloy/revm/", branch = "release/v25" } 37 | 38 | [dev-dependencies] 39 | reth-provider = { git = "https://github.com/paradigmxyz/reth.git", package = "reth-provider", version = "0.1.0-alpha.8", features = ["test-utils"] } 40 | -------------------------------------------------------------------------------- /LICENSE-APACHE: 
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Jacob Kaufmann 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # evangelion 2 | 3 | a prototype block builder for ethereum 4 | -------------------------------------------------------------------------------- /src/builder.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{HashMap, HashSet, VecDeque}; 2 | use std::future::Future; 3 | use std::matches; 4 | use std::pin::Pin; 5 | use std::sync::{ 6 | atomic::{self, AtomicBool}, 7 | Arc, Mutex, 8 | }; 9 | use std::task::{Context, Poll}; 10 | use std::time::{Duration, SystemTime, UNIX_EPOCH}; 11 | 12 | use crate::bundle::{pool::BundlePool, Bundle, BundleCompact, BundleId}; 13 | 14 | use ethers::{ 15 | signers::{LocalWallet, Signer}, 16 | types::{ 17 | transaction::{ 18 | eip1559::Eip1559TransactionRequest, eip2718::TypedTransaction, 19 | eip2930::AccessList as EthersAccessList, 20 | }, 21 | Bytes as EthersBytes, NameOrAddress, 22 | }, 23 | }; 24 | use futures_util::{stream::Fuse, FutureExt, Stream, StreamExt}; 25 | use reth_interfaces::Error as RethError; 26 | use reth_payload_builder::{ 27 | error::PayloadBuilderError, BuiltPayload, KeepPayloadJobAlive, PayloadBuilderAttributes, 28 | PayloadJob, PayloadJobGenerator, 29 | }; 30 | use reth_primitives::AccessListItem; 31 | use reth_primitives::{ 32 | constants::{BEACON_NONCE, EMPTY_OMMER_ROOT}, 33 | proofs, AccessList, Block, BlockNumber, Bytes, ChainSpec, Header, IntoRecoveredTransaction, 34 | Receipt, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, U256, 35 | }; 36 | use reth_provider::{ 37 | 
BlockReaderIdExt, CanonStateNotification, PostState, StateProvider, StateProviderFactory, 38 | }; 39 | use reth_revm::{ 40 | access_list::AccessListInspector, 41 | database::State, 42 | env::tx_env_with_recovered, 43 | executor::{ 44 | commit_state_changes, increment_account_balance, post_block_withdrawals_balance_increments, 45 | }, 46 | into_reth_log, 47 | revm::{ 48 | db::{CacheDB, DatabaseRef}, 49 | precompile::{Precompiles, SpecId as PrecompileSpecId}, 50 | primitives::{BlockEnv, CfgEnv, EVMError, Env, InvalidTransaction, ResultAndState, B160}, 51 | EVM, 52 | }, 53 | }; 54 | use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; 55 | use tokio::time::Interval; 56 | use tokio::{ 57 | sync::{broadcast, mpsc, oneshot}, 58 | task, 59 | time::{sleep, Sleep}, 60 | }; 61 | use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}; 62 | use tokio_util::time::DelayQueue; 63 | 64 | struct UnpackagedPayload { 65 | attributes: PayloadBuilderAttributes, 66 | block_env: BlockEnv, 67 | state: Arc>, 68 | post_state: PostState, 69 | extra_data: Bytes, 70 | txs: Vec, 71 | bundles: HashSet, 72 | cumulative_gas_used: u64, 73 | proposer_payment: U256, 74 | } 75 | 76 | impl UnpackagedPayload { 77 | pub fn package(self) -> Result { 78 | let base_fee = self.block_env.basefee.to::(); 79 | let block_num = self.block_env.number.to::(); 80 | let block_gas_limit: u64 = self.block_env.gas_limit.try_into().unwrap_or(u64::MAX); 81 | 82 | // compute accumulators 83 | let receipts_root = self.post_state.receipts_root(block_num); 84 | let logs_bloom = self.post_state.logs_bloom(block_num); 85 | let transactions_root = proofs::calculate_transaction_root(&self.txs); 86 | let withdrawals_root = proofs::calculate_withdrawals_root(&self.attributes.withdrawals); 87 | let state_root = self.state.state().state_root(self.post_state)?; 88 | 89 | let header = Header { 90 | parent_hash: self.attributes.parent, 91 | ommers_hash: EMPTY_OMMER_ROOT, 92 | beneficiary: 
self.block_env.coinbase, 93 | state_root, 94 | transactions_root, 95 | receipts_root, 96 | withdrawals_root: Some(withdrawals_root), 97 | logs_bloom, 98 | difficulty: U256::ZERO, 99 | number: block_num, 100 | gas_limit: block_gas_limit, 101 | gas_used: self.cumulative_gas_used, 102 | timestamp: self.attributes.timestamp, 103 | mix_hash: self.attributes.prev_randao, 104 | nonce: BEACON_NONCE, 105 | base_fee_per_gas: Some(base_fee), 106 | blob_gas_used: None, 107 | excess_blob_gas: None, 108 | extra_data: self.extra_data, 109 | parent_beacon_block_root: None, 110 | }; 111 | 112 | let block = Block { 113 | header, 114 | body: self.txs, 115 | ommers: vec![], 116 | withdrawals: Some(self.attributes.withdrawals.clone()), 117 | }; 118 | let block = block.seal_slow(); 119 | 120 | let payload = BuiltPayload::new(self.attributes.id, block, self.proposer_payment); 121 | let payload = Payload { 122 | inner: Arc::new(payload), 123 | bundles: self.bundles, 124 | }; 125 | 126 | Ok(payload) 127 | } 128 | } 129 | 130 | struct Payload { 131 | inner: Arc, 132 | bundles: HashSet, 133 | } 134 | 135 | #[derive(Clone, Debug)] 136 | struct PayloadAttributes { 137 | inner: PayloadBuilderAttributes, 138 | extra_data: Bytes, 139 | wallet: LocalWallet, 140 | } 141 | 142 | #[derive(Clone, Debug)] 143 | struct JobConfig { 144 | attributes: PayloadAttributes, 145 | parent: Arc, 146 | chain: Arc, 147 | deadline: Duration, 148 | interval: Duration, 149 | } 150 | 151 | #[derive(Clone, Debug, Default)] 152 | pub struct Cancel(Arc); 153 | 154 | impl Cancel { 155 | pub fn cancel(&self) { 156 | self.0.store(true, atomic::Ordering::Relaxed) 157 | } 158 | 159 | pub fn is_cancelled(&self) -> bool { 160 | self.0.load(atomic::Ordering::Relaxed) 161 | } 162 | } 163 | 164 | /// a build job scoped to `config` 165 | pub struct Job { 166 | attributes: PayloadAttributes, 167 | parent: Arc, 168 | chain: Arc, 169 | deadline: Pin>, 170 | interval: Interval, 171 | cancel: Cancel, 172 | client: Arc, 173 | pool: Arc, 
174 | bundles: HashMap, 175 | incoming: Fuse>, 176 | invalidated: Fuse>, 177 | built_payloads: Vec, 178 | pending_payloads: VecDeque>>, 179 | } 180 | 181 | impl Job { 182 | fn new>( 183 | config: JobConfig, 184 | cancel: Cancel, 185 | client: Arc, 186 | pool: Arc, 187 | bundles: I, 188 | incoming: Fuse>, 189 | invalidated: Fuse>, 190 | ) -> Self { 191 | let bundles = bundles 192 | .into_iter() 193 | .map(|bundle| (bundle.id, BundleCompact(bundle.txs))) 194 | .collect(); 195 | let built_payloads = Vec::new(); 196 | let pending_payloads = VecDeque::new(); 197 | 198 | let deadline = Box::pin(sleep(config.deadline)); 199 | let interval = tokio::time::interval(config.interval); 200 | 201 | Self { 202 | attributes: config.attributes, 203 | parent: config.parent, 204 | chain: config.chain, 205 | deadline, 206 | interval, 207 | cancel, 208 | client, 209 | pool, 210 | bundles, 211 | invalidated, 212 | incoming, 213 | built_payloads, 214 | pending_payloads, 215 | } 216 | } 217 | } 218 | 219 | impl Future for Job 220 | where 221 | Client: StateProviderFactory + 'static, 222 | Pool: TransactionPool + 'static, 223 | { 224 | type Output = Result<(), PayloadBuilderError>; 225 | 226 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 227 | let this = self.get_mut(); 228 | 229 | // check whether the job was cancelled 230 | if this.cancel.is_cancelled() { 231 | return Poll::Ready(Ok(())); 232 | } 233 | 234 | // check whether the deadline for the job expired 235 | if this.deadline.as_mut().poll(cx).is_ready() { 236 | return Poll::Ready(Ok(())); 237 | } 238 | 239 | // incorporate new incoming bundles 240 | let mut num_incoming_bundles = 0; 241 | let mut incoming = Pin::new(&mut this.incoming); 242 | loop { 243 | match incoming.as_mut().poll_next(cx) { 244 | Poll::Ready(Some(Ok((id, block_num, bundle)))) => { 245 | // if the bundle is not eligible for the job, then skip the bundle 246 | if block_num != this.parent.number + 1 { 247 | continue; 248 | } 249 | 250 | 
this.bundles.insert(id, bundle); 251 | num_incoming_bundles += 1; 252 | } 253 | Poll::Ready(Some(Err(BroadcastStreamRecvError::Lagged(_skipped)))) => continue, 254 | Poll::Ready(None) | Poll::Pending => break, 255 | } 256 | } 257 | 258 | // remove any invalidated bundles 259 | let mut expired_bundles = HashSet::new(); 260 | let mut invalidated = Pin::new(&mut this.invalidated); 261 | loop { 262 | match invalidated.as_mut().poll_next(cx) { 263 | Poll::Ready(Some(Ok(exp))) => { 264 | this.bundles.remove(&exp); 265 | expired_bundles.insert(exp); 266 | } 267 | Poll::Ready(Some(Err(BroadcastStreamRecvError::Lagged(_skipped)))) => continue, 268 | Poll::Ready(None) | Poll::Pending => break, 269 | } 270 | } 271 | 272 | // remove all payloads that contain an expired bundle 273 | this.built_payloads 274 | .retain(|payload| payload.bundles.is_disjoint(&expired_bundles)); 275 | 276 | // check the build interval 277 | let interval_reached = this.interval.poll_tick(cx).is_ready(); 278 | 279 | // if there are any expired or new bundles, or the build interval was reached, then build a 280 | // new payload 281 | if !expired_bundles.is_empty() || num_incoming_bundles > 0 || interval_reached { 282 | // NOTE: here we greedily select bundles that do not "obviously conflict" with 283 | // previously selected bundles. you could do far more sophisticated things here. 
284 | let mut bundles: Vec<(BundleId, BundleCompact)> = vec![]; 285 | for (id, bundle) in &this.bundles { 286 | if !bundles.iter().any(|(_, b)| b.conflicts(bundle)) { 287 | bundles.push((*id, bundle.clone())); 288 | } 289 | } 290 | 291 | let attributes = this.attributes.clone(); 292 | let parent = Arc::clone(&this.parent); 293 | let chain = Arc::clone(&this.chain); 294 | let client = Arc::clone(&this.client); 295 | let pool = Arc::clone(&this.pool); 296 | let pending = task::spawn_blocking(move || { 297 | // TODO: come back to this 298 | build(attributes, parent, chain, client, pool, bundles) 299 | }); 300 | 301 | this.pending_payloads.push_back(pending); 302 | } 303 | 304 | // poll all pending payloads 305 | let payload_id = this.attributes.inner.payload_id(); 306 | while let Some(mut pending) = this.pending_payloads.pop_front() { 307 | match pending.poll_unpin(cx) { 308 | Poll::Ready(payload) => { 309 | match payload { 310 | Ok(Ok(payload)) => { 311 | // cache the built payload 312 | this.built_payloads.push(payload); 313 | } 314 | Ok(Err(err)) => { 315 | tracing::warn!(payload = %payload_id, "payload build failed {err}") 316 | } 317 | Err(err) => { 318 | tracing::error!(payload = %payload_id, "payload task failed to complete {err}") 319 | } 320 | } 321 | } 322 | Poll::Pending => this.pending_payloads.push_back(pending), 323 | } 324 | } 325 | 326 | // keep payloads sorted 327 | this.built_payloads 328 | .sort_by_key(|payload| payload.inner.fees()); 329 | 330 | Poll::Pending 331 | } 332 | } 333 | 334 | pub struct PayloadTask { 335 | best_payload: Option>, 336 | empty_payload: Option>>, 337 | } 338 | 339 | impl Future for PayloadTask { 340 | type Output = Result, PayloadBuilderError>; 341 | 342 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 343 | let this = self.get_mut(); 344 | 345 | if let Some(best) = this.best_payload.take() { 346 | return Poll::Ready(Ok(best)); 347 | } 348 | 349 | let mut empty = this.empty_payload.take().unwrap(); 350 | match 
empty.poll_unpin(cx) { 351 | Poll::Ready(Ok(res)) => Poll::Ready(res.map(|p| p.inner)), 352 | Poll::Ready(Err(err)) => Poll::Ready(Err(err.into())), 353 | Poll::Pending => { 354 | this.empty_payload = Some(empty); 355 | Poll::Pending 356 | } 357 | } 358 | } 359 | } 360 | 361 | impl PayloadJob for Job 362 | where 363 | Client: StateProviderFactory + Send + Sync + 'static, 364 | Pool: TransactionPool + 'static, 365 | { 366 | type ResolvePayloadFuture = PayloadTask; 367 | 368 | fn best_payload(&self) -> Result, PayloadBuilderError> { 369 | if let Some(best) = self.built_payloads.first() { 370 | return Ok(Arc::clone(&best.inner)); 371 | } 372 | 373 | let empty = build( 374 | self.attributes.clone(), 375 | Arc::clone(&self.parent), 376 | Arc::clone(&self.chain), 377 | Arc::clone(&self.client), 378 | Arc::new(NoopTransactionPool::default()), 379 | None, 380 | )?; 381 | Ok(empty.inner) 382 | } 383 | 384 | fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { 385 | let best_payload = self.built_payloads.first().map(|p| p.inner.clone()); 386 | 387 | // if there is no best payload, then build an empty payload 388 | let empty_payload = if best_payload.is_none() { 389 | let (tx, rx) = oneshot::channel(); 390 | let attributes = self.attributes.clone(); 391 | let parent = Arc::clone(&self.parent); 392 | let chain = Arc::clone(&self.chain); 393 | let client = Arc::clone(&self.client); 394 | let pool = Arc::new(NoopTransactionPool::default()); 395 | task::spawn_blocking(move || { 396 | let payload = build(attributes, parent, chain, client, pool, None); 397 | let _ = tx.send(payload); 398 | }); 399 | 400 | Some(rx) 401 | } else { 402 | None 403 | }; 404 | 405 | ( 406 | PayloadTask { 407 | best_payload, 408 | empty_payload, 409 | }, 410 | KeepPayloadJobAlive::No, 411 | ) 412 | } 413 | } 414 | 415 | #[derive(Clone, Debug)] 416 | pub struct BuilderConfig { 417 | pub deadline: Duration, 418 | pub interval: Duration, 419 | pub extra_data: Bytes, 420 | pub wallet: 
LocalWallet,
}

/// Block builder that merges bundles with mempool transactions and pays the
/// block's profit (minus the payment tx gas) to the proposer's fee recipient.
///
/// NOTE(review): the generic parameters in this file were stripped by
/// extraction; they are reconstructed here as `<Client, Pool>` etc. — confirm
/// against the original source.
pub struct Builder<Client, Pool> {
    /// chain spec used to derive EVM cfg/block environments
    chain: Arc<ChainSpec>,
    /// total wall-clock budget for a payload job
    deadline: Duration,
    /// interval between successive build attempts within a job
    interval: Duration,
    /// wallet used as coinbase and to sign the proposer payment tx
    wallet: LocalWallet,
    /// extra data stamped into built blocks
    extra_data: Bytes,
    client: Arc<Client>,
    pool: Arc<Pool>,
    /// shared pool of candidate bundles
    bundle_pool: Arc<Mutex<BundlePool>>,
    /// fan-out of newly arrived bundles to in-flight jobs
    incoming: broadcast::Sender<(BundleId, BlockNumber, BundleCompact)>,
    /// fan-out of invalidated/expired bundle IDs to in-flight jobs
    invalidated: broadcast::Sender<BundleId>,
    /// announces each new payload job (attributes + cancellation handle)
    jobs: mpsc::UnboundedSender<(PayloadBuilderAttributes, Cancel)>,
}

impl<Client, Pool> Builder<Client, Pool>
where
    Client: StateProviderFactory + Unpin,
    Pool: TransactionPool + Unpin,
{
    /// Constructs a builder from `config` over `chain`, backed by the state
    /// provider `client` and transaction `pool`. New payload jobs are
    /// announced on `jobs`.
    pub fn new(
        config: BuilderConfig,
        chain: ChainSpec,
        client: Client,
        pool: Pool,
        jobs: mpsc::UnboundedSender<(PayloadBuilderAttributes, Cancel)>,
    ) -> Self {
        let chain = Arc::new(chain);
        let client = Arc::new(client);
        let pool = Arc::new(pool);

        // receivers are created lazily via `subscribe()`; the initial ones
        // returned here are intentionally dropped
        let (incoming, _) = broadcast::channel(256);
        let (invalidated, _) = broadcast::channel(256);

        let bundle_pool = BundlePool::default();
        let bundle_pool = Arc::new(Mutex::new(bundle_pool));

        Self {
            chain,
            deadline: config.deadline,
            interval: config.interval,
            wallet: config.wallet,
            extra_data: config.extra_data,
            client,
            jobs,
            pool,
            bundle_pool,
            incoming,
            invalidated,
        }
    }

    /// spawns the builder maintenance task: ticks the bundle pool, ingests
    /// new bundles, tracks bundle expirations, and reacts to canonical state
    /// events, notifying in-flight jobs through the broadcast channels
    pub fn start(
        &self,
        mut bundle_flow: mpsc::UnboundedReceiver<Bundle>,
        mut state_events: mpsc::UnboundedReceiver<CanonStateNotification>,
    ) {
        let bundle_pool = Arc::clone(&self.bundle_pool);
        let invalidated = self.invalidated.clone();
        let incoming = self.incoming.clone();

        tokio::spawn(async move {
            // bundle refresh interval
            let mut interval = tokio::time::interval(Duration::from_secs(1));

            // track bundle expirations
            let mut bundle_expirations = DelayQueue::new();

            loop {
                tokio::select! {
                    _ = interval.tick() => {
                        // evict bundles whose eligibility window has passed
                        bundle_pool.lock().unwrap().tick(SystemTime::now());
                    }
                    Some(bundle) = bundle_flow.recv() => {
                        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();

                        // if the bundle already expired, then ignore it
                        if *bundle.eligibility.end() <= now {
                            continue;
                        }

                        // track the timeout of the bundle (end > now is
                        // guaranteed by the check above)
                        let timeout = Duration::from_secs(bundle.eligibility.end() - now);
                        bundle_expirations.insert(bundle.id, timeout);

                        bundle_pool.lock().unwrap().0.insert(bundle.clone());

                        // notify jobs about new bundle
                        //
                        // NOTE: you could create metadata (e.g. access list) about the bundle here
                        // or within each job
                        let Bundle { id, txs, block_num, .. } = bundle;
                        let _ = incoming.send((id, block_num, BundleCompact(txs)));
                    }
                    Some(expired) = bundle_expirations.next() => {
                        // notify jobs about expired bundle
                        let _ = invalidated.send(expired.into_inner());
                    }
                    Some(event) = state_events.recv() => {
                        // maintain the bundle pool based on state events. notify jobs about
                        // invalidated bundles.
                        let removed = bundle_pool.lock().unwrap().maintain(event);
                        for bundle in removed {
                            let _ = invalidated.send(bundle);
                        }
                    }
                }
            }
        });
    }
}

impl<Client, Pool> PayloadJobGenerator for Builder<Client, Pool>
where
    Client: StateProviderFactory + BlockReaderIdExt + 'static,
    Pool: TransactionPool + 'static,
{
    type Job = Job<Client, Pool>;

    /// Creates a payload job for `attributes`, refusing to build on anything
    /// but the current chain tip.
    fn new_payload_job(
        &self,
        attributes: PayloadBuilderAttributes,
    ) -> Result<Self::Job, PayloadBuilderError> {
        // retrieve the latest block
        let latest = self
            .client
            .latest_header()?
            .ok_or_else(|| PayloadBuilderError::MissingParentBlock(attributes.parent))?;

        // only build on the latest block
        if attributes.parent != latest.hash() {
            return Err(PayloadBuilderError::Internal(RethError::Custom(
                "must build on latest block".into(),
            )));
        }

        let attributes = PayloadAttributes {
            inner: attributes,
            extra_data: self.extra_data.clone(),
            wallet: self.wallet.clone(),
        };

        let parent = Arc::new(latest.header.seal_slow());
        let config = JobConfig {
            attributes,
            chain: Arc::clone(&self.chain),
            parent,
            deadline: self.deadline,
            interval: self.interval,
        };

        let cancel = Cancel::default();

        // collect eligible bundles from the pool
        //
        // NOTE: it may make more sense to use `attributes.timestamp` here in call to `eligible`
        let bundles = self
            .bundle_pool
            .lock()
            .unwrap()
            .eligible(config.parent.number, SystemTime::now());

        let incoming = BroadcastStream::new(self.incoming.subscribe()).fuse();
        let invalidated = BroadcastStream::new(self.invalidated.subscribe()).fuse();

        // alert about new payload job
        let _ = self
            .jobs
            .send((config.attributes.inner.clone(), cancel.clone()));

        Ok(Job::new(
            config,
            cancel,
            Arc::clone(&self.client),
            Arc::clone(&self.pool),
            bundles,
            incoming,
            invalidated,
        ))
    }
}

/// Builds a payload on top of `parent`: loads the parent state from `client`
/// and delegates to [`build_on_state`], then packages the result.
///
/// NOTE(review): the return type was stripped by extraction; `Payload` is
/// inferred from `UnpackagedPayload::package()` — confirm against the original.
fn build<Client, P, I>(
    attributes: PayloadAttributes,
    parent: Arc<SealedHeader>,
    chain: Arc<ChainSpec>,
    client: Arc<Client>,
    pool: P,
    bundles: I,
) -> Result<Payload, PayloadBuilderError>
where
    Client: StateProviderFactory,
    P: TransactionPool,
    I: IntoIterator<Item = (BundleId, BundleCompact)>,
{
    let state = client.state_by_block_hash(parent.hash)?;
    let state = State::new(state);
    let unpackaged_payload = build_on_state(attributes, parent, chain, state, pool, bundles)?;

    unpackaged_payload.package()
}

/// Executes bundles, then mempool transactions, then an end-of-block payment
/// of the accrued coinbase profit to the proposer's fee recipient, and
/// finally applies post-block withdrawals. Returns the unpackaged payload.
fn build_on_state<S, P, I>(
    attributes: PayloadAttributes,
    parent: Arc<SealedHeader>,
    chain: Arc<ChainSpec>,
    state: State<S>,
    pool: P,
    bundles: I,
) -> Result<UnpackagedPayload<S>, PayloadBuilderError>
where
    S: StateProvider,
    P: TransactionPool,
    I: IntoIterator<Item = (BundleId, BundleCompact)>,
{
    let state = Arc::new(state);
    let mut db = CacheDB::new(Arc::clone(&state));

    let mut post_state = PostState::default();

    let (cfg_env, mut block_env) = attributes.inner.cfg_and_block_env(&chain, &parent);

    // mark the builder as the coinbase in the block env
    block_env.coinbase = attributes.wallet.address().into();

    let block_num = block_env.number.to::<u64>();
    let base_fee = block_env.basefee.to::<u64>();
    let block_gas_limit: u64 = block_env.gas_limit.try_into().unwrap_or(u64::MAX);

    // reserve gas for end-of-block proposer payment
    const PROPOSER_PAYMENT_GAS_ALLOWANCE: u64 = 21000;
    // saturate so a pathologically small block gas limit cannot underflow
    let execution_gas_limit = block_gas_limit.saturating_sub(PROPOSER_PAYMENT_GAS_ALLOWANCE);

    let mut coinbase_payment = U256::ZERO;
    let mut cumulative_gas_used = 0;
    let mut txs = Vec::new();
    let mut bundle_ids = HashSet::new();

    // execute bundles
    for (id, bundle) in bundles {
        // check gas for entire bundle
        let bundle_gas_limit: u64 = bundle.0.iter().map(|tx| tx.gas_limit()).sum();
        if cumulative_gas_used + bundle_gas_limit > execution_gas_limit {
            continue;
        }

        // clone the database, so that if the execution fails, then we can keep the state of the
        // database as if the execution was never attempted. currently, there is no way to roll
        // back the database state if the execution fails part-way through.
        //
        // NOTE: we will be able to refactor to do rollbacks after the following is merged:
        // https://github.com/paradigmxyz/reth/pull/3512
        let mut execution_db = db.clone();
        let mut execution_post_state = post_state.clone();

        let mut bundle = bundle.0;
        let execution = execute(
            &mut execution_db,
            &mut execution_post_state,
            &cfg_env,
            &block_env,
            cumulative_gas_used,
            bundle.clone(),
        );
        match execution {
            Ok(execution) => {
                coinbase_payment += execution.coinbase_payment;
                cumulative_gas_used = execution.cumulative_gas_used;
                txs.append(&mut bundle);

                // commit the speculative state only on success
                db = execution_db;
                post_state = execution_post_state;
            }
            // bundles are all-or-nothing: discard on any failure
            Err(_) => continue,
        }

        // add bundle to set of executed bundles
        bundle_ids.insert(id);
    }

    // execute transactions from mempool
    //
    // TODO: support more sophisticated mixtures of bundles and transactions
    let mut mempool_txs = pool.best_transactions_with_base_fee(base_fee);
    while let Some(tx) = mempool_txs.next() {
        // if we don't have sufficient gas for the transaction, then we skip past it. we also mark
        // the transaction invalid, which will remove any subsequent transactions that depend on it
        // from the iterator.
        if cumulative_gas_used + tx.gas_limit() > execution_gas_limit {
            mempool_txs.mark_invalid(&tx);
            continue;
        }

        let recovered_tx = tx.to_recovered_transaction();

        // NOTE: we do not need to clone the DB here as we do for bundle execution
        let execution = execute(
            &mut db,
            &mut post_state,
            &cfg_env,
            &block_env,
            cumulative_gas_used,
            Some(recovered_tx.clone()),
        );
        match execution {
            Ok(execution) => {
                coinbase_payment += execution.coinbase_payment;
                cumulative_gas_used = execution.cumulative_gas_used;
                txs.push(recovered_tx);
            }
            // if we have any transaction error other than the nonce being too low, then we mark
            // the transaction invalid
            Err(EVMError::Transaction(err)) => {
                if !matches!(err, InvalidTransaction::NonceTooLow { .. }) {
                    mempool_txs.mark_invalid(&tx);
                }
            }
            // treat any other errors as fatal
            Err(err) => return Err(PayloadBuilderError::EvmExecutionError(err)),
        }
    }

    // construct payment to proposer fee recipient.
    //
    // NOTE: we give the entire coinbase payment to the proposer, except for the gas that we need
    // to execute the transaction. if the coinbase payment cannot cover the gas cost to pay the
    // proposer, then we do not make any payment.
    let payment_tx_gas_cost = block_env.basefee * U256::from(PROPOSER_PAYMENT_GAS_ALLOWANCE);
    let proposer_payment = coinbase_payment.saturating_sub(payment_tx_gas_cost);
    if proposer_payment > U256::ZERO {
        let builder_acct = db
            .basic(block_env.coinbase)?
            .expect("builder account exists if coinbase payment non-zero");
        let payment_tx = proposer_payment_tx(
            &attributes.wallet,
            builder_acct.nonce,
            base_fee,
            cfg_env.chain_id.to::<u64>(),
            &attributes.inner.suggested_fee_recipient,
            proposer_payment,
        );

        // execute payment to proposer fee recipient
        //
        // if the payment transaction fails, then the entire payload build fails
        let execution = execute(
            &mut db,
            &mut post_state,
            &cfg_env,
            &block_env,
            cumulative_gas_used,
            Some(payment_tx.clone()),
        )
        .map_err(PayloadBuilderError::EvmExecutionError)?;
        cumulative_gas_used = execution.cumulative_gas_used;
        txs.push(payment_tx);
    }

    // NOTE: here we assume post-shanghai
    let balance_increments = post_block_withdrawals_balance_increments(
        &chain,
        attributes.inner.timestamp,
        &attributes.inner.withdrawals,
    );
    for (address, increment) in balance_increments {
        increment_account_balance(&mut db, &mut post_state, block_num, address, increment)?;
    }

    Ok(UnpackagedPayload {
        attributes: attributes.inner,
        block_env,
        state,
        post_state,
        extra_data: attributes.extra_data,
        txs: txs.into_iter().map(|tx| tx.into_signed()).collect(),
        bundles: bundle_ids,
        cumulative_gas_used,
        proposer_payment,
    })
}

/// Result of executing a sequence of transactions against the build state.
#[derive(Clone, Debug)]
struct Execution {
    #[allow(dead_code)]
    access_list: AccessList,
    cumulative_gas_used: u64,
    coinbase_payment: U256,
}

/// Executes `txs` in order against `db`, committing state changes and
/// receipts into `post_state`. Returns the aggregate access list, the updated
/// cumulative gas, and the coinbase balance delta across all `txs`.
///
/// NOTE(review): the error type was stripped by extraction; `EVMError<RethError>`
/// is inferred from the `EVMError::Database` mapping below — confirm.
fn execute<S, I>(
    db: &mut CacheDB<Arc<State<S>>>,
    post_state: &mut PostState,
    cfg_env: &CfgEnv,
    block_env: &BlockEnv,
    mut cumulative_gas_used: u64,
    txs: I,
) -> Result<Execution, EVMError<RethError>>
where
    S: StateProvider,
    I: IntoIterator<Item = TransactionSignedEcRecovered>,
{
    let block_num = block_env.number.to::<u64>();

    // NOTE: the `AccessListInspector` does not always include the `from` (i.e. sender) and `to`
    // (i.e. recipient) for a transaction, so we ensure that those values are included
    let mut access_list_base = vec![];

    let mut inspector = AccessListInspector::default();

    // determine the initial balance of the account at the coinbase address
    let coinbase_acct = db.basic(block_env.coinbase).map_err(EVMError::Database)?;
    let initial_coinbase_balance = coinbase_acct.map_or(U256::ZERO, |acct| acct.balance);

    for tx in txs {
        // construct EVM
        let tx_env = tx_env_with_recovered(&tx);
        let env = Env {
            cfg: cfg_env.clone(),
            block: block_env.clone(),
            tx: tx_env.clone(),
        };
        let mut evm = EVM::with_env(env);
        evm.database(&mut *db);

        // execute transaction
        let ResultAndState { result, state } = evm.inspect(&mut inspector)?;

        // commit changes to DB and post state
        commit_state_changes(db, post_state, block_num, state, true);

        cumulative_gas_used += result.gas_used();

        post_state.add_receipt(
            block_num,
            Receipt {
                tx_type: tx.tx_type(),
                success: result.is_success(),
                cumulative_gas_used,
                logs: result.logs().into_iter().map(into_reth_log).collect(),
            },
        );

        // add the `from` and `to` values for the transaction to the access list
        access_list_base.push(AccessListItem {
            address: tx.signer(),
            ..Default::default()
        });
        if let Some(to) = tx.to() {
            access_list_base.push(AccessListItem {
                address: to,
                ..Default::default()
            });
        }
    }

    // remove any precompiles from access list
    let mut access_list = inspector.into_access_list();
    let precompiles = precompiles(cfg_env);
    access_list
        .0
        .retain(|item| !precompiles.contains(&item.address));

    // ensure that all callers and recipients are included in the access list
    access_list.0.append(&mut access_list_base);

    let coinbase_payment =
        compute_coinbase_payment(&block_env.coinbase, initial_coinbase_balance, post_state);

    Ok(Execution {
        access_list,
        cumulative_gas_used,
        coinbase_payment,
    })
}

/// Computes the payment to `coinbase` based on `initial_balance` and `post_state`.
///
/// NOTE: If the ending balance is less than `initial_balance`, then we define the payment as zero.
/// If the account has been deleted, then we define the payment as zero. If the account was not
/// modified, then the payment will be zero.
fn compute_coinbase_payment(
    coinbase: &B160,
    initial_balance: U256,
    post_state: &PostState,
) -> U256 {
    match post_state.account(coinbase) {
        Some(Some(acct)) => acct.balance.saturating_sub(initial_balance),
        Some(None) => U256::ZERO,
        None => U256::ZERO,
    }
}

/// Constructs a transfer transaction to pay `amount` to `proposer`.
914 | fn proposer_payment_tx( 915 | wallet: &LocalWallet, 916 | nonce: u64, 917 | base_fee: u64, 918 | chain_id: u64, 919 | proposer: &B160, 920 | amount: U256, 921 | ) -> TransactionSignedEcRecovered { 922 | let tx = Eip1559TransactionRequest::new() 923 | .from(wallet.address()) 924 | .to(NameOrAddress::Address(ethers::types::H160::from_slice( 925 | proposer.as_bytes(), 926 | ))) 927 | .gas(21000) 928 | .max_fee_per_gas(base_fee) 929 | .max_priority_fee_per_gas(0) 930 | .value(amount) 931 | .data(EthersBytes::default()) 932 | .access_list(EthersAccessList::default()) 933 | .nonce(nonce) 934 | .chain_id(chain_id); 935 | let tx = TypedTransaction::Eip1559(tx); 936 | let signature = wallet.sign_transaction_sync(&tx).expect("can sign tx"); 937 | let tx_encoded = tx.rlp_signed(&signature); 938 | let tx = TransactionSigned::decode_enveloped(Bytes::from(tx_encoded.as_ref())) 939 | .expect("can decode tx"); 940 | tx.into_ecrecovered().expect("can recover tx signer") 941 | } 942 | 943 | fn precompiles(cfg_env: &CfgEnv) -> &Precompiles { 944 | Precompiles::new(PrecompileSpecId::from_spec_id(cfg_env.spec_id)) 945 | } 946 | 947 | #[cfg(test)] 948 | mod tests { 949 | use super::*; 950 | 951 | use ethers::{ 952 | signers::{LocalWallet, Signer}, 953 | types::{Eip1559TransactionRequest, NameOrAddress, H160 as EthersAddress}, 954 | }; 955 | use reth_primitives::{Address, Bytes, TxType}; 956 | use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; 957 | use reth_revm::revm::primitives::{specification::SpecId, B256}; 958 | 959 | const TRANSFER_GAS_LIMIT: u64 = 21000; 960 | 961 | fn env(coinbase: Address, basefee: U256) -> (CfgEnv, BlockEnv) { 962 | let cfg_env = CfgEnv { 963 | chain_id: U256::from(1), 964 | spec_id: SpecId::CANCUN, 965 | ..Default::default() 966 | }; 967 | let block_env = BlockEnv { 968 | number: U256::ZERO, 969 | coinbase, 970 | timestamp: U256::ZERO, 971 | difficulty: U256::ZERO, 972 | prevrandao: Some(B256::random()), 973 | basefee, 974 | gas_limit: 
U256::from(15000000), 975 | }; 976 | 977 | (cfg_env, block_env) 978 | } 979 | 980 | fn tx( 981 | from: &LocalWallet, 982 | to: EthersAddress, 983 | gas_limit: u64, 984 | max_fee_per_gas: u64, 985 | max_priority_fee_per_gas: u64, 986 | value: u64, 987 | nonce: u64, 988 | ) -> TransactionSignedEcRecovered { 989 | let tx = Eip1559TransactionRequest::new() 990 | .from(from.address()) 991 | .to(NameOrAddress::Address(to)) 992 | .gas(gas_limit) 993 | .max_fee_per_gas(max_fee_per_gas) 994 | .max_priority_fee_per_gas(max_priority_fee_per_gas) 995 | .value(value) 996 | .data(ethers::types::Bytes::default()) 997 | .access_list(ethers::types::transaction::eip2930::AccessList::default()) 998 | .nonce(nonce) 999 | .chain_id(from.chain_id()); 1000 | let tx = TypedTransaction::Eip1559(tx); 1001 | let signature = from.sign_transaction_sync(&tx).expect("can sign tx"); 1002 | let tx_encoded = tx.rlp_signed(&signature); 1003 | let tx = TransactionSigned::decode_enveloped(Bytes::from(tx_encoded.as_ref())) 1004 | .expect("can decode tx"); 1005 | tx.into_ecrecovered().expect("can recover tx signer") 1006 | } 1007 | 1008 | #[test] 1009 | fn execute_transfer() { 1010 | let state = MockEthProvider::default(); 1011 | 1012 | // add sender account to state 1013 | let sender_wallet = LocalWallet::new(&mut rand::thread_rng()); 1014 | let initial_sender_balance = 10000000; 1015 | let sender_nonce = 0; 1016 | let sender_account = ExtendedAccount::new(sender_nonce, U256::from(initial_sender_balance)); 1017 | state.add_account(sender_wallet.address().into(), sender_account); 1018 | 1019 | let state = State::new(state); 1020 | let mut db = CacheDB::new(Arc::new(state)); 1021 | let mut post_state = PostState::default(); 1022 | 1023 | // builder will be the coinbase (i.e. 
beneficiary) 1024 | let builder_wallet = LocalWallet::new(&mut rand::thread_rng()); 1025 | 1026 | let (cfg_env, block_env) = env(builder_wallet.address().into(), U256::ZERO); 1027 | 1028 | let receiver_wallet = LocalWallet::new(&mut rand::thread_rng()); 1029 | let transfer_amount = 100; 1030 | let max_priority_fee = 100; 1031 | let max_fee = block_env.basefee.to::() + max_priority_fee; 1032 | 1033 | // construct the transfer transaction for execution 1034 | let transfer_tx = tx( 1035 | &sender_wallet, 1036 | receiver_wallet.address(), 1037 | TRANSFER_GAS_LIMIT, 1038 | max_fee, 1039 | max_priority_fee, 1040 | transfer_amount, 1041 | sender_nonce, 1042 | ); 1043 | 1044 | // execute the transfer transaction 1045 | let execution = execute( 1046 | &mut db, 1047 | &mut post_state, 1048 | &cfg_env, 1049 | &block_env, 1050 | 0, 1051 | Some(transfer_tx), 1052 | ) 1053 | .expect("execution doesn't fail"); 1054 | let Execution { 1055 | access_list, 1056 | cumulative_gas_used, 1057 | coinbase_payment, 1058 | } = execution; 1059 | 1060 | // expected gas usage is the transfer transaction's gas limit 1061 | let expected_cumulative_gas_used = TRANSFER_GAS_LIMIT; 1062 | 1063 | // check post state contains transaction receipt 1064 | let receipt = post_state 1065 | .receipts(block_env.number.to::()) 1066 | .first() 1067 | .expect("post state contains receipt"); 1068 | assert!(receipt.success); 1069 | assert_eq!(receipt.tx_type, TxType::EIP1559); 1070 | assert_eq!(receipt.cumulative_gas_used, expected_cumulative_gas_used); 1071 | 1072 | // check post-execution sender balance 1073 | let sender_account = post_state 1074 | .account(&Address::from(sender_wallet.address())) 1075 | .expect("sender account touched") 1076 | .expect("sender account not destroyed"); 1077 | let expected_sender_balance = 1078 | initial_sender_balance - transfer_amount - (max_fee * receipt.cumulative_gas_used); 1079 | assert_eq!(sender_account.balance, U256::from(expected_sender_balance)); 1080 | 1081 | // check 
post-execution receiver balance 1082 | let receiver_account = post_state 1083 | .account(&Address::from(receiver_wallet.address())) 1084 | .expect("receiver account touched") 1085 | .expect("receiver account not destroyed"); 1086 | assert_eq!(receiver_account.balance, U256::from(transfer_amount)); 1087 | 1088 | // check access list 1089 | let access_list_addrs: HashSet<_> = 1090 | access_list.0.into_iter().map(|item| item.address).collect(); 1091 | assert!(access_list_addrs.contains(&Address::from(sender_wallet.address()))); 1092 | assert!(access_list_addrs.contains(&Address::from(receiver_wallet.address()))); 1093 | 1094 | // check gas usage 1095 | assert_eq!(cumulative_gas_used, expected_cumulative_gas_used); 1096 | 1097 | // check coinbase payment 1098 | let expected_coinbase_payment = cumulative_gas_used * max_priority_fee; 1099 | let builder_account = post_state 1100 | .account(&Address::from(builder_wallet.address())) 1101 | .expect("builder account touched") 1102 | .expect("builder account not destroyed"); 1103 | assert_eq!( 1104 | builder_account.balance, 1105 | U256::from(expected_coinbase_payment) 1106 | ); 1107 | assert_eq!(coinbase_payment, U256::from(expected_coinbase_payment)); 1108 | } 1109 | 1110 | #[test] 1111 | fn execute_coinbase_transfer() { 1112 | let state = MockEthProvider::default(); 1113 | 1114 | // populate coinbase transfer smart contract in the DB 1115 | // 1116 | // h/t lightclient: https://github.com/lightclient/sendall 1117 | let contract_addr = Address::random(); 1118 | let bytecode = vec![0x5f, 0x5f, 0x5f, 0x5f, 0x47, 0x41, 0x5a, 0xf1, 0x00]; 1119 | let contract_acct = ExtendedAccount::new(0, U256::ZERO).with_bytecode(bytecode.into()); 1120 | state.add_account(contract_addr, contract_acct); 1121 | 1122 | // add caller account to state 1123 | let sender_wallet = LocalWallet::new(&mut rand::thread_rng()); 1124 | let initial_sender_balance = 10000000; 1125 | let sender_nonce = 0; 1126 | let sender_account = 
ExtendedAccount::new(sender_nonce, U256::from(initial_sender_balance)); 1127 | state.add_account(sender_wallet.address().into(), sender_account); 1128 | 1129 | let state = State::new(state); 1130 | let mut db = CacheDB::new(Arc::new(state)); 1131 | let mut post_state = PostState::default(); 1132 | 1133 | // builder will be the coinbase (i.e. beneficiary) 1134 | let builder_wallet = LocalWallet::new(&mut rand::thread_rng()); 1135 | 1136 | let (cfg_env, block_env) = env(builder_wallet.address().into(), U256::ZERO); 1137 | 1138 | let tx_value = 100; 1139 | let tx_gas_limit = 84000; 1140 | let max_priority_fee = 100; 1141 | let max_fee = block_env.basefee.to::() + max_priority_fee; 1142 | 1143 | // construct the contract call transaction for execution 1144 | let call_tx = tx( 1145 | &sender_wallet, 1146 | EthersAddress(*contract_addr), 1147 | tx_gas_limit, 1148 | max_fee, 1149 | max_priority_fee, 1150 | tx_value, 1151 | sender_nonce, 1152 | ); 1153 | 1154 | let execution = execute( 1155 | &mut db, 1156 | &mut post_state, 1157 | &cfg_env, 1158 | &block_env, 1159 | 0, 1160 | Some(call_tx), 1161 | ) 1162 | .expect("execution doesn't fail"); 1163 | let Execution { 1164 | access_list, 1165 | coinbase_payment, 1166 | cumulative_gas_used, 1167 | } = execution; 1168 | 1169 | // check access list 1170 | let access_list_addrs: HashSet<_> = 1171 | access_list.0.into_iter().map(|item| item.address).collect(); 1172 | assert!(access_list_addrs.contains(&Address::from(sender_wallet.address()))); 1173 | assert!(access_list_addrs.contains(&Address::from(contract_addr))); 1174 | 1175 | // check coinbase payment 1176 | let expected_coinbase_payment = tx_value + (cumulative_gas_used * max_priority_fee); 1177 | let builder_account = post_state 1178 | .account(&Address::from(builder_wallet.address())) 1179 | .expect("builder account touched") 1180 | .expect("builder account not destroyed"); 1181 | assert_eq!( 1182 | builder_account.balance, 1183 | U256::from(expected_coinbase_payment) 1184 
| ); 1185 | assert_eq!(coinbase_payment, U256::from(expected_coinbase_payment)); 1186 | } 1187 | } 1188 | -------------------------------------------------------------------------------- /src/bundle/mod.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | use std::ops::RangeInclusive; 3 | 4 | use reth_primitives::{BlockNumber, TransactionSignedEcRecovered}; 5 | 6 | pub mod pool; 7 | 8 | #[derive(Clone, Debug, Eq, PartialEq, Hash)] 9 | pub(crate) struct BundleCompact(pub Vec); 10 | 11 | impl BundleCompact { 12 | /// returns whether `self` conflicts with `other` in the sense that both cannot be executed 13 | pub fn conflicts(&self, other: &Self) -> bool { 14 | let hashes = self 15 | .0 16 | .iter() 17 | .map(|tx| tx.hash_ref()) 18 | .collect::>(); 19 | let other_hashes = other 20 | .0 21 | .iter() 22 | .map(|tx| tx.hash_ref()) 23 | .collect::>(); 24 | !hashes.is_disjoint(&other_hashes) 25 | } 26 | } 27 | 28 | pub type BundleId = u64; 29 | 30 | #[derive(Clone, Debug, Eq, PartialEq, Hash)] 31 | pub struct Bundle { 32 | pub id: BundleId, 33 | pub txs: Vec, 34 | pub block_num: BlockNumber, 35 | pub eligibility: RangeInclusive, 36 | } 37 | -------------------------------------------------------------------------------- /src/bundle/pool.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | use std::time::{SystemTime, UNIX_EPOCH}; 3 | 4 | use super::{Bundle, BundleId}; 5 | 6 | use reth_primitives::BlockNumber; 7 | use reth_provider::CanonStateNotification; 8 | 9 | #[derive(Default)] 10 | pub struct BundlePool(pub(crate) HashSet); 11 | 12 | impl BundlePool { 13 | /// returns all bundles eligible w.r.t. 
time `now` and canonical chain tip `block` 14 | pub fn eligible(&self, block: BlockNumber, now: SystemTime) -> Vec { 15 | let now = now.duration_since(UNIX_EPOCH).unwrap().as_secs(); 16 | self.0 17 | .iter() 18 | .filter(|bundle| bundle.eligibility.contains(&now) && bundle.block_num == block) 19 | .cloned() 20 | .collect() 21 | } 22 | 23 | /// removes all bundles whose eligibility expires w.r.t. time `now` 24 | pub fn tick(&mut self, now: SystemTime) { 25 | let now = now.duration_since(UNIX_EPOCH).unwrap().as_secs(); 26 | self.0.retain(|bundle| *bundle.eligibility.end() >= now); 27 | } 28 | 29 | /// maintains the pool based on updates to the canonical state. 30 | /// 31 | /// returns the IDs of the bundles removed from the pool. 32 | pub fn maintain(&mut self, _event: CanonStateNotification) -> Vec { 33 | // remove all bundles 34 | self.0.drain().map(|bundle| bundle.id).collect() 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod builder; 2 | pub mod bundle; 3 | pub mod rpc; 4 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | mod builder; 2 | mod bundle; 3 | mod rpc; 4 | 5 | use std::collections::HashSet; 6 | use std::ops::Deref; 7 | use std::str::FromStr; 8 | use std::sync::Arc; 9 | use std::time::{Duration, Instant}; 10 | 11 | use beacon_api_client::mainnet::Client as BeaconClient; 12 | use clap::Parser; 13 | use ethereum_consensus::{ 14 | capella::mainnet::ExecutionPayload, 15 | clock::{self, Clock, SystemTimeProvider}, 16 | configs::mainnet::SECONDS_PER_SLOT, 17 | crypto::SecretKey, 18 | phase0::mainnet::SLOTS_PER_EPOCH, 19 | primitives::{BlsPublicKey, Bytes32, ExecutionAddress, Slot}, 20 | ssz::{ByteList, ByteVector}, 21 | state_transition::Context, 22 | }; 23 | use 
ethers::signers::{LocalWallet, Signer}; 24 | use futures_util::StreamExt; 25 | use mev_rs::{ 26 | blinded_block_relayer::{BlindedBlockRelayer, Client as RelayClient}, 27 | signing::sign_builder_message, 28 | types::{BidTrace, SignedBidSubmission}, 29 | ProposerScheduler, ValidatorRegistry, 30 | }; 31 | use reth::{ 32 | cli::config::RethRpcConfig, 33 | cli::{ 34 | config::PayloadBuilderConfig, 35 | ext::{RethCliExt, RethNodeCommandConfig}, 36 | Cli, 37 | }, 38 | network::{NetworkInfo, Peers}, 39 | rpc::builder::{RethModuleRegistry, TransportRpcModules}, 40 | tasks::TaskSpawner, 41 | }; 42 | use reth_payload_builder::{ 43 | BuiltPayload, PayloadBuilderAttributes, PayloadBuilderHandle, PayloadBuilderService, 44 | }; 45 | use reth_primitives::{Bloom, Bytes, Chain, ChainSpec, SealedBlock, H160, H256, U256}; 46 | use reth_provider::{ 47 | BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, 48 | StateProviderFactory, 49 | }; 50 | use reth_rpc_types::engine::PayloadAttributes; 51 | use reth_transaction_pool::TransactionPool; 52 | use ssz_rs::prelude::*; 53 | use tokio::{ 54 | sync::mpsc, 55 | time::{interval, interval_at}, 56 | }; 57 | use tokio_util::time::DelayQueue; 58 | use url::Url; 59 | 60 | use builder::{Builder, BuilderConfig}; 61 | use rpc::{EthExt, EthExtApiServer}; 62 | 63 | #[derive(Debug, Clone, clap::Args)] 64 | struct EvaRethNodeCommandExt { 65 | /// hex-encoded secret key corresponding to builder's wallet 66 | #[clap(long, required(true))] 67 | pub wallet_sk: String, 68 | /// hex-encoded secret key corresponding to builder's mev-boost BLS public key 69 | #[clap(long, required(true))] 70 | pub boost_bls_sk: String, 71 | /// URL endpoint of beacon API 72 | #[clap(long, required(true))] 73 | pub beacon_endpoint: Url, 74 | /// URL endpoint of mev-boost relay 75 | #[clap(long, required(true))] 76 | pub relay_endpoint: Url, 77 | } 78 | 79 | impl RethNodeCommandConfig for EvaRethNodeCommandExt { 80 | fn 
extend_rpc_modules( 81 | &mut self, 82 | _config: &Conf, 83 | registry: &mut RethModuleRegistry, 84 | modules: &mut TransportRpcModules<()>, 85 | ) -> eyre::Result<()> 86 | where 87 | Conf: RethRpcConfig, 88 | Provider: BlockReaderIdExt 89 | + StateProviderFactory 90 | + EvmEnvProvider 91 | + ChainSpecProvider 92 | + ChangeSetReader 93 | + Clone 94 | + Unpin 95 | + 'static, 96 | Pool: TransactionPool + Clone + 'static, 97 | Network: NetworkInfo + Peers + Clone + 'static, 98 | Tasks: TaskSpawner + Clone + 'static, 99 | Events: CanonStateSubscriptions + Clone + 'static, 100 | { 101 | let ext = EthExt::new(registry.pool().clone()); 102 | modules.merge_configured(ext.into_rpc())?; 103 | 104 | Ok(()) 105 | } 106 | 107 | fn spawn_payload_builder_service( 108 | &mut self, 109 | conf: &Conf, 110 | provider: Provider, 111 | pool: Pool, 112 | executor: Tasks, 113 | chain_spec: Arc, 114 | ) -> eyre::Result 115 | where 116 | Conf: PayloadBuilderConfig, 117 | Provider: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, 118 | Pool: TransactionPool + Unpin + 'static, 119 | Tasks: TaskSpawner + Clone + Unpin + 'static, 120 | { 121 | // beacon client 122 | tracing::info!("EVA's beacon API endpoint {}", self.beacon_endpoint); 123 | let beacon_client = Arc::new(BeaconClient::new(self.beacon_endpoint.clone())); 124 | 125 | // relay client 126 | tracing::info!("EVA's relay API endpoint {}", self.relay_endpoint); 127 | let relay_client = BeaconClient::new(self.relay_endpoint.clone()); 128 | let relay_client = Arc::new(RelayClient::new(relay_client)); 129 | 130 | // builder extra data 131 | let extra_data = Bytes::from(conf.extradata().as_bytes()); 132 | 133 | // builder wallet 134 | let wallet = LocalWallet::from_str(&self.wallet_sk).expect("wallet secret key valid"); 135 | 136 | // builder BLS key 137 | let bls_sk = SecretKey::try_from(self.boost_bls_sk.clone()).expect("BLS secret key valid"); 138 | let bls_sk = Arc::new(bls_sk); 139 | let bls_pk = 
Arc::new(bls_sk.public_key()); 140 | 141 | // chain info 142 | let (context, clock) = context_and_clock(&chain_spec).expect("recognized chain spec"); 143 | let context = Arc::new(context); 144 | 145 | tracing::info!( 146 | "EVA booting up...\n\textra data {}\n\texecution address {}\n\tBLS public key {}", 147 | extra_data, 148 | wallet.address(), 149 | bls_pk, 150 | ); 151 | 152 | // TODO: spawn server that will listen for bundles and channel them through the sender 153 | let (_, bundles) = mpsc::unbounded_channel(); 154 | 155 | // TODO: maybe we don't need events here. cancellations? 156 | let (_, events) = mpsc::unbounded_channel(); 157 | 158 | let (jobs_tx, mut jobs_rx) = mpsc::unbounded_channel(); 159 | 160 | // construct and start the builder 161 | tracing::info!("spawning builder"); 162 | let build_deadline = Duration::from_secs(SECONDS_PER_SLOT); 163 | let build_interval = Duration::from_millis(500); 164 | let config = BuilderConfig { 165 | extra_data, 166 | wallet, 167 | deadline: build_deadline, 168 | interval: build_interval, 169 | }; 170 | let builder = Builder::new(config, chain_spec.deref().clone(), provider, pool, jobs_tx); 171 | builder.start(bundles, events); 172 | 173 | // spawn payload builder service to drive payload build jobs forward 174 | tracing::info!("spawning payload builder service"); 175 | let (payload_service, payload_builder) = PayloadBuilderService::new(builder); 176 | executor.spawn_critical("payload builder service", Box::pin(payload_service)); 177 | 178 | // spawn task to participate in mev-boost auction 179 | tracing::info!("spawning mev-boost auction"); 180 | let other_payload_builder = payload_builder.clone(); 181 | let other_executor = executor.clone(); 182 | executor.spawn_critical("mev-boost auction", Box::pin(async move { 183 | // construct types to manage proposer preferences 184 | let validators = Arc::new(ValidatorRegistry::new(beacon_client.deref().clone())); 185 | let scheduler = 
Arc::new(ProposerScheduler::new(beacon_client.deref().clone())); 186 | 187 | // refresh the consensus and mev-boost info each epoch 188 | let seconds_per_epoch = SECONDS_PER_SLOT * SLOTS_PER_EPOCH; 189 | let validator_info_refresh_interval = Duration::from_secs(seconds_per_epoch); 190 | let mut validator_info_refresh_interval = interval(validator_info_refresh_interval); 191 | 192 | // keep track of the jobs that we initiate 193 | let mut initiated_jobs = HashSet::new(); 194 | let mut jobs_removal_queue = DelayQueue::new(); 195 | 196 | // keep track of the current slot 197 | let mut current_slot = clock.current_slot().expect("beyond genesis"); 198 | let slots = clock.stream_slots(); 199 | tokio::pin!(slots); 200 | 201 | loop { 202 | tokio::select! { 203 | Some(slot) = slots.next() => { 204 | current_slot = slot; 205 | } 206 | _ = validator_info_refresh_interval.tick() => { 207 | tracing::info!("refreshing consensus and mev-boost info..."); 208 | 209 | // load validators 210 | tracing::info!("loading validators..."); 211 | if let Err(err) = validators.load().await { 212 | tracing::error!("unable to load validators {err}"); 213 | } else { 214 | tracing::info!("successfully loaded validators"); 215 | } 216 | 217 | // retrieve proposer duties for the current epoch 218 | tracing::info!("retrieving proposer duties..."); 219 | let epoch = clock.current_epoch().expect("beyond genesis"); 220 | if let Err(err) = scheduler.fetch_duties(epoch).await { 221 | tracing::error!("unable to retrieve proposer duties {err}"); 222 | } else { 223 | tracing::info!("successfully retrieved proposer duties"); 224 | } 225 | 226 | // retrieve proposer schedule from relay and validate registrations 227 | tracing::info!("retrieving proposer schedule..."); 228 | match relay_client.get_proposal_schedule().await { 229 | Ok(schedule) => { 230 | tracing::info!("successfully retrieved proposer schedule, validating registrations..."); 231 | let timestamp = clock::get_current_unix_time_in_secs(); 232 | 
let mut registrations: Vec<_> = schedule.into_iter().map(|entry| entry.entry).collect(); 233 | if let Err(err) = validators.validate_registrations(&mut registrations, timestamp, &context) { 234 | tracing::error!("invalid registration {err}"); 235 | } else { 236 | tracing::info!("successfully validated registrations"); 237 | } 238 | } 239 | Err(err) => { 240 | tracing::error!("unable to load proposer schedule from relay {err}"); 241 | } 242 | } 243 | } 244 | Some((mut attrs, cancel)) = jobs_rx.recv() => { 245 | let mut payload_id = attrs.id; 246 | 247 | // if this is a job we initiated below, then move on 248 | if initiated_jobs.contains(&payload_id) { 249 | continue; 250 | } 251 | 252 | let payload_slot = clock.slot_at_time(attrs.timestamp).expect("beyond genesis"); 253 | 254 | // cancel the job, since we only want to keep jobs that we initiated 255 | tracing::debug!( 256 | slot = %payload_slot, 257 | payload = %payload_id, 258 | "cancelling non-mev-boost payload job" 259 | ); 260 | cancel.cancel(); 261 | 262 | // if the payload attributes are for a slot prior to the current slot, then 263 | // do not attempt to create a new job 264 | if payload_slot < current_slot { 265 | continue; 266 | } 267 | 268 | // look up the proposer preferences for the slot if available 269 | let proposer = match scheduler.get_proposer_for(payload_slot) { 270 | Ok(proposer) => proposer, 271 | Err(err) => { 272 | tracing::warn!( 273 | slot = %payload_slot, 274 | "unable to retrieve proposer for slot {err}, not bidding for slot" 275 | ); 276 | continue; 277 | } 278 | }; 279 | let prefs = match validators.get_preferences(&proposer) { 280 | Some(prefs) => { 281 | tracing::info!( 282 | slot = %payload_slot, 283 | proposer = %proposer, 284 | "found mev-boost registration for proposer {prefs:?}" 285 | ); 286 | prefs 287 | } 288 | None => { 289 | tracing::info!( 290 | slot = %payload_slot, 291 | proposer = %proposer, 292 | "mev-boost registration not found for proposer, not bidding for slot" 293 
| ); 294 | continue; 295 | } 296 | }; 297 | 298 | // TODO: pass gas limit to builder somehow 299 | 300 | // if the fee recipient in the payload attributes does not match the one 301 | // in the registration, then update the attributes with the proper fee 302 | // recipient 303 | let boost_fee_recipient = H160::from_slice(prefs.fee_recipient.as_slice()); 304 | if attrs.suggested_fee_recipient != boost_fee_recipient { 305 | let attributes = PayloadAttributes { 306 | timestamp: attrs.timestamp.into(), 307 | prev_randao: attrs.prev_randao, 308 | suggested_fee_recipient: boost_fee_recipient, 309 | withdrawals: Some(attrs.withdrawals.clone()), 310 | parent_beacon_block_root: None, 311 | }; 312 | attrs = PayloadBuilderAttributes::new(attrs.parent, attributes); 313 | payload_id = attrs.payload_id(); 314 | } 315 | 316 | // if we already initiated a job with identical attributes, then move on 317 | if initiated_jobs.contains(&payload_id) { 318 | continue; 319 | } 320 | 321 | // initiate the new payload job 322 | if let Err(err) = other_payload_builder.new_payload(attrs).await { 323 | tracing::error!( 324 | slot = %payload_slot, 325 | "unable to initiate new payload job {err}, not bidding for slot" 326 | ); 327 | continue; 328 | } 329 | 330 | initiated_jobs.insert(payload_id); 331 | jobs_removal_queue.insert(payload_id, Duration::from_secs(60)); 332 | tracing::info!( 333 | slot = %payload_slot, 334 | proposer = %proposer, 335 | payload = %payload_id, 336 | "successfully initiated new payload job for mev-boost auction" 337 | ); 338 | 339 | // spawn a task to periodically poll the payload job and submit bids to the 340 | // mev-boost relay 341 | let proposer = Arc::new(proposer); 342 | let inner_payload_builder = other_payload_builder.clone(); 343 | let inner_relay_client = Arc::clone(&relay_client); 344 | let inner_bls_pk = Arc::clone(&bls_pk); 345 | let inner_bls_sk = Arc::clone(&bls_sk); 346 | let inner_context = Arc::clone(&context); 347 | 
other_executor.spawn(Box::pin(async move { 348 | // starting 500ms from now, poll the job every 500ms 349 | let payload_poll_interval = Duration::from_millis(500); 350 | let start = Instant::now() + payload_poll_interval; 351 | let mut payload_poll_interval = interval_at(start.into(), payload_poll_interval); 352 | 353 | // keep track of the highest bid we have sent to the mev-boost relay 354 | let mut highest_bid = U256::ZERO; 355 | 356 | // TODO: watch auction so that we can terminate early and so that we 357 | // can know whether we won or lost 358 | let start = Instant::now(); 359 | loop { 360 | // only poll the job for the duration of a slot 361 | if start.elapsed() > Duration::from_secs(SECONDS_PER_SLOT) { 362 | break; 363 | } 364 | 365 | payload_poll_interval.tick().await; 366 | 367 | // poll the job for the best available payload 368 | match inner_payload_builder.best_payload(payload_id).await { 369 | Some(Ok(payload)) => { 370 | if payload.fees() > highest_bid { 371 | tracing::info!( 372 | slot = %payload_slot, 373 | proposer = %proposer, 374 | payload = %payload_id, 375 | "submitting bid for payload with higher value {} to relay", 376 | payload.fees() 377 | ); 378 | 379 | // construct signed bid 380 | let mut message = built_payload_to_bid_trace( 381 | &payload, 382 | payload_slot, 383 | inner_bls_pk.clone(), 384 | proposer.clone(), 385 | to_bytes20(boost_fee_recipient) 386 | ); 387 | let execution_payload = block_to_execution_payload(payload.block()); 388 | let signature = sign_builder_message( 389 | &mut message, 390 | &inner_bls_sk, 391 | &inner_context 392 | ).expect("can sign with BLS sk"); 393 | let submission = SignedBidSubmission { 394 | message, 395 | execution_payload, 396 | signature, 397 | }; 398 | 399 | // submit signed bid 400 | if let Err(err) = inner_relay_client.submit_bid(&submission).await { 401 | tracing::warn!( 402 | slot = %payload_slot, 403 | proposer = %proposer, 404 | payload = %payload_id, 405 | "unable to submit higher bid to 
relay {err}" 406 | ); 407 | } else { 408 | highest_bid = payload.fees(); 409 | tracing::info!( 410 | slot = %payload_slot, 411 | proposer = %proposer, 412 | payload = %payload_id, 413 | "successfully submitted bid to relay with value {highest_bid}" 414 | ); 415 | } 416 | } 417 | } 418 | Some(Err(err)) => { 419 | tracing::warn!( 420 | slot = %payload_slot, 421 | proposer = %proposer, 422 | payload = %payload_id, 423 | "unable to retrieve best payload from build job {err}" 424 | ); 425 | } 426 | None => { 427 | tracing::warn!( 428 | slot = %payload_slot, 429 | proposer = %proposer, 430 | payload = %payload_id, 431 | "payload not available for submission" 432 | ); 433 | } 434 | } 435 | } 436 | })); 437 | } 438 | Some(job) = jobs_removal_queue.next() => { 439 | initiated_jobs.remove(job.get_ref()); 440 | } 441 | } 442 | } 443 | })); 444 | 445 | Ok(payload_builder) 446 | } 447 | } 448 | 449 | struct EvaRethCliExt; 450 | 451 | impl RethCliExt for EvaRethCliExt { 452 | type Node = EvaRethNodeCommandExt; 453 | } 454 | 455 | fn main() { 456 | Cli::::parse().run().unwrap(); 457 | } 458 | 459 | // COMPATIBILITY 460 | // 461 | // TODO: move into separate module 462 | 463 | fn context_and_clock>( 464 | chain: T, 465 | ) -> Option<(Context, Clock)> { 466 | let chain = chain.as_ref().chain(); 467 | if chain == Chain::mainnet() { 468 | Some((Context::for_mainnet(), clock::for_mainnet())) 469 | } else if chain == Chain::goerli() { 470 | Some((Context::for_goerli(), clock::for_goerli())) 471 | } else if chain == Chain::sepolia() { 472 | Some((Context::for_sepolia(), clock::for_sepolia())) 473 | } else { 474 | None 475 | } 476 | } 477 | 478 | fn to_bytes32(value: H256) -> Bytes32 { 479 | Bytes32::try_from(value.as_bytes()).unwrap() 480 | } 481 | 482 | fn to_bytes20(value: H160) -> ExecutionAddress { 483 | ExecutionAddress::try_from(value.as_bytes()).unwrap() 484 | } 485 | 486 | fn to_byte_vector(value: Bloom) -> ByteVector<256> { 487 | 
ByteVector::<256>::try_from(value.as_bytes()).unwrap() 488 | } 489 | 490 | fn to_u256(value: U256) -> ssz_rs::U256 { 491 | ssz_rs::U256::try_from_bytes_le(&value.to_le_bytes::<32>()).unwrap() 492 | } 493 | 494 | fn built_payload_to_bid_trace, P: AsRef>( 495 | payload: &BuiltPayload, 496 | slot: Slot, 497 | builder_public_key: B, 498 | proposer_public_key: P, 499 | proposer_fee_recipient: ExecutionAddress, 500 | ) -> BidTrace { 501 | BidTrace { 502 | slot, 503 | parent_hash: to_bytes32(payload.block().parent_hash), 504 | block_hash: to_bytes32(payload.block().hash), 505 | builder_public_key: builder_public_key.as_ref().clone(), 506 | proposer_public_key: proposer_public_key.as_ref().clone(), 507 | proposer_fee_recipient, 508 | gas_limit: payload.block().gas_limit, 509 | gas_used: payload.block().gas_used, 510 | value: to_u256(payload.fees()), 511 | } 512 | } 513 | 514 | fn block_to_execution_payload(block: &SealedBlock) -> ExecutionPayload { 515 | let header = &block.header; 516 | let transactions = block 517 | .body 518 | .iter() 519 | .map(|t| { 520 | ethereum_consensus::capella::mainnet::Transaction::try_from( 521 | t.envelope_encoded().as_ref(), 522 | ) 523 | .unwrap() 524 | }) 525 | .collect::>(); 526 | let withdrawals = block 527 | .withdrawals 528 | .as_ref() 529 | .unwrap() 530 | .iter() 531 | .map(|w| ethereum_consensus::capella::mainnet::Withdrawal { 532 | index: w.index as usize, 533 | validator_index: w.validator_index as usize, 534 | address: to_bytes20(w.address), 535 | amount: w.amount, 536 | }) 537 | .collect::>(); 538 | ExecutionPayload { 539 | parent_hash: to_bytes32(header.parent_hash), 540 | fee_recipient: to_bytes20(header.beneficiary), 541 | state_root: to_bytes32(header.state_root), 542 | receipts_root: to_bytes32(header.receipts_root), 543 | logs_bloom: to_byte_vector(header.logs_bloom), 544 | prev_randao: to_bytes32(header.mix_hash), 545 | block_number: header.number, 546 | gas_limit: header.gas_limit, 547 | gas_used: header.gas_used, 548 | 
timestamp: header.timestamp, 549 | extra_data: ByteList::try_from(header.extra_data.as_ref()).unwrap(), 550 | base_fee_per_gas: ssz_rs::U256::from(header.base_fee_per_gas.unwrap_or_default()), 551 | block_hash: to_bytes32(block.hash()), 552 | transactions: TryFrom::try_from(transactions).unwrap(), 553 | withdrawals: TryFrom::try_from(withdrawals).unwrap(), 554 | } 555 | } 556 | -------------------------------------------------------------------------------- /src/rpc.rs: -------------------------------------------------------------------------------- 1 | use jsonrpsee::{ 2 | core::{async_trait, RpcResult}, 3 | proc_macros::rpc, 4 | }; 5 | use reth_primitives::{Bytes, FromRecoveredTransaction, TransactionSigned, H256}; 6 | use reth_rpc::eth::error::{EthApiError, RpcPoolError}; 7 | use reth_transaction_pool::{TransactionOrigin, TransactionPool}; 8 | 9 | #[rpc(server, namespace = "eth")] 10 | pub trait EthExtApi { 11 | #[method(name = "sendPrivateTransaction")] 12 | async fn send_private_transaction(&self, tx: Bytes) -> RpcResult; 13 | } 14 | 15 | pub struct EthExt { 16 | pool: Pool, 17 | } 18 | 19 | impl EthExt { 20 | pub fn new(pool: Pool) -> Self { 21 | Self { pool } 22 | } 23 | } 24 | 25 | #[async_trait] 26 | impl EthExtApiServer for EthExt 27 | where 28 | Pool: TransactionPool + Clone + 'static, 29 | { 30 | async fn send_private_transaction(&self, tx: Bytes) -> RpcResult { 31 | if tx.is_empty() { 32 | return Err(EthApiError::EmptyRawTransactionData)?; 33 | } 34 | let tx = TransactionSigned::decode_enveloped(tx) 35 | .or(Err(EthApiError::FailedToDecodeSignedTransaction))?; 36 | let tx = tx 37 | .into_ecrecovered() 38 | .ok_or(EthApiError::InvalidTransactionSignature)?; 39 | let tx = ::from_recovered_transaction(tx); 40 | let hash = self 41 | .pool 42 | .add_transaction(TransactionOrigin::Private, tx) 43 | .await 44 | .map_err(RpcPoolError::from)?; 45 | Ok(hash) 46 | } 47 | } 48 | --------------------------------------------------------------------------------