├── .github └── workflows │ └── build-check.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── accounts-testnet.csv ├── examples ├── accounts_meta_to_csv.rs ├── bench_geyser_grpc_accounts.rs ├── debouncer.rs ├── dump_slots_stream_samples.rs ├── dump_txs_stream_samples.rs ├── parse_timestamp_tagged_logs.rs ├── parse_yellowstone_timetagged_log.rs ├── stream_blocks_autoconnect.rs ├── stream_blocks_mainnet_stream.rs ├── stream_blocks_mainnet_task.rs ├── stream_blocks_processed.rs ├── stream_blocks_single.rs ├── stream_token_accounts.rs ├── stream_vote_transactions.rs └── subscribe_accounts.rs ├── rust-toolchain.toml └── src ├── channel_plugger.rs ├── grpc_subscription_autoreconnect_streams.rs ├── grpc_subscription_autoreconnect_tasks.rs ├── grpcmultiplex_fastestwins.rs ├── histogram_percentiles.rs ├── lib.rs ├── obfuscate.rs └── yellowstone_grpc_util.rs /.github/workflows/build-check.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | pull_request: 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | 8 | jobs: 9 | test: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | 14 | # The toolchain action should definitely be run before the cache action 15 | - uses: actions-rust-lang/setup-rust-toolchain@v1 16 | with: 17 | components: rustfmt, clippy 18 | cache: false 19 | # avoid the default "-D warnings" which thrashes cache 20 | rustflags: "" 21 | 22 | # https://github.com/actions/cache/blob/main/examples.md#rust---cargo 23 | - uses: actions/cache@v3 24 | with: 25 | path: | 26 | ~/.cargo/bin/ 27 | ~/.cargo/registry/index/ 28 | ~/.cargo/registry/cache/ 29 | ~/.cargo/git/db/ 30 | target/ 31 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} 32 | 33 | - name: Run fmt+clippy 34 | run: | 35 | cargo fmt --all --check 36 | cargo clippy --workspace --all-targets 37 | 38 | - name: Build 39 | run: | 40 | cargo build --all-targets 41 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .DS_Store 3 | .idea/ 4 | .vscode/ 5 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "geyser-grpc-connector" 3 | version = "0.13.1+yellowstone.5.0.0-solana.2.1" 4 | edition = "2021" 5 | 6 | description = "Multiplexing and Reconnection on Yellowstone gRPC Geyser client streaming" 7 | license = "Apache-2.0" 8 | authors = ["GroovieGermanikus "] 9 | repository = "https://github.com/blockworks-foundation/geyser-grpc-connector" 10 | 11 | [dependencies] 12 | yellowstone-grpc-client = { version = "6.0.0", git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v6.0.0+solana.2.2.12" } 13 | yellowstone-grpc-proto = { version = "6.0.0", git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v6.0.0+solana.2.2.12" } 14 | 15 | # required for CommitmentConfig 16 | solana-sdk = "~2.2.2" 17 | 18 | url = "2.5.0" 19 | async-stream = "0.3.5" 20 | tokio = { version = "1.28", features = ["rt-multi-thread"] } 21 | tokio-metrics = { version = "0.4.0", default-features = false } 22 | futures = "0.3.28" 23 | merge-streams = "0.1.2" 24 | anyhow = "1.0.70" 25 | log = "0.4.17" 26 | tracing = "0.1.37" 27 | itertools = "0.10.5" 28 | 29 | tonic = "0.12" 30 | tonic-health = "0.12" 31 | 32 | [dev-dependencies] 33 | tracing-subscriber = "0.3.16" 34 | solana-logger = "2" 35 | solana-account-decoder = "~2.2.7" 36 | 37 | base64 = "0.21.5" 38 | bincode = "1.3.3" 39 | csv = "1.3.0" 40 | lz4_flex = "0.11.3" 41 | dashmap = "6.1.0" 42 | regex = "1.10.4" 43 | clap = { version = "4.2", features = ["derive"] } 44 | 45 | # patch curve25519-dalek is gone 46 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Solana Geyser gRPC Multiplexing and Reconnect 4 | This project provides multiplexing of multiple [Yellowstone gRPC](https://github.com/rpcpool/yellowstone-grpc) subscriptions based on _Fastest Wins Strategy_. 5 | 6 | * Multiple _Futures_ get **merged** where the first next block that arrives will be emitted. 7 | * No __guarantees__ are made about if the messages are continuous or not. 8 | * __Reconnects__ are handled transparently inside the _Futures_. 9 | 10 | Disclaimer: The library is designed with the needs of 11 | [LiteRPC](https://github.com/blockworks-foundation/lite-rpc) in mind 12 | yet might be useful for other projects as well. 13 | 14 | The implementation is based on _Rust Futures_. 15 | 16 | Please open an issue if you have any questions or suggestions -> [New Issue](https://github.com/blockworks-foundation/geyser-grpc-connector/issues/new). 
17 | 18 | ## Versions 19 | These are the currently maintained versions of the library: [see Wiki](https://github.com/blockworks-foundation/geyser-grpc-connector/wiki) 20 | 21 | ## Installation and Usage 22 | 23 | ```cargo add geyser-grpc-connector ``` 24 | 25 | 26 | An example how to use the library is provided in `stream_blocks_mainnet_stream.rs`. 27 | 28 | ## Known issues 29 | * Library does not support other data than Blocks/Slots very well. 30 | * Should not be used with commitment level __PROCESSED__ because slot numbers are not monotoic. 31 | * Library needs messages to be in order and provide slot information to work properly. 32 | 33 | -------------------------------------------------------------------------------- /accounts-testnet.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blockworks-foundation/geyser-grpc-connector/b2028f6ab343ab50733f76e862ce64761deee3b6/accounts-testnet.csv -------------------------------------------------------------------------------- /examples/accounts_meta_to_csv.rs: -------------------------------------------------------------------------------- 1 | use itertools::Itertools; 2 | use std::fs::File; 3 | use std::io; 4 | use std::io::BufRead; 5 | use std::path::PathBuf; 6 | 7 | pub fn main() { 8 | let accounts_meta_file = 9 | PathBuf::from("/Users/stefan/mango/projects/geyser-misc/ledger-debug-accounts.txt"); 10 | 11 | let file = File::open(accounts_meta_file).expect("file must exist"); 12 | let reader = io::BufReader::new(file); 13 | for blocks in &reader.lines().chunks(9) { 14 | let blocks = blocks.collect_vec(); 15 | let account_pk = blocks[0].as_ref().unwrap().replace(':', ""); 16 | if account_pk.is_empty() { 17 | break; 18 | } 19 | let owner_pk = blocks[2].as_ref().unwrap(); 20 | let ltick = owner_pk.find('\''); 21 | let rtick = owner_pk.rfind('\''); 22 | let owner_pk = &owner_pk[ltick.unwrap() + 1..rtick.unwrap()]; 23 | 24 | let data_len = 
blocks[6].as_ref().unwrap().replace(" data_len: ", ""); 25 | 26 | println!("{};{};{}", account_pk, owner_pk, data_len); 27 | } 28 | } 29 | 30 | /* 31 | 16FMCmgLzCNNz6eTwGanbyN2ZxvTBSLuQ6DZhgeMshg: 32 | balance: 0.00095352 SOL 33 | owner: 'Feature111111111111111111111111111111111111' 34 | executable: false 35 | slot: 0 36 | rent_epoch: 0 37 | data_len: 9 38 | data: 'AQAAAAAAAAAA' 39 | encoding: "base64" 40 | */ 41 | -------------------------------------------------------------------------------- /examples/bench_geyser_grpc_accounts.rs: -------------------------------------------------------------------------------- 1 | use itertools::Itertools; 2 | use log::{debug, info}; 3 | use solana_account_decoder::parse_token::spl_token_ids; 4 | use solana_sdk::clock::{Slot, UnixTimestamp}; 5 | use solana_sdk::commitment_config::CommitmentConfig; 6 | use solana_sdk::hash::{hash, Hash}; 7 | use solana_sdk::pubkey::Pubkey; 8 | use std::cmp::min; 9 | use std::collections::HashMap; 10 | use std::env; 11 | use std::str::FromStr; 12 | use std::sync::atomic::{AtomicU64, Ordering}; 13 | use std::sync::Arc; 14 | use std::time::{SystemTime, UNIX_EPOCH}; 15 | use tokio::sync::mpsc::Receiver; 16 | 17 | use geyser_grpc_connector::grpc_subscription_autoreconnect_tasks::create_geyser_autoconnection_task_with_mpsc; 18 | use geyser_grpc_connector::{ 19 | histogram_percentiles, AtomicSlot, GeyserFilter, GrpcConnectionTimeouts, GrpcSourceConfig, 20 | Message, 21 | }; 22 | use tokio::time::{sleep, Duration}; 23 | use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof; 24 | use yellowstone_grpc_proto::geyser::{ 25 | SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterBlocksMeta, 26 | SubscribeRequestFilterSlots, 27 | }; 28 | 29 | mod debouncer; 30 | 31 | #[tokio::main] 32 | pub async fn main() { 33 | // RUST_LOG=info,stream_blocks_mainnet=debug,geyser_grpc_connector=trace 34 | tracing_subscriber::fmt::init(); 35 | // console_subscriber::init(); 36 | 37 | let 
grpc_addr_green = env::var("GRPC_ADDR").expect("need grpc url for green"); 38 | let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok(); 39 | 40 | info!( 41 | "Using grpc source on {} ({})", 42 | grpc_addr_green, 43 | grpc_x_token_green.is_some() 44 | ); 45 | 46 | let timeouts = GrpcConnectionTimeouts { 47 | connect_timeout: Duration::from_secs(25), 48 | request_timeout: Duration::from_secs(25), 49 | subscribe_timeout: Duration::from_secs(25), 50 | receive_timeout: Duration::from_secs(25), 51 | }; 52 | 53 | let config = GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone()); 54 | 55 | info!("Write Block stream.."); 56 | 57 | let (autoconnect_tx, geyser_messages_rx) = tokio::sync::mpsc::channel(10); 58 | let (_exit, exit_notify) = tokio::sync::broadcast::channel(1); 59 | 60 | // let _accounts_task = create_geyser_autoconnection_task_with_mpsc( 61 | // config.clone(), 62 | // GeyserFilter(CommitmentConfig::processed()).accounts(), 63 | // autoconnect_tx.clone(), 64 | // exit_notify.resubscribe(), 65 | // ); 66 | // 67 | // let _blocksmeta_task = create_geyser_autoconnection_task_with_mpsc( 68 | // config.clone(), 69 | // GeyserFilter(CommitmentConfig::processed()).blocks_meta(), 70 | // autoconnect_tx.clone(), 71 | // exit_notify.resubscribe(), 72 | // ); 73 | 74 | let _all_accounts = create_geyser_autoconnection_task_with_mpsc( 75 | config.clone(), 76 | all_accounts(), 77 | autoconnect_tx.clone(), 78 | exit_notify.resubscribe(), 79 | ); 80 | 81 | // let _token_accounts_task = create_geyser_autoconnection_task_with_mpsc( 82 | // config.clone(), 83 | // token_accounts(), 84 | // autoconnect_tx.clone(), 85 | // exit_notify.resubscribe(), 86 | // ); 87 | 88 | let current_processed_slot = AtomicSlot::default(); 89 | start_tracking_slots(current_processed_slot.clone()); 90 | start_tracking_account_consumer(geyser_messages_rx, current_processed_slot.clone()); 91 | 92 | // "infinite" sleep 93 | sleep(Duration::from_secs(1800)).await; 94 | } 95 | 
96 | // note processed might return a slot that night end up on a fork 97 | fn start_tracking_slots(current_processed_slot: AtomicSlot) { 98 | let grpc_slot_source1 = env::var("GRPC_SLOT1_ADDR").expect("need grpc url for slot source1"); 99 | let grpc_x_token_source1 = env::var("GRPC_SLOT1_X_TOKEN").ok(); 100 | 101 | let grpc_slot_source2 = env::var("GRPC_SLOT2_ADDR").expect("need grpc url for slot source2"); 102 | let grpc_x_token_source2 = env::var("GRPC_SLOT2_X_TOKEN").ok(); 103 | 104 | info!( 105 | "Using grpc sources for slot: {}, {}", 106 | grpc_slot_source1, grpc_slot_source2 107 | ); 108 | 109 | let timeouts = GrpcConnectionTimeouts { 110 | connect_timeout: Duration::from_secs(5), 111 | request_timeout: Duration::from_secs(5), 112 | subscribe_timeout: Duration::from_secs(5), 113 | receive_timeout: Duration::from_secs(5), 114 | }; 115 | 116 | let config1 = GrpcSourceConfig::new( 117 | grpc_slot_source1, 118 | grpc_x_token_source1, 119 | None, 120 | timeouts.clone(), 121 | ); 122 | let config2 = GrpcSourceConfig::new( 123 | grpc_slot_source2, 124 | grpc_x_token_source2, 125 | None, 126 | timeouts.clone(), 127 | ); 128 | 129 | tokio::spawn(async move { 130 | debug!("start tracking slots.."); 131 | 132 | let (multiplex_tx, mut multiplex_rx) = tokio::sync::mpsc::channel(10); 133 | // TODO expose 134 | let (_exit, exit_notify) = tokio::sync::broadcast::channel(1); 135 | 136 | let _blocksmeta_task1 = create_geyser_autoconnection_task_with_mpsc( 137 | config1.clone(), 138 | GeyserFilter(CommitmentConfig::processed()).slots(), 139 | multiplex_tx.clone(), 140 | exit_notify.resubscribe(), 141 | ); 142 | 143 | let _blocksmeta_task2 = create_geyser_autoconnection_task_with_mpsc( 144 | config2.clone(), 145 | GeyserFilter(CommitmentConfig::processed()).slots(), 146 | multiplex_tx.clone(), 147 | exit_notify.resubscribe(), 148 | ); 149 | 150 | // let mut tip: Slot = 0; 151 | 152 | loop { 153 | match multiplex_rx.recv().await { 154 | 
Some(Message::GeyserSubscribeUpdate(update)) => match update.update_oneof { 155 | Some(UpdateOneof::Slot(update)) => { 156 | let slot = update.slot; 157 | current_processed_slot.store(slot, Ordering::Relaxed); 158 | 159 | // don't do that with the mock impl as the slots restart when mock restarts 160 | // if slot > tip { 161 | // tip = slot; 162 | // current_processed_slot.store(slot, Ordering::Relaxed); 163 | // } 164 | } 165 | None => {} 166 | _ => {} 167 | }, 168 | None => { 169 | log::warn!("multiplexer channel closed - aborting"); 170 | return; 171 | } 172 | Some(Message::Connecting(_)) => {} 173 | } 174 | } 175 | }); 176 | } 177 | 178 | // note: this keeps track of lot of data and might blow up memory 179 | fn start_tracking_account_consumer( 180 | mut geyser_messages_rx: Receiver, 181 | current_processed_slot: Arc, 182 | ) { 183 | tokio::spawn(async move { 184 | let mut bytes_per_slot = HashMap::::new(); 185 | let mut updates_per_slot = HashMap::::new(); 186 | let mut wallclock_updates_per_slot_account = 187 | HashMap::<(Slot, Pubkey), Vec>::new(); 188 | // slot written by account update 189 | let mut current_slot: Slot = 0; 190 | let mut account_hashes = HashMap::>::new(); 191 | 192 | // seconds since epoch 193 | let block_time_per_slot = HashMap::::new(); 194 | 195 | let debouncer = debouncer::Debouncer::new(Duration::from_millis(50)); 196 | 197 | // Phoenix 4DoNfFBfF7UokCC2FQzriy7yHK6DY6NVdYpuekQ5pRgg 198 | // CzK26LWpoU9UjSrZkVu97oZj63abJrNv1zp9Hy2zZdy5 199 | // 6ojSigXF7nDPyhFRgmn3V9ywhYseKF9J32ZrranMGVSX 200 | // FV8EEHjJvDUD8Kkp1DcomTatZBA81Z6C5AhmvyUwvEAh 201 | // choose an account for which the diff should be calculated 202 | let selected_account_pk = 203 | Pubkey::from_str("4DoNfFBfF7UokCC2FQzriy7yHK6DY6NVdYpuekQ5pRgg").unwrap(); 204 | 205 | let mut last_account_data: Option> = None; 206 | 207 | loop { 208 | match geyser_messages_rx.recv().await { 209 | Some(Message::GeyserSubscribeUpdate(update)) => match update.update_oneof { 210 | 
Some(UpdateOneof::Account(update)) => { 211 | let now = SystemTime::now(); 212 | let account_info = update.account.unwrap(); 213 | let account_pk = Pubkey::try_from(account_info.pubkey).unwrap(); 214 | let _account_owner_pk = Pubkey::try_from(account_info.owner).unwrap(); 215 | // note: slot is referencing the block that is just built while the slot number reported from BlockMeta/Slot uses the slot after the block is built 216 | let slot = update.slot; 217 | let account_receive_time = get_epoch_sec(); 218 | 219 | if account_info.data.len() > 100000 { 220 | let hash = hash(&account_info.data); 221 | // info!("got account update!!! {} - {:?} - {} bytes - {} - {}lamps", 222 | // slot, account_pk, account_info.data.len(), hash, account_info.lamports); 223 | 224 | account_hashes 225 | .entry(account_pk) 226 | .and_modify(|entry| entry.push(hash)) 227 | .or_insert(vec![hash]); 228 | } 229 | 230 | // if account_hashes.len() > 100 { 231 | // for (pubkey, hashes) in &account_hashes { 232 | // info!("account hashes for {:?}", pubkey); 233 | // for hash in hashes { 234 | // info!("- hash: {}", hash); 235 | // } 236 | // } 237 | // } 238 | 239 | if account_pk == selected_account_pk { 240 | info!( 241 | "got account update!!! 
{} - {:?} - {} bytes - {}", 242 | slot, 243 | account_pk, 244 | account_info.data.len(), 245 | account_info.lamports 246 | ); 247 | 248 | if let Some(prev_data) = last_account_data { 249 | let hash1 = hash(&prev_data); 250 | let hash2 = hash(&account_info.data); 251 | info!("diff: {} {}", hash1, hash2); 252 | 253 | delta_compress(&prev_data, &account_info.data); 254 | } 255 | 256 | last_account_data = Some(account_info.data.clone()); 257 | } 258 | 259 | bytes_per_slot 260 | .entry(slot) 261 | .and_modify(|entry| *entry += account_info.data.len()) 262 | .or_insert(account_info.data.len()); 263 | updates_per_slot 264 | .entry(slot) 265 | .and_modify(|entry| *entry += 1) 266 | .or_insert(1); 267 | wallclock_updates_per_slot_account 268 | .entry((slot, account_pk)) 269 | .and_modify(|entry| entry.push(now)) 270 | .or_insert(vec![now]); 271 | 272 | if current_slot != slot && current_slot != 0 { 273 | info!("New Slot: {}", slot); 274 | info!( 275 | "Slot: {} - account data transferred: {:.2} MiB", 276 | slot, 277 | *bytes_per_slot.get(¤t_slot).unwrap() as f64 278 | / 1024.0 279 | / 1024.0 280 | ); 281 | 282 | info!( 283 | "Slot: {} - num of update messages: {}", 284 | slot, 285 | updates_per_slot.get(¤t_slot).unwrap() 286 | ); 287 | 288 | let per_account_updates = wallclock_updates_per_slot_account 289 | .iter() 290 | .filter(|((slot, _pubkey), _)| slot == ¤t_slot) 291 | .map(|((_slot, _pubkey), updates)| updates.len() as f64) 292 | .sorted_by(|a, b| a.partial_cmp(b).unwrap()) 293 | .collect_vec(); 294 | let per_account_updates_histogram = 295 | histogram_percentiles::calculate_percentiles(&per_account_updates); 296 | info!( 297 | "Per-account updates histogram: {}", 298 | per_account_updates_histogram 299 | ); 300 | 301 | if let Some(actual_block_time) = block_time_per_slot.get(¤t_slot) 302 | { 303 | info!( 304 | "Block time for slot {}: delta {} seconds", 305 | current_slot, 306 | account_receive_time - *actual_block_time 307 | ); 308 | } 309 | 310 | let 
wallclock_minmax = wallclock_updates_per_slot_account 311 | .iter() 312 | .filter(|((slot, _pubkey), _)| slot == ¤t_slot) 313 | .flat_map(|((_slot, _pubkey), updates)| updates) 314 | .minmax(); 315 | if let Some((min, max)) = wallclock_minmax.into_option() { 316 | info!("Wallclock timestamp between first and last account update received for slot {}: {:.2}s", 317 | current_slot, 318 | max.duration_since(*min).unwrap().as_secs_f64() 319 | ); 320 | } 321 | } // -- slot changed 322 | current_slot = slot; 323 | 324 | let latest_slot = current_processed_slot.load(Ordering::Relaxed); 325 | 326 | if latest_slot != 0 { 327 | // the perfect is value "-1" 328 | let delta = (latest_slot as i64) - (slot as i64); 329 | if debouncer.can_fire() { 330 | let is_lagging = delta > -1; 331 | let is_lagging_a_lot = delta - 20 > -1; 332 | let info_text = if is_lagging { 333 | if is_lagging_a_lot { 334 | "A LOT" 335 | } else { 336 | "a bit" 337 | } 338 | } else { 339 | "good" 340 | }; 341 | // Account info for upcoming slot {} was {} behind current processed slot 342 | debug!( 343 | "Account update slot {}, delta: {} - {}", 344 | slot, delta, info_text 345 | ); 346 | } 347 | } 348 | } 349 | None => {} 350 | _ => {} 351 | }, 352 | None => { 353 | log::warn!("multiplexer channel closed - aborting"); 354 | return; 355 | } 356 | Some(Message::Connecting(_)) => {} 357 | } 358 | } 359 | }); 360 | } 361 | 362 | fn delta_compress(prev_data: &[u8], data: &[u8]) { 363 | let xor_region = min(prev_data.len(), data.len()); 364 | let mut xor_diff = vec![0u8; xor_region]; 365 | 366 | let mut equal = 0; 367 | for i in 0..xor_region { 368 | xor_diff[i] = prev_data[i] ^ data[i]; 369 | equal |= xor_diff[i]; 370 | } 371 | 372 | if equal == 0 && prev_data.len() == data.len() { 373 | info!("no difference in data"); 374 | return; 375 | } 376 | 377 | let count_non_zero = xor_diff.iter().filter(|&x| *x != 0).count(); 378 | info!( 379 | "count_non_zero={} xor_region={}", 380 | count_non_zero, xor_region 381 | ); 
382 | // info!("hex {:02X?}", xor_data); 383 | 384 | let compressed_xor = lz4_flex::compress_prepend_size(&xor_diff); 385 | info!( 386 | "compressed size of xor: {} (was {})", 387 | compressed_xor.len(), 388 | xor_diff.len() 389 | ); 390 | 391 | let compressed_data = lz4_flex::compress_prepend_size(data); 392 | info!( 393 | "compressed size of data: {} (was {})", 394 | compressed_data.len(), 395 | data.len() 396 | ); 397 | } 398 | 399 | fn get_epoch_sec() -> UnixTimestamp { 400 | SystemTime::now() 401 | .duration_since(UNIX_EPOCH) 402 | .unwrap() 403 | .as_secs() as UnixTimestamp 404 | } 405 | 406 | pub fn token_accounts() -> SubscribeRequest { 407 | let mut accounts_subs = HashMap::new(); 408 | accounts_subs.insert( 409 | "client".to_string(), 410 | SubscribeRequestFilterAccounts { 411 | account: vec![], 412 | // vec!["4DoNfFBfF7UokCC2FQzriy7yHK6DY6NVdYpuekQ5pRgg".to_string()], 413 | owner: spl_token_ids() 414 | .iter() 415 | .map(|pubkey| pubkey.to_string()) 416 | .collect(), 417 | filters: vec![], 418 | nonempty_txn_signature: None, 419 | }, 420 | ); 421 | 422 | SubscribeRequest { 423 | accounts: accounts_subs, 424 | ..Default::default() 425 | } 426 | } 427 | 428 | pub fn all_accounts_and_blocksmeta() -> SubscribeRequest { 429 | let mut accounts_subs = HashMap::new(); 430 | accounts_subs.insert( 431 | "client".to_string(), 432 | SubscribeRequestFilterAccounts { 433 | account: vec![], 434 | owner: vec![], 435 | filters: vec![], 436 | nonempty_txn_signature: None, 437 | }, 438 | ); 439 | 440 | let mut slots_subs = HashMap::new(); 441 | slots_subs.insert( 442 | "client".to_string(), 443 | SubscribeRequestFilterSlots { 444 | filter_by_commitment: Some(true), 445 | interslot_updates: Some(false), 446 | }, 447 | ); 448 | 449 | let mut blocks_meta_subs = HashMap::new(); 450 | blocks_meta_subs.insert("client".to_string(), SubscribeRequestFilterBlocksMeta {}); 451 | 452 | SubscribeRequest { 453 | slots: slots_subs, 454 | accounts: accounts_subs, 455 | blocks_meta: 
/// Rate limiter that lets an action through at most once per cooldown window.
///
/// Thread-safe: the time of the last successful fire is held in an
/// `AtomicI64` (milliseconds since construction) and advanced with a CAS
/// loop via `fetch_update`, so when several threads race, at most one of
/// them wins any given window.
#[derive(Debug)]
pub struct Debouncer {
    started_at: Instant,
    cooldown_ms: i64,
    last: AtomicI64,
}

impl Debouncer {
    /// Create a debouncer with the given cooldown window.
    pub fn new(cooldown: Duration) -> Self {
        Self {
            started_at: Instant::now(),
            cooldown_ms: cooldown.as_millis() as i64,
            last: AtomicI64::new(0),
        }
    }

    /// Returns `true` (and records this call as the new "last fire") when
    /// strictly more than the cooldown has elapsed since the previous
    /// successful call — or since construction, for the first call.
    ///
    /// Note that because the counter starts at 0, the very first call only
    /// succeeds once a full cooldown has passed after `new`.
    pub fn can_fire(&self) -> bool {
        let now_ms = self.started_at.elapsed().as_millis() as i64;

        self.last
            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |last_fired_ms| {
                // Only advance the marker when the window has fully elapsed;
                // returning `None` leaves it untouched and reports "no fire".
                (now_ms - last_fired_ms > self.cooldown_ms).then_some(now_ms)
            })
            .is_ok()
    }
}

#[allow(dead_code)]
fn main() {}
to see if there is a difference in timing of "processed accounts" in the mix with slot vs "only processed accounts" 11 | use log::{info, warn}; 12 | use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel}; 13 | use std::collections::HashMap; 14 | use std::env; 15 | use std::time::SystemTime; 16 | 17 | use solana_sdk::pubkey::Pubkey; 18 | 19 | use tokio::sync::broadcast; 20 | use tokio::sync::mpsc::Receiver; 21 | 22 | use yellowstone_grpc_proto::geyser::{ 23 | SubscribeRequest, SubscribeRequestFilterBlocksMeta, SubscribeRequestFilterSlots, 24 | SubscribeRequestFilterTransactions, SubscribeUpdateSlot, 25 | }; 26 | 27 | use geyser_grpc_connector::grpc_subscription_autoreconnect_tasks::create_geyser_autoconnection_task_with_mpsc; 28 | use geyser_grpc_connector::{ 29 | map_commitment_level, GrpcConnectionTimeouts, GrpcSourceConfig, Message, 30 | }; 31 | use tokio::time::{sleep, Duration}; 32 | use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof; 33 | use yellowstone_grpc_proto::prelude::SubscribeRequestFilterAccounts; 34 | 35 | fn start_all_slots_and_processed_accounts_consumer(mut slots_channel: Receiver) { 36 | tokio::spawn(async move { 37 | loop { 38 | match slots_channel.recv().await { 39 | Some(Message::GeyserSubscribeUpdate(update)) => match update.update_oneof { 40 | Some(UpdateOneof::Slot(update_slot)) => { 41 | let since_epoch_ms = SystemTime::now() 42 | .duration_since(SystemTime::UNIX_EPOCH) 43 | .unwrap() 44 | .as_millis(); 45 | 46 | let short_status = match map_slot_status(&update_slot) { 47 | CommitmentLevel::Processed => "P", 48 | CommitmentLevel::Confirmed => "C", 49 | CommitmentLevel::Finalized => "F", 50 | }; 51 | // DUMPSLOT 283356662,283356661,F,1723556492340 52 | info!( 53 | "MIXSLOT {},{:09},{},{}", 54 | update_slot.slot, 55 | update_slot.parent.unwrap_or(0), 56 | short_status, 57 | since_epoch_ms 58 | ); 59 | } 60 | Some(UpdateOneof::BlockMeta(update_block_meta)) => { 61 | info!("block meta {:?}", update_block_meta); 
62 | } 63 | Some(UpdateOneof::Transaction(update_transaction)) => { 64 | info!("transaction slot {:?}", update_transaction.slot); 65 | } 66 | // Some(UpdateOneof::Account(update_account)) => { 67 | // let since_epoch_ms = SystemTime::now() 68 | // .duration_since(SystemTime::UNIX_EPOCH) 69 | // .unwrap() 70 | // .as_millis(); 71 | // 72 | // let account_info = update_account.account.unwrap(); 73 | // let slot = update_account.slot; 74 | // let account_pk = 75 | // Pubkey::new_from_array(account_info.pubkey.try_into().unwrap()); 76 | // let write_version = account_info.write_version; 77 | // let data_len = account_info.data.len(); 78 | // // DUMPACCOUNT 283417593,HTQeo4GNbZfGY5G4fAkDr1S5xnz5qWXFgueRwgw53aU1,1332997857270,752,1723582355872 79 | // info!( 80 | // "MIXACCOUNT {},{},{},{},{}", 81 | // slot, account_pk, write_version, data_len, since_epoch_ms 82 | // ); 83 | // } 84 | None => {} 85 | _ => {} 86 | }, 87 | None => { 88 | warn!("multiplexer channel closed - aborting"); 89 | return; 90 | } 91 | Some(Message::Connecting(_)) => {} 92 | } 93 | } 94 | }); 95 | } 96 | 97 | // need to provide the commitment level used to filter the accounts 98 | fn start_account_same_level( 99 | mut slots_channel: Receiver, 100 | commitment_level: CommitmentLevel, 101 | ) { 102 | tokio::spawn(async move { 103 | loop { 104 | match slots_channel.recv().await { 105 | Some(Message::GeyserSubscribeUpdate(update)) => match update.update_oneof { 106 | Some(UpdateOneof::Account(update_account)) => { 107 | let since_epoch_ms = SystemTime::now() 108 | .duration_since(SystemTime::UNIX_EPOCH) 109 | .unwrap() 110 | .as_millis(); 111 | 112 | let account_info = update_account.account.unwrap(); 113 | let slot = update_account.slot; 114 | let account_pk = 115 | Pubkey::new_from_array(account_info.pubkey.try_into().unwrap()); 116 | let write_version = account_info.write_version; 117 | let data_len = account_info.data.len(); 118 | 119 | let short_status = match commitment_level { 120 | 
CommitmentLevel::Processed => "P", 121 | CommitmentLevel::Confirmed => "C", 122 | CommitmentLevel::Finalized => "F", 123 | }; 124 | 125 | // DUMPACCOUNT 283417593,HTQeo4GNbZfGY5G4fAkDr1S5xnz5qWXFgueRwgw53aU1,1332997857270,752,1723582355872 126 | info!( 127 | "DUMPACCOUNT {},{},{},{},{},{}", 128 | slot, short_status, account_pk, write_version, data_len, since_epoch_ms 129 | ); 130 | } 131 | None => {} 132 | _ => {} 133 | }, 134 | None => { 135 | warn!("multiplexer channel closed - aborting"); 136 | return; 137 | } 138 | Some(Message::Connecting(_)) => {} 139 | } 140 | } 141 | }); 142 | } 143 | 144 | fn map_slot_status( 145 | slot_update: &SubscribeUpdateSlot, 146 | ) -> solana_sdk::commitment_config::CommitmentLevel { 147 | use solana_sdk::commitment_config::CommitmentLevel as solanaCL; 148 | use yellowstone_grpc_proto::geyser::CommitmentLevel as yCL; 149 | yellowstone_grpc_proto::geyser::CommitmentLevel::try_from(slot_update.status) 150 | .map(|v| match v { 151 | yCL::Processed => solanaCL::Processed, 152 | yCL::Confirmed => solanaCL::Confirmed, 153 | yCL::Finalized => solanaCL::Finalized, 154 | }) 155 | .expect("valid commitment level") 156 | } 157 | 158 | #[tokio::main(flavor = "current_thread")] 159 | pub async fn main() { 160 | tracing_subscriber::fmt::init(); 161 | 162 | let grpc_addr_green = env::var("GRPC_ADDR").expect("need grpc url for green"); 163 | let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok(); 164 | 165 | info!( 166 | "Using gRPC source {} ({})", 167 | grpc_addr_green, 168 | grpc_x_token_green.is_some() 169 | ); 170 | 171 | let timeouts = GrpcConnectionTimeouts { 172 | connect_timeout: Duration::from_secs(5), 173 | request_timeout: Duration::from_secs(5), 174 | subscribe_timeout: Duration::from_secs(5), 175 | receive_timeout: Duration::from_secs(5), 176 | }; 177 | 178 | let green_config = 179 | GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone()); 180 | 181 | let (_exit_signal, exit_notify) = broadcast::channel(1); 
182 | 183 | // mix of (all) slots and processed accounts 184 | let (autoconnect_tx, slots_accounts_rx) = tokio::sync::mpsc::channel(10); 185 | let _green_stream_ah = create_geyser_autoconnection_task_with_mpsc( 186 | green_config.clone(), 187 | all_slots_and_processed_accounts_together(), 188 | autoconnect_tx.clone(), 189 | exit_notify.resubscribe(), 190 | ); 191 | 192 | // let (only_processed_accounts_tx, only_processed_accounts_rx) = tokio::sync::mpsc::channel(10); 193 | // let _accounts_processed_stream_ah = create_geyser_autoconnection_task_with_mpsc( 194 | // green_config.clone(), 195 | // accounts_at_level(CommitmentLevel::Processed), 196 | // only_processed_accounts_tx.clone(), 197 | // exit_notify.resubscribe(), 198 | // ); 199 | // 200 | // let (only_confirmed_accounts_tx, only_confirmed_accounts_rx) = tokio::sync::mpsc::channel(10); 201 | // let _accounts_confirmed_stream_ah = create_geyser_autoconnection_task_with_mpsc( 202 | // green_config.clone(), 203 | // accounts_at_level(CommitmentLevel::Confirmed), 204 | // only_confirmed_accounts_tx.clone(), 205 | // exit_notify.resubscribe(), 206 | // ); 207 | // 208 | // let (only_finalized_accounts_tx, only_finalized_accounts_rx) = tokio::sync::mpsc::channel(10); 209 | // let _accounts_finalized_stream_ah = create_geyser_autoconnection_task_with_mpsc( 210 | // green_config.clone(), 211 | // accounts_at_level(CommitmentLevel::Finalized), 212 | // only_finalized_accounts_tx.clone(), 213 | // exit_notify.resubscribe(), 214 | // ); 215 | 216 | start_all_slots_and_processed_accounts_consumer(slots_accounts_rx); 217 | // start_account_same_level(only_processed_accounts_rx, CommitmentLevel::Processed); 218 | // start_account_same_level(only_confirmed_accounts_rx, CommitmentLevel::Confirmed); 219 | // start_account_same_level(only_finalized_accounts_rx, CommitmentLevel::Finalized); 220 | 221 | // "infinite" sleep 222 | sleep(Duration::from_secs(3600 * 5)).await; 223 | } 224 | 225 | const RAYDIUM_AMM_PUBKEY: &str = 
"675kPX9MHTjS2zt1qfr1NYHuzeLXfQM9H24wFSUt1Mp8"; 226 | 227 | fn all_slots_and_processed_accounts_together() -> SubscribeRequest { 228 | let mut slot_subs = HashMap::new(); 229 | slot_subs.insert( 230 | "client".to_string(), 231 | SubscribeRequestFilterSlots { 232 | // implies all slots 233 | filter_by_commitment: None, 234 | interslot_updates: Some(false), 235 | }, 236 | ); 237 | 238 | let transactions = HashMap::from([( 239 | "geyser_tracker_tx".to_string(), 240 | SubscribeRequestFilterTransactions { 241 | vote: Some(false), 242 | failed: None, 243 | signature: None, 244 | account_include: vec![], 245 | account_exclude: vec![], 246 | account_required: vec![], 247 | }, 248 | )]); 249 | 250 | let blocks_meta = HashMap::from([( 251 | "geyser_tracker_blocks_meta".to_string(), 252 | SubscribeRequestFilterBlocksMeta {}, 253 | )]); 254 | 255 | // let mut account_subs = HashMap::new(); 256 | // account_subs.insert( 257 | // "client".to_string(), 258 | // SubscribeRequestFilterAccounts { 259 | // account: vec![], 260 | // owner: vec![RAYDIUM_AMM_PUBKEY.to_string()], 261 | // filters: vec![], 262 | // nonempty_txn_signature: None, 263 | // }, 264 | // ); 265 | 266 | SubscribeRequest { 267 | slots: slot_subs, 268 | // accounts: account_subs, 269 | ping: None, 270 | transactions, 271 | blocks_meta, 272 | // implies "processed" 273 | commitment: None, 274 | ..Default::default() 275 | } 276 | } 277 | 278 | fn accounts_at_level(commitment_level: CommitmentLevel) -> SubscribeRequest { 279 | let mut account_subs = HashMap::new(); 280 | account_subs.insert( 281 | "client".to_string(), 282 | SubscribeRequestFilterAccounts { 283 | account: vec![], 284 | owner: vec![RAYDIUM_AMM_PUBKEY.to_string()], 285 | filters: vec![], 286 | nonempty_txn_signature: None, 287 | }, 288 | ); 289 | 290 | SubscribeRequest { 291 | accounts: account_subs, 292 | ping: None, 293 | commitment: Some(map_commitment_level(CommitmentConfig { 294 | commitment: commitment_level, 295 | }) as i32), 296 | 
..Default::default() 297 | } 298 | } 299 | 300 | #[test] 301 | fn parse_output() { 302 | let data = "283360248,000000000,C,1723558000558"; 303 | let mut rdr = ReaderBuilder::new() 304 | .has_headers(false) 305 | .from_reader(data.as_bytes()); 306 | 307 | let all_records = rdr.records().collect_vec(); 308 | assert_eq!(1, all_records.len()); 309 | let record = all_records[0].as_ref().unwrap(); 310 | 311 | let slot: u64 = record[0].parse().unwrap(); 312 | let parent: Option = record[1] 313 | .parse() 314 | .ok() 315 | .and_then(|v| if v == 0 { None } else { Some(v) }); 316 | let status = match record[2].to_string().as_str() { 317 | "P" => CommitmentLevel::Processed, 318 | "C" => CommitmentLevel::Confirmed, 319 | "F" => CommitmentLevel::Finalized, 320 | _ => panic!("invalid commitment level"), 321 | }; 322 | 323 | assert_eq!(283360248, slot); 324 | assert_eq!(None, parent); 325 | assert_eq!(CommitmentLevel::Confirmed, status); 326 | } 327 | -------------------------------------------------------------------------------- /examples/dump_txs_stream_samples.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::env; 3 | use itertools::Itertools; 4 | use log::info; 5 | use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel}; 6 | use solana_sdk::pubkey::Pubkey; 7 | use solana_sdk::signature::Signature; 8 | use tokio::sync::broadcast; 9 | use tokio::time::Duration; 10 | use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof; 11 | use yellowstone_grpc_proto::geyser::{SubscribeRequest, SubscribeRequestFilterTransactions}; 12 | 13 | use geyser_grpc_connector::grpc_subscription_autoreconnect_tasks::create_geyser_autoconnection_task_with_mpsc; 14 | use geyser_grpc_connector::{ 15 | map_commitment_level, GrpcConnectionTimeouts, GrpcSourceConfig, Message, 16 | }; 17 | 18 | #[tokio::main] 19 | pub async fn main() { 20 | tracing_subscriber::fmt::init(); 21 | 22 | let grpc_addr_green 
= env::var("GRPC_ADDR").expect("need grpc url for green"); 23 | let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok(); 24 | 25 | let (_foo, exit_notify) = broadcast::channel(1); 26 | 27 | info!( 28 | "Using gRPC source {} ({})", 29 | grpc_addr_green, 30 | grpc_x_token_green.is_some() 31 | ); 32 | 33 | let timeouts = GrpcConnectionTimeouts { 34 | connect_timeout: Duration::from_secs(5), 35 | request_timeout: Duration::from_secs(5), 36 | subscribe_timeout: Duration::from_secs(5), 37 | receive_timeout: Duration::from_secs(5), 38 | }; 39 | 40 | let green_config = 41 | GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone()); 42 | 43 | let (autoconnect_tx, mut transactions_rx) = tokio::sync::mpsc::channel(10); 44 | let _tx_source_ah = create_geyser_autoconnection_task_with_mpsc( 45 | green_config.clone(), 46 | jupyter_and_dflow_trades(), 47 | autoconnect_tx.clone(), 48 | exit_notify, 49 | ); 50 | 51 | loop { 52 | let message = transactions_rx.recv().await; 53 | if let Some(Message::GeyserSubscribeUpdate(update)) = message { 54 | match update.update_oneof { 55 | Some(UpdateOneof::Transaction(update)) => { 56 | let tx = update.transaction.unwrap(); 57 | let sig = Signature::try_from(tx.signature.as_slice()).unwrap(); 58 | let account_keys = 59 | tx.transaction.unwrap().message.unwrap() 60 | .account_keys 61 | .into_iter() 62 | .map(|key| { 63 | let bytes: [u8; 32] = 64 | key.try_into().unwrap_or(Pubkey::default().to_bytes()); 65 | Pubkey::new_from_array(bytes) 66 | }) 67 | .collect_vec(); 68 | let is_jup = account_keys.iter().any(|key| { 69 | key.to_string() == "JUP6LkbZbjS1jKKwapdHNy74zcZ3tLUZoi5QNyVTaV4" 70 | }); 71 | let is_dflow = account_keys.iter().any(|key| { 72 | key.to_string() == "DF1ow4tspfHX9JwWJsAb9epbkA8hmpSEAtxXy1V27QBH" 73 | }); 74 | info!("tx {} ({},{})", sig, is_jup, is_dflow); 75 | } 76 | _ => unimplemented!(), 77 | } 78 | } 79 | } 80 | } 81 | 82 | fn jupyter_and_dflow_trades() -> SubscribeRequest { 83 | let mut 
transaction_subs = HashMap::new(); 84 | transaction_subs.insert( 85 | "client".to_string(), 86 | SubscribeRequestFilterTransactions { 87 | vote: Some(false), 88 | failed: Some(false), 89 | signature: None, 90 | account_include: vec![ 91 | "JUP6LkbZbjS1jKKwapdHNy74zcZ3tLUZoi5QNyVTaV4".to_string(), 92 | "DF1ow4tspfHX9JwWJsAb9epbkA8hmpSEAtxXy1V27QBH".to_string(),], 93 | account_exclude: vec![], 94 | account_required: vec![], 95 | }, 96 | ); 97 | 98 | SubscribeRequest { 99 | transactions: transaction_subs, 100 | ping: None, 101 | commitment: Some(map_commitment_level(CommitmentConfig { 102 | commitment: CommitmentLevel::Confirmed, 103 | }) as i32), 104 | ..Default::default() 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /examples/parse_timestamp_tagged_logs.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use regex::Regex; 3 | use std::collections::HashMap; 4 | use std::fs::File; 5 | use std::io; 6 | use std::io::BufRead; 7 | use std::path::PathBuf; 8 | 9 | pub fn parse_log_entry_subscriber(log_entry: &str) -> (u64, u64) { 10 | let re = Regex::new(r".*got account update: write_version=(?P\d+);timestamp_us=(?P\d+);slot=(?P\d+)").unwrap(); 11 | let caps = re.captures(log_entry).unwrap(); 12 | 13 | // let mut result = HashMap::new(); 14 | // result.insert("write_version".to_string(), caps["write_version"].to_string()); 15 | // result.insert("timestamp_us".to_string(), caps["timestamp_us"].to_string()); 16 | // result.insert("slot".to_string(), caps["slot"].to_string()); 17 | 18 | let write_version: u64 = caps["write_version"].parse().unwrap(); 19 | let timestamp_us: u64 = caps["timestamp_us"].parse().unwrap(); 20 | 21 | (write_version, timestamp_us) 22 | } 23 | 24 | pub fn parse_log_entry_source(log_entry: &str) -> (u64, u64) { 25 | let re = Regex::new(r".*account update: write_version=(?P\d+);timestamp_us=(?P\d+);slot=(?P\d+)").unwrap(); 26 | let caps = 
re.captures(log_entry).unwrap(); 27 | 28 | // let mut result = HashMap::new(); 29 | // result.insert("write_version".to_string(), caps["write_version"].to_string()); 30 | // result.insert("timestamp_us".to_string(), caps["timestamp_us"].to_string()); 31 | // result.insert("slot".to_string(), caps["slot"].to_string()); 32 | 33 | let write_version: u64 = caps["write_version"].parse().unwrap(); 34 | let timestamp_us: u64 = caps["timestamp_us"].parse().unwrap(); 35 | 36 | (write_version, timestamp_us) 37 | } 38 | 39 | fn read_subscriber_log(log_file: PathBuf) -> HashMap { 40 | let mut map: HashMap = HashMap::new(); 41 | 42 | let file = File::open(log_file).expect("file must exist"); 43 | let reader = io::BufReader::new(file); 44 | for line in reader.lines().take(1000) { 45 | let line = line.expect("must be parsable"); 46 | let (write_version, timestamp_us) = parse_log_entry_subscriber(&line); 47 | // println!("{:?}", parsed); 48 | map.insert(write_version, timestamp_us); 49 | } 50 | 51 | map 52 | } 53 | 54 | fn read_source_log(log_file: PathBuf) -> HashMap { 55 | let mut map: HashMap = HashMap::new(); 56 | 57 | let file = File::open(log_file).expect("file must exist"); 58 | let reader = io::BufReader::new(file); 59 | for line in reader.lines().take(1000) { 60 | let line = line.expect("must be parsable"); 61 | let (write_version, timestamp_us) = parse_log_entry_source(&line); 62 | // println!("{:?}", parsed); 63 | map.insert(write_version, timestamp_us); 64 | } 65 | 66 | map 67 | } 68 | 69 | // cat macbook.log |cut -b 111- | tr -d 'a-z_=' > macbook.log.csv 70 | // cat solana-validator-macbook.log | cut -b 96- | tr -d 'a-z_=' 71 | fn read_from_csv(csv_file: PathBuf) -> HashMap { 72 | csv::ReaderBuilder::new() 73 | .delimiter(b';') 74 | .has_headers(false) 75 | .from_path(csv_file) 76 | .unwrap() 77 | .into_deserialize() 78 | .map(|record| { 79 | let record: Vec = record.unwrap(); 80 | let write_version = record[0].parse::().unwrap(); 81 | let timestamp_us = 
record[1].parse::().unwrap(); 82 | (write_version, timestamp_us) 83 | }) 84 | .collect::>() 85 | } 86 | 87 | #[allow(dead_code)] 88 | fn read_subscriber_log_csv(csv_file: PathBuf) -> HashMap { 89 | csv::ReaderBuilder::new() 90 | .delimiter(b';') 91 | .has_headers(false) 92 | .from_path(csv_file) 93 | .unwrap() 94 | .into_deserialize() 95 | .map(|record| { 96 | let record: Vec = record.unwrap(); 97 | let write_version = record[0].parse::().unwrap(); 98 | let timestamp_us = record[1].parse::().unwrap(); 99 | (write_version, timestamp_us) 100 | }) 101 | .collect::>() 102 | } 103 | 104 | #[derive(Parser, Debug)] 105 | #[command(author, version, about, long_about = None)] 106 | pub struct Args { 107 | #[arg(long)] 108 | pub csv_file_source: String, 109 | #[arg(long)] 110 | pub csv_file_subscriber: String, 111 | } 112 | 113 | pub fn main() { 114 | let Args { 115 | csv_file_source, 116 | csv_file_subscriber, 117 | } = Args::parse(); 118 | 119 | println!("Reading source log ..."); 120 | let source_timestamps = read_from_csv(PathBuf::from(csv_file_source)); 121 | 122 | println!("Reading subscriber log ..."); 123 | let subscriber_timestamps = read_from_csv(PathBuf::from(csv_file_subscriber)); 124 | 125 | for (write_version, timestamp_us) in subscriber_timestamps.into_iter() { 126 | if let Some(source_timestamp) = source_timestamps.get(&write_version) { 127 | let diff = (timestamp_us as i128) - (*source_timestamp as i128); 128 | println!( 129 | "write_version: {}, subscriber: {}, source: {}, diff: {:.1}ms", 130 | write_version, 131 | timestamp_us, 132 | source_timestamp, 133 | diff as f64 / 1000.0 134 | ); 135 | } 136 | } 137 | } 138 | 139 | pub fn main__() { 140 | println!("Reading subscriber log ..."); 141 | let subscriber_timestamps = read_subscriber_log(PathBuf::from( 142 | "/Users/stefan/mango/projects/geyser-misc/accounts-stream-performance/macbook.log", 143 | )); 144 | 145 | println!("Reading source log ..."); 146 | let source_timestamps = 
read_source_log(PathBuf::from("/Users/stefan/mango/projects/geyser-misc/accounts-stream-performance/solana-validator-macbook.log")); 147 | 148 | println!("Comparing ..."); 149 | 150 | for (write_version, timestamp_us) in subscriber_timestamps.into_iter() { 151 | // println!("write_version: {}, subscriber: {}", write_version, timestamp_us); 152 | if let Some(source_timestamp) = source_timestamps.get(&write_version) { 153 | let diff = (timestamp_us as i128) - (*source_timestamp as i128); 154 | println!( 155 | "write_version: {}, subscriber: {}, source: {}, diff: {}", 156 | write_version, timestamp_us, source_timestamp, diff 157 | ); 158 | } 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /examples/parse_yellowstone_timetagged_log.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use std::collections::HashMap; 3 | use std::fs::File; 4 | use std::io; 5 | use std::io::BufRead; 6 | use std::path::PathBuf; 7 | 8 | #[derive(Parser, Debug)] 9 | #[command(author, version, about, long_about = None)] 10 | pub struct Args { 11 | #[arg(long)] 12 | pub log_file: String, 13 | } 14 | 15 | pub fn main() { 16 | let Args { log_file } = Args::parse(); 17 | println!("Reading log file: {}", log_file); 18 | 19 | let log_file = PathBuf::from(log_file); 20 | 21 | const LIMIT_LINES: usize = 10000000; 22 | 23 | let mut timetag_sending_to_buffer: HashMap = HashMap::new(); 24 | let mut timetag_before_sending_grpc: HashMap = HashMap::new(); 25 | // contains only matches from previous sets 26 | let mut timetag_geyser: HashMap = HashMap::new(); 27 | 28 | let mut count_sending_to_buffer_channel = 0; 29 | { 30 | let file = File::open(&log_file).expect("file must exist"); 31 | let reader = io::BufReader::new(file); 32 | for line in reader.lines().take(LIMIT_LINES) { 33 | let line = line.expect("must be parsable"); 34 | // println!("-> buffer channel"); 35 | if let Some((write_version, 
timestamp_us)) = 36 | parse_log_entry_sending_to_buffer_channel(line) 37 | { 38 | count_sending_to_buffer_channel += 1; 39 | timetag_sending_to_buffer.insert(write_version, timestamp_us); 40 | } 41 | } 42 | } 43 | 44 | let mut count_sending_grpc = 0; 45 | { 46 | let file = File::open(&log_file).expect("file must exist"); 47 | let reader = io::BufReader::new(file); 48 | for line in reader.lines().take(LIMIT_LINES) { 49 | let line = line.expect("must be parsable"); 50 | // println!("-> when sending to grpc"); 51 | if let Some((write_version, timestamp_us)) = parse_log_entry_before_sending_grpc(line) { 52 | count_sending_grpc += 1; 53 | timetag_before_sending_grpc.insert(write_version, timestamp_us); 54 | } 55 | } 56 | } 57 | 58 | // THIS is by far the largest set 59 | let mut count_at_geyser = 0; 60 | { 61 | let file = File::open(&log_file).expect("file must exist"); 62 | let reader = io::BufReader::new(file); 63 | for line in reader.lines().take(LIMIT_LINES) { 64 | let line = line.expect("must be parsable"); 65 | // println!("-> at geyser interface"); 66 | if let Some((write_version, timestamp_us)) = parse_log_entry_at_geyser_interface(line) { 67 | count_at_geyser += 1; 68 | if timetag_sending_to_buffer.contains_key(&write_version) 69 | && timetag_before_sending_grpc.contains_key(&write_version) 70 | { 71 | timetag_geyser.insert(write_version, timestamp_us); 72 | } 73 | } 74 | } 75 | } 76 | 77 | println!("Count at geyser interface: {}", count_at_geyser); 78 | println!( 79 | "Count sending to buffer channel: {}", 80 | count_sending_to_buffer_channel 81 | ); 82 | println!("Count sending to grpc: {}", count_sending_grpc); 83 | 84 | for (write_version, geyser_timestamp_us) in timetag_geyser { 85 | let timestamp_sending_to_buffer = timetag_sending_to_buffer.get(&write_version).unwrap(); 86 | let timestamp_before_sending_grpc = 87 | timetag_before_sending_grpc.get(&write_version).unwrap(); 88 | let delta1 = timestamp_sending_to_buffer - geyser_timestamp_us; 89 | let 
/// Parse a geyser-interface log line of the shape
/// `<prefix>: write_version=<u64>;timestamp_us=<u64>;slot=<u64>`.
///
/// Returns `Some((write_version, timestamp_us))` for well-formed lines and
/// `None` otherwise. The original indexed and `unwrap`ped its way through
/// the line and would panic on malformed input despite the `Option`
/// signature; all of those paths now return `None` instead.
fn parse_log_entry_at_geyser_interface(log_line: String) -> Option<(u64, u64)> {
    if !log_line.contains("account update inspect from geyser") {
        return None;
    }

    // Everything after the first ": " is the `key=value;key=value;...` payload.
    let (_prefix, data) = log_line.split_once(": ")?;

    // Yield the `value` of each `key=value` field, in order.
    let mut values = data
        .split(';')
        .map(|field| field.split_once('=').map(|(_key, value)| value));

    // First `?` = field missing, second `?` = field had no '='.
    let write_version: u64 = values.next()??.parse().ok()?;
    let timestamp_us: u64 = values.next()??.parse().ok()?;
    // The slot field is unused by callers, but parsing it preserves the
    // original's validation that a third numeric field is present.
    let _slot: u64 = values.next()??.parse().ok()?;

    Some((write_version, timestamp_us))
}
/// Parse a "before sending to grpc" log line of the shape
/// `<prefix>: write_version=<u64>;timestamp_us=<u64>;slot=<u64>`.
///
/// Returns `Some((write_version, timestamp_us))` for well-formed lines and
/// `None` otherwise. The original indexed and `unwrap`ped its way through
/// the line and would panic on malformed input despite the `Option`
/// signature (its comment also said "third part" while using `parts[1]`,
/// the second); all of those paths now return `None` instead.
fn parse_log_entry_before_sending_grpc(log_line: String) -> Option<(u64, u64)> {
    if !log_line.contains("before sending to grpc") {
        return None;
    }

    // Everything after the first ": " is the `key=value;key=value;...` payload.
    let (_prefix, data) = log_line.split_once(": ")?;

    // Yield the `value` of each `key=value` field, in order.
    let mut values = data
        .split(';')
        .map(|field| field.split_once('=').map(|(_key, value)| value));

    // First `?` = field missing, second `?` = field had no '='.
    let write_version: u64 = values.next()??.parse().ok()?;
    let timestamp_us: u64 = values.next()??.parse().ok()?;
    // The slot field is unused by callers, but parsing it preserves the
    // original's validation that a third numeric field is present.
    let _slot: u64 = values.next()??.parse().ok()?;

    Some((write_version, timestamp_us))
}
yellowstone_grpc_proto::geyser::SubscribeUpdate; 15 | use yellowstone_grpc_proto::prost::Message as _; 16 | 17 | pub struct BlockMini { 18 | pub blocksize: usize, 19 | pub slot: Slot, 20 | pub commitment_config: CommitmentConfig, 21 | } 22 | 23 | #[allow(dead_code)] 24 | struct BlockMiniExtractor(CommitmentConfig); 25 | 26 | impl FromYellowstoneExtractor for BlockMiniExtractor { 27 | type Target = BlockMini; 28 | fn map_yellowstone_update(&self, update: SubscribeUpdate) -> Option<(Slot, Self::Target)> { 29 | match update.update_oneof { 30 | Some(UpdateOneof::Block(update_block_message)) => { 31 | let blocksize = update_block_message.encoded_len(); 32 | let slot = update_block_message.slot; 33 | let mini = BlockMini { 34 | blocksize, 35 | slot, 36 | commitment_config: self.0, 37 | }; 38 | Some((slot, mini)) 39 | } 40 | Some(UpdateOneof::BlockMeta(update_blockmeta_message)) => { 41 | let blocksize = update_blockmeta_message.encoded_len(); 42 | let slot = update_blockmeta_message.slot; 43 | let mini = BlockMini { 44 | blocksize, 45 | slot, 46 | commitment_config: self.0, 47 | }; 48 | Some((slot, mini)) 49 | } 50 | _ => None, 51 | } 52 | } 53 | } 54 | 55 | #[allow(dead_code)] 56 | enum TestCases { 57 | Basic, 58 | SlowReceiverStartup, 59 | TemporaryLaggingReceiver, 60 | CloseAfterReceiving, 61 | AbortTaskFromOutside, 62 | } 63 | const TEST_CASE: TestCases = TestCases::Basic; 64 | 65 | #[tokio::main(flavor = "current_thread")] 66 | pub async fn main() { 67 | // RUST_LOG=info,stream_blocks_mainnet=debug,geyser_grpc_connector=trace 68 | tracing_subscriber::fmt::init(); 69 | // console_subscriber::init(); 70 | 71 | let grpc_addr_green = env::var("GRPC_ADDR").expect("need grpc url for green"); 72 | let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok(); 73 | 74 | info!( 75 | "Using grpc source on {} ({})", 76 | grpc_addr_green, 77 | grpc_x_token_green.is_some() 78 | ); 79 | 80 | let timeouts = GrpcConnectionTimeouts { 81 | connect_timeout: Duration::from_secs(5), 82 | 
request_timeout: Duration::from_secs(5), 83 | subscribe_timeout: Duration::from_secs(5), 84 | receive_timeout: Duration::from_secs(5), 85 | }; 86 | 87 | let green_config = 88 | GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone()); 89 | 90 | info!("Write Block stream.."); 91 | 92 | let (_, exit_notify) = broadcast::channel(1); 93 | 94 | let (jh_geyser_task, message_channel) = create_geyser_autoconnection_task( 95 | green_config.clone(), 96 | GeyserFilter(CommitmentConfig::confirmed()).blocks_and_txs(), 97 | exit_notify, 98 | ); 99 | let mut message_channel = 100 | spawn_broadcast_channel_plug(tokio::sync::broadcast::channel(8), message_channel); 101 | 102 | tokio::spawn(async move { 103 | if let TestCases::SlowReceiverStartup = TEST_CASE { 104 | sleep(Duration::from_secs(5)).await; 105 | } 106 | 107 | let mut message_count = 0; 108 | while let Ok(message) = message_channel.recv().await { 109 | if let TestCases::AbortTaskFromOutside = TEST_CASE { 110 | if message_count > 5 { 111 | info!("(testcase) aborting task from outside"); 112 | jh_geyser_task.abort(); 113 | } 114 | } 115 | match message { 116 | Message::GeyserSubscribeUpdate(subscriber_update) => { 117 | message_count += 1; 118 | info!("got update - {} bytes", subscriber_update.encoded_len()); 119 | 120 | if let TestCases::CloseAfterReceiving = TEST_CASE { 121 | info!("(testcase) closing stream after receiving"); 122 | return; 123 | } 124 | } 125 | Message::Connecting(attempt) => { 126 | warn!("Connection attempt: {}", attempt); 127 | } 128 | } 129 | 130 | if let TestCases::TemporaryLaggingReceiver = TEST_CASE { 131 | if message_count % 3 == 1 { 132 | info!("(testcase) lagging a bit"); 133 | sleep(Duration::from_millis(1500)).await; 134 | } 135 | } 136 | } 137 | warn!("Stream aborted"); 138 | }); 139 | 140 | // "infinite" sleep 141 | sleep(Duration::from_secs(2000)).await; 142 | } 143 | -------------------------------------------------------------------------------- 
/examples/stream_blocks_mainnet_stream.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::pin::pin; 3 | 4 | use base64::Engine; 5 | use futures::{Stream, StreamExt}; 6 | use itertools::Itertools; 7 | use log::info; 8 | use solana_sdk::clock::Slot; 9 | use solana_sdk::commitment_config::CommitmentConfig; 10 | use solana_sdk::compute_budget::ComputeBudgetInstruction; 11 | use solana_sdk::hash::Hash; 12 | use solana_sdk::instruction::CompiledInstruction; 13 | use solana_sdk::message::v0::MessageAddressTableLookup; 14 | use solana_sdk::message::{v0, MessageHeader, VersionedMessage}; 15 | use solana_sdk::pubkey::Pubkey; 16 | use solana_sdk::signature::Signature; 17 | use solana_sdk::transaction::TransactionError; 18 | /// This file mocks the core model of the RPC server. 19 | use solana_sdk::{borsh1, compute_budget}; 20 | use solana_sdk::blake3::HASH_BYTES; 21 | use tokio::time::{sleep, Duration}; 22 | use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof; 23 | use yellowstone_grpc_proto::geyser::SubscribeUpdate; 24 | use yellowstone_grpc_proto::geyser::SubscribeUpdateBlock; 25 | 26 | use geyser_grpc_connector::grpc_subscription_autoreconnect_streams::create_geyser_reconnecting_stream; 27 | use geyser_grpc_connector::grpcmultiplex_fastestwins::{ 28 | create_multiplexed_stream, FromYellowstoneExtractor, 29 | }; 30 | use geyser_grpc_connector::{GeyserFilter, GrpcConnectionTimeouts, GrpcSourceConfig}; 31 | 32 | pub mod debouncer; 33 | 34 | fn start_example_block_consumer( 35 | multiplex_stream: impl Stream + Send + 'static, 36 | ) { 37 | tokio::spawn(async move { 38 | let mut block_stream = pin!(multiplex_stream); 39 | while let Some(block) = block_stream.next().await { 40 | info!( 41 | "emitted block #{}@{} from multiplexer", 42 | block.slot, block.commitment_config.commitment 43 | ); 44 | } 45 | }); 46 | } 47 | 48 | fn start_example_blockmeta_consumer( 49 | multiplex_stream: impl Stream + Send + 
'static, 50 | ) { 51 | tokio::spawn(async move { 52 | let mut blockmeta_stream = pin!(multiplex_stream); 53 | while let Some(mini) = blockmeta_stream.next().await { 54 | info!( 55 | "emitted blockmeta #{}@{} from multiplexer", 56 | mini.slot, mini.commitment_config.commitment 57 | ); 58 | } 59 | }); 60 | } 61 | 62 | struct BlockExtractor(CommitmentConfig); 63 | 64 | impl FromYellowstoneExtractor for BlockExtractor { 65 | type Target = ProducedBlock; 66 | fn map_yellowstone_update(&self, update: SubscribeUpdate) -> Option<(Slot, Self::Target)> { 67 | match update.update_oneof { 68 | Some(UpdateOneof::Block(update_block_message)) => { 69 | let block = map_produced_block(update_block_message, self.0); 70 | Some((block.slot, block)) 71 | } 72 | _ => None, 73 | } 74 | } 75 | } 76 | 77 | pub struct BlockMetaMini { 78 | pub slot: Slot, 79 | pub commitment_config: CommitmentConfig, 80 | } 81 | 82 | struct BlockMetaExtractor(CommitmentConfig); 83 | 84 | impl FromYellowstoneExtractor for BlockMetaExtractor { 85 | type Target = BlockMetaMini; 86 | fn map_yellowstone_update(&self, update: SubscribeUpdate) -> Option<(Slot, Self::Target)> { 87 | match update.update_oneof { 88 | Some(UpdateOneof::BlockMeta(update_blockmeta_message)) => { 89 | let slot = update_blockmeta_message.slot; 90 | let mini = BlockMetaMini { 91 | slot, 92 | commitment_config: self.0, 93 | }; 94 | Some((slot, mini)) 95 | } 96 | _ => None, 97 | } 98 | } 99 | } 100 | 101 | #[tokio::main(flavor = "current_thread")] 102 | pub async fn main() { 103 | // RUST_LOG=info,stream_blocks_mainnet=debug,geyser_grpc_connector=trace 104 | tracing_subscriber::fmt::init(); 105 | // console_subscriber::init(); 106 | 107 | let subscribe_blocks = true; 108 | let subscribe_blockmeta = false; 109 | 110 | let grpc_addr_green = env::var("GRPC_ADDR").expect("need grpc url for green"); 111 | let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok(); 112 | let grpc_addr_blue = env::var("GRPC_ADDR2").expect("need grpc url for blue"); 113 
| let grpc_x_token_blue = env::var("GRPC_X_TOKEN2").ok(); 114 | // via toxiproxy 115 | let grpc_addr_toxiproxy = "http://127.0.0.1:10001".to_string(); 116 | 117 | info!( 118 | "Using green on {} ({})", 119 | grpc_addr_green, 120 | grpc_x_token_green.is_some() 121 | ); 122 | info!( 123 | "Using blue on {} ({})", 124 | grpc_addr_blue, 125 | grpc_x_token_blue.is_some() 126 | ); 127 | info!("Using toxiproxy on {}", grpc_addr_toxiproxy); 128 | 129 | let timeouts = GrpcConnectionTimeouts { 130 | connect_timeout: Duration::from_secs(5), 131 | request_timeout: Duration::from_secs(5), 132 | subscribe_timeout: Duration::from_secs(5), 133 | receive_timeout: Duration::from_secs(5), 134 | }; 135 | 136 | let green_config = 137 | GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone()); 138 | let blue_config = 139 | GrpcSourceConfig::new(grpc_addr_blue, grpc_x_token_blue, None, timeouts.clone()); 140 | let toxiproxy_config = GrpcSourceConfig::new(grpc_addr_toxiproxy, None, None, timeouts.clone()); 141 | 142 | if subscribe_blocks { 143 | info!("Write Block stream.."); 144 | let green_stream = create_geyser_reconnecting_stream( 145 | green_config.clone(), 146 | GeyserFilter(CommitmentConfig::confirmed()).blocks_and_txs(), 147 | ); 148 | let blue_stream = create_geyser_reconnecting_stream( 149 | blue_config.clone(), 150 | GeyserFilter(CommitmentConfig::confirmed()).blocks_and_txs(), 151 | ); 152 | let toxiproxy_stream = create_geyser_reconnecting_stream( 153 | toxiproxy_config.clone(), 154 | GeyserFilter(CommitmentConfig::confirmed()).blocks_and_txs(), 155 | ); 156 | let multiplex_stream = create_multiplexed_stream( 157 | vec![green_stream, blue_stream, toxiproxy_stream], 158 | BlockExtractor(CommitmentConfig::confirmed()), 159 | ); 160 | start_example_block_consumer(multiplex_stream); 161 | } 162 | 163 | if subscribe_blockmeta { 164 | info!("Write BlockMeta stream.."); 165 | let green_stream = create_geyser_reconnecting_stream( 166 | green_config.clone(), 
167 | GeyserFilter(CommitmentConfig::confirmed()).blocks_meta(), 168 | ); 169 | let blue_stream = create_geyser_reconnecting_stream( 170 | blue_config.clone(), 171 | GeyserFilter(CommitmentConfig::confirmed()).blocks_meta(), 172 | ); 173 | let toxiproxy_stream = create_geyser_reconnecting_stream( 174 | toxiproxy_config.clone(), 175 | GeyserFilter(CommitmentConfig::confirmed()).blocks_meta(), 176 | ); 177 | let multiplex_stream = create_multiplexed_stream( 178 | vec![green_stream, blue_stream, toxiproxy_stream], 179 | BlockMetaExtractor(CommitmentConfig::confirmed()), 180 | ); 181 | start_example_blockmeta_consumer(multiplex_stream); 182 | } 183 | 184 | // "infinite" sleep 185 | sleep(Duration::from_secs(1800)).await; 186 | } 187 | 188 | #[derive(Default, Debug, Clone)] 189 | pub struct ProducedBlock { 190 | pub transactions: Vec, 191 | // pub leader_id: Option, 192 | pub blockhash: String, 193 | pub block_height: u64, 194 | pub slot: Slot, 195 | pub parent_slot: Slot, 196 | pub block_time: u64, 197 | pub commitment_config: CommitmentConfig, 198 | pub previous_blockhash: String, 199 | // pub rewards: Option>, 200 | } 201 | 202 | #[derive(Debug, Clone)] 203 | pub struct TransactionInfo { 204 | pub signature: String, 205 | pub err: Option, 206 | pub cu_requested: Option, 207 | pub prioritization_fees: Option, 208 | pub cu_consumed: Option, 209 | pub recent_blockhash: String, 210 | pub message: String, 211 | } 212 | 213 | pub fn map_produced_block( 214 | block: SubscribeUpdateBlock, 215 | commitment_config: CommitmentConfig, 216 | ) -> ProducedBlock { 217 | let txs: Vec = block 218 | .transactions 219 | .into_iter() 220 | .filter_map(|tx| { 221 | let meta = tx.meta?; 222 | let transaction = tx.transaction?; 223 | let message = transaction.message?; 224 | let header = message.header?; 225 | 226 | let signatures = transaction 227 | .signatures 228 | .into_iter() 229 | .filter_map(|sig| match Signature::try_from(sig) { 230 | Ok(sig) => Some(sig), 231 | Err(_) => { 232 | 
log::warn!( 233 | "Failed to read signature from transaction in block {} - skipping", 234 | block.blockhash 235 | ); 236 | None 237 | } 238 | }) 239 | .collect_vec(); 240 | 241 | let err = meta.err.map(|x| { 242 | bincode::deserialize::(&x.err) 243 | .expect("TransactionError should be deserialized") 244 | }); 245 | 246 | let signature = signatures[0]; 247 | let compute_units_consumed = meta.compute_units_consumed; 248 | 249 | let message = VersionedMessage::V0(v0::Message { 250 | header: MessageHeader { 251 | num_required_signatures: header.num_required_signatures as u8, 252 | num_readonly_signed_accounts: header.num_readonly_signed_accounts as u8, 253 | num_readonly_unsigned_accounts: header.num_readonly_unsigned_accounts as u8, 254 | }, 255 | account_keys: message 256 | .account_keys 257 | .into_iter() 258 | .map(|key| { 259 | let bytes: [u8; 32] = 260 | key.try_into().unwrap_or(Pubkey::default().to_bytes()); 261 | Pubkey::new_from_array(bytes) 262 | }) 263 | .collect(), 264 | recent_blockhash: Hash::new_from_array(<[u8; HASH_BYTES]>::try_from(message.recent_blockhash.as_slice()).unwrap()), 265 | instructions: message 266 | .instructions 267 | .into_iter() 268 | .map(|ix| CompiledInstruction { 269 | program_id_index: ix.program_id_index as u8, 270 | accounts: ix.accounts, 271 | data: ix.data, 272 | }) 273 | .collect(), 274 | address_table_lookups: message 275 | .address_table_lookups 276 | .into_iter() 277 | .map(|table| { 278 | let bytes: [u8; 32] = table 279 | .account_key 280 | .try_into() 281 | .unwrap_or(Pubkey::default().to_bytes()); 282 | MessageAddressTableLookup { 283 | account_key: Pubkey::new_from_array(bytes), 284 | writable_indexes: table.writable_indexes, 285 | readonly_indexes: table.readonly_indexes, 286 | } 287 | }) 288 | .collect(), 289 | }); 290 | 291 | let cu_requested = message.instructions().iter().find_map(|i| { 292 | if i.program_id(message.static_account_keys()) 293 | .eq(&compute_budget::id()) 294 | { 295 | if let 
Ok(ComputeBudgetInstruction::SetComputeUnitLimit(limit)) = 296 | borsh1::try_from_slice_unchecked(i.data.as_slice()) 297 | { 298 | return Some(limit); 299 | } 300 | } 301 | None 302 | }); 303 | 304 | let prioritization_fees = message.instructions().iter().find_map(|i| { 305 | if i.program_id(message.static_account_keys()) 306 | .eq(&compute_budget::id()) 307 | { 308 | if let Ok(ComputeBudgetInstruction::SetComputeUnitPrice(price)) = 309 | borsh1::try_from_slice_unchecked(i.data.as_slice()) 310 | { 311 | return Some(price); 312 | } 313 | } 314 | 315 | None 316 | }); 317 | 318 | Some(TransactionInfo { 319 | signature: signature.to_string(), 320 | err, 321 | cu_requested, 322 | prioritization_fees, 323 | cu_consumed: compute_units_consumed, 324 | recent_blockhash: message.recent_blockhash().to_string(), 325 | message: base64::engine::general_purpose::STANDARD.encode(message.serialize()), 326 | }) 327 | }) 328 | .collect(); 329 | 330 | // removed rewards 331 | 332 | ProducedBlock { 333 | transactions: txs, 334 | block_height: block 335 | .block_height 336 | .map(|block_height| block_height.block_height) 337 | .unwrap(), 338 | block_time: block.block_time.map(|time| time.timestamp).unwrap() as u64, 339 | blockhash: block.blockhash, 340 | previous_blockhash: block.parent_blockhash, 341 | commitment_config, 342 | // leader_id, 343 | parent_slot: block.parent_slot, 344 | slot: block.slot, 345 | // rewards, 346 | } 347 | } 348 | -------------------------------------------------------------------------------- /examples/stream_blocks_mainnet_task.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use base64::Engine; 4 | use itertools::Itertools; 5 | use log::{info, warn}; 6 | use solana_sdk::clock::Slot; 7 | use solana_sdk::commitment_config::CommitmentConfig; 8 | use solana_sdk::compute_budget::ComputeBudgetInstruction; 9 | use solana_sdk::hash::Hash; 10 | use solana_sdk::instruction::CompiledInstruction; 11 | use 
solana_sdk::message::v0::MessageAddressTableLookup; 12 | use solana_sdk::message::{v0, MessageHeader, VersionedMessage}; 13 | use solana_sdk::pubkey::Pubkey; 14 | use solana_sdk::signature::Signature; 15 | use solana_sdk::transaction::TransactionError; 16 | /// This file mocks the core model of the RPC server. 17 | use solana_sdk::{borsh1, compute_budget}; 18 | use solana_sdk::keccak::HASH_BYTES; 19 | use tokio::sync::mpsc::Receiver; 20 | use tokio::time::{sleep, Duration}; 21 | use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof; 22 | use yellowstone_grpc_proto::geyser::SubscribeUpdate; 23 | use yellowstone_grpc_proto::geyser::SubscribeUpdateBlock; 24 | 25 | use geyser_grpc_connector::grpc_subscription_autoreconnect_tasks::create_geyser_autoconnection_task_with_mpsc; 26 | use geyser_grpc_connector::grpcmultiplex_fastestwins::FromYellowstoneExtractor; 27 | use geyser_grpc_connector::{GeyserFilter, GrpcConnectionTimeouts, GrpcSourceConfig, Message}; 28 | 29 | fn start_example_blockmeta_consumer(mut multiplex_channel: Receiver) { 30 | tokio::spawn(async move { 31 | loop { 32 | match multiplex_channel.recv().await { 33 | Some(Message::GeyserSubscribeUpdate(update)) => match update.update_oneof { 34 | Some(UpdateOneof::BlockMeta(meta)) => { 35 | info!("emitted blockmeta #{} from multiplexer", meta.slot); 36 | } 37 | None => {} 38 | _ => {} 39 | }, 40 | None => { 41 | warn!("multiplexer channel closed - aborting"); 42 | return; 43 | } 44 | Some(Message::Connecting(_)) => {} 45 | } 46 | } 47 | }); 48 | } 49 | 50 | #[allow(dead_code)] 51 | struct BlockExtractor(CommitmentConfig); 52 | 53 | impl FromYellowstoneExtractor for BlockExtractor { 54 | type Target = ProducedBlock; 55 | fn map_yellowstone_update(&self, update: SubscribeUpdate) -> Option<(Slot, Self::Target)> { 56 | match update.update_oneof { 57 | Some(UpdateOneof::Block(update_block_message)) => { 58 | let block = map_produced_block(update_block_message, self.0); 59 | Some((block.slot, block)) 60 | 
} 61 | _ => None, 62 | } 63 | } 64 | } 65 | 66 | pub struct BlockMetaMini { 67 | pub slot: Slot, 68 | pub commitment_config: CommitmentConfig, 69 | } 70 | 71 | #[allow(dead_code)] 72 | struct BlockMetaExtractor(CommitmentConfig); 73 | 74 | impl FromYellowstoneExtractor for BlockMetaExtractor { 75 | type Target = BlockMetaMini; 76 | fn map_yellowstone_update(&self, update: SubscribeUpdate) -> Option<(Slot, Self::Target)> { 77 | match update.update_oneof { 78 | Some(UpdateOneof::BlockMeta(update_blockmeta_message)) => { 79 | let slot = update_blockmeta_message.slot; 80 | let mini = BlockMetaMini { 81 | slot, 82 | commitment_config: self.0, 83 | }; 84 | Some((slot, mini)) 85 | } 86 | _ => None, 87 | } 88 | } 89 | } 90 | 91 | #[tokio::main(flavor = "current_thread")] 92 | pub async fn main() { 93 | // RUST_LOG=info,stream_blocks_mainnet=debug,geyser_grpc_connector=trace 94 | tracing_subscriber::fmt::init(); 95 | // console_subscriber::init(); 96 | 97 | let grpc_addr_green = env::var("GRPC_ADDR").expect("need grpc url for green"); 98 | let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok(); 99 | let grpc_addr_blue = env::var("GRPC_ADDR2").expect("need grpc url for blue"); 100 | let grpc_x_token_blue = env::var("GRPC_X_TOKEN2").ok(); 101 | // via toxiproxy 102 | let grpc_addr_toxiproxy = "http://127.0.0.1:10001".to_string(); 103 | 104 | info!( 105 | "Using green on {} ({})", 106 | grpc_addr_green, 107 | grpc_x_token_green.is_some() 108 | ); 109 | info!( 110 | "Using blue on {} ({})", 111 | grpc_addr_blue, 112 | grpc_x_token_blue.is_some() 113 | ); 114 | info!("Using toxiproxy on {}", grpc_addr_toxiproxy); 115 | 116 | let timeouts = GrpcConnectionTimeouts { 117 | connect_timeout: Duration::from_secs(5), 118 | request_timeout: Duration::from_secs(5), 119 | subscribe_timeout: Duration::from_secs(5), 120 | receive_timeout: Duration::from_secs(5), 121 | }; 122 | let (_, exit_notify) = tokio::sync::broadcast::channel(1); 123 | 124 | let green_config = 125 | 
GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone()); 126 | let blue_config = 127 | GrpcSourceConfig::new(grpc_addr_blue, grpc_x_token_blue, None, timeouts.clone()); 128 | let toxiproxy_config = GrpcSourceConfig::new(grpc_addr_toxiproxy, None, None, timeouts.clone()); 129 | 130 | let (autoconnect_tx, blockmeta_rx) = tokio::sync::mpsc::channel(10); 131 | info!("Write BlockMeta stream.."); 132 | let _green_stream_ah = create_geyser_autoconnection_task_with_mpsc( 133 | green_config.clone(), 134 | GeyserFilter(CommitmentConfig::confirmed()).blocks_meta(), 135 | autoconnect_tx.clone(), 136 | exit_notify.resubscribe(), 137 | ); 138 | let _blue_stream_ah = create_geyser_autoconnection_task_with_mpsc( 139 | blue_config.clone(), 140 | GeyserFilter(CommitmentConfig::confirmed()).blocks_meta(), 141 | autoconnect_tx.clone(), 142 | exit_notify.resubscribe(), 143 | ); 144 | let _toxiproxy_stream_ah = create_geyser_autoconnection_task_with_mpsc( 145 | toxiproxy_config.clone(), 146 | GeyserFilter(CommitmentConfig::confirmed()).blocks_meta(), 147 | autoconnect_tx.clone(), 148 | exit_notify, 149 | ); 150 | start_example_blockmeta_consumer(blockmeta_rx); 151 | 152 | // "infinite" sleep 153 | sleep(Duration::from_secs(1800)).await; 154 | } 155 | 156 | #[derive(Default, Debug, Clone)] 157 | pub struct ProducedBlock { 158 | pub transactions: Vec, 159 | // pub leader_id: Option, 160 | pub blockhash: String, 161 | pub block_height: u64, 162 | pub slot: Slot, 163 | pub parent_slot: Slot, 164 | pub block_time: u64, 165 | pub commitment_config: CommitmentConfig, 166 | pub previous_blockhash: String, 167 | // pub rewards: Option>, 168 | } 169 | 170 | #[derive(Debug, Clone)] 171 | pub struct TransactionInfo { 172 | pub signature: String, 173 | pub err: Option, 174 | pub cu_requested: Option, 175 | pub prioritization_fees: Option, 176 | pub cu_consumed: Option, 177 | pub recent_blockhash: String, 178 | pub message: String, 179 | } 180 | 181 | pub fn 
map_produced_block( 182 | block: SubscribeUpdateBlock, 183 | commitment_config: CommitmentConfig, 184 | ) -> ProducedBlock { 185 | let txs: Vec = block 186 | .transactions 187 | .into_iter() 188 | .filter_map(|tx| { 189 | let meta = tx.meta?; 190 | let transaction = tx.transaction?; 191 | let message = transaction.message?; 192 | let header = message.header?; 193 | 194 | let signatures = transaction 195 | .signatures 196 | .into_iter() 197 | .filter_map(|sig| match Signature::try_from(sig) { 198 | Ok(sig) => Some(sig), 199 | Err(_) => { 200 | log::warn!( 201 | "Failed to read signature from transaction in block {} - skipping", 202 | block.blockhash 203 | ); 204 | None 205 | } 206 | }) 207 | .collect_vec(); 208 | 209 | let err = meta.err.map(|x| { 210 | bincode::deserialize::(&x.err) 211 | .expect("TransactionError should be deserialized") 212 | }); 213 | 214 | let signature = signatures[0]; 215 | let compute_units_consumed = meta.compute_units_consumed; 216 | 217 | let message = VersionedMessage::V0(v0::Message { 218 | header: MessageHeader { 219 | num_required_signatures: header.num_required_signatures as u8, 220 | num_readonly_signed_accounts: header.num_readonly_signed_accounts as u8, 221 | num_readonly_unsigned_accounts: header.num_readonly_unsigned_accounts as u8, 222 | }, 223 | account_keys: message 224 | .account_keys 225 | .into_iter() 226 | .map(|key| { 227 | let bytes: [u8; 32] = 228 | key.try_into().unwrap_or(Pubkey::default().to_bytes()); 229 | Pubkey::new_from_array(bytes) 230 | }) 231 | .collect(), 232 | recent_blockhash: Hash::new_from_array( <[u8; HASH_BYTES]>::try_from(message.recent_blockhash.as_slice()).unwrap()), 233 | instructions: message 234 | .instructions 235 | .into_iter() 236 | .map(|ix| CompiledInstruction { 237 | program_id_index: ix.program_id_index as u8, 238 | accounts: ix.accounts, 239 | data: ix.data, 240 | }) 241 | .collect(), 242 | address_table_lookups: message 243 | .address_table_lookups 244 | .into_iter() 245 | .map(|table| { 
246 | let bytes: [u8; 32] = table 247 | .account_key 248 | .try_into() 249 | .unwrap_or(Pubkey::default().to_bytes()); 250 | MessageAddressTableLookup { 251 | account_key: Pubkey::new_from_array(bytes), 252 | writable_indexes: table.writable_indexes, 253 | readonly_indexes: table.readonly_indexes, 254 | } 255 | }) 256 | .collect(), 257 | }); 258 | 259 | let cu_requested = message.instructions().iter().find_map(|i| { 260 | if i.program_id(message.static_account_keys()) 261 | .eq(&compute_budget::id()) 262 | { 263 | if let Ok(ComputeBudgetInstruction::SetComputeUnitLimit(limit)) = 264 | borsh1::try_from_slice_unchecked(i.data.as_slice()) 265 | { 266 | return Some(limit); 267 | } 268 | } 269 | None 270 | }); 271 | 272 | let prioritization_fees = message.instructions().iter().find_map(|i| { 273 | if i.program_id(message.static_account_keys()) 274 | .eq(&compute_budget::id()) 275 | { 276 | if let Ok(ComputeBudgetInstruction::SetComputeUnitPrice(price)) = 277 | borsh1::try_from_slice_unchecked(i.data.as_slice()) 278 | { 279 | return Some(price); 280 | } 281 | } 282 | 283 | None 284 | }); 285 | 286 | Some(TransactionInfo { 287 | signature: signature.to_string(), 288 | err, 289 | cu_requested, 290 | prioritization_fees, 291 | cu_consumed: compute_units_consumed, 292 | recent_blockhash: message.recent_blockhash().to_string(), 293 | message: base64::engine::general_purpose::STANDARD.encode(message.serialize()), 294 | }) 295 | }) 296 | .collect(); 297 | 298 | // removed rewards 299 | 300 | ProducedBlock { 301 | transactions: txs, 302 | block_height: block 303 | .block_height 304 | .map(|block_height| block_height.block_height) 305 | .unwrap(), 306 | block_time: block.block_time.map(|time| time.timestamp).unwrap() as u64, 307 | blockhash: block.blockhash, 308 | previous_blockhash: block.parent_blockhash, 309 | commitment_config, 310 | // leader_id, 311 | parent_slot: block.parent_slot, 312 | slot: block.slot, 313 | // rewards, 314 | } 315 | } 316 | 
--------------------------------------------------------------------------------
/examples/stream_blocks_processed.rs:
--------------------------------------------------------------------------------
use clap::Parser;
use log::info;
use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel};
use std::collections::HashMap;
use std::env;
use std::str::FromStr;
use std::time::Duration;
use tokio::sync::broadcast;

use geyser_grpc_connector::grpc_subscription_autoreconnect_tasks::create_geyser_autoconnection_task_with_mpsc;
use geyser_grpc_connector::{
    map_commitment_level, GrpcConnectionTimeouts, GrpcSourceConfig, Message,
};
use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof;
use yellowstone_grpc_proto::geyser::{
    SubscribeRequest, SubscribeRequestFilterBlocks, SubscribeRequestFilterSlots,
};

/// CLI arguments: the commitment level to subscribe with.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
    #[arg(long)]
    pub commitment_level: String,
}

#[tokio::main(flavor = "current_thread")]
pub async fn main() {
    // RUST_LOG=info,stream_blocks_processed=debug,geyser_grpc_connector=trace
    tracing_subscriber::fmt::init();
    // console_subscriber::init();

    let args = Args::parse();

    let commitment_level = CommitmentLevel::from_str(&args.commitment_level.to_ascii_lowercase())
        .unwrap_or_else(|_| {
            panic!(
                "Invalid argument commitment level: {}",
                args.commitment_level
            )
        });
    // Single-letter tag (e.g. "P"/"C"/"F") used in the log lines below.
    let mut commitment_level_short = commitment_level.to_string().to_ascii_uppercase();
    commitment_level_short.truncate(1);

    let grpc_addr_green = env::var("GRPC_ADDR").expect("need grpc url for green");
    let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok();

    info!(
        "Using grpc source on {} ({})",
        grpc_addr_green,
        grpc_x_token_green.is_some()
    );

    let timeouts = GrpcConnectionTimeouts {
        connect_timeout: Duration::from_secs(5),
        request_timeout: Duration::from_secs(5),
        subscribe_timeout: Duration::from_secs(5),
        receive_timeout: Duration::from_secs(5),
    };

    let green_config =
        GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone());

    info!("Write Block stream..");
    let (autoconnect_tx, mut blocks_rx) = tokio::sync::mpsc::channel(10);

    let (_exit, exit_notify) = broadcast::channel(1);

    let _green_stream_ah = create_geyser_autoconnection_task_with_mpsc(
        green_config.clone(),
        build_subscription(commitment_level),
        autoconnect_tx.clone(),
        exit_notify.resubscribe(),
    );

    loop {
        match blocks_rx.recv().await {
            Some(Message::GeyserSubscribeUpdate(update)) => match update.update_oneof {
                Some(UpdateOneof::Slot(_update_slot)) => {}
                Some(UpdateOneof::Block(update_block)) => {
                    info!(
                        "({}) block {:?}: {} txs",
                        commitment_level_short,
                        update_block.slot,
                        update_block.transactions.len()
                    );
                }
                None => {}
                _ => {}
            },
            None => {
                log::warn!("multiplexer channel closed - aborting");
                return;
            }
            Some(Message::Connecting(_)) => {}
        }
    }
}

/// Builds the geyser subscription: slot ticks plus full blocks (with
/// transactions, without accounts/entries) at the given commitment level.
fn build_subscription(commitment_level: CommitmentLevel) -> SubscribeRequest {
    let mut slots_subs = HashMap::new();
    slots_subs.insert(
        "geyser_slots".to_string(),
        SubscribeRequestFilterSlots {
            filter_by_commitment: Some(true),
            interslot_updates: Some(false),
        },
    );

    let mut blocks_subs = HashMap::new();
    blocks_subs.insert(
        "geyser_full_blocks".to_string(),
        SubscribeRequestFilterBlocks {
            account_include: Default::default(),
            include_transactions: Some(true),
            include_accounts: Some(false),
            include_entries: Some(false),
        },
    );

    SubscribeRequest {
        slots: slots_subs,
        blocks: blocks_subs,
        commitment: Some(map_commitment_level(CommitmentConfig {
            commitment: commitment_level,
        }) as i32),
        ..Default::default()
    }
}
--------------------------------------------------------------------------------
/examples/stream_blocks_single.rs:
--------------------------------------------------------------------------------
use futures::{Stream, StreamExt};
use log::info;
use solana_sdk::clock::Slot;
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::pubkey::Pubkey;
use std::env;
use std::pin::pin;

use geyser_grpc_connector::grpc_subscription_autoreconnect_streams::create_geyser_reconnecting_stream;
use geyser_grpc_connector::grpcmultiplex_fastestwins::FromYellowstoneExtractor;
use geyser_grpc_connector::{GeyserFilter, GrpcConnectionTimeouts, GrpcSourceConfig, Message};
use tokio::time::{sleep, Duration};
use tracing::warn;
use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof;
use yellowstone_grpc_proto::geyser::SubscribeUpdate;
use yellowstone_grpc_proto::prost::Message as _;

/// Spawns a task that logs slot + encoded size of every BlockMini emitted.
#[allow(dead_code)]
fn start_example_blockmini_consumer(
    multiplex_stream: impl Stream<Item = BlockMini> + Send + 'static,
) {
    tokio::spawn(async move {
        let mut blockmeta_stream = pin!(multiplex_stream);
        while let Some(mini) = blockmeta_stream.next().await {
            info!(
                "emitted block mini #{}@{} with {} bytes from multiplexer",
                mini.slot, mini.commitment_config.commitment, mini.blocksize
            );
        }
    });
}

/// Compact block summary: encoded byte size, slot and commitment.
pub struct BlockMini {
    pub blocksize: usize,
    pub slot: Slot,
    pub commitment_config: CommitmentConfig,
}

struct BlockMiniExtractor(CommitmentConfig);

impl FromYellowstoneExtractor for BlockMiniExtractor {
    type Target = BlockMini;
    fn map_yellowstone_update(&self, update: SubscribeUpdate) -> Option<(Slot,
Self::Target)> {
        match update.update_oneof {
            Some(UpdateOneof::Block(update_block_message)) => {
                let blocksize = update_block_message.encoded_len();
                let slot = update_block_message.slot;
                let mini = BlockMini {
                    blocksize,
                    slot,
                    commitment_config: self.0,
                };
                Some((slot, mini))
            }
            Some(UpdateOneof::BlockMeta(update_blockmeta_message)) => {
                let blocksize = update_blockmeta_message.encoded_len();
                let slot = update_blockmeta_message.slot;
                let mini = BlockMini {
                    blocksize,
                    slot,
                    commitment_config: self.0,
                };
                Some((slot, mini))
            }
            _ => None,
        }
    }
}

#[tokio::main(flavor = "current_thread")]
pub async fn main() {
    // RUST_LOG=info,stream_blocks_mainnet=debug,geyser_grpc_connector=trace
    tracing_subscriber::fmt::init();
    // console_subscriber::init();

    let commitment_level = CommitmentConfig::processed();
    let grpc_addr_green = env::var("GRPC_ADDR").expect("need grpc url for green");
    let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok();

    info!(
        "Using grpc source on {} ({})",
        grpc_addr_green,
        grpc_x_token_green.is_some()
    );

    let timeouts = GrpcConnectionTimeouts {
        connect_timeout: Duration::from_secs(25),
        request_timeout: Duration::from_secs(25),
        subscribe_timeout: Duration::from_secs(25),
        receive_timeout: Duration::from_secs(25),
    };

    let config = GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone());

    info!("Write Block stream..");

    // Same source twice: "green" subscribes to accounts, "blue" to blocks+txs.
    let green_stream = create_geyser_reconnecting_stream(
        config.clone(),
        GeyserFilter(commitment_level).accounts(),
    );

    let blue_stream = create_geyser_reconnecting_stream(
        config.clone(),
        GeyserFilter(commitment_level).blocks_and_txs(),
    );

    tokio::spawn(async move {
        let mut green_stream = pin!(green_stream);
        while let Some(message) = green_stream.next().await {
            #[allow(clippy::single_match)]
            match message {
                Message::GeyserSubscribeUpdate(subscriber_update) => {
                    match subscriber_update.update_oneof {
                        Some(UpdateOneof::Account(update)) => {
                            let account_info = update.account.unwrap();
                            let account_pk = Pubkey::try_from(account_info.pubkey).unwrap();
                            info!(
                                "got account update (green)!!! {} - {:?} - {} bytes",
                                update.slot,
                                account_pk,
                                account_info.data.len()
                            );
                        }
                        _ => {}
                    }
                }
                Message::Connecting(attempt) => {
                    warn!("Connection attempt: {}", attempt);
                }
            }
        }
        warn!("Stream aborted");
    });

    tokio::spawn(async move {
        let mut blue_stream = pin!(blue_stream);
        let extractor = BlockMiniExtractor(commitment_level);
        while let Some(message) = blue_stream.next().await {
            match message {
                Message::GeyserSubscribeUpdate(subscriber_update) => {
                    let mapped = extractor.map_yellowstone_update(*subscriber_update);
                    if let Some((slot, block_mini)) = mapped {
                        info!(
                            "got update (blue)!!! block: {} - {} bytes",
                            slot, block_mini.blocksize
                        );
                    }
                }
                Message::Connecting(attempt) => {
                    warn!("Connection attempt: {}", attempt);
                }
            }
        }
        warn!("Stream aborted");
    });

    // "infinite" sleep
    sleep(Duration::from_secs(1800)).await;
}

/// Extracts just the slot from a Block update; other update kinds map to None.
#[allow(dead_code)]
fn map_block_update(update: SubscribeUpdate) -> Option<Slot> {
    match update.update_oneof {
        Some(UpdateOneof::Block(update_block_message)) => {
            let slot = update_block_message.slot;
            Some(slot)
        }
        _ => None,
    }
}
--------------------------------------------------------------------------------
/examples/stream_token_accounts.rs:
--------------------------------------------------------------------------------
use dashmap::DashMap;
use log::{debug, info, trace};
#[allow(deprecated)]
use solana_account_decoder::parse_token::{
    parse_token, spl_token_ids, TokenAccountType, UiTokenAccount,
};
use solana_sdk::clock::{Clock, Slot};
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::sysvar::clock;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::env;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Instant;

use geyser_grpc_connector::grpc_subscription_autoreconnect_tasks::create_geyser_autoconnection_task_with_mpsc;
use geyser_grpc_connector::{
    map_commitment_level, GrpcConnectionTimeouts, GrpcSourceConfig, Message,
};
use tokio::time::{sleep, Duration};
use tracing::warn;
use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof;
use yellowstone_grpc_proto::geyser::{
    SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterSlots,
};

const ENABLE_TIMESTAMP_TAGGING: bool = false;

#[tokio::main]
pub async fn main() {
    // 
RUST_LOG=info,stream_blocks_mainnet=debug,geyser_grpc_connector=trace 34 | tracing_subscriber::fmt::init(); 35 | // console_subscriber::init(); 36 | 37 | let grpc_addr_green = env::var("GRPC_ADDR").expect("need grpc url for green"); 38 | let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok(); 39 | 40 | info!( 41 | "Using grpc source on {} ({})", 42 | grpc_addr_green, 43 | grpc_x_token_green.is_some() 44 | ); 45 | 46 | let timeouts = GrpcConnectionTimeouts { 47 | connect_timeout: Duration::from_secs(25), 48 | request_timeout: Duration::from_secs(25), 49 | subscribe_timeout: Duration::from_secs(25), 50 | receive_timeout: Duration::from_secs(25), 51 | }; 52 | 53 | let config = GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone()); 54 | 55 | info!("Write Block stream.."); 56 | 57 | let (exit_signal, _exit_notify) = tokio::sync::broadcast::channel(1); 58 | let (autoconnect_tx, mut accounts_rx) = tokio::sync::mpsc::channel(1000); 59 | 60 | let _jh_green = create_geyser_autoconnection_task_with_mpsc( 61 | config.clone(), 62 | token_accounts(), 63 | autoconnect_tx.clone(), 64 | exit_signal.subscribe(), 65 | ); 66 | 67 | let _jh_blue = create_geyser_autoconnection_task_with_mpsc( 68 | config.clone(), 69 | token_accounts_finalized(), 70 | autoconnect_tx.clone(), 71 | exit_signal.subscribe(), 72 | ); 73 | 74 | // owner x mint -> amount 75 | let token_account_by_ownermint: Arc>> = 76 | Arc::new(DashMap::with_capacity(10000)); 77 | let token_account_by_ownermint_read = token_account_by_ownermint.clone(); 78 | let token_account_by_ownermint = token_account_by_ownermint.clone(); 79 | 80 | tokio::spawn(async move { 81 | let mut bytes_per_slot: HashMap = HashMap::new(); 82 | let mut updates_per_slot: HashMap = HashMap::new(); 83 | 84 | let mut changing_slot = 0; 85 | let mut current_slot = 0; 86 | 87 | let mut account_write_first_timestamp: HashMap = HashMap::new(); 88 | 89 | while let Some(message) = accounts_rx.recv().await { 90 | match message { 91 | 
Message::GeyserSubscribeUpdate(subscriber_update) => { 92 | match subscriber_update.update_oneof { 93 | Some(UpdateOneof::Slot(update)) => { 94 | current_slot = update.slot; 95 | } 96 | Some(UpdateOneof::Account(update)) => { 97 | let slot = update.slot as Slot; 98 | let account = update.account.unwrap(); 99 | let account_pk = Pubkey::try_from(account.pubkey).unwrap(); 100 | let size = account.data.len() as u64; 101 | 102 | info!( 103 | "got account update: {} - {:?} - {} bytes", 104 | update.slot, 105 | account_pk, 106 | account.data.len() 107 | ); 108 | 109 | if clock::id() == account_pk { 110 | let clock: Clock = bincode::deserialize(&account.data).unwrap(); 111 | info!("clock: {:#?}", clock); 112 | } 113 | 114 | info!("got account write: {}", account.write_version); 115 | match account_write_first_timestamp.entry(account.write_version) { 116 | Entry::Occupied(o) => { 117 | let first_timestamp = o.get(); 118 | info!("got second account update for same write version with delta of {:?}", first_timestamp.elapsed()); 119 | } 120 | Entry::Vacant(v) => { 121 | v.insert(Instant::now()); 122 | } 123 | } 124 | 125 | trace!( 126 | "got account update: {} - {:?} - {} bytes", 127 | update.slot, 128 | account_pk, 129 | account.data.len() 130 | ); 131 | 132 | if ENABLE_TIMESTAMP_TAGGING { 133 | let since_the_epoch = std::time::SystemTime::now() 134 | .duration_since(std::time::SystemTime::UNIX_EPOCH) 135 | .expect("Time went backwards"); 136 | info!( 137 | "got account update: write_version={};timestamp_us={};slot={}", 138 | account.write_version, 139 | since_the_epoch.as_micros(), 140 | update.slot 141 | ); 142 | } 143 | 144 | #[allow(deprecated)] 145 | match parse_token(&account.data, Some(6)) { 146 | Ok(TokenAccountType::Account(account_ui)) => { 147 | // UiTokenAccount { 148 | // mint: "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", 149 | // owner: "9un5wqE3q4oCjyrDkwsdD48KteCJitQX5978Vh7KKxHo", 150 | // token_amount: UiTokenAmount { 151 | // ui_amount: 
Some(8229995.070667), 152 | // decimals: 6, amount: "8229995070667", 153 | // ui_amount_string: "8229995.070667" 154 | // }, 155 | // delegate: None, 156 | // state: Initialized, 157 | // is_native: false, 158 | // rent_exempt_reserve: None, 159 | // delegated_amount: None, 160 | // close_authority: None, 161 | // extensions: [] 162 | // } 163 | // all different states are covered 164 | // is_native: both true+false are sent 165 | assert!(!account.executable); 166 | assert_eq!(account.rent_epoch, u64::MAX); 167 | 168 | let owner = Pubkey::from_str(&account_ui.owner).unwrap(); 169 | let mint = Pubkey::from_str(&account_ui.mint).unwrap(); 170 | // 6 decimals as requested 171 | let amount = &account_ui.token_amount.amount; 172 | // groovie wallet 173 | if account_ui.owner.starts_with("66fEFnKy") { 174 | info!( 175 | "update balance for mint {} of owner {}: {}", 176 | mint, owner, amount 177 | ); 178 | } 179 | // if pubkey.starts_with(b"JUP") { 180 | // info!("update balance for mint {} of owner {}: {}", mint, owner, amount); 181 | // } 182 | 183 | token_account_by_ownermint 184 | .entry(owner) 185 | .or_default() 186 | .insert(mint, account_ui); 187 | 188 | bytes_per_slot 189 | .entry(slot) 190 | .and_modify(|total| *total += size) 191 | .or_insert(size); 192 | 193 | updates_per_slot 194 | .entry(slot) 195 | .and_modify(|total| *total += 1) 196 | .or_insert(1); 197 | 198 | let delta = (slot as i64) - (current_slot as i64); 199 | if delta > 1 { 200 | debug!("delta: {}", (slot as i64) - (current_slot as i64)); 201 | } 202 | 203 | if slot != changing_slot && changing_slot != 0 { 204 | let total_bytes = 205 | bytes_per_slot.get(&changing_slot).unwrap(); 206 | let updates_count = 207 | updates_per_slot.get(&changing_slot).unwrap(); 208 | info!( 209 | "Slot {} - Total bytes: {}, {} updates", 210 | slot, total_bytes, updates_count 211 | ); 212 | } 213 | changing_slot = slot; 214 | } 215 | Ok(TokenAccountType::Mint(_mint)) => { 216 | // not interesting 217 | } 218 | 
Ok(TokenAccountType::Multisig(_)) => {} 219 | Err(parse_error) => { 220 | trace!( 221 | "Could not parse account {} - {}", 222 | account_pk, 223 | parse_error 224 | ); 225 | } 226 | } 227 | } 228 | _ => {} 229 | } 230 | } 231 | Message::Connecting(attempt) => { 232 | warn!("Connection attempt: {}", attempt); 233 | } 234 | } 235 | } 236 | warn!("Stream aborted"); 237 | }); 238 | 239 | tokio::spawn(async move { 240 | loop { 241 | let mut total = 0; 242 | for accounts_by_mint in token_account_by_ownermint_read.iter() { 243 | for token_account_mint in accounts_by_mint.iter() { 244 | total += 1; 245 | let (_owner, _mint, _account) = ( 246 | accounts_by_mint.key(), 247 | token_account_mint.key(), 248 | token_account_mint.value(), 249 | ); 250 | // debug!("{} - {} - {}", owner, mint, account.token_amount.ui_amount_string); 251 | } 252 | } 253 | info!("Total owner x mint entries in cache map: {}", total); 254 | sleep(Duration::from_millis(1500)).await; 255 | } 256 | }); 257 | 258 | // "infinite" sleep 259 | sleep(Duration::from_secs(1800)).await; 260 | } 261 | 262 | pub fn token_accounts() -> SubscribeRequest { 263 | let mut accounts_subs = HashMap::new(); 264 | accounts_subs.insert( 265 | "client".to_string(), 266 | SubscribeRequestFilterAccounts { 267 | // account: vec!["4DoNfFBfF7UokCC2FQzriy7yHK6DY6NVdYpuekQ5pRgg".to_string()], 268 | account: vec![clock::id().to_string()], 269 | owner: vec![], 270 | // spl_token_ids().iter().map(|pubkey| pubkey.to_string()).collect(), 271 | filters: vec![], 272 | nonempty_txn_signature: None, 273 | }, 274 | ); 275 | 276 | let mut slots_subs = HashMap::new(); 277 | slots_subs.insert( 278 | "client".to_string(), 279 | SubscribeRequestFilterSlots { 280 | filter_by_commitment: Some(true), 281 | interslot_updates: Some(false), 282 | }, 283 | ); 284 | 285 | SubscribeRequest { 286 | slots: slots_subs, 287 | accounts: accounts_subs, 288 | commitment: Some(map_commitment_level(CommitmentConfig::processed()).into()), 289 | 
// find out if finalized makes a difference wrt accounts
// NOTE(review): despite the name, this subscribes with CommitmentConfig::confirmed(),
// not finalized — confirm whether that is intentional before relying on the name.
/// Subscription for one fixed account plus all SPL-token-program-owned accounts,
/// with commitment-filtered slot updates, at confirmed commitment.
pub fn token_accounts_finalized() -> SubscribeRequest {
    let mut accounts_subs = HashMap::new();
    accounts_subs.insert(
        "client".to_string(),
        SubscribeRequestFilterAccounts {
            account: vec!["4DoNfFBfF7UokCC2FQzriy7yHK6DY6NVdYpuekQ5pRgg".to_string()],
            // all accounts owned by any of the SPL token program ids
            owner: spl_token_ids()
                .iter()
                .map(|pubkey| pubkey.to_string())
                .collect(),
            filters: vec![],
            nonempty_txn_signature: None,
        },
    );

    let mut slots_subs = HashMap::new();
    slots_subs.insert(
        "client".to_string(),
        SubscribeRequestFilterSlots {
            filter_by_commitment: Some(true),
            interslot_updates: Some(false),
        },
    );

    SubscribeRequest {
        slots: slots_subs,
        accounts: accounts_subs,
        commitment: Some(map_commitment_level(CommitmentConfig::confirmed()).into()),
        ..SubscribeRequest::default()
    }
}
yellowstone_grpc_proto::geyser::{SubscribeRequest, SubscribeRequestFilterTransactions}; 18 | 19 | #[tokio::main] 20 | pub async fn main() { 21 | // RUST_LOG=info,stream_blocks_mainnet=debug,geyser_grpc_connector=trace 22 | tracing_subscriber::fmt::init(); 23 | // console_subscriber::init(); 24 | 25 | let grpc_addr_green = env::var("GRPC_ADDR").expect("need grpc url for green"); 26 | let grpc_x_token_green = env::var("GRPC_X_TOKEN").ok(); 27 | 28 | info!( 29 | "Using grpc source on {} ({})", 30 | grpc_addr_green, 31 | grpc_x_token_green.is_some() 32 | ); 33 | 34 | let timeouts = GrpcConnectionTimeouts { 35 | connect_timeout: Duration::from_secs(25), 36 | request_timeout: Duration::from_secs(25), 37 | subscribe_timeout: Duration::from_secs(25), 38 | receive_timeout: Duration::from_secs(25), 39 | }; 40 | 41 | let config = GrpcSourceConfig::new(grpc_addr_green, grpc_x_token_green, None, timeouts.clone()); 42 | 43 | info!("Write Block stream.."); 44 | 45 | let green_stream = create_geyser_reconnecting_stream(config.clone(), transaction_filter()); 46 | 47 | tokio::spawn(async move { 48 | let mut vote_times_by_slot: HashMap> = HashMap::new(); 49 | 50 | let mut green_stream = pin!(green_stream); 51 | while let Some(message) = green_stream.next().await { 52 | #[allow(clippy::single_match)] 53 | match message { 54 | Message::GeyserSubscribeUpdate(subscriber_update) => { 55 | match subscriber_update.update_oneof { 56 | Some(UpdateOneof::Transaction(update)) => { 57 | let message = update 58 | .transaction 59 | .unwrap() 60 | .transaction 61 | .unwrap() 62 | .message 63 | .unwrap(); 64 | let slot = update.slot; 65 | 66 | // https://docs.solanalabs.com/implemented-proposals/validator-timestamp-oracle 67 | for ci in message.instructions { 68 | let vote_instruction = 69 | limited_deserialize::(&ci.data).unwrap(); 70 | let last_voted_slot = vote_instruction.last_voted_slot().unwrap(); 71 | info!( 72 | "vote_instruction: {:?}", 73 | vote_instruction.timestamp().unwrap() 74 | ); 75 
| vote_times_by_slot 76 | .entry(last_voted_slot) 77 | .or_default() 78 | .insert(vote_instruction.timestamp().unwrap()); 79 | } 80 | 81 | // hack to look at reasonable settled slot 82 | // print_spread(&vote_times_by_slot, slot); 83 | if vote_times_by_slot.contains_key(&(slot - 10)) { 84 | print_spread(&vote_times_by_slot, slot - 10); 85 | } 86 | } 87 | _ => {} 88 | } 89 | } 90 | Message::Connecting(attempt) => { 91 | warn!("Connection attempt: {}", attempt); 92 | } 93 | } 94 | } 95 | warn!("Stream aborted"); 96 | }); 97 | 98 | // "infinite" sleep 99 | sleep(Duration::from_secs(1800)).await; 100 | } 101 | 102 | fn print_spread(vote_times_by_slot: &HashMap>, slot: Slot) { 103 | let slots = vote_times_by_slot.get(&slot).unwrap(); 104 | let min_slot = slots.iter().min().unwrap(); 105 | let array = slots 106 | .iter() 107 | .sorted() 108 | .map(|x| (*x - min_slot) as f64) 109 | .collect_vec(); 110 | let histo = calculate_percentiles(&array); 111 | info!("slot: {} histo: {}", slot, histo); 112 | } 113 | 114 | pub fn transaction_filter() -> SubscribeRequest { 115 | let mut trnasactions_subs = HashMap::new(); 116 | trnasactions_subs.insert( 117 | "client".to_string(), 118 | SubscribeRequestFilterTransactions { 119 | vote: Some(true), 120 | failed: Some(false), 121 | signature: None, 122 | // TODO 123 | account_include: vec![], 124 | account_exclude: vec![], 125 | account_required: vec![], 126 | }, 127 | ); 128 | 129 | SubscribeRequest { 130 | transactions: trnasactions_subs, 131 | ..SubscribeRequest::default() 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /examples/subscribe_accounts.rs: -------------------------------------------------------------------------------- 1 | // 2 | // ``` 3 | // ssh eclipse-rpc -Nv 4 | // ``` 5 | // 6 | 7 | use log::info; 8 | use solana_account_decoder::parse_token::spl_token_ids; 9 | use solana_sdk::clock::UnixTimestamp; 10 | use solana_sdk::pubkey::Pubkey; 11 | use 
std::collections::HashMap; 12 | use std::env; 13 | use std::str::FromStr; 14 | use std::sync::atomic::AtomicU64; 15 | use std::sync::Arc; 16 | use std::time::{SystemTime, UNIX_EPOCH}; 17 | use tokio::sync::mpsc::{Receiver, Sender}; 18 | 19 | use geyser_grpc_connector::grpc_subscription_autoreconnect_tasks::create_geyser_autoconnection_task_with_mpsc; 20 | use geyser_grpc_connector::{GrpcConnectionTimeouts, GrpcSourceConfig, Message}; 21 | use tokio::time::{sleep, Duration}; 22 | use tonic::transport::ClientTlsConfig; 23 | use yellowstone_grpc_proto::geyser::subscribe_update::UpdateOneof; 24 | use yellowstone_grpc_proto::geyser::{ 25 | SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterBlocksMeta, 26 | SubscribeRequestFilterSlots, 27 | }; 28 | 29 | type AtomicSlot = Arc; 30 | 31 | #[tokio::main] 32 | pub async fn main() { 33 | // RUST_LOG=info,stream_blocks_mainnet=debug,geyser_grpc_connector=trace 34 | tracing_subscriber::fmt::init(); 35 | // console_subscriber::init(); 36 | 37 | let grpc_addr = env::var("GRPC_ADDR").expect("need grpc url"); 38 | let grpc_x_token = env::var("GRPC_X_TOKEN").ok(); 39 | 40 | info!( 41 | "Using grpc source on {} ({})", 42 | grpc_addr, 43 | grpc_x_token.is_some() 44 | ); 45 | 46 | let timeouts = GrpcConnectionTimeouts { 47 | connect_timeout: Duration::from_secs(25), 48 | request_timeout: Duration::from_secs(25), 49 | subscribe_timeout: Duration::from_secs(25), 50 | receive_timeout: Duration::from_secs(25), 51 | }; 52 | 53 | let tls_config = ClientTlsConfig::new().with_native_roots(); 54 | let config = GrpcSourceConfig::new(grpc_addr, grpc_x_token, Some(tls_config), timeouts.clone()); 55 | 56 | let (autoconnect_tx, geyser_messages_rx) = tokio::sync::mpsc::channel(10); 57 | let (_exit_tx, exit_rx) = tokio::sync::broadcast::channel::<()>(1); 58 | let (subscribe_filter_update_tx, mut _subscribe_filter_update_rx) = 59 | tokio::sync::mpsc::channel::(1); 60 | 61 | let _jh = create_geyser_autoconnection_task_with_mpsc( 62 | 
config.clone(), 63 | jito2_account(), 64 | autoconnect_tx.clone(), 65 | exit_rx.resubscribe(), 66 | ); 67 | 68 | // testcase 1 69 | // test if the autoconnector continues to work even if the channel drops 70 | // drop(subscribe_filter_update_tx); 71 | 72 | // testcase 2 73 | spawn_subscribe_filter_updater(subscribe_filter_update_tx.clone()); 74 | 75 | // testcase 3 76 | // spawn_subscribe_broken_filter_updater(subscribe_filter_update_tx.clone()); 77 | 78 | let current_processed_slot = AtomicSlot::default(); 79 | start_tracking_account_consumer(geyser_messages_rx, current_processed_slot.clone()); 80 | 81 | // "infinite" sleep 82 | sleep(Duration::from_secs(1800)).await; 83 | } 84 | 85 | // note: this keeps track of lot of data and might blow up memory 86 | fn start_tracking_account_consumer( 87 | mut geyser_messages_rx: Receiver, 88 | _current_processed_slot: Arc, 89 | ) { 90 | tokio::spawn(async move { 91 | loop { 92 | match geyser_messages_rx.recv().await { 93 | Some(Message::GeyserSubscribeUpdate(update)) => match update.update_oneof { 94 | Some(UpdateOneof::Account(update)) => { 95 | let account_info = update.account.unwrap(); 96 | let account_pk = Pubkey::try_from(account_info.pubkey).unwrap(); 97 | let account_owner_pk = Pubkey::try_from(account_info.owner).unwrap(); 98 | // note: slot is referencing the block that is just built while the slot number reported from BlockMeta/Slot uses the slot after the block is built 99 | let slot = update.slot; 100 | let account_receive_time = get_epoch_sec(); 101 | 102 | info!( 103 | "Account update: slot: {}, account_pk: {}, account_owner_pk: {}, account_receive_time: {}", 104 | slot, account_pk, account_owner_pk, account_receive_time 105 | ); 106 | } 107 | None => {} 108 | _ => {} 109 | }, 110 | None => { 111 | log::warn!("multiplexer channel closed - aborting"); 112 | return; 113 | } 114 | Some(Message::Connecting(_)) => {} 115 | } 116 | } 117 | }); 118 | } 119 | 120 | #[allow(dead_code)] 121 | fn 
spawn_subscribe_filter_updater(subscribe_filter_update_tx: Sender) { 122 | tokio::spawn(async move { 123 | loop { 124 | sleep(Duration::from_secs(5)).await; 125 | info!("updating filters"); 126 | subscribe_filter_update_tx 127 | .send(jito1_account()) 128 | .await 129 | .expect("send"); 130 | } 131 | }); 132 | } 133 | 134 | #[allow(dead_code)] 135 | fn spawn_subscribe_broken_filter_updater(subscribe_filter_update_tx: Sender) { 136 | tokio::spawn(async move { 137 | loop { 138 | sleep(Duration::from_secs(5)).await; 139 | info!("updating filters"); 140 | subscribe_filter_update_tx 141 | .send(broken_subscription()) 142 | .await 143 | .expect("send"); 144 | } 145 | }); 146 | } 147 | 148 | fn get_epoch_sec() -> UnixTimestamp { 149 | SystemTime::now() 150 | .duration_since(UNIX_EPOCH) 151 | .unwrap() 152 | .as_secs() as UnixTimestamp 153 | } 154 | 155 | pub fn token_accounts() -> SubscribeRequest { 156 | let mut accounts_subs = HashMap::new(); 157 | accounts_subs.insert( 158 | "client".to_string(), 159 | SubscribeRequestFilterAccounts { 160 | account: vec![], 161 | // vec!["4DoNfFBfF7UokCC2FQzriy7yHK6DY6NVdYpuekQ5pRgg".to_string()], 162 | owner: spl_token_ids() 163 | .iter() 164 | .map(|pubkey| pubkey.to_string()) 165 | .collect(), 166 | filters: vec![], 167 | nonempty_txn_signature: None, 168 | }, 169 | ); 170 | 171 | SubscribeRequest { 172 | accounts: accounts_subs, 173 | ..Default::default() 174 | } 175 | } 176 | 177 | pub fn all_accounts_and_blocksmeta() -> SubscribeRequest { 178 | let mut accounts_subs = HashMap::new(); 179 | accounts_subs.insert( 180 | "client".to_string(), 181 | SubscribeRequestFilterAccounts { 182 | account: vec![], 183 | owner: vec![], 184 | filters: vec![], 185 | nonempty_txn_signature: None, 186 | }, 187 | ); 188 | 189 | let mut slots_subs = HashMap::new(); 190 | slots_subs.insert( 191 | "client".to_string(), 192 | SubscribeRequestFilterSlots { 193 | filter_by_commitment: Some(true), 194 | interslot_updates: Some(false), 195 | }, 196 | ); 197 
| 198 | let mut blocks_meta_subs = HashMap::new(); 199 | blocks_meta_subs.insert("client".to_string(), SubscribeRequestFilterBlocksMeta {}); 200 | 201 | SubscribeRequest { 202 | slots: slots_subs, 203 | accounts: accounts_subs, 204 | blocks_meta: blocks_meta_subs, 205 | ..Default::default() 206 | } 207 | } 208 | 209 | pub fn all_accounts() -> SubscribeRequest { 210 | let mut accounts_subs = HashMap::new(); 211 | accounts_subs.insert( 212 | "client".to_string(), 213 | SubscribeRequestFilterAccounts { 214 | account: vec![], 215 | owner: vec![], 216 | filters: vec![], 217 | nonempty_txn_signature: None, 218 | }, 219 | ); 220 | 221 | SubscribeRequest { 222 | accounts: accounts_subs, 223 | ..Default::default() 224 | } 225 | } 226 | 227 | pub fn jito1_account() -> SubscribeRequest { 228 | // Jito1 229 | let account = Pubkey::from_str("CXPeim1wQMkcTvEHx9QdhgKREYYJD8bnaCCqPRwJ1to1").unwrap(); 230 | 231 | let mut accounts_subs = HashMap::new(); 232 | accounts_subs.insert( 233 | "client".to_string(), 234 | SubscribeRequestFilterAccounts { 235 | account: vec![account.to_string()], 236 | owner: vec![], 237 | filters: vec![], 238 | nonempty_txn_signature: None, 239 | }, 240 | ); 241 | 242 | SubscribeRequest { 243 | accounts: accounts_subs, 244 | ..Default::default() 245 | } 246 | } 247 | 248 | pub fn jito2_account() -> SubscribeRequest { 249 | // Jito2 250 | let account = Pubkey::from_str("A4hyMd3FyvUJSRafDUSwtLLaQcxRP4r1BRC9w2AJ1to2").unwrap(); 251 | 252 | let mut accounts_subs = HashMap::new(); 253 | accounts_subs.insert( 254 | "client".to_string(), 255 | SubscribeRequestFilterAccounts { 256 | account: vec![account.to_string()], 257 | owner: vec![], 258 | filters: vec![], 259 | nonempty_txn_signature: None, 260 | }, 261 | ); 262 | 263 | SubscribeRequest { 264 | accounts: accounts_subs, 265 | ..Default::default() 266 | } 267 | } 268 | 269 | pub fn broken_subscription() -> SubscribeRequest { 270 | let mut accounts_subs = HashMap::new(); 271 | accounts_subs.insert( 272 | 
"broken_subscription".to_string(), 273 | SubscribeRequestFilterAccounts { 274 | account: vec!["nota_pubkey".to_string()], 275 | owner: vec![], 276 | filters: vec![], 277 | nonempty_txn_signature: None, 278 | }, 279 | ); 280 | 281 | SubscribeRequest { 282 | accounts: accounts_subs, 283 | ..Default::default() 284 | } 285 | } 286 | 287 | pub fn slots() -> SubscribeRequest { 288 | let mut slots_subs = HashMap::new(); 289 | slots_subs.insert( 290 | "client".to_string(), 291 | SubscribeRequestFilterSlots { 292 | filter_by_commitment: None, 293 | interslot_updates: Some(false), 294 | }, 295 | ); 296 | 297 | SubscribeRequest { 298 | slots: slots_subs, 299 | ..Default::default() 300 | } 301 | } 302 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.84.1" 3 | components = ["clippy", "rustfmt", "rust-analyzer"] 4 | targets = [] 5 | profile = "minimal" 6 | -------------------------------------------------------------------------------- /src/channel_plugger.rs: -------------------------------------------------------------------------------- 1 | use log::debug; 2 | 3 | /// usage: see plug_pattern test 4 | pub fn spawn_broadcast_channel_plug( 5 | downstream_broadcast: ( 6 | tokio::sync::broadcast::Sender, 7 | tokio::sync::broadcast::Receiver, 8 | ), 9 | upstream: tokio::sync::mpsc::Receiver, 10 | ) -> tokio::sync::broadcast::Receiver { 11 | spawn_plugger_mpcs_to_broadcast(upstream, downstream_broadcast.0); 12 | downstream_broadcast.1 13 | } 14 | 15 | /// note: backpressure will NOT get propagated to upstream 16 | pub fn spawn_plugger_mpcs_to_broadcast( 17 | mut upstream: tokio::sync::mpsc::Receiver, 18 | downstream: tokio::sync::broadcast::Sender, 19 | // TODO allow multiple downstreams + fanout 20 | ) { 21 | // abort forwarder by closing the sender 22 | let _private_handler = tokio::spawn(async move { 23 | while 
let Some(value) = upstream.recv().await { 24 | match downstream.send(value) { 25 | Ok(n_subscribers) => { 26 | debug!("forwarded to {} subscribers", n_subscribers); 27 | } 28 | Err(_dropped_msg) => { 29 | // decide to continue if no subscribers 30 | debug!("no subscribers - dropping payload and continue"); 31 | } 32 | } 33 | } 34 | debug!("no more messages from producer - shutting down connector"); 35 | }); 36 | } 37 | 38 | #[cfg(test)] 39 | mod tests { 40 | use super::*; 41 | use log::{info, warn}; 42 | use std::time::Duration; 43 | use tokio::sync::broadcast::error::RecvError; 44 | use tokio::sync::mpsc::error::SendTimeoutError; 45 | use tokio::time::{sleep, timeout}; 46 | 47 | #[tokio::test] 48 | async fn plug_pattern() { 49 | let (_jh_task, message_channel) = tokio::sync::mpsc::channel::(1); 50 | let _broadcast_rx = 51 | spawn_broadcast_channel_plug(tokio::sync::broadcast::channel(8), message_channel); 52 | } 53 | 54 | #[tokio::test] 55 | async fn connect_broadcast_to_mpsc() { 56 | solana_logger::setup_with_default("debug"); 57 | 58 | let (tx1, rx1) = tokio::sync::mpsc::channel::(1); 59 | let (tx2, rx2) = tokio::sync::broadcast::channel::(2); 60 | drop(rx2); 61 | 62 | let jh_producer = tokio::spawn(async move { 63 | for i in 1..=10 { 64 | info!("producer sending {}", i); 65 | if let Err(SendTimeoutError::Timeout(message)) = 66 | tx1.send_timeout(i, Duration::from_millis(200)).await 67 | { 68 | info!("producer send was blocked"); 69 | tx1.send(message).await.unwrap(); 70 | } 71 | sleep(Duration::from_millis(500)).await; 72 | } 73 | }); 74 | 75 | // downstream receiver A connected to broadcast 76 | let mut channel_a = tx2.subscribe(); 77 | tokio::spawn(async move { 78 | loop { 79 | match channel_a.recv().await { 80 | Ok(msg) => { 81 | info!("A: {:?} (len={})", msg, channel_a.len()); 82 | } 83 | Err(RecvError::Lagged(n_missed)) => { 84 | warn!("channel A lagged {} messages", n_missed); 85 | } 86 | Err(RecvError::Closed) => { 87 | info!("channel A closed (by 
forwarder)"); 88 | break; 89 | } 90 | } 91 | } 92 | }); 93 | 94 | // downstream receiver B connected to broadcast 95 | let mut channel_b = tx2.subscribe(); 96 | tokio::spawn(async move { 97 | loop { 98 | match channel_b.recv().await { 99 | Ok(msg) => { 100 | info!("B: {:?} (len={})", msg, channel_b.len()); 101 | // slow receiver 102 | sleep(Duration::from_millis(1000)).await; 103 | } 104 | Err(RecvError::Lagged(n_missed)) => { 105 | warn!("channel B lagged {} messages", n_missed); 106 | } 107 | Err(RecvError::Closed) => { 108 | info!("channel B closed (by forwarder)"); 109 | break; 110 | } 111 | } 112 | } 113 | }); 114 | 115 | // connect them 116 | spawn_plugger_mpcs_to_broadcast(rx1, tx2); 117 | 118 | // wait forever 119 | info!("Started tasks .. waiting for producer to finish"); 120 | // should take 5 secs 121 | assert!( 122 | timeout(Duration::from_secs(10), jh_producer).await.is_ok(), 123 | "timeout" 124 | ); 125 | info!("producer done - wait a bit longer ..."); 126 | sleep(Duration::from_secs(3)).await; 127 | info!("done."); 128 | 129 | // note how messages pile up for slow receiver B 130 | } 131 | 132 | #[tokio::test] 133 | async fn connect_broadcast_to_mpsc_nosubscribers() { 134 | solana_logger::setup_with_default("debug"); 135 | 136 | let (tx1, rx1) = tokio::sync::mpsc::channel::(1); 137 | let (tx2, rx2) = tokio::sync::broadcast::channel::(2); 138 | 139 | let jh_producer = tokio::spawn(async move { 140 | for i in 1..=10 { 141 | info!("producer sending {}", i); 142 | if let Err(SendTimeoutError::Timeout(message)) = 143 | tx1.send_timeout(i, Duration::from_millis(200)).await 144 | { 145 | info!("producer send was blocked"); 146 | tx1.send(message).await.unwrap(); 147 | } 148 | sleep(Duration::from_millis(500)).await; 149 | } 150 | }); 151 | 152 | // connect them 153 | spawn_plugger_mpcs_to_broadcast(rx1, tx2); 154 | 155 | sleep(Duration::from_secs(3)).await; 156 | info!("dropping subscriber"); 157 | drop(rx2); 158 | 159 | // wait forever 160 | info!("Started 
tasks .. waiting for producer to finish"); 161 | // should take 5 secs 162 | assert!( 163 | timeout(Duration::from_secs(10), jh_producer).await.is_ok(), 164 | "timeout" 165 | ); 166 | info!("producer done - wait a bit longer ..."); 167 | sleep(Duration::from_secs(3)).await; 168 | info!("done."); 169 | 170 | // note how messages pile up for slow receiver B 171 | } 172 | } 173 | -------------------------------------------------------------------------------- /src/grpc_subscription_autoreconnect_streams.rs: -------------------------------------------------------------------------------- 1 | /// NOT MAINTAINED - please use the `grpc_subscription_autoreconnect_streams` module instead 2 | use std::time::Duration; 3 | 4 | use async_stream::stream; 5 | use futures::{Stream, StreamExt}; 6 | use log::{debug, info, log, trace, warn, Level}; 7 | use tokio::task::JoinHandle; 8 | use tokio::time::{sleep, timeout}; 9 | use yellowstone_grpc_client::GeyserGrpcClientResult; 10 | use yellowstone_grpc_proto::geyser::{SubscribeRequest, SubscribeUpdate}; 11 | use yellowstone_grpc_proto::tonic::Status; 12 | 13 | use crate::yellowstone_grpc_util::{ 14 | connect_with_timeout_with_buffers, GeyserGrpcClientBufferConfig, 15 | }; 16 | use crate::{Attempt, GrpcSourceConfig, Message}; 17 | 18 | enum ConnectionState>> { 19 | NotConnected(Attempt), 20 | Connecting(Attempt, JoinHandle>), 21 | Ready(S), 22 | WaitReconnect(Attempt), 23 | } 24 | 25 | // Take geyser filter, connect to Geyser and return a generic stream of SubscribeUpdate 26 | // note: stream never terminates 27 | pub fn create_geyser_reconnecting_stream( 28 | grpc_source: GrpcSourceConfig, 29 | subscribe_filter: SubscribeRequest, 30 | ) -> impl Stream { 31 | let mut state = ConnectionState::NotConnected(1); 32 | 33 | // in case of cancellation, we restart from here: 34 | // thus we want to keep the progression in a state object outside the stream! macro 35 | let the_stream = stream! 
{ 36 | loop { 37 | let yield_value; 38 | 39 | (state, yield_value) = match state { 40 | 41 | ConnectionState::NotConnected(attempt) => { 42 | 43 | let connection_task = tokio::spawn({ 44 | let addr = grpc_source.grpc_addr.clone(); 45 | let token = grpc_source.grpc_x_token.clone(); 46 | let config = grpc_source.tls_config.clone(); 47 | let connect_timeout = grpc_source.timeouts.as_ref().map(|t| t.connect_timeout); 48 | let request_timeout = grpc_source.timeouts.as_ref().map(|t| t.request_timeout); 49 | let subscribe_timeout = grpc_source.timeouts.as_ref().map(|t| t.subscribe_timeout); 50 | let subscribe_filter = subscribe_filter.clone(); 51 | log!(if attempt > 1 { Level::Warn } else { Level::Debug }, "Connecting attempt #{} to {}", attempt, addr); 52 | async move { 53 | 54 | let connect_result = connect_with_timeout_with_buffers( 55 | addr, 56 | token, 57 | config, 58 | connect_timeout, 59 | request_timeout, 60 | GeyserGrpcClientBufferConfig::optimize_for_subscription(&subscribe_filter), 61 | None, 62 | ) 63 | .await; 64 | 65 | let mut client = connect_result.unwrap(); // FIXME how to handle this? 66 | debug!("Subscribe with filter {:?}", subscribe_filter); 67 | 68 | let subscribe_result = timeout(subscribe_timeout.unwrap_or(Duration::MAX), 69 | client.subscribe_once(subscribe_filter)).await; 70 | 71 | // maybe not optimal 72 | subscribe_result.map_err(|_| Status::unknown("unspecific subscribe timeout"))? 
73 | } 74 | }); 75 | 76 | (ConnectionState::Connecting(attempt + 1, connection_task), Message::Connecting(attempt)) 77 | } 78 | 79 | ConnectionState::Connecting(attempt, connection_task) => { 80 | let subscribe_result = connection_task.await; 81 | 82 | match subscribe_result { 83 | Ok(Ok(subscribed_stream)) => (ConnectionState::Ready(subscribed_stream), Message::Connecting(attempt)), 84 | Ok(Err(geyser_error)) => { 85 | // ATM we consider all errors recoverable 86 | warn!("subscribe failed on {} - retrying: {:#}", grpc_source, geyser_error); 87 | (ConnectionState::WaitReconnect(attempt + 1), Message::Connecting(attempt)) 88 | }, 89 | Err(geyser_grpc_task_error) => { 90 | warn!("connection task aborted on {} - retrying: {:#}", grpc_source, geyser_grpc_task_error); 91 | (ConnectionState::WaitReconnect(attempt + 1), Message::Connecting(attempt)) 92 | } 93 | } 94 | 95 | } 96 | 97 | ConnectionState::Ready(mut geyser_stream) => { 98 | let receive_timeout = grpc_source.timeouts.as_ref().map(|t| t.receive_timeout); 99 | match timeout(receive_timeout.unwrap_or(Duration::MAX), geyser_stream.next()).await { 100 | Ok(Some(Ok(update_message))) => { 101 | trace!("> recv update message from {}", grpc_source); 102 | (ConnectionState::Ready(geyser_stream), Message::GeyserSubscribeUpdate(Box::new(update_message))) 103 | } 104 | Ok(Some(Err(tonic_status))) => { 105 | // ATM we consider all errors recoverable 106 | warn!("error on {} - retrying: {:#}", grpc_source, tonic_status); 107 | (ConnectionState::WaitReconnect(1), Message::Connecting(1)) 108 | } 109 | Ok(None) => { 110 | // should not arrive here, Mean the stream close. 
#[cfg(test)]
mod tests {
    use crate::GrpcConnectionTimeouts;

    use super::*;

    /// `Debug` must not leak the x-token. `GrpcSourceConfig` renders `Debug` via its
    /// `Display` impl, which prints the (obfuscated) address, whether a token is set,
    /// and the compression encoding — the expected string below mirrors that format.
    #[tokio::test]
    async fn test_debug_no_secrets() {
        let timeout_config = GrpcConnectionTimeouts {
            connect_timeout: Duration::from_secs(1),
            request_timeout: Duration::from_secs(2),
            subscribe_timeout: Duration::from_secs(3),
            receive_timeout: Duration::from_secs(3),
        };
        assert_eq!(
            format!(
                "{:?}",
                GrpcSourceConfig::new(
                    "http://localhost:1234".to_string(),
                    Some("my-secret".to_string()),
                    None,
                    timeout_config,
                )
            ),
            // must match the Display impl in lib.rs: "grpc_addr {} (token? {}, compression {})";
            // GrpcSourceConfig::new sets no compression, and the secret itself must not appear
            "grpc_addr http://localhost:1234 (token? yes, compression none)"
        );
    }

    /// Same as above for `Display`: the token value must never be printed,
    /// only its presence ("yes"/"no").
    #[tokio::test]
    async fn test_display_no_secrets() {
        let timeout_config = GrpcConnectionTimeouts {
            connect_timeout: Duration::from_secs(1),
            request_timeout: Duration::from_secs(2),
            subscribe_timeout: Duration::from_secs(3),
            receive_timeout: Duration::from_secs(3),
        };
        assert_eq!(
            format!(
                "{}",
                GrpcSourceConfig::new(
                    "http://localhost:1234".to_string(),
                    Some("my-secret".to_string()),
                    None,
                    timeout_config,
                )
            ),
            "grpc_addr http://localhost:1234 (token? yes, compression none)"
        );
    }
}
// compat
/// Backward-compatible wrapper: spawns the autoconnection task feeding `mpsc_downstream`,
/// without support for runtime subscribe-filter updates (passes `None` for the update channel).
pub fn create_geyser_autoconnection_task_with_mpsc(
    grpc_source: GrpcSourceConfig,
    subscribe_filter: SubscribeRequest,
    mpsc_downstream: mpsc::Sender<Message>,
    exit_notify: broadcast::Receiver<()>,
) -> JoinHandle<()> {
    create_geyser_autoconnection_task_with_updater(
        grpc_source,
        subscribe_filter,
        mpsc_downstream,
        exit_notify,
        None,
    )
}

// compat
/// Backward-compatible wrapper: same as the `_with_log_tag` variant but without a log tag.
/// `subscribe_filter_update_rx` optionally lets the client push a replacement
/// `SubscribeRequest` while the stream is live.
pub fn create_geyser_autoconnection_task_with_updater(
    grpc_source: GrpcSourceConfig,
    subscribe_filter: SubscribeRequest,
    mpsc_downstream: mpsc::Sender<Message>,
    exit_notify: broadcast::Receiver<()>,
    subscribe_filter_update_rx: Option<mpsc::Receiver<SubscribeRequest>>,
) -> JoinHandle<()> {
    create_geyser_autoconnection_task_with_log_tag(
        grpc_source,
        subscribe_filter,
        mpsc_downstream,
        exit_notify,
        subscribe_filter_update_rx,
        &None,
    )
}

/// Opaque tag appended to every log line of a connection task so that multiple
/// concurrent tasks can be told apart in the logs.
#[derive(Clone)]
pub struct LogTag(pub String);

impl Display for LogTag {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "tag={}", self.0)
    }
}
exit_notify: broadcast::Receiver<()>, 120 | mut subscribe_filter_update_rx: Option>, 121 | log_tag: &Option, 122 | ) -> JoinHandle<()> { 123 | let log_tag = log_tag 124 | .as_ref() 125 | .map(|tag| format!(", {}", tag)) 126 | .unwrap_or("".to_string()); 127 | 128 | // task will be aborted when downstream receiver gets dropped 129 | // there are two ways to terminate: 1) using break 'main_loop 2) return from task 130 | let jh_geyser_task = tokio::spawn(async move { 131 | // use this filter for initial connect and update it if the client requests a change via client_subscribe_tx channel 132 | let mut subscribe_filter_on_connect = subscribe_filter; 133 | 134 | let (_dummy_filter_tx, dummy_filter_rx) = mpsc::channel::(1); 135 | let mut subscribe_filter_update_rx = 136 | subscribe_filter_update_rx.take().unwrap_or(dummy_filter_rx); 137 | let mut state = ConnectionState::NotConnected(1); 138 | let mut messages_forwarded = 0; 139 | 140 | 'main_loop: loop { 141 | state = match state { 142 | ConnectionState::NotConnected(attempt) => { 143 | let addr = grpc_source.grpc_addr.clone(); 144 | let token = grpc_source.grpc_x_token.clone(); 145 | let config = grpc_source.tls_config.clone(); 146 | let connect_timeout = grpc_source.timeouts.as_ref().map(|t| t.connect_timeout); 147 | let request_timeout = grpc_source.timeouts.as_ref().map(|t| t.request_timeout); 148 | let compression = grpc_source.compression; 149 | log!( 150 | if attempt > 1 { 151 | Level::Warn 152 | } else { 153 | Level::Debug 154 | }, 155 | "Connection attempt: to={}, attempt={}{}", 156 | attempt, 157 | addr, 158 | log_tag 159 | ); 160 | 161 | // let buffer_config = yellowstone_grpc_util::GeyserGrpcClientBufferConfig::optimize_for_subscription(&subscribe_filter); 162 | let buffer_config = buffer_config_from_env(); 163 | debug!( 164 | "Using Grpc Buffer config: config={:?}{}", 165 | buffer_config, log_tag 166 | ); 167 | 168 | let connection_handler = |connect_result| match connect_result { 169 | Ok(client) => 
ConnectionState::Connecting(attempt, client), 170 | Err(GeyserGrpcBuilderError::MetadataValueError(_)) => { 171 | ConnectionState::FatalError( 172 | attempt + 1, 173 | FatalErrorReason::ConfigurationError, 174 | ) 175 | } 176 | Err(GeyserGrpcBuilderError::TonicError(tonic_error)) => { 177 | warn!( 178 | "connect failed - aborting: to={}, error={:#}{}", 179 | grpc_source, tonic_error, log_tag 180 | ); 181 | ConnectionState::FatalError(attempt + 1, FatalErrorReason::NetworkError) 182 | } 183 | }; 184 | 185 | let fut_connector = connect_with_timeout_with_buffers( 186 | addr, 187 | token, 188 | config, 189 | connect_timeout, 190 | request_timeout, 191 | buffer_config, 192 | compression, 193 | ); 194 | 195 | match await_or_exit(fut_connector, exit_notify.recv(), log_tag.clone()).await { 196 | MaybeExit::Continue(connection_result) => { 197 | connection_handler(connection_result) 198 | } 199 | MaybeExit::Exit => ConnectionState::GracefulShutdown, 200 | } 201 | } 202 | ConnectionState::Connecting(attempt, mut client) => { 203 | let subscribe_timeout = 204 | grpc_source.timeouts.as_ref().map(|t| t.subscribe_timeout); 205 | let subscribe_filter_on_connect = subscribe_filter_on_connect.clone(); 206 | debug!( 207 | "Subscribe initially: filter={:?}{}", 208 | subscribe_filter_on_connect, log_tag 209 | ); 210 | 211 | let fut_subscribe = timeout( 212 | subscribe_timeout.unwrap_or(Duration::MAX), 213 | client.subscribe_with_request(Some(subscribe_filter_on_connect)), 214 | ); 215 | 216 | match await_or_exit(fut_subscribe, exit_notify.recv(), log_tag.clone()).await { 217 | MaybeExit::Continue(subscribe_result_timeout) => { 218 | match subscribe_result_timeout { 219 | Ok(subscribe_result) => { 220 | match subscribe_result { 221 | Ok((geyser_subscribe_tx, geyser_stream)) => { 222 | if attempt > 1 { 223 | debug!( 224 | "Subscribed after failed attempts: to={}, attempt={}{}", 225 | grpc_source, attempt, log_tag 226 | ); 227 | } 228 | ConnectionState::Ready( 229 | geyser_stream, 230 | 
geyser_subscribe_tx, 231 | ) 232 | } 233 | Err(GeyserGrpcClientError::TonicStatus(status)) => { 234 | warn!( 235 | "subscribe failed after attempts - retrying: to={}, attempt={}, status={:#}{}", 236 | grpc_source, attempt, status, log_tag 237 | ); 238 | ConnectionState::RecoverableConnectionError(attempt + 1) 239 | } 240 | // non-recoverable 241 | Err(unrecoverable_error) => { 242 | error!( 243 | "subscribe failed with unrecoverable error: to={}, error={:#}{}", 244 | grpc_source, unrecoverable_error, log_tag 245 | ); 246 | ConnectionState::FatalError( 247 | attempt + 1, 248 | FatalErrorReason::SubscribeError, 249 | ) 250 | } 251 | } 252 | } 253 | Err(_elapsed) => { 254 | warn!( 255 | "subscribe failed with timeout - retrying: to={}{}", 256 | grpc_source, log_tag 257 | ); 258 | ConnectionState::RecoverableConnectionError(attempt + 1) 259 | } 260 | } 261 | } 262 | MaybeExit::Exit => ConnectionState::GracefulShutdown, 263 | } 264 | } 265 | ConnectionState::RecoverableConnectionError(attempt) => { 266 | let backoff_secs = 1.5_f32.powi(attempt as i32).min(15.0); 267 | info!( 268 | "waiting after connection error, then reconnecting: wait_secs={}, to={}, attempt={}{}", 269 | backoff_secs, grpc_source, attempt, log_tag 270 | ); 271 | 272 | let fut_sleep = sleep(Duration::from_secs_f32(backoff_secs)); 273 | 274 | match await_or_exit(fut_sleep, exit_notify.recv(), log_tag.clone()).await { 275 | MaybeExit::Continue(()) => ConnectionState::NotConnected(attempt), 276 | MaybeExit::Exit => ConnectionState::GracefulShutdown, 277 | } 278 | } 279 | ConnectionState::FatalError(_attempt, reason) => match reason { 280 | FatalErrorReason::DownstreamChannelClosed => { 281 | warn!("downstream closed - aborting{}", log_tag); 282 | return; 283 | } 284 | FatalErrorReason::ConfigurationError => { 285 | warn!("fatal configuration error - aborting{}", log_tag); 286 | return; 287 | } 288 | FatalErrorReason::NetworkError => { 289 | warn!("fatal network error - aborting{}", log_tag); 290 | return; 
291 | } 292 | FatalErrorReason::SubscribeError => { 293 | warn!("fatal grpc subscribe error - aborting{}", log_tag); 294 | return; 295 | } 296 | }, 297 | ConnectionState::WaitReconnect(attempt) => { 298 | let backoff_secs = 1.5_f32.powi(attempt as i32).min(15.0); 299 | info!( 300 | "waiting, then reconnecting: wait_secs={}, to={}{}", 301 | backoff_secs, grpc_source, log_tag 302 | ); 303 | 304 | let fut_sleep = sleep(Duration::from_secs_f32(backoff_secs)); 305 | 306 | match await_or_exit(fut_sleep, exit_notify.recv(), log_tag.clone()).await { 307 | MaybeExit::Continue(()) => ConnectionState::NotConnected(attempt), 308 | MaybeExit::Exit => ConnectionState::GracefulShutdown, 309 | } 310 | } 311 | ConnectionState::Ready(mut geyser_stream, mut geyser_subscribe_tx) => { 312 | let receive_timeout = grpc_source.timeouts.as_ref().map(|t| t.receive_timeout); 313 | 314 | 'recv_loop: loop { 315 | select! { 316 | exit_res = exit_notify.recv() => { 317 | match exit_res { 318 | Ok(_) => { 319 | debug!("exit on signal{}", log_tag); 320 | } 321 | Err(recv_error) => { 322 | warn!("exit on signal: error={:?}{}", recv_error, log_tag); 323 | } 324 | } 325 | break 'recv_loop ConnectionState::GracefulShutdown; 326 | }, 327 | // could model subscribe_filter_update_rx as optional here but did not figure out how 328 | client_subscribe_update = subscribe_filter_update_rx.recv() => { 329 | match client_subscribe_update { 330 | Some(subscribe_request) => { 331 | debug!("Subscription update from client: filter={:?}{}", subscribe_request, log_tag); 332 | subscribe_filter_on_connect = subscribe_request.clone(); 333 | // note: if the subscription is invalid, it will trigger a Tonic error: 334 | // Status { code: InvalidArgument, message: "failed to create filter: Invalid Base58 string", source: None } 335 | if let Err(send_err) = geyser_subscribe_tx.send(subscribe_request).await { 336 | warn!("fail to send subscription update - disconnect and retry: error={:#}{}", send_err, log_tag); 337 | break 
'recv_loop ConnectionState::WaitReconnect(1); 338 | }; 339 | } 340 | None => { 341 | trace!("client subscribe channel closed, continue without{}", log_tag); 342 | continue 'recv_loop; 343 | } 344 | } 345 | }, 346 | geyser_stream_res = timeout( 347 | receive_timeout.unwrap_or(Duration::MAX), 348 | geyser_stream.next(), 349 | ) => { 350 | 351 | match geyser_stream_res { 352 | Ok(Some(Ok(update_message))) => { 353 | trace!("> recv update message: from={}{}", grpc_source, log_tag); 354 | // note: first send never blocks as the mpsc channel has capacity 1 355 | let warning_threshold = if messages_forwarded == 1 { 356 | Duration::from_millis(3000) 357 | } else { 358 | Duration::from_millis(500) 359 | }; 360 | let started_at = Instant::now(); 361 | 362 | let fut_send = mpsc_downstream.send_timeout( 363 | Message::GeyserSubscribeUpdate(Box::new(update_message)), 364 | warning_threshold, 365 | ); 366 | 367 | let MaybeExit::Continue(mpsc_downstream_result) = 368 | await_or_exit(fut_send, exit_notify.recv(), log_tag.clone()).await 369 | else { 370 | break 'recv_loop ConnectionState::GracefulShutdown; 371 | }; 372 | 373 | match mpsc_downstream_result { 374 | Ok(()) => { 375 | messages_forwarded += 1; 376 | if messages_forwarded == 1 { 377 | // note: first send never blocks - do not print time as this is a lie 378 | trace!("queued first update message{}", log_tag); 379 | } else { 380 | trace!( 381 | "queued update message: #={}, elapsed={:.02}ms{}", 382 | messages_forwarded, 383 | started_at.elapsed().as_secs_f32() * 1000.0, 384 | log_tag 385 | ); 386 | } 387 | continue 'recv_loop; 388 | } 389 | Err(SendTimeoutError::Timeout(the_message)) => { 390 | warn!( 391 | "downstream receiver did not pick up message until timeout - keep waiting: timeout={}ms{}", 392 | warning_threshold.as_millis(), log_tag 393 | ); 394 | 395 | let fut_send = mpsc_downstream.send(the_message); 396 | 397 | let MaybeExit::Continue(mpsc_downstream_result) = 398 | await_or_exit(fut_send, exit_notify.recv(), 
log_tag.clone()).await 399 | else { 400 | break 'recv_loop ConnectionState::GracefulShutdown; 401 | }; 402 | 403 | match mpsc_downstream_result { 404 | Ok(()) => { 405 | messages_forwarded += 1; 406 | trace!( 407 | "queued delayed update message: #={}, elapsed={:.02}ms{}", 408 | messages_forwarded, 409 | started_at.elapsed().as_secs_f32() * 1000.0, 410 | log_tag 411 | ); 412 | } 413 | Err(_send_error) => { 414 | warn!("downstream receiver closed, message is lost - aborting{}", log_tag); 415 | break 'recv_loop ConnectionState::FatalError( 416 | 0, 417 | FatalErrorReason::DownstreamChannelClosed, 418 | ); 419 | } 420 | } 421 | } 422 | Err(SendTimeoutError::Closed(_)) => { 423 | warn!("downstream receiver closed - aborting{}", log_tag); 424 | break 'recv_loop ConnectionState::FatalError( 425 | 0, 426 | FatalErrorReason::DownstreamChannelClosed, 427 | ); 428 | } 429 | } 430 | } 431 | Ok(Some(Err(tonic_status))) => { 432 | // all tonic errors are recoverable 433 | warn!("tonic error - retrying: source={}, status={:#}{}", grpc_source, tonic_status, log_tag); 434 | break 'recv_loop ConnectionState::WaitReconnect(1); 435 | } 436 | Ok(None) => { 437 | warn!("geyser stream closed - retrying: source={}{}", grpc_source, log_tag); 438 | break 'recv_loop ConnectionState::WaitReconnect(1); 439 | } 440 | Err(_elapsed) => { 441 | warn!("timeout - retrying: source={}{}", grpc_source, log_tag); 442 | break 'recv_loop ConnectionState::WaitReconnect(1); 443 | } 444 | }; // -- END match 445 | 446 | }, 447 | } 448 | } // -- END receive loop 449 | } 450 | ConnectionState::GracefulShutdown => { 451 | debug!( 452 | "shutting down gracefully on exit signal: source={}{}", 453 | grpc_source, log_tag 454 | ); 455 | break 'main_loop; 456 | } 457 | } // -- END match 458 | } // -- state loop; break ONLY on graceful shutdown 459 | debug!("gracefully exiting geyser task loop{}", log_tag); 460 | }); 461 | 462 | jh_geyser_task 463 | } 464 | 465 | fn buffer_config_from_env() -> 
GeyserGrpcClientBufferConfig { 466 | if env::var("BUFFER_SIZE").is_err() 467 | || env::var("CONN_WINDOW").is_err() 468 | || env::var("STREAM_WINDOW").is_err() 469 | { 470 | debug!("BUFFER_SIZE, CONN_WINDOW, STREAM_WINDOW not set; using default buffer config"); 471 | return GeyserGrpcClientBufferConfig::default(); 472 | } 473 | 474 | let buffer_size = env::var("BUFFER_SIZE") 475 | .expect("buffer_size") 476 | .parse::() 477 | .expect("integer(bytes)"); 478 | let conn_window = env::var("CONN_WINDOW") 479 | .expect("conn_window") 480 | .parse::() 481 | .expect("integer(bytes)"); 482 | let stream_window = env::var("STREAM_WINDOW") 483 | .expect("stream_window") 484 | .parse::() 485 | .expect("integer(bytes)"); 486 | 487 | // conn_window should be larger than stream_window 488 | GeyserGrpcClientBufferConfig { 489 | buffer_size: Some(buffer_size), 490 | conn_window: Some(conn_window), 491 | stream_window: Some(stream_window), 492 | } 493 | } 494 | 495 | enum MaybeExit { 496 | Continue(T), 497 | Exit, 498 | } 499 | 500 | async fn await_or_exit(future: F, exit_notify: E, log_tag: String) -> MaybeExit 501 | where 502 | F: Future, 503 | E: Future>, 504 | { 505 | tokio::select! 
#[cfg(test)]
mod tests {
    use crate::GrpcConnectionTimeouts;

    use super::*;

    /// `Debug` must not leak the x-token. `GrpcSourceConfig` renders `Debug` via its
    /// `Display` impl, which prints the (obfuscated) address, whether a token is set,
    /// and the compression encoding — the expected string below mirrors that format.
    #[tokio::test]
    async fn test_debug_no_secrets() {
        let timeout_config = GrpcConnectionTimeouts {
            connect_timeout: Duration::from_secs(1),
            request_timeout: Duration::from_secs(2),
            subscribe_timeout: Duration::from_secs(3),
            receive_timeout: Duration::from_secs(3),
        };
        assert_eq!(
            format!(
                "{:?}",
                GrpcSourceConfig::new(
                    "http://localhost:1234".to_string(),
                    Some("my-secret".to_string()),
                    None,
                    timeout_config,
                )
            ),
            // must match the Display impl in lib.rs: "grpc_addr {} (token? {}, compression {})";
            // GrpcSourceConfig::new sets no compression, and the secret itself must not appear
            "grpc_addr http://localhost:1234 (token? yes, compression none)"
        );
    }

    /// Same as above for `Display`: the token value must never be printed,
    /// only its presence ("yes"/"no").
    #[tokio::test]
    async fn test_display_no_secrets() {
        let timeout_config = GrpcConnectionTimeouts {
            connect_timeout: Duration::from_secs(1),
            request_timeout: Duration::from_secs(2),
            subscribe_timeout: Duration::from_secs(3),
            receive_timeout: Duration::from_secs(3),
        };
        assert_eq!(
            format!(
                "{}",
                GrpcSourceConfig::new(
                    "http://localhost:1234".to_string(),
                    Some("my-secret".to_string()),
                    None,
                    timeout_config,
                )
            ),
            "grpc_addr http://localhost:1234 (token? yes, compression none)"
        );
    }
}
merge_streams::MergeStreams; 7 | use solana_sdk::clock::Slot; 8 | use yellowstone_grpc_proto::geyser::SubscribeUpdate; 9 | 10 | pub trait FromYellowstoneExtractor { 11 | // Target is something like ProducedBlock 12 | type Target; 13 | fn map_yellowstone_update(&self, update: SubscribeUpdate) -> Option<(Slot, Self::Target)>; 14 | } 15 | 16 | /// use streams created by ``create_geyser_reconnecting_stream`` 17 | /// this is agnostic to the type of the stream 18 | /// CAUTION: do not try to use with commitment level "processed" as this will form trees (forks) and not a sequence 19 | pub fn create_multiplexed_stream( 20 | grpc_source_streams: Vec>, 21 | extractor: E, 22 | ) -> impl Stream 23 | where 24 | E: FromYellowstoneExtractor, 25 | { 26 | if grpc_source_streams.is_empty() { 27 | panic!("Must have at least one grpc source"); 28 | } 29 | 30 | info!( 31 | "Starting multiplexer with {} sources", 32 | grpc_source_streams.len() 33 | ); 34 | 35 | let mut streams = vec![]; 36 | for (idx, grpc_stream) in grpc_source_streams.into_iter().enumerate() { 37 | let tagged = grpc_stream.map(move |msg| TaggedMessage { 38 | stream_idx: idx, 39 | payload: msg, 40 | }); 41 | streams.push(Box::pin(tagged)); 42 | } 43 | 44 | let merged_streams = streams.merge(); 45 | 46 | extract_payload_from_geyser_updates(merged_streams, extractor) 47 | } 48 | 49 | struct TaggedMessage { 50 | pub stream_idx: usize, 51 | pub payload: Message, 52 | } 53 | 54 | fn extract_payload_from_geyser_updates( 55 | merged_stream: impl Stream, 56 | extractor: E, 57 | ) -> impl Stream 58 | where 59 | E: FromYellowstoneExtractor, 60 | { 61 | let mut tip: Slot = 0; 62 | stream! 
{ 63 | for await TaggedMessage {stream_idx, payload} in merged_stream { 64 | match payload { 65 | GeyserSubscribeUpdate(update) => { 66 | // take only the update messages we want 67 | if let Some((proposed_slot, block)) = extractor.map_yellowstone_update(*update) { 68 | if proposed_slot > tip { 69 | tip = proposed_slot; 70 | yield block; 71 | } 72 | } 73 | } 74 | Message::Connecting(attempt) => { 75 | if attempt > 1 { 76 | warn!("Stream-{} performs reconnect attempt {}", stream_idx, attempt); 77 | } 78 | } 79 | } 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/histogram_percentiles.rs: -------------------------------------------------------------------------------- 1 | use itertools::Itertools; 2 | use std::fmt::Display; 3 | use std::iter::zip; 4 | 5 | // #[derive(Clone, Copy, Debug, Default)] 6 | pub struct Point { 7 | pub priority: f64, 8 | pub value: f64, 9 | } 10 | 11 | impl From<(f64, f64)> for Point { 12 | fn from((priority, cu_consumed): (f64, f64)) -> Self { 13 | Point { 14 | priority, 15 | value: cu_consumed, 16 | } 17 | } 18 | } 19 | 20 | // #[derive(Clone, Debug, Eq, PartialEq, Hash)] 21 | pub struct HistValue { 22 | pub percentile: f32, 23 | pub value: f64, 24 | } 25 | 26 | /// `quantile` function is the same as the median if q=50, the same as the minimum if q=0 and the same as the maximum if q=100. 
27 | 28 | pub fn calculate_percentiles(input: &[f64]) -> Percentiles { 29 | if input.is_empty() { 30 | // note: percentile for empty array is undefined 31 | return Percentiles { 32 | v: vec![], 33 | p: vec![], 34 | }; 35 | } 36 | 37 | let is_monotonic = input.windows(2).all(|w| w[0] <= w[1]); 38 | assert!(is_monotonic, "array of values must be sorted"); 39 | 40 | let p_step = 5; 41 | let i_percentiles = (0..=100).step_by(p_step).collect_vec(); 42 | 43 | let mut bucket_values = Vec::with_capacity(i_percentiles.len()); 44 | let mut percentiles = Vec::with_capacity(i_percentiles.len()); 45 | for p in i_percentiles { 46 | let value = { 47 | let index = input.len() * p / 100; 48 | let cap_index = index.min(input.len() - 1); 49 | input[cap_index] 50 | }; 51 | 52 | bucket_values.push(value); 53 | percentiles.push(p as f32 / 100.0); 54 | } 55 | 56 | Percentiles { 57 | v: bucket_values, 58 | p: percentiles, 59 | } 60 | } 61 | 62 | pub fn calculate_cummulative(values: &[Point]) -> PercentilesCummulative { 63 | if values.is_empty() { 64 | // note: percentile for empty array is undefined 65 | return PercentilesCummulative { 66 | bucket_values: vec![], 67 | percentiles: vec![], 68 | }; 69 | } 70 | 71 | let is_monotonic = values.windows(2).all(|w| w[0].priority <= w[1].priority); 72 | assert!(is_monotonic, "array of values must be sorted"); 73 | 74 | let value_sum: f64 = values.iter().map(|x| x.value).sum(); 75 | let mut agg: f64 = values[0].value; 76 | let mut index = 0; 77 | let p_step = 5; 78 | 79 | let percentiles = (0..=100).step_by(p_step).map(|p| p as f64).collect_vec(); 80 | 81 | let dist = percentiles 82 | .iter() 83 | .map(|percentile| { 84 | while agg < (value_sum * *percentile) / 100.0 { 85 | index += 1; 86 | agg += values[index].value; 87 | } 88 | let priority = values[index].priority; 89 | HistValue { 90 | percentile: *percentile as f32, 91 | value: priority, 92 | } 93 | }) 94 | .collect_vec(); 95 | 96 | PercentilesCummulative { 97 | bucket_values: 
/// Plain percentile table produced by `calculate_percentiles`:
/// `v[i]` is the sampled value at percentile `p[i]`.
pub struct Percentiles {
    // value
    pub v: Vec<f64>,
    // percentile in range 0.0..1.0
    pub p: Vec<f32>,
}

impl Display for Percentiles {
    /// Renders as "p0=>v0 p5=>v1 ..." — percentiles scaled back to 0..100 for display.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        for i in 0..self.v.len() {
            write!(f, "p{}=>{} ", self.p[i] * 100.0, self.v[i])?;
        }
        Ok(())
    }
}

#[allow(dead_code)]
impl Percentiles {
    /// Exact lookup of the value stored for `percentile` (fractional, 0.0..1.0);
    /// `None` if that percentile was not one of the sampled buckets.
    fn get_bucket_value(&self, percentile: f32) -> Option<f64> {
        zip(&self.p, &self.v)
            .find(|(&p, _v)| p == percentile)
            .map(|(_p, &v)| v)
    }
}

/// Value-weighted (cumulative) percentile table produced by `calculate_cummulative`:
/// `bucket_values[i]` is the priority at which the running value sum reaches
/// percentile `percentiles[i]`.
pub struct PercentilesCummulative {
    pub bucket_values: Vec<f64>,
    pub percentiles: Vec<f32>,
}

#[allow(dead_code)]
impl PercentilesCummulative {
    /// Exact lookup of the bucket value for `percentile` (fractional, 0.0..1.0);
    /// `None` if that percentile was not one of the sampled buckets.
    fn get_bucket_value(&self, percentile: f32) -> Option<f64> {
        zip(&self.percentiles, &self.bucket_values)
            .find(|(&p, _cu)| p == percentile)
            .map(|(_p, &cu)| cu)
    }
}
165 | } = calculate_cummulative(&values); 166 | assert_eq!(by_cu_percentiles[10], 0.5); 167 | assert_eq!(by_cu[10], 100.0); // need more than 100 to beat 50% of the CU 168 | assert_eq!(by_cu[11], 200.0); // need more than 200 to beat 55% of the CU 169 | assert_eq!(by_cu[20], 200.0); // need more than 200 to beat 100% of the CU 170 | } 171 | 172 | #[test] 173 | fn test_empty_array() { 174 | let values = vec![]; 175 | let percentiles = calculate_percentiles(&values).v; 176 | // note: this is controversal 177 | assert!(percentiles.is_empty()); 178 | } 179 | #[test] 180 | fn test_zeros() { 181 | let values = vec![Point::from((0.0, 0.0)), Point::from((0.0, 0.0))]; 182 | let percentiles = calculate_cummulative(&values).bucket_values; 183 | assert_eq!(percentiles[0], 0.0); 184 | } 185 | 186 | #[test] 187 | fn test_statisticshowto() { 188 | let values = vec![30.0, 33.0, 43.0, 53.0, 56.0, 67.0, 68.0, 72.0]; 189 | let percentiles = calculate_percentiles(&values); 190 | assert_eq!(percentiles.v[5], 43.0); 191 | assert_eq!(percentiles.p[5], 0.25); 192 | assert_eq!(percentiles.get_bucket_value(0.25), Some(43.0)); 193 | 194 | let values = vec![ 195 | Point::from((30.0, 1.0)), 196 | Point::from((33.0, 2.0)), 197 | Point::from((43.0, 3.0)), 198 | Point::from((53.0, 4.0)), 199 | Point::from((56.0, 5.0)), 200 | Point::from((67.0, 6.0)), 201 | Point::from((68.0, 7.0)), 202 | Point::from((72.0, 8.0)), 203 | ]; 204 | let percentiles = calculate_cummulative(&values); 205 | assert_eq!(percentiles.percentiles[20], 1.0); 206 | assert_eq!(percentiles.bucket_values[20], 72.0); 207 | } 208 | 209 | #[test] 210 | fn test_simple_non_integer_index() { 211 | // Messwerte: 3 – 5 – 5 – 6 – 7 – 7 – 8 – 10 – 10 212 | // In diesem Fall lautet es also 5. 
213 | let values = vec![3.0, 5.0, 5.0, 6.0, 7.0, 7.0, 8.0, 10.0, 10.0]; 214 | 215 | let percentiles = calculate_percentiles(&values); 216 | assert_eq!(percentiles.p[4], 0.20); 217 | assert_eq!(percentiles.v[5], 5.0); 218 | 219 | let values = vec![ 220 | Point::from((3.0, 1.0)), 221 | Point::from((5.0, 2.0)), 222 | Point::from((5.0, 3.0)), 223 | Point::from((6.0, 4.0)), 224 | Point::from((7.0, 5.0)), 225 | Point::from((7.0, 6.0)), 226 | Point::from((8.0, 7.0)), 227 | Point::from((10.0, 8.0)), 228 | Point::from((10.0, 9.0)), 229 | ]; 230 | let percentiles = calculate_cummulative(&values); 231 | assert_eq!(percentiles.percentiles[19], 0.95); 232 | assert_eq!(percentiles.percentiles[20], 1.0); 233 | assert_eq!(percentiles.bucket_values[19], 10.0); 234 | assert_eq!(percentiles.bucket_values[20], 10.0); 235 | } 236 | 237 | #[test] 238 | fn test_large_list() { 239 | let values = (0..1000).map(|i| i as f64).collect_vec(); 240 | let percentiles = calculate_percentiles(&values); 241 | assert_eq!(percentiles.v[19], 950.0); 242 | assert_eq!(percentiles.p[19], 0.95); 243 | } 244 | } 245 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fmt::{Debug, Display}; 3 | use std::sync::atomic::AtomicU64; 4 | use std::sync::Arc; 5 | use std::time::Duration; 6 | 7 | use solana_sdk::commitment_config::CommitmentConfig; 8 | use tonic::codec::CompressionEncoding; 9 | use yellowstone_grpc_proto::geyser::{ 10 | CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, 11 | SubscribeRequestFilterBlocks, SubscribeRequestFilterBlocksMeta, SubscribeRequestFilterSlots, 12 | SubscribeUpdate, 13 | }; 14 | use yellowstone_grpc_proto::tonic::transport::ClientTlsConfig; 15 | 16 | use crate::obfuscate::url_obfuscate_api_token; 17 | pub use yellowstone_grpc_client::{ 18 | GeyserGrpcClient, GeyserGrpcClientError, 
impl Display for GrpcSourceConfig {
    /// Renders the config without leaking secrets: the address goes through
    /// `url_obfuscate_api_token`, and the x-token is reported only as present
    /// ("yes") or absent ("no"), never by value. Compression prints its encoding
    /// name or "none". `Debug` delegates here, so the same guarantee holds for both.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "grpc_addr {} (token? {}, compression {})",
            url_obfuscate_api_token(&self.grpc_addr),
            if self.grpc_x_token.is_some() {
                "yes"
            } else {
                "no"
            },
            self.compression
                .as_ref()
                .map(|c| c.to_string())
                .unwrap_or("none".to_string())
        )?;

        Ok(())
    }
}
148 | } 149 | } 150 | 151 | pub fn blocks_meta(&self) -> SubscribeRequest { 152 | let mut blocksmeta_subs = HashMap::new(); 153 | blocksmeta_subs.insert("client".to_string(), SubscribeRequestFilterBlocksMeta {}); 154 | 155 | SubscribeRequest { 156 | blocks_meta: blocksmeta_subs, 157 | commitment: Some(map_commitment_level(self.0) as i32), 158 | ..Default::default() 159 | } 160 | } 161 | 162 | pub fn slots(&self) -> SubscribeRequest { 163 | let mut slots_subs = HashMap::new(); 164 | slots_subs.insert( 165 | "client".to_string(), 166 | SubscribeRequestFilterSlots { 167 | filter_by_commitment: Some(true), 168 | interslot_updates: Some(false), 169 | }, 170 | ); 171 | 172 | SubscribeRequest { 173 | slots: slots_subs, 174 | commitment: Some(map_commitment_level(self.0) as i32), 175 | ..Default::default() 176 | } 177 | } 178 | 179 | pub fn accounts(&self) -> SubscribeRequest { 180 | let mut accounts_subs = HashMap::new(); 181 | accounts_subs.insert( 182 | "client".to_string(), 183 | SubscribeRequestFilterAccounts { 184 | account: vec![], 185 | owner: vec![], 186 | filters: vec![], 187 | nonempty_txn_signature: None, 188 | }, 189 | ); 190 | 191 | SubscribeRequest { 192 | accounts: accounts_subs, 193 | commitment: Some(map_commitment_level(self.0) as i32), 194 | ..Default::default() 195 | } 196 | } 197 | } 198 | 199 | pub fn map_commitment_level(commitment_config: CommitmentConfig) -> CommitmentLevel { 200 | // solana_sdk -> yellowstone 201 | match commitment_config.commitment { 202 | solana_sdk::commitment_config::CommitmentLevel::Processed => CommitmentLevel::Processed, 203 | solana_sdk::commitment_config::CommitmentLevel::Confirmed => CommitmentLevel::Confirmed, 204 | solana_sdk::commitment_config::CommitmentLevel::Finalized => CommitmentLevel::Finalized, 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /src/obfuscate.rs: -------------------------------------------------------------------------------- 1 | use 
std::borrow::Cow; 2 | use url::Url; 3 | 4 | /// obfuscate urls with api token like http://mango.rpcpool.com/a991fba00fagbad 5 | pub fn url_obfuscate_api_token(url: &str) -> Cow { 6 | if let Ok(mut parsed) = Url::parse(url) { 7 | if parsed.path() == "/" { 8 | return Cow::Borrowed(url); 9 | } else { 10 | parsed.set_path("omitted-secret"); 11 | Cow::Owned(parsed.to_string()) 12 | } 13 | } else { 14 | Cow::Borrowed(url) 15 | } 16 | } 17 | 18 | #[test] 19 | fn test_obfuscate_path() { 20 | let url_mango = "http://mango.rpcpool.com/121sdfsdf21"; 21 | let obfuscated = url_obfuscate_api_token(url_mango); 22 | assert_eq!(obfuscated, "http://mango.rpcpool.com/omitted-secret"); 23 | } 24 | 25 | #[test] 26 | fn test_obfuscate_nopath() { 27 | let url_localhost = "http://127.0.0.1"; 28 | let obfuscated = url_obfuscate_api_token(url_localhost); 29 | assert_eq!(obfuscated, "http://127.0.0.1"); 30 | } 31 | 32 | #[test] 33 | fn test_obfuscate_invalid() { 34 | let url_localhost = "::::invalid"; 35 | let obfuscated = url_obfuscate_api_token(url_localhost); 36 | assert_eq!(obfuscated, "::::invalid"); 37 | } 38 | -------------------------------------------------------------------------------- /src/yellowstone_grpc_util.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | use tonic::codec::CompressionEncoding; 3 | 4 | use tonic::metadata::errors::InvalidMetadataValue; 5 | use tonic::metadata::AsciiMetadataValue; 6 | use tonic::service::Interceptor; 7 | use tonic::transport::ClientTlsConfig; 8 | use tonic_health::pb::health_client::HealthClient; 9 | use yellowstone_grpc_client::{GeyserGrpcBuilderResult, GeyserGrpcClient, InterceptorXToken}; 10 | use yellowstone_grpc_proto::geyser::geyser_client::GeyserClient; 11 | use yellowstone_grpc_proto::geyser::SubscribeRequest; 12 | use yellowstone_grpc_proto::prost::bytes::Bytes; 13 | 14 | pub async fn connect_with_timeout( 15 | endpoint: E, 16 | x_token: Option, 17 | tls_config: 
Option, 18 | connect_timeout: Option, 19 | request_timeout: Option, 20 | compression: Option, 21 | ) -> GeyserGrpcBuilderResult> 22 | where 23 | E: Into, 24 | T: TryInto, 25 | { 26 | connect_with_timeout_with_buffers( 27 | endpoint, 28 | x_token, 29 | tls_config, 30 | connect_timeout, 31 | request_timeout, 32 | GeyserGrpcClientBufferConfig::default(), 33 | compression, 34 | ) 35 | .await 36 | } 37 | 38 | // see https://github.com/hyperium/tonic/blob/v0.10.2/tonic/src/transport/channel/mod.rs 39 | const DEFAULT_BUFFER_SIZE: usize = 1024; 40 | // see https://github.com/hyperium/hyper/blob/v0.14.28/src/proto/h2/client.rs#L45 41 | const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024 * 5; // 5mb 42 | const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024 * 2; // 2mb 43 | 44 | #[derive(Debug, Clone)] 45 | pub struct GeyserGrpcClientBufferConfig { 46 | pub buffer_size: Option, 47 | pub conn_window: Option, 48 | pub stream_window: Option, 49 | } 50 | 51 | impl Default for GeyserGrpcClientBufferConfig { 52 | fn default() -> Self { 53 | GeyserGrpcClientBufferConfig { 54 | buffer_size: Some(DEFAULT_BUFFER_SIZE), 55 | conn_window: Some(DEFAULT_CONN_WINDOW), 56 | stream_window: Some(DEFAULT_STREAM_WINDOW), 57 | } 58 | } 59 | } 60 | 61 | impl GeyserGrpcClientBufferConfig { 62 | pub fn optimize_for_subscription(filter: &SubscribeRequest) -> GeyserGrpcClientBufferConfig { 63 | if !filter.blocks.is_empty() { 64 | GeyserGrpcClientBufferConfig { 65 | buffer_size: Some(65536), // 64kb (default: 1k) 66 | conn_window: Some(5242880), // 5mb (=default) 67 | stream_window: Some(4194304), // 4mb (default: 2m) 68 | } 69 | } else { 70 | GeyserGrpcClientBufferConfig::default() 71 | } 72 | } 73 | } 74 | 75 | pub async fn connect_with_timeout_with_buffers( 76 | endpoint: E, 77 | x_token: Option, 78 | tls_config: Option, 79 | connect_timeout: Option, 80 | request_timeout: Option, 81 | buffer_config: GeyserGrpcClientBufferConfig, 82 | compression: Option, 83 | ) -> GeyserGrpcBuilderResult> 84 | where 85 | E: 
Into, 86 | T: TryInto, 87 | { 88 | // see https://github.com/blockworks-foundation/geyser-grpc-connector/issues/10 89 | let mut endpoint = tonic::transport::Endpoint::from_shared(endpoint)? 90 | .tcp_nodelay(true) 91 | .http2_adaptive_window(true) 92 | .buffer_size(buffer_config.buffer_size) 93 | .initial_connection_window_size(buffer_config.conn_window) 94 | .initial_stream_window_size(buffer_config.stream_window); 95 | 96 | if let Some(tls_config) = tls_config { 97 | endpoint = endpoint.tls_config(tls_config)?; 98 | } 99 | 100 | if let Some(connect_timeout) = connect_timeout { 101 | endpoint = endpoint.timeout(connect_timeout); 102 | } 103 | 104 | if let Some(request_timeout) = request_timeout { 105 | endpoint = endpoint.timeout(request_timeout); 106 | } 107 | 108 | let x_token: Option = match x_token { 109 | Some(x_token) => Some(x_token.try_into()?), 110 | None => None, 111 | }; 112 | let interceptor = InterceptorXToken { 113 | x_token, 114 | x_request_snapshot: false, 115 | }; 116 | 117 | let channel = endpoint.connect_lazy(); 118 | 119 | let health_client = HealthClient::with_interceptor(channel.clone(), interceptor.clone()); 120 | 121 | let geyser_client = GeyserClient::with_interceptor(channel.clone(), interceptor.clone()) 122 | .max_decoding_message_size(usize::MAX); 123 | let geyser_client = if let Some(compression_encoding) = compression { 124 | geyser_client.accept_compressed(compression_encoding) 125 | } else { 126 | geyser_client 127 | }; 128 | 129 | let client = GeyserGrpcClient::new(health_client, geyser_client); 130 | Ok(client) 131 | } 132 | --------------------------------------------------------------------------------