├── .gitignore
├── deny.toml
├── rustfmt.toml
├── src
│   ├── rpc
│   │   ├── api.rs
│   │   ├── mod.rs
│   │   ├── workers.rs
│   │   ├── server.rs
│   │   ├── api_httpget.rs
│   │   ├── api_jsonrpc.rs
│   │   └── upstream.rs
│   ├── source
│   │   ├── mod.rs
│   │   ├── sfa.rs
│   │   ├── transaction.rs
│   │   ├── fees.rs
│   │   ├── block.rs
│   │   ├── http.rs
│   │   └── stream.rs
│   ├── lib.rs
│   ├── storage
│   │   ├── mod.rs
│   │   ├── util.rs
│   │   ├── sync.rs
│   │   ├── memory.rs
│   │   ├── slots.rs
│   │   ├── source.rs
│   │   ├── files.rs
│   │   ├── blocks.rs
│   │   ├── read.rs
│   │   ├── rocksdb.rs
│   │   └── write.rs
│   ├── util.rs
│   ├── version.rs
│   ├── log.rs
│   ├── metrics.rs
│   ├── bin
│   │   ├── bench.rs
│   │   └── alpamayo.rs
│   └── config.rs
├── rust-toolchain.toml
├── .github
│   ├── issue_template.md
│   └── workflows
│       ├── test.yml
│       └── release.yml
├── .editorconfig
├── Cargo.toml
├── README.md
├── config.yml
└── CHANGELOG.md

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# project
/db
config.dev.yml

# rust
/target

--------------------------------------------------------------------------------
/deny.toml:
--------------------------------------------------------------------------------
[graph]
all-features = true

[advisories]
ignore = []

--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
edition = "2024"
imports_granularity = "One"
group_imports = "One"

--------------------------------------------------------------------------------
/src/rpc/api.rs:
--------------------------------------------------------------------------------
pub const X_ERROR: &str = "x-error";
pub const X_SLOT: &str = "x-slot";

--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
[toolchain]
channel = "1.91.1"
components = ["clippy", "rustfmt"]
targets = []
profile = "minimal"

--------------------------------------------------------------------------------
/src/source/mod.rs:
--------------------------------------------------------------------------------
pub mod block;
pub mod fees;
pub mod http;
pub mod sfa;
pub mod stream;
pub mod transaction;

--------------------------------------------------------------------------------
/src/rpc/mod.rs:
--------------------------------------------------------------------------------
pub mod api;
pub mod api_httpget;
pub mod api_jsonrpc;
pub mod server;
pub mod upstream;
pub mod workers;

--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
pub mod config;
pub mod log;
pub mod metrics;
pub mod rpc;
pub mod source;
pub mod storage;
pub mod util;
pub mod version;

--------------------------------------------------------------------------------
/src/storage/mod.rs:
--------------------------------------------------------------------------------
pub mod blocks;
pub mod files;
pub mod memory;
pub mod read;
pub mod rocksdb;
pub mod slots;
pub mod source;
pub mod sync;
pub mod util;
pub mod write;

--------------------------------------------------------------------------------
/src/util.rs:
--------------------------------------------------------------------------------
pub type HashMap<K, V, S = foldhash::fast::RandomState> = std::collections::HashMap<K, V, S>;
pub type HashSet<K, S = foldhash::fast::RandomState> = std::collections::HashSet<K, S>;

/// Which end of a double-ended collection an operation targets
/// (cf. the `ConfirmedBlockPop*`/`ConfirmedBlockPush*` messages in `storage::sync`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VecSide {
    Back,
    Front,
}

--------------------------------------------------------------------------------
/.github/issue_template.md:
--------------------------------------------------------------------------------
---
name: Default issue
---

Please use issues only for reporting bugs or discussing feature-related topics. If you're having trouble running alpamayo or need insights about the configuration, please post your question in the Telegram group: [https://t.me/lamportsdev](https://t.me/lamportsdev)

--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
root = true

[*]
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.{diff,md}]
trim_trailing_whitespace = false

[*.{js,json,proto,yaml,yml}]
indent_style = space
indent_size = 2

[*.{rs,toml}]
indent_style = space
indent_size = 4

[{Makefile,**.mk}]
indent_style = tab

--------------------------------------------------------------------------------
/src/version.rs:
--------------------------------------------------------------------------------
use {richat_shared::version::Version, std::env};

// All values are injected at compile time by the build script (vergen / git-version).
pub const VERSION: Version = Version {
    package: env!("CARGO_PKG_NAME"),
    version: env!("CARGO_PKG_VERSION"),
    proto: env!("YELLOWSTONE_GRPC_PROTO_VERSION"),
    proto_richat: env!("RICHAT_PROTO_VERSION"),
    solana: env!("SOLANA_SDK_VERSION"),
    git: env!("GIT_VERSION"),
    rustc: env!("VERGEN_RUSTC_SEMVER"),
    buildts: env!("VERGEN_BUILD_TIMESTAMP"),
};

--------------------------------------------------------------------------------
/src/storage/util.rs:
--------------------------------------------------------------------------------
use {
    anyhow::Context,
    std::path::Path,
    tokio_uring::fs::{File, OpenOptions},
};

pub async fn open(path: &Path) -> anyhow::Result<(File, u64)> {
    create_dir_all(path).await?;

    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open(path)
        .await
        .with_context(|| format!("failed to open file {path:?}"))?;

    let stx = file
        .statx()
        .await
        .with_context(|| format!("failed to get file size {path:?}"))?;

    Ok((file, stx.stx_size))
}

pub async fn create_dir_all(path: &Path) -> anyhow::Result<()> {
    if let Some(path) = path.parent() {
        tokio_uring::fs::create_dir_all(path)
            .await
            .with_context(|| format!("failed to create dirs on path {path:?}"))?;
    }
    Ok(())
}

--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  pull_request:
  push:
    branches:
      - master
      - agave-v*
  workflow_dispatch:

env:
  CARGO_TERM_COLOR: always

jobs:
  test:
    strategy:
      matrix:
        os:
          - ubuntu-22.04
          - ubuntu-24.04
    runs-on: ["${{ matrix.os }}"]
    steps:
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y liburing-dev
      - uses: actions/checkout@v4

      - uses: fanatid/rust-github-ci-prepare@master
        with:
          cache-version: v0002-test

      - name: cargo deny check advisories
        uses: EmbarkStudios/cargo-deny-action@v1
        with:
          command: check advisories

      - name: run clippy
        run: cargo clippy --workspace --all-targets -- -Dwarnings

      - name: run test
        run: cargo test --all-targets

--------------------------------------------------------------------------------
/src/log.rs:
--------------------------------------------------------------------------------
use {
    std::io::{self, IsTerminal},
    thiserror::Error,
    tracing::Subscriber,
    tracing_subscriber::{
        filter::{EnvFilter, FromEnvError, LevelFilter},
        fmt::layer,
        layer::{Layer, SubscriberExt},
        registry::LookupSpan,
        util::{SubscriberInitExt, TryInitError},
    },
};

#[derive(Debug, Error)]
pub enum LogSetupError {
    #[error(transparent)]
    FromEnv(#[from] FromEnvError),
    #[error(transparent)]
    Init(#[from] TryInitError),
}

pub fn setup(json: bool) -> Result<(), LogSetupError> {
    let env = EnvFilter::builder()
        .with_default_directive(LevelFilter::INFO.into())
        .from_env()?;

    tracing_subscriber::registry()
        .with(env)
        .with(create_io_layer(json))
        .try_init()?;

    Ok(())
}

fn create_io_layer<S>(json: bool) -> Box<dyn Layer<S> + Send + Sync + 'static>
where
    S: Subscriber,
    for<'a> S: LookupSpan<'a>,
{
    let is_atty = io::stdout().is_terminal() && io::stderr().is_terminal();
    let io_layer = layer().with_ansi(is_atty).with_line_number(true);

    if json {
        Box::new(io_layer.json())
    } else {
        Box::new(io_layer)
    }
}

--------------------------------------------------------------------------------
/src/storage/sync.rs:
--------------------------------------------------------------------------------
use {
    crate::{
        source::block::BlockWithBinary,
        storage::{
            blocks::{StoredBlockPushSync, StoredBlocksRead},
            files::StorageFilesSyncInit,
            rocksdb::RocksdbRead,
        },
    },
    solana_sdk::clock::Slot,
    std::sync::Arc,
};

#[derive(Debug, Clone)]
pub enum ReadWriteSyncMessage {
    // once, on initialization
    Init {
        blocks: StoredBlocksRead,
        db_read: RocksdbRead,
        storage_files_init: StorageFilesSyncInit,
        recent_blocks: Vec<(Slot, Arc<BlockWithBinary>)>,
    },
    // when we build the block
    BlockNew {
        slot: Slot,
        block: Arc<BlockWithBinary>,
    },
    // block marked as dead
    BlockDead {
        slot: Slot,
    },
    // block confirmed
    BlockConfirmed {
        slot: Slot,
        block: Option<Arc<BlockWithBinary>>,
    },
    // slot finalized
    SlotFinalized {
        slot: Slot,
    },
    // block removed (back, purged)
    ConfirmedBlockPopBack,
    // block removed (front, by request)
    ConfirmedBlockPopFront,
    // block added to storage (back, backfilling)
    ConfirmedBlockPushBack {
        block: StoredBlockPushSync,
    },
    // block added to storage (front, new data)
    ConfirmedBlockPushFront {
        block: StoredBlockPushSync,
    },
}

--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
permissions:
  contents: write

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  pull_request:
    paths:
      - '.github/workflows/release.yml'
  push:
    branches:
      - master
      - agave-v*
    tags:
      - 'v*'
  workflow_dispatch:

jobs:
  test:
    strategy:
      matrix:
        os:
          - ubuntu-22.04
          - ubuntu-24.04
    runs-on: ["${{ matrix.os }}"]
    steps:
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y liburing-dev

      - uses: actions/checkout@v4

      - uses: fanatid/rust-github-ci-prepare@master
        with:
          cache-version: v0002-release

      - name: Build alpamayo
        run: |
          cargo build -p alpamayo --release
          cd target/release && \
          mv alpamayo alpamayo-${{ matrix.os }}

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: alpamayo-${{ matrix.os }}-${{ github.sha }}
          path: |
            target/release/alpamayo-${{ matrix.os }}

      - name: Upload release
        if: startsWith(github.ref, 'refs/tags/')
        uses: softprops/action-gh-release@v2
        with:
          files: |
            target/release/alpamayo-${{ matrix.os }}

--------------------------------------------------------------------------------
/src/source/sfa.rs:
--------------------------------------------------------------------------------
use {
    crate::storage::rocksdb::SfaIndex,
    solana_sdk::{
        clock::Slot, pubkey::Pubkey, signature::Signature, transaction::TransactionError,
    },
};

#[derive(Debug)]
pub struct SignatureForAddress {
    pub key: [u8; 16],
    pub address_hash: [u8; 8],
    pub address: Pubkey,
    pub signature: Signature,
    pub err: Option<TransactionError>,
    pub memo: Option<String>,
}

impl SignatureForAddress {
    pub fn new(
        slot: Slot,
        address: Pubkey,
        signature: Signature,
        err: Option<TransactionError>,
        memo: Option<String>,
    ) -> Self {
        Self {
            key: SfaIndex::encode(&address, slot),
            address_hash: SfaIndex::address_hash(&address),
            address,
            signature,
            err,
            memo,
        }
    }
}

#[derive(Debug)]
pub struct SignaturesForAddress {
    pub key: [u8; 16],
    pub address_hash: [u8; 8],
    pub signatures: Vec<SignatureStatus>,
}

impl SignaturesForAddress {
    pub fn new(sfa: SignatureForAddress) -> Self {
        Self {
            key: sfa.key,
            address_hash: sfa.address_hash,
            signatures: vec![SignatureStatus {
                signature: sfa.signature,
                err: sfa.err,
                memo: sfa.memo,
            }],
        }
    }

    pub fn merge(&mut self, sfa: SignatureForAddress) {
        self.signatures.push(SignatureStatus {
            signature: sfa.signature,
            err: sfa.err,
            memo: sfa.memo,
        });
    }
}

#[derive(Debug, Clone)]
pub struct SignatureStatus {
    pub signature: Signature,
    pub err: Option<TransactionError>,
    pub memo: Option<String>,
}

--------------------------------------------------------------------------------
/src/source/transaction.rs:
--------------------------------------------------------------------------------
use {
    crate::{
        source::{fees::TransactionFees, sfa::SignatureForAddress},
        storage::rocksdb::TransactionIndex,
    },
    prost::Message as _,
    solana_sdk::{clock::Slot, signature::Signature, transaction::TransactionError},
    solana_storage_proto::convert::generated,
    solana_transaction_status::{TransactionWithStatusMeta, extract_and_fmt_memos},
};

#[derive(Debug)]
pub struct TransactionWithBinary {
    pub key: [u8; 8],
    pub signature: Signature,
    pub err: Option<TransactionError>,
    pub sfa: Vec<SignatureForAddress>,
    pub fees: Option<TransactionFees>,
    pub protobuf: Vec<u8>,
}

impl TransactionWithBinary {
    pub fn new(slot: Slot, tx: TransactionWithStatusMeta, is_vote: Option<bool>) -> Self {
        let signature = *tx.transaction_signature();
        let key = TransactionIndex::encode(&signature);

        let (err, sfa) = match &tx {
            TransactionWithStatusMeta::MissingMetadata(_) => (None, vec![]),
            TransactionWithStatusMeta::Complete(tx) => {
                let account_keys = tx.account_keys();
                let err = tx.meta.status.clone().err();
                let memo = extract_and_fmt_memos(tx);
                let mut sfa = Vec::with_capacity(account_keys.len());
                for pubkey in account_keys.iter() {
                    sfa.push(SignatureForAddress::new(
                        slot,
                        *pubkey,
                        signature,
                        err.clone(),
                        memo.clone(),
                    ))
                }
                (err, sfa)
            }
        };

        let fees = TransactionFees::create(&tx, is_vote);

        let protobuf = generated::ConfirmedTransaction::from(tx).encode_to_vec();

        Self {
            key,
            signature,
            err,
            sfa,
            fees,
            protobuf,
        }
    }
}

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "alpamayo"
version = "0.17.0"
authors = ["Lamports Dev"]
edition = "2024"
description = "Part of Solana RPC stack for sealed data"
homepage = "https://lamports.dev"
repository = "https://github.com/lamports-dev/alpamayo"
license = "Apache-2.0"
keywords = ["solana", "rpc"]
publish = false

[dependencies]
affinity-linux = "1.0.1"
anyhow = "1.0.96"
base64 = "0.22.1"
bincode = "1.3.3"
bitflags = "2.9.0"
bitvec = "1.0.1"
bs58 = "0.5.1"
clap = { version = "4.5.30", features = ["derive"] }
crossbeam = "0.8.4"
foldhash = "0.2.0"
futures = "0.3.31"
http-body-util = "0.1.3"
human-size = "0.4.3"
humantime-serde = "1.1.1"
hyper = "1.6.0"
hyper-util = { version = "0.1.10", features = ["server-auto", "server-graceful"] }
json5 = "1.0.0"
jsonrpsee-types = "0.26.0"
libc = "0.2.170"
maplit = "1.0.2"
metrics = "0.24.1"
metrics-exporter-prometheus = { version = "0.18.0", default-features = false }
num_cpus = "1.16.0"
prost = "0.11.9"
quanta = "0.12.5"
rand = "0.9.1"
rayon = "1.10.0"
regex = "1.11.1"
reqwest = "0.12.14"
richat-client = "7.0.0"
richat-metrics = "1.0.1"
richat-proto = "7.0.0"
richat-shared = { version = "7.0.0", default-features = false, features = ["config", "jsonrpc", "version"] }
rocksdb = "0.24.0"
serde = { version = "1.0.218", features = ["derive"] }
serde_json = "1.0.139"
serde_yaml = "0.9.33"
signal-hook = "0.3.17"
solana-client = "~3.0.4"
solana-commitment-config = "3.0.0"
solana-compute-budget = "~3.0.4"
solana-compute-budget-interface = { version = "3.0.0", features = ["borsh"] }
solana-rpc-client = "~3.0.4"
solana-rpc-client-api = "~3.0.4"
solana-sdk = "3.0.0"
solana-storage-proto = "~3.0.4"
solana-transaction-context = "~3.0.4"
solana-transaction-status = "~3.0.4"
solana-version = "~3.0.4"
thiserror = "2.0.11"
tikv-jemallocator = { version = "0.6.0", features = ["unprefixed_malloc_on_supported_platforms"] }
tokio = "1.43.0"
tokio-uring = "0.5.0"
tokio-util = "0.7.16"
tonic = "0.14.1"
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.19", features = ["ansi", "env-filter", "json"] }
url = "2.5.4"

[build-dependencies]
anyhow = "1.0.96"
cargo-lock = "11.0.0"
git-version = "0.3.9"
vergen = { version = "9.0.4", features = ["build", "rustc"] }

[lints.clippy]
clone_on_ref_ptr = "deny"
missing_const_for_fn = "deny"
trivially_copy_pass_by_ref = "deny"

[profile.release]
lto = true
codegen-units = 1

--------------------------------------------------------------------------------
/src/rpc/workers.rs:
--------------------------------------------------------------------------------
use {
    crate::{
        config::ConfigRpcWorkers,
        rpc::api_jsonrpc::{RpcRequestBlockWorkRequest, RpcRequestTransactionWorkRequest},
    },
    crossbeam::channel::{self, Receiver, RecvTimeoutError, Sender},
    futures::future::{BoxFuture, FutureExt, TryFutureExt, try_join_all},
    std::{thread::Builder, time::Duration},
    tokio::{task::JoinError, time::sleep},
    tokio_util::sync::CancellationToken,
};

pub enum WorkRequest {
    Block(RpcRequestBlockWorkRequest),
    Transaction(RpcRequestTransactionWorkRequest),
}

#[allow(clippy::type_complexity)]
pub fn start(
    config: ConfigRpcWorkers,
    shutdown: CancellationToken,
) -> anyhow::Result<(
    Sender<WorkRequest>,
    BoxFuture<'static, Result<(), JoinError>>,
)> {
    anyhow::ensure!(config.threads > 0, "number of threads can't be zero");

    let (tx, rx) = channel::bounded(config.channel_size);

    let mut jhs = Vec::with_capacity(config.threads);
    for index in 0..config.threads {
        // pin each worker to its own CPU when the affinity list matches the
        // thread count, otherwise share the whole set between all workers
        let cpus = config.affinity.as_ref().map(|affinity| {
            if config.threads == affinity.len() {
                vec![affinity[index]]
            } else {
                affinity.clone()
            }
        });

        let th = Builder::new().name(format!("alpRpcWrk{index:02}")).spawn({
            let rx = rx.clone();
            let shutdown = shutdown.clone();

            move || {
                if let Some(cpus) = cpus {
                    affinity_linux::set_thread_affinity(cpus.into_iter())
                        .expect("failed to set affinity");
                }

                wrk_loop(rx, shutdown)
            }
        })?;

        // the worker is a plain OS thread, so poll `is_finished` from an async
        // task to surface its exit as a joinable future
        let jh = tokio::spawn({
            let shutdown = shutdown.clone();

            async move {
                while !th.is_finished() {
                    let ms = if shutdown.is_cancelled() { 10 } else { 500 };
                    sleep(Duration::from_millis(ms)).await;
                }
                th.join().expect("failed to join thread")
            }
        });
        jhs.push(jh);
    }

    Ok((tx, try_join_all(jhs).map_ok(|_| ()).boxed()))
}

fn wrk_loop(rx: Receiver<WorkRequest>, shutdown: CancellationToken) {
    loop {
        let request = match rx.recv_timeout(Duration::from_millis(500)) {
            Ok(request) => request,
            Err(RecvTimeoutError::Timeout) => {
                if shutdown.is_cancelled() {
                    return;
                } else {
                    continue;
                }
            }
            Err(RecvTimeoutError::Disconnected) => return,
        };

        match request {
            WorkRequest::Block(request) => request.process(),
            WorkRequest::Transaction(request) => request.process(),
        }
    }
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# alpamayo

Lightweight drop-in replacement for the Solana RPC stack, designed for frozen data (blocks, transactions, etc.).

Please use issues only for reporting bugs or discussing feature-related topics. If you're having trouble running alpamayo or need insights about the configuration, please post your question in the Telegram group: [https://t.me/lamportsdev](https://t.me/lamportsdev)

## Sponsored by

- [Triton One](https://triton.one/) — https://x.com/_fanatid/status/1956431719961166218

## Supported methods

### Solana Rpc methods:

#### Blocks based

- [x] `getBlock`
- [x] `getBlockHeight`
- [x] `getBlocks`
- [x] `getBlocksWithLimit`
- [x] `getBlockTime`
- [x] `getFirstAvailableBlock`
- [x] `getLatestBlockhash`
- [x] `getRecentPrioritizationFees`
- [x] `getSignaturesForAddress`
- [x] `getSignatureStatuses`
- [x] `getSlot`
- [x] `getTransaction`
- [x] `isBlockhashValid`

#### Smart cache

- [x] `getClusterNodes`
- [x] `getInflationReward`
- [x] `getLeaderSchedule`

#### Static

- [x] `getVersion`

### Extra methods:

- [x] `/block/${slot}`
- [x] `/tx/${signature}`
- [x] `/version`
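
Both interfaces are plain HTTP. A minimal `reqwest` sketch, modeled on `src/bin/bench.rs` (the endpoint matches the sample `config.yml`; the slot number is a placeholder and must lie within the stored range):

```rust
use reqwest::{Client, header::CONTENT_TYPE};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let client = Client::new();
    let slot = 330_000_000u64; // hypothetical slot

    // JSON-RPC: the same request shape as for any Solana RPC node
    let body = serde_json::json!({
        "jsonrpc": "2.0",
        "id": 0,
        "method": "getBlock",
        "params": [slot, { "maxSupportedTransactionVersion": 0 }],
    })
    .to_string();
    let response = client
        .post("http://127.0.0.1:9000/")
        .header(CONTENT_TYPE, "application/json")
        .body(body)
        .send()
        .await?;
    println!("jsonrpc status: {}", response.status());

    // HttpGet: the same block as raw protobuf, served much faster
    let response = client
        .get(format!("http://127.0.0.1:9000/block/{slot}"))
        .send()
        .await?;
    println!(
        "httpget status: {}, {} bytes",
        response.status(),
        response.bytes().await?.len()
    );

    Ok(())
}
```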
## Blueprint

```mermaid
flowchart LR
    subgraph source1 [**agave**]
        subgraph source1_geyser1 [Yellowstone-gRPC]
        end
        subgraph source_geyser2 [richat-plugin-agave]
        end
        subgraph source_rpc1 [RPC-Server]
        end
    end

    subgraph source2 [**richat**]
    end

    subgraph alpamayo1 [**alpamayo**]
        subgraph tokio1 [Tokio Runtime]
            tokio1_receiver(subscriber)
            tokio1_bank[(transactions)]
            tokio1_rpc(RPC-Client)
            tokio1_metrics(Metrics / Prometheus)
        end

        subgraph storage1 [Thread / Storage]
            storage1_processor(processor)
            storage1_processed[(processed blocks<br>**memory**)]
            storage1_confirmed[(confirmed blocks<br>**files**)]
        end

        subgraph rpc1 [Tokio Runtime]
            rpc1_http(RPC-Server)
        end

        subgraph workers1 [Thread Pool / RPC]
            workers1_th1(Worker 1)
            workers1_thn(Worker N)
        end

        alpamayo1_slots[(**current slots:**<br>first stored<br>finalized<br>confirmed<br>processed)]
    end

    client1(client)
    client2(client/monitoring)

    source1_geyser1 -.->|gRPC| tokio1_receiver
    tokio1_rpc --> source_rpc1
    source_geyser2 -->|Tcp / gRPC / Quic<br>full stream| source2
    source2 -.->|gRPC| tokio1_receiver
    tokio1_receiver --> tokio1_bank
    tokio1_bank --> storage1_processor
    storage1_processor --> tokio1_rpc
    storage1_processor --> storage1_processed
    storage1_processor --> storage1_confirmed
    storage1_processor --> alpamayo1_slots
    rpc1_http --> storage1_processor
    rpc1_http --> workers1_th1
    rpc1_http --> workers1_thn
    rpc1_http --> source_rpc1
    rpc1_http --> alpamayo1_slots
    client1 --> rpc1_http
    client2 --> tokio1_metrics
```

--------------------------------------------------------------------------------
/src/rpc/server.rs:
--------------------------------------------------------------------------------
use {
    crate::{
        config::ConfigRpc,
        rpc::{api_httpget, api_jsonrpc, workers},
        storage::{read::ReadRequest, rocksdb::RocksdbWriteInflationReward, slots::StoredSlots},
    },
    futures::future::{TryFutureExt, ready},
    http_body_util::{BodyExt, Empty as BodyEmpty},
    hyper::{Request, Response, StatusCode, body::Incoming as BodyIncoming, service::service_fn},
    hyper_util::{
        rt::tokio::{TokioExecutor, TokioIo},
        server::conn::auto::Builder as ServerBuilder,
    },
    std::sync::Arc,
    tokio::{net::TcpListener, sync::mpsc, task::JoinError},
    tokio_util::sync::CancellationToken,
    tracing::{debug, error, info},
};

pub async fn spawn(
    config: ConfigRpc,
    stored_slots: StoredSlots,
    requests_tx: mpsc::Sender<ReadRequest>,
    db_write_inflation_reward: RocksdbWriteInflationReward,
    shutdown: CancellationToken,
) -> anyhow::Result<impl Future<Output = Result<(), JoinError>>> {
    let (workers_tx, workers_jhs) = workers::start(config.workers.clone(), shutdown.clone())?;

    let listener = TcpListener::bind(config.endpoint).await?;
    info!("start server at: {}", config.endpoint);

    let api_httpget_state = Arc::new(api_httpget::State::new(
        &config,
        stored_slots.clone(),
        requests_tx.clone(),
    )?);
    let api_jsonrpc_processor = Arc::new(api_jsonrpc::create_request_processor(
        config,
        stored_slots,
        requests_tx,
        db_write_inflation_reward,
        workers_tx,
    )?);

    let jh = tokio::spawn(async move {
        let http = ServerBuilder::new(TokioExecutor::new());
        let graceful = hyper_util::server::graceful::GracefulShutdown::new();

        loop {
            let stream = tokio::select! {
                incoming = listener.accept() => match incoming {
                    Ok((stream, addr)) => {
                        debug!("new connection from {addr}");
                        stream
                    }
                    Err(error) => {
                        error!("failed to accept new connection: {error}");
                        break;
                    }
                },
                () = shutdown.cancelled() => break,
            };

            let service = service_fn({
                let api_httpget_state = Arc::clone(&api_httpget_state);
                let api_jsonrpc_processor = Arc::clone(&api_jsonrpc_processor);
                move |req: Request<BodyIncoming>| {
                    let api_httpget_state = Arc::clone(&api_httpget_state);
                    let api_jsonrpc_processor = Arc::clone(&api_jsonrpc_processor);
                    async move {
                        // JSON-RPC
                        if req.uri().path() == "/" {
                            return api_jsonrpc_processor.on_request(req).await;
                        }

                        // Http/Get
                        if let Some(handler) = api_httpget_state.get_handler(req) {
                            return handler.await;
                        }

                        Response::builder()
                            .status(StatusCode::NOT_FOUND)
                            .body(BodyEmpty::new().boxed())
                    }
                }
            });

            let connection = http.serve_connection(TokioIo::new(stream), service);
            let fut = graceful.watch(connection.into_owned());

            tokio::spawn(async move {
                if let Err(error) = fut.await {
                    error!("Error serving HTTP connection: {error:?}");
                }
            });
        }

        drop(listener);
        graceful.shutdown().await;

        workers_jhs.await
    });

    Ok(jh.and_then(ready))
}

--------------------------------------------------------------------------------
/src/metrics.rs:
--------------------------------------------------------------------------------
use {
    crate::{config::ConfigMetrics, storage::slots::StoredSlots, version::VERSION as VERSION_INFO},
    anyhow::Context,
    metrics::{counter, describe_counter, describe_gauge, describe_histogram},
    metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle},
    richat_shared::jsonrpc::metrics::{
        RPC_REQUESTS_DURATION_SECONDS, describe as describe_jsonrpc_metrics,
    },
    std::{future::Future, time::Duration},
    tokio::{task::JoinError, time::sleep},
};

pub const STORAGE_STORED_SLOTS: &str = "storage_stored_slots"; // type
pub const STORAGE_FILES_SPACE: &str = "storage_files_space_bytes"; // id, type

pub const READ_DISK_SECONDS_TOTAL: &str = "read_disk_seconds_total"; // x_subscription_id, type

pub const WRITE_BLOCK_SYNC_SECONDS: &str = "write_block_sync_seconds";

pub const RPC_WORKERS_CPU_SECONDS_TOTAL: &str = "rpc_workers_cpu_seconds_total"; // x_subscription_id, method

pub const RPC_UPSTREAM_REQUESTS_TOTAL: &str = "rpc_upstream_requests_total"; // x_subscription_id, upstream, method
pub const RPC_UPSTREAM_DURATION_SECONDS: &str = "rpc_upstream_duration_seconds"; // x_subscription_id, upstream, method
pub const RPC_UPSTREAM_BANDWIDTH_TOTAL: &str = "rpc_upstream_bandwidth_total"; // x_subscription_id, upstream, method

pub fn setup() -> anyhow::Result<PrometheusHandle> {
    let handle = PrometheusBuilder::new()
        .set_buckets_for_metric(
            Matcher::Full(WRITE_BLOCK_SYNC_SECONDS.to_owned()),
            &[0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.075, 0.1, 0.2, 0.4],
        )?
        .set_buckets_for_metric(
            Matcher::Full(RPC_REQUESTS_DURATION_SECONDS.to_owned()),
            &[
                0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,
            ],
        )?
        .set_buckets_for_metric(
            Matcher::Full(RPC_UPSTREAM_DURATION_SECONDS.to_owned()),
            &[
                0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,
            ],
        )?
        .install_recorder()
        .context("failed to install prometheus exporter")?;

    describe_jsonrpc_metrics();

    describe_counter!("version", "Alpamayo version info");
    counter!(
        "version",
        "buildts" => VERSION_INFO.buildts,
        "git" => VERSION_INFO.git,
        "package" => VERSION_INFO.package,
        "proto_dragonsmouth" => VERSION_INFO.proto,
        "proto_richat" => VERSION_INFO.proto_richat,
        "rustc" => VERSION_INFO.rustc,
        "solana" => VERSION_INFO.solana,
        "version" => VERSION_INFO.version,
    )
    .absolute(1);

    describe_gauge!(STORAGE_STORED_SLOTS, "Stored slots in db");
    describe_gauge!(STORAGE_FILES_SPACE, "Storage space in files for blocks");

    describe_gauge!(
        READ_DISK_SECONDS_TOTAL,
        "Read disk time by x-subscription-id and type"
    );

    describe_histogram!(WRITE_BLOCK_SYNC_SECONDS, "Write block sync time");

    describe_gauge!(
        RPC_WORKERS_CPU_SECONDS_TOTAL,
        "CPU consumption by RPC workers by x-subscription-id and method"
    );

    describe_counter!(
        RPC_UPSTREAM_REQUESTS_TOTAL,
        "Number of RPC requests to upstream by x-subscription-id, upstream name, method and timeout"
    );
    describe_histogram!(
        RPC_UPSTREAM_DURATION_SECONDS,
        "RPC request time to upstream by x-subscription-id, upstream name and method"
    );
    describe_counter!(
        RPC_UPSTREAM_BANDWIDTH_TOTAL,
        "Number of bytes received by RPC requests to upstream by x-subscription-id, upstream and method"
    );

    Ok(handle)
}

pub async fn spawn_server(
    config: ConfigMetrics,
    handle: PrometheusHandle,
    stored_slots: StoredSlots,
    shutdown: impl Future<Output = ()> + Send + 'static,
) -> anyhow::Result<impl Future<Output = Result<(), JoinError>>> {
    // periodically run upkeep so the recorder's internal state stays bounded
    let recorder_handle = handle.clone();
    tokio::spawn(async move {
        loop {
            sleep(Duration::from_secs(1)).await;
            recorder_handle.run_upkeep();
        }
    });

    richat_metrics::spawn_server(
        richat_metrics::ConfigMetrics {
            endpoint: config.endpoint,
        },
        move || handle.render().into_bytes(), // metrics
        || true,                              // health
        move || stored_slots.is_ready(),      // ready
        shutdown,
    )
    .await
    .map_err(Into::into)
}

--------------------------------------------------------------------------------
/src/bin/bench.rs:
--------------------------------------------------------------------------------
use {
    anyhow::Context,
    clap::Parser,
    futures::future::try_join_all,
    hyper::{StatusCode, header::CONTENT_TYPE},
    quanta::Instant,
    rand::random_range,
    reqwest::Client,
    solana_rpc_client_api::config::RpcBlockConfig,
    solana_sdk::clock::Slot,
    std::{ops::Range, sync::Arc, time::Duration},
    tokio::sync::Mutex,
    url::Url,
};
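// Example invocation (the endpoint is the default; the slot interval is
// hypothetical and must lie inside the stored range):
//
//     bench --interval 330000000..330001000 --count 100 --concurrency 4 --warmup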
#[derive(Debug, Parser)]
struct Args {
    /// Alpamayo endpoint
    #[clap(long, default_value_t = String::from("http://127.0.0.1:9000"))]
    endpoint: String,

    /// Slots interval, like m..n
    #[clap(long)]
    interval: String,

    /// How many blocks to request
    #[clap(long, default_value_t = 5)]
    count: usize,

    /// Requests concurrency
    #[clap(long, default_value_t = 1)]
    concurrency: usize,

    /// Enable warmup
    #[clap(long, default_value_t = false)]
    warmup: bool,

    /// Request only http/get
    #[clap(long, default_value_t = false)]
    only_httpget: bool,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let args = Args::parse();

    let url = Url::parse(&args.endpoint)?;

    let Some((start, end)) = args.interval.split_once("..") else {
        anyhow::bail!("expected valid interval");
    };
    let range = Range {
        start: start.parse::<Slot>().context("failed to parse start")?,
        end: end.parse().context("failed to parse end")?,
    };

    let count = Arc::new(Mutex::new(args.count));

    let elapsed_jsonrpc = Arc::new(Mutex::new((Duration::ZERO, 0)));
    let elapsed_httpget = Arc::new(Mutex::new((Duration::ZERO, 0)));

    try_join_all((0..args.concurrency).map(|_| {
        make_requests(
            url.clone(),
            range.clone(),
            Arc::clone(&count),
            args.warmup,
            args.only_httpget,
            Arc::clone(&elapsed_jsonrpc),
            Arc::clone(&elapsed_httpget),
        )
    }))
    .await?;

    let (elapsed_jsonrpc, size) = *elapsed_jsonrpc.lock().await;
    println!(
        "jsonrpc: total {elapsed_jsonrpc:?} / avg: {:?} / total transferred: {}",
        elapsed_jsonrpc.div_f64(args.count as f64),
        human_bytes(size)
    );
    let (elapsed_httpget, size) = *elapsed_httpget.lock().await;
    println!(
        "httpget: total {elapsed_httpget:?} / avg: {:?} / total transferred: {}",
        elapsed_httpget.div_f64(args.count as f64),
        human_bytes(size)
    );

    Ok(())
}

async fn make_requests(
    url: Url,
    range: Range<Slot>,
    count: Arc<Mutex<usize>>,
    warmup: bool,
    only_httpget: bool,
    elapsed_jsonrpc: Arc<Mutex<(Duration, usize)>>,
    elapsed_httpget: Arc<Mutex<(Duration, usize)>>,
) -> anyhow::Result<()> {
    let mut skip = false;
    loop {
        if skip {
            skip = false;
        } else {
            let mut locked = count.lock().await;
            if *locked == 0 {
                break;
            }
            *locked -= 1;
            drop(locked);
        }

        let slot = random_range(range.clone());

        // warmup
        if warmup && !fetch_slot_get(url.clone(), slot).await?.0 {
            skip = true;
            continue;
        }

        // measure
        let ts = Instant::now();
        let (exists, size) = fetch_slot_get(url.clone(), slot).await?;
        if !warmup && !exists {
            skip = true;
            continue;
        }
        let mut locked = elapsed_httpget.lock().await;
        locked.0 += ts.elapsed();
        locked.1 += size;
        drop(locked);

        if !only_httpget {
            let ts = Instant::now();
            let size = fetch_slot_json(url.clone(), slot).await?;
            let mut locked = elapsed_jsonrpc.lock().await;
            locked.0 += ts.elapsed();
            locked.1 += size;
            drop(locked);
        }
    }

    Ok(())
}

async fn fetch_slot_json(url: Url, slot: Slot) -> anyhow::Result<usize> {
    let body = serde_json::json!({
        "jsonrpc": "2.0",
        "method": "getBlock",
        "id": 0,
        "params": [slot, RpcBlockConfig {
            max_supported_transaction_version: Some(0),
            ..Default::default()
        }]
    })
    .to_string();

    let mut response = Client::builder()
        .build()
        .context("failed to build http client")?
        .post(url.to_string())
        .header(CONTENT_TYPE, "application/json")
        .body(body)
        .send()
        .await
        .context("failed to send jsonrpc request")?;

    anyhow::ensure!(
        response.status() == StatusCode::OK,
        "unexpected response code from jsonrpc {}",
        response.status(),
    );

    let mut size = 0;
    while let Some(chunk) = response
        .chunk()
        .await
        .context("failed to fetch body of jsonrpc request")?
    {
        size += chunk.len();
    }

    Ok(size)
}

async fn fetch_slot_get(mut url: Url, slot: Slot) -> anyhow::Result<(bool, usize)> {
    let slot = slot.to_string();
    if let Ok(mut segments) = url.path_segments_mut() {
        segments.extend(&["block", &slot]);
    }

    let mut response = Client::builder()
        .build()
        .context("failed to build http client")?
        .get(url.to_string())
        .send()
        .await
        .context("failed to send httpget request")?;

    if response.status() == StatusCode::BAD_REQUEST {
        return Ok((false, 0));
    }

    anyhow::ensure!(
        response.status() == StatusCode::OK,
        "unexpected response code from httpget {}",
        response.status(),
    );

    let mut size = 0;
    while let Some(chunk) = response
        .chunk()
        .await
        .context("failed to fetch body of httpget request")?
    {
        size += chunk.len();
    }

    Ok((true, size))
}

fn human_bytes(size: usize) -> String {
    if size > 1024 * 1024 * 1024 {
        format!("{:.3} GiB", size as f64 / 1024.0 / 1024.0 / 1024.0)
    } else if size > 1024 * 1024 {
        format!("{:.3} MiB", size as f64 / 1024.0 / 1024.0)
    } else {
        format!("{:.3} KiB", size as f64 / 1024.0)
    }
}

--------------------------------------------------------------------------------
/src/storage/memory.rs:
--------------------------------------------------------------------------------
use {
    crate::source::block::BlockWithBinary,
    solana_sdk::clock::Slot,
    std::{collections::VecDeque, sync::Arc},
};

#[derive(Debug)]
pub enum MemoryConfirmedBlock {
    // we didn't receive any info about that block
    Missed {
        slot: Slot,
    },
    // block is dead
    Dead {
        slot: Slot,
    },
    // constructed block
    Block {
        slot: Slot,
        block: Arc<BlockWithBinary>,
    },
}

impl MemoryConfirmedBlock {
    const fn missed_or_dead(slot: Slot, dead: bool) -> Self {
        if dead {
            Self::Dead { slot }
        } else {
            Self::Missed { slot }
        }
    }

    pub const fn get_slot(&self) -> Slot {
        *match self {
            Self::Missed { slot } => slot,
            Self::Dead { slot } => slot,
            Self::Block { slot, .. } => slot,
        }
    }
}

#[derive(Debug)]
struct BlockInfo {
    slot: Slot,
    block: Option<Arc<BlockWithBinary>>,
    dead: bool,
    confirmed: bool,
}

impl BlockInfo {
    const fn new(slot: Slot) -> Self {
        Self {
            slot,
            block: None,
            dead: false,
            confirmed: false,
        }
    }
}
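/// Buffers processed/dead/confirmed slot notifications and replays them as an
/// ordered stream of confirmed blocks. A minimal sketch of the intended flow
/// (slot numbers are hypothetical):
///
/// ```ignore
/// let mut memory = StorageMemory::default();
/// memory.add_processed(100, block);
/// memory.set_confirmed(100);
/// while let Some(confirmed) = memory.pop_confirmed() {
///     // persist `confirmed` into long-term storage
/// }
/// ```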
#[derive(Debug, Default)]
pub struct StorageMemory {
    blocks: VecDeque<BlockInfo>,
    confirmed: Slot,
    gen_next_slot: Slot,
}

impl StorageMemory {
    // create empty slots
    fn add_slot(&mut self, slot: Slot) -> Option<&mut BlockInfo> {
        // initialize
        if self.gen_next_slot == 0 {
            self.gen_next_slot = slot;
        }

        // drop if we already reported about that slot
        if slot < self.gen_next_slot {
            return None;
        }

        if self.blocks.is_empty() {
            self.blocks.push_back(BlockInfo::new(slot));
        } else if slot < self.blocks[0].slot {
            for slot in (slot..self.blocks[0].slot).rev() {
                self.blocks.push_front(BlockInfo::new(slot));
            }
        } else if slot > self.blocks[self.blocks.len() - 1].slot {
            for slot in self.blocks[self.blocks.len() - 1].slot + 1..=slot {
                self.blocks.push_back(BlockInfo::new(slot));
            }
        }

        let index = (slot - self.blocks[0].slot) as usize;
        Some(&mut self.blocks[index])
    }

    pub fn add_processed(&mut self, slot: Slot, block: Arc<BlockWithBinary>) {
        if let Some(item) = self.add_slot(slot) {
            item.block = Some(block);
        }
    }

    pub fn set_dead(&mut self, slot: Slot) {
        if let Some(item) = self.add_slot(slot) {
            assert!(!item.confirmed, "trying to mark confirmed slot as dead");
            item.dead = true;
        }
    }

    pub fn set_confirmed(&mut self, slot: Slot) {
        assert!(self.confirmed < slot, "attempt to move confirmed slot backward");

        if let Some(item) = self.add_slot(slot) {
            assert!(!item.dead, "trying to mark dead slot as confirmed");
            item.confirmed = true;
            self.confirmed = slot;
        }
    }

    pub fn pop_confirmed(&mut self) -> Option<MemoryConfirmedBlock> {
        // check that confirmed & gen_next_slot is set
        if self.confirmed == 0 || self.gen_next_slot == 0 {
            return None;
        }

        // get first slot
        let first_slot = self.blocks.front().map(|b| b.slot)?;

        // get index of confirmed slot
        let mut confirmed_index = self
            .blocks
            .iter()
            .enumerate()
            .find_map(|(index, block)| block.confirmed.then_some(index))?;

        let block = loop {
            let item = &self.blocks[confirmed_index];
            match &item.block {
                Some(block) => {
                    // walk down the parent chain while it stays in the buffer
                    if first_slot <= block.parent_slot {
                        confirmed_index = (block.parent_slot - first_slot) as usize;
                        continue;
                    }

                    // parent is below the buffered range but not reported yet: missed
                    if self.gen_next_slot <= block.parent_slot {
                        let slot = self.gen_next_slot;
                        break MemoryConfirmedBlock::Missed { slot };
                    }

                    // slots below the buffered range skipped by the chain are dead
                    if self.gen_next_slot < first_slot {
                        let slot = self.gen_next_slot;
                        break MemoryConfirmedBlock::Dead { slot };
                    }

                    // pop the front block; everything before the confirmed index is dead
                    if self.gen_next_slot == first_slot {
                        for index in 0..confirmed_index {
                            self.blocks[index].block = None;
                            self.blocks[index].dead = true;
                        }

                        let BlockInfo {
                            slot, block, dead, ..
                        } = self.blocks.pop_front().expect("existed");
                        break if let Some(block) = block {
                            MemoryConfirmedBlock::Block { slot, block }
                        } else {
                            MemoryConfirmedBlock::missed_or_dead(slot, dead)
                        };
                    }
                }
                None => {
                    // we don't have any info, definitely missed
                    if self.gen_next_slot < first_slot {
                        let slot = self.gen_next_slot;
                        break MemoryConfirmedBlock::Missed { slot };
                    }

                    // missed if not marked as dead
                    if self.gen_next_slot == first_slot {
                        let BlockInfo { slot, dead, .. } =
                            self.blocks.pop_front().expect("existed");
                        break MemoryConfirmedBlock::missed_or_dead(slot, dead);
                    }
                }
            }

            panic!(
                "failed to get next block, gen next slot = {}, first slot = {first_slot}, confirmed slot = {}, confirmed block = {}, confirmed dead = {}",
                self.gen_next_slot,
                item.slot,
                item.block.is_some(),
                item.dead,
            );
        };

        self.gen_next_slot += 1;
        Some(block)
    }
}

--------------------------------------------------------------------------------
/src/storage/slots.rs:
--------------------------------------------------------------------------------
use {
    crate::{metrics::STORAGE_STORED_SLOTS, util::HashSet},
    metrics::{Gauge, gauge},
    richat_shared::mutex_lock,
    solana_sdk::clock::Slot,
    std::{
        collections::BTreeMap,
        ops::Deref,
        sync::{
            Arc, Mutex,
            atomic::{AtomicBool, AtomicU64, Ordering},
        },
    },
};

#[derive(Debug)]
struct Metrics {
    processed: Gauge,
    confirmed: Gauge,
    finalized: Gauge,
    first_available: Gauge,
    total: Gauge,
}

impl Default for Metrics {
    fn default() -> Self {
        Self {
            processed: gauge!(STORAGE_STORED_SLOTS, "type" => "processed"),
            confirmed: gauge!(STORAGE_STORED_SLOTS, "type" => "confirmed"),
            finalized: gauge!(STORAGE_STORED_SLOTS, "type" => "finalized"),
            first_available: gauge!(STORAGE_STORED_SLOTS, "type" => "first_available"),
            total: gauge!(STORAGE_STORED_SLOTS, "type" => "total"),
        }
    }
}

#[derive(Debug)]
pub struct StoredSlotsInner {
    processed: AtomicU64,
    confirmed: AtomicU64,
    finalized: AtomicU64,
    first_available: AtomicU64,
    max_recent_blockhashes: AtomicBool,
    metrics: Metrics,
}

impl Default for StoredSlotsInner {
    fn default() -> Self {
        Self {
            processed: AtomicU64::new(u64::MIN),
            confirmed: AtomicU64::new(u64::MIN),
            finalized: AtomicU64::new(u64::MIN),
            first_available: AtomicU64::new(u64::MAX),
            max_recent_blockhashes: AtomicBool::new(false),
            metrics: Metrics::default(),
        }
    }
}

#[derive(Debug, Default, Clone)]
pub struct StoredSlots {
    inner: Arc<StoredSlotsInner>,
}

impl Deref for StoredSlots {
    type Target = StoredSlotsInner;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl StoredSlots {
    pub fn is_ready(&self) -> bool {
        self.processed_load() != u64::MIN
            && self.confirmed_load() != u64::MIN
            && self.finalized_load() != u64::MIN
            && self.first_available_load() != u64::MAX
            && self.max_recent_blockhashes.load(Ordering::Relaxed)
    }

    pub fn processed_load(&self) -> Slot {
        self.processed.load(Ordering::SeqCst)
    }

    fn processed_store_max(&self, slot: Slot) {
        // fetch_max returns the previous value, so take max(slot) for the metric
        let slot = self.processed.fetch_max(slot, Ordering::SeqCst).max(slot);
        self.metrics.processed.set(slot as f64);
    }

    pub fn confirmed_load(&self) -> Slot {
        self.confirmed.load(Ordering::SeqCst)
    }

    fn confirmed_store(&self, slot: Slot) {
        self.confirmed.store(slot, Ordering::SeqCst);
        self.metrics.confirmed.set(slot as f64);
    }

    pub fn finalized_load(&self) -> Slot {
        self.finalized.load(Ordering::Relaxed)
    }

    fn finalized_store(&self, slot: Slot) {
        if slot >= self.first_available_load() {
            self.finalized.store(slot, Ordering::Relaxed);
            self.metrics.finalized.set(slot as f64);
        }
    }

    pub fn first_available_load(&self) -> Slot {
        self.first_available.load(Ordering::SeqCst)
    }

    pub fn first_available_store(&self, slot: Option<Slot>) {
        let slot = slot.unwrap_or(u64::MAX);
        self.first_available.store(slot, Ordering::SeqCst);
        self.metrics.first_available.set(slot as f64);
    }

    pub fn set_total(&self, total: usize) {
        self.metrics.total.set(total as f64);
    }
}
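/// Aggregates slot reports from `total_readers` independent readers: a slot is
/// published to the shared `StoredSlots` only once every reader has reported it.
/// E.g. with `total_readers = 2`, `set_confirmed(0, 5)` alone changes nothing,
/// while a following `set_confirmed(1, 5)` advances the confirmed slot to 5.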
#[derive(Debug, Clone)]
pub struct StoredSlotsRead {
    stored_slots: StoredSlots,
    slots_processed: Arc<Mutex<BTreeMap<Slot, HashSet<usize>>>>,
    slots_confirmed: Arc<Mutex<BTreeMap<Slot, HashSet<usize>>>>,
    slots_finalized: Arc<Mutex<BTreeMap<Slot, HashSet<usize>>>>,
    max_recent_blockhashes: Arc<Mutex<usize>>,
    max_recent_blockhashes_ready: bool,
    total_readers: usize,
}

impl StoredSlotsRead {
    pub fn new(stored_slots: StoredSlots, total_readers: usize) -> Self {
        Self {
            stored_slots,
            slots_processed: Arc::default(),
            slots_confirmed: Arc::default(),
            slots_finalized: Arc::default(),
            max_recent_blockhashes: Arc::default(),
            max_recent_blockhashes_ready: false,
            total_readers,
        }
    }

    fn set(
        &self,
        map: &Arc<Mutex<BTreeMap<Slot, HashSet<usize>>>>,
        index: usize,
        slot: Slot,
    ) -> bool {
        let mut lock = mutex_lock(map);

        let entry = lock.entry(slot).or_default();
        entry.insert(index);

        // report `true` only when every reader has seen the slot
        if entry.len() == self.total_readers {
            lock.remove(&slot);
            true
        } else {
            false
        }
    }

    pub fn set_processed(&self, index: usize, slot: Slot) {
        if self.set(&self.slots_processed, index, slot) {
            self.stored_slots.processed_store_max(slot);
        }
    }

    pub fn set_confirmed(&self, index: usize, slot: Slot) {
        if self.set(&self.slots_confirmed, index, slot) {
            self.stored_slots.processed_store_max(slot);
            self.stored_slots.confirmed_store(slot);
        }
    }

    pub fn set_finalized(&self, index: usize, slot: Slot) {
        if self.set(&self.slots_finalized, index, slot) {
            self.stored_slots.finalized_store(slot);

            // drop bookkeeping for everything at or below the finalized slot
            for map in &[
                &self.slots_processed,
                &self.slots_confirmed,
                &self.slots_finalized,
            ] {
                let mut lock = mutex_lock(map);
                loop {
                    match lock.first_key_value().map(|(slot, _)| *slot) {
                        Some(map_slot) if map_slot <= slot => {
                            lock.remove(&map_slot);
                        }
                        _ => break,
                    }
                }
            }
        }
    }

    // max recent blockhashes
    pub fn set_ready(&mut self, ready: bool) {
        if ready && !self.max_recent_blockhashes_ready {
            self.max_recent_blockhashes_ready = true;

            let mut lock = mutex_lock(&self.max_recent_blockhashes);
            *lock += 1;

            // all readers reported ready: flip the flag checked by `is_ready`
            if *lock == self.total_readers {
                self.stored_slots
                    .max_recent_blockhashes
                    .store(true, Ordering::Relaxed);
            }
        }
    }
}

--------------------------------------------------------------------------------
/src/storage/source.rs:
--------------------------------------------------------------------------------
use {
    crate::{
        config::{ConfigSource, ConfigSourceStream},
        source::{
            block::BlockWithBinary,
            http::{GetBlockError, HttpSource},
            stream::{StreamSource, StreamSourceMessage},
        },
    },
    futures::stream::StreamExt,
    solana_client::client_error::ClientError,
    solana_sdk::clock::Slot,
    std::sync::Arc,
    thiserror::Error,
    tokio::{
        sync::{Notify, mpsc, oneshot},
        time::sleep,
    },
    tokio_util::sync::CancellationToken,
    tracing::error,
};

#[derive(Debug)]
pub enum HttpRequest {
    Slots {
        tx: oneshot::Sender<Result<(Slot, Slot), ClientError>>,
    },
    FirstAvailableBlock {
        tx: oneshot::Sender<Result<Slot, ClientError>>,
    },
    Block {
        slot: Slot,
        httpget: bool,
        tx: oneshot::Sender<Result<BlockWithBinary, GetBlockError>>,
    },
}

#[derive(Debug, Error)]
pub enum HttpSourceConnectedError<E> {
    #[error("send channel is closed")]
    SendError,
    #[error("recv channel is closed")]
    RecvError,
    #[error(transparent)]
    Error(#[from] E),
}

pub type HttpSourceConnectedResult<T, E> = Result<T, HttpSourceConnectedError<E>>;
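/// Cloneable handle that forwards lookups to the HTTP task through a bounded
/// channel and awaits the reply on a oneshot channel. A sketch of the round
/// trip (the slot number is hypothetical):
///
/// ```ignore
/// let (connected, http_rx) = HttpSourceConnected::new();
/// // `http_rx` is consumed by `start(...)`; any clone can then do:
/// let block = connected.get_block(330_000_000, false).await?;
/// ```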
#[derive(Debug, Clone)]
pub struct HttpSourceConnected {
    http_tx: mpsc::Sender<HttpRequest>,
}

impl HttpSourceConnected {
    pub fn new() -> (Self, mpsc::Receiver<HttpRequest>) {
        let (http_tx, http_rx) = mpsc::channel(1);
        let this = Self { http_tx };
        (this, http_rx)
    }

    async fn send<T, E>(
        &self,
        request: HttpRequest,
        rx: oneshot::Receiver<Result<T, E>>,
    ) -> HttpSourceConnectedResult<T, E> {
        if self.http_tx.send(request).await.is_err() {
            Err(HttpSourceConnectedError::SendError)
        } else {
            match rx.await {
                Ok(Ok(result)) => Ok(result),
                Ok(Err(error)) => Err(HttpSourceConnectedError::Error(error)),
                Err(_) => Err(HttpSourceConnectedError::RecvError),
            }
        }
    }

    pub async fn get_slots(&self) -> HttpSourceConnectedResult<(Slot, Slot), ClientError> {
        let (tx, rx) = oneshot::channel();
        self.send(HttpRequest::Slots { tx }, rx).await
    }

    pub async fn get_block(
        &self,
        slot: Slot,
        httpget: bool,
    ) -> HttpSourceConnectedResult<BlockWithBinary, GetBlockError> {
        let (tx, rx) = oneshot::channel();
        self.send(HttpRequest::Block { slot, httpget, tx }, rx)
            .await
    }

    pub async fn get_first_available_block(&self) -> HttpSourceConnectedResult<Slot, ClientError> {
        let (tx, rx) = oneshot::channel();
        self.send(HttpRequest::FirstAvailableBlock { tx }, rx).await
    }
}

pub async fn start(
    config: ConfigSource,
    mut http_rx: mpsc::Receiver<HttpRequest>,
    stream_start: Arc<Notify>,
    stream_tx: mpsc::Sender<StreamSourceMessage>,
    shutdown: CancellationToken,
) -> anyhow::Result<()> {
    let http = Arc::new(HttpSource::new(config.http).await?);
    let stream = start_stream(config.stream, stream_tx, stream_start);

    tokio::pin!(shutdown);
    tokio::pin!(stream);

    let mut finished = false;
    while !finished {
        finished = tokio::select! {
            () = shutdown.cancelled() => true,
            item = http_rx.recv() => handle_http(item, Arc::clone(&http)),
            result = &mut stream => return result,
        };
    }
    shutdown.cancel();

    Ok(())
}

async fn start_stream(
    config: ConfigSourceStream,
    stream_tx: mpsc::Sender<StreamSourceMessage>,
    stream_start: Arc<Notify>,
) -> anyhow::Result<()> {
    // start from the configured initial backoff and grow it exponentially
    // up to `backoff_max` while connection attempts keep failing
    let mut backoff_duration = config.reconnect.map(|c| c.backoff_init);
    let backoff_max = config.reconnect.map(|c| c.backoff_max).unwrap_or_default();

    stream_start.notified().await;
    loop {
        let mut stream = loop {
            match StreamSource::new(config.clone()).await {
                Ok(stream) => break stream,
                Err(error) => {
                    if let Some(sleep_duration) = backoff_duration {
                        error!(?error, "failed to connect to gRPC stream");
                        sleep(sleep_duration).await;
                        backoff_duration = Some(backoff_max.min(sleep_duration * 2));
                    } else {
                        return Err(error.into());
                    }
                }
            }
        };
        if stream_tx.send(StreamSourceMessage::Start).await.is_err() {
            error!("failed to send a message to the stream");
            return Ok(());
        }

        loop {
            match stream.next().await {
                Some(Ok(message)) => {
                    if stream_tx.send(message).await.is_err() {
                        error!("failed to send a message to the stream");
                        return Ok(());
                    }
                }
                Some(Err(error)) => {
                    error!(?error, "gRPC stream error");
                    break;
                }
                None => {
                    error!("gRPC stream is finished");
                    break;
                }
            }
        }

        if let Some(config) = config.reconnect {
            backoff_duration = Some(config.backoff_init);
        } else {
            return Ok(());
        }
    }
}

fn handle_http(item: Option<HttpRequest>, http: Arc<HttpSource>) -> bool {
    match item {
        Some(request) => {
            tokio::spawn(async move {
                match request {
                    HttpRequest::Slots { tx } => {
                        let result =
                            tokio::try_join!(http.get_finalized_slot(), http.get_confirmed_slot());
                        let _ = tx.send(result);
                    }
                    HttpRequest::FirstAvailableBlock { tx } => {
                        let result = http.get_first_available_block().await;
                        let _ = tx.send(result);
                    }
                    HttpRequest::Block { slot, httpget, tx } => {
                        let result = http.get_block(slot, httpget).await;
                        let _ = tx.send(result);
                    }
                }
            });
            false
        }
        None => {
            error!("RPC requests stream is finished");
            true
        }
    }
}

--------------------------------------------------------------------------------
/config.yml:
--------------------------------------------------------------------------------
---
logs:
  json: false
metrics:
  # Prometheus metrics available on `/metrics` endpoint
  # Readiness probe available on `/ready`
  # Liveness probe available on `/health`
  endpoint: 127.0.0.1:8001
source:
  # Tokio runtime for RPC requests and the Geyser stream
  tokio:
    worker_threads: null # Number of threads in the Tokio runtime, by default all available cores
    affinity: null # Pin threads to specific CPUs, optional (taskset syntax)
  # RPC endpoint, used for edge cases, stream reconnect, catch-up on restart, and backfilling
  # The HttpGet endpoint can be used for catch-up on restart and backfilling (can be skipped)
  http:
    rpc: http://127.0.0.1:8899/
    httpget: null
    timeout: 30s
    concurrency: 10 # Max number of requests in progress at one moment
  # Geyser stream
  stream:
    source: richat # richat (richat-geyser-plugin) or dragons_mouth (yellowstone grpc)
    reconnect: # Reconnect options, if `null` then reconnect is disabled
      backoff_init: 100ms
      backoff_max: 1s # Max delay between reconnect attempts
    endpoint: http://127.0.0.1:10000
    ca_certificate: null
    connect_timeout: null
    buffer_size: null
    http2_adaptive_window: null
    http2_keep_alive_interval: null
    initial_connection_window_size: null
    initial_stream_window_size: null
    keep_alive_timeout: null
    keep_alive_while_idle: false
    tcp_keepalive: 15s
    tcp_nodelay: true
    timeout: null
    max_decoding_message_size: 4_194_304 # 4MiB, better set to 16MiB (account messages can exceed 10MiB)
    compression: # Optional compression, not recommended due to high traffic volume
      accept: [] # valid: gzip, zstd
      send: [] # valid: gzip, zstd
    x_token: null
storage:
  # Backfill db to the specified slot; if missing, no backfilling at all
  # If you want to reach max blocks then setting it to 0 is the easiest solution
  # backfilling:
  #   sync_to: 338_669_763
  blocks:
    max: 1_000 # Max number of stored slots, once the limit is reached old blocks will be pruned
    http_getblock_max_retries: 10 # Max retries when trying to fetch blocks from RPC
    http_getblock_backoff_init: 100ms # Initial exponential retry delay
  # Files to store blocks in protobuf, can be on different disks
  # Used in a round-robin schedule. Once all space is used old blocks will be pruned
  files:
    - id: 0
      path: ./db/alpamayo/storage0
      size: 8gb
      new_blocks: true
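  # For example, a second disk can be added as another round-robin target
  # (the path below is hypothetical):
  # - id: 1
  #   path: /mnt/disk1/alpamayo/storage1
  #   size: 8gb
  #   new_blocks: true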
| stream: 23 | source: richat # richat (richat-geyser-plugin) or dragons_mouth (yellowstone grpc) 24 | reconnect: # Reconnect options, if `null` then reconnect is disabled 25 | backoff_init: 100ms 26 | backoff_max: 1s # Max delay between reconnect attempts 27 | endpoint: http://127.0.0.1:10000 28 | ca_certificate: null 29 | connect_timeout: null 30 | buffer_size: null 31 | http2_adaptive_window: null 32 | http2_keep_alive_interval: null 33 | initial_connection_window_size: null 34 | initial_stream_window_size: null 35 | keep_alive_timeout: null 36 | keep_alive_while_idle: false 37 | tcp_keepalive: 15s 38 | tcp_nodelay: true 39 | timeout: null 40 | max_decoding_message_size: 4_194_304 # 4MiB, better set to 16MiB (because account messages can be more than 10MiB) 41 | compression: # Optional compression, not recommended due to high traffic volume 42 | accept: [] # valid: gzip, zstd 43 | send: [] # valid: gzip, zstd 44 | x_token: null 45 | storage: 46 | # Backfill db to specified slot, if missed no backfilling at all 47 | # If you want to reach max blocks then set to 0 would be easiest solution 48 | # backfilling: 49 | # sync_to: 338_669_763 50 | blocks: 51 | max: 1_000 # Max number of stored slots, once limit is reached old blocks will be pruned 52 | http_getblock_max_retries: 10 # Max retries when trying to fetch blocks from RPC 53 | http_getblock_backoff_init: 100ms # Initial exponentional retry delay 54 | # Files to store blocks in protobuf, can be on different disks 55 | # Used in round-robin schedule. Once all space is used old blocks will be pruned 56 | files: 57 | - id: 0 58 | path: ./db/alpamayo/storage0 59 | size: 8gb 60 | new_blocks: true 61 | rocksdb: 62 | path: ./db/alpamayo/rocksdb # Rocksdb location, don't forget to increase `max_open_files` 63 | index_slot_compression: none # Slot info (transactions and touched addresses) compression: none, snappy, zlib, bz2, lz4, lz4hc, zstd 64 | index_sfa_compression: none # Address signatures per slot compression: none, snappy, zlib, bz2, lz4, lz4hc, zstd 65 | read_workers: null # Number of threads used to read data from Rocksdb, by default number of CPUs on the system 66 | write: 67 | affinity: null # Optional affinity of write thread, used to build blocks and write them to storage files 68 | read: 69 | threads: 1 # Number of single-threaded tokio runtimes used to read data from storage files 70 | affinity: null # Optional affinity of read tokio runtimes 71 | # Max number of async requests handled by every read thread 72 | # async requests: getBlock, getInflationReward, getSignaturesForAddress, getSignatureStatuses, getTransaction 73 | thread_max_async_requests: 1024 74 | # Max number of read requests from files storage, includes: getBlock, getTransaction 75 | thread_max_files_requests: 32 76 | rpc: 77 | endpoint: 127.0.0.1:9000 # RPC endpoint, implement same JSON-RPC API as Agave 78 | # RPC Tokio runtime, used to parse requests, serialize responses 79 | tokio: 80 | worker_threads: null # Number of threads in Tokio runtime, by default all available cores 81 | affinity: null # Pin threads to specified CPUs, optional 82 | body_limit: 50KiB # JSON-RPC body limit 83 | extra_headers: # Extra headers added to response 84 | # access-control-allow-origin: "*" 85 | # access-control-max-age: 86400 86 | # access-control-allow-methods: OPTIONS, POST 87 | request_timeout: 60s 88 | # httpget is plain HTTP interface with protobuf in response, allow to server blocks up to 15 times faster 89 | # `/block/${slot}` — block request 90 | # `/tx/${signature}` — 
91 | calls_httpget: 92 | - getBlock 93 | - getTransaction 94 | # Supported methods in JSON-RPC 95 | calls_jsonrpc: 96 | - getBlock 97 | - getBlockHeight 98 | - getBlocks 99 | - getBlocksWithLimit 100 | - getBlockTime 101 | - getClusterNodes 102 | - getFirstAvailableBlock 103 | - getInflationReward 104 | - getLatestBlockhash 105 | - getLeaderSchedule 106 | - getRecentPrioritizationFees 107 | - getSignaturesForAddress 108 | - getSignatureStatuses 109 | - getSlot 110 | - getTransaction 111 | - getVersion 112 | - isBlockhashValid 113 | gsfa_limit: 1_000 # Max limit in `getSignaturesForAddress`, default value in Agave is 1000 114 | gss_transaction_history: true # Allows handling `getSignatureStatuses` from storage, not only from the latest 300 slots 115 | grpf_percentile: true # Allows the `percentile` option in `getRecentPrioritizationFees` 116 | gcn_cache_ttl: 1s # TTL for cached `getClusterNodes` 117 | request_channel_capacity: 4_096 # Queue size of requests to read threads (be aware, one JSON-RPC request can contain multiple requests) 118 | # fallback for httpget 119 | # upstream_httpget: 120 | # - name: main 121 | # calls: 122 | # - getBlock 123 | # - getTransaction 124 | # endpoint: http://127.0.0.1:8899 125 | # user_agent: alpamayo/v0.1.0 126 | # version: HTTP/1.1 127 | # timeout: 30s 128 | # fallback for JSON-RPC methods 129 | # upstream_jsonrpc: 130 | # - name: main 131 | # calls: 132 | # - getBlock 133 | # - getBlockHeight 134 | # - getBlocks 135 | # - getBlocksWithLimit 136 | # - getBlockTime 137 | # - getClusterNodes 138 | # - getFirstAvailableBlock 139 | # - getInflationReward 140 | # - getLatestBlockhash 141 | # - getLeaderSchedule 142 | # - getRecentPrioritizationFees 143 | # - getSignaturesForAddress 144 | # - getSignatureStatuses 145 | # - getSlot 146 | # - getTransaction 147 | # - getVersion 148 | # - isBlockhashValid 149 | # endpoint: http://127.0.0.1:8899 150 | # user_agent: alpamayo/v0.1.0 151 | # version: HTTP/1.1 152 | # timeout: 30s 153 | # getBlock and getTransaction require a lot of CPU time to decode protobuf, encode to the requested format, and serialize data 154 | # getBlock can require more than 100ms to do all this work, so it is good to keep these threads separated from everything else 155 | # for example, you can pin: 156 | # - source runtime + write thread + read threads to one pool of CPUs 157 | # - rpc runtime to another pool of CPUs 158 | # - workers to some CPUs from rpc runtime 159 | workers: 160 | threads: null # Number of workers, by default the number of CPUs 161 | affinity: null # Optional threads affinity 162 | channel_size: 4_096 # Queue size to worker threads 163 | -------------------------------------------------------------------------------- /src/bin/alpamayo.rs: -------------------------------------------------------------------------------- 1 | use { 2 | alpamayo::{config::Config, metrics, rpc, storage, version::VERSION}, 3 | anyhow::Context, 4 | clap::Parser, 5 | futures::future::{FutureExt, TryFutureExt, ready, try_join_all}, 6 | quanta::Instant, 7 | signal_hook::{consts::SIGINT, iterator::Signals}, 8 | std::{ 9 | sync::Arc, 10 | thread::{self, sleep}, 11 | time::Duration, 12 | }, 13 | tokio::sync::{Mutex, Notify, broadcast, mpsc}, 14 | tokio_util::sync::CancellationToken, 15 | tracing::{error, info, warn}, 16 | }; 17 | 18 | #[cfg(not(target_env = "msvc"))] 19 | #[global_allocator] 20 | static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; 21 | 22 | #[derive(Debug, Parser)] 23 | #[clap( 24 | author, 25 | version, 26 |
about = "Alpamayo: part of Solana RPC stack for sealed data" 27 | )] 28 | struct Args { 29 | /// Path to config 30 | #[clap(short, long, default_value_t = String::from("config.yml"))] 31 | pub config: String, 32 | 33 | /// Only check config and exit 34 | #[clap(long, default_value_t = false)] 35 | pub check: bool, 36 | 37 | /// Revert first slots, rescue if invalid slot was added 38 | #[clap(long, hide = true)] 39 | pub pop_slots_back: Option, 40 | 41 | /// Revert latest slots, rescue if invalid slot was added 42 | #[clap(long, hide = true)] 43 | pub pop_slots_front: Option, 44 | } 45 | 46 | fn main() { 47 | if let Err(err) = try_main() { 48 | match std::env::var_os("RUST_BACKTRACE") { 49 | Some(value) if value == *"0" => error!("Error: {err}"), 50 | None => error!("Error: {err}"), 51 | _ => error!("Error: {err:?}"), 52 | } 53 | std::process::exit(1); 54 | } 55 | } 56 | 57 | fn try_main() -> anyhow::Result<()> { 58 | let args = Args::parse(); 59 | let config = Config::load_from_file(&args.config) 60 | .with_context(|| format!("failed to load config from {}", args.config)) 61 | .unwrap(); 62 | 63 | // Setup logs 64 | alpamayo::log::setup(config.logs.json).unwrap(); 65 | info!("version: {} / {}", VERSION.version, VERSION.git); 66 | 67 | let metrics_handle = metrics::setup()?; 68 | 69 | // Exit if we only check the config 70 | if args.check { 71 | info!("Config is OK!"); 72 | return Ok(()); 73 | } 74 | 75 | // Shutdown channel/flag 76 | let mut threads = Vec::<(String, _)>::with_capacity(8); 77 | let shutdown = CancellationToken::new(); 78 | 79 | // Source / storage write channels 80 | let stream_start = Arc::new(Notify::new()); 81 | let (stream_tx, stream_rx) = mpsc::channel(2048); 82 | let (http_storage_source, http_rx) = storage::source::HttpSourceConnected::new(); 83 | let http_concurrency = config.source.http.concurrency; 84 | 85 | // Storage write / storage read channels 86 | let (sync_tx, _) = broadcast::channel(1024); 87 | 88 | // Storage read / rpc channels 89 | let stored_slots = storage::slots::StoredSlots::default(); 90 | let (read_requests_tx, read_requests_rx) = mpsc::channel(config.rpc.request_channel_capacity); 91 | 92 | // Open Rocksdb for slots and indexes 93 | let ts = Instant::now(); 94 | let (db_write, db_write_inflation_reward, db_read, db_threads) = 95 | storage::rocksdb::Rocksdb::open(config.storage.rocksdb.clone(), sync_tx.clone())?; 96 | info!(elapsed = ?ts.elapsed(), "rocksdb opened"); 97 | threads.extend(db_threads); 98 | 99 | // Create source runtime 100 | let jh = thread::Builder::new().name("alpSource".to_owned()).spawn({ 101 | let stream_start = Arc::clone(&stream_start); 102 | let stored_slots = stored_slots.clone(); 103 | let shutdown = shutdown.clone(); 104 | move || { 105 | let runtime = config.source.tokio.clone().build_runtime("alpSourceRt")?; 106 | runtime.block_on(async move { 107 | let source_fut = tokio::spawn(storage::source::start( 108 | config.source, 109 | http_rx, 110 | stream_start, 111 | stream_tx, 112 | shutdown.clone(), 113 | )) 114 | .map_err(Into::into) 115 | .and_then(ready) 116 | .boxed(); 117 | 118 | let server_fut = metrics::spawn_server( 119 | config.metrics, 120 | metrics_handle, 121 | stored_slots, 122 | shutdown.cancelled_owned(), 123 | ) 124 | .await? 
125 | .map_err(anyhow::Error::from) 126 | .boxed(); 127 | 128 | try_join_all(vec![source_fut, server_fut]).await.map(|_| ()) 129 | }) 130 | } 131 | })?; 132 | threads.push(("alpSource".to_owned(), Some(jh))); 133 | 134 | // Storage read runtimes 135 | let read_requests_rx = Arc::new(Mutex::new(read_requests_rx)); 136 | let stored_confirmed_slot = 137 | storage::slots::StoredSlotsRead::new(stored_slots.clone(), config.storage.read.threads); 138 | for index in 0..config.storage.read.threads { 139 | let affinity = config.storage.read.affinity.as_ref().map(|affinity| { 140 | if affinity.len() == config.storage.read.threads { 141 | vec![affinity[index]] 142 | } else { 143 | affinity.clone() 144 | } 145 | }); 146 | 147 | let jh = storage::read::start( 148 | index, 149 | affinity, 150 | sync_tx.subscribe(), 151 | config.storage.read.thread_max_async_requests, 152 | config.storage.read.thread_max_files_requests, 153 | Arc::clone(&read_requests_rx), 154 | stored_confirmed_slot.clone(), 155 | )?; 156 | threads.push((format!("alpStorageRd{index:02}"), Some(jh))); 157 | } 158 | drop(read_requests_rx); 159 | 160 | // Storage write runtime 161 | let jh = storage::write::start( 162 | args.pop_slots_back, 163 | args.pop_slots_front, 164 | config.storage.clone(), 165 | stored_slots.clone(), 166 | db_write, 167 | db_read, 168 | http_storage_source, 169 | http_concurrency, 170 | stream_start, 171 | stream_rx, 172 | sync_tx, 173 | shutdown.clone(), 174 | )?; 175 | threads.push(("alpStorageWrt".to_owned(), Some(jh))); 176 | 177 | // Rpc runtime 178 | let jh = thread::Builder::new().name("alpRpc".to_owned()).spawn({ 179 | let shutdown = shutdown.clone(); 180 | move || { 181 | let runtime = config.rpc.tokio.clone().build_runtime("alpRpcRt")?; 182 | runtime.block_on(async move { 183 | rpc::server::spawn( 184 | config.rpc, 185 | stored_slots, 186 | read_requests_tx, 187 | db_write_inflation_reward, 188 | shutdown.clone(), 189 | ) 190 | .await? 
191 | .await?; 192 | Ok::<(), anyhow::Error>(()) 193 | }) 194 | } 195 | })?; 196 | threads.push(("alpRpc".to_owned(), Some(jh))); 197 | 198 | // Shutdown loop 199 | let mut signals = Signals::new([SIGINT])?; 200 | 'outer: while threads.iter().any(|th| th.1.is_some()) { 201 | for signal in signals.pending() { 202 | match signal { 203 | SIGINT => { 204 | if shutdown.is_cancelled() { 205 | warn!("SIGINT received again, shutdown now"); 206 | break 'outer; 207 | } 208 | info!("SIGINT received..."); 209 | shutdown.cancel(); 210 | } 211 | _ => unreachable!(), 212 | } 213 | } 214 | 215 | for (name, tjh) in threads.iter_mut() { 216 | if let Some(jh) = tjh.take() { 217 | if jh.is_finished() { 218 | jh.join() 219 | .unwrap_or_else(|_| panic!("{name} thread join failed"))?; 220 | info!("thread {name} finished"); 221 | } else { 222 | *tjh = Some(jh); 223 | } 224 | } 225 | } 226 | 227 | sleep(Duration::from_millis(25)); 228 | } 229 | 230 | Ok(()) 231 | } 232 | -------------------------------------------------------------------------------- /src/source/fees.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | source::transaction::TransactionWithBinary, 4 | util::{HashMap, HashSet}, 5 | }, 6 | solana_sdk::pubkey::Pubkey, 7 | solana_transaction_status::{ 8 | TransactionWithStatusMeta, VersionedTransactionWithStatusMeta, parse_vote::parse_vote, 9 | }, 10 | tracing::warn, 11 | }; 12 | 13 | #[derive(Debug)] 14 | pub struct TransactionFees { 15 | unit_price: u64, 16 | account_keys: HashSet, 17 | } 18 | 19 | impl TransactionFees { 20 | pub fn create(tx: &TransactionWithStatusMeta, is_vote: Option) -> Option { 21 | let TransactionWithStatusMeta::Complete(tx) = tx else { 22 | return None; 23 | }; 24 | 25 | if is_vote == Some(true) || Self::is_vote(tx) { 26 | return None; 27 | } 28 | 29 | let account_keys = tx.account_keys(); 30 | let Some(instructions) = tx 31 | .transaction 32 | .message 33 | .instructions() 34 | .iter() 35 | .map(|ix| { 36 | account_keys 37 | .get(ix.program_id_index as usize) 38 | .map(|program_id| (program_id, ix)) 39 | }) 40 | .collect::>>() 41 | else { 42 | warn!("failed to get program id"); 43 | return None; 44 | }; 45 | 46 | let computed_budget_limits = 47 | match compute_budget_processor::process_compute_budget_instructions( 48 | instructions.into_iter(), 49 | ) { 50 | Ok(value) => value, 51 | Err(error) => { 52 | warn!(?error, "failed to compute transaction budget"); 53 | return None; 54 | } 55 | }; 56 | 57 | // count only transactions with limit 58 | if computed_budget_limits.compute_unit_limit == 0 { 59 | return None; 60 | } 61 | 62 | let mut account_keys = HashSet::default(); 63 | for (index, pubkey) in tx 64 | .transaction 65 | .message 66 | .static_account_keys() 67 | .iter() 68 | .enumerate() 69 | { 70 | if tx.transaction.message.is_maybe_writable(index, None) { 71 | account_keys.insert(*pubkey); 72 | } 73 | } 74 | 75 | Some(Self { 76 | unit_price: computed_budget_limits.compute_unit_price, 77 | account_keys, 78 | }) 79 | } 80 | 81 | fn is_vote(tx: &VersionedTransactionWithStatusMeta) -> bool { 82 | let account_keys = tx.account_keys(); 83 | for instruction in tx.transaction.message.instructions() { 84 | if parse_vote(instruction, &account_keys).is_ok() { 85 | return true; 86 | } 87 | } 88 | false 89 | } 90 | } 91 | 92 | #[derive(Debug)] 93 | pub struct TransactionsFees { 94 | fees: Vec, 95 | writable_account_fees: HashMap>, 96 | } 97 | 98 | impl TransactionsFees { 99 | pub fn new(txs: &[TransactionWithBinary]) -> Self { 
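        // A sketch of the algorithm below: collect the compute-unit price of every non-vote
        // transaction that set a compute-unit limit (others carry `fees == None`, filtered out
        // in `TransactionFees::create`), both globally and per writable account, then sort
        // ascending so `get_percentile` can index directly; e.g. fees = [1, 5, 10, 20] with
        // percentile = 5_000 gives index 5_000 * 4 / 10_000 = 2, i.e. a fee of 10.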
100 | let mut fees = Vec::with_capacity(txs.len()); 101 | let mut writable_account_fees = 102 | HashMap::>::with_capacity_and_hasher(txs.len(), Default::default()); 103 | 104 | for txfee in txs.iter().filter_map(|tx| tx.fees.as_ref()) { 105 | fees.push(txfee.unit_price); 106 | for account in txfee.account_keys.iter() { 107 | writable_account_fees 108 | .entry(*account) 109 | .or_default() 110 | .push(txfee.unit_price); 111 | } 112 | } 113 | 114 | fees.sort_unstable(); 115 | for value in writable_account_fees.values_mut() { 116 | value.sort_unstable(); 117 | } 118 | 119 | Self { 120 | fees, 121 | writable_account_fees, 122 | } 123 | } 124 | 125 | pub fn get_fee(&self, accounts: &[Pubkey], percentile: Option) -> u64 { 126 | let mut fee = Self::get_with_percentile(&self.fees, percentile); 127 | 128 | if let Some(afee) = accounts 129 | .iter() 130 | .filter_map(|account| { 131 | self.writable_account_fees 132 | .get(account) 133 | .map(|fees| Self::get_with_percentile(fees, percentile)) 134 | }) 135 | .reduce(|fee1, fee2| fee1.max(fee2)) 136 | { 137 | fee = fee.max(afee); 138 | } 139 | 140 | fee 141 | } 142 | 143 | fn get_with_percentile(fees: &[u64], percentile: Option) -> u64 { 144 | let fee = match percentile { 145 | Some(percentile) => Self::get_percentile(fees, percentile), 146 | None => fees.first().copied(), 147 | }; 148 | fee.unwrap_or_default() 149 | } 150 | 151 | fn get_percentile(fees: &[u64], percentile: u16) -> Option { 152 | let index = (percentile as usize).min(9_999) * fees.len() / 10_000; 153 | fees.get(index).copied() 154 | } 155 | } 156 | 157 | // Copied from the 2.0 crate 158 | // https://docs.rs/solana-compute-budget/2.0.25/src/solana_compute_budget/compute_budget_processor.rs.html#69-148 159 | mod compute_budget_processor { 160 | use { 161 | solana_compute_budget::compute_budget_limits::{ 162 | DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, MAX_HEAP_FRAME_BYTES, 163 | MIN_HEAP_FRAME_BYTES, 164 | }, 165 | solana_compute_budget_interface::ComputeBudgetInstruction, 166 | solana_sdk::{ 167 | borsh1::try_from_slice_unchecked, instruction::InstructionError, 168 | message::compiled_instruction::CompiledInstruction, pubkey::Pubkey, 169 | transaction::TransactionError, 170 | }, 171 | }; 172 | 173 | #[derive(Debug)] 174 | pub struct ComputeBudgetLimits { 175 | pub compute_unit_limit: u32, 176 | pub compute_unit_price: u64, 177 | } 178 | 179 | pub fn process_compute_budget_instructions<'a>( 180 | instructions: impl Iterator, 181 | ) -> Result { 182 | let mut num_non_compute_budget_instructions: u32 = 0; 183 | let mut updated_compute_unit_limit = None; 184 | let mut updated_compute_unit_price = None; 185 | let mut requested_heap_size = None; 186 | let mut updated_loaded_accounts_data_size_limit = None; 187 | 188 | for (i, (program_id, instruction)) in instructions.enumerate() { 189 | if solana_compute_budget_interface::check_id(program_id) { 190 | let invalid_instruction_data_error = TransactionError::InstructionError( 191 | i as u8, 192 | InstructionError::InvalidInstructionData, 193 | ); 194 | let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); 195 | 196 | match try_from_slice_unchecked(&instruction.data) { 197 | Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { 198 | if requested_heap_size.is_some() { 199 | return Err(duplicate_instruction_error); 200 | } 201 | if sanitize_requested_heap_size(bytes) { 202 | requested_heap_size = Some(bytes); 203 | } else { 204 | return Err(invalid_instruction_data_error); 205 | } 206 | } 207 
| Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { 208 | if updated_compute_unit_limit.is_some() { 209 | return Err(duplicate_instruction_error); 210 | } 211 | updated_compute_unit_limit = Some(compute_unit_limit); 212 | } 213 | Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { 214 | if updated_compute_unit_price.is_some() { 215 | return Err(duplicate_instruction_error); 216 | } 217 | updated_compute_unit_price = Some(micro_lamports); 218 | } 219 | Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) => { 220 | if updated_loaded_accounts_data_size_limit.is_some() { 221 | return Err(duplicate_instruction_error); 222 | } 223 | updated_loaded_accounts_data_size_limit = Some(bytes); 224 | } 225 | _ => return Err(invalid_instruction_data_error), 226 | } 227 | } else { 228 | // only include non-request instructions in default max calc 229 | num_non_compute_budget_instructions = 230 | num_non_compute_budget_instructions.saturating_add(1); 231 | } 232 | } 233 | 234 | let compute_unit_limit = updated_compute_unit_limit 235 | .unwrap_or_else(|| { 236 | num_non_compute_budget_instructions 237 | .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) 238 | }) 239 | .min(MAX_COMPUTE_UNIT_LIMIT); 240 | 241 | let compute_unit_price = updated_compute_unit_price.unwrap_or(0); 242 | 243 | Ok(ComputeBudgetLimits { 244 | compute_unit_limit, 245 | compute_unit_price, 246 | }) 247 | } 248 | 249 | fn sanitize_requested_heap_size(bytes: u32) -> bool { 250 | (MIN_HEAP_FRAME_BYTES..=MAX_HEAP_FRAME_BYTES).contains(&bytes) && bytes.is_multiple_of(1024) 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | **Note:** Version 0 of Semantic Versioning is handled differently from version 1 and above. 9 | The minor version will be incremented upon a breaking change and the patch version will be incremented for features. 
10 | 11 | ## [Unreleased] 12 | 13 | ### Fixes 14 | 15 | ### Features 16 | 17 | ### Breaking 18 | 19 | ## [0.17.0] - 2025-12-02 20 | 21 | ### Features 22 | 23 | - alpamayo: add metric `rpc_upstream_duration_seconds` ([#86](https://github.com/lamports-dev/alpamayo/pull/86)) 24 | 25 | ## [0.16.0] - 2025-10-24 26 | 27 | ### Breaking 28 | 29 | - alpamayo: upgrade to agave 3.0 ([#82](https://github.com/lamports-dev/alpamayo/pull/82)) 30 | 31 | ## [0.15.1] - 2025-10-07 32 | 33 | ### Fixes 34 | 35 | - rpc: remove not required partition index check ([#81](https://github.com/lamports-dev/alpamayo/pull/81)) 36 | - storage: fix substract overflow ([#84](https://github.com/lamports-dev/alpamayo/pull/84)) 37 | 38 | ## [0.15.0] - 2025-09-15 39 | 40 | ### Features 41 | 42 | - rpc: support multiple upstreams ([#79](https://github.com/lamports-dev/alpamayo/pull/79)) 43 | 44 | ## [0.14.2] - 2025-09-13 45 | 46 | ### Fixes 47 | 48 | - storage: panic on unexpected state in MemoryStorage ([#75](https://github.com/lamports-dev/alpamayo/pull/75)) 49 | 50 | ### Features 51 | 52 | - rpc: allow extra headers in response ([#78](https://github.com/lamports-dev/alpamayo/pull/78)) 53 | 54 | ## [0.14.1] - 2025-08-29 55 | 56 | ### Fixes 57 | 58 | - rpc: fix gIR / gBWL ([#74](https://github.com/lamports-dev/alpamayo/pull/74)) 59 | 60 | ## [0.14.0] - 2025-08-26 61 | 62 | ### Features 63 | 64 | - alpamayo: update richat to v5 ([#73](https://github.com/lamports-dev/alpamayo/pull/73)) 65 | 66 | ## [0.13.1] - 2025-08-20 67 | 68 | ### Fixes 69 | 70 | - rpc: fix gSFA duplicated entries for confirmed block ([#72](https://github.com/lamports-dev/alpamayo/pull/72)) 71 | 72 | ## [0.13.0] - 2025-08-16 73 | 74 | ### Fixes 75 | 76 | - rpc: fix gSFA block_time extraction ([#71](https://github.com/lamports-dev/alpamayo/pull/71)) 77 | 78 | ## [0.12.1] - 2025-07-15 79 | 80 | ### Features 81 | 82 | - storage: pass base block in partitioned inflation reward ([#69](https://github.com/lamports-dev/alpamayo/pull/69)) 83 | 84 | ## [0.12.0] - 2025-07-11 85 | 86 | ### Features 87 | 88 | - alpamayo: bump to agave v2.3 ([#68](https://github.com/lamports-dev/alpamayo/pull/68)) 89 | 90 | ## [0.11.1] - 2025-07-08 91 | 92 | ### Fixes 93 | 94 | - rpc: fix getBlock response ([#67](https://github.com/lamports-dev/alpamayo/pull/67)) 95 | 96 | ### Features 97 | 98 | - storage: improve encoding/decoding of SFA ([#66](https://github.com/lamports-dev/alpamayo/pull/66)) 99 | 100 | ## [0.11.0] - 2025-06-10 101 | 102 | ### Features 103 | 104 | - alpamayo: use jemalloc ([#65](https://github.com/lamports-dev/alpamayo/pull/65)) 105 | 106 | ## [0.10.0] - 2025-06-09 107 | 108 | ### Features 109 | 110 | - rpc: use httpget for catch-up and backfilling ([#64](https://github.com/lamports-dev/alpamayo/pull/64)) 111 | 112 | ## [0.9.3] - 2025-06-07 113 | 114 | ### Fixes 115 | 116 | - rpc: fix default params ([#61](https://github.com/lamports-dev/alpamayo/pull/61)) 117 | - rpc: fix gSS upstream parsing ([#62](https://github.com/lamports-dev/alpamayo/pull/62)) 118 | 119 | ## [0.9.2] - 2025-06-05 120 | 121 | ### Fixes 122 | 123 | - storage: remove outdated unsynced slots ([#60](https://github.com/lamports-dev/alpamayo/pull/60)) 124 | 125 | ## [0.9.1] - 2025-06-05 126 | 127 | ### Fixes 128 | 129 | - storage: fix backfilling stop condition ([#59](https://github.com/lamports-dev/alpamayo/pull/59)) 130 | 131 | ## [0.9.0] - 2025-05-30 132 | 133 | ### Breaking 134 | 135 | - alpamayo: bump agave to v2.2 ([#58](https://github.com/lamports-dev/alpamayo/pull/58)) 136 | 137 | ## [0.8.9] - 
2025-05-28 138 | 139 | ### Fixes 140 | 141 | - storage: allow to pop back slots on startup ([#55](https://github.com/lamports-dev/alpamayo/pull/55)) 142 | - storage: fix first available slot and backfill condition ([#56](https://github.com/lamports-dev/alpamayo/pull/56)) 143 | - bin: log version on startup ([#57](https://github.com/lamports-dev/alpamayo/pull/57)) 144 | 145 | ## [0.8.8] - 2025-05-28 146 | 147 | ### Fixes 148 | 149 | - bin: print timestamp with error ([#54](https://github.com/lamports-dev/alpamayo/pull/54)) 150 | 151 | ## [0.8.7] - 2025-05-26 152 | 153 | ### Fixes 154 | 155 | - storage: allow to disable specific file ([#53](https://github.com/lamports-dev/alpamayo/pull/53)) 156 | - rpc: check first available slot on startup ([#52](https://github.com/lamports-dev/alpamayo/pull/52)) 157 | 158 | ## [0.8.6] - 2025-05-25 159 | 160 | ### Fixes 161 | 162 | - storage: fix dead slots in memory storage ([#51](https://github.com/lamports-dev/alpamayo/pull/51)) 163 | 164 | ## [0.8.5] - 2025-05-25 165 | 166 | ### Fixes 167 | 168 | - storage: fix dead slots in memory storage ([#50](https://github.com/lamports-dev/alpamayo/pull/50)) 169 | 170 | ## [0.8.4] - 2025-05-24 171 | 172 | ### Fixes 173 | 174 | - storage: fix dead slots in memory storage ([#49](https://github.com/lamports-dev/alpamayo/pull/49)) 175 | 176 | ## [0.8.3] - 2025-05-23 177 | 178 | ### Fixes 179 | 180 | - storage: fix backfilling for read layer ([#48](https://github.com/lamports-dev/alpamayo/pull/48)) 181 | 182 | ## [0.8.2] - 2025-05-22 183 | 184 | ### Fixes 185 | 186 | - storage: fix slots index loading ([#47](https://github.com/lamports-dev/alpamayo/pull/47)) 187 | 188 | ## [0.8.1] - 2025-05-22 189 | 190 | ### Fixes 191 | 192 | - storage: add height check for new blocks ([#45](https://github.com/lamports-dev/alpamayo/pull/45)) 193 | - rpc: use `-32009` for missed slot too ([#46](https://github.com/lamports-dev/alpamayo/pull/46)) 194 | 195 | ## [0.8.0] - 2025-05-18 196 | 197 | ### Features 198 | 199 | - storage: add read thread options ([#44](https://github.com/lamports-dev/alpamayo/pull/44)) 200 | 201 | ## [0.7.2] - 2025-05-15 202 | 203 | ### Fixes 204 | 205 | - storage: fix backfilling finish condition ([#42](https://github.com/lamports-dev/alpamayo/pull/42)) 206 | 207 | ## [0.7.1] - 2025-05-13 208 | 209 | ### Fixes 210 | 211 | - storage: use rocksdb `set_max_background_jobs` ([#41](https://github.com/lamports-dev/alpamayo/pull/41)) 212 | 213 | ## [0.7.0] - 2025-05-10 214 | 215 | ### Features 216 | 217 | - rpc: serialize to `Vec<u8>` instead of `serde_json::Value` ([#38](https://github.com/lamports-dev/alpamayo/pull/38)) 218 | - rpc: remove get_block concurrency ([#39](https://github.com/lamports-dev/alpamayo/pull/39)) 219 | - storage: impl backfilling ([#40](https://github.com/lamports-dev/alpamayo/pull/40)) 220 | 221 | ## [0.6.1] - 2025-04-30 222 | 223 | ### Fixes 224 | 225 | - rpc: ignore send result in read threads ([#37](https://github.com/lamports-dev/alpamayo/pull/37)) 226 | 227 | ## [0.6.0] - 2025-04-29 228 | 229 | ### Features 230 | 231 | - rpc: add `getInflationReward` ([#36](https://github.com/lamports-dev/alpamayo/pull/36)) 232 | 233 | ## [0.5.0] - 2025-04-26 234 | 235 | ### Features 236 | 237 | - rpc: add smart cache ([#34](https://github.com/lamports-dev/alpamayo/pull/34)) 238 | - source: add gRPC reconnect ([#35](https://github.com/lamports-dev/alpamayo/pull/35)) 239 | 240 | ## [0.4.0] - 2025-04-22 241 | 242 | ### Features 243 | 244 | - rpc: add `getFirstAvailableBlock` support 
([#31](https://github.com/lamports-dev/alpamayo/pull/31)) 245 | 246 | ## [0.3.0] - 2025-04-21 247 | 248 | ### Features 249 | 250 | - bench: add received size ([#29](https://github.com/lamports-dev/alpamayo/pull/29)) 251 | - rpc: use jsonrpc feature from richat ([#30](https://github.com/lamports-dev/alpamayo/pull/30)) 252 | 253 | ## [0.2.1] - 2025-04-17 254 | 255 | ### Fixes 256 | 257 | - storage: fix catch-up on startup ([#28](https://github.com/lamports-dev/alpamayo/pull/28)) 258 | 259 | ### Features 260 | 261 | - rpc: support HTTP/GET `/version` ([#28](https://github.com/lamports-dev/alpamayo/pull/28)) 262 | 263 | ## [0.2.0] - 2025-04-17 264 | 265 | ### Fixes 266 | 267 | - storage: fix dead block removal ([#27](https://github.com/lamports-dev/alpamayo/pull/27)) 268 | 269 | ### Features 270 | 271 | - rpc: support rest for block and tx ([#26](https://github.com/lamports-dev/alpamayo/pull/26)) 272 | 273 | ## [0.1.0] - 2025-04-14 274 | 275 | ### Fixes 276 | 277 | - rpc: add `getSlot` to config ([#1](https://github.com/lamports-dev/alpamayo/pull/1)) 278 | - rpc: change upstream header to `x-bigtable: disabled` ([#5](https://github.com/lamports-dev/alpamayo/pull/5)) 279 | - rpc: use confirmed during sync ([#6](https://github.com/lamports-dev/alpamayo/pull/6)) 280 | - storage: remove transaction index on slot remove ([#8](https://github.com/lamports-dev/alpamayo/pull/8)) 281 | - storage: set confirmed/finalized on first stream messages ([#17](https://github.com/lamports-dev/alpamayo/pull/17)) 282 | - rpc: load recent blocks on startup ([#23](https://github.com/lamports-dev/alpamayo/pull/23)) 283 | - rpc: move `ready` endpoint to metrics server ([#24](https://github.com/lamports-dev/alpamayo/pull/24)) 284 | 285 | ### Features 286 | 287 | - source: add fast ConfirmedBlock serialization ([#3](https://github.com/lamports-dev/alpamayo/pull/3)) 288 | - storage: add multiple readers ([#4](https://github.com/lamports-dev/alpamayo/pull/4)) 289 | - storage: support getTransaction ([#7](https://github.com/lamports-dev/alpamayo/pull/7)) 290 | - storage: support getBlockHeight ([#9](https://github.com/lamports-dev/alpamayo/pull/9)) 291 | - storage: support getSignaturesForAddress ([#10](https://github.com/lamports-dev/alpamayo/pull/10)) 292 | - storage: split slot index ([#11](https://github.com/lamports-dev/alpamayo/pull/11)) 293 | - storage: support compression for indexes ([#12](https://github.com/lamports-dev/alpamayo/pull/12)) 294 | - storage: store err in tx-index ([#13](https://github.com/lamports-dev/alpamayo/pull/13)) 295 | - rpc: support getVersion ([#14](https://github.com/lamports-dev/alpamayo/pull/14)) 296 | - rpc: support getBlockTime ([#15](https://github.com/lamports-dev/alpamayo/pull/15)) 297 | - rpc: support getBlocks / getBlocksWithLimit ([#16](https://github.com/lamports-dev/alpamayo/pull/16)) 298 | - rpc: custom gSFA limit ([#18](https://github.com/lamports-dev/alpamayo/pull/18)) 299 | - rpc: support gSS ([#19](https://github.com/lamports-dev/alpamayo/pull/19)) 300 | - rpc: support getLatestBlockhash ([#20](https://github.com/lamports-dev/alpamayo/pull/20)) 301 | - rpc: support isBlockhashValid ([#21](https://github.com/lamports-dev/alpamayo/pull/21)) 302 | - rpc: support getRecentPrioritizationFees ([#22](https://github.com/lamports-dev/alpamayo/pull/22)) 303 | - metrics: move to metrics.rs ([#25](https://github.com/lamports-dev/alpamayo/pull/25)) 304 | -------------------------------------------------------------------------------- /src/storage/files.rs: 
-------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | config::ConfigStorageFile, 4 | metrics::STORAGE_FILES_SPACE, 5 | storage::{ 6 | blocks::{StoredBlock, StoredBlocksWrite}, 7 | util, 8 | }, 9 | util::{HashMap, VecSide}, 10 | }, 11 | anyhow::Context, 12 | futures::future::{FutureExt, LocalBoxFuture, TryFutureExt, join_all, try_join_all}, 13 | metrics::{Gauge, gauge}, 14 | std::{io, path::PathBuf, rc::Rc}, 15 | tokio_uring::fs::File, 16 | }; 17 | 18 | pub type StorageId = u8; 19 | 20 | #[derive(Debug)] 21 | pub struct StorageFilesRead { 22 | files: Vec>, 23 | id2file: HashMap, 24 | } 25 | 26 | impl StorageFilesRead { 27 | pub async fn open(config: StorageFilesSyncInit) -> anyhow::Result { 28 | let files = try_join_all( 29 | config 30 | .files_paths 31 | .iter() 32 | .map(|path| util::open(path).map_ok(|(file, _file_size)| Rc::new(file))), 33 | ) 34 | .await?; 35 | 36 | Ok(Self { 37 | files, 38 | id2file: config.id2file, 39 | }) 40 | } 41 | 42 | pub fn read<'a>( 43 | &self, 44 | storage_id: StorageId, 45 | offset: u64, 46 | size: u64, 47 | ) -> LocalBoxFuture<'a, io::Result>> { 48 | let file = self 49 | .id2file 50 | .get(&storage_id) 51 | .and_then(|index| self.files.get(*index)) 52 | .map(Rc::clone); 53 | 54 | async move { 55 | let Some(file) = file else { 56 | return Err(io::Error::other(format!( 57 | "failed to get file for id#{storage_id}" 58 | ))); 59 | }; 60 | 61 | let buffer = Vec::with_capacity(size as usize); 62 | let (res, buffer) = file.read_exact_at(buffer, offset).await; 63 | res?; 64 | 65 | Ok(buffer) 66 | } 67 | .boxed_local() 68 | } 69 | } 70 | 71 | #[derive(Debug)] 72 | pub struct StorageFilesWrite { 73 | files: Vec, 74 | id2file: HashMap, 75 | next_file: usize, 76 | metric_space_free: Gauge, 77 | } 78 | 79 | impl StorageFilesWrite { 80 | pub async fn open( 81 | configs: Vec, 82 | blocks: &StoredBlocksWrite, 83 | ) -> anyhow::Result<(Self, StorageFilesSyncInit)> { 84 | let files_paths = configs.iter().map(|config| config.path.clone()).collect(); 85 | let mut files = try_join_all(configs.into_iter().map(Self::open_file)).await?; 86 | files.sort_unstable_by_key(|file| file.id); 87 | 88 | // storage id map 89 | let mut id2file = HashMap::default(); 90 | for (index, file) in files.iter().enumerate() { 91 | id2file.insert(file.id, index); 92 | } 93 | 94 | // set tail and head 95 | let mut boundaries = blocks.get_stored_boundaries(); 96 | for (storage_id, index) in id2file.iter() { 97 | if let Some(boundaries) = boundaries.remove(storage_id) { 98 | let file = &mut files[*index]; 99 | file.tail = boundaries.tail().unwrap_or_default(); 100 | anyhow::ensure!( 101 | file.tail < file.size, 102 | "invalid tail for file id#{}", 103 | file.id 104 | ); 105 | file.head = boundaries.head().unwrap_or_default(); 106 | anyhow::ensure!(file.head <= file.size, "invalid head for id#{}", file.id); 107 | } 108 | } 109 | anyhow::ensure!(boundaries.is_empty(), "file storage is missed"); 110 | 111 | let write = Self { 112 | files, 113 | id2file, 114 | next_file: 0, 115 | metric_space_free: gauge!(STORAGE_FILES_SPACE, "id" => "*", "type" => "free"), 116 | }; 117 | 118 | gauge!(STORAGE_FILES_SPACE, "id" => "*", "type" => "total") 119 | .set(write.files.iter().map(|file| file.size as f64).sum::()); 120 | write.metric_space_free.set( 121 | write 122 | .files 123 | .iter() 124 | .map(|file| file.free_space() as f64) 125 | .sum::(), 126 | ); 127 | for file in write.files.iter() { 128 | file.metric_space_free.set(file.free_space() as f64); 129 | } 130 | 
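        // The read side (`StorageFilesRead::open` above) reopens the same paths and resolves a
        // block's `StorageId` through this same map, so the sync-init snapshot built below must
        // mirror the id-to-index mapping just computed for the write side.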
131 | let read_sync_init = StorageFilesSyncInit { 132 | files_paths, 133 | id2file: write.id2file.clone(), 134 | }; 135 | 136 | Ok((write, read_sync_init)) 137 | } 138 | 139 | async fn open_file(config: ConfigStorageFile) -> anyhow::Result { 140 | let (file, file_size) = util::open(&config.path).await?; 141 | 142 | // verify file size 143 | if file_size == 0 { 144 | file.fallocate(0, config.size, libc::FALLOC_FL_ZERO_RANGE) 145 | .await 146 | .with_context(|| format!("failed to preallocate {:?}", config.path))?; 147 | } else if config.size != file_size { 148 | anyhow::bail!( 149 | "invalid file size {:?}: {file_size} (expected: {})", 150 | config.path, 151 | config.size 152 | ); 153 | } 154 | 155 | gauge!(STORAGE_FILES_SPACE, "id" => config.id.to_string(), "type" => "total") 156 | .set(config.size as f64); 157 | 158 | Ok(StorageFile { 159 | id: config.id, 160 | file, 161 | tail: 0, 162 | head: 0, 163 | size: config.size, 164 | new_blocks: config.new_blocks, 165 | metric_space_free: gauge!(STORAGE_FILES_SPACE, "id" => config.id.to_string(), "type" => "free"), 166 | }) 167 | } 168 | 169 | pub async fn close(self) { 170 | join_all(self.files.into_iter().map(|file| async move { 171 | let _: io::Result<()> = file.file.close().await; 172 | })) 173 | .await; 174 | } 175 | 176 | pub async fn push_block_back( 177 | &mut self, 178 | buffer: Vec, 179 | ) -> anyhow::Result<(Vec, Option<(StorageId, u64)>)> { 180 | self.push_block(buffer, VecSide::Back).await 181 | } 182 | 183 | pub async fn push_block_front( 184 | &mut self, 185 | buffer: Vec, 186 | ) -> anyhow::Result<(Vec, Option<(StorageId, u64)>)> { 187 | self.push_block(buffer, VecSide::Front).await 188 | } 189 | 190 | async fn push_block( 191 | &mut self, 192 | buffer: Vec, 193 | side: VecSide, 194 | ) -> anyhow::Result<(Vec, Option<(StorageId, u64)>)> { 195 | let Some(index) = self.get_file_index_for_new_block(buffer.len() as u64) else { 196 | return Ok((buffer, None)); 197 | }; 198 | let file = &mut self.files[index]; 199 | 200 | let free_space_init = file.free_space() as f64; 201 | let (offset, buffer) = match side { 202 | VecSide::Back => file.write_back(buffer).await, 203 | VecSide::Front => file.write_front(buffer).await, 204 | } 205 | .with_context(|| format!("failed to write block to file id#{}", file.id))?; 206 | let free_space_new = file.free_space() as f64; 207 | file.metric_space_free.set(free_space_new); 208 | self.metric_space_free 209 | .increment(free_space_new - free_space_init); 210 | 211 | Ok((buffer, Some((file.id, offset)))) 212 | } 213 | 214 | fn get_file_index_for_new_block(&mut self, size: u64) -> Option { 215 | let current_index = self.next_file; 216 | loop { 217 | let index = self.next_file; 218 | self.next_file = (self.next_file + 1) % self.files.len(); 219 | 220 | let file = &self.files[index]; 221 | if file.new_blocks && file.free_space() >= size { 222 | return Some(index); 223 | } 224 | 225 | if self.next_file == current_index { 226 | return None; 227 | } 228 | } 229 | } 230 | 231 | pub fn pop_block_back(&mut self, block: StoredBlock) -> anyhow::Result<()> { 232 | self.pop_block(block, VecSide::Back) 233 | } 234 | 235 | pub fn pop_block_front(&mut self, block: StoredBlock) -> anyhow::Result<()> { 236 | self.pop_block(block, VecSide::Front) 237 | } 238 | 239 | fn pop_block(&mut self, block: StoredBlock, side: VecSide) -> anyhow::Result<()> { 240 | let Some(file_index) = self.id2file.get(&block.storage_id).copied() else { 241 | anyhow::bail!("unknown storage id: {}", block.storage_id); 242 | }; 243 | let file = &mut 
self.files[file_index]; 244 | 245 | let free_space_init = file.free_space() as f64; 246 | match side { 247 | VecSide::Back => { 248 | file.tail = (block.offset + block.size) % file.size; 249 | } 250 | VecSide::Front => { 251 | file.head = block.offset; 252 | } 253 | } 254 | anyhow::ensure!( 255 | file.head < file.size, 256 | "file storage head overflow, {} vs {}", 257 | file.head, 258 | file.size 259 | ); 260 | anyhow::ensure!( 261 | file.tail <= file.size, 262 | "file storage tail overflow, {} vs {}", 263 | file.tail, 264 | file.size 265 | ); 266 | let free_space_new = file.free_space() as f64; 267 | file.metric_space_free.set(free_space_new); 268 | self.metric_space_free 269 | .increment(free_space_new - free_space_init); 270 | 271 | Ok(()) 272 | } 273 | } 274 | 275 | #[derive(Debug)] 276 | struct StorageFile { 277 | id: StorageId, 278 | file: File, 279 | tail: u64, 280 | head: u64, 281 | size: u64, 282 | new_blocks: bool, 283 | metric_space_free: Gauge, 284 | } 285 | 286 | impl StorageFile { 287 | fn free_space(&self) -> u64 { 288 | if self.head < self.tail { 289 | self.tail - self.head 290 | } else { 291 | self.tail.max(self.size - self.head) 292 | } 293 | } 294 | 295 | async fn write_back(&mut self, buffer: Vec) -> anyhow::Result<(u64, Vec)> { 296 | let len = buffer.len() as u64; 297 | anyhow::ensure!(self.free_space() >= len, "not enough space"); 298 | 299 | // update tail 300 | self.tail = self.tail.checked_sub(len).unwrap_or(self.size - len); 301 | 302 | let (result, buffer) = self.file.write_all_at(buffer, self.tail).await; 303 | let () = result?; 304 | self.file.sync_data().await?; 305 | 306 | Ok((self.tail, buffer)) 307 | } 308 | 309 | async fn write_front(&mut self, buffer: Vec) -> anyhow::Result<(u64, Vec)> { 310 | let len = buffer.len() as u64; 311 | anyhow::ensure!(self.free_space() >= len, "not enough space"); 312 | 313 | // update head if not enough space 314 | if self.head > self.tail && self.size - self.head < len { 315 | self.head = 0; 316 | } 317 | 318 | let (result, buffer) = self.file.write_all_at(buffer, self.head).await; 319 | let () = result?; 320 | self.file.sync_data().await?; 321 | 322 | let offset = self.head; 323 | self.head += len; 324 | anyhow::ensure!( 325 | self.head <= self.size, 326 | "file storage head overflow, {} vs {}", 327 | self.head, 328 | self.size 329 | ); 330 | 331 | Ok((offset, buffer)) 332 | } 333 | } 334 | 335 | #[derive(Debug, Clone)] 336 | pub struct StorageFilesSyncInit { 337 | files_paths: Vec, 338 | id2file: HashMap, 339 | } 340 | -------------------------------------------------------------------------------- /src/rpc/api_httpget.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | config::{ConfigRpc, ConfigRpcCallHttpGet, ConfigRpcCallJson}, 4 | rpc::{ 5 | api::{X_ERROR, X_SLOT}, 6 | upstream::RpcClientHttpget, 7 | }, 8 | storage::{ 9 | read::{ReadRequest, ReadResultBlock, ReadResultTransaction}, 10 | slots::StoredSlots, 11 | }, 12 | version::VERSION, 13 | }, 14 | futures::future::BoxFuture, 15 | http_body_util::{BodyExt, Full as BodyFull}, 16 | hyper::{ 17 | HeaderMap, StatusCode, 18 | body::{Body, Bytes, Incoming as BodyIncoming}, 19 | http::Result as HttpResult, 20 | }, 21 | metrics::counter, 22 | regex::Regex, 23 | richat_shared::jsonrpc::{ 24 | helpers::{ 25 | RpcResponse, get_x_bigtable_disabled, get_x_subscription_id, response_200, response_500, 26 | }, 27 | metrics::RPC_REQUESTS_TOTAL, 28 | }, 29 | solana_sdk::{clock::Slot, signature::Signature}, 30 | std::{ 31 | 
collections::HashSet, 32 | str::FromStr, 33 | sync::Arc, 34 | time::{Duration, Instant}, 35 | }, 36 | tokio::sync::{mpsc, oneshot}, 37 | }; 38 | 39 | #[derive(Debug)] 40 | struct SupportedCalls { 41 | get_block: Option, 42 | get_transaction: Option, 43 | } 44 | 45 | impl SupportedCalls { 46 | fn new(calls: &HashSet) -> anyhow::Result { 47 | Ok(Self { 48 | get_block: calls 49 | .contains(&ConfigRpcCallHttpGet::GetBlock) 50 | .then(|| Regex::new(r"^/block/(\d{1,9})/?$")) 51 | .transpose()?, 52 | get_transaction: calls 53 | .contains(&ConfigRpcCallHttpGet::GetTransaction) 54 | .then(|| Regex::new(r"^/tx/([1-9A-HJ-NP-Za-km-z]{64,88})/?$")) 55 | .transpose()?, 56 | }) 57 | } 58 | } 59 | 60 | #[derive(Debug)] 61 | pub struct State { 62 | stored_slots: StoredSlots, 63 | extra_headers: HeaderMap, 64 | request_timeout: Duration, 65 | supported_calls: SupportedCalls, 66 | requests_tx: mpsc::Sender, 67 | upstreams: Vec, 68 | } 69 | 70 | impl State { 71 | pub fn new( 72 | config: &ConfigRpc, 73 | stored_slots: StoredSlots, 74 | requests_tx: mpsc::Sender, 75 | ) -> anyhow::Result { 76 | Ok(Self { 77 | stored_slots, 78 | extra_headers: config.extra_headers.clone(), 79 | request_timeout: config.request_timeout, 80 | supported_calls: SupportedCalls::new(&config.calls_httpget)?, 81 | requests_tx, 82 | upstreams: config 83 | .upstream_httpget 84 | .iter() 85 | .map(|config| RpcClientHttpget::new(config.clone())) 86 | .collect::>()?, 87 | }) 88 | } 89 | 90 | pub fn get_handler( 91 | self: Arc, 92 | req: hyper::Request, 93 | ) -> Option>> { 94 | let path = req.uri().path(); 95 | 96 | if let Some(re) = &self.supported_calls.get_block 97 | && let Some(slot) = re.captures(path).and_then(|c| c.get(1).map(|m| m.as_str())) 98 | && let Ok(slot) = Slot::from_str(slot) 99 | { 100 | return Some(Box::pin(async move { 101 | match self.process_block(req, slot).await { 102 | Ok(response) => response, 103 | Err(error) => response_500(error), 104 | } 105 | })); 106 | } 107 | 108 | if let Some(re) = &self.supported_calls.get_transaction 109 | && let Some(slot) = re.captures(path).and_then(|c| c.get(1).map(|m| m.as_str())) 110 | && let Ok(signature) = Signature::from_str(slot) 111 | { 112 | return Some(Box::pin(async move { 113 | match self.process_transaction(req, signature).await { 114 | Ok(response) => response, 115 | Err(error) => response_500(error), 116 | } 117 | })); 118 | } 119 | 120 | if path == "/version" { 121 | return Some(Box::pin(async move { 122 | response_200( 123 | serde_json::json!({ 124 | "version": VERSION.version, 125 | "solana": VERSION.solana, 126 | "git": VERSION.git, 127 | }) 128 | .to_string(), 129 | &self.extra_headers, 130 | ) 131 | })); 132 | } 133 | 134 | None 135 | } 136 | 137 | async fn process_block( 138 | &self, 139 | req: hyper::Request, 140 | slot: Slot, 141 | ) -> anyhow::Result> { 142 | let deadline = Instant::now() + self.request_timeout; 143 | 144 | let x_subscription_id = get_x_subscription_id(req.headers()); 145 | let upstream_disabled = get_x_bigtable_disabled(req.headers()); 146 | 147 | counter!( 148 | RPC_REQUESTS_TOTAL, 149 | "x_subscription_id" => Arc::clone(&x_subscription_id), 150 | "method" => "getBlock_rest", 151 | ) 152 | .increment(1); 153 | 154 | // check slot before sending request 155 | let slot_tip = self.stored_slots.confirmed_load(); 156 | if slot > slot_tip { 157 | return Self::block_error_not_available(slot); 158 | } 159 | if slot <= self.stored_slots.first_available_load() { 160 | return self 161 | .get_block_upstream(upstream_disabled, x_subscription_id, deadline, 
slot) 162 | .await; 163 | } 164 | 165 | // request 166 | let (tx, rx) = oneshot::channel(); 167 | anyhow::ensure!( 168 | self.requests_tx 169 | .send(ReadRequest::Block { 170 | deadline, 171 | slot, 172 | tx, 173 | x_subscription_id: Arc::clone(&x_subscription_id), 174 | }) 175 | .await 176 | .is_ok(), 177 | "request channel is closed" 178 | ); 179 | let Ok(result) = rx.await else { 180 | anyhow::bail!("rx channel is closed"); 181 | }; 182 | let bytes = match result { 183 | ReadResultBlock::Timeout => anyhow::bail!("timeout"), 184 | ReadResultBlock::Removed => { 185 | return self 186 | .get_block_upstream(upstream_disabled, x_subscription_id, deadline, slot) 187 | .await; 188 | } 189 | ReadResultBlock::Dead => { 190 | return Self::block_error_skipped(slot); 191 | } 192 | ReadResultBlock::NotAvailable => { 193 | return Self::block_error_not_available(slot); 194 | } 195 | ReadResultBlock::Block(bytes) => bytes, 196 | ReadResultBlock::ReadError(error) => anyhow::bail!("read error: {error}"), 197 | }; 198 | 199 | // verify that we still have data for that block (i.e. we read correct data) 200 | if slot <= self.stored_slots.first_available_load() { 201 | return self 202 | .get_block_upstream(upstream_disabled, x_subscription_id, deadline, slot) 203 | .await; 204 | } 205 | 206 | Ok(hyper::Response::builder().body(BodyFull::from(bytes).boxed())) 207 | } 208 | 209 | async fn get_block_upstream( 210 | &self, 211 | upstream_disabled: bool, 212 | x_subscription_id: Arc, 213 | deadline: Instant, 214 | slot: Slot, 215 | ) -> anyhow::Result> { 216 | if let Some(upstream) = (!upstream_disabled) 217 | .then(|| { 218 | self.upstreams 219 | .iter() 220 | .find(|upstream| upstream.is_supported(ConfigRpcCallJson::GetBlock)) 221 | }) 222 | .flatten() 223 | { 224 | upstream.get_block(x_subscription_id, deadline, slot).await 225 | } else { 226 | Self::block_error_skipped_long_term_storage(slot) 227 | } 228 | } 229 | 230 | fn block_error_not_available(slot: Slot) -> anyhow::Result> { 231 | let msg = format!("Block not available for slot {slot}\n"); 232 | Ok(response_400(msg, "BlockNotAvailable".into())) 233 | } 234 | 235 | fn block_error_skipped(slot: Slot) -> anyhow::Result> { 236 | let msg = 237 | format!("Slot {slot} was skipped, or missing due to ledger jump to recent snapshot\n"); 238 | Ok(response_400(msg, "SlotSkipped".into())) 239 | } 240 | 241 | fn block_error_skipped_long_term_storage( 242 | slot: Slot, 243 | ) -> anyhow::Result> { 244 | let msg = format!("Slot {slot} was skipped, or missing in long-term storage\n"); 245 | Ok(response_400(msg, "LongTermStorageSlotSkipped".into())) 246 | } 247 | 248 | async fn process_transaction( 249 | self: Arc, 250 | req: hyper::Request, 251 | signature: Signature, 252 | ) -> anyhow::Result> { 253 | let deadline = Instant::now() + self.request_timeout; 254 | 255 | let x_subscription_id = get_x_subscription_id(req.headers()); 256 | let upstream_disabled = get_x_bigtable_disabled(req.headers()); 257 | 258 | counter!( 259 | RPC_REQUESTS_TOTAL, 260 | "x_subscription_id" => Arc::clone(&x_subscription_id), 261 | "method" => "getTransaction_rest", 262 | ) 263 | .increment(1); 264 | 265 | // request 266 | let (tx, rx) = oneshot::channel(); 267 | anyhow::ensure!( 268 | self.requests_tx 269 | .send(ReadRequest::Transaction { 270 | deadline, 271 | signature, 272 | tx, 273 | x_subscription_id: Arc::clone(&x_subscription_id), 274 | }) 275 | .await 276 | .is_ok(), 277 | "request channel is closed" 278 | ); 279 | let Ok(result) = rx.await else { 280 | anyhow::bail!("rx channel is 
closed"); 281 | }; 282 | let (slot, bytes) = match result { 283 | ReadResultTransaction::Timeout => anyhow::bail!("timeout"), 284 | ReadResultTransaction::NotFound => { 285 | return self 286 | .get_transaction_upstream( 287 | upstream_disabled, 288 | x_subscription_id, 289 | deadline, 290 | signature, 291 | ) 292 | .await; 293 | } 294 | ReadResultTransaction::Transaction { 295 | slot, 296 | block_time: _, 297 | bytes, 298 | } => (slot, bytes), 299 | ReadResultTransaction::ReadError(error) => anyhow::bail!("read error: {error}"), 300 | }; 301 | 302 | // verify that we still have data for that block (i.e. we read correct data) 303 | if slot <= self.stored_slots.first_available_load() { 304 | return self 305 | .get_transaction_upstream(upstream_disabled, x_subscription_id, deadline, signature) 306 | .await; 307 | } 308 | 309 | Ok(hyper::Response::builder() 310 | .header(X_SLOT, slot) 311 | .body(BodyFull::from(bytes).boxed())) 312 | } 313 | 314 | async fn get_transaction_upstream( 315 | &self, 316 | upstream_disabled: bool, 317 | x_subscription_id: Arc, 318 | deadline: Instant, 319 | signature: Signature, 320 | ) -> anyhow::Result> { 321 | if let Some(upstream) = (!upstream_disabled) 322 | .then(|| { 323 | self.upstreams 324 | .iter() 325 | .find(|upstream| upstream.is_supported(ConfigRpcCallJson::GetTransaction)) 326 | }) 327 | .flatten() 328 | { 329 | upstream 330 | .get_transaction(x_subscription_id, deadline, signature) 331 | .await 332 | } else { 333 | Self::transaction_error_history_not_available() 334 | } 335 | } 336 | 337 | fn transaction_error_history_not_available() -> anyhow::Result> { 338 | let msg = "Transaction history is not available from this node\n".to_owned(); 339 | Ok(response_400(msg, "TransactionHistoryNotAvailable".into())) 340 | } 341 | } 342 | 343 | fn response_400(body: B, x_error: Vec) -> HttpResult 344 | where 345 | B: BodyExt + Send + Sync + 'static, 346 | B: Body, 347 | { 348 | hyper::Response::builder() 349 | .header(X_ERROR, x_error) 350 | .status(StatusCode::BAD_REQUEST) 351 | .body(body.boxed()) 352 | } 353 | -------------------------------------------------------------------------------- /src/source/block.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | source::{ 4 | fees::TransactionsFees, sfa::SignaturesForAddress, transaction::TransactionWithBinary, 5 | }, 6 | util::HashMap, 7 | }, 8 | prost::{ 9 | DecodeError, Message, 10 | bytes::{Buf, BufMut}, 11 | encoding::{ 12 | self, DecodeContext, WireType, encode_key, encode_varint, encoded_len_varint, key_len, 13 | }, 14 | }, 15 | solana_sdk::{ 16 | clock::{Slot, UnixTimestamp}, 17 | pubkey::Pubkey, 18 | signature::Signature, 19 | transaction::TransactionError, 20 | }, 21 | solana_storage_proto::convert::generated, 22 | solana_transaction_status::{ConfirmedBlock, Reward, RewardType, Rewards}, 23 | std::{collections::hash_map::Entry as HashMapEntry, ops::Deref, sync::Arc}, 24 | }; 25 | 26 | #[derive(Debug, Clone)] 27 | pub struct BlockTransactionOffset { 28 | pub key: [u8; 8], 29 | pub offset: u64, 30 | pub size: u64, 31 | pub err: Option, 32 | } 33 | 34 | #[derive(Debug)] 35 | pub struct BlockWithBinary { 36 | pub blockhash: String, 37 | pub parent_slot: Slot, 38 | pub block_time: Option, 39 | pub block_height: Option, 40 | pub protobuf: Vec, 41 | pub txs_offset: Vec, 42 | pub transactions: HashMap, 43 | pub sfa: HashMap, 44 | pub fees: Arc, 45 | } 46 | 47 | impl BlockWithBinary { 48 | pub fn new_from_confirmed_block_and_slot(block: ConfirmedBlock, 
slot: Slot) -> Self { 49 | Self::new( 50 | block.previous_blockhash, 51 | block.blockhash, 52 | block.parent_slot, 53 | block 54 | .transactions 55 | .into_iter() 56 | .map(|tx| TransactionWithBinary::new(slot, tx, None)) 57 | .collect(), 58 | block.rewards, 59 | block.num_partitions, 60 | block.block_time, 61 | block.block_height, 62 | ) 63 | } 64 | 65 | #[allow(clippy::too_many_arguments)] 66 | pub fn new( 67 | previous_blockhash: String, 68 | blockhash: String, 69 | parent_slot: Slot, 70 | mut transactions: Vec, 71 | rewards: Rewards, 72 | num_partitions: Option, 73 | block_time: Option, 74 | block_height: Option, 75 | ) -> Self { 76 | let (protobuf, txs_offset) = ConfirmedBlockProtoRef { 77 | previous_blockhash: &previous_blockhash, 78 | blockhash: &blockhash, 79 | parent_slot, 80 | transactions: &transactions, 81 | rewards: &rewards, 82 | block_time: block_time.map(|timestamp| generated::UnixTimestamp { timestamp }), 83 | block_height: block_height.map(|block_height| generated::BlockHeight { block_height }), 84 | num_partitions: num_partitions 85 | .map(|num_partitions| generated::NumPartitions { num_partitions }), 86 | } 87 | .encode_with_tx_offsets(); 88 | 89 | let mut sfa = HashMap::::default(); 90 | for tx in transactions.iter_mut().rev() { 91 | for tx_sfa in tx.sfa.drain(..) { 92 | match sfa.entry(tx_sfa.address) { 93 | HashMapEntry::Occupied(mut entry) => { 94 | entry.get_mut().merge(tx_sfa); 95 | } 96 | HashMapEntry::Vacant(entry) => { 97 | entry.insert(SignaturesForAddress::new(tx_sfa)); 98 | } 99 | } 100 | } 101 | } 102 | 103 | let fees = Arc::new(TransactionsFees::new(&transactions)); 104 | 105 | let transactions = transactions 106 | .into_iter() 107 | .map(|tx| (tx.signature, tx)) 108 | .collect(); 109 | 110 | Self { 111 | blockhash, 112 | parent_slot, 113 | block_time, 114 | block_height, 115 | protobuf, 116 | txs_offset, 117 | transactions, 118 | sfa, 119 | fees, 120 | } 121 | } 122 | } 123 | 124 | #[derive(Debug)] 125 | struct ConfirmedBlockProtoRef<'a> { 126 | previous_blockhash: &'a String, 127 | blockhash: &'a String, 128 | parent_slot: Slot, 129 | transactions: &'a [TransactionWithBinary], 130 | rewards: &'a [Reward], 131 | block_time: Option, 132 | block_height: Option, 133 | num_partitions: Option, 134 | } 135 | 136 | impl ConfirmedBlockProtoRef<'_> { 137 | fn encode_with_tx_offsets(&self) -> (Vec, Vec) { 138 | let mut buf = Vec::with_capacity(self.encoded_len()); 139 | 140 | if !self.previous_blockhash.is_empty() { 141 | bytes_encode(1, self.previous_blockhash.as_ref(), &mut buf); 142 | } 143 | if !self.blockhash.is_empty() { 144 | bytes_encode(2, self.blockhash.as_ref(), &mut buf); 145 | } 146 | if self.parent_slot != 0 { 147 | encoding::uint64::encode(3, &self.parent_slot, &mut buf); 148 | } 149 | let mut offsets = Vec::with_capacity(self.transactions.len()); 150 | for tx in self.transactions.iter() { 151 | encode_key(4, WireType::LengthDelimited, &mut buf); 152 | encode_varint(tx.protobuf.len() as u64, &mut buf); 153 | let offset = buf.len() as u64; 154 | buf.put_slice(&tx.protobuf); 155 | offsets.push(BlockTransactionOffset { 156 | key: tx.key, 157 | offset, 158 | size: tx.protobuf.len() as u64, 159 | err: tx.err.clone(), 160 | }); 161 | } 162 | for reward in self.rewards { 163 | encoding::message::encode(5, &RewardWrapper(reward), &mut buf); 164 | } 165 | if let Some(block_time) = &self.block_time { 166 | encoding::message::encode(6, block_time, &mut buf); 167 | } 168 | if let Some(block_height) = &self.block_height { 169 | encoding::message::encode(7, 
block_height, &mut buf); 170 | } 171 | if let Some(num_partitions) = &self.num_partitions { 172 | encoding::message::encode(8, num_partitions, &mut buf); 173 | } 174 | 175 | (buf, offsets) 176 | } 177 | 178 | fn encoded_len(&self) -> usize { 179 | (if !self.previous_blockhash.is_empty() { 180 | bytes_encoded_len(1, self.previous_blockhash.as_ref()) 181 | } else { 182 | 0 183 | }) + if !self.blockhash.is_empty() { 184 | bytes_encoded_len(2, self.blockhash.as_ref()) 185 | } else { 186 | 0 187 | } + if self.parent_slot != 0 { 188 | encoding::uint64::encoded_len(3, &self.parent_slot) 189 | } else { 190 | 0 191 | } + (key_len(4u32) * self.transactions.len() 192 | + self 193 | .transactions 194 | .iter() 195 | .map(|tx| tx.protobuf.len()) 196 | .map(|len| len + encoded_len_varint(len as u64)) 197 | .sum::()) 198 | + (key_len(5u32) * self.rewards.len() 199 | + self 200 | .rewards 201 | .iter() 202 | .map(|reward| RewardWrapper(reward).encoded_len()) 203 | .map(|len| len + encoded_len_varint(len as u64)) 204 | .sum::()) 205 | + if let Some(block_time) = &self.block_time { 206 | encoding::message::encoded_len(6, block_time) 207 | } else { 208 | 0 209 | } 210 | + if let Some(block_height) = &self.block_height { 211 | encoding::message::encoded_len(7, block_height) 212 | } else { 213 | 0 214 | } 215 | + if let Some(num_partitions) = &self.num_partitions { 216 | encoding::message::encoded_len(8, num_partitions) 217 | } else { 218 | 0 219 | } 220 | } 221 | } 222 | 223 | #[derive(Debug)] 224 | struct RewardWrapper<'a>(&'a Reward); 225 | 226 | impl Deref for RewardWrapper<'_> { 227 | type Target = Reward; 228 | 229 | fn deref(&self) -> &Self::Target { 230 | self.0 231 | } 232 | } 233 | 234 | impl Message for RewardWrapper<'_> { 235 | fn encode_raw(&self, buf: &mut B) 236 | where 237 | B: BufMut, 238 | Self: Sized, 239 | { 240 | if !self.pubkey.is_empty() { 241 | encoding::string::encode(1, &self.pubkey, buf); 242 | } 243 | if self.lamports != 0 { 244 | encoding::int64::encode(2, &self.lamports, buf); 245 | } 246 | if self.post_balance != 0 { 247 | encoding::uint64::encode(3, &self.post_balance, buf); 248 | } 249 | if self.reward_type.is_some() { 250 | encoding::int32::encode(4, &reward_type_as_i32(self.reward_type), buf); 251 | } 252 | if let Some(commission) = self.commission { 253 | bytes_encode(5, u8_to_static_str(commission).as_ref(), buf); 254 | } 255 | } 256 | 257 | fn encoded_len(&self) -> usize { 258 | (if !self.pubkey.is_empty() { 259 | encoding::string::encoded_len(1, &self.pubkey) 260 | } else { 261 | 0 262 | }) + if self.lamports != 0 { 263 | encoding::int64::encoded_len(2, &self.lamports) 264 | } else { 265 | 0 266 | } + if self.post_balance != 0 { 267 | encoding::uint64::encoded_len(3, &self.post_balance) 268 | } else { 269 | 0 270 | } + if self.reward_type.is_some() { 271 | encoding::int32::encoded_len(4, &reward_type_as_i32(self.reward_type)) 272 | } else { 273 | 0 274 | } + self.commission.map_or(0, |commission| { 275 | bytes_encoded_len(5, u8_to_static_str(commission).as_ref()) 276 | }) 277 | } 278 | 279 | fn clear(&mut self) { 280 | unimplemented!() 281 | } 282 | 283 | fn merge_field( 284 | &mut self, 285 | _tag: u32, 286 | _wire_type: WireType, 287 | _buf: &mut B, 288 | _ctx: DecodeContext, 289 | ) -> Result<(), DecodeError> 290 | where 291 | B: Buf, 292 | Self: Sized, 293 | { 294 | unimplemented!() 295 | } 296 | } 297 | 298 | const fn reward_type_as_i32(reward_type: Option) -> i32 { 299 | match reward_type { 300 | None => 0, 301 | Some(RewardType::Fee) => 1, 302 | 
Some(RewardType::Rent) => 2, 303 | Some(RewardType::Staking) => 3, 304 | Some(RewardType::Voting) => 4, 305 | } 306 | } 307 | 308 | #[inline] 309 | fn bytes_encode(tag: u32, value: &[u8], buf: &mut impl BufMut) { 310 | encode_key(tag, WireType::LengthDelimited, buf); 311 | encode_varint(value.len() as u64, buf); 312 | buf.put(value) 313 | } 314 | 315 | #[inline] 316 | fn bytes_encoded_len(tag: u32, value: &[u8]) -> usize { 317 | key_len(tag) + encoded_len_varint(value.len() as u64) + value.len() 318 | } 319 | 320 | const NUM_STRINGS: [&str; 256] = [ 321 | "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", 322 | "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", 323 | "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", 324 | "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", 325 | "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", 326 | "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96", 327 | "97", "98", "99", "100", "101", "102", "103", "104", "105", "106", "107", "108", "109", "110", 328 | "111", "112", "113", "114", "115", "116", "117", "118", "119", "120", "121", "122", "123", 329 | "124", "125", "126", "127", "128", "129", "130", "131", "132", "133", "134", "135", "136", 330 | "137", "138", "139", "140", "141", "142", "143", "144", "145", "146", "147", "148", "149", 331 | "150", "151", "152", "153", "154", "155", "156", "157", "158", "159", "160", "161", "162", 332 | "163", "164", "165", "166", "167", "168", "169", "170", "171", "172", "173", "174", "175", 333 | "176", "177", "178", "179", "180", "181", "182", "183", "184", "185", "186", "187", "188", 334 | "189", "190", "191", "192", "193", "194", "195", "196", "197", "198", "199", "200", "201", 335 | "202", "203", "204", "205", "206", "207", "208", "209", "210", "211", "212", "213", "214", 336 | "215", "216", "217", "218", "219", "220", "221", "222", "223", "224", "225", "226", "227", 337 | "228", "229", "230", "231", "232", "233", "234", "235", "236", "237", "238", "239", "240", 338 | "241", "242", "243", "244", "245", "246", "247", "248", "249", "250", "251", "252", "253", 339 | "254", "255", 340 | ]; 341 | 342 | const fn u8_to_static_str(num: u8) -> &'static str { 343 | NUM_STRINGS[num as usize] 344 | } 345 | -------------------------------------------------------------------------------- /src/storage/blocks.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | rpc::api_jsonrpc::RpcRequestBlocksUntil, 4 | storage::{files::StorageId, slots::StoredSlots, sync::ReadWriteSyncMessage}, 5 | util::{HashMap, VecSide}, 6 | }, 7 | solana_sdk::clock::{MAX_RECENT_BLOCKHASHES, Slot, UnixTimestamp}, 8 | tokio::sync::broadcast, 9 | tracing::info, 10 | }; 11 | 12 | #[derive(Debug)] 13 | pub struct StoredBlocksWrite { 14 | blocks: Vec<StoredBlock>, 15 | tail: usize, // lowest slot 16 | head: usize, // highest slot 17 | stored_slots: StoredSlots, 18 | sync_tx: broadcast::Sender<ReadWriteSyncMessage>, 19 | } 20 | 21 | impl StoredBlocksWrite { 22 | pub fn new( 23 | mut blocks: Vec<StoredBlock>, 24 | max: usize, 25 | stored_slots: StoredSlots, 26 | sync_tx: broadcast::Sender<ReadWriteSyncMessage>, 27 | ) -> anyhow::Result<Self> { 28 | anyhow::ensure!( 29 | blocks.len() <= max, 30 | "shrinking of stored blocks is not supported yet" 31 | ); 32 | 33 | blocks.resize(max, StoredBlock::new_noexists()); 34 | 35 | 
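// The Vec is used as a fixed-size ring buffer: scan the pre-loaded blocks
// once to find the index of the lowest slot (tail) and the highest slot
// (head) before any new blocks are pushed.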
let iter = blocks 36 | .iter() 37 | .enumerate() 38 | .filter(|(_index, block)| block.exists); 39 | let tail = iter 40 | .clone() 41 | .min_by_key(|(_index, block)| block.slot) 42 | .map(|(index, _block)| index) 43 | .unwrap_or_default(); 44 | let head = iter 45 | .max_by_key(|(_index, block)| block.slot) 46 | .map(|(index, _block)| index) 47 | .unwrap_or_else(|| blocks.len() - 1); 48 | info!( 49 | total = blocks.len(), 50 | tail_index = tail, 51 | tail_slot = blocks[tail].slot, 52 | head_index = head, 53 | head_slot = blocks[head].slot, 54 | "load blocks info" 55 | ); 56 | 57 | let this = Self { 58 | blocks, 59 | tail, 60 | head, 61 | stored_slots: stored_slots.clone(), 62 | sync_tx, 63 | }; 64 | 65 | stored_slots.first_available_store(this.get_back_slot()); 66 | 67 | Ok(this) 68 | } 69 | 70 | pub fn get_recent_blocks(&self) -> Vec<StoredBlock> { 71 | let mut blocks = Vec::with_capacity(MAX_RECENT_BLOCKHASHES); 72 | 73 | let mut index = self.head; 74 | while blocks.len() < MAX_RECENT_BLOCKHASHES 75 | && index != self.tail 76 | && self.blocks[index].exists 77 | { 78 | if !self.blocks[index].dead { 79 | blocks.push(self.blocks[index]); 80 | } 81 | 82 | index = if index == 0 { 83 | self.blocks.len() - 1 84 | } else { 85 | index - 1 86 | }; 87 | } 88 | 89 | blocks.reverse(); 90 | blocks 91 | } 92 | 93 | pub fn to_read(&self) -> StoredBlocksRead { 94 | StoredBlocksRead { 95 | blocks: self.blocks.clone(), 96 | tail: self.tail, 97 | head: self.head, 98 | } 99 | } 100 | 101 | pub fn is_full(&self) -> bool { 102 | let next = (self.head + 1) % self.blocks.len(); 103 | self.blocks[next].exists 104 | } 105 | 106 | pub fn get_stored_boundaries(&self) -> HashMap<StorageId, StorageBlocksBoundaries> { 107 | let mut map = HashMap::<StorageId, StorageBlocksBoundaries>::default(); 108 | for block in self.blocks.iter() { 109 | if block.exists && !block.dead { 110 | map.entry(block.storage_id).or_default().update(block); 111 | } 112 | } 113 | map 114 | } 115 | 116 | fn get_back(&self, filter: impl Fn(&StoredBlock) -> bool) -> Option<&StoredBlock> { 117 | // additional guard in case zero blocks exist 118 | if self.blocks[self.tail].exists && self.blocks[self.head].exists { 119 | let mut index = self.tail; 120 | loop { 121 | let block = &self.blocks[index]; 122 | if filter(block) { 123 | return Some(block); 124 | } 125 | if index == self.head { 126 | break; 127 | } 128 | index = (index + 1) % self.blocks.len(); 129 | } 130 | } 131 | None 132 | } 133 | 134 | pub fn get_back_slot(&self) -> Option<Slot> { 135 | self.get_back(|block| block.exists).map(|block| block.slot) 136 | } 137 | 138 | pub fn get_back_height(&self) -> Option<(Slot, Slot)> { 139 | self.get_back(|block| block.exists && !block.dead) 140 | .map(|block| (block.slot, block.block_height.expect("should have height"))) 141 | } 142 | 143 | pub fn get_front_slot(&self) -> Option<Slot> { 144 | let block = self.blocks[self.head]; 145 | block.exists.then_some(block.slot) 146 | } 147 | 148 | pub fn get_front_height(&self) -> Option<(Slot, Slot)> { 149 | let mut index = self.head; 150 | loop { 151 | let block = self.blocks[index]; 152 | if block.exists && !block.dead { 153 | return Some((block.slot, block.block_height.expect("should have height"))); 154 | } 155 | if index == self.tail { 156 | return None; 157 | } 158 | index = index.checked_sub(1).unwrap_or(self.blocks.len() - 1); 159 | } 160 | } 161 | 162 | pub fn push_block_back_dead(&mut self, slot: Slot) -> anyhow::Result<()> { 163 | self.push_block_back2(StoredBlock::new_dead(slot)) 164 | } 165 | 166 | pub fn push_block_back_confirmed( 167 | &mut self, 168 | slot: Slot, 169 | block_time: 
Option<UnixTimestamp>, 170 | block_height: Option<Slot>, 171 | storage_id: StorageId, 172 | offset: u64, 173 | block_size: u64, 174 | ) -> anyhow::Result<()> { 175 | self.push_block_back2(StoredBlock::new_confirmed( 176 | slot, 177 | block_time, 178 | block_height, 179 | storage_id, 180 | offset, 181 | block_size, 182 | )) 183 | } 184 | 185 | fn push_block_back2(&mut self, block: StoredBlock) -> anyhow::Result<()> { 186 | self.tail = self.tail.checked_sub(1).unwrap_or(self.blocks.len() - 1); 187 | anyhow::ensure!(!self.blocks[self.tail].exists, "no free slot (back)"); 188 | 189 | let _ = self 190 | .sync_tx 191 | .send(ReadWriteSyncMessage::ConfirmedBlockPushBack { 192 | block: block.into(), 193 | }); 194 | 195 | self.blocks[self.tail] = block; 196 | self.update_total(VecSide::Back); 197 | Ok(()) 198 | } 199 | 200 | pub fn push_block_front_dead(&mut self, slot: Slot) -> anyhow::Result<()> { 201 | self.push_block_front2(StoredBlock::new_dead(slot)) 202 | } 203 | 204 | pub fn push_block_front_confirmed( 205 | &mut self, 206 | slot: Slot, 207 | block_time: Option<UnixTimestamp>, 208 | block_height: Option<Slot>, 209 | storage_id: StorageId, 210 | offset: u64, 211 | block_size: u64, 212 | ) -> anyhow::Result<()> { 213 | self.push_block_front2(StoredBlock::new_confirmed( 214 | slot, 215 | block_time, 216 | block_height, 217 | storage_id, 218 | offset, 219 | block_size, 220 | )) 221 | } 222 | 223 | fn push_block_front2(&mut self, block: StoredBlock) -> anyhow::Result<()> { 224 | self.head = (self.head + 1) % self.blocks.len(); 225 | anyhow::ensure!(!self.blocks[self.head].exists, "no free slot (front)"); 226 | 227 | let _ = self 228 | .sync_tx 229 | .send(ReadWriteSyncMessage::ConfirmedBlockPushFront { 230 | block: block.into(), 231 | }); 232 | 233 | self.blocks[self.head] = block; 234 | self.update_total(VecSide::Front); 235 | Ok(()) 236 | } 237 | 238 | fn update_total(&self, side: VecSide) { 239 | let total = if self.head >= self.tail { 240 | self.head - self.tail + 1 241 | } else { 242 | (self.blocks.len() - self.tail) + self.head + 1 243 | }; 244 | self.stored_slots.set_total(total); 245 | 246 | // update stored if db was initialized 247 | if side == VecSide::Back || (self.tail == 0 && self.head == 0) { 248 | self.stored_slots 249 | .first_available_store(self.get_back_slot()); 250 | } 251 | } 252 | 253 | pub fn pop_block_back(&mut self) -> Option<StoredBlock> { 254 | if self.blocks[self.tail].exists { 255 | let block = std::mem::replace(&mut self.blocks[self.tail], StoredBlock::new_noexists()); 256 | self.tail = (self.tail + 1) % self.blocks.len(); 257 | self.stored_slots 258 | .first_available_store(self.get_back_slot()); 259 | Some(block) 260 | } else { 261 | None 262 | } 263 | } 264 | 265 | pub fn pop_block_front(&mut self) -> Option<StoredBlock> { 266 | if self.blocks[self.head].exists { 267 | let block = std::mem::replace(&mut self.blocks[self.head], StoredBlock::new_noexists()); 268 | self.head = self.head.checked_sub(1).unwrap_or(self.blocks.len() - 1); 269 | Some(block) 270 | } else { 271 | None 272 | } 273 | } 274 | } 275 | 276 | #[derive(Debug, Clone)] 277 | pub struct StoredBlocksRead { 278 | blocks: Vec<StoredBlock>, 279 | tail: usize, // lowest slot 280 | head: usize, // highest slot 281 | } 282 | 283 | impl StoredBlocksRead { 284 | pub fn pop_block_back(&mut self) { 285 | self.blocks[self.tail] = StoredBlock::new_noexists(); 286 | self.tail = (self.tail + 1) % self.blocks.len(); 287 | } 288 | 289 | pub fn pop_block_front(&mut self) { 290 | self.blocks[self.head] = StoredBlock::new_noexists(); 291 | self.head = 
self.head.checked_sub(1).unwrap_or(self.blocks.len() - 1); 292 | } 293 | 294 | pub fn push_block_back(&mut self, message: StoredBlockPushSync) { 295 | self.tail = self.tail.checked_sub(1).unwrap_or(self.blocks.len() - 1); 296 | self.blocks[self.tail] = message.block; 297 | } 298 | 299 | pub fn push_block_front(&mut self, message: StoredBlockPushSync) { 300 | self.head = (self.head + 1) % self.blocks.len(); 301 | self.blocks[self.head] = message.block; 302 | } 303 | 304 | pub fn get_block_location(&self, slot: Slot) -> StorageBlockLocationResult { 305 | let tail = self.blocks[self.tail]; 306 | if !tail.exists || tail.slot > slot { 307 | return StorageBlockLocationResult::Removed; 308 | } 309 | 310 | let head = self.blocks[self.head]; 311 | if !head.exists || head.slot < slot { 312 | return StorageBlockLocationResult::NotAvailable; 313 | } 314 | 315 | let index = (self.tail + (slot - tail.slot) as usize) % self.blocks.len(); 316 | let block = self.blocks[index]; 317 | if block.exists && block.slot == slot { 318 | if block.dead { 319 | StorageBlockLocationResult::Dead 320 | } else { 321 | StorageBlockLocationResult::Found(block) 322 | } 323 | } else { 324 | StorageBlockLocationResult::SlotMismatch 325 | } 326 | } 327 | 328 | pub fn get_blocks( 329 | &self, 330 | start_slot: Slot, 331 | end_slot: Slot, 332 | until: RpcRequestBlocksUntil, 333 | ) -> anyhow::Result<Vec<Slot>> { 334 | let tail = self.blocks[self.tail]; 335 | anyhow::ensure!( 336 | tail.exists && tail.slot <= start_slot, 337 | "requested start slot removed" 338 | ); 339 | 340 | let head = self.blocks[self.head]; 341 | anyhow::ensure!( 342 | head.exists && head.slot >= end_slot, 343 | "end slot out of limit" 344 | ); 345 | 346 | let mut blocks = Vec::with_capacity(match until { 347 | RpcRequestBlocksUntil::EndSlot(end_slot) => (end_slot - start_slot) as usize, 348 | RpcRequestBlocksUntil::Limit(limit) => limit, 349 | }); 350 | 351 | let mut index = (self.tail + (start_slot - tail.slot) as usize) % self.blocks.len(); 352 | loop { 353 | let block = self.blocks[index]; 354 | if !block.exists { 355 | break; 356 | } 357 | if block.dead { 358 | index = (index + 1) % self.blocks.len(); 359 | continue; 360 | } 361 | 362 | blocks.push(block.slot); 363 | 364 | match until { 365 | RpcRequestBlocksUntil::EndSlot(end_slot) if end_slot == block.slot => break, 366 | RpcRequestBlocksUntil::Limit(limit) if blocks.len() == limit => break, 367 | _ => {} 368 | } 369 | 370 | index = (index + 1) % self.blocks.len(); 371 | } 372 | 373 | Ok(blocks) 374 | } 375 | } 376 | 377 | #[derive(Debug, Default, Clone, Copy)] 378 | pub struct StoredBlock { 379 | pub exists: bool, 380 | pub dead: bool, 381 | pub slot: Slot, 382 | pub block_time: Option<UnixTimestamp>, 383 | pub block_height: Option<Slot>, 384 | pub storage_id: StorageId, 385 | pub offset: u64, 386 | pub size: u64, 387 | } 388 | 389 | impl StoredBlock { 390 | fn new_noexists() -> Self { 391 | Self::default() 392 | } 393 | 394 | fn new_dead(slot: Slot) -> Self { 395 | Self { 396 | exists: true, 397 | dead: true, 398 | slot, 399 | ..Default::default() 400 | } 401 | } 402 | 403 | const fn new_confirmed( 404 | slot: Slot, 405 | block_time: Option<UnixTimestamp>, 406 | block_height: Option<Slot>, 407 | storage_id: StorageId, 408 | offset: u64, 409 | size: u64, 410 | ) -> Self { 411 | Self { 412 | exists: true, 413 | dead: false, 414 | slot, 415 | block_time, 416 | block_height, 417 | storage_id, 418 | offset, 419 | size, 420 | } 421 | } 422 | } 423 | 424 | #[derive(Debug, Default, Clone, Copy)] 425 | pub struct StorageBlocksBoundaries { 426 | min: Option<StoredBlock>, 
427 | max: Option<StoredBlock>, 428 | } 429 | 430 | impl StorageBlocksBoundaries { 431 | const fn update(&mut self, block: &StoredBlock) { 432 | if let Some(min) = &mut self.min { 433 | if block.slot < min.slot { 434 | *min = *block; 435 | } 436 | } else { 437 | self.min = Some(*block); 438 | } 439 | 440 | if let Some(max) = &mut self.max { 441 | if block.slot > max.slot { 442 | *max = *block; 443 | } 444 | } else { 445 | self.max = Some(*block); 446 | } 447 | } 448 | 449 | pub fn tail(&self) -> Option<u64> { 450 | self.min.map(|block| block.offset) 451 | } 452 | 453 | pub fn head(&self) -> Option<u64> { 454 | self.max.map(|block| block.offset + block.size) 455 | } 456 | } 457 | 458 | #[derive(Debug)] 459 | pub enum StorageBlockLocationResult { 460 | Removed, // block is not available anymore 461 | Dead, // skipped or forked block for this slot 462 | NotAvailable, // not confirmed yet 463 | SlotMismatch, 464 | Found(StoredBlock), 465 | } 466 | 467 | #[derive(Debug, Clone, Copy)] 468 | pub struct StoredBlockPushSync { 469 | block: StoredBlock, 470 | } 471 | 472 | impl From<StoredBlock> for StoredBlockPushSync { 473 | fn from(block: StoredBlock) -> Self { 474 | Self { block } 475 | } 476 | } 477 | 478 | impl StoredBlockPushSync { 479 | pub const fn slot(&self) -> Slot { 480 | self.block.slot 481 | } 482 | } 483 | -------------------------------------------------------------------------------- /src/source/http.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{config::ConfigSourceHttp, source::block::BlockWithBinary}, 3 | base64::{Engine, prelude::BASE64_STANDARD}, 4 | prost::Message as _, 5 | reqwest::{Client, StatusCode}, 6 | solana_client::{ 7 | client_error::{ClientError, ClientErrorKind}, 8 | nonblocking::rpc_client::RpcClient, 9 | rpc_client::RpcClientConfig, 10 | rpc_config::RpcBlockConfig, 11 | rpc_custom_error::{ 12 | JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE, 13 | JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED, 14 | JSON_RPC_SERVER_ERROR_SLOT_SKIPPED, 15 | }, 16 | rpc_request::RpcError, 17 | }, 18 | solana_commitment_config::CommitmentConfig, 19 | solana_rpc_client::http_sender::HttpSender, 20 | solana_sdk::{ 21 | clock::Slot, 22 | message::{ 23 | Message, VersionedMessage, compiled_instruction::CompiledInstruction, 24 | v0::LoadedAddresses, 25 | }, 26 | transaction::Transaction, 27 | }, 28 | solana_storage_proto::convert::generated, 29 | solana_transaction_context::TransactionReturnData, 30 | solana_transaction_status::{ 31 | ConfirmedBlock, EncodedTransactionWithStatusMeta, InnerInstruction, InnerInstructions, 32 | TransactionDetails, TransactionStatusMeta, TransactionTokenBalance, 33 | TransactionWithStatusMeta, UiCompiledInstruction, UiConfirmedBlock, UiInnerInstructions, 34 | UiInstruction, UiLoadedAddresses, UiReturnDataEncoding, UiTransactionEncoding, 35 | UiTransactionReturnData, UiTransactionStatusMeta, UiTransactionTokenBalance, 36 | VersionedTransactionWithStatusMeta, option_serializer::OptionSerializer, 37 | }, 38 | std::fmt, 39 | thiserror::Error, 40 | tokio::sync::Semaphore, 41 | tracing::{info, warn}, 42 | url::{ParseError, Url}, 43 | }; 44 | 45 | #[allow(clippy::large_enum_variant)] 46 | #[derive(Debug, Error)] 47 | pub enum ConnectError { 48 | #[error(transparent)] 49 | HttpUrl(#[from] ParseError), 50 | #[error(transparent)] 51 | Reqwest(#[from] reqwest::Error), 52 | #[error(transparent)] 53 | Client(#[from] ClientError), 54 | } 55 | 56 | #[allow(clippy::large_enum_variant)] 57 | #[derive(Debug, Error)] 58 | pub enum GetBlockError { 59 | 
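// transport failure, slot skipped / block not yet available, or decode failure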
#[error(transparent)] 60 | Request(#[from] ClientError), 61 | #[error("Slot {0} was skipped, or missing due to ledger jump to recent snapshot")] 62 | SlotSkipped(Slot), 63 | #[error("Block not available for slot {0}")] 64 | BlockNotAvailable(Slot), 65 | #[error(transparent)] 66 | Decode(#[from] BlockDecodeError), 67 | } 68 | 69 | #[derive(Debug, Error, Clone, Copy, PartialEq, Eq)] 70 | pub enum BlockDecodeError { 71 | #[error("UiConfirmedBlock::transactions is missed")] 72 | MissedTransactions, 73 | #[error("UiConfirmedBlock::rewards is missed")] 74 | MissedRewards, 75 | #[error("VersionedTransaction failed to create")] 76 | FailedVersionedTransaction, 77 | #[error("UiTransactionStatusMeta::loaded_addresses is missed")] 78 | MissedLoadedAddresses, 79 | #[error("TransactionStatusMeta::inner_instructions failed to create")] 80 | FailedInnerInstructions, 81 | #[error("LoadedAddresses::writable failed to create")] 82 | FailedLoadedAddressesWritable, 83 | #[error("LoadedAddresses::readonly failed to create")] 84 | FailedLoadedAddressesReadonly, 85 | #[error("TransactionReturnData::program_id failed to create")] 86 | FailedTransactionReturnDataProgramId, 87 | #[error("TransactionReturnData::data failed to create")] 88 | FailedTransactionReturnData, 89 | } 90 | 91 | pub struct HttpSource { 92 | httpurl: Option<(Url, Client)>, 93 | client: RpcClient, 94 | semaphore: Semaphore, 95 | } 96 | 97 | impl fmt::Debug for HttpSource { 98 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 99 | f.debug_struct("HttpSource").finish() 100 | } 101 | } 102 | 103 | impl HttpSource { 104 | pub async fn new(config: ConfigSourceHttp) -> Result<Self, ConnectError> { 105 | let httpurl = config 106 | .httpget 107 | .map(|url| { 108 | let url = Url::parse(&url)?; 109 | let client = Client::builder().timeout(config.timeout).build()?; 110 | Ok::<_, ConnectError>((url, client)) 111 | }) 112 | .transpose()?; 113 | 114 | let sender = HttpSender::new_with_timeout(config.rpc, config.timeout); 115 | let client = RpcClient::new_sender(sender, RpcClientConfig::default()); 116 | 117 | let version = client.get_version().await?; 118 | info!(version = version.solana_core, "connected to RPC"); 119 | 120 | Ok(Self { 121 | httpurl, 122 | client, 123 | semaphore: Semaphore::new(config.concurrency), 124 | }) 125 | } 126 | 127 | pub async fn get_confirmed_slot(&self) -> Result<Slot, ClientError> { 128 | self.client 129 | .get_slot_with_commitment(CommitmentConfig::confirmed()) 130 | .await 131 | } 132 | 133 | pub async fn get_finalized_slot(&self) -> Result<Slot, ClientError> { 134 | self.client 135 | .get_slot_with_commitment(CommitmentConfig::finalized()) 136 | .await 137 | } 138 | 139 | pub async fn get_first_available_block(&self) -> Result<Slot, ClientError> { 140 | self.client.get_first_available_block().await 141 | } 142 | 143 | pub async fn get_block( 144 | &self, 145 | slot: Slot, 146 | httpget: bool, 147 | ) -> Result<BlockWithBinary, GetBlockError> { 148 | if httpget 149 | && self.httpurl.is_some() 150 | && let Some(block) = self.get_block_http(slot).await 151 | { 152 | return Ok(block); 153 | } 154 | 155 | self.get_block_rpc(slot).await 156 | } 157 | 158 | async fn get_block_http(&self, slot: Slot) -> Option<BlockWithBinary> { 159 | let (url, client) = self.httpurl.as_ref()?; 160 | 161 | let url = url.join(&format!("block/{slot}")).ok()?; 162 | let permit = self.semaphore.acquire().await.expect("unclosed"); 163 | let response = client.get(url).send().await.ok()?; 164 | drop(permit); 165 | 166 | if response.status() != StatusCode::OK { 167 | return None; 168 | } 169 | let bytes = response.bytes().await.ok()?; 170 | 171 | let block = 
generated::ConfirmedBlock::decode(bytes) 172 | .ok()? 173 | .try_into() 174 | .ok()?; 175 | 176 | Some(BlockWithBinary::new_from_confirmed_block_and_slot( 177 | block, slot, 178 | )) 179 | } 180 | 181 | async fn get_block_rpc(&self, slot: Slot) -> Result<BlockWithBinary, GetBlockError> { 182 | let config = RpcBlockConfig { 183 | encoding: Some(UiTransactionEncoding::Base64), 184 | transaction_details: Some(TransactionDetails::Full), 185 | rewards: Some(true), 186 | commitment: Some(CommitmentConfig::confirmed()), 187 | max_supported_transaction_version: Some(u8::MAX), 188 | }; 189 | 190 | let permit = self.semaphore.acquire().await.expect("unclosed"); 191 | let response = self.client.get_block_with_config(slot, config).await; 192 | drop(permit); 193 | 194 | let block = match response { 195 | Ok(block) => block, 196 | Err(error) => { 197 | match error.kind() { 198 | // not confirmed yet? 199 | ClientErrorKind::RpcError(RpcError::RpcResponseError { 200 | code: JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE, 201 | .. 202 | }) => { 203 | return Err(GetBlockError::BlockNotAvailable(slot)); 204 | } 205 | // dead 206 | ClientErrorKind::RpcError(RpcError::RpcResponseError { 207 | code: JSON_RPC_SERVER_ERROR_SLOT_SKIPPED, 208 | .. 209 | }) => { 210 | return Err(GetBlockError::SlotSkipped(slot)); 211 | } 212 | // missed 213 | ClientErrorKind::RpcError(RpcError::RpcResponseError { 214 | code: JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED, 215 | .. 216 | }) => { 217 | return Err(GetBlockError::SlotSkipped(slot)); 218 | } 219 | _ => return Err(error.into()), 220 | } 221 | } 222 | }; 223 | 224 | let block = Self::block_decode(block)?; 225 | for tx in &block.transactions { 226 | if let TransactionWithStatusMeta::MissingMetadata(tx) = &tx { 227 | warn!(slot, signature = ?tx.signatures[0], "missing metadata"); 228 | } 229 | } 230 | 231 | Ok(BlockWithBinary::new_from_confirmed_block_and_slot( 232 | block, slot, 233 | )) 234 | } 235 | 236 | fn block_decode(block: UiConfirmedBlock) -> Result<ConfirmedBlock, BlockDecodeError> { 237 | Ok(ConfirmedBlock { 238 | previous_blockhash: block.previous_blockhash, 239 | blockhash: block.blockhash, 240 | parent_slot: block.parent_slot, 241 | transactions: block 242 | .transactions 243 | .ok_or(BlockDecodeError::MissedTransactions)? 
244 | .into_iter() 245 | .map(Self::tx_decode) 246 | .collect::<Result<Vec<_>, _>>()?, 247 | rewards: block.rewards.ok_or(BlockDecodeError::MissedRewards)?, 248 | num_partitions: block.num_reward_partitions, 249 | block_time: block.block_time, 250 | block_height: block.block_height, 251 | }) 252 | } 253 | 254 | fn tx_decode( 255 | tx: EncodedTransactionWithStatusMeta, 256 | ) -> Result<TransactionWithStatusMeta, BlockDecodeError> { 257 | let EncodedTransactionWithStatusMeta { 258 | transaction, 259 | meta, 260 | version: _version, 261 | } = tx; 262 | 263 | let transaction = transaction 264 | .decode() 265 | .ok_or(BlockDecodeError::FailedVersionedTransaction)?; 266 | 267 | match meta { 268 | Some(meta) => Ok(TransactionWithStatusMeta::Complete( 269 | VersionedTransactionWithStatusMeta { 270 | transaction, 271 | meta: Self::tx_meta_decode(meta)?, 272 | }, 273 | )), 274 | None => Ok(TransactionWithStatusMeta::MissingMetadata(Transaction { 275 | signatures: transaction.signatures, 276 | message: match transaction.message { 277 | VersionedMessage::Legacy(message) => message, 278 | VersionedMessage::V0(v0) => Message { 279 | header: v0.header, 280 | account_keys: v0.account_keys, 281 | recent_blockhash: v0.recent_blockhash, 282 | instructions: v0.instructions, 283 | }, 284 | }, 285 | })), 286 | } 287 | } 288 | 289 | fn tx_meta_decode( 290 | meta: UiTransactionStatusMeta, 291 | ) -> Result<TransactionStatusMeta, BlockDecodeError> { 292 | let (la_writable, la_readonly) = match meta.loaded_addresses.into() { 293 | Some(UiLoadedAddresses { writable, readonly }) => (writable, readonly), 294 | None => return Err(BlockDecodeError::MissedLoadedAddresses), 295 | }; 296 | 297 | Ok(TransactionStatusMeta { 298 | status: meta.status.map_err(Into::into), 299 | fee: meta.fee, 300 | pre_balances: meta.pre_balances, 301 | post_balances: meta.post_balances, 302 | inner_instructions: Option::<Vec<UiInnerInstructions>>::from(meta.inner_instructions) 303 | .map(Self::tx_meta_inner_instructions_conv) 304 | .transpose()?, 305 | log_messages: meta.log_messages.into(), 306 | pre_token_balances: Option::<Vec<UiTransactionTokenBalance>>::from( 307 | meta.pre_token_balances, 308 | ) 309 | .map(Self::tx_meta_token_balances_conv), 310 | post_token_balances: Option::<Vec<UiTransactionTokenBalance>>::from( 311 | meta.post_token_balances, 312 | ) 313 | .map(Self::tx_meta_token_balances_conv), 314 | rewards: meta.rewards.into(), 315 | loaded_addresses: LoadedAddresses { 316 | writable: la_writable 317 | .iter() 318 | .map(|pk| pk.parse().ok()) 319 | .collect::<Option<Vec<_>>>() 320 | .ok_or(BlockDecodeError::FailedLoadedAddressesWritable)?, 321 | readonly: la_readonly 322 | .iter() 323 | .map(|pk| pk.parse().ok()) 324 | .collect::<Option<Vec<_>>>() 325 | .ok_or(BlockDecodeError::FailedLoadedAddressesReadonly)?, 326 | }, 327 | return_data: Option::<UiTransactionReturnData>::from(meta.return_data) 328 | .map(Self::tx_meta_return_data_conv) 329 | .transpose()?, 330 | compute_units_consumed: meta.compute_units_consumed.into(), 331 | cost_units: meta.cost_units.into(), 332 | }) 333 | } 334 | 335 | fn tx_meta_inner_instructions_conv( 336 | ixs: Vec<UiInnerInstructions>, 337 | ) -> Result<Vec<InnerInstructions>, BlockDecodeError> { 338 | ixs.into_iter() 339 | .map(|ix| { 340 | ix.instructions 341 | .into_iter() 342 | .map(|ui_ix| match ui_ix { 343 | UiInstruction::Compiled(ix) => Some(Self::ix_compiled_decode(ix)), 344 | UiInstruction::Parsed(_) => None, 345 | }) 346 | .collect::<Option<Vec<_>>>() 347 | .map(|instructions| InnerInstructions { 348 | index: ix.index, 349 | instructions, 350 | }) 351 | }) 352 | .collect::<Option<Vec<_>>>() 353 | .ok_or(BlockDecodeError::FailedInnerInstructions) 354 | } 355 | 356 | fn ix_compiled_decode(ix: UiCompiledInstruction) -> InnerInstruction { 357 | InnerInstruction { 358 | instruction: 
CompiledInstruction { 359 | accounts: ix.accounts, 360 | program_id_index: ix.program_id_index, 361 | data: bs58::decode(ix.data).into_vec().unwrap(), 362 | }, 363 | stack_height: ix.stack_height, 364 | } 365 | } 366 | 367 | fn tx_meta_token_balances_conv( 368 | token_balances: Vec<UiTransactionTokenBalance>, 369 | ) -> Vec<TransactionTokenBalance> { 370 | token_balances 371 | .into_iter() 372 | .map(|balance| TransactionTokenBalance { 373 | account_index: balance.account_index, 374 | mint: balance.mint, 375 | ui_token_amount: balance.ui_token_amount, 376 | owner: match balance.owner { 377 | OptionSerializer::Some(value) => value, 378 | _ => Default::default(), 379 | }, 380 | program_id: match balance.program_id { 381 | OptionSerializer::Some(value) => value, 382 | _ => Default::default(), 383 | }, 384 | }) 385 | .collect::<Vec<_>>() 386 | } 387 | 388 | fn tx_meta_return_data_conv( 389 | return_data: UiTransactionReturnData, 390 | ) -> Result<TransactionReturnData, BlockDecodeError> { 391 | Ok(TransactionReturnData { 392 | program_id: return_data 393 | .program_id 394 | .parse() 395 | .map_err(|_error| BlockDecodeError::FailedTransactionReturnDataProgramId)?, 396 | data: match return_data.data.1 { 397 | UiReturnDataEncoding::Base64 => BASE64_STANDARD 398 | .decode(return_data.data.0) 399 | .map_err(|_error| BlockDecodeError::FailedTransactionReturnData)?, 400 | }, 401 | }) 402 | } 403 | } 404 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{storage::files::StorageId, version::VERSION}, 3 | human_size::Size, 4 | hyper::{ 5 | HeaderMap, 6 | header::{HeaderName, HeaderValue}, 7 | }, 8 | reqwest::Version, 9 | richat_client::grpc::ConfigGrpcClient, 10 | richat_shared::config::{ConfigTokio, deserialize_affinity, deserialize_num_str}, 11 | rocksdb::DBCompressionType, 12 | serde::{ 13 | Deserialize, 14 | de::{self, Deserializer}, 15 | }, 16 | solana_rpc_client_api::request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT, 17 | std::{ 18 | collections::{HashMap, HashSet}, 19 | fs::read_to_string as read_to_string_sync, 20 | net::{IpAddr, Ipv4Addr, SocketAddr}, 21 | path::{Path, PathBuf}, 22 | str::FromStr, 23 | time::Duration, 24 | }, 25 | }; 26 | 27 | #[derive(Debug, Clone, Deserialize)] 28 | #[serde(deny_unknown_fields)] 29 | pub struct Config { 30 | #[serde(default)] 31 | pub logs: ConfigLogs, 32 | #[serde(default)] 33 | pub metrics: ConfigMetrics, 34 | /// Rpc & Stream data sources 35 | #[serde(default)] 36 | pub source: ConfigSource, 37 | /// Storage 38 | pub storage: ConfigStorage, 39 | /// RPC 40 | pub rpc: ConfigRpc, 41 | } 42 | 43 | impl Config { 44 | pub fn load_from_file<P: AsRef<Path>>(file: P) -> anyhow::Result<Self> { 45 | let config = read_to_string_sync(&file)?; 46 | if matches!( 47 | file.as_ref().extension().and_then(|e| e.to_str()), 48 | Some("yml") | Some("yaml") 49 | ) { 50 | serde_yaml::from_str(&config).map_err(Into::into) 51 | } else { 52 | json5::from_str(&config).map_err(Into::into) 53 | } 54 | } 55 | } 56 | 57 | #[derive(Debug, Clone, Default, Deserialize)] 58 | #[serde(deny_unknown_fields, default)] 59 | pub struct ConfigLogs { 60 | pub json: bool, 61 | } 62 | 63 | #[derive(Debug, Clone, Copy, Deserialize)] 64 | #[serde(deny_unknown_fields, default)] 65 | pub struct ConfigMetrics { 66 | /// Endpoint of Prometheus service 67 | pub endpoint: SocketAddr, 68 | } 69 | 70 | impl Default for ConfigMetrics { 71 | fn default() -> Self { 72 | Self { 73 | endpoint: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8001), 74 | } 75 | } 76 | }
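// NOTE (illustrative, not part of the source): with the defaults above, the
// matching fragment of `config.yml` would be:
//
//   logs:
//     json: false
//   metrics:
//     endpoint: 127.0.0.1:8001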
77 | 78 | #[derive(Debug, Default, Clone, Deserialize)] 79 | #[serde(deny_unknown_fields, default)] 80 | pub struct ConfigSource { 81 | /// Tokio runtime: subscribe on new data, rpc requests, metrics server 82 | #[serde(default)] 83 | pub tokio: ConfigTokio, 84 | pub http: ConfigSourceHttp, 85 | pub stream: ConfigSourceStream, 86 | } 87 | 88 | #[derive(Debug, Clone, Deserialize)] 89 | #[serde(deny_unknown_fields, default)] 90 | pub struct ConfigSourceHttp { 91 | pub rpc: String, 92 | pub httpget: Option<String>, 93 | #[serde(with = "humantime_serde")] 94 | pub timeout: Duration, 95 | #[serde(deserialize_with = "deserialize_num_str")] 96 | pub concurrency: usize, 97 | } 98 | 99 | impl Default for ConfigSourceHttp { 100 | fn default() -> Self { 101 | Self { 102 | rpc: "http://127.0.0.1:8899".to_owned(), 103 | httpget: None, 104 | timeout: Duration::from_secs(30), 105 | concurrency: 10, 106 | } 107 | } 108 | } 109 | 110 | #[derive(Debug, Default, Clone, Deserialize)] 111 | #[serde(deny_unknown_fields, default)] 112 | pub struct ConfigSourceStream { 113 | pub source: ConfigSourceStreamKind, 114 | #[serde(default)] 115 | pub reconnect: Option<ConfigSourceStreamReconnect>, 116 | #[serde(flatten)] 117 | pub config: ConfigGrpcClient, 118 | } 119 | 120 | #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Deserialize)] 121 | #[serde(deny_unknown_fields, rename_all = "snake_case")] 122 | pub enum ConfigSourceStreamKind { 123 | DragonsMouth, 124 | #[default] 125 | Richat, 126 | } 127 | 128 | #[derive(Debug, Clone, Copy, Deserialize)] 129 | #[serde(deny_unknown_fields, default)] 130 | pub struct ConfigSourceStreamReconnect { 131 | #[serde(with = "humantime_serde")] 132 | pub backoff_init: Duration, 133 | #[serde(with = "humantime_serde")] 134 | pub backoff_max: Duration, 135 | } 136 | 137 | impl Default for ConfigSourceStreamReconnect { 138 | fn default() -> Self { 139 | Self { 140 | backoff_init: Duration::from_millis(100), 141 | backoff_max: Duration::from_secs(1), 142 | } 143 | } 144 | } 145 | 146 | #[derive(Debug, Clone, Deserialize)] 147 | #[serde(deny_unknown_fields)] 148 | pub struct ConfigStorage { 149 | /// Backfilling options 150 | pub backfilling: Option<ConfigStorageBackfilling>, 151 | /// Storage files for blocks 152 | pub blocks: ConfigStorageBlocks, 153 | /// Storage of slots and tx index (RocksDB) 154 | pub rocksdb: ConfigStorageRocksdb, 155 | /// Write thread config 156 | #[serde(default)] 157 | pub write: ConfigStorageWrite, 158 | /// Read threads options 159 | #[serde(default)] 160 | pub read: ConfigStorageRead, 161 | } 162 | 163 | #[derive(Debug, Clone, Deserialize)] 164 | #[serde(deny_unknown_fields)] 165 | pub struct ConfigStorageBackfilling { 166 | #[serde(deserialize_with = "deserialize_num_str")] 167 | pub sync_to: u64, 168 | } 169 | 170 | #[derive(Debug, Clone, Deserialize)] 171 | #[serde(deny_unknown_fields)] 172 | pub struct ConfigStorageBlocks { 173 | #[serde(deserialize_with = "deserialize_num_str")] 174 | pub max: usize, 175 | #[serde( 176 | default = "ConfigStorageBlocks::default_http_getblock_max_retries", 177 | deserialize_with = "deserialize_num_str" 178 | )] 179 | pub http_getblock_max_retries: usize, 180 | #[serde( 181 | default = "ConfigStorageBlocks::default_http_getblock_backoff_init", 182 | with = "humantime_serde" 183 | )] 184 | pub http_getblock_backoff_init: Duration, 185 | pub files: Vec<ConfigStorageFile>, 186 | } 187 | 188 | impl ConfigStorageBlocks { 189 | const fn default_http_getblock_max_retries() -> usize { 190 | 10 191 | } 192 | 193 | const fn default_http_getblock_backoff_init() -> Duration { 194 | Duration::from_millis(100) 
195 | } 196 | } 197 | 198 | #[derive(Debug, Clone, Deserialize)] 199 | #[serde(deny_unknown_fields)] 200 | pub struct ConfigStorageFile { 201 | pub id: StorageId, 202 | pub path: PathBuf, 203 | #[serde(deserialize_with = "deserialize_humansize")] 204 | pub size: u64, 205 | #[serde(default = "ConfigStorageFile::default_new_blocks")] 206 | pub new_blocks: bool, 207 | } 208 | 209 | impl ConfigStorageFile { 210 | const fn default_new_blocks() -> bool { 211 | true 212 | } 213 | } 214 | 215 | #[derive(Debug, Clone, Deserialize)] 216 | #[serde(deny_unknown_fields)] 217 | pub struct ConfigStorageRocksdb { 218 | pub path: PathBuf, 219 | #[serde(default)] 220 | pub index_slot_compression: ConfigStorageRocksdbCompression, 221 | #[serde(default)] 222 | pub index_sfa_compression: ConfigStorageRocksdbCompression, 223 | #[serde( 224 | default = "ConfigStorageRocksdb::default_read_workers", 225 | deserialize_with = "deserialize_num_str" 226 | )] 227 | pub read_workers: usize, 228 | } 229 | 230 | impl ConfigStorageRocksdb { 231 | fn default_read_workers() -> usize { 232 | num_cpus::get() 233 | } 234 | } 235 | 236 | #[derive(Debug, Default, Clone, Copy, Deserialize)] 237 | #[serde(deny_unknown_fields, rename_all = "lowercase")] 238 | pub enum ConfigStorageRocksdbCompression { 239 | #[default] 240 | None, 241 | Snappy, 242 | Zlib, 243 | Bz2, 244 | Lz4, 245 | Lz4hc, 246 | Zstd, 247 | } 248 | 249 | impl From<ConfigStorageRocksdbCompression> for DBCompressionType { 250 | fn from(value: ConfigStorageRocksdbCompression) -> Self { 251 | match value { 252 | ConfigStorageRocksdbCompression::None => Self::None, 253 | ConfigStorageRocksdbCompression::Snappy => Self::Snappy, 254 | ConfigStorageRocksdbCompression::Zlib => Self::Zlib, 255 | ConfigStorageRocksdbCompression::Bz2 => Self::Bz2, 256 | ConfigStorageRocksdbCompression::Lz4 => Self::Lz4, 257 | ConfigStorageRocksdbCompression::Lz4hc => Self::Lz4hc, 258 | ConfigStorageRocksdbCompression::Zstd => Self::Zstd, 259 | } 260 | } 261 | } 262 | 263 | #[derive(Debug, Default, Clone, Deserialize)] 264 | #[serde(deny_unknown_fields, default)] 265 | pub struct ConfigStorageWrite { 266 | /// Thread affinity 267 | #[serde(deserialize_with = "deserialize_affinity")] 268 | pub affinity: Option<Vec<usize>>, 269 | } 270 | 271 | #[derive(Debug, Clone, Deserialize)] 272 | #[serde(deny_unknown_fields, default)] 273 | pub struct ConfigStorageRead { 274 | /// Number of threads 275 | pub threads: usize, 276 | /// Thread affinity 277 | #[serde(deserialize_with = "deserialize_affinity")] 278 | pub affinity: Option<Vec<usize>>, 279 | #[serde(deserialize_with = "deserialize_num_str")] 280 | pub thread_max_async_requests: usize, 281 | #[serde(deserialize_with = "deserialize_num_str")] 282 | pub thread_max_files_requests: usize, 283 | } 284 | 285 | impl Default for ConfigStorageRead { 286 | fn default() -> Self { 287 | Self { 288 | threads: 1, 289 | affinity: None, 290 | thread_max_async_requests: 1024, 291 | thread_max_files_requests: 32, 292 | } 293 | } 294 | } 295 | 296 | #[derive(Debug, Clone, Deserialize)] 297 | #[serde(deny_unknown_fields)] 298 | pub struct ConfigRpc { 299 | /// Endpoint of RPC service 300 | pub endpoint: SocketAddr, 301 | /// Tokio runtime for RPC 302 | #[serde(default)] 303 | pub tokio: ConfigTokio, 304 | /// Max body size limit in bytes 305 | #[serde( 306 | default = "ConfigRpc::default_body_limit", 307 | deserialize_with = "deserialize_humansize_usize" 308 | )] 309 | pub body_limit: usize, 310 | /// Extra headers added to response 311 | #[serde(default, deserialize_with = "ConfigRpc::deserialize_extra_headers")] 312 | pub 
extra_headers: HeaderMap, 313 | /// Request timeout 314 | #[serde( 315 | default = "ConfigRpc::default_request_timeout", 316 | with = "humantime_serde" 317 | )] 318 | pub request_timeout: Duration, 319 | /// Supported Http/Get methods 320 | #[serde(default)] 321 | pub calls_httpget: HashSet<ConfigRpcCallHttpGet>, 322 | /// Supported JSON-RPC calls 323 | #[serde(default)] 324 | pub calls_jsonrpc: HashSet<ConfigRpcCallJson>, 325 | /// Maximum number of Signatures in getSignaturesForAddress 326 | #[serde( 327 | default = "ConfigRpc::default_gsfa_limit", 328 | deserialize_with = "deserialize_num_str" 329 | )] 330 | pub gsfa_limit: usize, 331 | /// Enable transaction history for getSignatureStatuses 332 | #[serde(default = "ConfigRpc::default_gss_transaction_history")] 333 | pub gss_transaction_history: bool, 334 | /// Enable `percentile` in getRecentPrioritizationFees 335 | #[serde(default = "ConfigRpc::default_grpf_percentile")] 336 | pub grpf_percentile: bool, 337 | /// TTL of getClusterNodes 338 | #[serde(default = "ConfigRpc::default_gcn_cache_ttl", with = "humantime_serde")] 339 | pub gcn_cache_ttl: Duration, 340 | /// Max number of requests in the queue 341 | #[serde( 342 | default = "ConfigRpc::default_request_channel_capacity", 343 | deserialize_with = "deserialize_num_str" 344 | )] 345 | pub request_channel_capacity: usize, 346 | /// Http/Get upstreams used to fetch data that was removed locally 347 | #[serde(default)] 348 | pub upstream_httpget: Vec<ConfigRpcUpstream>, 349 | /// JSON-RPC upstreams used to fetch data that was removed locally 350 | #[serde(default)] 351 | pub upstream_jsonrpc: Vec<ConfigRpcUpstream>, 352 | /// Thread pool to parse / encode data 353 | #[serde(default)] 354 | pub workers: ConfigRpcWorkers, 355 | } 356 | 357 | impl ConfigRpc { 358 | const fn default_body_limit() -> usize { 359 | 50 * 1024 // 50KiB 360 | } 361 | 362 | fn deserialize_extra_headers<'de, D>(deserializer: D) -> Result<HeaderMap, D::Error> 363 | where 364 | D: Deserializer<'de>, 365 | { 366 | let mut map = HeaderMap::new(); 367 | for (key, value) in HashMap::<String, String>::deserialize(deserializer)? 
{ 368 | map.insert( 369 | HeaderName::try_from(&key) 370 | .map_err(|_| de::Error::custom(format!("failed to parse header key: {key}")))?, 371 | HeaderValue::try_from(&value) 372 | .map_err(|_| de::Error::custom(format!("failed to parse header value: {value}")))?, 373 | ); 374 | } 375 | Ok(map) 376 | } 377 | 378 | const fn default_request_timeout() -> Duration { 379 | Duration::from_secs(60) 380 | } 381 | 382 | const fn default_gsfa_limit() -> usize { 383 | MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT 384 | } 385 | 386 | const fn default_gss_transaction_history() -> bool { 387 | true 388 | } 389 | 390 | const fn default_grpf_percentile() -> bool { 391 | true 392 | } 393 | 394 | const fn default_gcn_cache_ttl() -> Duration { 395 | Duration::from_secs(1) 396 | } 397 | 398 | const fn default_request_channel_capacity() -> usize { 399 | 4096 400 | } 401 | } 402 | 403 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize)] 404 | #[serde(deny_unknown_fields, rename_all = "camelCase")] 405 | pub enum ConfigRpcCallHttpGet { 406 | GetBlock, 407 | GetTransaction, 408 | } 409 | 410 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize)] 411 | #[serde(deny_unknown_fields, rename_all = "camelCase")] 412 | pub enum ConfigRpcCallJson { 413 | GetBlock, 414 | GetBlockHeight, 415 | GetBlocks, 416 | GetBlocksWithLimit, 417 | GetBlockTime, 418 | GetClusterNodes, 419 | GetFirstAvailableBlock, 420 | GetInflationReward, 421 | GetLatestBlockhash, 422 | GetLeaderSchedule, 423 | GetRecentPrioritizationFees, 424 | GetSignaturesForAddress, 425 | GetSignatureStatuses, 426 | GetSlot, 427 | GetTransaction, 428 | GetVersion, 429 | IsBlockhashValid, 430 | } 431 | 432 | impl ConfigRpcCallJson { 433 | fn all() -> impl Iterator<Item = Self> { 434 | [ 435 | Self::GetBlock, 436 | Self::GetBlockHeight, 437 | Self::GetBlocks, 438 | Self::GetBlocksWithLimit, 439 | Self::GetBlockTime, 440 | Self::GetClusterNodes, 441 | Self::GetFirstAvailableBlock, 442 | Self::GetInflationReward, 443 | Self::GetLatestBlockhash, 444 | Self::GetLeaderSchedule, 445 | Self::GetRecentPrioritizationFees, 446 | Self::GetSignaturesForAddress, 447 | Self::GetSignatureStatuses, 448 | Self::GetSlot, 449 | Self::GetTransaction, 450 | Self::GetVersion, 451 | Self::IsBlockhashValid, 452 | ] 453 | .into_iter() 454 | } 455 | } 456 | 457 | #[derive(Debug, Clone, Deserialize)] 458 | #[serde(deny_unknown_fields, default)] 459 | pub struct ConfigRpcUpstream { 460 | pub name: String, 461 | pub calls: HashSet<ConfigRpcCallJson>, 462 | pub endpoint: String, 463 | pub user_agent: String, 464 | #[serde(deserialize_with = "ConfigRpcUpstream::deserialize_version")] 465 | pub version: Version, 466 | #[serde(with = "humantime_serde")] 467 | pub timeout: Duration, 468 | } 469 | 470 | impl Default for ConfigRpcUpstream { 471 | fn default() -> Self { 472 | Self { 473 | name: "main".to_owned(), 474 | calls: ConfigRpcCallJson::all().collect(), 475 | endpoint: "http://127.0.0.1:8899".to_owned(), 476 | user_agent: format!("alpamayo/v{}", VERSION.package), 477 | version: Version::default(), 478 | timeout: Duration::from_secs(30), 479 | } 480 | } 481 | } 482 | 483 | impl ConfigRpcUpstream { 484 | fn deserialize_version<'de, D>(deserializer: D) -> Result<Version, D::Error> 485 | where 486 | D: Deserializer<'de>, 487 | { 488 | Ok(match String::deserialize(deserializer)?.as_str() { 489 | "HTTP/0.9" => Version::HTTP_09, 490 | "HTTP/1.0" => Version::HTTP_10, 491 | "HTTP/1.1" => Version::HTTP_11, 492 | "HTTP/2.0" => Version::HTTP_2, 493 | "HTTP/3.0" => Version::HTTP_3, 494 | value => { 495 | return Err(de::Error::custom(format!("unknown 
HTTP version: {value}"))); 496 | } 497 | }) 498 | } 499 | } 500 | 501 | #[derive(Debug, Clone, Deserialize)] 502 | #[serde(deny_unknown_fields, default)] 503 | pub struct ConfigRpcWorkers { 504 | /// Number of worker threads 505 | #[serde(deserialize_with = "deserialize_num_str")] 506 | pub threads: usize, 507 | /// Threads affinity 508 | #[serde(deserialize_with = "deserialize_affinity")] 509 | pub affinity: Option<Vec<usize>>, 510 | /// Queue size 511 | #[serde(deserialize_with = "deserialize_num_str")] 512 | pub channel_size: usize, 513 | } 514 | 515 | impl Default for ConfigRpcWorkers { 516 | fn default() -> Self { 517 | Self { 518 | threads: num_cpus::get(), 519 | affinity: None, 520 | channel_size: 4096, 521 | } 522 | } 523 | } 524 | 525 | fn deserialize_humansize<'de, D>(deserializer: D) -> Result<u64, D::Error> 526 | where 527 | D: Deserializer<'de>, 528 | { 529 | let size: &str = Deserialize::deserialize(deserializer)?; 530 | 531 | Size::from_str(size) 532 | .map(|size| size.to_bytes()) 533 | .map_err(|error| de::Error::custom(format!("failed to parse size {size:?}: {error}"))) 534 | } 535 | 536 | fn deserialize_humansize_usize<'de, D>(deserializer: D) -> Result<usize, D::Error> 537 | where 538 | D: Deserializer<'de>, 539 | { 540 | deserialize_humansize(deserializer).map(|value| value as usize) 541 | } 542 | -------------------------------------------------------------------------------- /src/source/stream.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | config::{ConfigSourceStream, ConfigSourceStreamKind}, 4 | source::{block::BlockWithBinary, transaction::TransactionWithBinary}, 5 | }, 6 | futures::{StreamExt, ready, stream::Stream}, 7 | maplit::hashmap, 8 | richat_client::{grpc::GrpcClientBuilderError, stream::SubscribeStream}, 9 | richat_proto::{ 10 | convert_from::{create_reward, create_tx_with_meta}, 11 | geyser::{ 12 | CommitmentLevel as CommitmentLevelProto, SlotStatus as SlotStatusProto, 13 | SubscribeRequest, SubscribeRequestFilterBlocksMeta, SubscribeRequestFilterSlots, 14 | SubscribeRequestFilterTransactions, SubscribeUpdate, SubscribeUpdateBlockMeta, 15 | SubscribeUpdateSlot, SubscribeUpdateTransaction, subscribe_update::UpdateOneof, 16 | }, 17 | richat::{GrpcSubscribeRequest, RichatFilter}, 18 | }, 19 | solana_sdk::clock::Slot, 20 | solana_transaction_status::TransactionWithStatusMeta, 21 | std::{ 22 | collections::BTreeMap, 23 | pin::Pin, 24 | task::{Context, Poll}, 25 | }, 26 | thiserror::Error, 27 | tracing::{info, warn}, 28 | }; 29 | 30 | #[derive(Debug, Error)] 31 | pub enum ConnectError { 32 | #[error(transparent)] 33 | Build(#[from] GrpcClientBuilderError), 34 | #[error(transparent)] 35 | Connect(#[from] tonic::Status), 36 | } 37 | 38 | #[derive(Debug, Error)] 39 | pub enum RecvError { 40 | #[error(transparent)] 41 | Recv(#[from] richat_client::error::ReceiveError), 42 | #[error("missed field: {0}")] 43 | MissedField(&'static str), 44 | #[error("unexpected message: {0}")] 45 | UnexpectedMessage(&'static str), 46 | #[error("unknown commitment level: {0}")] 47 | UnknownCommitmentLevel(i32), 48 | #[error("parent slot is missed for: {0}")] 49 | MissedParent(Slot), 50 | #[error("unexpected commitment level, {0:?} -> {1:?}")] 51 | UnexpectedCommitment(SlotStatusProto, SlotStatusProto), 52 | #[error("failed to decode Transaction: {0}")] 53 | TransactionWithMetaFailed(&'static str), 54 | #[error("failed to build reward: {0}")] 55 | RewardsFailed(&'static str), 56 | } 57 | 58 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 59 | pub enum 
StreamSourceSlotStatus { 60 | Dead, 61 | Confirmed, 62 | Finalized, 63 | } 64 | 65 | #[derive(Debug)] 66 | pub enum StreamSourceMessage { 67 | Start, 68 | Block { 69 | slot: Slot, 70 | block: BlockWithBinary, 71 | }, 72 | SlotStatus { 73 | slot: Slot, 74 | parent: Slot, 75 | status: StreamSourceSlotStatus, 76 | }, 77 | } 78 | 79 | #[derive(Debug)] 80 | struct SlotInfo { 81 | slot: Slot, 82 | status: SlotStatusProto, 83 | parent: Option<Slot>, 84 | transactions: Vec<(u64, TransactionWithBinary)>, 85 | block_meta: Option<SubscribeUpdateBlockMeta>, 86 | sealed: bool, 87 | ignore_block_build_fail: bool, 88 | } 89 | 90 | impl Drop for SlotInfo { 91 | fn drop(&mut self) { 92 | if !self.sealed && !self.ignore_block_build_fail { 93 | warn!( 94 | slot = self.slot, 95 | status = ?self.status, 96 | parent = self.parent, 97 | transactions = self.transactions.len(), 98 | executed_transaction_count = self 99 | .block_meta 100 | .as_ref() 101 | .map(|bm| bm.executed_transaction_count), 102 | "block build failed" 103 | ); 104 | } 105 | } 106 | } 107 | 108 | impl SlotInfo { 109 | fn new(slot: Slot) -> Self { 110 | Self { 111 | slot, 112 | status: SlotStatusProto::SlotCreatedBank, 113 | parent: None, 114 | transactions: Vec::with_capacity(8_192), 115 | block_meta: None, 116 | sealed: false, 117 | ignore_block_build_fail: false, 118 | } 119 | } 120 | 121 | fn try_build_block(&mut self) -> Option<Result<BlockWithBinary, RecvError>> { 122 | if self.sealed 123 | || self 124 | .block_meta 125 | .as_ref() 126 | .map(|bm| bm.executed_transaction_count) 127 | != Some(self.transactions.len() as u64) 128 | { 129 | return None; 130 | } 131 | self.sealed = true; 132 | 133 | let block_meta = self.block_meta.take()?; 134 | let (rewards, num_partitions) = match block_meta.rewards { 135 | Some(obj) => { 136 | match obj 137 | .rewards 138 | .into_iter() 139 | .map(create_reward) 140 | .collect::<Result<Vec<_>, _>>() 141 | { 142 | Ok(rewards) => (rewards, obj.num_partitions.map(|obj| obj.num_partitions)), 143 | Err(error) => return Some(Err(RecvError::RewardsFailed(error))), 144 | } 145 | } 146 | None => (vec![], None), 147 | }; 148 | 149 | let mut transactions = std::mem::take(&mut self.transactions); 150 | transactions.sort_unstable_by_key(|(index, _tx)| *index); 151 | 152 | Some(Ok(BlockWithBinary::new( 153 | block_meta.parent_blockhash, 154 | block_meta.blockhash, 155 | block_meta.parent_slot, 156 | transactions.into_iter().map(|(_index, tx)| tx).collect(), 157 | rewards, 158 | num_partitions, 159 | block_meta.block_time.map(|obj| obj.timestamp), 160 | block_meta.block_height.map(|obj| obj.block_height), 161 | ))) 162 | } 163 | } 164 | 165 | #[derive(Debug)] 166 | pub struct StreamSource { 167 | stream: SubscribeStream, 168 | slots: BTreeMap<Slot, SlotInfo>, 169 | first_processed: Option<Slot>, 170 | } 171 | 172 | impl StreamSource { 173 | pub async fn new(config: ConfigSourceStream) -> Result<Self, ConnectError> { 174 | let mut connection = config.config.connect().await?; 175 | 176 | let version = connection.get_version().await?; 177 | info!(version = version.version, "connected to stream"); 178 | 179 | let stream = match config.source { 180 | ConfigSourceStreamKind::DragonsMouth => connection 181 | .subscribe_dragons_mouth_once(Self::create_dragons_mouth_filter()) 182 | .await? 183 | .into_parsed(), 184 | ConfigSourceStreamKind::Richat => connection 185 | .subscribe_richat(GrpcSubscribeRequest { 186 | replay_from_slot: None, 187 | filter: Self::create_richat_filter(), 188 | }) 189 | .await? 
190 | .into_parsed(), 191 | }; 192 | 193 | Ok(Self { 194 | stream, 195 | slots: BTreeMap::new(), 196 | first_processed: None, 197 | }) 198 | } 199 | 200 | const fn create_richat_filter() -> Option<RichatFilter> { 201 | Some(RichatFilter { 202 | disable_accounts: true, 203 | disable_transactions: false, 204 | disable_entries: true, 205 | }) 206 | } 207 | 208 | fn create_dragons_mouth_filter() -> SubscribeRequest { 209 | SubscribeRequest { 210 | accounts: hashmap! {}, 211 | slots: hashmap! { "".to_owned() => SubscribeRequestFilterSlots { 212 | filter_by_commitment: Some(false), 213 | interslot_updates: Some(true), 214 | } }, 215 | transactions: hashmap! { "".to_owned() => SubscribeRequestFilterTransactions::default() }, 216 | transactions_status: hashmap! {}, 217 | blocks: hashmap! {}, 218 | blocks_meta: hashmap! { "".to_owned() => SubscribeRequestFilterBlocksMeta::default() }, 219 | entry: hashmap! {}, 220 | commitment: Some(CommitmentLevelProto::Processed as i32), 221 | accounts_data_slice: vec![], 222 | ping: None, 223 | from_slot: None, 224 | } 225 | } 226 | } 227 | 228 | impl Stream for StreamSource { 229 | type Item = Result<StreamSourceMessage, RecvError>; 230 | 231 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { 232 | let mut this = self.as_mut(); 233 | loop { 234 | return Poll::Ready(Some(match ready!(this.stream.poll_next_unpin(cx)) { 235 | Some(Ok(SubscribeUpdate { update_oneof, .. })) => match update_oneof { 236 | Some(UpdateOneof::Account(_)) => Err(RecvError::UnexpectedMessage("Account")), 237 | Some(UpdateOneof::Slot(SubscribeUpdateSlot { 238 | slot, 239 | parent, 240 | status, 241 | .. 242 | })) => { 243 | let status = match SlotStatusProto::try_from(status) { 244 | Ok(SlotStatusProto::SlotProcessed) => continue, 245 | Ok(SlotStatusProto::SlotConfirmed) => SlotStatusProto::SlotConfirmed, 246 | Ok(SlotStatusProto::SlotFinalized) => SlotStatusProto::SlotFinalized, 247 | Ok(SlotStatusProto::SlotFirstShredReceived) => continue, 248 | Ok(SlotStatusProto::SlotCompleted) => continue, 249 | Ok(SlotStatusProto::SlotCreatedBank) => { 250 | SlotStatusProto::SlotCreatedBank 251 | } 252 | Ok(SlotStatusProto::SlotDead) => SlotStatusProto::SlotDead, 253 | Err(_error) => { 254 | return Poll::Ready(Some(Err(RecvError::UnknownCommitmentLevel( 255 | status, 256 | )))); 257 | } 258 | }; 259 | 260 | // store first processed slot 261 | if status == SlotStatusProto::SlotCreatedBank 262 | && this.first_processed.is_none() 263 | { 264 | this.first_processed = Some(slot); 265 | } 266 | 267 | // drop the message if the slot is at or below the first processed slot 268 | let first_processed = match this.first_processed { 269 | Some(first_processed) if slot > first_processed => first_processed, 270 | _ => continue, 271 | }; 272 | 273 | // drop outdated slots 274 | if status == SlotStatusProto::SlotFinalized { 275 | loop { 276 | match this.slots.keys().next().copied() { 277 | Some(slot_first) if slot_first < slot => { 278 | if let Some(mut slot_info) = this.slots.remove(&slot_first) 279 | { 280 | // suppress the warning if the block is not expected to be complete 281 | if slot_first <= first_processed { 282 | slot_info.ignore_block_build_fail = true; 283 | } 284 | drop(slot_info); 285 | } 286 | } 287 | _ => break, 288 | } 289 | } 290 | 291 | return Poll::Ready(Some(if let Some(parent) = parent { 292 | Ok(StreamSourceMessage::SlotStatus { 293 | slot, 294 | parent, 295 | status: StreamSourceSlotStatus::Finalized, 296 | }) 297 | } else { 298 | Err(RecvError::MissedParent(slot)) 299 | })); 300 | } 301 | 302 | // create slot info 303 | let entry = 
this.slots.entry(slot); 304 | let slot_info = entry.or_insert_with(|| SlotInfo::new(slot)); 305 | 306 | // store parent and drop message (only processed) 307 | if status == SlotStatusProto::SlotCreatedBank { 308 | if slot_info.parent.is_none() 309 | && slot_info.status == SlotStatusProto::SlotCreatedBank 310 | && let Some(parent) = parent 311 | { 312 | slot_info.parent = Some(parent); 313 | continue; 314 | } 315 | 316 | if first_processed <= slot { 317 | return Poll::Ready(Some(Err(RecvError::UnexpectedCommitment( 318 | slot_info.status, 319 | status, 320 | )))); 321 | } 322 | continue; 323 | } 324 | 325 | // update status 326 | let parent = match (slot_info.parent, slot_info.status, status) { 327 | ( 328 | Some(parent), 329 | SlotStatusProto::SlotCreatedBank, 330 | SlotStatusProto::SlotDead, 331 | ) => { 332 | slot_info.status = SlotStatusProto::SlotDead; 333 | parent 334 | } 335 | ( 336 | Some(parent), 337 | SlotStatusProto::SlotCreatedBank, 338 | SlotStatusProto::SlotConfirmed, 339 | ) => { 340 | slot_info.status = SlotStatusProto::SlotConfirmed; 341 | parent 342 | } 343 | _ => { 344 | return Poll::Ready(Some(Err(RecvError::UnexpectedCommitment( 345 | slot_info.status, 346 | status, 347 | )))); 348 | } 349 | }; 350 | 351 | Ok(StreamSourceMessage::SlotStatus { 352 | slot, 353 | parent, 354 | status: match status { 355 | SlotStatusProto::SlotDead => StreamSourceSlotStatus::Dead, 356 | SlotStatusProto::SlotConfirmed => StreamSourceSlotStatus::Confirmed, 357 | SlotStatusProto::SlotFinalized => StreamSourceSlotStatus::Finalized, 358 | _ => unreachable!(), 359 | }, 360 | }) 361 | } 362 | Some(UpdateOneof::Transaction(SubscribeUpdateTransaction { 363 | transaction, 364 | slot, 365 | })) => match transaction { 366 | Some(tx) => { 367 | let first_processed = this.first_processed; 368 | let entry = this.slots.entry(slot); 369 | let slot_info = entry.or_insert_with(|| SlotInfo::new(slot)); 370 | let is_vote = tx.is_vote; 371 | let index = tx.index; 372 | match create_tx_with_meta(tx) { 373 | Ok(tx) => { 374 | if let TransactionWithStatusMeta::MissingMetadata(tx) = &tx { 375 | warn!(slot, signature = ?tx.signatures[0], "missing metadata"); 376 | } 377 | 378 | slot_info.transactions.push(( 379 | index, 380 | TransactionWithBinary::new(slot, tx, Some(is_vote)), 381 | )); 382 | if let Some(first_processed) = first_processed 383 | && slot <= first_processed 384 | { 385 | continue; 386 | } 387 | match slot_info.try_build_block() { 388 | Some(Ok(block)) => { 389 | Ok(StreamSourceMessage::Block { slot, block }) 390 | } 391 | Some(Err(error)) => Err(error), 392 | None => continue, 393 | } 394 | } 395 | Err(error) => Err(RecvError::TransactionWithMetaFailed(error)), 396 | } 397 | } 398 | None => Err(RecvError::MissedField("transaction")), 399 | }, 400 | Some(UpdateOneof::TransactionStatus(_)) => { 401 | Err(RecvError::UnexpectedMessage("TransactionStatus")) 402 | } 403 | Some(UpdateOneof::Block(_)) => Err(RecvError::UnexpectedMessage("Block")), 404 | Some(UpdateOneof::Ping(_)) => continue, 405 | Some(UpdateOneof::Pong(_)) => Err(RecvError::UnexpectedMessage("Pong")), 406 | Some(UpdateOneof::BlockMeta(block_meta)) => { 407 | let slot = block_meta.slot; 408 | let first_processed = this.first_processed; 409 | let entry = this.slots.entry(slot); 410 | let slot_info = entry.or_insert_with(|| SlotInfo::new(slot)); 411 | slot_info.block_meta = Some(block_meta); 412 | if let Some(first_processed) = first_processed 413 | && slot <= first_processed 414 | { 415 | continue; 416 | } 417 | match 
slot_info.try_build_block() { 418 | Some(Ok(block)) => Ok(StreamSourceMessage::Block { slot, block }), 419 | Some(Err(error)) => Err(error), 420 | None => continue, 421 | } 422 | } 423 | Some(UpdateOneof::Entry(_)) => Err(RecvError::UnexpectedMessage("Entry")), 424 | None => Err(RecvError::MissedField("update_oneof")), 425 | }, 426 | Some(Err(error)) => Err(error.into()), 427 | None => return Poll::Ready(None), 428 | })); 429 | } 430 | } 431 | } 432 | --------------------------------------------------------------------------------
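A minimal consumption sketch (an illustration, not a file from this repository; the crate path `alpamayo::source::stream` and the `anyhow`/`tracing` glue are assumptions): `StreamSource` implements `futures::Stream` with `Item = Result<StreamSourceMessage, RecvError>`, so a driver task can poll it with `StreamExt::next` and dispatch on the message kind.

use alpamayo::{config::ConfigSourceStream, source::stream::{StreamSource, StreamSourceMessage}};
use futures::StreamExt;

async fn run(config: ConfigSourceStream) -> anyhow::Result<()> {
    // connect and subscribe (Richat or Dragon's Mouth, depending on the config)
    let mut source = StreamSource::new(config).await?;
    // a Block message is emitted only after all transactions and the block meta arrived
    while let Some(message) = source.next().await {
        match message? {
            StreamSourceMessage::Start => {}
            StreamSourceMessage::Block { slot, .. } => tracing::info!(slot, "block ready"),
            StreamSourceMessage::SlotStatus { slot, parent, status } => {
                tracing::info!(slot, parent, ?status, "slot status");
            }
        }
    }
    Ok(())
}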