├── perf ├── .gitignore ├── src │ ├── lib.rs │ └── thread.rs └── Cargo.toml ├── metrics ├── .gitignore ├── Cargo.toml └── src │ └── lib.rs ├── net-utils ├── .gitignore ├── Cargo.toml └── src │ └── lib.rs ├── rpc ├── .gitignore ├── src │ ├── rpc_network_node.rs │ ├── lib.rs │ ├── input_validators.rs │ ├── logging.rs │ ├── rpc_network_info.rs │ ├── rpc_server.rs │ ├── deprecated.rs │ ├── rpc_core.rs │ ├── custom_error.rs │ ├── main.rs │ ├── middleware.rs │ ├── rpc_service.rs │ ├── genesis_unpack.rs │ └── cli.rs ├── build.rs └── Cargo.toml ├── storage-bigtable ├── proto │ ├── google.protobuf.rs │ └── google.rpc.rs ├── build-proto │ ├── .gitignore │ ├── README.md │ ├── build.sh │ ├── Cargo.toml │ └── src │ │ └── main.rs ├── init-bigtable.sh ├── src │ ├── root_ca_certificate.rs │ └── access_token.rs ├── README.md └── Cargo.toml ├── storage-adapter ├── .gitignore ├── Cargo.toml └── src │ ├── compression.rs │ └── lib.rs ├── storage-hbase ├── .gitignore ├── README.md ├── Cargo.toml └── src │ └── hbase.rs ├── rust-toolchain.toml ├── .dockerignore ├── storage-proto ├── README.md ├── proto │ ├── car_index_entry.proto │ ├── confirmed_block.proto │ └── transaction_by_addr.proto ├── build.rs ├── Cargo.toml └── src │ └── lib.rs ├── .gitignore ├── Dockerfile ├── cargo ├── docker ├── Dockerfile.archival-rpc └── docker-compose.yml ├── Cargo.toml ├── README.md ├── .github └── workflows │ └── build-and-release.yml └── LICENSE /perf/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ -------------------------------------------------------------------------------- /metrics/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ -------------------------------------------------------------------------------- /net-utils/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ -------------------------------------------------------------------------------- /rpc/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | -------------------------------------------------------------------------------- /storage-bigtable/proto/google.protobuf.rs: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /storage-adapter/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | /test-ledger/ -------------------------------------------------------------------------------- /storage-hbase/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | /test-ledger/ -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.86.0" 3 | -------------------------------------------------------------------------------- /perf/src/lib.rs: -------------------------------------------------------------------------------- 1 | 2 | extern crate log; 3 | 4 | pub mod thread; -------------------------------------------------------------------------------- /storage-hbase/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Solana HBase storage library -------------------------------------------------------------------------------- 
/storage-bigtable/build-proto/.gitignore: -------------------------------------------------------------------------------- 1 | googleapis/ 2 | target/ 3 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target/* 2 | !target/release/solana-storage-rpc 3 | tmp/* 4 | .git -------------------------------------------------------------------------------- /storage-bigtable/build-proto/README.md: -------------------------------------------------------------------------------- 1 | Helper project to build Rust bindings for BigTable, to avoid requiring all 2 | Solana developers to have protoc installed 3 | -------------------------------------------------------------------------------- /storage-proto/README.md: -------------------------------------------------------------------------------- 1 | # Storage Protobufs 2 | 3 | The `solana-storage-proto` structs used in `src/convert.rs` and elsewhere are 4 | auto-generated from protobuf definitions on build. To update these structs, 5 | simply make the desired edits to `proto/*.proto` files. 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | /target/ 3 | 4 | **/*.rs.bk 5 | .cargo 6 | 7 | /config/ 8 | 9 | /tmp/ 10 | 11 | .cache 12 | 13 | # log files 14 | *.log 15 | log-*.txt 16 | log-*/ 17 | 18 | # intellij files 19 | .idea/ 20 | /solana.iml 21 | /.vscode/ 22 | 23 | .DS_Store 24 | 25 | -------------------------------------------------------------------------------- /storage-bigtable/build-proto/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ex 4 | cd "$(dirname "$0")" 5 | 6 | cargo="$(readlink -f "../../cargo")" 7 | 8 | if [[ !
-d googleapis ]]; then 9 | git clone https://github.com/googleapis/googleapis.git 10 | fi 11 | 12 | exec "$cargo" run 13 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | FROM debian:bookworm-slim 3 | 4 | RUN mkdir -p /solana 5 | WORKDIR /solana 6 | 7 | #ARG AMD64_BINARY 8 | #ARG ARM64_BINARY 9 | ARG TARGETARCH 10 | 11 | COPY output/linux/${TARGETARCH}/archival-rpc /solana/archival-rpc 12 | 13 | # Expose the necessary port 14 | EXPOSE 8899 15 | 16 | ENV RUST_LOG=info 17 | 18 | CMD ["./archival-rpc", "--bind-address=0.0.0.0", "--enable-rpc-hbase-ledger-storage", "--rpc-hbase-address=hbase:9090"] 19 | -------------------------------------------------------------------------------- /metrics/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solana-metrics" 3 | description = "Solana Metrics" 4 | documentation = "https://docs.rs/solana-metrics" 5 | version = { workspace = true } 6 | authors = { workspace = true } 7 | repository = { workspace = true } 8 | homepage = { workspace = true } 9 | license = { workspace = true } 10 | edition = { workspace = true } 11 | 12 | [dependencies] 13 | prometheus = { workspace = true } 14 | 15 | [lib] 16 | name = "solana_metrics" 17 | 18 | [package.metadata.docs.rs] 19 | targets = ["x86_64-unknown-linux-gnu"] 20 | -------------------------------------------------------------------------------- /net-utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solana-net-utils" 3 | description = "Solana Network Utilities" 4 | documentation = "https://docs.rs/solana-net-utils" 5 | version = { workspace = true } 6 | authors = { workspace = true } 7 | repository = { workspace = true } 8 | homepage = { workspace = true } 9 | license = { workspace = true } 10 | edition = { workspace = true } 11 | 12 | [dependencies] 13 | url = { workspace = true } 14 | 15 | [lib] 16 | name = "solana_net_utils" 17 | 18 | [package.metadata.docs.rs] 19 | targets = ["x86_64-unknown-linux-gnu"] 20 | -------------------------------------------------------------------------------- /storage-proto/proto/car_index_entry.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package solana.storage.CarIndexEntry; 4 | 5 | message CarIndexEntry { 6 | uint64 slot = 1; 7 | string block_hash = 2; 8 | uint64 offset = 3; 9 | uint64 length = 4; 10 | uint64 start_slot = 5; 11 | uint64 end_slot = 6; 12 | UnixTimestamp timestamp = 7; 13 | string previous_block_hash = 8; 14 | BlockHeight block_height = 9; 15 | UnixTimestamp block_time = 10; 16 | } 17 | 18 | message UnixTimestamp { 19 | int64 timestamp = 1; 20 | } 21 | 22 | message BlockHeight { 23 | uint64 block_height = 1; 24 | } -------------------------------------------------------------------------------- /storage-bigtable/init-bigtable.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Configures a BigTable instance with the expected tables 4 | # 5 | 6 | set -e 7 | 8 | instance=solana-ledger 9 | 10 | cbt=( 11 | cbt 12 | -instance 13 | "$instance" 14 | ) 15 | if [[ -n $BIGTABLE_EMULATOR_HOST ]]; then 16 | cbt+=(-project emulator) 17 | fi 18 | 19 | for table in blocks tx tx-by-addr; do 20 | ( 21 | set -x 22 | "${cbt[@]}" createtable $table 23 | "${cbt[@]}"
createfamily $table x 24 | "${cbt[@]}" setgcpolicy $table x maxversions=1 25 | "${cbt[@]}" setgcpolicy $table x maxage=360d 26 | ) 27 | done 28 | -------------------------------------------------------------------------------- /storage-bigtable/build-proto/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | description = "Blockchain, Rebuilt for Scale" 3 | name = "proto" 4 | publish = false 5 | version = { workspace = true } 6 | authors = { workspace = true } 7 | repository = { workspace = true } 8 | homepage = { workspace = true } 9 | license = { workspace = true } 10 | edition = { workspace = true } 11 | 12 | [dependencies] 13 | tonic-build = { workspace = true } 14 | 15 | # windows users should install the protobuf compiler manually and set the PROTOC 16 | # envar to point to the installed binary 17 | [target."cfg(not(windows))".dependencies] 18 | protobuf-src = { workspace = true } 19 | -------------------------------------------------------------------------------- /rpc/src/rpc_network_node.rs: -------------------------------------------------------------------------------- 1 | 2 | use { 3 | crate::{ 4 | rpc_network_info::RpcNetworkInfo, 5 | }, 6 | std::{ 7 | net::{ 8 | SocketAddr, 9 | IpAddr, 10 | }, 11 | }, 12 | }; 13 | 14 | #[derive(Debug)] 15 | pub struct RpcNetworkNode { 16 | pub info: RpcNetworkInfo, 17 | } 18 | 19 | impl RpcNetworkNode { 20 | pub fn new_single_bind( 21 | rpc_port: u16, 22 | bind_ip_addr: IpAddr, 23 | ) -> Self { 24 | let info = RpcNetworkInfo { 25 | rpc: SocketAddr::new(bind_ip_addr, rpc_port), 26 | }; 27 | 28 | RpcNetworkNode { 29 | info, 30 | } 31 | } 32 | } -------------------------------------------------------------------------------- /perf/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solana-perf" 3 | description = "Solana Performance APIs" 4 | documentation = "https://docs.rs/solana-perf" 5 | version = { workspace = true } 6 | authors = { workspace = true } 7 | repository = { workspace = true } 8 | homepage = { workspace = true } 9 | license = { workspace = true } 10 | edition = { workspace = true } 11 | 12 | [dependencies] 13 | log = { workspace = true } 14 | 15 | [target."cfg(target_os = \"linux\")".dependencies] 16 | caps = { workspace = true } 17 | libc = { workspace = true } 18 | nix = { workspace = true, features = ["user"] } 19 | 20 | [lib] 21 | name = "solana_perf" 22 | 23 | [package.metadata.docs.rs] 24 | targets = ["x86_64-unknown-linux-gnu"] 25 | -------------------------------------------------------------------------------- /rpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::integer_arithmetic)] 2 | #![recursion_limit = "2048"] 3 | 4 | pub mod custom_error; 5 | 6 | pub mod request_processor; 7 | pub mod rpc_service; 8 | 9 | #[macro_use] 10 | extern crate log; 11 | 12 | #[macro_use] 13 | extern crate serde_derive; 14 | 15 | #[cfg(test)] 16 | #[macro_use] 17 | extern crate serde_json; 18 | 19 | pub mod cli; 20 | 21 | pub mod logging; 22 | 23 | pub mod middleware; 24 | 25 | pub mod rpc; 26 | 27 | pub mod rpc_server; 28 | 29 | pub mod rpc_network_node; 30 | 31 | #[macro_use] 32 | pub mod rpc_network_info; 33 | 34 | pub mod rpc_core; 35 | 36 | pub mod input_validators; 37 | 38 | pub mod deprecated; 39 | 40 | pub mod genesis_unpack; 41 | 42 | #[cfg(test)] 43 | #[macro_use] 44 | extern crate matches; 45 | 46 | 
-------------------------------------------------------------------------------- /rpc/src/input_validators.rs: -------------------------------------------------------------------------------- 1 | use { 2 | std::{ 3 | fmt::Display, 4 | str::FromStr 5 | }, 6 | }; 7 | 8 | fn is_parsable_generic<U, T>(string: T) -> Result<(), String> 9 | where 10 | T: AsRef<str> + Display, 11 | U: FromStr, 12 | U::Err: Display, 13 | { 14 | string 15 | .as_ref() 16 | .parse::<U>() 17 | .map(|_| ()) 18 | .map_err(|err| format!("error parsing '{string}': {err}")) 19 | } 20 | 21 | // Return an error if string cannot be parsed as type T. 22 | // Takes a String to avoid second type parameter when used as a clap validator 23 | pub fn is_parsable<T>(string: String) -> Result<(), String> 24 | where 25 | T: FromStr, 26 | T::Err: Display, 27 | { 28 | is_parsable_generic::<T, String>(string) 29 | } 30 | -------------------------------------------------------------------------------- /cargo: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # shellcheck source=ci/rust-version.sh 4 | here=$(dirname "$0") 5 | 6 | toolchain= 7 | case "$1" in 8 | stable) 9 | source "${here}"/ci/rust-version.sh stable 10 | # shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh 11 | toolchain="$rust_stable" 12 | shift 13 | ;; 14 | nightly) 15 | source "${here}"/ci/rust-version.sh nightly 16 | # shellcheck disable=SC2054 # rust_nightly is sourced from rust-version.sh 17 | toolchain="$rust_nightly" 18 | shift 19 | ;; 20 | *) 21 | source "${here}"/ci/rust-version.sh stable 22 | # shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh 23 | toolchain="$rust_stable" 24 | ;; 25 | esac 26 | 27 | set -x 28 | exec cargo "+${toolchain}" "${@}" 29 | -------------------------------------------------------------------------------- /storage-bigtable/build-proto/src/main.rs: -------------------------------------------------------------------------------- 1 | fn main() -> Result<(), std::io::Error> { 2 | const PROTOC_ENVAR: &str = "PROTOC"; 3 | if std::env::var(PROTOC_ENVAR).is_err() { 4 | #[cfg(not(windows))] 5 | std::env::set_var(PROTOC_ENVAR, protobuf_src::protoc()); 6 | } 7 | 8 | let manifest_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")); 9 | 10 | let out_dir = manifest_dir.join("../proto"); 11 | let googleapis = manifest_dir.join("googleapis"); 12 | 13 | println!("Google API directory: {}", googleapis.display()); 14 | println!("output directory: {}", out_dir.display()); 15 | 16 | tonic_build::configure() 17 | .build_client(true) 18 | .build_server(false) 19 | .out_dir(&out_dir) 20 | .compile( 21 | &[googleapis.join("google/bigtable/v2/bigtable.proto")], 22 | &[googleapis], 23 | ) 24 | } 25 | -------------------------------------------------------------------------------- /storage-bigtable/src/root_ca_certificate.rs: -------------------------------------------------------------------------------- 1 | use { 2 | std::{fs::File, io::Read}, 3 | tonic::transport::Certificate, 4 | }; 5 | 6 | pub fn load() -> Result<Certificate, String> { 7 | // Respect the standard GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment variable if present, 8 | // otherwise use the built-in root certificate 9 | let pem = match std::env::var("GRPC_DEFAULT_SSL_ROOTS_FILE_PATH").ok() { 10 | Some(cert_file) => File::open(&cert_file) 11 | .and_then(|mut file| { 12 | let mut pem = Vec::new(); 13 | file.read_to_end(&mut pem).map(|_| pem) 14 | }) 15 | .map_err(|err| format!("Failed to read {cert_file}: {err}"))?, 16 | None => { //
PEM file from Google Trust Services (https://pki.goog/roots.pem) 18 | include_bytes!("pki-goog-roots.pem").to_vec() 19 | } 20 | }; 21 | Ok(Certificate::from_pem(pem)) 22 | } 23 | -------------------------------------------------------------------------------- /net-utils/src/lib.rs: -------------------------------------------------------------------------------- 1 | use { 2 | std::{ 3 | net::{IpAddr, ToSocketAddrs}, 4 | }, 5 | url::Url, 6 | }; 7 | 8 | pub fn parse_host(host: &str) -> Result<IpAddr, String> { 9 | // First, check if the host syntax is valid. This check is needed because addresses 10 | // such as `("localhost:1234", 0)` will resolve to IPs on some networks. 11 | let parsed_url = Url::parse(&format!("http://{host}")).map_err(|e| e.to_string())?; 12 | if parsed_url.port().is_some() { 13 | return Err(format!("Unexpected port in URL: {host}")); 14 | } 15 | 16 | // Next, check to see if it resolves to an IP address 17 | let ips: Vec<_> = (host, 0) 18 | .to_socket_addrs() 19 | .map_err(|err| err.to_string())? 20 | .map(|socket_address| socket_address.ip()) 21 | .collect(); 22 | if ips.is_empty() { 23 | Err(format!("Unable to resolve host: {host}")) 24 | } else { 25 | Ok(ips[0]) 26 | } 27 | } 28 | 29 | pub fn is_host(string: String) -> Result<(), String> { 30 | parse_host(&string).map(|_| ()) 31 | } 32 | 33 | -------------------------------------------------------------------------------- /storage-proto/build.rs: -------------------------------------------------------------------------------- 1 | fn main() -> Result<(), std::io::Error> { 2 | const PROTOC_ENVAR: &str = "PROTOC"; 3 | if std::env::var(PROTOC_ENVAR).is_err() { 4 | #[cfg(not(windows))] 5 | std::env::set_var(PROTOC_ENVAR, protobuf_src::protoc()); 6 | } 7 | 8 | let proto_base_path = std::path::PathBuf::from("proto"); 9 | let proto_files = ["confirmed_block.proto", "transaction_by_addr.proto", "car_index_entry.proto"]; 10 | let mut protos = Vec::new(); 11 | for proto_file in &proto_files { 12 | let proto = proto_base_path.join(proto_file); 13 | println!("cargo::rerun-if-changed={}", proto.display()); 14 | protos.push(proto); 15 | } 16 | 17 | tonic_build::configure() 18 | .build_client(true) 19 | .build_server(false) 20 | .type_attribute( 21 | "TransactionErrorType", 22 | "#[cfg_attr(test, derive(enum_iterator::Sequence))]", 23 | ) 24 | .type_attribute( 25 | "InstructionErrorType", 26 | "#[cfg_attr(test, derive(enum_iterator::Sequence))]", 27 | ) 28 | .compile(&protos, &[proto_base_path]) 29 | } 30 | -------------------------------------------------------------------------------- /rpc/build.rs: -------------------------------------------------------------------------------- 1 | extern crate rustc_version; 2 | use rustc_version::{version_meta, Channel}; 3 | 4 | fn main() { 5 | // Copied and adapted from 6 | // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example 7 | // Licensed under Apache-2.0 + MIT 8 | match version_meta().unwrap().channel { 9 | Channel::Stable => { 10 | println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); 11 | } 12 | Channel::Beta => { 13 | println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); 14 | } 15 | Channel::Nightly => { 16 | println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); 17 | } 18 | Channel::Dev => { 19 | println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); 20 | // See https://github.com/solana-labs/solana/issues/11055 21 | // We may be running the custom `rust-bpf-builder` toolchain, 22 | // which currently needs
`#![feature(proc_macro_hygiene)]` to 23 | // be applied. 24 | println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE"); 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /docker/Dockerfile.archival-rpc: -------------------------------------------------------------------------------- 1 | FROM rust:1.83-slim-bullseye AS build 2 | 3 | RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y --no-install-recommends \ 4 | apt-utils \ 5 | software-properties-common \ 6 | cmake \ 7 | build-essential \ 8 | libclang-dev \ 9 | libudev-dev \ 10 | libssl-dev \ 11 | && apt-get clean \ 12 | && rm -rf /var/lib/apt/lists/* 13 | 14 | 15 | RUN USER=root cargo new --bin solana 16 | WORKDIR /solana 17 | 18 | COPY . /solana 19 | 20 | RUN cargo build --release 21 | 22 | 23 | FROM rust:1.83-slim-bullseye 24 | 25 | RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \ 26 | curl \ 27 | && apt-get update \ 28 | && apt-get clean \ 29 | && rm -rf /var/lib/apt/lists/* 30 | 31 | WORKDIR /usr/local/bin 32 | 33 | COPY --from=build /solana/target/release/archival-rpc . 34 | #COPY docker/config/.env.test ./.env 35 | 36 | RUN chmod +x archival-rpc 37 | 38 | ENV RUST_LOG=debug 39 | ENV RUST_BACKTRACE=1 40 | 41 | ENTRYPOINT ["./archival-rpc", "--bind-address=0.0.0.0", "--enable-rpc-hbase-ledger-storage", "--rpc-hbase-address=hbase:9090", "--hdfs-url=hdfs://51.158.61.150:8020/", "--hdfs-path=/chain-archives/sol/car_test"] 42 | -------------------------------------------------------------------------------- /storage-bigtable/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## BigTable Setup 3 | 4 | ### Development Environment 5 | The Cloud BigTable emulator can be used during development/test. See 6 | https://cloud.google.com/bigtable/docs/emulator for general setup information. 7 | 8 | Process: 9 | 1. Run `gcloud beta emulators bigtable start` in the background 10 | 2. Run `$(gcloud beta emulators bigtable env-init)` to establish the `BIGTABLE_EMULATOR_HOST` environment variable 11 | 3. Run `./init-bigtable.sh` to configure the emulator 12 | 4. Develop/test 13 | 14 | ### Production Environment 15 | Export a standard `GOOGLE_APPLICATION_CREDENTIALS` environment variable pointing to your 16 | service account credentials. The project should contain a BigTable instance 17 | called `solana-ledger` that has been initialized by running the `./init-bigtable.sh` script. 18 | 19 | Depending on what operation mode is required, either the 20 | `https://www.googleapis.com/auth/bigtable.data` or 21 | `https://www.googleapis.com/auth/bigtable.data.readonly` OAuth scope will be 22 | requested using the provided credentials. 23 | 24 | #### Forward proxy 25 | Export the `BIGTABLE_PROXY` environment variable for the forward proxy as you would 26 | for `HTTP_PROXY`. This will establish a tunnel through the forward proxy for 27 | gRPC traffic (the tunneled traffic will still use TLS as normal).
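For reference, the production steps above can be combined as follows (a minimal sketch; the credential path and proxy host are placeholder values, not taken from this repository):

```bash
# Placeholder path to a service account key file
export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json
# Optional: tunnel gRPC traffic through a forward proxy (placeholder host)
export BIGTABLE_PROXY=http://proxy.example.com:3128
# Create the expected tables in the `solana-ledger` instance
./init-bigtable.sh
```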
28 | -------------------------------------------------------------------------------- /storage-adapter/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solana-storage-adapter" 3 | version = { workspace = true } 4 | description = "Solana Storage Adapter" 5 | repository = "https://github.com/solana-labs/solana" 6 | license = "AGPL-3.0-only" 7 | homepage = "https://solana.com/" 8 | edition = "2021" 9 | 10 | [dependencies] 11 | async-trait = { workspace = true } 12 | log = { workspace = true } 13 | solana-transaction-status = { workspace = true } 14 | thiserror = { workspace = true } 15 | tokio = { workspace = true } 16 | serde = { workspace = true } 17 | serde_derive = { workspace = true } 18 | md5 = { workspace = true } 19 | enum-iterator = "1.2.0" 20 | bzip2 = "0.4.3" 21 | flate2 = "1.0.24" 22 | zstd = "0.11.2" 23 | bincode = "1.3.3" 24 | xxhash-rust = { version = "0.8.12", features = ["xxh3", "xxh32"] } 25 | 26 | solana-clock = { workspace = true } 27 | solana-pubkey = { workspace = true } 28 | solana-signature = { workspace = true } 29 | solana-message = { workspace = true } 30 | solana-serde = { workspace = true } 31 | solana-transaction = { workspace = true } 32 | solana-transaction-error = { workspace = true } 33 | solana-transaction-status-client-types = { workspace = true } 34 | 35 | [lib] 36 | crate-type = ["lib"] 37 | name = "solana_storage_adapter" 38 | 39 | [package.metadata.docs.rs] 40 | targets = ["x86_64-unknown-linux-gnu"] -------------------------------------------------------------------------------- /storage-bigtable/proto/google.rpc.rs: -------------------------------------------------------------------------------- 1 | /// The `Status` type defines a logical error model that is suitable for 2 | /// different programming environments, including REST APIs and RPC APIs. It is 3 | /// used by \[gRPC\](<https://github.com/grpc>). Each `Status` message contains 4 | /// three pieces of data: error code, error message, and error details. 5 | /// 6 | /// You can find out more about this error model and how to work with it in the 7 | /// [API Design Guide](<https://cloud.google.com/apis/design/errors>). 8 | #[allow(clippy::derive_partial_eq_without_eq)] 9 | #[derive(Clone, PartialEq, ::prost::Message)] 10 | pub struct Status { 11 | /// The status code, which should be an enum value of \[google.rpc.Code][google.rpc.Code\]. 12 | #[prost(int32, tag = "1")] 13 | pub code: i32, 14 | /// A developer-facing error message, which should be in English. Any 15 | /// user-facing error message should be localized and sent in the 16 | /// \[google.rpc.Status.details][google.rpc.Status.details\] field, or localized by the client. 17 | #[prost(string, tag = "2")] 18 | pub message: ::prost::alloc::string::String, 19 | /// A list of messages that carry the error details. There is a common set of 20 | /// message types for APIs to use.
21 | #[prost(message, repeated, tag = "3")] 22 | pub details: ::prost::alloc::vec::Vec<::prost_types::Any>, 23 | } 24 | -------------------------------------------------------------------------------- /storage-proto/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solana-storage-proto" 3 | description = "Solana Storage Protobuf Definitions" 4 | documentation = "https://docs.rs/solana-storage-proto" 5 | version = { workspace = true } 6 | authors = { workspace = true } 7 | repository = { workspace = true } 8 | homepage = { workspace = true } 9 | license = { workspace = true } 10 | edition = { workspace = true } 11 | 12 | [dependencies] 13 | bincode = { workspace = true } 14 | prost = { workspace = true } 15 | serde = { workspace = true } 16 | 17 | solana-transaction-status = { workspace = true } 18 | solana-serde = { workspace = true } 19 | solana-transaction-context = { workspace = true } 20 | solana-message = { workspace = true } 21 | solana-transaction = { workspace = true } 22 | solana-transaction-error = { workspace = true } 23 | solana-pubkey = { workspace = true } 24 | solana-signature = { workspace = true } 25 | solana-hash = { workspace = true } 26 | solana-instruction = { workspace = true } 27 | solana-clock = { workspace = true } 28 | solana-account-decoder-client-types = { workspace = true } 29 | solana-reward-info = { workspace = true } 30 | solana-transaction-status-client-types = { workspace = true } 31 | 32 | [dev-dependencies] 33 | enum-iterator = { workspace = true } 34 | bs58 = { workspace = true } 35 | 36 | [lib] 37 | crate-type = ["lib"] 38 | name = "solana_storage_proto" 39 | 40 | [package.metadata.docs.rs] 41 | targets = ["x86_64-unknown-linux-gnu"] 42 | 43 | [build-dependencies] 44 | tonic-build = { workspace = true } 45 | 46 | # windows users should install the protobuf compiler manually and set the PROTOC 47 | # envar to point to the installed binary 48 | [target."cfg(not(windows))".build-dependencies] 49 | protobuf-src = { workspace = true } 50 | -------------------------------------------------------------------------------- /storage-bigtable/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solana-storage-bigtable" 3 | description = "Solana Storage BigTable" 4 | documentation = "https://docs.rs/solana-storage-bigtable" 5 | version = { workspace = true } 6 | authors = { workspace = true } 7 | repository = { workspace = true } 8 | homepage = { workspace = true } 9 | license = { workspace = true } 10 | edition = { workspace = true } 11 | 12 | [dependencies] 13 | async-trait = { workspace = true } 14 | backoff = { workspace = true, features = ["tokio"] } 15 | bincode = { workspace = true } 16 | bytes = { workspace = true } 17 | bzip2 = { workspace = true } 18 | enum-iterator = { workspace = true } 19 | flate2 = { workspace = true } 20 | futures = { workspace = true } 21 | goauth = { workspace = true } 22 | http = { workspace = true } 23 | hyper = { workspace = true } 24 | hyper-proxy = { workspace = true } 25 | log = { workspace = true } 26 | prost = { workspace = true } 27 | prost-types = { workspace = true } 28 | serde = { workspace = true } 29 | serde_derive = { workspace = true } 30 | smpl_jwt = { workspace = true } 31 | solana-storage-adapter = { workspace = true } 32 | solana-storage-proto = { workspace = true } 33 | solana-transaction-status = { workspace = true } 34 | thiserror = { workspace = true } 35 | tokio = { 
workspace = true } 36 | tonic = { workspace = true, features = ["tls", "transport"] } 37 | zstd = { workspace = true } 38 | 39 | solana-clock = { workspace = true } 40 | solana-pubkey = { workspace = true } 41 | solana-signature = { workspace = true } 42 | solana-sysvar = { workspace = true } 43 | solana-time-utils = { workspace = true } 44 | solana-transaction-status-client-types = { workspace = true } 45 | agave-reserved-account-keys = { workspace = true } 46 | 47 | # openssl is a dependency of the goauth and smpl_jwt crates, but explicitly 48 | # declare it here as well to activate the "vendored" feature that builds OpenSSL 49 | # statically... 50 | [target."cfg(not(windows))".dependencies] 51 | openssl = { workspace = true, features = ["vendored"] } 52 | # ...except on Windows to avoid having to deal with getting CI past a build-time 53 | # Perl dependency 54 | [target."cfg(windows)".dependencies] 55 | openssl = { workspace = true, features = [] } 56 | 57 | [lib] 58 | crate-type = ["lib"] 59 | name = "solana_storage_bigtable" 60 | 61 | [package.metadata.docs.rs] 62 | targets = ["x86_64-unknown-linux-gnu"] 63 | -------------------------------------------------------------------------------- /storage-hbase/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solana-storage-hbase" 3 | version = { workspace = true } 4 | description = "Solana Storage HBase" 5 | repository = "https://github.com/solana-labs/solana" 6 | license = "Apache-2.0" 7 | homepage = "https://solana.com/" 8 | edition = "2021" 9 | 10 | [dependencies] 11 | async-trait = { workspace = true } 12 | hbase-thrift = { workspace = true } 13 | thrift = { workspace = true } 14 | 15 | backoff = { workspace = true } 16 | bincode = { workspace = true } 17 | bytes = { workspace = true } 18 | enum-iterator = { workspace = true } 19 | bzip2 = { workspace = true } 20 | flate2 = { workspace = true } 21 | zstd = { workspace = true } 22 | futures = { workspace = true } 23 | log = { workspace = true } 24 | prost = { workspace = true } 25 | serde = { workspace = true } 26 | serde_derive = { workspace = true } 27 | memcache = { workspace = true } 28 | solana-metrics = { workspace = true } 29 | solana-storage-adapter = { workspace = true } 30 | solana-storage-proto = { workspace = true } 31 | solana-transaction-status = { workspace = true } 32 | thiserror = { workspace = true } 33 | chrono = { workspace = true } 34 | dexter-ipfs-car = { workspace = true } 35 | tokio-util = { workspace = true } 36 | async-compression = { workspace = true } 37 | hdfs-native = { workspace = true } 38 | anyhow = { workspace = true } 39 | reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "http2", "gzip", "stream"] } 40 | 41 | solana-clock = { workspace = true } 42 | solana-pubkey = { workspace = true } 43 | solana-signature = { workspace = true } 44 | solana-sysvar = { workspace = true } 45 | solana-message = { workspace = true } 46 | solana-transaction = { workspace = true } 47 | solana-transaction-status-client-types = { workspace = true } 48 | agave-reserved-account-keys = { workspace = true } 49 | 50 | [dependencies.tokio] 51 | features = ["rt-multi-thread", "macros", "io-util", "rt", "net", "time"] 52 | version = "1.43.0" 53 | 54 | # openssl is a dependency of the goauth and smpl_jwt crates, but explicitly 55 | # declare it here as well to activate the "vendored" feature that builds OpenSSL 56 | # statically... 
57 | [target."cfg(not(windows))".dependencies] 58 | openssl = { version = "0.10", features = ["vendored"] } 59 | # ...except on Windows to avoid having to deal with getting CI past a build-time 60 | # Perl dependency 61 | [target."cfg(windows)".dependencies] 62 | openssl = { version = "0.10", features = [] } 63 | 64 | [lib] 65 | crate-type = ["lib"] 66 | name = "solana_storage_hbase" 67 | 68 | [package.metadata.docs.rs] 69 | targets = ["x86_64-unknown-linux-gnu"] 70 | -------------------------------------------------------------------------------- /metrics/src/lib.rs: -------------------------------------------------------------------------------- 1 | use { 2 | prometheus::{ 3 | HistogramTimer, 4 | HistogramVec, 5 | IntCounterVec, 6 | IntGauge, 7 | register_histogram_vec, 8 | register_int_counter_vec, 9 | register_int_gauge, 10 | }, 11 | std::{ 12 | sync::{ 13 | Arc 14 | }, 15 | }, 16 | }; 17 | 18 | #[derive(Clone)] 19 | pub struct Metrics { 20 | request_counter: Arc<IntCounterVec>, 21 | tx_request_counter: Arc<IntCounterVec>, 22 | request_duration_histogram: Arc<HistogramVec>, 23 | pub idle_threads_counter: Arc<IntGauge>, 24 | active_threads_counter: Arc<IntGauge>, 25 | } 26 | 27 | impl Metrics { 28 | pub fn new() -> Self { 29 | Self { 30 | request_counter: Arc::new(register_int_counter_vec!( 31 | "requests_total", 32 | "Total number of RPC requests", 33 | &["method"] 34 | ).unwrap()), 35 | 36 | tx_request_counter: Arc::new(register_int_counter_vec!( 37 | "tx_requests_total", 38 | "Total number of getTransaction RPC requests", 39 | &["method", "source", "epoch", "type"] 40 | ).unwrap()), 41 | 42 | request_duration_histogram: Arc::new(register_histogram_vec!( 43 | "request_duration_seconds", 44 | "Duration of RPC requests in seconds", 45 | &["method"] 46 | ).unwrap()), 47 | 48 | idle_threads_counter: Arc::new(register_int_gauge!( 49 | "rpc_idle_threads_total", 50 | "Total number of idle threads in the RPC service" 51 | ).unwrap()), 52 | 53 | active_threads_counter: Arc::new(register_int_gauge!( 54 | "rpc_active_threads_total", 55 | "Total number of active threads in the RPC service" 56 | ).unwrap()), 57 | } 58 | } 59 | 60 | pub fn increment_total_requests(&self, method: &str) { 61 | self.request_counter.with_label_values(&[method]).inc(); 62 | } 63 | 64 | pub fn record_transaction(&self, source: &str, epoch: u64, tx_type: &str) { 65 | self.tx_request_counter 66 | .with_label_values(&["getTransaction", source, &epoch.to_string(), tx_type]) 67 | .inc(); 68 | } 69 | 70 | pub fn record_duration(&self, method: &str) -> HistogramTimer { 71 | self.request_duration_histogram.with_label_values(&[method]).start_timer() 72 | } 73 | 74 | pub fn thread_started(&self) { 75 | self.active_threads_counter.inc(); 76 | self.idle_threads_counter.dec(); 77 | } 78 | 79 | pub fn thread_stopped(&self) { 80 | self.active_threads_counter.dec(); 81 | self.idle_threads_counter.inc(); 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /rpc/src/logging.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::integer_arithmetic)] 2 | use { 3 | std::{ 4 | env, 5 | fs::{OpenOptions}, 6 | process::exit, 7 | thread::JoinHandle, 8 | }, 9 | }; 10 | 11 | #[cfg(unix)] 12 | fn redirect_stderr(filename: &str) { 13 | use std::os::unix::io::AsRawFd; 14 | match OpenOptions::new() 15 | .write(true) 16 | .create(true) 17 | .append(true) 18 | .open(filename) 19 | { 20 | Ok(file) => unsafe { 21 | libc::dup2(file.as_raw_fd(), libc::STDERR_FILENO); 22 | }, 23 | Err(err) => eprintln!("Unable to open
{filename}: {err}"), 24 | } 25 | } 26 | 27 | // Redirect stderr to a file with support for logrotate by sending a SIGUSR1 to the process. 28 | // 29 | // Upon success, future `log` macros and `eprintln!()` can be found in the specified log file. 30 | pub fn redirect_stderr_to_file(logfile: Option) -> Option> { 31 | // Default to RUST_BACKTRACE=1 for more informative launcher logs 32 | if env::var_os("RUST_BACKTRACE").is_none() { 33 | env::set_var("RUST_BACKTRACE", "1") 34 | } 35 | 36 | let filter = "solana=info"; 37 | match logfile { 38 | None => { 39 | solana_logger::setup_with_default(filter); 40 | None 41 | } 42 | Some(logfile) => { 43 | #[cfg(unix)] 44 | { 45 | use log::info; 46 | let mut signals = 47 | signal_hook::iterator::Signals::new([signal_hook::consts::SIGUSR1]) 48 | .unwrap_or_else(|err| { 49 | eprintln!("Unable to register SIGUSR1 handler: {err:?}"); 50 | exit(1); 51 | }); 52 | 53 | solana_logger::setup_with_default(filter); 54 | redirect_stderr(&logfile); 55 | Some( 56 | std::thread::Builder::new() 57 | .name("solSigUsr1".into()) 58 | .spawn(move || { 59 | for signal in signals.forever() { 60 | info!( 61 | "received SIGUSR1 ({}), reopening log file: {:?}", 62 | signal, logfile 63 | ); 64 | redirect_stderr(&logfile); 65 | } 66 | }) 67 | .unwrap(), 68 | ) 69 | } 70 | #[cfg(not(unix))] 71 | { 72 | println!("logrotate is not supported on this platform"); 73 | solana_logger::setup_file_with_default(&logfile, filter); 74 | None 75 | } 76 | } 77 | } 78 | } 79 | 80 | -------------------------------------------------------------------------------- /rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solana-rpc" 3 | description = "Solana RPC" 4 | documentation = "https://docs.rs/solana-rpc" 5 | version = { workspace = true } 6 | authors = { workspace = true } 7 | repository = { workspace = true } 8 | homepage = { workspace = true } 9 | license = { workspace = true } 10 | edition = { workspace = true } 11 | 12 | [dependencies] 13 | crossbeam-channel = { workspace = true } 14 | jsonrpc-core = { workspace = true } 15 | jsonrpc-core-client = { workspace = true } 16 | jsonrpc-derive = { workspace = true } 17 | jsonrpc-http-server = { workspace = true } 18 | jsonrpc-server-utils = { workspace = true } 19 | prometheus = { workspace = true } 20 | lazy_static = { workspace = true } 21 | futures = { workspace = true } 22 | libc = { workspace = true } 23 | log = { workspace = true } 24 | serde = { workspace = true } 25 | serde_derive = { workspace = true } 26 | serde_json = { workspace = true } 27 | 28 | solana-perf = { workspace = true } 29 | solana-rpc-client-api = { workspace = true } 30 | solana-storage-hbase = { workspace = true } 31 | solana-storage-adapter = { workspace = true } 32 | solana-transaction-status = { workspace = true } 33 | solana-version = { workspace = true } 34 | solana-metrics = { workspace = true } 35 | 36 | thiserror = { workspace = true } 37 | tokio = { workspace = true } 38 | tokio-util = { workspace = true, features = ["codec", "compat"] } 39 | 40 | solana-clock = { workspace = true } 41 | solana-validator-exit = { workspace = true } 42 | solana-commitment-config = { workspace = true } 43 | solana-pubkey = { workspace = true } 44 | solana-signature = { workspace = true } 45 | solana-hash = { workspace = true } 46 | solana-transaction-status-client-types = { workspace = true } 47 | 48 | clap = { workspace = true } 49 | console = { workspace = true } 50 | num_cpus = { workspace = true } 51 | 
#solana-clap-utils = { workspace = true } 52 | solana-logger = { workspace = true } 53 | solana-net-utils = { workspace = true } 54 | solana-storage-bigtable = { workspace = true } 55 | symlink = { workspace = true } 56 | rayon = { workspace = true } 57 | solana-epoch-schedule = "3.0.0" 58 | solana-genesis-config = { version = "3.0.0", features = ["serde"] } 59 | solana-reward-info = "3.0.0" 60 | solana-epoch-rewards-hasher = "3.0.0" 61 | tar = { workspace = true } 62 | rand = { workspace = true } 63 | bzip2 = { workspace = true } 64 | 65 | [dev-dependencies] 66 | matches = { workspace = true } 67 | 68 | [build-dependencies] 69 | rustc_version = { workspace = true } 70 | 71 | [target.'cfg(not(target_env = "msvc"))'.dependencies] 72 | jemallocator = { workspace = true } 73 | 74 | [target."cfg(unix)".dependencies] 75 | libc = { workspace = true } 76 | signal-hook = { workspace = true } 77 | 78 | [lib] 79 | crate-type = ["lib"] 80 | name = "solana_rpc" 81 | 82 | [[bin]] 83 | name = "archival-rpc" 84 | path = "src/main.rs" 85 | 86 | [package.metadata.docs.rs] 87 | targets = ["x86_64-unknown-linux-gnu"] 88 | 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /rpc/src/rpc_network_info.rs: -------------------------------------------------------------------------------- 1 | use { 2 | std::net::{SocketAddr, IpAddr}, 3 | thiserror::Error, 4 | }; 5 | 6 | #[derive(Debug, Error)] 7 | pub enum Error { 8 | #[error("Duplicate IP address: {0}")] 9 | DuplicateIpAddr(IpAddr), 10 | #[error("Duplicate socket: {0}")] 11 | DuplicateSocket(/*key:*/ u8), 12 | #[error("Invalid IP address index: {index}, num addrs: {num_addrs}")] 13 | InvalidIpAddrIndex { index: u8, num_addrs: usize }, 14 | #[error("Invalid port: {0}")] 15 | InvalidPort(/*port:*/ u16), 16 | #[error("Invalid {0:?} (udp) and {1:?} (quic) sockets")] 17 | InvalidQuicSocket(Option<SocketAddr>, Option<SocketAddr>), 18 | #[error("IP addresses saturated")] 19 | IpAddrsSaturated, 20 | #[error("Multicast IP address: {0}")] 21 | MulticastIpAddr(IpAddr), 22 | #[error("Port offsets overflow")] 23 | PortOffsetsOverflow, 24 | #[error("Socket not found: {0}")] 25 | SocketNotFound(/*key:*/ u8), 26 | #[error("Unspecified IP address: {0}")] 27 | UnspecifiedIpAddr(IpAddr), 28 | #[error("Unused IP address: {0}")] 29 | UnusedIpAddr(IpAddr), 30 | } 31 | 32 | /// Structure representing a node on the network 33 | #[derive( 34 | Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Deserialize, Serialize, 35 | )] 36 | pub struct RpcNetworkInfo { 37 | /// address to which to send JSON-RPC requests 38 | pub rpc: SocketAddr, 39 | } 40 | 41 | macro_rules! get_socket { 42 | ($name:ident) => { 43 | pub fn $name(&self) -> Result<SocketAddr, Error> { 44 | let socket = &self.$name; 45 | sanitize_socket(socket)?; 46 | Ok(socket).copied() 47 | } 48 | }; 49 | } 50 | 51 | macro_rules! set_socket { 52 | ($name:ident, $key:ident) => { 53 | pub fn $name<T>(&mut self, socket: T) -> Result<(), Error> 54 | where 55 | SocketAddr: From<T>, 56 | { 57 | let socket = SocketAddr::from(socket); 58 | sanitize_socket(&socket)?; 59 | self.$key = socket; 60 | Ok(()) 61 | } 62 | }; 63 | } 64 | 65 | #[macro_export] 66 | macro_rules! socketaddr { 67 | ($ip:expr, $port:expr) => { 68 | std::net::SocketAddr::from((std::net::Ipv4Addr::from($ip), $port)) 69 | }; 70 | ($str:expr) => {{ 71 | $str.parse::<std::net::SocketAddr>().unwrap() 72 | }}; 73 | } 74 | 75 | #[macro_export] 76 | macro_rules!
socketaddr_any { 77 | () => { 78 | socketaddr!(std::net::Ipv4Addr::UNSPECIFIED, 0) 79 | }; 80 | } 81 | 82 | impl Default for RpcNetworkInfo { 83 | fn default() -> Self { 84 | RpcNetworkInfo { 85 | rpc: socketaddr_any!(), 86 | } 87 | } 88 | } 89 | 90 | impl RpcNetworkInfo { 91 | get_socket!(rpc); 92 | set_socket!(set_rpc, rpc); 93 | } 94 | 95 | pub(crate) fn sanitize_socket(socket: &SocketAddr) -> Result<(), Error> { 96 | if socket.port() == 0u16 { 97 | return Err(Error::InvalidPort(socket.port())); 98 | } 99 | let addr = socket.ip(); 100 | if addr.is_unspecified() { 101 | return Err(Error::UnspecifiedIpAddr(addr)); 102 | } 103 | if addr.is_multicast() { 104 | return Err(Error::MulticastIpAddr(addr)); 105 | } 106 | Ok(()) 107 | } 108 | -------------------------------------------------------------------------------- /storage-adapter/src/compression.rs: -------------------------------------------------------------------------------- 1 | use { 2 | enum_iterator::{all, Sequence}, 3 | std::io::{self, BufReader, Read, Write}, 4 | }; 5 | 6 | #[derive(Debug, Serialize, Deserialize, Sequence)] 7 | pub enum CompressionMethod { 8 | NoCompression, 9 | Bzip2, 10 | Gzip, 11 | Zstd, 12 | } 13 | 14 | fn decompress_reader<'a, R: Read + 'a>( 15 | method: CompressionMethod, 16 | stream: R, 17 | ) -> Result, io::Error> { 18 | let buf_reader = BufReader::new(stream); 19 | let decompress_reader: Box = match method { 20 | CompressionMethod::Bzip2 => Box::new(bzip2::bufread::BzDecoder::new(buf_reader)), 21 | CompressionMethod::Gzip => Box::new(flate2::read::GzDecoder::new(buf_reader)), 22 | CompressionMethod::Zstd => Box::new(zstd::stream::read::Decoder::new(buf_reader)?), 23 | CompressionMethod::NoCompression => Box::new(buf_reader), 24 | }; 25 | Ok(decompress_reader) 26 | } 27 | 28 | pub fn decompress(data: &[u8]) -> Result, io::Error> { 29 | let method_size = bincode::serialized_size(&CompressionMethod::NoCompression).unwrap(); 30 | if (data.len() as u64) < method_size { 31 | return Err(io::Error::new( 32 | io::ErrorKind::Other, 33 | format!("data len too small: {}", data.len()), 34 | )); 35 | } 36 | let method = bincode::deserialize(&data[..method_size as usize]).map_err(|err| { 37 | io::Error::new( 38 | io::ErrorKind::Other, 39 | format!("method deserialize failed: {err}"), 40 | ) 41 | })?; 42 | 43 | let mut reader = decompress_reader(method, &data[method_size as usize..])?; 44 | let mut uncompressed_data = vec![]; 45 | reader.read_to_end(&mut uncompressed_data)?; 46 | Ok(uncompressed_data) 47 | } 48 | 49 | pub fn compress(method: CompressionMethod, data: &[u8]) -> Result, io::Error> { 50 | let mut compressed_data = bincode::serialize(&method).unwrap(); 51 | compressed_data.extend( 52 | match method { 53 | CompressionMethod::Bzip2 => { 54 | let mut e = bzip2::write::BzEncoder::new(Vec::new(), bzip2::Compression::best()); 55 | e.write_all(data)?; 56 | e.finish()? 57 | } 58 | CompressionMethod::Gzip => { 59 | let mut e = 60 | flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default()); 61 | e.write_all(data)?; 62 | e.finish()? 63 | } 64 | CompressionMethod::Zstd => { 65 | let mut e = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap(); 66 | e.write_all(data)?; 67 | e.finish()? 
68 | } 69 | CompressionMethod::NoCompression => data.to_vec(), 70 | } 71 | .into_iter(), 72 | ); 73 | 74 | Ok(compressed_data) 75 | } 76 | 77 | pub fn compress_best(data: &[u8]) -> Result<Vec<u8>, io::Error> { 78 | let mut candidates = vec![]; 79 | for method in all::<CompressionMethod>() { 80 | candidates.push(compress(method, data)?); 81 | } 82 | 83 | Ok(candidates 84 | .into_iter() 85 | .min_by(|a, b| a.len().cmp(&b.len())) 86 | .unwrap()) 87 | } 88 | 89 | #[cfg(test)] 90 | mod test { 91 | use super::*; 92 | 93 | #[test] 94 | fn test_compress_uncompress() { 95 | let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; 96 | assert_eq!( 97 | decompress(&compress_best(&data).expect("compress_best")).expect("decompress"), 98 | data 99 | ); 100 | } 101 | 102 | #[test] 103 | fn test_compress() { 104 | let data = vec![0; 256]; 105 | assert!(compress_best(&data).expect("compress_best").len() < data.len()); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /rpc/src/rpc_server.rs: -------------------------------------------------------------------------------- 1 | 2 | use { 3 | crate::{ 4 | rpc_network_node::*, 5 | request_processor::JsonRpcConfig, 6 | rpc_service::JsonRpcService, 7 | }, 8 | solana_metrics::Metrics, 9 | solana_validator_exit::{ 10 | Exit, 11 | }, 12 | std::{ 13 | net::SocketAddr, 14 | path::{ 15 | Path, 16 | }, 17 | sync::{ 18 | atomic::{ 19 | AtomicBool, 20 | Ordering 21 | }, 22 | Arc, RwLock, 23 | }, 24 | }, 25 | }; 26 | 27 | pub struct RpcServiceConfig { 28 | pub rpc_config: JsonRpcConfig, 29 | pub rpc_addr: Option<SocketAddr>, 30 | pub enforce_ulimit_nofile: bool, 31 | pub rpc_service_exit: Arc<RwLock<Exit>>, 32 | } 33 | 34 | impl Default for RpcServiceConfig { 35 | fn default() -> Self { 36 | Self { 37 | rpc_config: JsonRpcConfig::default(), 38 | rpc_addr: None, 39 | enforce_ulimit_nofile: true, 40 | 41 | rpc_service_exit: Arc::new(RwLock::new(Exit::default())), 42 | } 43 | } 44 | } 45 | 46 | impl RpcServiceConfig { 47 | pub fn default_for_storage_rpc() -> Self { 48 | Self { 49 | enforce_ulimit_nofile: false, 50 | rpc_config: JsonRpcConfig::default_for_storage_rpc(), 51 | ..Self::default() 52 | } 53 | } 54 | } 55 | 56 | 57 | pub struct RpcService { 58 | rpc_service_exit: Arc<RwLock<Exit>>, 59 | json_rpc_service: Option<JsonRpcService>, 60 | } 61 | 62 | impl RpcService { 63 | #[allow(clippy::too_many_arguments)] 64 | pub fn new( 65 | node: RpcNetworkNode, 66 | log_path: &Path, 67 | config: &RpcServiceConfig, 68 | // should_check_duplicate_instance: bool, 69 | ) -> Result<Self, String> { 70 | if rayon::ThreadPoolBuilder::new() 71 | .thread_name(|ix| format!("solRayonGlob{ix:02}")) 72 | .build_global() 73 | .is_err() 74 | { 75 | warn!("Rayon global thread pool already initialized"); 76 | } 77 | 78 | if !log_path.is_dir() { 79 | return Err(format!( 80 | "log directory does not exist or is not accessible: {log_path:?}" 81 | )); 82 | } 83 | 84 | let exit = Arc::new(AtomicBool::new(false)); 85 | { 86 | let exit = exit.clone(); 87 | config 88 | .rpc_service_exit 89 | .write() 90 | .unwrap() 91 | .register_exit(Box::new(move || exit.store(true, Ordering::Relaxed))); 92 | } 93 | 94 | Self::print_node_info(&node); 95 | 96 | let metrics = Arc::new(Metrics::new()); 97 | 98 | let json_rpc_service = if let Some(rpc_addr) = config.rpc_addr { 99 | let json_rpc_service = JsonRpcService::new( 100 | rpc_addr, 101 | config.rpc_config.clone(), 102 | log_path, 103 | config.rpc_service_exit.clone(), 104 | metrics, 105 | )?; 106 | 107 | Some(json_rpc_service) 108 | } else { 109 | None 110 | }; 111 | 112 | // datapoint_info!( 113 | // "launcher-new",
114 | // ("version", solana_version::version!(), String) 115 | // ); 116 | 117 | Ok(Self { 118 | json_rpc_service, 119 | rpc_service_exit: config.rpc_service_exit.clone(), 120 | }) 121 | } 122 | 123 | pub fn exit(&mut self) { 124 | self.rpc_service_exit.write().unwrap().exit(); 125 | } 126 | 127 | pub fn close(mut self) { 128 | self.exit(); 129 | self.join(); 130 | } 131 | 132 | fn print_node_info(node: &RpcNetworkNode) { 133 | info!("{:?}", node.info); 134 | } 135 | 136 | pub fn join(self) { 137 | if let Some(json_rpc_service) = self.json_rpc_service { 138 | json_rpc_service.join().expect("rpc_service"); 139 | } 140 | } 141 | } -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | # "rpc-core", 4 | "rpc", 5 | "storage-adapter", 6 | "storage-hbase", 7 | "storage-bigtable", 8 | "storage-bigtable/build-proto", 9 | "storage-proto", 10 | # "launcher", 11 | "net-utils", 12 | "metrics", 13 | "perf" 14 | ] 15 | 16 | exclude = [] 17 | 18 | # This prevents a Travis CI error when building for Windows. 19 | resolver = "2" 20 | 21 | [workspace.package] 22 | version = "3.0.1" 23 | authors = ["Aurimas Narkevicius "] 24 | repository = "https://github.com/dexterlaboss/archival-rpc" 25 | homepage = "https://dexterlab.com/" 26 | license = "AGPL-3.0-only" 27 | edition = "2021" 28 | 29 | [workspace.dependencies] 30 | async-trait = "0.1.68" 31 | backoff = { version = "0.4.0", features = ["tokio"] } 32 | base64 = "0.21.2" 33 | bincode = "1.3.3" 34 | bs58 = "0.4.0" 35 | bytes = "1.2" 36 | bzip2 = "0.4.4" 37 | clap = "2.33.1" 38 | console = "0.15.7" 39 | crossbeam-channel = "0.5.8" 40 | enum-iterator = "1.4.1" 41 | fd-lock = "3.0.12" 42 | flate2 = "1.0.26" 43 | futures = "0.3.28" 44 | goauth = "0.13.1" 45 | http = "0.2.9" 46 | hyper = "0.14.26" 47 | hyper-proxy = "0.9.1" 48 | Inflector = "0.11.4" 49 | jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] } 50 | jsonrpc-core = "18.0.0" 51 | jsonrpc-core-client = "18.0.0" 52 | jsonrpc-derive = "18.0.0" 53 | jsonrpc-http-server = "18.0.0" 54 | jsonrpc-server-utils = "18.0" 55 | prometheus = "0.13.4" 56 | lazy_static = "1.4.0" 57 | libc = "0.2.144" 58 | log = "0.4.17" 59 | matches = "0.1.10" 60 | num_cpus = "1.15.0" 61 | openssl = "0.10" 62 | prost = "0.11.9" 63 | prost-types = "0.11.9" 64 | protobuf-src = "1.1.0" 65 | rayon = "1.7.0" 66 | rustc_version = "0.4" 67 | serde = "1.0.163" 68 | serde_bytes = "0.11.9" 69 | serde_derive = "1.0.103" 70 | serde_json = "1.0.96" 71 | serde_with = { version = "2.3.3", default-features = false } 72 | signal-hook = "0.3.15" 73 | smpl_jwt = "0.7.1" 74 | md5 = "0.7" 75 | memcache = "0.17.2" 76 | tar = "0.4.43" 77 | rand = "0.8.5" 78 | 79 | solana-logger = "=3.0.0" 80 | solana-rpc-client-api = "=3.0.10" 81 | solana-transaction-status = "=3.0.10" 82 | solana-version = "=3.0.10" 83 | 84 | solana-clock = "=3.0.0" 85 | solana-validator-exit = "=3.0.0" 86 | solana-commitment-config = "=3.0.0" 87 | solana-pubkey = "=3.0.0" 88 | solana-signature = "=3.1.0" 89 | solana-message = "=3.0.1" 90 | solana-serde = "=3.0.0" 91 | solana-transaction = "=3.0.1" 92 | solana-transaction-error = "=3.0.0" 93 | solana-transaction-context = { version = "=3.0.10", features = ["serde"] } 94 | solana-sysvar = "=3.0.0" 95 | solana-time-utils = "=3.0.0" 96 | solana-hash = "=3.0.0" 97 | solana-instruction = "=3.0.0" 98 | solana-reward-info = 
"=3.0.0" 99 | agave-reserved-account-keys = "=3.0.10" 100 | 101 | solana-account-decoder-client-types = "=3.0.10" 102 | solana-transaction-status-client-types = "=3.0.10" 103 | 104 | 105 | solana-metrics = { path = "metrics", version = "=3.0.1" } 106 | solana-net-utils = { path = "net-utils", version = "=3.0.1" } 107 | solana-perf = { path = "perf", version = "=3.0.1" } 108 | solana-rpc = { path = "rpc", version = "=3.0.1" } 109 | solana-storage-adapter = { path = "storage-adapter", version = "=3.0.1" } 110 | solana-storage-bigtable = { path = "storage-bigtable", version = "=3.0.1" } 111 | solana-storage-hbase = { path = "storage-hbase", version = "=3.0.1" } 112 | solana-storage-proto = { path = "storage-proto", version = "=3.0.1" } 113 | 114 | spl-memo = "=3.0.1" 115 | symlink = "0.1.0" 116 | sys-info = "0.9.1" 117 | thiserror = "1.0.40" 118 | tokio = { version = "1.43.0", features = ["full"] } 119 | tokio-serde = "0.8" 120 | tokio-util = { version = "0.7", features = ["io"] } 121 | tonic = "0.8.3" 122 | tonic-build = "0.8.4" 123 | zstd = "0.11.2" 124 | url = "2.3.1" 125 | nix = "0.29.0" 126 | caps = "0.5.5" 127 | chrono = "0.4.38" 128 | async-compression = { version = "0.4.18", features = ["tokio", "gzip"] } 129 | hdfs-native = "0.11" 130 | anyhow = "1.0" 131 | hbase-thrift = "1.1.0" 132 | thrift = "0.17.0" 133 | 134 | dexter-ipfs-car = "0.3.0" 135 | 136 | [dependency-overrides] 137 | solana-sha256-hasher = "=3.0.0" 138 | 139 | -------------------------------------------------------------------------------- /storage-proto/proto/confirmed_block.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package solana.storage.ConfirmedBlock; 4 | 5 | message ConfirmedBlock { 6 | string previous_blockhash = 1; 7 | string blockhash = 2; 8 | uint64 parent_slot = 3; 9 | repeated ConfirmedTransaction transactions = 4; 10 | repeated Reward rewards = 5; 11 | UnixTimestamp block_time = 6; 12 | BlockHeight block_height = 7; 13 | NumPartitions num_partitions = 8; 14 | } 15 | 16 | message ConfirmedTransaction { 17 | Transaction transaction = 1; 18 | TransactionStatusMeta meta = 2; 19 | } 20 | 21 | message ConfirmedTransactionWithStatusMeta { 22 | uint64 slot = 1; 23 | ConfirmedTransaction tx_with_meta = 2; 24 | UnixTimestamp block_time = 3; 25 | } 26 | 27 | message Transaction { 28 | repeated bytes signatures = 1; 29 | Message message = 2; 30 | } 31 | 32 | message Message { 33 | MessageHeader header = 1; 34 | repeated bytes account_keys = 2; 35 | bytes recent_blockhash = 3; 36 | repeated CompiledInstruction instructions = 4; 37 | bool versioned = 5; 38 | repeated MessageAddressTableLookup address_table_lookups = 6; 39 | } 40 | 41 | message MessageHeader { 42 | uint32 num_required_signatures = 1; 43 | uint32 num_readonly_signed_accounts = 2; 44 | uint32 num_readonly_unsigned_accounts = 3; 45 | } 46 | 47 | message MessageAddressTableLookup { 48 | bytes account_key = 1; 49 | bytes writable_indexes = 2; 50 | bytes readonly_indexes = 3; 51 | } 52 | 53 | message TransactionStatusMeta { 54 | TransactionError err = 1; 55 | uint64 fee = 2; 56 | repeated uint64 pre_balances = 3; 57 | repeated uint64 post_balances = 4; 58 | repeated InnerInstructions inner_instructions = 5; 59 | bool inner_instructions_none = 10; 60 | repeated string log_messages = 6; 61 | bool log_messages_none = 11; 62 | repeated TokenBalance pre_token_balances = 7; 63 | repeated TokenBalance post_token_balances = 8; 64 | repeated Reward rewards = 9; 65 | repeated bytes 
loaded_writable_addresses = 12;
66 |   repeated bytes loaded_readonly_addresses = 13;
67 |   ReturnData return_data = 14;
68 |   bool return_data_none = 15;
69 |
70 |   // Sum of compute units consumed by all instructions.
71 |   // Available since Solana v1.10.35 / v1.11.6.
72 |   // Set to `None` for txs executed on earlier versions.
73 |   optional uint64 compute_units_consumed = 16;
74 |
75 |   // Total transaction cost
76 |   optional uint64 cost_units = 17;
77 | }
78 |
79 | message TransactionError {
80 |   bytes err = 1;
81 | }
82 |
83 | message InnerInstructions {
84 |   uint32 index = 1;
85 |   repeated InnerInstruction instructions = 2;
86 | }
87 |
88 | message InnerInstruction {
89 |   uint32 program_id_index = 1;
90 |   bytes accounts = 2;
91 |   bytes data = 3;
92 |
93 |   // Invocation stack height of an inner instruction.
94 |   // Available since Solana v1.14.6
95 |   // Set to `None` for txs executed on earlier versions.
96 |   optional uint32 stack_height = 4;
97 | }
98 |
99 | message CompiledInstruction {
100 |   uint32 program_id_index = 1;
101 |   bytes accounts = 2;
102 |   bytes data = 3;
103 | }
104 |
105 | message TokenBalance {
106 |   uint32 account_index = 1;
107 |   string mint = 2;
108 |   UiTokenAmount ui_token_amount = 3;
109 |   string owner = 4;
110 |   string program_id = 5;
111 | }
112 |
113 | message UiTokenAmount {
114 |   double ui_amount = 1;
115 |   uint32 decimals = 2;
116 |   string amount = 3;
117 |   string ui_amount_string = 4;
118 | }
119 |
120 | message ReturnData {
121 |   bytes program_id = 1;
122 |   bytes data = 2;
123 | }
124 |
125 | enum RewardType {
126 |   Unspecified = 0;
127 |   Fee = 1;
128 |   Rent = 2;
129 |   Staking = 3;
130 |   Voting = 4;
131 | }
132 |
133 | message Reward {
134 |   string pubkey = 1;
135 |   int64 lamports = 2;
136 |   uint64 post_balance = 3;
137 |   RewardType reward_type = 4;
138 |   string commission = 5;
139 | }
140 |
141 | message Rewards {
142 |   repeated Reward rewards = 1;
143 | }
144 |
145 | message UnixTimestamp {
146 |   int64 timestamp = 1;
147 | }
148 |
149 | message BlockHeight {
150 |   uint64 block_height = 1;
151 | }
152 |
153 | message NumPartitions {
154 |   uint64 num_partitions = 1;
155 | }
--------------------------------------------------------------------------------
/rpc/src/deprecated.rs:
--------------------------------------------------------------------------------
1 | #![allow(deprecated)]
2 | use {
3 |     solana_rpc_client_api::{
4 |         config::{
5 |             EncodingConfig, RpcBlockConfig, RpcEncodingConfigWrapper, RpcTransactionConfig,
6 |         }
7 |     },
8 |     solana_clock::{
9 |         Slot,
10 |     },
11 |     solana_commitment_config::{
12 |         CommitmentConfig,
13 |     },
14 |     solana_transaction_status_client_types::{
15 |         TransactionDetails,
16 |         UiTransactionEncoding,
17 |     },
18 | };
19 |
20 | #[deprecated(
21 |     since = "1.7.0",
22 |     note = "Please use RpcSignaturesForAddressConfig instead"
23 | )]
24 | #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
25 | #[serde(rename_all = "camelCase")]
26 | pub struct RpcGetConfirmedSignaturesForAddress2Config {
27 |     pub before: Option<String>, // Signature as base-58 string
28 |     pub until: Option<String>, // Signature as base-58 string
29 |     pub limit: Option<usize>,
30 |     #[serde(flatten)]
31 |     pub commitment: Option<CommitmentConfig>,
32 | }
33 |
34 | #[deprecated(since = "1.7.0", note = "Please use RpcBlockConfig instead")]
35 | #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
36 | #[serde(rename_all = "camelCase")]
37 | pub struct RpcConfirmedBlockConfig {
38 |     pub encoding: Option<UiTransactionEncoding>,
39 |     pub transaction_details: Option<TransactionDetails>,
40 |     pub rewards: Option<bool>,
41 |     #[serde(flatten)]
42 |     pub commitment: Option<CommitmentConfig>,
43 | }
44 |
45 | impl EncodingConfig for RpcConfirmedBlockConfig {
46 |     fn new_with_encoding(encoding: &Option<UiTransactionEncoding>) -> Self {
47 |         Self {
48 |             encoding: *encoding,
49 |             ..Self::default()
50 |         }
51 |     }
52 | }
53 |
54 | impl RpcConfirmedBlockConfig {
55 |     pub fn rewards_only() -> Self {
56 |         Self {
57 |             transaction_details: Some(TransactionDetails::None),
58 |             ..Self::default()
59 |         }
60 |     }
61 |
62 |     pub fn rewards_with_commitment(commitment: Option<CommitmentConfig>) -> Self {
63 |         Self {
64 |             transaction_details: Some(TransactionDetails::None),
65 |             commitment,
66 |             ..Self::default()
67 |         }
68 |     }
69 | }
70 |
71 | impl From<RpcConfirmedBlockConfig> for RpcEncodingConfigWrapper<RpcConfirmedBlockConfig> {
72 |     fn from(config: RpcConfirmedBlockConfig) -> Self {
73 |         RpcEncodingConfigWrapper::Current(Some(config))
74 |     }
75 | }
76 |
77 | impl From<RpcConfirmedBlockConfig> for RpcBlockConfig {
78 |     fn from(config: RpcConfirmedBlockConfig) -> Self {
79 |         Self {
80 |             encoding: config.encoding,
81 |             transaction_details: config.transaction_details,
82 |             rewards: config.rewards,
83 |             commitment: config.commitment,
84 |             max_supported_transaction_version: None,
85 |         }
86 |     }
87 | }
88 |
89 | #[deprecated(since = "1.7.0", note = "Please use RpcTransactionConfig instead")]
90 | #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
91 | #[serde(rename_all = "camelCase")]
92 | pub struct RpcConfirmedTransactionConfig {
93 |     pub encoding: Option<UiTransactionEncoding>,
94 |     #[serde(flatten)]
95 |     pub commitment: Option<CommitmentConfig>,
96 | }
97 |
98 | impl EncodingConfig for RpcConfirmedTransactionConfig {
99 |     fn new_with_encoding(encoding: &Option<UiTransactionEncoding>) -> Self {
100 |         Self {
101 |             encoding: *encoding,
102 |             ..Self::default()
103 |         }
104 |     }
105 | }
106 |
107 | impl From<RpcConfirmedTransactionConfig> for RpcTransactionConfig {
108 |     fn from(config: RpcConfirmedTransactionConfig) -> Self {
109 |         Self {
110 |             encoding: config.encoding,
111 |             commitment: config.commitment,
112 |             max_supported_transaction_version: None,
113 |         }
114 |     }
115 | }
116 |
117 | #[deprecated(since = "1.7.0", note = "Please use RpcBlocksConfigWrapper instead")]
118 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
119 | #[serde(untagged)]
120 | pub enum RpcConfirmedBlocksConfigWrapper {
121 |     EndSlotOnly(Option<Slot>),
122 |     CommitmentOnly(Option<CommitmentConfig>),
123 | }
124 |
125 | impl RpcConfirmedBlocksConfigWrapper {
126 |     pub fn unzip(&self) -> (Option<Slot>, Option<CommitmentConfig>) {
127 |         match &self {
128 |             RpcConfirmedBlocksConfigWrapper::EndSlotOnly(end_slot) => (*end_slot, None),
129 |             RpcConfirmedBlocksConfigWrapper::CommitmentOnly(commitment) => (None, *commitment),
130 |         }
131 |     }
132 | }
133 |
--------------------------------------------------------------------------------
/storage-proto/proto/transaction_by_addr.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package solana.storage.TransactionByAddr;
4 |
5 | message TransactionByAddr {
6 |   repeated TransactionByAddrInfo tx_by_addrs = 1;
7 | }
8 |
9 | message TransactionByAddrInfo {
10 |   bytes signature = 1;
11 |   TransactionError err = 2;
12 |   uint32 index = 3;
13 |   Memo memo = 4;
14 |   UnixTimestamp block_time = 5;
15 | }
16 |
17 | message Memo {
18 |   string memo = 1;
19 | }
20 |
21 | message TransactionError {
22 |   TransactionErrorType transaction_error = 1;
23 |   InstructionError instruction_error = 2;
24 |   TransactionDetails transaction_details = 3;
25 | }
26 |
27 | enum TransactionErrorType {
28 |   ACCOUNT_IN_USE = 0;
29 |   ACCOUNT_LOADED_TWICE = 1;
30 |   ACCOUNT_NOT_FOUND = 2;
31 |   PROGRAM_ACCOUNT_NOT_FOUND = 3;
32 |
INSUFFICIENT_FUNDS_FOR_FEE = 4; 33 | INVALID_ACCOUNT_FOR_FEE = 5; 34 | ALREADY_PROCESSED = 6; 35 | BLOCKHASH_NOT_FOUND = 7; 36 | INSTRUCTION_ERROR = 8; 37 | CALL_CHAIN_TOO_DEEP = 9; 38 | MISSING_SIGNATURE_FOR_FEE = 10; 39 | INVALID_ACCOUNT_INDEX = 11; 40 | SIGNATURE_FAILURE = 12; 41 | INVALID_PROGRAM_FOR_EXECUTION = 13; 42 | SANITIZE_FAILURE = 14; 43 | CLUSTER_MAINTENANCE = 15; 44 | ACCOUNT_BORROW_OUTSTANDING_TX = 16; 45 | WOULD_EXCEED_MAX_BLOCK_COST_LIMIT = 17; 46 | UNSUPPORTED_VERSION = 18; 47 | INVALID_WRITABLE_ACCOUNT = 19; 48 | WOULD_EXCEED_MAX_ACCOUNT_COST_LIMIT = 20; 49 | WOULD_EXCEED_ACCOUNT_DATA_BLOCK_LIMIT = 21; 50 | TOO_MANY_ACCOUNT_LOCKS = 22; 51 | ADDRESS_LOOKUP_TABLE_NOT_FOUND = 23; 52 | INVALID_ADDRESS_LOOKUP_TABLE_OWNER = 24; 53 | INVALID_ADDRESS_LOOKUP_TABLE_DATA = 25; 54 | INVALID_ADDRESS_LOOKUP_TABLE_INDEX = 26; 55 | INVALID_RENT_PAYING_ACCOUNT = 27; 56 | WOULD_EXCEED_MAX_VOTE_COST_LIMIT = 28; 57 | WOULD_EXCEED_ACCOUNT_DATA_TOTAL_LIMIT = 29; 58 | DUPLICATE_INSTRUCTION = 30; 59 | INSUFFICIENT_FUNDS_FOR_RENT = 31; 60 | MAX_LOADED_ACCOUNTS_DATA_SIZE_EXCEEDED = 32; 61 | INVALID_LOADED_ACCOUNTS_DATA_SIZE_LIMIT = 33; 62 | RESANITIZATION_NEEDED = 34; 63 | PROGRAM_EXECUTION_TEMPORARILY_RESTRICTED = 35; 64 | UNBALANCED_TRANSACTION = 36; 65 | PROGRAM_CACHE_HIT_MAX_LIMIT = 37; 66 | COMMIT_CANCELLED = 38; 67 | } 68 | 69 | message InstructionError { 70 | uint32 index = 1; 71 | InstructionErrorType error = 2; 72 | CustomError custom = 3; 73 | } 74 | 75 | message TransactionDetails { 76 | uint32 index = 1; 77 | } 78 | 79 | enum InstructionErrorType { 80 | GENERIC_ERROR = 0; 81 | INVALID_ARGUMENT = 1; 82 | INVALID_INSTRUCTION_DATA = 2; 83 | INVALID_ACCOUNT_DATA = 3; 84 | ACCOUNT_DATA_TOO_SMALL = 4; 85 | INSUFFICIENT_FUNDS = 5; 86 | INCORRECT_PROGRAM_ID = 6; 87 | MISSING_REQUIRED_SIGNATURE = 7; 88 | ACCOUNT_ALREADY_INITIALIZED = 8; 89 | UNINITIALIZED_ACCOUNT = 9; 90 | UNBALANCED_INSTRUCTION = 10; 91 | MODIFIED_PROGRAM_ID = 11; 92 | EXTERNAL_ACCOUNT_LAMPORT_SPEND = 12; 93 | EXTERNAL_ACCOUNT_DATA_MODIFIED = 13; 94 | READONLY_LAMPORT_CHANGE = 14; 95 | READONLY_DATA_MODIFIED = 15; 96 | DUPLICATE_ACCOUNT_INDEX = 16; 97 | EXECUTABLE_MODIFIED = 17; 98 | RENT_EPOCH_MODIFIED = 18; 99 | NOT_ENOUGH_ACCOUNT_KEYS = 19; 100 | ACCOUNT_DATA_SIZE_CHANGED = 20; 101 | ACCOUNT_NOT_EXECUTABLE = 21; 102 | ACCOUNT_BORROW_FAILED = 22; 103 | ACCOUNT_BORROW_OUTSTANDING = 23; 104 | DUPLICATE_ACCOUNT_OUT_OF_SYNC = 24; 105 | CUSTOM = 25; 106 | INVALID_ERROR = 26; 107 | EXECUTABLE_DATA_MODIFIED = 27; 108 | EXECUTABLE_LAMPORT_CHANGE = 28; 109 | EXECUTABLE_ACCOUNT_NOT_RENT_EXEMPT = 29; 110 | UNSUPPORTED_PROGRAM_ID = 30; 111 | CALL_DEPTH = 31; 112 | MISSING_ACCOUNT = 32; 113 | REENTRANCY_NOT_ALLOWED = 33; 114 | MAX_SEED_LENGTH_EXCEEDED = 34; 115 | INVALID_SEEDS = 35; 116 | INVALID_REALLOC = 36; 117 | COMPUTATIONAL_BUDGET_EXCEEDED = 37; 118 | PRIVILEGE_ESCALATION = 38; 119 | PROGRAM_ENVIRONMENT_SETUP_FAILURE = 39; 120 | PROGRAM_FAILED_TO_COMPLETE = 40; 121 | PROGRAM_FAILED_TO_COMPILE = 41; 122 | IMMUTABLE = 42; 123 | INCORRECT_AUTHORITY = 43; 124 | BORSH_IO_ERROR = 44; 125 | ACCOUNT_NOT_RENT_EXEMPT = 45; 126 | INVALID_ACCOUNT_OWNER = 46; 127 | ARITHMETIC_OVERFLOW = 47; 128 | UNSUPPORTED_SYSVAR = 48; 129 | ILLEGAL_OWNER = 49; 130 | MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED = 50; 131 | MAX_ACCOUNTS_EXCEEDED = 51; 132 | MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED = 52; 133 | BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS = 53; 134 | } 135 | 136 | message UnixTimestamp { 137 | int64 timestamp = 1; 138 | } 139 | 140 | message CustomError 
{
141 |   uint32 custom = 1;
142 | }
143 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Solana Archival RPC Service
2 |
3 | A fast, resource-efficient Solana RPC service that needs only a fraction of the resources of a full Solana validator node. It focuses on data-retrieval endpoints and currently supports all archival-related RPC calls.
4 |
5 | Archival RPC also introduces alternative storage backed by a self-hosted HBase database, which is 10x+ cheaper than BigTable and can be run on-prem to achieve sub-10ms RPC calls to methods like getSignaturesForAddress. It reuses the Solana Labs Rust validator code without any data-structure changes, which means you can simply import a BigTable backup into HBase and you are ready to go.
6 |
7 | ## Overview
8 |
9 | Archival RPC is structured into two primary components for scalability and separation of concerns:
10 |
11 | - **RPC Server**: The repository you are currently viewing. It serves as the backbone for communication.
12 | - **Ingestor Module**: Located at [ingestor-kafka](https://github.com/dexterlaboss/ingestor-kafka) or [ingestor-kafka-hbase](https://github.com/dexterlaboss/ingestor-kafka-hbase), this component is dedicated to data ingestion.
13 | By segregating data ingestion from the serving layer, we provide a scalable architecture whose components scale independently based on demand. The ingestor module pulls full, unparsed blocks directly from a Kafka topic, with ongoing work to integrate gRPC support for enhanced data interchange.
14 |
15 |
16 | ![How we run it!](https://dexterlab.com/content/images/2024/02/Screenshot-2024-02-28-at-11.12.42-2.png "How we run it")
17 |
18 |
19 | ## Quick Setup Overview
20 |
21 | In the near future, we're introducing several resources to simplify setup and maintenance, including prebuilt releases, Docker images, and Helm charts (with HBase support). Below is a brief guide for those eager to get started. Detailed documentation will follow.
22 |
23 | **Starting with Archival RPC**
24 |
25 | - **Clone and compile Archival RPC**: This is the first step for either setup.
26 | - **Connecting to BigTable**: If you already have a BigTable instance, you can simply connect to it to offload expensive validator resources.
27 |
28 | **Full On-Prem Setup**
29 |
30 | For a comprehensive on-premise setup, additional steps are required (a sample startup command and query are sketched after this list):
31 | - **HBase and Kafka**: Ensure you have an operational HBase cluster and a Kafka instance.
32 | - **Writing to Kafka**: Use Geyser plugins or scripts to push raw blocks to your Kafka topic.
33 | - **Data Ingestion**: Clone and compile dexterlaboss/ingestor-kafka.
34 | - **Configuration**: Provide the storage-ingestor with HBase credentials and specify the Kafka topic to stream full blocks.
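
To make the steps above concrete, here is a minimal sketch of a startup invocation for an HBase-backed deployment. The binary path matches this repository's release build, and the flags are the ones documented under Startup args below; the Thrift address is a placeholder for your own HBase cluster.

```bash
# Hypothetical example: serve archival data from a self-hosted HBase cluster.
# Replace the Thrift address with your own deployment's endpoint.
./target/release/archival-rpc \
    --bind-address 0.0.0.0 \
    --rpc-port 8900 \
    --enable-rpc-hbase-ledger-storage \
    --rpc-hbase-address hbase-thrift.internal:9090
```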
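Once the service is up, any method from the Compatibility table below can be queried with a standard Solana JSON-RPC request. A minimal sketch, assuming the default port 8900 and a placeholder base-58 address:

```bash
# Fetch the most recent signatures for an address from archival storage.
curl -s http://localhost:8900 \
    -X POST \
    -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","id":1,"method":"getSignaturesForAddress","params":["<base-58 address>",{"limit":10}]}'
```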
35 | Stay tuned for more detailed guides on each step of the process.
36 |
37 | ## Startup args
38 |
39 | | Argument | Purpose | Default Value |
40 | |---------------------------------|---------------------------------|------------------------------------------------|
41 | | `--bind-address` | Address for the RPC node | 0.0.0.0 |
42 | | `--rpc-port` | Port for the RPC service | 8900 |
43 | | `--quiet` | Quiet mode: suppress normal output | false |
44 | | `--log` | Log mode: stream the launcher log | |
45 | | `--log-path` | Log file location | |
46 | | `--log-messages-bytes-limit` | Maximum number of bytes written to the program log before truncation | |
47 | | `--enable-rpc-hbase-ledger-storage` | Fetch historical transaction info from an HBase instance | |
48 | | `--rpc-hbase-address` | Address of the HBase Thrift instance to use | |
49 | | `--enable-rpc-bigtable-ledger-storage` | Fetch historical transaction info from a BigTable instance | |
50 | | `--rpc-bigtable-instance-name` | Name of the BigTable instance to use | |
51 | | `--rpc-bigtable-app-profile-id` | BigTable application profile ID to use for requests | |
52 | | `--rpc-bigtable-timeout` | Number of seconds before timing out BigTable requests | |
53 |
54 | ## Compatibility
55 |
56 | | RPC Method |
57 | |---------------------------------|
58 | | `getSignaturesForAddress` |
59 | | `getTransaction` |
60 | | `getBlock` |
61 | | `getBlockTime` |
62 | | `getBlocks` |
63 | | `getBlocksWithLimit` |
64 |
65 |
--------------------------------------------------------------------------------
/storage-bigtable/src/access_token.rs:
--------------------------------------------------------------------------------
1 | pub use goauth::scopes::Scope;
2 | /// A module for managing a Google API access token
3 | use {
4 |     crate::CredentialType,
5 |     goauth::{
6 |         auth::{JwtClaims, Token},
7 |         credentials::Credentials,
8 |     },
9 |     log::*,
10 |     smpl_jwt::Jwt,
11 |     std::{
12 |         str::FromStr,
13 |         sync::{
14 |             atomic::{AtomicBool, Ordering},
15 |             {Arc, RwLock},
16 |         },
17 |         time::Instant,
18 |     },
19 |     tokio::time,
20 | };
21 |
22 | fn load_credentials(filepath: Option<String>) -> Result<Credentials, String> {
23 |     let path = match filepath {
24 |         Some(f) => f,
25 |         None => std::env::var("GOOGLE_APPLICATION_CREDENTIALS").map_err(|_| {
26 |             "GOOGLE_APPLICATION_CREDENTIALS environment variable not found".to_string()
27 |         })?,
28 |     };
29 |     Credentials::from_file(&path)
30 |         .map_err(|err| format!("Failed to read GCP credentials from {path}: {err}"))
31 | }
32 |
33 | fn load_stringified_credentials(credential: String) -> Result<Credentials, String> {
34 |     Credentials::from_str(&credential).map_err(|err| format!("{err}"))
35 | }
36 |
37 | #[derive(Clone)]
38 | pub struct AccessToken {
39 |     credentials: Credentials,
40 |     scope: Scope,
41 |     refresh_active: Arc<AtomicBool>,
42 |     token: Arc<RwLock<(Token, Instant)>>,
43 | }
44 |
45 | impl AccessToken {
46 |     pub async fn new(scope: Scope, credential_type: CredentialType) -> Result<Self, String> {
47 |         let credentials = match credential_type {
48 |             CredentialType::Filepath(fp) => load_credentials(fp)?,
49 |             CredentialType::Stringified(s) => load_stringified_credentials(s)?,
50 |         };
51 |
52 |         if let Err(err) = credentials.rsa_key() {
53 |             Err(format!("Invalid rsa key: {err}"))
54 |         } else {
55 |             let token = Arc::new(RwLock::new(Self::get_token(&credentials, &scope).await?));
56 |             let access_token = Self {
57 |                 credentials,
58 |                 scope,
59 |                 token,
60 |                 refresh_active: Arc::new(AtomicBool::new(false)),
61 |             };
62 |             Ok(access_token)
63 |         }
64 |     }
65 |
66 |     /// The project that this token grants access to
67 |     pub
fn project(&self) -> String { 68 | self.credentials.project() 69 | } 70 | 71 | async fn get_token( 72 | credentials: &Credentials, 73 | scope: &Scope, 74 | ) -> Result<(Token, Instant), String> { 75 | info!("Requesting token for {:?} scope", scope); 76 | let claims = JwtClaims::new( 77 | credentials.iss(), 78 | scope, 79 | credentials.token_uri(), 80 | None, 81 | None, 82 | ); 83 | let jwt = Jwt::new(claims, credentials.rsa_key().unwrap(), None); 84 | 85 | let token = goauth::get_token(&jwt, credentials) 86 | .await 87 | .map_err(|err| format!("Failed to refresh access token: {err}"))?; 88 | 89 | info!("Token expires in {} seconds", token.expires_in()); 90 | Ok((token, Instant::now())) 91 | } 92 | 93 | /// Call this function regularly to ensure the access token does not expire 94 | pub async fn refresh(&self) { 95 | // Check if it's time to try a token refresh 96 | { 97 | let token_r = self.token.read().unwrap(); 98 | if token_r.1.elapsed().as_secs() < token_r.0.expires_in() as u64 / 2 { 99 | return; 100 | } 101 | 102 | #[allow(deprecated)] 103 | if self 104 | .refresh_active 105 | .compare_and_swap(false, true, Ordering::Relaxed) 106 | { 107 | // Refresh already pending 108 | return; 109 | } 110 | } 111 | 112 | info!("Refreshing token"); 113 | match time::timeout( 114 | time::Duration::from_secs(5), 115 | Self::get_token(&self.credentials, &self.scope), 116 | ) 117 | .await 118 | { 119 | Ok(new_token) => match (new_token, self.token.write()) { 120 | (Ok(new_token), Ok(mut token_w)) => *token_w = new_token, 121 | (Ok(_new_token), Err(err)) => warn!("{}", err), 122 | (Err(err), _) => warn!("{}", err), 123 | }, 124 | Err(_) => { 125 | warn!("Token refresh timeout") 126 | } 127 | } 128 | self.refresh_active.store(false, Ordering::Relaxed); 129 | } 130 | 131 | /// Return an access token suitable for use in an HTTP authorization header 132 | pub fn get(&self) -> String { 133 | let token_r = self.token.read().unwrap(); 134 | format!("{} {}", token_r.0.token_type(), token_r.0.access_token()) 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # docker/docker-compose.yml 2 | 3 | #version: '3.8' 4 | 5 | services: 6 | hbase: 7 | build: 8 | context: . 
9 | dockerfile: Dockerfile.hbase 10 | container_name: hbase 11 | ports: 12 | - "16000:16000" # HBase Master port 13 | - "16010:16010" # HBase REST API 14 | - "16020:16020" # Region server 15 | - "16030:16030" # HBase Admin UI 16 | - "9090:9090" # Thrift port 17 | - "2181:2181" # Embedded Zookeeper port 18 | # volumes: 19 | # - hbase-data:/data/hbase 20 | # - hbase-zookeeper-data:/data/zookeeper 21 | # environment: 22 | # - HBASE_CONF_DIR=/opt/hbase/conf 23 | # - EXTRA_OPTS="-Dhbase.zookeeper.quorum=localhost" 24 | # - HBASE_MANAGES_ZK=false 25 | networks: 26 | - app-network 27 | healthcheck: 28 | test: ["CMD", "nc", "-z", "localhost", "16010"] 29 | interval: 10s 30 | timeout: 10s 31 | retries: 5 32 | 33 | kafka-zookeeper: 34 | image: 'confluentinc/cp-zookeeper:7.3.1' 35 | container_name: kafka-zookeeper 36 | environment: 37 | ZOOKEEPER_CLIENT_PORT: 2181 38 | ZOOKEEPER_TICK_TIME: 2000 39 | ports: 40 | - '2182:2181' 41 | networks: 42 | - app-network 43 | healthcheck: 44 | test: ["CMD", "echo", "ruok", "|", "nc", "localhost", "2181", "|", "grep", "imok"] 45 | interval: 10s 46 | timeout: 5s 47 | retries: 5 48 | 49 | kafka: 50 | image: 'confluentinc/cp-kafka:7.3.1' 51 | container_name: kafka 52 | depends_on: 53 | - kafka-zookeeper 54 | ports: 55 | - '9092:9092' 56 | environment: 57 | KAFKA_BROKER_ID: 1 58 | KAFKA_ZOOKEEPER_CONNECT: 'kafka-zookeeper:2181' 59 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092 60 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 61 | KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' 62 | networks: 63 | - app-network 64 | healthcheck: 65 | test: ["CMD", "/usr/bin/kafka-topics", "--bootstrap-server", "localhost:9092", "--list"] 66 | interval: 10s 67 | timeout: 5s 68 | retries: 5 69 | 70 | kafka-init: 71 | image: 'confluentinc/cp-kafka:7.3.1' 72 | container_name: kafka-init 73 | depends_on: 74 | - kafka 75 | entrypoint: 76 | - /bin/bash 77 | - -c 78 | - > 79 | echo "Checking and creating Kafka topics."; 80 | if /usr/bin/kafka-topics --list --bootstrap-server kafka:9092 | grep -q "^sol.blocks$"; then 81 | echo "Topic 'sol.blocks' already exists."; 82 | else 83 | /usr/bin/kafka-topics --create --topic sol.blocks --bootstrap-server kafka:9092 --partitions 1 --replication-factor 1 || { 84 | echo "Failed to create topic 'sol.blocks'!"; 85 | exit 1; 86 | } 87 | echo "Topic 'sol.blocks' created successfully."; 88 | fi; 89 | echo "Kafka topic initialization completed." 90 | networks: 91 | - app-network 92 | restart: "no" 93 | 94 | # Setup Service 95 | # setup: 96 | # build: 97 | # context: . 98 | # dockerfile: Dockerfile.setup 99 | # container_name: setup 100 | # depends_on: 101 | # kafka: 102 | # condition: service_healthy 103 | # networks: 104 | # - app-network 105 | # environment: 106 | # - HBASE_CONF_DIR=/scripts/conf 107 | # entrypoint: ["/scripts/setup.sh"] 108 | # healthcheck: 109 | # test: ["CMD", "hbase", "shell", "-e", "status"] 110 | # interval: 30s 111 | # timeout: 10s 112 | # retries: 5 113 | 114 | # Archival RPC 115 | archival-rpc: 116 | build: 117 | context: .. 118 | dockerfile: docker/Dockerfile.archival-rpc 119 | container_name: archival-rpc 120 | depends_on: 121 | # setup: 122 | # condition: service_completed_successfully 123 | kafka: 124 | condition: service_healthy 125 | hbase: 126 | condition: service_healthy 127 | kafka-init: 128 | condition: service_completed_successfully 129 | networks: 130 | - app-network 131 | # entrypoint: ["/usr/local/bin/archival-rpc"] 132 | restart: unless-stopped 133 | 134 | # CAR generator 135 | car-gen: 136 | build: 137 | context: .. 
138 |       dockerfile: docker/Dockerfile.car-gen
139 |     container_name: car-gen
140 |     depends_on:
141 |       # setup:
142 |       #   condition: service_completed_successfully
143 |       kafka:
144 |         condition: service_healthy
145 |       hbase:
146 |         condition: service_healthy
147 |     environment:
148 |       - HADOOP_ROOT_LOGGER=INFO,console
149 |     networks:
150 |       - app-network
151 |     entrypoint: ["/usr/local/bin/car-gen"]
152 |     # entrypoint: ["ping", "10.0.68.184"]
153 |     restart: unless-stopped
154 |
155 | networks:
156 |   app-network:
157 |     driver: bridge
158 |
159 | #volumes:
160 | #  hbase-data:
161 | #  hbase-zookeeper-data:
162 | #  kafka-zookeeper-data:
163 | #  kafka-data:
164 |
--------------------------------------------------------------------------------
/rpc/src/rpc_core.rs:
--------------------------------------------------------------------------------
1 |
2 |
3 | use {
4 |     crate::{
5 |         rpc_server::*,
6 |         rpc_network_node::*,
7 |         request_processor::JsonRpcConfig,
8 |     },
9 |     log::*,
10 |     solana_validator_exit::{
11 |         Exit,
12 |     },
13 |     std::{
14 |         net::{IpAddr, Ipv4Addr, SocketAddr},
15 |         path::{Path, PathBuf},
16 |         sync::{Arc, RwLock},
17 |         process::exit,
18 |         env,
19 |     },
20 | };
21 |
22 | pub const DEFAULT_RPC_PORT: u16 = 8899;
23 |
24 | #[derive(Debug)]
25 | pub struct RpcNodeConfig {
26 |     bind_ip_addr: IpAddr,
27 | }
28 |
29 | impl Default for RpcNodeConfig {
30 |     fn default() -> Self {
31 |         let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
32 |
33 |         Self {
34 |             bind_ip_addr,
35 |         }
36 |     }
37 | }
38 |
39 | pub struct RpcNodeBuilder {
40 |     log_path: Option<PathBuf>,
41 |     rpc_config: JsonRpcConfig,
42 |     rpc_port: Option<u16>,
43 |     node_config: RpcNodeConfig,
44 |     pub rpc_service_exit: Arc<RwLock<Exit>>,
45 | }
46 |
47 | impl Default for RpcNodeBuilder {
48 |     fn default() -> Self {
49 |         Self {
50 |             log_path: Option::<PathBuf>::default(),
51 |             rpc_config: JsonRpcConfig::default_for_storage_rpc(),
52 |             rpc_port: Option::<u16>::default(),
53 |             node_config: RpcNodeConfig::default(),
54 |             rpc_service_exit: Arc::<RwLock<Exit>>::default(),
55 |         }
56 |     }
57 | }
58 |
59 | impl RpcNodeBuilder {
60 |     pub fn log_path<P: Into<PathBuf>>(&mut self, log_path: P) -> &mut Self {
61 |         self.log_path = Some(log_path.into());
62 |         self
63 |     }
64 |
65 |     /// Check if a given RpcNode ledger has already been initialized
66 |     pub fn ledger_exists(log_path: &Path) -> bool {
67 |         log_path.exists()
68 |     }
69 |
70 |     pub fn rpc_config(&mut self, rpc_config: JsonRpcConfig) -> &mut Self {
71 |         self.rpc_config = rpc_config;
72 |         self
73 |     }
74 |
75 |     pub fn rpc_port(&mut self, rpc_port: u16) -> &mut Self {
76 |         self.rpc_port = Some(rpc_port);
77 |         self
78 |     }
79 |
80 |     pub fn bind_ip_addr(&mut self, bind_ip_addr: IpAddr) -> &mut Self {
81 |         self.node_config.bind_ip_addr = bind_ip_addr;
82 |         self
83 |     }
84 |
85 |     pub fn start(
86 |         &self,
87 |     ) -> Result<RpcNode, Box<dyn std::error::Error>> {
88 |         RpcNode::start(self).map(|rpc_node| {
89 |             rpc_node
90 |         })
91 |     }
92 | }
93 |
94 |
95 | pub struct RpcNode {
96 |     rpc_url: String,
97 |     rpc_service: Option<RpcService>,
98 | }
99 |
100 | impl RpcNode {
101 |     /// Initialize the log directory
102 |     fn init_log_dir(
103 |         config: &RpcNodeBuilder,
104 |     ) -> Result<PathBuf, Box<dyn std::error::Error>> {
105 |         let log_path = match &config.log_path {
106 |             None => match env::current_dir() {
107 |                 Ok(current_dir) => current_dir,
108 |                 Err(e) => {
109 |                     println!("Error getting current working directory: {:?}", e);
110 |                     exit(1);
111 |                 }
112 |             },
113 |             Some(log_path) => {
114 |                 log_path.to_path_buf()
115 |             }
116 |         };
117 |
118 |         Ok(log_path)
119 |     }
120 |
121 |     fn start(
122 |         config: &RpcNodeBuilder,
123 |     ) -> Result<RpcNode, Box<dyn std::error::Error>> {
124 |         let log_path = RpcNode::init_log_dir(config)?;
125 |
126 |         info!("Starting rpc server at {:?}", config.node_config.bind_ip_addr);
127 |
128 |         let mut node = RpcNetworkNode::new_single_bind(
129 |             // rpc_port::DEFAULT_RPC_PORT,
130 |             DEFAULT_RPC_PORT,
131 |             config.node_config.bind_ip_addr,
132 |         );
133 |         if let Some(rpc) = config.rpc_port {
134 |             node.info.rpc = SocketAddr::new(config.node_config.bind_ip_addr, rpc);
135 |         }
136 |
137 |         let rpc_url = format!("http://{}", node.info.rpc);
138 |
139 |         let rpc_service_config = RpcServiceConfig {
140 |             rpc_addr: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), node.info.rpc.port())),
141 |             rpc_config: config.rpc_config.clone(),
142 |             rpc_service_exit: config.rpc_service_exit.clone(),
143 |             ..RpcServiceConfig::default_for_storage_rpc()
144 |         };
145 |
146 |         let rpc_service = Some(RpcService::new(
147 |             node,
148 |             &log_path,
149 |             &rpc_service_config,
150 |         )?);
151 |
152 |         let rpc_node = RpcNode {
153 |             rpc_url,
154 |             rpc_service,
155 |         };
156 |         Ok(rpc_node)
157 |     }
158 |
159 |     /// Return the launcher's JSON RPC URL
160 |     pub fn rpc_url(&self) -> String {
161 |         self.rpc_url.clone()
162 |     }
163 |
164 |     pub fn join(mut self) {
165 |         if let Some(rpc_service) = self.rpc_service.take() {
166 |             rpc_service.join();
167 |         }
168 |     }
169 | }
--------------------------------------------------------------------------------
/perf/src/thread.rs:
--------------------------------------------------------------------------------
1 | use std::fmt::Display;
2 |
3 | #[cfg(target_os = "linux")]
4 | use log::warn;
5 |
6 | /// Wrapper for `nice(3)`.
7 | #[cfg(target_os = "linux")]
8 | fn nice(adjustment: i8) -> Result<i8, nix::errno::Errno> {
9 |     unsafe {
10 |         *libc::__errno_location() = 0;
11 |         let niceness = libc::nice(libc::c_int::from(adjustment));
12 |         let errno = *libc::__errno_location();
13 |         if (niceness == -1) && (errno != 0) {
14 |             Err(errno)
15 |         } else {
16 |             Ok(niceness)
17 |         }
18 |     }
19 |     .map(|niceness| i8::try_from(niceness).expect("Unexpected niceness value"))
20 |     .map_err(nix::errno::Errno::from_raw)
21 | }
22 |
23 | /// Adds `adjustment` to the nice value of calling thread. Negative `adjustment` increases priority,
24 | /// positive `adjustment` decreases priority. New thread inherits nice value from current thread
25 | /// when created.
26 | ///
27 | /// Fails on non-Linux systems for all `adjustment` values except zero.
28 | #[cfg(target_os = "linux")]
29 | pub fn renice_this_thread(adjustment: i8) -> Result<(), String> {
30 |     // On Linux, the nice value is a per-thread attribute. See `man 7 sched` for details.
31 |     // Other systems probably should use pthread_setschedprio(), but, on Linux, thread priority
32 |     // is fixed to zero for SCHED_OTHER threads (which is the default).
33 |     nice(adjustment)
34 |         .map(|_| ())
35 |         .map_err(|err| format!("Failed to change thread's nice value: {err}"))
36 | }
37 |
38 | /// Adds `adjustment` to the nice value of calling thread. Negative `adjustment` increases priority,
39 | /// positive `adjustment` decreases priority. New thread inherits nice value from current thread
40 | /// when created.
41 | ///
42 | /// Fails on non-Linux systems for all `adjustment` values except zero.
43 | #[cfg(not(target_os = "linux"))]
44 | pub fn renice_this_thread(adjustment: i8) -> Result<(), String> {
45 |     if adjustment == 0 {
46 |         Ok(())
47 |     } else {
48 |         Err(String::from(
49 |             "Failed to change thread's nice value: only supported on Linux",
50 |         ))
51 |     }
52 | }
53 |
54 | /// Check whether the nice value can be changed by `adjustment`.
55 | #[cfg(target_os = "linux")]
56 | pub fn is_renice_allowed(adjustment: i8) -> bool {
57 |     use caps::{CapSet, Capability};
58 |
59 |     if adjustment >= 0 {
60 |         true
61 |     } else {
62 |         nix::unistd::geteuid().is_root()
63 |             || caps::has_cap(None, CapSet::Effective, Capability::CAP_SYS_NICE)
64 |                 .map_err(|err| warn!("Failed to get thread's capabilities: {}", err))
65 |                 .unwrap_or(false)
66 |     }
67 | }
68 |
69 | /// Check whether the nice value can be changed by `adjustment`.
70 | #[cfg(not(target_os = "linux"))]
71 | pub fn is_renice_allowed(adjustment: i8) -> bool {
72 |     adjustment == 0
73 | }
74 |
75 | pub fn is_niceness_adjustment_valid<T>(value: T) -> Result<(), String>
76 | where
77 |     T: AsRef<str> + Display,
78 | {
79 |     let adjustment = value
80 |         .as_ref()
81 |         .parse::<i8>()
82 |         .map_err(|err| format!("error parsing niceness adjustment value '{value}': {err}"))?;
83 |     if is_renice_allowed(adjustment) {
84 |         Ok(())
85 |     } else {
86 |         Err(String::from(
87 |             "niceness adjustment supported only on Linux; negative adjustment \
88 |              (priority increase) requires root or CAP_SYS_NICE (see `man 7 capabilities` \
89 |              for details)",
90 |         ))
91 |     }
92 | }
93 |
94 | #[cfg(test)]
95 | mod tests {
96 |     #[cfg(target_os = "linux")]
97 |     use super::*;
98 |
99 |     #[cfg(target_os = "linux")]
100 |     #[test]
101 |     fn test_nice() {
102 |         // No change / get current niceness
103 |         let niceness = nice(0).unwrap();
104 |
105 |         // Decrease priority (allowed for unprivileged processes)
106 |         let result = std::thread::spawn(|| nice(1)).join().unwrap();
107 |         assert_eq!(result, Ok(niceness + 1));
108 |
109 |         // Sanity check: ensure that current thread's nice value not changed after previous call
110 |         // from different thread
111 |         assert_eq!(nice(0), Ok(niceness));
112 |
113 |         // Sanity check: ensure that new thread inherits nice value from current thread
114 |         let inherited_niceness = std::thread::spawn(|| {
115 |             nice(1).unwrap();
116 |             std::thread::spawn(|| nice(0).unwrap()).join().unwrap()
117 |         })
118 |         .join()
119 |         .unwrap();
120 |         assert_eq!(inherited_niceness, niceness + 1);
121 |
122 |         if !is_renice_allowed(-1) {
123 |             // Increase priority (not allowed for unprivileged processes)
124 |             let result = std::thread::spawn(|| nice(-1)).join().unwrap();
125 |             assert!(result.is_err());
126 |         }
127 |     }
128 |
129 |     #[test]
130 |     fn test_is_niceness_adjustment_valid() {
131 |         use super::is_niceness_adjustment_valid;
132 |         assert_eq!(is_niceness_adjustment_valid("0"), Ok(()));
133 |         assert!(is_niceness_adjustment_valid("128").is_err());
134 |         assert!(is_niceness_adjustment_valid("-129").is_err());
135 |     }
136 | }
137 |
--------------------------------------------------------------------------------
/.github/workflows/build-and-release.yml:
--------------------------------------------------------------------------------
1 | name: Build and Release Solana Archival RPC
2 |
3 | on:
4 |   push:
5 |     tags:
6 |       - 'v*'
7 |
8 | jobs:
9 |   build_and_push:
10 |     runs-on: ubuntu-22.04
11 |     steps:
12 |       # Checkout the code
13 |       - uses: actions/checkout@v3
14 |
15 |       # Set up Rust
16 |       - name: Set up Rust
17 |         uses: actions-rs/toolchain@v1
18 |         with:
19 |           toolchain: 1.86.0
20 |           profile: minimal
21 |           override: true
22 |
23 |       - name: Install dependencies
24 |         run: sudo apt-get update && sudo apt-get install -y software-properties-common cmake build-essential libclang-dev libudev-dev libssl-dev libsasl2-dev
25 |
26 |       - name: Install dependencies for cross-compilation
27 |         run: |
28 |           sudo apt-get update
29 |           sudo apt-get install -y \
30 |
gcc-aarch64-linux-gnu \ 31 | g++-aarch64-linux-gnu \ 32 | libc6-dev-arm64-cross 33 | 34 | # Add Rust targets 35 | - name: Add Rust targets 36 | run: | 37 | rustup target add x86_64-unknown-linux-gnu 38 | rustup target add aarch64-unknown-linux-gnu 39 | 40 | # Set up Cargo configuration for cross-compilation 41 | - name: Configure Cargo for cross-compilation 42 | run: | 43 | mkdir -p .cargo 44 | echo '[target.aarch64-unknown-linux-gnu]' >> .cargo/config.toml 45 | echo 'linker = "aarch64-linux-gnu-gcc"' >> .cargo/config.toml 46 | 47 | - uses: Swatinem/rust-cache@v2 48 | with: 49 | prefix-key: "v1-rust-cache" 50 | shared-key: dependencies-only 51 | 52 | # Build arm64 binary 53 | - name: Build arm64 binary 54 | run: | 55 | cargo build --release --target aarch64-unknown-linux-gnu 56 | mkdir -p output/linux/arm64 57 | cp target/aarch64-unknown-linux-gnu/release/archival-rpc output/linux/arm64/ 58 | 59 | # Build x86_64 binary 60 | - name: Build x86_64 binary 61 | run: | 62 | cargo build --release --target x86_64-unknown-linux-gnu 63 | mkdir -p output/linux/amd64 64 | cp target/x86_64-unknown-linux-gnu/release/archival-rpc output/linux/amd64/ 65 | 66 | # Log in to DockerHub 67 | - name: Login to DockerHub 68 | uses: docker/login-action@v2 69 | with: 70 | username: ${{ secrets.DOCKER_USERNAME }} 71 | password: ${{ secrets.DOCKER_PASSWORD }} 72 | 73 | # Build and push Docker images 74 | - name: Build and push Docker images 75 | run: | 76 | docker buildx create --use 77 | docker buildx build \ 78 | --platform linux/amd64,linux/arm64 \ 79 | --build-arg AMD64_BINARY="$(pwd)/output/linux/amd64/archival-rpc" \ 80 | --build-arg ARM64_BINARY="$(pwd)/output/linux/arm64/archival-rpc" \ 81 | --tag dexterlaboss/archival-rpc:${GITHUB_REF_NAME} \ 82 | --push . 83 | 84 | # Extract x86_64 binary from Docker container 85 | - name: Extract x86_64 binary 86 | run: | 87 | mkdir -p ${{ github.workspace }}/output 88 | docker create --platform linux/amd64 --name extract-amd64 dexterlaboss/archival-rpc:${GITHUB_REF_NAME} 89 | docker cp extract-amd64:/solana/archival-rpc ${{ github.workspace }}/output/archival_rpc_${{ github.ref_name }}_linux_amd64 90 | docker rm extract-amd64 91 | 92 | # Extract arm64 binary from Docker container 93 | - name: Extract arm64 binary 94 | run: | 95 | mkdir -p ${{ github.workspace }}/output 96 | docker create --platform linux/arm64 --name extract-arm64 dexterlaboss/archival-rpc:${GITHUB_REF_NAME} 97 | docker cp extract-arm64:/solana/archival-rpc ${{ github.workspace }}/output/archival_rpc_${{ github.ref_name }}_linux_arm64 98 | docker rm extract-arm64 99 | 100 | # Create GitHub Release 101 | - name: Create GitHub Release 102 | id: gh-release 103 | uses: softprops/action-gh-release@v1 104 | with: 105 | tag_name: ${{ github.ref }} 106 | name: Release ${{ github.ref }} 107 | body: 'Release of version ${{ github.ref }}' 108 | env: 109 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 110 | 111 | # Upload x86_64 binary to GitHub Release 112 | - name: Upload x86_64 binary to GitHub Release 113 | uses: actions/upload-release-asset@v1 114 | with: 115 | upload_url: ${{ steps.gh-release.outputs.upload_url }} 116 | asset_path: ./output/linux/amd64/archival-rpc 117 | asset_name: archival-rpc_${{ github.ref_name }}_linux_amd64 118 | asset_content_type: application/octet-stream 119 | env: 120 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 121 | 122 | # Upload arm64 binary to GitHub Release 123 | - name: Upload arm64 binary to GitHub Release 124 | uses: actions/upload-release-asset@v1 125 | with: 126 | upload_url: 
${{ steps.gh-release.outputs.upload_url }}
127 |           asset_path: ./output/linux/arm64/archival-rpc
128 |           asset_name: archival-rpc_${{ github.ref_name }}_linux_arm64
129 |           asset_content_type: application/octet-stream
130 |         env:
131 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
132 |
--------------------------------------------------------------------------------
/rpc/src/custom_error.rs:
--------------------------------------------------------------------------------
1 | //! Implementation defined RPC server errors
2 | use {
3 |     jsonrpc_core::{Error, ErrorCode},
4 |     serde::{Deserialize, Serialize},
5 |     solana_clock::Slot,
6 |     solana_transaction_status_client_types::{
7 |         EncodeError,
8 |     },
9 |     thiserror::Error,
10 | };
11 |
12 | pub const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;
13 | pub const JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY: i64 = -32005;
14 | pub const JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED: i64 = -32009;
15 | pub const JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE: i64 = -32011;
16 | pub const JSON_RPC_SCAN_ERROR: i64 = -32012;
17 | pub const JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION: i64 = -32015;
18 |
19 | pub const JSON_RPC_HBASE_ERROR: i64 = -32017;
20 | pub const JSON_RPC_SERVER_ERROR_SLOT_NOT_EPOCH_BOUNDARY: i64 = -32018;
21 | pub const JSON_RPC_SERVER_ERROR_EPOCH_REWARDS_PERIOD_ACTIVE: i64 = -32019;
22 | pub const JSON_RPC_SERVER_ERROR_METHOD_NOT_SUPPORTED: i64 = -32020;
23 |
24 | #[derive(Error, Debug)]
25 | pub enum RpcCustomError {
26 |     #[error("BlockNotAvailable")]
27 |     BlockNotAvailable { slot: Slot },
28 |     #[error("NodeUnhealthy")]
29 |     NodeUnhealthy { num_slots_behind: Option<Slot> },
30 |     #[error("LongTermStorageSlotSkipped")]
31 |     LongTermStorageSlotSkipped { slot: Slot },
32 |     #[error("TransactionHistoryNotAvailable")]
33 |     TransactionHistoryNotAvailable,
34 |     #[error("ScanError")]
35 |     ScanError { message: String },
36 |     #[error("UnsupportedTransactionVersion")]
37 |     UnsupportedTransactionVersion(u8),
38 |     #[error("HBaseError")]
39 |     HBaseError { message: String },
40 |     #[error("SlotNotEpochBoundary")]
41 |     SlotNotEpochBoundary { slot: Slot },
42 |     #[error("EpochRewardsPeriodActive")]
43 |     EpochRewardsPeriodActive {
44 |         slot: Slot,
45 |         current_block_height: u64,
46 |         rewards_complete_block_height: u64,
47 |     },
48 |     #[error("MethodNotSupported")]
49 |     MethodNotSupported(String),
50 | }
51 |
52 | #[derive(Debug, Serialize, Deserialize)]
53 | #[serde(rename_all = "camelCase")]
54 | pub struct NodeUnhealthyErrorData {
55 |     pub num_slots_behind: Option<Slot>,
56 | }
57 |
58 | impl From<EncodeError> for RpcCustomError {
59 |     fn from(err: EncodeError) -> Self {
60 |         match err {
61 |             EncodeError::UnsupportedTransactionVersion(version) => {
62 |                 Self::UnsupportedTransactionVersion(version)
63 |             }
64 |         }
65 |     }
66 | }
67 |
68 | impl From<RpcCustomError> for Error {
69 |     fn from(e: RpcCustomError) -> Self {
70 |         match e {
71 |             RpcCustomError::BlockNotAvailable { slot } => Self {
72 |                 code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE),
73 |                 message: format!("Block not available for slot {slot}"),
74 |                 data: None,
75 |             },
76 |             RpcCustomError::NodeUnhealthy { num_slots_behind } => Self {
77 |                 code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY),
78 |                 message: if let Some(num_slots_behind) = num_slots_behind {
79 |                     format!("Node is behind by {num_slots_behind} slots")
80 |                 } else {
81 |                     "Node is unhealthy".to_string()
82 |                 },
83 |                 data: Some(serde_json::json!(NodeUnhealthyErrorData {
84 |                     num_slots_behind
85 |                 })),
86 |             },
87 |
RpcCustomError::LongTermStorageSlotSkipped { slot } => Self { 88 | code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED), 89 | message: format!("Slot {slot} was skipped, or missing in long-term storage"), 90 | data: None, 91 | }, 92 | RpcCustomError::TransactionHistoryNotAvailable => Self { 93 | code: ErrorCode::ServerError( 94 | JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE, 95 | ), 96 | message: "Transaction history is not available from this node".to_string(), 97 | data: None, 98 | }, 99 | RpcCustomError::ScanError { message } => Self { 100 | code: ErrorCode::ServerError(JSON_RPC_SCAN_ERROR), 101 | message, 102 | data: None, 103 | }, 104 | RpcCustomError::UnsupportedTransactionVersion(version) => Self { 105 | code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION), 106 | message: format!( 107 | "Transaction version ({version}) is not supported by the requesting client. \ 108 | Please try the request again with the following configuration parameter: \ 109 | \"maxSupportedTransactionVersion\": {version}" 110 | ), 111 | data: None, 112 | }, 113 | RpcCustomError::HBaseError { message } => Self { 114 | code: ErrorCode::ServerError(JSON_RPC_HBASE_ERROR), 115 | message, 116 | data: None, 117 | }, 118 | RpcCustomError::SlotNotEpochBoundary { slot } => Self { 119 | code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_SLOT_NOT_EPOCH_BOUNDARY), 120 | message: format!("Slot {slot} is not an epoch boundary"), 121 | data: None, 122 | }, 123 | RpcCustomError::EpochRewardsPeriodActive { 124 | slot, 125 | current_block_height, 126 | rewards_complete_block_height, 127 | } => Self { 128 | code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_EPOCH_REWARDS_PERIOD_ACTIVE), 129 | message: format!( 130 | "Epoch rewards period is active. 
Slot: {slot}, current block height: {current_block_height}, rewards will be complete at block height: {rewards_complete_block_height}" 131 | ), 132 | data: None, 133 | }, 134 | RpcCustomError::MethodNotSupported(message) => Self { 135 | code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_METHOD_NOT_SUPPORTED), 136 | message, 137 | data: None, 138 | }, 139 | } 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /rpc/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::arithmetic_side_effects)] 2 | #[cfg(not(any(target_env = "msvc", target_os = "freebsd")))] 3 | use jemallocator::Jemalloc; 4 | use { 5 | solana_rpc::{ 6 | rpc_core::RpcNodeBuilder, 7 | cli, 8 | logging::redirect_stderr_to_file, 9 | request_processor::{JsonRpcConfig, RpcHBaseConfig}, 10 | }, 11 | clap::{ 12 | value_t_or_exit, 13 | }, 14 | log::*, 15 | std::{ 16 | fs, 17 | path::{ 18 | PathBuf 19 | }, 20 | process::exit, 21 | time::{ 22 | SystemTime, 23 | UNIX_EPOCH 24 | }, 25 | }, 26 | }; 27 | 28 | #[derive(PartialEq, Eq)] 29 | enum Output { 30 | None, 31 | Log, 32 | } 33 | 34 | #[cfg(not(any(target_env = "msvc", target_os = "freebsd")))] 35 | #[global_allocator] 36 | static GLOBAL: Jemalloc = Jemalloc; 37 | 38 | fn main() { 39 | let default_args = cli::DefaultStorageRpcArgs::new(); 40 | let version = solana_version::version!(); 41 | let matches = cli::storage_rpc_service(version, &default_args).get_matches(); 42 | 43 | let output = if matches.is_present("quiet") { 44 | Output::None 45 | } else { 46 | Output::Log 47 | }; 48 | 49 | let log_path = value_t_or_exit!(matches, "log_path", PathBuf); 50 | 51 | if !log_path.exists() { 52 | fs::create_dir(&log_path).unwrap_or_else(|err| { 53 | println!( 54 | "Error: Unable to create directory {}: {}", 55 | log_path.display(), 56 | err 57 | ); 58 | exit(1); 59 | }); 60 | } 61 | 62 | let rpc_service_log_symlink = log_path.join("service.log"); 63 | 64 | let logfile = if output != Output::Log { 65 | let rpc_service_log_with_timestamp = format!( 66 | "service-{}.log", 67 | SystemTime::now() 68 | .duration_since(UNIX_EPOCH) 69 | .unwrap() 70 | .as_millis() 71 | ); 72 | 73 | let _ = fs::remove_file(&rpc_service_log_symlink); 74 | symlink::symlink_file(&rpc_service_log_with_timestamp, &rpc_service_log_symlink).unwrap(); 75 | 76 | Some( 77 | log_path 78 | .join(rpc_service_log_with_timestamp) 79 | .into_os_string() 80 | .into_string() 81 | .unwrap(), 82 | ) 83 | } else { 84 | None 85 | }; 86 | let _logger_thread = redirect_stderr_to_file(logfile); 87 | 88 | info!("solana-storage-rpc {}", solana_version::version!()); 89 | info!("Starting storage rpc service with: {:#?}", std::env::args_os()); 90 | 91 | let rpc_port = value_t_or_exit!(matches, "rpc_port", u16); 92 | 93 | let bind_address = matches.value_of("bind_address").map(|bind_address| { 94 | solana_net_utils::parse_host(bind_address).unwrap_or_else(|err| { 95 | eprintln!("Failed to parse --bind-address: {err}"); 96 | exit(1); 97 | }) 98 | }); 99 | 100 | // let full_api = matches.is_present("full_rpc_api"); 101 | 102 | let mut builder = RpcNodeBuilder::default(); 103 | 104 | let rpc_hbase_config = Some(RpcHBaseConfig { 105 | enable_hbase_ledger_upload: false, 106 | hbase_address: value_t_or_exit!(matches, "rpc_hbase_address", String), 107 | namespace: if matches.is_present("hbase_namespace") { 108 | Some(value_t_or_exit!( 109 | matches, 110 | "hbase_namespace", 111 | String 112 | )) 113 | } else { 114 | None 115 | }, 116 | hdfs_url: 
if matches.is_present("use_webhdfs") { 117 | // Not required when using WebHDFS; leave default from storage-hbase 118 | solana_storage_hbase::DEFAULT_HDFS_URL.to_string() 119 | } else { 120 | value_t_or_exit!(matches, "hdfs_url", String) 121 | }, 122 | hdfs_path: value_t_or_exit!(matches, "hdfs_path", String), 123 | // hdfs_url: if matches.is_present("hdfs_url") { 124 | // Some(value_t_or_exit!( 125 | // matches, 126 | // "hdfs_url", 127 | // String 128 | // )) 129 | // } else { 130 | // None 131 | // }, 132 | // hdfs_path: if matches.is_present("hdfs_path") { 133 | // Some(value_t_or_exit!( 134 | // matches, 135 | // "hdfs_path", 136 | // String 137 | // )) 138 | // } else { 139 | // None 140 | // }, 141 | fallback_hbase_address: if matches.is_present("fallback_hbase_address") { 142 | Some(value_t_or_exit!( 143 | matches, 144 | "fallback_hbase_address", 145 | String 146 | )) 147 | } else { 148 | None 149 | }, 150 | timeout: None, 151 | // block_cache: if matches.is_present("block_cache") { 152 | // Some(value_t_or_exit!( 153 | // matches, 154 | // "block_cache", 155 | // NonZeroUsize 156 | // )) 157 | // } else { 158 | // None 159 | // }, 160 | use_md5_row_key_salt: matches.is_present("use_md5_row_key_salt"), 161 | hash_tx_full_row_keys: matches.is_present("hash_tx_full_row_keys"), 162 | enable_full_tx_cache: matches.is_present("enable_full_tx_cache"), 163 | disable_tx_fallback: matches.is_present("disable_tx_fallback"), 164 | cache_address: if matches.is_present("cache_address") { 165 | Some(value_t_or_exit!( 166 | matches, 167 | "cache_address", 168 | String 169 | )) 170 | } else { 171 | None 172 | }, 173 | use_block_car_files: !matches.is_present("disable_block_car_files"), 174 | use_hbase_blocks_meta: matches.is_present("use_hbase_blocks_meta"), 175 | use_webhdfs: matches.is_present("use_webhdfs"), 176 | webhdfs_url: if matches.is_present("use_webhdfs") { 177 | Some(value_t_or_exit!(matches, "webhdfs_url", String)) 178 | } else { None }, 179 | }); 180 | 181 | builder.rpc_port(rpc_port); 182 | 183 | builder.rpc_config(JsonRpcConfig { 184 | enable_rpc_transaction_history: true, 185 | rpc_hbase_config, 186 | // full_api, 187 | obsolete_v1_7_api: matches.is_present("obsolete_v1_7_rpc_api"), 188 | rpc_threads: value_t_or_exit!(matches, "rpc_threads", usize), 189 | rpc_niceness_adj: value_t_or_exit!(matches, "rpc_niceness_adj", i8), 190 | max_request_body_size: Some(value_t_or_exit!( 191 | matches, 192 | "rpc_max_request_body_size", 193 | usize 194 | )), 195 | max_get_blocks_range: if matches.is_present("max_get_blocks_range") { 196 | Some(value_t_or_exit!( 197 | matches, 198 | "max_get_blocks_range", 199 | u64 200 | )) 201 | } else { 202 | None 203 | }, 204 | genesis_config_path: matches.value_of("genesis_config_path").map(|s| s.to_string()), 205 | ..JsonRpcConfig::default_for_storage_rpc() 206 | }); 207 | 208 | if let Some(bind_address) = bind_address { 209 | builder.bind_ip_addr(bind_address); 210 | } 211 | 212 | match builder.start() { 213 | Ok(rpc_node) => { 214 | rpc_node.join(); 215 | } 216 | Err(err) => { 217 | println!("Error: failed to start storage rpc service: {err}"); 218 | exit(1); 219 | } 220 | } 221 | } -------------------------------------------------------------------------------- /rpc/src/middleware.rs: -------------------------------------------------------------------------------- 1 | use { 2 | solana_metrics::Metrics, 3 | jsonrpc_core::{ 4 | Middleware, 5 | Call, 6 | Output, 7 | Response, 8 | Metadata, 9 | Error, 10 | Failure, 11 | }, 12 | jsonrpc_http_server::{ 13 | 
hyper,
14 |         RequestMiddleware,
15 |         RequestMiddlewareAction,
16 |     },
17 |     prometheus::{
18 |         TextEncoder,
19 |         Encoder,
20 |     },
21 |     hyper::{
22 |         Request, Body, StatusCode
23 |     },
24 |     std::{
25 |         path::{
26 |             PathBuf
27 |         },
28 |         sync::{
29 |             Arc,
30 |         },
31 |         future::Future,
32 |         panic::AssertUnwindSafe,
33 |     },
34 |     futures::future::{Either, FutureExt, BoxFuture},
35 |     serde_json::Value,
36 | };
37 |
38 | #[derive(Clone)]
39 | pub struct MetricsMiddleware {
40 |     metrics: Arc<Metrics>,
41 | }
42 |
43 | impl MetricsMiddleware {
44 |     pub fn new(metrics: Arc<Metrics>) -> Self {
45 |         Self { metrics }
46 |     }
47 | }
48 |
49 | impl<M: Metadata> Middleware<M> for MetricsMiddleware {
50 |     type Future = BoxFuture<'static, Option<Response>>;
51 |     type CallFuture = BoxFuture<'static, Option<Output>>;
52 |
53 |     fn on_call<F, X>(&self, call: Call, meta: M, next: F) -> Either<Self::CallFuture, X>
54 |     where
55 |         F: FnOnce(Call, M) -> X + Send + Sync,
56 |         X: Future<Output = Option<Output>> + Send + 'static,
57 |     {
58 |         info!("Request on_call executed");
59 |         if let Call::MethodCall(ref request) = call {
60 |             let method = request.method.clone();
61 |             let metrics = self.metrics.clone();
62 |
63 |             // Ignore getTransaction in middleware; it'll be tracked in another module
64 |             if method != "getTransaction" {
65 |                 metrics.increment_total_requests(&method);
66 |             }
67 |
68 |             // Record request duration for all methods
69 |             let timer = metrics.record_duration(&method);
70 |
71 |             // Mark thread as started
72 |             metrics.thread_started();
73 |
74 |             let request_id = request.id.clone();
75 |             let request_jsonrpc = request.jsonrpc.clone();
76 |
77 |             let future = AssertUnwindSafe(next(call, meta))
78 |                 .catch_unwind()
79 |                 .then(move |result| {
80 |                     timer.observe_duration();
81 |
82 |                     // Mark thread as stopped
83 |                     metrics.thread_stopped();
84 |
85 |                     match result {
86 |                         Err(_) => {
87 |                             let error = Error::new(jsonrpc_core::ErrorCode::InternalError);
88 |                             debug!("Request panicked with error: {:?}", error);
89 |
90 |                             let failure_output = Output::Failure(Failure {
91 |                                 jsonrpc: request_jsonrpc,
92 |                                 error,
93 |                                 id: request_id,
94 |                             });
95 |
96 |                             futures::future::ready(Some(failure_output))
97 |                         }
98 |
99 |                         Ok(Some(output)) => {
100 |                             futures::future::ready(Some(output))
101 |                         }
102 |                         Ok(None) => {
103 |                             let error = Error::new(jsonrpc_core::ErrorCode::InternalError);
104 |                             debug!("Request failed with error: {:?}", error);
105 |
106 |                             let failure_output = Output::Failure(Failure {
107 |                                 jsonrpc: request_jsonrpc,
108 |                                 error,
109 |                                 id: request_id,
110 |                             });
111 |
112 |                             futures::future::ready(Some(failure_output))
113 |                         }
114 |                     }
115 |                 });
116 |
117 |             Either::Left(Box::pin(future))
118 |         } else {
119 |             Either::Right(next(call, meta))
120 |         }
121 |     }
122 | }
123 |
124 | pub struct RpcRequestMiddleware {
125 |     // log_path: PathBuf,
126 | }
127 |
128 | impl RpcRequestMiddleware {
129 |     pub fn new(
130 |         _log_path: PathBuf,
131 |     ) -> Self {
132 |         Self {
133 |             // log_path,
134 |         }
135 |     }
136 |
137 |     #[allow(dead_code)]
138 |     fn internal_server_error() -> hyper::Response<hyper::Body> {
139 |         hyper::Response::builder()
140 |             .status(hyper::StatusCode::INTERNAL_SERVER_ERROR)
141 |             .body(hyper::Body::empty())
142 |             .unwrap()
143 |     }
144 |
145 |     fn health_check(&self) -> &'static str {
146 |         let response = "ok";
147 |         info!("health check: {}", response);
148 |         response
149 |     }
150 | }
151 |
152 | impl RequestMiddleware for RpcRequestMiddleware {
153 |     fn on_request(&self, request: Request<Body>) -> RequestMiddlewareAction {
154 |         trace!("request uri: {}", request.uri());
155 |
156 |         if let Some(result) = process_rest(request.uri().path()) {
157 |
hyper::Response::builder()
158 |                 .status(StatusCode::OK)
159 |                 .body(Body::from(result))
160 |                 .unwrap()
161 |                 .into()
162 |         } else if request.uri().path() == "/health" {
163 |             hyper::Response::builder()
164 |                 .status(StatusCode::OK)
165 |                 .body(Body::from(self.health_check()))
166 |                 .unwrap()
167 |                 .into()
168 |         } else {
169 |             request.into()
170 |         }
171 |
172 |         //
173 |         // This gets the number of requests in a batch.
174 |         //
175 |
176 |         // let (parts, body) = request.into_parts();
177 |         //
178 |         // let body_bytes_future = async {
179 |         //     match to_bytes(body).await {
180 |         //         Ok(bytes) => {
181 |         //             let count = count_jsonrpc_requests(&bytes);
182 |         //             debug!("Received JSON-RPC batch size: {}", count);
183 |         //             bytes
184 |         //         }
185 |         //         Err(e) => {
186 |         //             error!("Error reading body: {}", e);
187 |         //             hyper::body::Bytes::new()
188 |         //         }
189 |         //     }
190 |         // };
191 |         //
192 |         // let new_body = Body::wrap_stream(
193 |         //     stream::once(body_bytes_future)
194 |         //         .map(|bytes| Ok::<_, hyper::Error>(bytes))
195 |         // );
196 |         //
197 |         // Request::from_parts(parts, new_body).into()
198 |     }
199 | }
200 |
201 | #[allow(dead_code)]
202 | fn count_jsonrpc_requests(body: &hyper::body::Bytes) -> usize {
203 |     match serde_json::from_slice::<Value>(body) {
204 |         Ok(Value::Array(batch)) => batch.len(), // JSON-RPC batch request
205 |         Ok(_) => 1, // Single JSON-RPC request
206 |         Err(_) => {
207 |             error!("Failed to parse JSON-RPC request");
208 |             0 // Invalid JSON
209 |         }
210 |     }
211 | }
212 |
213 | fn process_rest(path: &str) -> Option<String> {
214 |     match path {
215 |         "/metrics" => {
216 |             let encoder = TextEncoder::new();
217 |
218 |             let metric_families = prometheus::gather();
219 |
220 |             let mut buffer = Vec::new();
221 |             encoder.encode(&metric_families, &mut buffer).unwrap();
222 |
223 |             let metrics_output = String::from_utf8(buffer).unwrap();
224 |
225 |             Some(metrics_output)
226 |         }
227 |         _ => None,
228 |     }
229 | }
--------------------------------------------------------------------------------
/storage-proto/src/lib.rs:
--------------------------------------------------------------------------------
1 |
2 | use {
3 |     serde::{Deserialize, Serialize},
4 |     solana_account_decoder_client_types::{
5 |         token::{
6 |             real_number_string_trimmed,
7 |             UiTokenAmount
8 |         },
9 |     },
10 |     solana_serde::{
11 |         default_on_eof,
12 |     },
13 |     solana_clock::{
14 |         UnixTimestamp,
15 |     },
16 |     solana_message::{
17 |         v0::LoadedAddresses,
18 |     },
19 |     solana_transaction_error::TransactionResult as Result,
20 |     solana_transaction_context::{
21 |         TransactionReturnData,
22 |     },
23 |     solana_reward_info::RewardType,
24 |     solana_transaction_status_client_types::{
25 |         InnerInstructions,
26 |         Reward,
27 |         TransactionStatusMeta,
28 |         TransactionTokenBalance,
29 |     },
30 |     std::str::FromStr,
31 | };
32 |
33 | pub mod convert;
34 |
35 | pub type StringAmount = String;
36 |
37 | pub type StoredExtendedRewards = Vec<StoredExtendedReward>;
38 |
39 | #[derive(Serialize, Deserialize)]
40 | pub struct StoredExtendedReward {
41 |     pubkey: String,
42 |     lamports: i64,
43 |     #[serde(deserialize_with = "default_on_eof")]
44 |     post_balance: u64,
45 |     #[serde(deserialize_with = "default_on_eof")]
46 |     reward_type: Option<RewardType>,
47 |     #[serde(deserialize_with = "default_on_eof")]
48 |     commission: Option<u8>,
49 | }
50 |
51 | impl From<StoredExtendedReward> for Reward {
52 |     fn from(value: StoredExtendedReward) -> Self {
53 |         let StoredExtendedReward {
54 |             pubkey,
55 |             lamports,
56 |             post_balance,
57 |             reward_type,
58 |             commission,
59 |         } = value;
60 |         Self {
61 |             pubkey,
62 |             lamports,
63 |             post_balance,
64 |
reward_type,
65 |             commission,
66 |         }
67 |     }
68 | }
69 |
70 | impl From<Reward> for StoredExtendedReward {
71 |     fn from(value: Reward) -> Self {
72 |         let Reward {
73 |             pubkey,
74 |             lamports,
75 |             post_balance,
76 |             reward_type,
77 |             commission,
78 |         } = value;
79 |         Self {
80 |             pubkey,
81 |             lamports,
82 |             post_balance,
83 |             reward_type,
84 |             commission,
85 |         }
86 |     }
87 | }
88 |
89 | #[derive(Serialize, Deserialize)]
90 | pub struct StoredTokenAmount {
91 |     pub ui_amount: f64,
92 |     pub decimals: u8,
93 |     pub amount: StringAmount,
94 | }
95 |
96 | impl From<StoredTokenAmount> for UiTokenAmount {
97 |     fn from(value: StoredTokenAmount) -> Self {
98 |         let StoredTokenAmount {
99 |             ui_amount,
100 |             decimals,
101 |             amount,
102 |         } = value;
103 |         let ui_amount_string =
104 |             real_number_string_trimmed(u64::from_str(&amount).unwrap_or(0), decimals);
105 |         Self {
106 |             ui_amount: Some(ui_amount),
107 |             decimals,
108 |             amount,
109 |             ui_amount_string,
110 |         }
111 |     }
112 | }
113 |
114 | impl From<UiTokenAmount> for StoredTokenAmount {
115 |     fn from(value: UiTokenAmount) -> Self {
116 |         let UiTokenAmount {
117 |             ui_amount,
118 |             decimals,
119 |             amount,
120 |             ..
121 |         } = value;
122 |         Self {
123 |             ui_amount: ui_amount.unwrap_or(0.0),
124 |             decimals,
125 |             amount,
126 |         }
127 |     }
128 | }
129 |
130 | #[derive(Serialize, Deserialize)]
131 | pub struct StoredTransactionTokenBalance {
132 |     pub account_index: u8,
133 |     pub mint: String,
134 |     pub ui_token_amount: StoredTokenAmount,
135 |     #[serde(deserialize_with = "default_on_eof")]
136 |     pub owner: String,
137 |     #[serde(deserialize_with = "default_on_eof")]
138 |     pub program_id: String,
139 | }
140 |
141 | impl From<StoredTransactionTokenBalance> for TransactionTokenBalance {
142 |     fn from(value: StoredTransactionTokenBalance) -> Self {
143 |         let StoredTransactionTokenBalance {
144 |             account_index,
145 |             mint,
146 |             ui_token_amount,
147 |             owner,
148 |             program_id,
149 |         } = value;
150 |         Self {
151 |             account_index,
152 |             mint,
153 |             ui_token_amount: ui_token_amount.into(),
154 |             owner,
155 |             program_id,
156 |         }
157 |     }
158 | }
159 |
160 | impl From<TransactionTokenBalance> for StoredTransactionTokenBalance {
161 |     fn from(value: TransactionTokenBalance) -> Self {
162 |         let TransactionTokenBalance {
163 |             account_index,
164 |             mint,
165 |             ui_token_amount,
166 |             owner,
167 |             program_id,
168 |         } = value;
169 |         Self {
170 |             account_index,
171 |             mint,
172 |             ui_token_amount: ui_token_amount.into(),
173 |             owner,
174 |             program_id,
175 |         }
176 |     }
177 | }
178 |
179 | #[derive(Serialize, Deserialize)]
180 | pub struct StoredTransactionStatusMeta {
181 |     pub status: Result<()>,
182 |     pub fee: u64,
183 |     pub pre_balances: Vec<u64>,
184 |     pub post_balances: Vec<u64>,
185 |     #[serde(deserialize_with = "default_on_eof")]
186 |     pub inner_instructions: Option<Vec<InnerInstructions>>,
187 |     #[serde(deserialize_with = "default_on_eof")]
188 |     pub log_messages: Option<Vec<String>>,
189 |     #[serde(deserialize_with = "default_on_eof")]
190 |     pub pre_token_balances: Option<Vec<StoredTransactionTokenBalance>>,
191 |     #[serde(deserialize_with = "default_on_eof")]
192 |     pub post_token_balances: Option<Vec<StoredTransactionTokenBalance>>,
193 |     #[serde(deserialize_with = "default_on_eof")]
194 |     pub rewards: Option<Vec<StoredExtendedReward>>,
195 |     #[serde(deserialize_with = "default_on_eof")]
196 |     pub return_data: Option<TransactionReturnData>,
197 |     #[serde(deserialize_with = "default_on_eof")]
198 |     pub compute_units_consumed: Option<u64>,
199 |     #[serde(deserialize_with = "default_on_eof")]
200 |     pub cost_units: Option<u64>,
201 | }
202 |
203 | impl From<StoredTransactionStatusMeta> for TransactionStatusMeta {
204 |     fn from(value: StoredTransactionStatusMeta) -> Self {
205 |         let StoredTransactionStatusMeta {
206 |             status,
207 |             fee,
208 |             pre_balances,
209 |             post_balances,
210 |
inner_instructions, 211 | log_messages, 212 | pre_token_balances, 213 | post_token_balances, 214 | rewards, 215 | return_data, 216 | compute_units_consumed, 217 | cost_units, 218 | } = value; 219 | Self { 220 | status, 221 | fee, 222 | pre_balances, 223 | post_balances, 224 | inner_instructions, 225 | log_messages, 226 | pre_token_balances: pre_token_balances 227 | .map(|balances| balances.into_iter().map(|balance| balance.into()).collect()), 228 | post_token_balances: post_token_balances 229 | .map(|balances| balances.into_iter().map(|balance| balance.into()).collect()), 230 | rewards: rewards 231 | .map(|rewards| rewards.into_iter().map(|reward| reward.into()).collect()), 232 | loaded_addresses: LoadedAddresses::default(), 233 | return_data, 234 | compute_units_consumed, 235 | cost_units, 236 | } 237 | } 238 | } 239 | 240 | impl TryFrom<TransactionStatusMeta> for StoredTransactionStatusMeta { 241 | type Error = bincode::Error; 242 | fn try_from(value: TransactionStatusMeta) -> std::result::Result<Self, Self::Error> { 243 | let TransactionStatusMeta { 244 | status, 245 | fee, 246 | pre_balances, 247 | post_balances, 248 | inner_instructions, 249 | log_messages, 250 | pre_token_balances, 251 | post_token_balances, 252 | rewards, 253 | loaded_addresses, 254 | return_data, 255 | compute_units_consumed, 256 | cost_units, 257 | } = value; 258 | 259 | if !loaded_addresses.is_empty() { 260 | // Deprecated bincode serialized status metadata doesn't support 261 | // loaded addresses. 262 | return Err( 263 | bincode::ErrorKind::Custom("Bincode serialization is deprecated".into()).into(), 264 | ); 265 | } 266 | 267 | Ok(Self { 268 | status, 269 | fee, 270 | pre_balances, 271 | post_balances, 272 | inner_instructions, 273 | log_messages, 274 | pre_token_balances: pre_token_balances 275 | .map(|balances| balances.into_iter().map(|balance| balance.into()).collect()), 276 | post_token_balances: post_token_balances 277 | .map(|balances| balances.into_iter().map(|balance| balance.into()).collect()), 278 | rewards: rewards 279 | .map(|rewards| rewards.into_iter().map(|reward| reward.into()).collect()), 280 | return_data, 281 | compute_units_consumed, 282 | cost_units, 283 | }) 284 | } 285 | } 286 | 287 | #[derive(Serialize, Deserialize, Debug)] 288 | pub struct StoredCarIndexEntry { 289 | pub slot: u64, 290 | pub block_hash: String, 291 | pub offset: u64, 292 | pub length: u64, 293 | pub start_slot: u64, 294 | pub end_slot: u64, 295 | pub timestamp: UnixTimestamp, 296 | pub previous_block_hash: String, 297 | pub block_height: Option<u64>, 298 | pub block_time: Option<UnixTimestamp>, 299 | } 300 | -------------------------------------------------------------------------------- /rpc/src/rpc_service.rs: -------------------------------------------------------------------------------- 1 | 2 | use { 3 | crate::{ 4 | rpc::{ 5 | storage_rpc_deprecated_v1_7::*, 6 | storage_rpc_full::*, 7 | storage_rpc_minimal::*, 8 | *, 9 | }, 10 | request_processor::{ 11 | *, 12 | }, 13 | middleware::{ 14 | MetricsMiddleware, 15 | RpcRequestMiddleware, 16 | }, 17 | }, 18 | solana_metrics::Metrics, 19 | crossbeam_channel::unbounded, 20 | jsonrpc_core::{ 21 | MetaIoHandler, 22 | }, 23 | jsonrpc_http_server::{ 24 | hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, 25 | ServerBuilder, 26 | }, 27 | solana_storage_adapter::LedgerStorageAdapter, 28 | solana_perf::thread::renice_this_thread, 29 | solana_validator_exit::{ 30 | Exit, 31 | }, 32 | 33 | std::{ 34 | net::SocketAddr, 35 | path::{ 36 | Path, 37 | }, 38 | sync::{ 39 | Arc, RwLock, 40 | }, 41 | thread::{self, Builder,
JoinHandle}, 42 | }, 43 | 44 | }; 45 | 46 | pub struct JsonRpcService { 47 | thread_hdl: JoinHandle<()>, 48 | 49 | #[cfg(test)] 50 | pub request_processor: JsonRpcRequestProcessor, 51 | 52 | close_handle: Option<CloseHandle>, 53 | } 54 | 55 | impl JsonRpcService { 56 | pub fn new( 57 | rpc_addr: SocketAddr, 58 | config: JsonRpcConfig, 59 | log_path: &Path, 60 | rpc_service_exit: Arc<RwLock<Exit>>, 61 | metrics: Arc<Metrics>, 62 | ) -> Result<Self, String> { 63 | info!("rpc bound to {:?}", rpc_addr); 64 | info!("rpc configuration: {:?}", config); 65 | let rpc_threads = 1.max(config.rpc_threads); 66 | let rpc_niceness_adj = config.rpc_niceness_adj; 67 | 68 | let runtime = Arc::new( 69 | tokio::runtime::Builder::new_multi_thread() 70 | .worker_threads(rpc_threads) 71 | .on_thread_start( 72 | move || { 73 | renice_this_thread(rpc_niceness_adj).unwrap(); 74 | } 75 | ) 76 | .thread_name("solRpcEl") 77 | .enable_all() 78 | .build() 79 | .expect("Runtime"), 80 | ); 81 | 82 | let hbase_ledger_storage = 83 | if let Some(RpcHBaseConfig { 84 | enable_hbase_ledger_upload: false, 85 | ref hbase_address, 86 | ref namespace, 87 | ref hdfs_url, 88 | ref hdfs_path, 89 | timeout, 90 | // block_cache, 91 | use_md5_row_key_salt, 92 | hash_tx_full_row_keys, 93 | enable_full_tx_cache, 94 | disable_tx_fallback, 95 | ref cache_address, 96 | use_block_car_files, 97 | use_hbase_blocks_meta, 98 | use_webhdfs, 99 | ref webhdfs_url, 100 | .. 101 | }) = config.rpc_hbase_config 102 | { 103 | let hbase_config = solana_storage_hbase::LedgerStorageConfig { 104 | read_only: true, 105 | timeout, 106 | address: hbase_address.clone(), 107 | namespace: namespace.clone(), 108 | hdfs_url: hdfs_url.clone(), 109 | hdfs_path: hdfs_path.clone(), 110 | // block_cache, 111 | use_md5_row_key_salt, 112 | hash_tx_full_row_keys, 113 | enable_full_tx_cache, 114 | disable_tx_fallback, 115 | cache_address: cache_address.clone(), 116 | use_block_car_files, 117 | use_hbase_blocks_meta, 118 | use_webhdfs, 119 | webhdfs_url: webhdfs_url.clone(), 120 | }; 121 | runtime 122 | .block_on(solana_storage_hbase::LedgerStorage::new_with_config(hbase_config, metrics.clone())) 123 | .map(|hbase_ledger_storage| { 124 | info!("HBase ledger storage initialized"); 125 | Some(Box::new(hbase_ledger_storage) as Box<dyn LedgerStorageAdapter>) 126 | }) 127 | .unwrap_or_else(|err| { 128 | error!("Failed to initialize HBase ledger storage: {:?}", err); 129 | None 130 | }) 131 | } else { 132 | None 133 | }; 134 | 135 | let fallback_ledger_storage = 136 | if let Some(RpcHBaseConfig { 137 | enable_hbase_ledger_upload: false, 138 | ref fallback_hbase_address, 139 | ref namespace, 140 | ref hdfs_url, 141 | ref hdfs_path, 142 | timeout, 143 | // block_cache, 144 | use_md5_row_key_salt, 145 | hash_tx_full_row_keys, 146 | enable_full_tx_cache, 147 | ref cache_address, 148 | use_block_car_files, 149 | use_hbase_blocks_meta, 150 | use_webhdfs, 151 | ref webhdfs_url, 152 | ..
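// Editorial note on the destructuring above: matching the literal field
// `enable_hbase_ledger_upload: false` means this branch only fires when
// upload is disabled, i.e. the service builds a read-only storage client.
// A minimal sketch of the same idiom (hypothetical types):
//
//     struct Cfg { read_only: bool, addr: String }
//     let cfg = Some(Cfg { read_only: true, addr: "hbase:9090".into() });
//     if let Some(Cfg { read_only: true, ref addr, .. }) = cfg {
//         // reached only when read_only == true
//         println!("connecting read-only to {addr}");
//     }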
153 | }) = config.rpc_hbase_config 154 | { 155 | if let Some(fallback_address) = fallback_hbase_address { 156 | let fallback_config = solana_storage_hbase::LedgerStorageConfig { 157 | read_only: true, 158 | timeout, 159 | address: fallback_address.clone(), 160 | namespace: namespace.clone(), 161 | hdfs_url: hdfs_url.clone(), 162 | hdfs_path: hdfs_path.clone(), 163 | // block_cache, 164 | use_md5_row_key_salt, 165 | hash_tx_full_row_keys, 166 | enable_full_tx_cache, 167 | disable_tx_fallback: false, 168 | cache_address: cache_address.clone(), 169 | use_block_car_files, 170 | use_hbase_blocks_meta, 171 | use_webhdfs, 172 | webhdfs_url: webhdfs_url.clone(), 173 | }; 174 | runtime 175 | .block_on(solana_storage_hbase::LedgerStorage::new_with_config(fallback_config, metrics.clone())) 176 | .map(|fallback_ledger_storage| { 177 | info!("Fallback ledger storage initialized"); 178 | Some(Box::new(fallback_ledger_storage) as Box<dyn LedgerStorageAdapter>) 179 | }) 180 | .unwrap_or_else(|err| { 181 | error!("Failed to initialize Fallback ledger storage: {:?}", err); 182 | None 183 | }) 184 | } else { 185 | None 186 | } 187 | } else { 188 | None 189 | }; 190 | 191 | let full_api = config.full_api; 192 | let max_request_body_size = config 193 | .max_request_body_size 194 | .unwrap_or(MAX_REQUEST_BODY_SIZE); 195 | // let (request_processor, _receiver) = JsonRpcRequestProcessor::new( 196 | let request_processor = JsonRpcRequestProcessor::new( 197 | config, 198 | rpc_service_exit.clone(), 199 | hbase_ledger_storage, 200 | fallback_ledger_storage, 201 | ); 202 | 203 | #[cfg(test)] 204 | let test_request_processor = request_processor.clone(); 205 | 206 | let log_path = log_path.to_path_buf(); 207 | 208 | let (close_handle_sender, close_handle_receiver) = unbounded(); 209 | let thread_hdl = Builder::new() 210 | .name("solJsonRpcSvc".to_string()) 211 | .spawn({ 212 | move || { 213 | renice_this_thread(rpc_niceness_adj).unwrap(); 214 | 215 | let metrics_middleware = MetricsMiddleware::new(metrics.clone()); 216 | metrics.idle_threads_counter.set(rpc_threads as i64); 217 | 218 | // Create the MetaIoHandler and apply the middleware 219 | let mut io = MetaIoHandler::with_middleware(metrics_middleware); 220 | 221 | io.extend_with(storage_rpc_minimal::MinimalImpl.to_delegate()); 222 | if full_api { 223 | io.extend_with(storage_rpc_full::FullImpl.to_delegate()); 224 | io.extend_with(storage_rpc_deprecated_v1_7::DeprecatedV1_7Impl.to_delegate()); 225 | } 226 | 227 | let request_middleware = RpcRequestMiddleware::new( 228 | log_path, 229 | ); 230 | let server = ServerBuilder::with_meta_extractor( 231 | io, 232 | move |_req: &hyper::Request<hyper::Body>| request_processor.clone(), 233 | ) 234 | .event_loop_executor(runtime.handle().clone()) 235 | .threads(1) 236 | .cors(DomainsValidation::AllowOnly(vec![ 237 | AccessControlAllowOrigin::Any, 238 | ])) 239 | .cors_max_age(86400) 240 | .request_middleware(request_middleware) 241 | .max_request_body_size(max_request_body_size) 242 | .start_http(&rpc_addr); 243 | 244 | if let Err(e) = server { 245 | warn!( 246 | "JSON RPC service unavailable error: {:?}.
\n\ 247 | Also, check that port {} is not already in use by another application", 248 | e, 249 | rpc_addr.port() 250 | ); 251 | close_handle_sender.send(Err(e.to_string())).unwrap(); 252 | return; 253 | } 254 | 255 | let server = server.unwrap(); 256 | close_handle_sender.send(Ok(server.close_handle())).unwrap(); 257 | server.wait(); 258 | } 259 | }) 260 | .unwrap(); 261 | 262 | let close_handle = close_handle_receiver.recv().unwrap()?; 263 | let close_handle_ = close_handle.clone(); 264 | rpc_service_exit 265 | .write() 266 | .unwrap() 267 | .register_exit(Box::new(move || close_handle_.close())); 268 | Ok(Self { 269 | thread_hdl, 270 | #[cfg(test)] 271 | request_processor: test_request_processor, 272 | close_handle: Some(close_handle), 273 | }) 274 | } 275 | 276 | pub fn exit(&mut self) { 277 | if let Some(c) = self.close_handle.take() { 278 | c.close() 279 | } 280 | } 281 | 282 | pub fn join(self) -> thread::Result<()> { 283 | self.thread_hdl.join() 284 | } 285 | } 286 | 287 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /rpc/src/genesis_unpack.rs: -------------------------------------------------------------------------------- 1 | use { 2 | bzip2::bufread::BzDecoder, 3 | log::*, 4 | // rand::{thread_rng, Rng}, 5 | solana_genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE, DEFAULT_GENESIS_FILE}, 6 | std::{ 7 | // collections::HashMap, 8 | fs::{self, File}, 9 | io::{BufReader, Read}, 10 | path::{ 11 | Component::{self, CurDir, Normal}, 12 | Path, PathBuf, 13 | }, 14 | time::Instant, 15 | }, 16 | tar::{ 17 | Archive, 18 | EntryType::{Directory, GNUSparse, Regular}, 19 | }, 20 | thiserror::Error, 21 | }; 22 | 23 | #[derive(Error, Debug)] 24 | pub enum UnpackError { 25 | #[error("IO error: {0}")] 26 | Io(#[from] std::io::Error), 27 | #[error("Archive error: {0}")] 28 | Archive(String), 29 | } 30 | 31 | const MAX_GENESIS_ARCHIVE_UNPACKED_COUNT: u64 = 100; 32 | 33 | pub type Result<T> = std::result::Result<T, UnpackError>; 34 | 35 | #[derive(Error, Debug)] 36 | pub enum OpenGenesisConfigError { 37 | #[error("unpack error: {0}")] 38 | Unpack(#[from] UnpackError), 39 | #[error("Genesis load error: {0}")] 40 | Load(#[from] std::io::Error), 41 | } 42 | 43 | pub fn open_genesis_config( 44 | ledger_path: &Path, 45 | max_genesis_archive_unpacked_size: u64, 46 | ) -> std::result::Result<GenesisConfig, OpenGenesisConfigError> { 47 | match GenesisConfig::load(ledger_path) { 48 | Ok(genesis_config) => Ok(genesis_config), 49 | Err(load_err) => { 50 | warn!( 51 | "Failed to load genesis_config at {ledger_path:?}: {load_err}. \ 52 | Will attempt to unpack genesis archive and then retry loading." 53 | ); 54 | 55 | let genesis_package = ledger_path.join(DEFAULT_GENESIS_ARCHIVE); 56 | unpack_genesis_archive( 57 | &genesis_package, 58 | ledger_path, 59 | max_genesis_archive_unpacked_size, 60 | )?; 61 | GenesisConfig::load(ledger_path).map_err(OpenGenesisConfigError::Load) 62 | } 63 | } 64 | } 65 | 66 | pub fn unpack_genesis_archive( 67 | archive_filename: &Path, 68 | destination_dir: &Path, 69 | max_genesis_archive_unpacked_size: u64, 70 | ) -> std::result::Result<(), UnpackError> { 71 | info!("Extracting {:?}...", archive_filename); 72 | let extract_start = Instant::now(); 73 | 74 | fs::create_dir_all(destination_dir)?; 75 | let tar_bz2 = File::open(archive_filename)?; 76 | let tar = BzDecoder::new(BufReader::new(tar_bz2)); 77 | let mut archive = Archive::new(tar); 78 | unpack_genesis( 79 | &mut archive, 80 | destination_dir, 81 | max_genesis_archive_unpacked_size, 82 | )?; 83 | info!( 84 | "Extracted {:?} in {:?}", 85 | archive_filename, 86 | Instant::now().duration_since(extract_start) 87 | ); 88 | Ok(()) 89 | } 90 | 91 | fn unpack_genesis<A: Read>( 92 | archive: &mut Archive<A>, 93 | unpack_dir: &Path, 94 | max_genesis_archive_unpacked_size: u64, 95 | ) -> Result<()> { 96 | unpack_archive( 97 | archive, 98 | max_genesis_archive_unpacked_size, 99 | max_genesis_archive_unpacked_size, 100 | MAX_GENESIS_ARCHIVE_UNPACKED_COUNT, 101 | |p, k| is_valid_genesis_archive_entry(unpack_dir, p, k), 102 | |_| {}, 103 | ) 104 | } 105 | 106 | fn is_valid_genesis_archive_entry<'a>( 107 | unpack_dir: &'a Path, 108 | parts: &[&str], 109 | kind: tar::EntryType, 110 | ) -> UnpackPath<'a> { 111 | trace!("validating: {:?} {:?}", parts, kind); 112 | #[allow(clippy::match_like_matches_macro)] 113 | match (parts, kind) { 114 | ([DEFAULT_GENESIS_FILE], GNUSparse) => UnpackPath::Valid(unpack_dir), 115 | ([DEFAULT_GENESIS_FILE], Regular) => UnpackPath::Valid(unpack_dir), 116 | (["rocksdb"], Directory) => UnpackPath::Ignore, 117 |
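// Illustration of the validation table around this match (editorial sketch,
// assuming DEFAULT_GENESIS_FILE == "genesis.bin" as in solana_genesis_config):
// a genesis archive may only contain the genesis file itself plus ignorable
// rocksdb directories; anything else aborts the unpack.
//
//     is_valid_genesis_archive_entry(dir, &["genesis.bin"], Regular)  // -> Valid(dir)
//     is_valid_genesis_archive_entry(dir, &["rocksdb"], Directory)    // -> Ignore
//     is_valid_genesis_archive_entry(dir, &["evil.sh"], Regular)      // -> Invalid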
(["rocksdb", _], GNUSparse) => UnpackPath::Ignore, 118 | (["rocksdb", _], Regular) => UnpackPath::Ignore, 119 | (["rocksdb_fifo"], Directory) => UnpackPath::Ignore, 120 | (["rocksdb_fifo", _], GNUSparse) => UnpackPath::Ignore, 121 | (["rocksdb_fifo", _], Regular) => UnpackPath::Ignore, 122 | _ => UnpackPath::Invalid, 123 | } 124 | } 125 | 126 | fn checked_total_size_sum(total_size: u64, entry_size: u64, limit_size: u64) -> Result { 127 | trace!( 128 | "checked_total_size_sum: {} + {} < {}", 129 | total_size, 130 | entry_size, 131 | limit_size, 132 | ); 133 | let total_size = total_size.saturating_add(entry_size); 134 | if total_size > limit_size { 135 | return Err(UnpackError::Archive(format!( 136 | "too large archive: {total_size} than limit: {limit_size}", 137 | ))); 138 | } 139 | Ok(total_size) 140 | } 141 | 142 | fn checked_total_count_increment(total_count: u64, limit_count: u64) -> Result { 143 | let total_count = total_count + 1; 144 | if total_count > limit_count { 145 | return Err(UnpackError::Archive(format!( 146 | "too many files in snapshot: {total_count:?}" 147 | ))); 148 | } 149 | Ok(total_count) 150 | } 151 | 152 | fn check_unpack_result(unpack_result: bool, path: String) -> Result<()> { 153 | if !unpack_result { 154 | return Err(UnpackError::Archive(format!("failed to unpack: {path:?}"))); 155 | } 156 | Ok(()) 157 | } 158 | 159 | // return Err on file system error 160 | // return Some(path) if path is good 161 | // return None if we should skip this file 162 | fn sanitize_path(entry_path: &Path, dst: &Path) -> Result> { 163 | // We cannot call unpack_in because it errors if we try to use 2 account paths. 164 | // So, this code is borrowed from unpack_in 165 | // ref: https://docs.rs/tar/*/tar/struct.Entry.html#method.unpack_in 166 | let mut file_dst = dst.to_path_buf(); 167 | const SKIP: Result> = Ok(None); 168 | { 169 | let path = entry_path; 170 | for part in path.components() { 171 | match part { 172 | // Leading '/' characters, root paths, and '.' 173 | // components are just ignored and treated as "empty 174 | // components" 175 | Component::Prefix(..) | Component::RootDir | Component::CurDir => continue, 176 | 177 | // If any part of the filename is '..', then skip over 178 | // unpacking the file to prevent directory traversal 179 | // security issues. See, e.g.: CVE-2001-1267, 180 | // CVE-2002-0399, CVE-2005-1918, CVE-2007-4131 181 | Component::ParentDir => return SKIP, 182 | 183 | Component::Normal(part) => file_dst.push(part), 184 | } 185 | } 186 | } 187 | 188 | // Skip cases where only slashes or '.' parts were seen, because 189 | // this is effectively an empty filename. 190 | if *dst == *file_dst { 191 | return SKIP; 192 | } 193 | 194 | // Skip entries without a parent (i.e. outside of FS root) 195 | let Some(parent) = file_dst.parent() else { 196 | return SKIP; 197 | }; 198 | 199 | fs::create_dir_all(parent)?; 200 | 201 | // Here we are different than untar_in. The code for tar::unpack_in internally calling unpack is a little different. 
202 | // ignore return value here 203 | validate_inside_dst(dst, parent)?; 204 | let target = parent.join(entry_path.file_name().unwrap()); 205 | 206 | Ok(Some(target)) 207 | } 208 | 209 | #[derive(Debug, PartialEq, Eq)] 210 | pub enum UnpackPath<'a> { 211 | Valid(&'a Path), 212 | Ignore, 213 | Invalid, 214 | } 215 | 216 | fn unpack_archive<'a, A, C, D>( 217 | archive: &mut Archive<A>, 218 | apparent_limit_size: u64, 219 | actual_limit_size: u64, 220 | limit_count: u64, 221 | mut entry_checker: C, // checks if entry is valid 222 | entry_processor: D, // processes entry after setting permissions 223 | ) -> Result<()> 224 | where 225 | A: Read, 226 | C: FnMut(&[&str], tar::EntryType) -> UnpackPath<'a>, 227 | D: Fn(PathBuf), 228 | { 229 | let mut apparent_total_size: u64 = 0; 230 | let mut actual_total_size: u64 = 0; 231 | let mut total_count: u64 = 0; 232 | 233 | let mut total_entries = 0; 234 | for entry in archive.entries()? { 235 | let mut entry = entry?; 236 | let path = entry.path()?; 237 | let path_str = path.display().to_string(); 238 | 239 | // Although the `tar` crate safely skips at the actual unpacking, fail 240 | // first by ourselves when there are odd paths like including `..` or / 241 | // for our clearer pattern matching reasoning: 242 | // https://docs.rs/tar/0.4.26/src/tar/entry.rs.html#371 243 | let parts = path 244 | .components() 245 | .map(|p| match p { 246 | CurDir => Ok("."), 247 | Normal(c) => c.to_str().ok_or(()), 248 | _ => Err(()), // Prefix (for Windows) and RootDir are forbidden 249 | }) 250 | .collect::<std::result::Result<Vec<_>, _>>(); 251 | 252 | // Reject old-style BSD directory entries that aren't explicitly tagged as directories 253 | let legacy_dir_entry = 254 | entry.header().as_ustar().is_none() && entry.path_bytes().ends_with(b"/"); 255 | let kind = entry.header().entry_type(); 256 | let reject_legacy_dir_entry = legacy_dir_entry && (kind != Directory); 257 | let (Ok(parts), false) = (parts, reject_legacy_dir_entry) else { 258 | return Err(UnpackError::Archive(format!( 259 | "invalid path found: {path_str:?}" 260 | ))); 261 | }; 262 | 263 | let unpack_dir = match entry_checker(parts.as_slice(), kind) { 264 | UnpackPath::Invalid => { 265 | return Err(UnpackError::Archive(format!( 266 | "extra entry found: {:?} {:?}", 267 | path_str, 268 | entry.header().entry_type(), 269 | ))); 270 | } 271 | UnpackPath::Ignore => { 272 | continue; 273 | } 274 | UnpackPath::Valid(unpack_dir) => unpack_dir, 275 | }; 276 | 277 | apparent_total_size = checked_total_size_sum( 278 | apparent_total_size, 279 | entry.header().size()?, 280 | apparent_limit_size, 281 | )?; 282 | actual_total_size = checked_total_size_sum( 283 | actual_total_size, 284 | entry.header().entry_size()?, 285 | actual_limit_size, 286 | )?; 287 | total_count = checked_total_count_increment(total_count, limit_count)?; 288 | 289 | let account_filename = match parts.as_slice() { 290 | ["accounts", account_filename] => Some(PathBuf::from(account_filename)), 291 | _ => None, 292 | }; 293 | let entry_path = if let Some(account) = account_filename { 294 | // Special case account files. We're unpacking an account entry inside one of the 295 | // account_paths returned by `entry_checker`. We want to unpack into 296 | // account_path/ instead of account_path/accounts/ so we strip the 297 | // accounts/ prefix. 298 | sanitize_path(&account, unpack_dir) 299 | } else { 300 | sanitize_path(&path, unpack_dir) 301 | }?; // ?
handles file system errors 302 | let Some(entry_path) = entry_path else { 303 | continue; // skip it 304 | }; 305 | 306 | let unpack = entry.unpack(&entry_path); 307 | check_unpack_result(unpack.map(|_unpack| true)?, path_str)?; 308 | 309 | // Sanitize permissions. 310 | let mode = match entry.header().entry_type() { 311 | GNUSparse | Regular => 0o644, 312 | _ => 0o755, 313 | }; 314 | set_perms(&entry_path, mode)?; 315 | 316 | // Process entry after setting permissions 317 | entry_processor(entry_path); 318 | 319 | total_entries += 1; 320 | } 321 | info!("unpacked {} entries total", total_entries); 322 | 323 | return Ok(()); 324 | 325 | #[cfg(unix)] 326 | fn set_perms(dst: &Path, mode: u32) -> std::io::Result<()> { 327 | use std::os::unix::fs::PermissionsExt; 328 | 329 | let perm = fs::Permissions::from_mode(mode as _); 330 | fs::set_permissions(dst, perm) 331 | } 332 | 333 | #[cfg(windows)] 334 | fn set_perms(dst: &Path, _mode: u32) -> std::io::Result<()> { 335 | let mut perm = fs::metadata(dst)?.permissions(); 336 | // This is OK for Windows, but clippy doesn't realize we're doing this 337 | // only on Windows. 338 | #[allow(clippy::permissions_set_readonly_false)] 339 | perm.set_readonly(false); 340 | fs::set_permissions(dst, perm) 341 | } 342 | } 343 | 344 | // copied from: 345 | // https://github.com/alexcrichton/tar-rs/blob/d90a02f582c03dfa0fd11c78d608d0974625ae5d/src/entry.rs#L781 346 | fn validate_inside_dst(dst: &Path, file_dst: &Path) -> Result { 347 | // Abort if target (canonical) parent is outside of `dst` 348 | let canon_parent = file_dst.canonicalize().map_err(|err| { 349 | UnpackError::Archive(format!( 350 | "{} while canonicalizing {}", 351 | err, 352 | file_dst.display() 353 | )) 354 | })?; 355 | let canon_target = dst.canonicalize().map_err(|err| { 356 | UnpackError::Archive(format!("{} while canonicalizing {}", err, dst.display())) 357 | })?; 358 | if !canon_parent.starts_with(&canon_target) { 359 | return Err(UnpackError::Archive(format!( 360 | "trying to unpack outside of destination path: {}", 361 | canon_target.display() 362 | ))); 363 | } 364 | Ok(canon_target) 365 | } -------------------------------------------------------------------------------- /storage-adapter/src/lib.rs: -------------------------------------------------------------------------------- 1 | 2 | use { 3 | async_trait::async_trait, 4 | log::*, 5 | serde::{Deserialize, Serialize}, 6 | solana_clock::{ 7 | Slot, 8 | UnixTimestamp, 9 | }, 10 | solana_pubkey::{ 11 | Pubkey, 12 | }, 13 | solana_signature::{ 14 | Signature, 15 | }, 16 | solana_message::{ 17 | v0::LoadedAddresses, 18 | }, 19 | solana_serde::{ 20 | default_on_eof, 21 | }, 22 | solana_transaction::{ 23 | versioned::VersionedTransaction, 24 | }, 25 | solana_transaction_error::{ 26 | TransactionError, 27 | }, 28 | solana_transaction_status::{ 29 | ConfirmedBlock, 30 | ConfirmedTransactionStatusWithSignature, 31 | ConfirmedTransactionWithStatusMeta, 32 | TransactionWithStatusMeta, 33 | VersionedTransactionWithStatusMeta, 34 | VersionedConfirmedBlock, 35 | TransactionByAddrInfo, 36 | }, 37 | solana_transaction_status_client_types::{ 38 | TransactionConfirmationStatus, 39 | TransactionStatus, 40 | TransactionStatusMeta, 41 | Reward, 42 | }, 43 | std::{ 44 | boxed::Box, 45 | format, 46 | }, 47 | thiserror::Error, 48 | tokio::task::JoinError, 49 | xxhash_rust::{ 50 | xxh3::{xxh3_128}, 51 | xxh32::{xxh32}, 52 | }, 53 | }; 54 | 55 | #[macro_use] 56 | extern crate serde_derive; 57 | 58 | pub mod compression; 59 | 60 | #[derive(Debug, Error)] 61 | pub 
enum Error { 62 | #[error("Storage Error: {0}")] 63 | StorageBackendError(Box<dyn std::error::Error + Send + Sync>), 64 | 65 | #[error("I/O Error: {0}")] 66 | IoError(std::io::Error), 67 | 68 | #[error("Transaction encoded is not supported")] 69 | UnsupportedTransactionEncoding, 70 | 71 | #[error("Block not found: {0}")] 72 | BlockNotFound(Slot), 73 | 74 | #[error("Signature not found")] 75 | SignatureNotFound, 76 | 77 | #[error("tokio error")] 78 | TokioJoinError(JoinError), 79 | 80 | #[error("Cache Error: {0}")] 81 | CacheError(String), 82 | } 83 | 84 | impl std::convert::From<std::io::Error> for Error { 85 | fn from(err: std::io::Error) -> Self { 86 | Self::IoError(err) 87 | } 88 | } 89 | 90 | pub type Result<T> = std::result::Result<T, Error>; 91 | 92 | // Convert a slot to its bucket representation whereby lower slots are always lexically ordered 93 | // before higher slots 94 | pub fn slot_to_key(slot: Slot) -> String { 95 | format!("{slot:016x}") 96 | } 97 | 98 | pub fn slot_to_blocks_key(slot: Slot, use_md5: bool) -> String { 99 | let slot_hex = slot_to_key(slot); 100 | 101 | if use_md5 { 102 | let hash_result = md5::compute(&slot_hex); 103 | let truncated_hash_hex = format!("{:x}", hash_result)[..10].to_string(); 104 | 105 | // Concatenate the truncated hash with the slot hex to form the row key 106 | format!("{}{}", truncated_hash_hex, slot_hex) 107 | } else { 108 | slot_hex 109 | } 110 | } 111 | 112 | pub fn slot_to_tx_by_addr_key(slot: Slot) -> String { 113 | slot_to_key(!slot) 114 | } 115 | 116 | // Reverse of `slot_to_key` 117 | pub fn key_to_slot(key: &str) -> Option<Slot> { 118 | match Slot::from_str_radix(key, 16) { 119 | Ok(slot) => Some(slot), 120 | Err(err) => { 121 | // bucket data is probably corrupt 122 | warn!("Failed to parse object key as a slot: {}: {}", key, err); 123 | None 124 | } 125 | } 126 | } 127 | 128 | pub fn signature_to_tx_full_key(signature: &Signature, use_hash: bool) -> String { 129 | if use_hash { 130 | let signature_bytes = signature.as_ref(); // Convert signature to bytes 131 | let hash_128 = xxh3_128(signature_bytes); 132 | let hash_32 = xxh32(signature_bytes, 0); 133 | 134 | // Concatenate the two hashes for a 160-bit hash 135 | format!("{:x}{:x}", hash_128, hash_32) 136 | } else { 137 | signature.to_string() 138 | } 139 | } 140 | 141 | // A serialized `StoredConfirmedBlock` is stored in the `block` table 142 | // 143 | // StoredConfirmedBlock holds the same contents as ConfirmedBlock, but is slightly compressed and avoids 144 | // some serde JSON directives that cause issues with bincode 145 | // 146 | // Note: in order to continue to support old bincode-serialized bigtable entries, if new fields are 147 | // added to ConfirmedBlock, they must either be excluded or set to `default_on_eof` here 148 | // 149 | #[derive(Serialize, Deserialize)] 150 | pub struct StoredConfirmedBlock { 151 | previous_blockhash: String, 152 | blockhash: String, 153 | parent_slot: Slot, 154 | transactions: Vec<StoredConfirmedBlockTransaction>, 155 | rewards: StoredConfirmedBlockRewards, 156 | // pub num_partitions: Option<u64>, 157 | block_time: Option<UnixTimestamp>, 158 | #[serde(deserialize_with = "default_on_eof")] 159 | block_height: Option<u64>, 160 | } 161 | 162 | 163 | #[derive(Serialize, Deserialize)] 164 | pub struct StoredConfirmedTransactionWithStatusMeta { 165 | pub slot: Slot, 166 | // pub tx_with_meta: TransactionWithStatusMeta, 167 | pub tx_with_meta: StoredConfirmedBlockTransaction, 168 | pub block_time: Option<UnixTimestamp>, 169 | } 170 | 171 | impl From<ConfirmedTransactionWithStatusMeta> for StoredConfirmedTransactionWithStatusMeta { 172 | fn from(value: ConfirmedTransactionWithStatusMeta) -> Self { 173 | Self { 174 | slot:
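// Worked examples for the key helpers above (editorial illustration):
//
//     slot_to_key(0)   == "0000000000000000"
//     slot_to_key(42)  == "000000000000002a"        // 16 hex chars, zero-padded
//     slot_to_tx_by_addr_key(42) == slot_to_key(!42) // bitwise NOT, so newer
//                                                    // slots sort lexically first
//     key_to_slot("000000000000002a") == Some(42)
//
// The md5-salted block key prepends the first 10 hex chars of md5(slot_hex)
// to spread writes across HBase regions while keeping the original hex
// suffix recoverable from the key itself.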
value.slot, 175 | tx_with_meta: value.tx_with_meta.into(), 176 | block_time: value.block_time, 177 | } 178 | } 179 | } 180 | 181 | impl From<StoredConfirmedTransactionWithStatusMeta> for ConfirmedTransactionWithStatusMeta { 182 | fn from(value: StoredConfirmedTransactionWithStatusMeta) -> Self { 183 | Self { 184 | slot: value.slot, 185 | tx_with_meta: value.tx_with_meta.into(), 186 | block_time: value.block_time, 187 | } 188 | } 189 | } 190 | 191 | // #[cfg(test)] 192 | impl From<ConfirmedBlock> for StoredConfirmedBlock { 193 | fn from(confirmed_block: ConfirmedBlock) -> Self { 194 | let ConfirmedBlock { 195 | previous_blockhash, 196 | blockhash, 197 | parent_slot, 198 | transactions, 199 | rewards, 200 | num_partitions: _num_partitions, 201 | block_time, 202 | block_height, 203 | } = confirmed_block; 204 | 205 | Self { 206 | previous_blockhash, 207 | blockhash, 208 | parent_slot, 209 | transactions: transactions.into_iter().map(|tx| tx.into()).collect(), 210 | rewards: rewards.into_iter().map(|reward| reward.into()).collect(), 211 | block_time, 212 | block_height, 213 | } 214 | } 215 | } 216 | 217 | impl From<StoredConfirmedBlock> for ConfirmedBlock { 218 | fn from(confirmed_block: StoredConfirmedBlock) -> Self { 219 | let StoredConfirmedBlock { 220 | previous_blockhash, 221 | blockhash, 222 | parent_slot, 223 | transactions, 224 | rewards, 225 | // num_partitions: _num_partitions, 226 | block_time, 227 | block_height, 228 | } = confirmed_block; 229 | 230 | Self { 231 | previous_blockhash, 232 | blockhash, 233 | parent_slot, 234 | transactions: transactions.into_iter().map(|tx| tx.into()).collect(), 235 | rewards: rewards.into_iter().map(|reward| reward.into()).collect(), 236 | num_partitions: None, 237 | block_time, 238 | block_height, 239 | } 240 | } 241 | } 242 | 243 | #[derive(Serialize, Deserialize)] 244 | pub struct StoredConfirmedBlockTransaction { 245 | transaction: VersionedTransaction, 246 | meta: Option<StoredConfirmedBlockTransactionStatusMeta>, 247 | } 248 | 249 | // #[cfg(test)] 250 | impl From<TransactionWithStatusMeta> for StoredConfirmedBlockTransaction { 251 | fn from(value: TransactionWithStatusMeta) -> Self { 252 | match value { 253 | TransactionWithStatusMeta::MissingMetadata(transaction) => Self { 254 | transaction: VersionedTransaction::from(transaction), 255 | meta: None, 256 | }, 257 | TransactionWithStatusMeta::Complete(VersionedTransactionWithStatusMeta { 258 | transaction, 259 | meta, 260 | }) => Self { 261 | transaction, 262 | meta: Some(meta.into()), 263 | }, 264 | } 265 | } 266 | } 267 | 268 | impl From<StoredConfirmedBlockTransaction> for TransactionWithStatusMeta { 269 | fn from(tx_with_meta: StoredConfirmedBlockTransaction) -> Self { 270 | let StoredConfirmedBlockTransaction { transaction, meta } = tx_with_meta; 271 | match meta { 272 | None => Self::MissingMetadata( 273 | transaction 274 | .into_legacy_transaction() 275 | .expect("versioned transactions always have meta"), 276 | ), 277 | Some(meta) => Self::Complete(VersionedTransactionWithStatusMeta { 278 | transaction, 279 | meta: meta.into(), 280 | }), 281 | } 282 | } 283 | } 284 | 285 | #[derive(Serialize, Deserialize)] 286 | pub struct StoredConfirmedBlockTransactionStatusMeta { 287 | err: Option<TransactionError>, 288 | fee: u64, 289 | pre_balances: Vec<u64>, 290 | post_balances: Vec<u64>, 291 | } 292 | 293 | impl From<StoredConfirmedBlockTransactionStatusMeta> for TransactionStatusMeta { 294 | fn from(value: StoredConfirmedBlockTransactionStatusMeta) -> Self { 295 | let StoredConfirmedBlockTransactionStatusMeta { 296 | err, 297 | fee, 298 | pre_balances, 299 | post_balances, 300 | } = value; 301 | let status = match &err { 302 | None => Ok(()), 303 | Some(err) => Err(err.clone()), 304 | }; 305 | Self { 306 | status, 307 | fee, 308 | pre_balances,
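// Editorial note: this legacy meta only ever stored err/fee/balances, so
// every other field below is reconstructed with defaults; token balances,
// logs, and inner instructions are simply not recoverable from old rows.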
309 | post_balances, 310 | inner_instructions: None, 311 | log_messages: None, 312 | pre_token_balances: None, 313 | post_token_balances: None, 314 | rewards: None, 315 | loaded_addresses: LoadedAddresses::default(), 316 | return_data: None, 317 | compute_units_consumed: None, 318 | cost_units: None, 319 | } 320 | } 321 | } 322 | 323 | impl From<TransactionStatusMeta> for StoredConfirmedBlockTransactionStatusMeta { 324 | fn from(value: TransactionStatusMeta) -> Self { 325 | let TransactionStatusMeta { 326 | status, 327 | fee, 328 | pre_balances, 329 | post_balances, 330 | .. 331 | } = value; 332 | Self { 333 | err: status.err(), 334 | fee, 335 | pre_balances, 336 | post_balances, 337 | } 338 | } 339 | } 340 | 341 | pub type StoredConfirmedBlockRewards = Vec<StoredConfirmedBlockReward>; 342 | 343 | #[derive(Serialize, Deserialize)] 344 | pub struct StoredConfirmedBlockReward { 345 | pubkey: String, 346 | lamports: i64, 347 | } 348 | 349 | impl From<StoredConfirmedBlockReward> for Reward { 350 | fn from(value: StoredConfirmedBlockReward) -> Self { 351 | let StoredConfirmedBlockReward { pubkey, lamports } = value; 352 | Self { 353 | pubkey, 354 | lamports, 355 | post_balance: 0, 356 | reward_type: None, 357 | commission: None, 358 | } 359 | } 360 | } 361 | 362 | impl From<Reward> for StoredConfirmedBlockReward { 363 | fn from(value: Reward) -> Self { 364 | let Reward { 365 | pubkey, lamports, .. 366 | } = value; 367 | Self { pubkey, lamports } 368 | } 369 | } 370 | 371 | // impl From<VersionedTransactionWithStatusMeta> for TransactionWithStatusMeta { 372 | // fn from(item: VersionedTransactionWithStatusMeta) -> Self { 373 | // TransactionWithStatusMeta::Complete(item) 374 | // } 375 | // } 376 | 377 | // A serialized `TransactionInfo` is stored in the `tx` table 378 | #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] 379 | pub struct TransactionInfo { 380 | pub slot: Slot, // The slot that contains the block with this transaction in it 381 | pub index: u32, // Where the transaction is located in the block 382 | pub err: Option<TransactionError>, // None if the transaction executed successfully 383 | // pub memo: Option<String>, // Transaction memo 384 | } 385 | 386 | // Part of a serialized `TransactionInfo` which is stored in the `tx` table 387 | #[derive(PartialEq, Eq, Debug)] 388 | pub struct UploadedTransaction { 389 | pub slot: Slot, // The slot that contains the block with this transaction in it 390 | pub index: u32, // Where the transaction is located in the block 391 | pub err: Option<TransactionError>, // None if the transaction executed successfully 392 | } 393 | 394 | impl From<TransactionInfo> for UploadedTransaction { 395 | fn from(transaction_info: TransactionInfo) -> Self { 396 | Self { 397 | slot: transaction_info.slot, 398 | index: transaction_info.index, 399 | err: transaction_info.err, 400 | } 401 | } 402 | } 403 | 404 | impl From<TransactionInfo> for TransactionStatus { 405 | fn from(transaction_info: TransactionInfo) -> Self { 406 | let TransactionInfo { slot, err, ..
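// Editorial note: `status` below is derived from `err` rather than stored;
// a row with err == None deserializes to status Ok(()), anything else to
// Err(err.clone()). The confirmation status is always reported Finalized,
// presumably because only finalized blocks are uploaded to cold storage.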
} = transaction_info; 407 | let status = match &err { 408 | None => Ok(()), 409 | Some(err) => Err(err.clone()), 410 | }; 411 | Self { 412 | slot, 413 | confirmations: None, 414 | status, 415 | err, 416 | confirmation_status: Some(TransactionConfirmationStatus::Finalized), 417 | } 418 | } 419 | } 420 | 421 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 422 | pub struct LegacyTransactionByAddrInfo { 423 | pub signature: Signature, // The transaction signature 424 | pub err: Option<TransactionError>, // None if the transaction executed successfully 425 | pub index: u32, // Where the transaction is located in the block 426 | pub memo: Option<String>, // Transaction memo 427 | } 428 | 429 | impl From<LegacyTransactionByAddrInfo> for TransactionByAddrInfo { 430 | fn from(legacy: LegacyTransactionByAddrInfo) -> Self { 431 | let LegacyTransactionByAddrInfo { 432 | signature, 433 | err, 434 | index, 435 | memo, 436 | } = legacy; 437 | 438 | Self { 439 | signature, 440 | err, 441 | index, 442 | memo, 443 | block_time: None, 444 | } 445 | } 446 | } 447 | 448 | #[async_trait] 449 | pub trait LedgerStorageAdapter: Send + Sync { 450 | async fn get_first_available_block(&self) -> Result<Option<Slot>>; 451 | 452 | async fn get_confirmed_blocks(&self, start_slot: Slot, limit: usize) -> Result<Vec<Slot>>; 453 | 454 | async fn get_confirmed_block(&self, slot: Slot, use_cache: bool) -> Result<ConfirmedBlock>; 455 | 456 | async fn get_signature_status(&self, signature: &Signature) -> Result<TransactionStatus>; 457 | 458 | async fn get_full_transaction( 459 | &self, 460 | signature: &Signature, 461 | ) -> Result<Option<ConfirmedTransactionWithStatusMeta>>; 462 | 463 | async fn get_confirmed_transaction( 464 | &self, 465 | signature: &Signature, 466 | ) -> Result<Option<ConfirmedTransactionWithStatusMeta>>; 467 | 468 | async fn get_confirmed_signatures_for_address( 469 | &self, 470 | address: &Pubkey, 471 | before_signature: Option<&Signature>, 472 | until_signature: Option<&Signature>, 473 | limit: usize, 474 | reversed: Option<bool>, 475 | ) -> Result<Vec<ConfirmedTransactionStatusWithSignature>>; 476 | 477 | async fn get_signatures_forward( 478 | &self, 479 | address: &Pubkey, 480 | before_signature: Option<&Signature>, 481 | until_signature: Option<&Signature>, 482 | limit: usize, 483 | ) -> Result<Vec<ConfirmedTransactionStatusWithSignature>>; 484 | 485 | async fn get_signatures_backward( 486 | &self, 487 | address: &Pubkey, 488 | before_signature: Option<&Signature>, 489 | until_signature: Option<&Signature>, 490 | limit: usize, 491 | ) -> Result<Vec<ConfirmedTransactionStatusWithSignature>>; 492 | 493 | async fn get_latest_stored_slot(&self) -> Result<Slot>; 494 | 495 | async fn upload_confirmed_block( 496 | &self, 497 | slot: Slot, 498 | confirmed_block: VersionedConfirmedBlock, 499 | ) -> Result<()>; 500 | 501 | fn clone_box(&self) -> Box<dyn LedgerStorageAdapter>; 502 | 503 | /// Fetch the confirmed block from the desired slot 504 | async fn get_confirmed_block_from_legacy_storage(&self, slot: Slot, _use_cache: bool) -> Result<ConfirmedBlock>; 505 | 506 | /// For downcasting to concrete implementations 507 | fn as_any(&self) -> &dyn std::any::Any; 508 | } 509 | 510 | #[cfg(test)] 511 | mod test { 512 | use super::*; 513 | 514 | #[test] 515 | fn test_slot_to_key() { 516 | assert_eq!(slot_to_key(0), "0000000000000000"); 517 | assert_eq!(slot_to_key(!0), "ffffffffffffffff"); 518 | } 519 | } -------------------------------------------------------------------------------- /rpc/src/cli.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | request_processor::MAX_REQUEST_BODY_SIZE, 4 | input_validators::{ 5 | is_parsable, 6 | }, 7 | }, 8 | clap::{ 9 | App, 10 | Arg, 11 | ArgMatches, 12 | }, 13 | log::warn, 14 | solana_perf::thread::is_niceness_adjustment_valid, 15 | }; 16 | 17 | pub const DEFAULT_RPC_PORT: u16 =
8899; 18 | 19 | /// Deprecated argument description should be moved into the [`deprecated_arguments()`] function, 20 | /// expressed as an instance of this type. 21 | struct DeprecatedArg { 22 | /// Deprecated argument description, moved here as is. 23 | /// 24 | /// `hidden` property will be modified by [`deprecated_arguments()`] to only show this argument 25 | /// if [`hidden_unless_forced()`] says they should be displayed. 26 | arg: Arg<'static, 'static>, 27 | 28 | /// If simply replaced by a different argument, this is the name of the replacement. 29 | /// 30 | /// Content should be an argument name, as presented to users. 31 | replaced_by: Option<&'static str>, 32 | 33 | /// An explanation to be shown to the user if they still use this argument. 34 | /// 35 | /// Content should be a complete sentence or several, ending with a period. 36 | usage_warning: Option<&'static str>, 37 | } 38 | 39 | fn deprecated_arguments() -> Vec<DeprecatedArg> { 40 | let mut res = vec![]; 41 | 42 | // This macro reduces indentation and removes some noise from the argument declaration list. 43 | macro_rules! add_arg { 44 | ( 45 | $arg:expr 46 | $( , replaced_by: $replaced_by:expr )? 47 | $( , usage_warning: $usage_warning:expr )? 48 | $(,)? 49 | ) => { 50 | let replaced_by = add_arg!(@into-option $( $replaced_by )?); 51 | let usage_warning = add_arg!(@into-option $( $usage_warning )?); 52 | res.push(DeprecatedArg { 53 | arg: $arg, 54 | replaced_by, 55 | usage_warning, 56 | }); 57 | }; 58 | 59 | (@into-option) => { None }; 60 | (@into-option $v:expr) => { Some($v) }; 61 | } 62 | 63 | add_arg!(Arg::with_name("minimal_rpc_api") 64 | .long("minimal-rpc-api") 65 | .takes_value(false) 66 | .help("Only expose the RPC methods required to serve snapshots to other nodes")); 67 | 68 | res 69 | } 70 | 71 | pub fn warn_for_deprecated_arguments(matches: &ArgMatches) { 72 | for DeprecatedArg { 73 | arg, 74 | replaced_by, 75 | usage_warning, 76 | } in deprecated_arguments().into_iter() 77 | { 78 | if matches.is_present(arg.b.name) { 79 | let mut msg = format!("--{} is deprecated", arg.b.name.replace('_', "-")); 80 | if let Some(replaced_by) = replaced_by { 81 | msg.push_str(&format!(", please use --{replaced_by}")); 82 | } 83 | msg.push('.'); 84 | if let Some(usage_warning) = usage_warning { 85 | msg.push_str(&format!(" {usage_warning}")); 86 | if !msg.ends_with('.') { 87 | msg.push('.'); 88 | } 89 | } 90 | warn!("{}", msg); 91 | } 92 | } 93 | } 94 | 95 | pub fn port_validator(port: String) -> Result<(), String> { 96 | port.parse::<u16>() 97 | .map(|_| ()) 98 | .map_err(|e| format!("{e:?}")) 99 | } 100 | 101 | pub fn storage_rpc_service<'a>(version: &'a str, default_args: &'a DefaultStorageRpcArgs) -> App<'a, 'a> { 102 | return App::new("solana-storage-rpc") 103 | .about("Solana Storage RPC Service") 104 | .version(version) 105 | .arg( 106 | Arg::with_name("log_path") 107 | .short("l") 108 | .long("log-path") 109 | .value_name("DIR") 110 | .takes_value(true) 111 | .required(true) 112 | // .launcher(|value| { 113 | // value 114 | // .parse::<PathBuf>() 115 | // .map_err(|err| format!("error parsing '{value}': {err}")) 116 | // .and_then(|path| { 117 | // if path.exists() && path.is_dir() { 118 | // Ok(()) 119 | // } else { 120 | // Err(format!("path does not exist or is not a directory: {value}")) 121 | // } 122 | // }) 123 | // }) 124 | .default_value("log") 125 | .help("Use DIR as log location"), 126 | ) 127 | .arg( 128 | Arg::with_name("quiet") 129 | .short("q") 130 | .long("quiet") 131 | .takes_value(false) 132 | .conflicts_with("log") 133 |
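// (Editorial sketch of the add_arg! macro defined above.) A call such as
//
//     add_arg!(arg, replaced_by: "new-flag");
//
// expands to
//
//     let replaced_by = Some("new-flag");
//     let usage_warning = None;
//     res.push(DeprecatedArg { arg, replaced_by, usage_warning });
//
// with each omitted optional key defaulting to None via the @into-option
// helper rules.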
.help("Quiet mode: suppress normal output"), 134 | ) 135 | .arg( 136 | Arg::with_name("log") 137 | .long("log") 138 | .takes_value(false) 139 | .conflicts_with("quiet") 140 | .help("Log mode: stream the launcher log"), 141 | ) 142 | .arg( 143 | Arg::with_name("rpc_port") 144 | .long("rpc-port") 145 | .value_name("PORT") 146 | .takes_value(true) 147 | .default_value(&default_args.rpc_port) 148 | .validator(port_validator) 149 | .help("Port for the RPC service"), 150 | ) 151 | .arg( 152 | Arg::with_name("enable_rpc_transaction_history") 153 | .long("enable-rpc-transaction-history") 154 | .takes_value(false) 155 | .help("Enable historical transaction info over JSON RPC, \ 156 | including the 'getConfirmedBlock' API."), 157 | ) 158 | .arg( 159 | Arg::with_name("enable_rpc_hbase_ledger_storage") 160 | .long("enable-rpc-hbase-ledger-storage") 161 | .takes_value(false) 162 | .hidden(true) 163 | .help("Fetch historical transaction info from a HBase instance"), 164 | ) 165 | .arg( 166 | Arg::with_name("rpc_hbase_address") 167 | .long("rpc-hbase-address") 168 | .value_name("ADDRESS") 169 | .takes_value(true) 170 | .hidden(true) 171 | .default_value("127.0.0.1:9090") 172 | .help("Address of HBase instance to use"), 173 | ) 174 | .arg( 175 | Arg::with_name("fallback_hbase_address") 176 | .long("fallback-hbase-address") 177 | .value_name("ADDRESS") 178 | .takes_value(true) 179 | .hidden(true) 180 | // .default_value("127.0.0.1:9090") 181 | .help("Address of fallback HBase instance to use"), 182 | ) 183 | .arg( 184 | Arg::with_name("hbase_namespace") 185 | .long("hbase-namespace") 186 | .value_name("NAMESPACE") 187 | .takes_value(true) 188 | .help("Namespace to use for HBase tables"), 189 | ) 190 | .arg( 191 | Arg::with_name("hdfs_url") 192 | .long("hdfs-url") 193 | .value_name("HDFS_URL") 194 | .takes_value(true) 195 | .help("Hdfs address to use"), 196 | ) 197 | .arg( 198 | Arg::with_name("hdfs_path") 199 | .long("hdfs-path") 200 | .value_name("HDFS_PATH") 201 | .takes_value(true) 202 | .help("Hdfs archive base path"), 203 | ) 204 | .arg( 205 | Arg::with_name("disable_block_car_files") 206 | .long("disable-block-car-files") 207 | .takes_value(false) 208 | .help("Disable CAR file reads; use HBase 'blocks' table for getBlock"), 209 | ) 210 | .arg( 211 | Arg::with_name("use_webhdfs") 212 | .long("use-webhdfs") 213 | .takes_value(false) 214 | .requires("webhdfs_url") 215 | .help("Use WebHDFS instead of native HDFS client for CAR file reads"), 216 | ) 217 | .arg( 218 | Arg::with_name("webhdfs_url") 219 | .long("webhdfs-url") 220 | .value_name("WEBHDFS_URL") 221 | .takes_value(true) 222 | .help("Base WebHDFS URL, e.g. 
http(s)://namenode:50070/webhdfs/v1"), 223 | ) 224 | .arg( 225 | Arg::with_name("rpc_hbase_timeout") 226 | .long("rpc-hbase-timeout") 227 | .value_name("SECONDS") 228 | .validator(is_parsable::) 229 | .takes_value(true) 230 | .default_value(&default_args.rpc_hbase_timeout) 231 | .help("Number of seconds before timing out RPC requests backed by HBase"), 232 | ) 233 | .arg( 234 | Arg::with_name("enable_rpc_bigtable_ledger_storage") 235 | .long("enable-rpc-bigtable-ledger-storage") 236 | .takes_value(false) 237 | .hidden(true) 238 | .help("Fetch historical transaction info from a BigTable instance"), 239 | ) 240 | .arg( 241 | Arg::with_name("rpc_bigtable_instance_name") 242 | .long("rpc-bigtable-instance-name") 243 | .takes_value(true) 244 | .value_name("INSTANCE_NAME") 245 | .default_value(&default_args.rpc_bigtable_instance_name) 246 | .help("Name of the Bigtable instance to use") 247 | ) 248 | .arg( 249 | Arg::with_name("rpc_bigtable_app_profile_id") 250 | .long("rpc-bigtable-app-profile-id") 251 | .takes_value(true) 252 | .value_name("APP_PROFILE_ID") 253 | .default_value(&default_args.rpc_bigtable_app_profile_id) 254 | .help("Bigtable application profile id to use for requests") 255 | ) 256 | .arg( 257 | Arg::with_name("rpc_bigtable_timeout") 258 | .long("rpc-bigtable-timeout") 259 | .value_name("SECONDS") 260 | .validator(is_parsable::) 261 | .takes_value(true) 262 | .default_value(&default_args.rpc_bigtable_timeout) 263 | .help("Number of seconds before timing out RPC requests backed by BigTable"), 264 | ) 265 | .arg( 266 | Arg::with_name("bind_address") 267 | .long("bind-address") 268 | .value_name("HOST") 269 | .takes_value(true) 270 | .validator(solana_net_utils::is_host) 271 | .default_value("0.0.0.0") 272 | .help("IP address to bind the rpc service [default: 0.0.0.0]"), 273 | ) 274 | .arg( 275 | Arg::with_name("rpc_threads") 276 | .long("rpc-threads") 277 | .value_name("NUMBER") 278 | .validator(is_parsable::) 279 | .takes_value(true) 280 | .default_value(&default_args.rpc_threads) 281 | .help("Number of threads to use for servicing RPC requests"), 282 | ) 283 | .arg( 284 | Arg::with_name("rpc_niceness_adj") 285 | .long("rpc-niceness-adjustment") 286 | .value_name("ADJUSTMENT") 287 | .takes_value(true) 288 | .validator(is_niceness_adjustment_valid) 289 | .default_value(&default_args.rpc_niceness_adjustment) 290 | .help("Add this value to niceness of RPC threads. 

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn make_sure_deprecated_arguments_are_sorted_alphabetically() {
        let deprecated = deprecated_arguments();

        for i in 0..deprecated.len().saturating_sub(1) {
            let curr_name = deprecated[i].arg.b.name;
            let next_name = deprecated[i + 1].arg.b.name;

            assert!(
                curr_name != next_name,
                "Arguments in `deprecated_arguments()` should be distinct.\n\
                 Arguments {} and {} use the same name: {}",
                i,
                i + 1,
                curr_name,
            );

            assert!(
                curr_name < next_name,
                "To generate better diffs and for readability purposes, `deprecated_arguments()` \
                 should list arguments in alphabetical order.\n\
                 Arguments {} and {} are not.\n\
                 Argument {} name: {}\n\
                 Argument {} name: {}",
                i,
                i + 1,
                i,
                curr_name,
                i + 1,
                next_name,
            );
        }
    }
}
--------------------------------------------------------------------------------
/storage-hbase/src/hbase.rs:
--------------------------------------------------------------------------------
use {
    solana_storage_adapter::compression::{compress_best, decompress},
    backoff::{future::retry, ExponentialBackoff},
    log::*,
    thiserror::Error,
    hbase_thrift::{
        hbase::{BatchMutation, HbaseSyncClient, THbaseSyncClient, TScan},
        MutationBuilder,
    },
    thrift::{
        protocol::{TBinaryInputProtocol, TBinaryOutputProtocol},
        transport::{
            ReadHalf, TBufferedReadTransport, TBufferedWriteTransport, TIoChannel, TTcpChannel,
            WriteHalf,
        },
    },
    std::{collections::BTreeMap, convert::TryInto},
};

pub type RowKey = String;
pub type RowData = Vec<(CellName, CellValue)>;
pub type RowDataSlice<'a> = &'a [(CellName, CellValue)];
pub type CellName = String;
pub type CellValue = Vec<u8>;

pub enum CellData<B, P> {
    Bincode(B),
    Protobuf(P),
}
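
// Reads can come back in either encoding, so consumers match on the decoded
// representation; a minimal sketch with hypothetical block types:
//
//     match hbase
//         .get_protobuf_or_bincode_cell::<LegacyBlock, ProtoBlock>("blocks", key)
//         .await?
//     {
//         CellData::Bincode(block) => { /* legacy bincode cell ("x:bin") */ }
//         CellData::Protobuf(block) => { /* protobuf cell ("x:proto") */ }
//     }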

#[derive(Debug, Error)]
pub enum Error {
    #[error("I/O: {0}")]
    Io(std::io::Error),

    #[error("Row not found")]
    RowNotFound,

    #[error("Row write failed")]
    RowWriteFailed,

    #[error("Row delete failed")]
    RowDeleteFailed,

    #[error("Object not found: {0}")]
    ObjectNotFound(String),

    #[error("Object is corrupt: {0}")]
    ObjectCorrupt(String),

    #[error("Timeout")]
    Timeout,

    #[error("Thrift: {0}")]
    Thrift(thrift::Error),
}

impl std::convert::From<std::io::Error> for Error {
    fn from(err: std::io::Error) -> Self {
        Self::Io(err)
    }
}

impl std::convert::From<thrift::Error> for Error {
    fn from(err: thrift::Error) -> Self {
        Self::Thrift(err)
    }
}

pub type Result<T> = std::result::Result<T, Error>;
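
// The `From` impls above let `?` convert I/O and Thrift errors into `Error`
// implicitly; a minimal sketch:
//
//     fn open_channel(address: &str) -> Result<TTcpChannel> {
//         let mut channel = TTcpChannel::new();
//         channel.open(address)?; // thrift::Error -> Error::Thrift
//         Ok(channel)
//     }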
found")] 38 | RowNotFound, 39 | 40 | #[error("Row write failed")] 41 | RowWriteFailed, 42 | 43 | #[error("Row delete failed")] 44 | RowDeleteFailed, 45 | 46 | #[error("Object not found: {0}")] 47 | ObjectNotFound(String), 48 | 49 | #[error("Object is corrupt: {0}")] 50 | ObjectCorrupt(String), 51 | 52 | #[error("Timeout")] 53 | Timeout, 54 | 55 | #[error("Thrift")] 56 | Thrift(thrift::Error), 57 | } 58 | 59 | impl std::convert::From for Error { 60 | fn from(err: std::io::Error) -> Self { 61 | Self::Io(err) 62 | } 63 | } 64 | 65 | impl std::convert::From for Error { 66 | fn from(err: thrift::Error) -> Self { 67 | Self::Thrift(err) 68 | } 69 | } 70 | 71 | pub type Result = std::result::Result; 72 | 73 | #[derive(Clone, Debug)] 74 | pub struct HBaseConnection { 75 | address: String, 76 | // timeout: Option, 77 | namespace: Option, 78 | } 79 | 80 | impl HBaseConnection { 81 | pub async fn new( 82 | address: &str, 83 | namespace: Option<&str>, 84 | ) -> Result { 85 | debug!("Creating HBase connection instance"); 86 | 87 | Ok(Self { 88 | address: address.to_string(), 89 | namespace: namespace.map(|ns| ns.to_string()), 90 | }) 91 | } 92 | 93 | // pub fn client(&self) -> HBase { 94 | // let mut channel = TTcpChannel::new(); 95 | // channel.open(self.address.clone()).unwrap(); 96 | // 97 | // let (input_chan, output_chan) = channel.split().unwrap(); 98 | // let input_prot = TBinaryInputProtocol::new(TBufferedReadTransport::new(input_chan), true); 99 | // let output_prot = TBinaryOutputProtocol::new(TBufferedWriteTransport::new(output_chan), true); 100 | // 101 | // let client = HbaseSyncClient::new(input_prot, output_prot); 102 | // 103 | // HBase { 104 | // client, 105 | // namespace: self.namespace.clone(), 106 | // } 107 | // } 108 | 109 | pub fn client(&self) -> HBase { 110 | let mut channel = TTcpChannel::new(); 111 | 112 | channel.open(self.address.clone()).unwrap(); 113 | 114 | let (input_chan, output_chan) = channel.split().unwrap(); 115 | 116 | let input_prot = TBinaryInputProtocol::new( 117 | TBufferedReadTransport::new(input_chan), 118 | true 119 | ); 120 | let output_prot = TBinaryOutputProtocol::new( 121 | TBufferedWriteTransport::new(output_chan), 122 | true 123 | ); 124 | 125 | let client = HbaseSyncClient::new( 126 | input_prot, 127 | output_prot 128 | ); 129 | 130 | HBase { 131 | client, 132 | namespace: self.namespace.clone(), 133 | // timeout: self.timeout, 134 | } 135 | } 136 | 137 | pub async fn put_bincode_cells_with_retry( 138 | &self, 139 | table: &str, 140 | cells: &[(RowKey, T)], 141 | ) -> Result 142 | where 143 | T: serde::ser::Serialize, 144 | { 145 | retry(ExponentialBackoff::default(), || async { 146 | let mut client = self.client(); 147 | Ok(client.put_bincode_cells(table, cells).await?) 148 | }) 149 | .await 150 | } 151 | 152 | pub async fn put_protobuf_cells_with_retry( 153 | &self, 154 | table: &str, 155 | cells: &[(RowKey, T)], 156 | ) -> Result 157 | where 158 | T: prost::Message, 159 | { 160 | retry(ExponentialBackoff::default(), || async { 161 | let mut client = self.client(); 162 | Ok(client.put_protobuf_cells(table, cells).await?) 

type InputProtocol = TBinaryInputProtocol<TBufferedReadTransport<ReadHalf<TTcpChannel>>>;
type OutputProtocol = TBinaryOutputProtocol<TBufferedWriteTransport<WriteHalf<TTcpChannel>>>;

pub struct HBase {
    client: HbaseSyncClient<InputProtocol, OutputProtocol>,
    // timeout: Option<Duration>,
    namespace: Option<String>,
}

impl HBase {
    fn qualified_table_name(&self, table_name: &str) -> String {
        if let Some(namespace) = &self.namespace {
            format!("{}:{}", namespace, table_name)
        } else {
            table_name.to_string()
        }
    }

    /// Get `table` row keys in lexical order.
    ///
    /// If `start_at` is provided, the row key listing will start with that key.
    /// Otherwise the listing will start from the start of the table.
    ///
    /// If `end_at` is provided, the row key listing will end at that key. Otherwise it will
    /// continue until `rows_limit` is reached or the end of the table, whichever comes first.
    /// If `rows_limit` is zero, this method will return an empty array.
    pub async fn get_row_keys(
        &mut self,
        table_name: &str,
        start_at: Option<RowKey>,
        end_at: Option<RowKey>,
        rows_limit: i64,
        reversed: bool,
    ) -> Result<Vec<RowKey>> {
        if rows_limit == 0 {
            return Ok(vec![]);
        }

        debug!(
            "Trying to get row keys in range {:?} - {:?} with limit {:?}",
            start_at, end_at, rows_limit
        );

        let qualified_name = self.qualified_table_name(table_name);

        let mut scan = TScan::default();
        scan.start_row = start_at.map(|start_key| start_key.into_bytes());
        scan.stop_row = end_at.map(|end_key| end_key.into_bytes());
        scan.columns = None;
        scan.batch_size = Some(rows_limit as i32);
        scan.timestamp = None;
        scan.caching = rows_limit.try_into().ok();
        scan.reversed = Some(reversed);
        // KeyOnlyFilter strips cell values server-side; only row keys come back.
        scan.filter_string = Some(b"KeyOnlyFilter()".to_vec());

        let scan_id = self.client.scanner_open_with_scan(
            qualified_name.as_bytes().to_vec(),
            scan,
            BTreeMap::new(),
        )?;

        let mut results: Vec<(RowKey, RowData)> = Vec::new();
        let mut count = 0;
        loop {
            let row_results = self.client.scanner_get_list(scan_id, rows_limit as i32)?;
            if row_results.is_empty() {
                break;
            }
            for row_result in row_results {
                let row_key_bytes = row_result.row.unwrap();
                let row_key = String::from_utf8(row_key_bytes).unwrap();
                let mut column_values: RowData = Vec::new();
                for (key, column) in row_result.columns.unwrap_or_default() {
                    let column_value_bytes = column.value.unwrap_or_default();
                    column_values.push((String::from_utf8(key).unwrap(), column_value_bytes));
                }
                results.push((row_key, column_values));
                count += 1;
                if count >= rows_limit {
                    break;
                }
            }
            if count >= rows_limit {
                break;
            }
        }

        self.client.scanner_close(scan_id)?;

        Ok(results.into_iter().map(|r| r.0).collect())
    }
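
    // Scan usage sketch (hypothetical table and key): list up to ten row keys
    // starting at a given slot key, in forward order:
    //
    //     let keys = hbase
    //         .get_row_keys("blocks", Some("000000000000abcd".to_string()), None, 10, false)
    //         .await?;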

    /// Get the latest data from `table`.
    ///
    /// All column families are accepted, and only the latest version of each column cell will be
    /// returned.
    ///
    /// If `start_at` is provided, the row key listing will start with that key, or the next key
    /// in the table if the explicit key does not exist. Otherwise the listing will start from
    /// the start of the table.
    ///
    /// If `end_at` is provided, the row key listing will end at that key. Otherwise it will
    /// continue until `rows_limit` is reached or the end of the table, whichever comes first.
    /// If `rows_limit` is zero, this method will return an empty array.
    pub async fn get_row_data(
        &mut self,
        table_name: &str,
        start_at: Option<RowKey>,
        end_at: Option<RowKey>,
        rows_limit: i64,
        reversed: bool,
    ) -> Result<Vec<(RowKey, RowData)>> {
        if rows_limit == 0 {
            return Ok(vec![]);
        }

        debug!(
            "Trying to get rows in range {:?} - {:?} with limit {:?}",
            start_at, end_at, rows_limit
        );

        let qualified_name = self.qualified_table_name(table_name);

        let mut scan = TScan::default();
        scan.start_row = start_at.map(|start_key| start_key.into_bytes());
        scan.stop_row = end_at.map(|end_key| end_key.into_bytes());
        scan.columns = Some(vec!["x".as_bytes().to_vec()]);
        scan.batch_size = Some(rows_limit as i32);
        scan.timestamp = None;
        scan.caching = rows_limit.try_into().ok();
        scan.reversed = Some(reversed);
        // ColumnPaginationFilter(1,0) returns at most one column per row, starting at offset 0.
        scan.filter_string = Some(b"ColumnPaginationFilter(1,0)".to_vec());

        let scan_id = self.client.scanner_open_with_scan(
            qualified_name.as_bytes().to_vec(),
            scan,
            BTreeMap::new(),
        )?;

        let mut results: Vec<(RowKey, RowData)> = Vec::new();
        let mut count = 0;

        loop {
            let row_results = self.client.scanner_get_list(scan_id, rows_limit as i32)?;
            if row_results.is_empty() {
                break;
            }

            for row_result in row_results {
                let row_key_bytes = row_result.row.unwrap();
                let row_key = String::from_utf8(row_key_bytes).unwrap();
                let mut column_values: RowData = Vec::new();
                for (key, column) in row_result.columns.unwrap_or_default() {
                    let column_value_bytes = column.value.unwrap_or_default();
                    column_values.push((String::from_utf8(key).unwrap(), column_value_bytes));
                }
                results.push((row_key, column_values));
                count += 1;
                if count >= rows_limit {
                    break;
                }
            }
            if count >= rows_limit {
                break;
            }
        }

        self.client.scanner_close(scan_id)?;

        Ok(results)
    }

    pub async fn get_single_row_data(
        &mut self,
        table_name: &str,
        row_key: RowKey,
    ) -> Result<RowData> {
        debug!(
            "Trying to get row data with key {:?} from table {:?}",
            row_key, table_name
        );

        let qualified_name = self.qualified_table_name(table_name);

        let row_result = self.client.get_row_with_columns(
            qualified_name.as_bytes().to_vec(),
            row_key.as_bytes().to_vec(),
            vec!["x".as_bytes().to_vec()],
            BTreeMap::new(),
        )?;

        let first_row_result = row_result.into_iter().next().ok_or(Error::RowNotFound)?;

        let mut result_value: RowData = vec![];
        if let Some(cols) = &first_row_result.columns {
            for (col_name, cell) in cols {
                if let Some(value) = &cell.value {
                    result_value
                        .push((String::from_utf8(col_name.to_vec()).unwrap(), value.to_vec()));
                }
            }
        }

        Ok(result_value)
    }
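
    // Storage layout note: every table keeps its payload under the single
    // column family "x". A row's data cell is either "x:bin" (bincode) or
    // "x:proto" (protobuf), written by the put_* methods below and decoded by
    // the deserialize_* helpers at the bottom of this file.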

    pub async fn get_bincode_cell<T>(&mut self, table: &str, key: RowKey) -> Result<T>
    where
        T: serde::de::DeserializeOwned,
    {
        let row_data = self.get_single_row_data(table, key.clone()).await?;
        deserialize_bincode_cell_data(&row_data, table, key.to_string())
    }

    pub async fn get_protobuf_or_bincode_cell<B, P>(
        &mut self,
        table: &str,
        key: RowKey,
    ) -> Result<CellData<B, P>>
    where
        B: serde::de::DeserializeOwned,
        P: prost::Message + Default,
    {
        let row_data = self.get_single_row_data(table, key.clone()).await?;
        deserialize_protobuf_or_bincode_cell_data(&row_data, table, key)
    }

    pub async fn get_protobuf_or_bincode_cell_serialized<B, P>(
        &mut self,
        table: &str,
        key: RowKey,
    ) -> Result<RowData>
    where
        B: serde::de::DeserializeOwned,
        P: prost::Message + Default,
    {
        self.get_single_row_data(table, key.clone()).await
    }

    pub async fn put_bincode_cells<T>(
        &mut self,
        table: &str,
        cells: &[(RowKey, T)],
    ) -> Result<usize>
    where
        T: serde::ser::Serialize,
    {
        let mut bytes_written = 0;
        let mut new_row_data = vec![];
        for (row_key, data) in cells {
            let data = compress_best(&bincode::serialize(&data).unwrap())?;
            bytes_written += data.len();
            new_row_data.push((row_key, vec![("bin".to_string(), data)]));
        }

        self.put_row_data(table, "x", &new_row_data).await?;
        Ok(bytes_written)
    }

    pub async fn put_protobuf_cells<T>(
        &mut self,
        table: &str,
        cells: &[(RowKey, T)],
    ) -> Result<usize>
    where
        T: prost::Message,
    {
        let mut bytes_written = 0;
        let mut new_row_data = vec![];
        for (row_key, data) in cells {
            let mut buf = Vec::with_capacity(data.encoded_len());
            data.encode(&mut buf).unwrap();
            let data = compress_best(&buf)?;
            bytes_written += data.len();
            new_row_data.push((row_key, vec![("proto".to_string(), data)]));
        }

        self.put_row_data(table, "x", &new_row_data).await?;
        Ok(bytes_written)
    }

    async fn put_row_data(
        &mut self,
        table_name: &str,
        family_name: &str,
        row_data: &[(&RowKey, RowData)],
    ) -> Result<()> {
        let mut mutation_batches = Vec::new();
        for (row_key, cell_data) in row_data {
            let mut mutations = Vec::new();
            for (cell_name, cell_value) in cell_data {
                let mut mutation_builder = MutationBuilder::default();
                mutation_builder.column(family_name, cell_name);
                mutation_builder.value(cell_value.clone());
                mutations.push(mutation_builder.build());
            }
            mutation_batches.push(BatchMutation::new(
                Some(row_key.as_bytes().to_vec()),
                mutations,
            ));
        }

        self.client.mutate_rows(
            table_name.as_bytes().to_vec(),
            mutation_batches,
            Default::default(),
        )?;

        Ok(())
    }

    pub async fn get_last_row_key(&mut self, table_name: &str) -> Result<RowKey> {
        let row_keys = self.get_row_keys(table_name, None, None, 1, true).await?;
        if let Some(last_row_key) = row_keys.first() {
            Ok(last_row_key.clone())
        } else {
            Err(Error::RowNotFound)
        }
    }
}
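
// Write-path sketch: a value is serialized, compressed with `compress_best`
// (which, by analogy with the BigTable storage crate, is expected to pick the
// smallest of the supported encodings), and stored as a single cell:
//
//     let compressed = compress_best(&bincode::serialize(&value).unwrap())?;
//     // -> stored under column "x:bin" by put_row_data()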

pub(crate) fn deserialize_protobuf_or_bincode_cell_data<B, P>(
    row_data: RowDataSlice,
    table: &str,
    key: RowKey,
) -> Result<CellData<B, P>>
where
    B: serde::de::DeserializeOwned,
    P: prost::Message + Default,
{
    // Try the protobuf cell first and fall back to bincode only if the
    // protobuf cell is absent; any other error is propagated.
    match deserialize_protobuf_cell_data(row_data, table, key.to_string()) {
        Ok(result) => return Ok(CellData::Protobuf(result)),
        Err(err) => match err {
            Error::ObjectNotFound(_) => {}
            _ => return Err(err),
        },
    }
    deserialize_bincode_cell_data(row_data, table, key).map(CellData::Bincode)
}

pub(crate) fn deserialize_protobuf_cell_data<T>(
    row_data: RowDataSlice,
    table: &str,
    key: RowKey,
) -> Result<T>
where
    T: prost::Message + Default,
{
    let value = &row_data
        .iter()
        .find(|(name, _)| name == "x:proto")
        .ok_or_else(|| Error::ObjectNotFound(format!("{table}/{key}")))?
        .1;

    let data = decompress(value)?;
    T::decode(&data[..]).map_err(|err| {
        warn!("Failed to deserialize {}/{}: {}", table, key, err);
        Error::ObjectCorrupt(format!("{table}/{key}"))
    })
}

pub(crate) fn deserialize_bincode_cell_data<T>(
    row_data: RowDataSlice,
    table: &str,
    key: RowKey,
) -> Result<T>
where
    T: serde::de::DeserializeOwned,
{
    let value = &row_data
        .iter()
        .find(|(name, _)| name == "x:bin")
        .ok_or_else(|| Error::ObjectNotFound(format!("{table}/{key}")))?
        .1;

    let data = decompress(value)?;
    bincode::deserialize(&data).map_err(|err| {
        warn!("Failed to deserialize {}/{}: {}", table, key, err);
        Error::ObjectCorrupt(format!("{table}/{key}"))
    })
}
--------------------------------------------------------------------------------