├── .github
│   └── workflows
│       └── test.yml
├── .gitignore
├── Cargo.toml
├── README.md
├── ci
│   ├── cargo-build-test.sh
│   ├── install-build-deps.sh
│   ├── rust-version.sh
│   └── start_postgres.sh
├── scripts
│   ├── create_schema.sql
│   ├── drop_schema.sql
│   └── postgresql.conf
├── src
│   ├── accounts_selector.rs
│   ├── geyser_plugin_postgres.rs
│   ├── inline_spl_token.rs
│   ├── inline_spl_token_2022.rs
│   ├── lib.rs
│   ├── postgres_client.rs
│   ├── postgres_client
│   │   ├── postgres_client_account_index.rs
│   │   ├── postgres_client_block_metadata.rs
│   │   └── postgres_client_transaction.rs
│   └── transaction_selector.rs
└── tests
    └── test_postgres_plugin.rs

/.github/workflows/test.yml:
--------------------------------------------------------------------------------
on:
  push:
    branches: [ master ]
  pull_request:

env:
  CARGO_TERM_COLOR: always

jobs:
  test:
    runs-on: ubuntu-latest

    steps:

      - uses: actions/checkout@v2

      - name: Set env vars
        run: |
          source ci/rust-version.sh
          echo "RUST_STABLE=$rust_stable" | tee -a $GITHUB_ENV

      - if: runner.os == 'Linux'
        run: |
          sudo apt-get update
          sudo apt-get install -y libudev-dev protobuf-compiler libclang-dev

      - uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{ env.RUST_STABLE }}
          override: true
          profile: minimal
          components: rustfmt, clippy

      - uses: actions/cache@v2
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: cargo-build-${{ hashFiles('**/Cargo.lock') }}-${{ env.RUST_STABLE }}

      - name: Install dependencies
        run: |
          ./ci/install-build-deps.sh

      - name: Start PostgreSQL database
        run: ./ci/start_postgres.sh

      - name: cargo fmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check

      - name: cargo clippy
        uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: --workspace --all-targets -- --deny=warnings

      - name: Build
        run: ./ci/cargo-build-test.sh

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Generated by Cargo
# will have compiled files and executables
/target/

# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock

# These are backup files generated by rustfmt
**/*.rs.bk

farf/

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-geyser-plugin-postgres"
description = "The Solana AccountsDb plugin for the PostgreSQL database."
version = "1.17.3"
repository = "https://github.com/solana-labs/solana-accountsdb-plugin-postgres"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"

[lib]
crate-type = ["cdylib", "rlib"]

[dependencies]
bs58 = "0.4.0"
bytemuck = "1.12.1"
chrono = { version = "0.4.24", features = ["serde"] }
crossbeam-channel = "0.5.7"
log = "0.4.17"
openssl = { version = "0.10.42" }
postgres = { version = "0.19.4", features = ["with-chrono-0_4"] }
postgres-types = { version = "0.2.4", features = ["derive"] }
postgres-openssl = { version = "0.5.0" }
serde = "1.0.145"
serde_derive = "1.0.145"
serde_json = "1.0.85"
solana-geyser-plugin-interface = { version = "=1.17.3" }
solana-logger = { version = "1.17.3" }
solana-measure = { version = "1.17.3" }
solana-metrics = { version = "1.17.3" }
solana-runtime = { version = "1.17.3" }
solana-sdk = { version = "1.17.3" }
solana-transaction-status = { version = "1.17.3" }
thiserror = "1.0.37"
tokio-postgres = "0.7.7"

[dev-dependencies]
libc = "0.2.134"
libloading = "0.7.3"
serial_test = "0.9.0"
socket2 = { version = "0.4.7", features = ["all"] }

solana-account-decoder = { version = "1.17.3" }
solana-core = { version = "1.17.3" }
solana-local-cluster = { version = "1.17.3" }
solana-net-utils = { version = "1.17.3" }
solana-streamer = { version = "1.17.3" }
tempfile = "3.3.0"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
The `solana-geyser-plugin-postgres` crate implements a plugin that stores
account data in a PostgreSQL database, illustrating how a plugin can be
developed to work with Solana validators using the [Plugin Framework](https://docs.solana.com/developing/plugins/geyser-plugins).

### Configuration File Format

The plugin is configured using the input configuration file. An example
configuration file looks like the following:

```
{
    "libpath": "/solana/target/release/libsolana_geyser_plugin_postgres.so",
    "host": "postgres-server",
    "user": "solana",
    "port": 5433,
    "threads": 20,
    "batch_size": 20,
    "panic_on_db_errors": true,
    "accounts_selector" : {
        "accounts" : ["*"]
    }
}
```

The `host`, `user`, and `port` fields control the PostgreSQL connection
information. For more advanced connection options, use the `connection_str`
field; see [Rust Postgres Configuration](https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html).

To improve throughput to the database, the plugin supports connection pooling
using multiple threads, each maintaining a connection to the PostgreSQL database.
The number of threads is controlled by the `threads` field. A higher thread
count usually offers better performance.

To further improve performance when saving large numbers of accounts at
startup, the plugin uses bulk inserts. The batch size is controlled by the
`batch_size` parameter. This can help reduce the round trips to the database.
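
As a hypothetical illustration, the individual connection fields can be
replaced by a single `connection_str` entry, a key/value connection string as
understood by the Rust `postgres` crate; the host, user, and password below
are placeholders:

```
"connection_str": "host=postgres-server user=solana port=5433 password=solana"
```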

The `panic_on_db_errors` field can be used to make the validator panic on
database errors, to ensure data consistency.

### Support Connection Using SSL

To connect to the PostgreSQL database via SSL, set `use_ssl` to true, and specify
the server certificate, the client certificate, and the client key files in PEM format
using the `server_ca`, `client_cert`, and `client_key` fields respectively.
For example:

```
    "use_ssl": true,
    "server_ca": "/solana/.ssh/server-ca.pem",
    "client_cert": "/solana/.ssh/client-cert.pem",
    "client_key": "/solana/.ssh/client-key.pem",
```

### Account Selection

The `accounts_selector` can be used to filter the accounts that should be persisted.

For example, one can use the following to persist only the accounts with particular
Base58-encoded Pubkeys:

```
    "accounts_selector" : {
        "accounts" : ["pubkey-1", "pubkey-2", ..., "pubkey-n"]
    }
```

Or use the following to select accounts with certain program owners:

```
    "accounts_selector" : {
        "owners" : ["pubkey-owner-1", "pubkey-owner-2", ..., "pubkey-owner-m"]
    }
```

To select all accounts, use the wildcard character (*):

```
    "accounts_selector" : {
        "accounts" : ["*"]
    }
```

### Transaction Selection

The `transaction_selector` field controls if and which transactions to store.
If this field is missing, none of the transactions are stored.

For example, one can use the following to select only the transactions
referencing accounts with particular Base58-encoded Pubkeys:

```
    "transaction_selector" : {
        "mentions" : ["pubkey-1", "pubkey-2", ..., "pubkey-n"]
    }
```

The `mentions` field supports wildcards to select all transactions or
all 'vote' transactions. For example, to select all transactions:

```
    "transaction_selector" : {
        "mentions" : ["*"]
    }
```

To select all vote transactions:

```
    "transaction_selector" : {
        "mentions" : ["all_votes"]
    }
```

### Database Setup

#### Install PostgreSQL Server

Please follow the [PostgreSQL Ubuntu Installation](https://www.postgresql.org/download/linux/ubuntu/)
instructions to install the PostgreSQL database server. For example, to
install postgresql-14:

```
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install postgresql-14
```

#### Control the Database Access

Modify pg_hba.conf as necessary to grant the plugin access to the database.
For example, in /etc/postgresql/14/main/pg_hba.conf, the following entry allows
nodes with IPs in the CIDR 10.138.0.0/24 to access all databases. The validator
runs on a node with an IP in the specified range.

```
host    all             all             10.138.0.0/24           trust
```

It is recommended to run the database server on a separate node from the validator for
better performance.
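
For production setups, a password-based entry is often preferable to `trust`;
a hypothetical pg_hba.conf line restricting access to the `solana` user and
database over the same address range might look like:

```
host    solana          solana          10.138.0.0/24           scram-sha-256
```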

#### Configure the Database Performance Parameters

Please refer to the [PostgreSQL Server Configuration](https://www.postgresql.org/docs/14/runtime-config.html)
for configuration details. The reference implementation uses the following
settings in /etc/postgresql/14/main/postgresql.conf, which differ from the
default postgresql-14 installation, for better database performance.

```
max_connections = 200                  # (change requires restart)
shared_buffers = 1GB                   # min 128kB
effective_io_concurrency = 1000        # 1-1000; 0 disables prefetching
wal_level = minimal                    # minimal, replica, or logical
fsync = off                            # flush data to disk for crash safety
synchronous_commit = off               # synchronization level;
full_page_writes = off                 # recover from partial page writes
max_wal_senders = 0                    # max number of walsender processes
```

The sample scripts/postgresql.conf can be used for reference.

#### Create the Database Instance and the Role

Start the server:

```
sudo systemctl start postgresql@14-main
```

Create the database. For example, the following creates a database named 'solana':

```
sudo -u postgres createdb solana -p 5433
```

Create the database user. For example, the following creates a regular user named 'solana':

```
sudo -u postgres createuser -p 5433 solana
```

Verify the database is working using psql. For example, assuming the node running
PostgreSQL has the IP 10.138.0.9, the following command will land in a shell where
SQL commands can be entered:

```
psql -U solana -p 5433 -h 10.138.0.9 -w -d solana
```

#### Create the Schema Objects

Use scripts/create_schema.sql to create the schema objects:

```
psql -U solana -p 5433 -h 10.138.0.9 -w -d solana -f scripts/create_schema.sql
```

After this, start the validator with the plugin by using the `--geyser-plugin-config`
argument mentioned above.

#### Destroy the Schema Objects

To destroy the database objects created by `create_schema.sql`, use
drop_schema.sql. For example:

```
psql -U solana -p 5433 -h 10.138.0.9 -w -d solana -f scripts/drop_schema.sql
```

### Capture Historical Account Data

To capture historical account data, set `store_account_historical_data` to true
in the configuration file.
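
Once the feature is enabled and the trigger described below is installed, the
history of a single account can be inspected with a query along these lines
(a sketch; `pubkey` is stored as raw BYTEA, so the hex literal stands in for a
Base58-decoded 32-byte account pubkey):

```
SELECT slot, write_version, lamports, updated_on
FROM account_audit
WHERE pubkey = '\x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20'
ORDER BY write_version DESC
LIMIT 100;
```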

And ensure the database trigger is created to save data in the `account_audit`
table when records in `account` are updated, as shown in `create_schema.sql`:

```
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
    BEGIN
        INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
            VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
                    OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
        RETURN NEW;
    END;

$audit_account_update$ LANGUAGE plpgsql;

CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
    FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
```

The trigger can be dropped to disable this feature, for example:

```
DROP TRIGGER account_update_trigger ON account;
```

Over time, the account_audit table can accumulate a large amount of data. You may
choose to limit that by deleting older historical data.

For example, the following SQL statement can be used to keep up to 1000 of the most
recent records for an account:

```
delete from account_audit a2 where (pubkey, write_version) in
    (select pubkey, write_version from
        (select a.pubkey, a.updated_on, a.slot, a.write_version, a.lamports,
            rank() OVER ( partition by pubkey order by write_version desc) as rnk
            from account_audit a) ranked
        where ranked.rnk > 1000)
```

### Main Tables

The following are the tables in the Postgres database:

| Table         | Description             |
|:--------------|:------------------------|
| account       | Account data            |
| block         | Block metadata          |
| slot          | Slot metadata           |
| transaction   | Transaction data        |
| account_audit | Account historical data |


### Performance Considerations

When a validator lacks sufficient computing power, the overhead of saving the
account data can cause it to fall behind the network, especially when all
accounts or a large number of accounts are selected. The node hosting the
PostgreSQL database needs to be powerful enough to handle the database loads
as well. It has been found that using the GCP n2-standard-64 machine type for
the validator and n2-highmem-32 for the PostgreSQL node is adequate for
transmitting all accounts while keeping up with the network. In addition, it is
best to keep the validator and the PostgreSQL server in the same local network to
reduce latency. You may need to size the validator and database nodes
differently if serving other loads.

--------------------------------------------------------------------------------
/ci/cargo-build-test.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -e
cd "$(dirname "$0")/.."

source ./ci/rust-version.sh stable

export RUSTFLAGS="-D warnings"
export RUST_BACKTRACE=1

set -x

# Build/test all host crates
cargo +"$rust_stable" build
cargo +"$rust_stable" test -- --nocapture

exit 0

--------------------------------------------------------------------------------
/ci/install-build-deps.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -ex

sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -

sudo apt-get update
sudo apt-get install -y postgresql-14

sudo /etc/init.d/postgresql start
sudo -u postgres psql --command "CREATE USER solana WITH SUPERUSER PASSWORD 'solana';"
sudo -u postgres createdb -O solana solana
--------------------------------------------------------------------------------
/ci/rust-version.sh:
--------------------------------------------------------------------------------
#
# This file maintains the rust versions for use by CI.
#
# Obtain the environment variables without any automatic toolchain updating:
#   $ source ci/rust-version.sh
#
# Obtain the environment variables updating both stable and nightly, only stable, or
# only nightly:
#   $ source ci/rust-version.sh all
#   $ source ci/rust-version.sh stable
#   $ source ci/rust-version.sh nightly

# Then to build with either stable or nightly:
#   $ cargo +"$rust_stable" build
#   $ cargo +"$rust_nightly" build
#

if [[ -n $RUST_STABLE_VERSION ]]; then
  stable_version="$RUST_STABLE_VERSION"
else
  stable_version=1.73.0
fi

if [[ -n $RUST_NIGHTLY_VERSION ]]; then
  nightly_version="$RUST_NIGHTLY_VERSION"
else
  nightly_version=2023-10-05
fi


export rust_stable="$stable_version"
export rust_stable_docker_image=solanalabs/rust:"$stable_version"

export rust_nightly=nightly-"$nightly_version"
export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"

[[ -z $1 ]] || (

  rustup_install() {
    declare toolchain=$1
    if ! cargo +"$toolchain" -V > /dev/null; then
      echo "$0: Missing toolchain? Installing...: $toolchain" >&2
      rustup install "$toolchain"
      cargo +"$toolchain" -V
    fi
  }

  set -e
  cd "$(dirname "${BASH_SOURCE[0]}")"
  case $1 in
  stable)
    rustup_install "$rust_stable"
    ;;
  # nightly)
  #   rustup_install "$rust_nightly"
  #   ;;
  all)
    rustup_install "$rust_stable"
    rustup_install "$rust_nightly"
    ;;
  *)
    echo "$0: Note: ignoring unknown argument: $1" >&2
    ;;
  esac
)

--------------------------------------------------------------------------------
/ci/start_postgres.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -e
sudo /etc/init.d/postgresql start
PGPASSWORD=solana psql -U solana -p 5432 -h localhost -w -d solana -f scripts/create_schema.sql

--------------------------------------------------------------------------------
/scripts/create_schema.sql:
--------------------------------------------------------------------------------
/**
 * This plugin implementation for PostgreSQL requires the following tables
 */
-- The table storing accounts


CREATE TABLE account (
    pubkey BYTEA PRIMARY KEY,
    owner BYTEA,
    lamports BIGINT NOT NULL,
    slot BIGINT NOT NULL,
    executable BOOL NOT NULL,
    rent_epoch BIGINT NOT NULL,
    data BYTEA,
    write_version BIGINT NOT NULL,
    updated_on TIMESTAMP NOT NULL,
    txn_signature BYTEA
);

CREATE INDEX account_owner ON account (owner);

CREATE INDEX account_slot ON account (slot);

-- The table storing slot information
CREATE TABLE slot (
    slot BIGINT PRIMARY KEY,
    parent BIGINT,
    status VARCHAR(16) NOT NULL,
    updated_on TIMESTAMP NOT NULL
);

-- Types for Transactions

CREATE TYPE "TransactionErrorCode" AS ENUM (
    'AccountInUse',
    'AccountLoadedTwice',
    'AccountNotFound',
    'ProgramAccountNotFound',
    'InsufficientFundsForFee',
    'InvalidAccountForFee',
    'AlreadyProcessed',
    'BlockhashNotFound',
    'InstructionError',
    'CallChainTooDeep',
    'MissingSignatureForFee',
    'InvalidAccountIndex',
    'SignatureFailure',
    'InvalidProgramForExecution',
    'SanitizeFailure',
    'ClusterMaintenance',
    'AccountBorrowOutstanding',
    'WouldExceedMaxAccountCostLimit',
    'WouldExceedMaxBlockCostLimit',
    'UnsupportedVersion',
    'InvalidWritableAccount',
    'WouldExceedMaxAccountDataCostLimit',
    'TooManyAccountLocks',
    'AddressLookupTableNotFound',
    'InvalidAddressLookupTableOwner',
    'InvalidAddressLookupTableData',
    'InvalidAddressLookupTableIndex',
    'InvalidRentPayingAccount',
    'WouldExceedMaxVoteCostLimit',
    'WouldExceedAccountDataBlockLimit',
    'WouldExceedAccountDataTotalLimit',
    'DuplicateInstruction',
    'InsufficientFundsForRent',
    'MaxLoadedAccountsDataSizeExceeded',
    'InvalidLoadedAccountsDataSizeLimit',
    'ResanitizationNeeded',
    'UnbalancedTransaction',
    'ProgramExecutionTemporarilyRestricted'
);

CREATE TYPE "TransactionError" AS (
    error_code "TransactionErrorCode",
    error_detail VARCHAR(256)
);

CREATE TYPE "CompiledInstruction" AS (
    program_id_index SMALLINT,
    accounts SMALLINT[],
    data BYTEA
);

CREATE TYPE "InnerInstructions" AS (
    index SMALLINT,
    instructions "CompiledInstruction"[]
);

CREATE TYPE "TransactionTokenBalance" AS (
    account_index SMALLINT,
    mint VARCHAR(44),
    ui_token_amount DOUBLE PRECISION,
    owner VARCHAR(44)
);

CREATE TYPE "RewardType" AS ENUM (
    'Fee',
    'Rent',
    'Staking',
    'Voting'
);

CREATE TYPE "Reward" AS (
    pubkey VARCHAR(44),
    lamports BIGINT,
    post_balance BIGINT,
    reward_type "RewardType",
    commission SMALLINT
);

CREATE TYPE "TransactionStatusMeta" AS (
    error "TransactionError",
    fee BIGINT,
    pre_balances BIGINT[],
    post_balances BIGINT[],
    inner_instructions "InnerInstructions"[],
    log_messages TEXT[],
    pre_token_balances "TransactionTokenBalance"[],
    post_token_balances "TransactionTokenBalance"[],
    rewards "Reward"[]
);

CREATE TYPE "TransactionMessageHeader" AS (
    num_required_signatures SMALLINT,
    num_readonly_signed_accounts SMALLINT,
    num_readonly_unsigned_accounts SMALLINT
);

CREATE TYPE "TransactionMessage" AS (
    header "TransactionMessageHeader",
    account_keys BYTEA[],
    recent_blockhash BYTEA,
    instructions "CompiledInstruction"[]
);

CREATE TYPE "TransactionMessageAddressTableLookup" AS (
    account_key BYTEA,
    writable_indexes SMALLINT[],
    readonly_indexes SMALLINT[]
);

CREATE TYPE "TransactionMessageV0" AS (
    header "TransactionMessageHeader",
    account_keys BYTEA[],
    recent_blockhash BYTEA,
    instructions "CompiledInstruction"[],
    address_table_lookups "TransactionMessageAddressTableLookup"[]
);

CREATE TYPE "LoadedAddresses" AS (
    writable BYTEA[],
    readonly BYTEA[]
);

CREATE TYPE "LoadedMessageV0" AS (
    message "TransactionMessageV0",
    loaded_addresses "LoadedAddresses"
);

-- The table storing transactions
CREATE TABLE transaction (
    slot BIGINT NOT NULL,
    signature BYTEA NOT NULL,
    is_vote BOOL NOT NULL,
    message_type SMALLINT, -- 0: legacy, 1: v0 message
    legacy_message "TransactionMessage",
    v0_loaded_message "LoadedMessageV0",
    signatures BYTEA[],
    message_hash BYTEA,
    meta "TransactionStatusMeta",
    write_version BIGINT,
    updated_on TIMESTAMP NOT NULL,
    index BIGINT NOT NULL,
    CONSTRAINT transaction_pk PRIMARY KEY (slot, signature)
);

-- The table storing block metadata
CREATE TABLE block (
    slot BIGINT PRIMARY KEY,
    blockhash VARCHAR(44),
    rewards "Reward"[],
    block_time BIGINT,
    block_height BIGINT,
    updated_on TIMESTAMP NOT NULL
);

-- The table storing spl token owner to account indexes
CREATE TABLE spl_token_owner_index (
    owner_key BYTEA NOT NULL,
    account_key BYTEA NOT NULL,
    slot BIGINT NOT NULL
);

CREATE INDEX spl_token_owner_index_owner_key ON spl_token_owner_index (owner_key);
CREATE UNIQUE INDEX spl_token_owner_index_owner_pair ON spl_token_owner_index (owner_key, account_key);

-- The table storing spl mint to account indexes
CREATE TABLE spl_token_mint_index (
    mint_key BYTEA NOT NULL,
    account_key BYTEA NOT NULL,
    slot BIGINT NOT NULL
);

CREATE INDEX spl_token_mint_index_mint_key ON spl_token_mint_index (mint_key);
CREATE UNIQUE INDEX spl_token_mint_index_mint_pair ON spl_token_mint_index (mint_key, account_key);

/**
 * The following is for keeping historical data for accounts and is not required for the plugin to work.
 */
-- The table storing historical data for accounts
CREATE TABLE account_audit (
    pubkey BYTEA,
    owner BYTEA,
    lamports BIGINT NOT NULL,
    slot BIGINT NOT NULL,
    executable BOOL NOT NULL,
    rent_epoch BIGINT NOT NULL,
    data BYTEA,
    write_version BIGINT NOT NULL,
    updated_on TIMESTAMP NOT NULL,
    txn_signature BYTEA
);

CREATE INDEX account_audit_account_key ON account_audit (pubkey, write_version);

CREATE INDEX account_audit_pubkey_slot ON account_audit (pubkey, slot);

CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
    BEGIN
        INSERT INTO account_audit (pubkey, owner, lamports, slot, executable,
                                   rent_epoch, data, write_version, updated_on, txn_signature)
            VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
                    OLD.executable, OLD.rent_epoch, OLD.data,
                    OLD.write_version, OLD.updated_on, OLD.txn_signature);
        RETURN NEW;
    END;

$audit_account_update$ LANGUAGE plpgsql;

CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
    FOR EACH ROW EXECUTE PROCEDURE audit_account_update();

--------------------------------------------------------------------------------
/scripts/drop_schema.sql:
--------------------------------------------------------------------------------
/**
 * Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
 */

DROP TRIGGER account_update_trigger ON account;
DROP FUNCTION audit_account_update;
DROP TABLE account_audit;
DROP TABLE account CASCADE;
DROP TABLE slot;
DROP TABLE transaction;
DROP TABLE block;
DROP TABLE spl_token_owner_index;
DROP TABLE spl_token_mint_index;

DROP TYPE "TransactionError" CASCADE;
DROP TYPE "TransactionErrorCode" CASCADE;
DROP TYPE "LoadedMessageV0" CASCADE;
DROP TYPE "LoadedAddresses" CASCADE;
DROP TYPE "TransactionMessageV0" CASCADE;
DROP TYPE "TransactionMessage" CASCADE;
DROP TYPE "TransactionMessageHeader" CASCADE;
DROP TYPE "TransactionMessageAddressTableLookup" CASCADE;
DROP TYPE "TransactionStatusMeta" CASCADE;
DROP TYPE "RewardType" CASCADE;
DROP TYPE "Reward" CASCADE;
DROP TYPE "TransactionTokenBalance" CASCADE;
DROP TYPE "InnerInstructions" CASCADE;
DROP TYPE "CompiledInstruction" CASCADE;

--------------------------------------------------------------------------------
/scripts/postgresql.conf:
--------------------------------------------------------------------------------
# This is a reference configuration file for the PostgreSQL database version 14.

# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
#   name = value
#
# (The "=" is optional.)  Whitespace may be used.  Comments are introduced with
# "#" anywhere on a line.  The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal.  If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()".  Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on".  Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units:  B  = bytes            Time units:  us  = microseconds
#                kB = kilobytes                     ms  = milliseconds
#                MB = megabytes                     s   = seconds
#                GB = gigabytes                     min = minutes
#                TB = terabytes                     h   = hours
#                                                   d   = days


#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------

# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.

data_directory = '/var/lib/postgresql/14/main'		# use data in another directory
					# (change requires restart)

hba_file = '/etc/postgresql/14/main/pg_hba.conf'	# host-based authentication file
					# (change requires restart)
ident_file = '/etc/postgresql/14/main/pg_ident.conf'	# ident configuration file
					# (change requires restart)

# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/14-main.pid'	# write an extra PID file
					# (change requires restart)


#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------

# - Connection Settings -

#listen_addresses = 'localhost'		# what IP address(es) to listen on;
					# comma-separated list of addresses;
					# defaults to 'localhost'; use '*' for all
					# (change requires restart)
listen_addresses = '*'
port = 5433				# (change requires restart)
max_connections = 200			# (change requires restart)
#superuser_reserved_connections = 3	# (change requires restart)
unix_socket_directories = '/var/run/postgresql'	# comma-separated list of directories
					# (change requires restart)
#unix_socket_group = ''			# (change requires restart)
#unix_socket_permissions = 0777		# begin with 0 to use octal notation
					# (change requires restart)
#bonjour = off				# advertise server via Bonjour
					# (change requires restart)
#bonjour_name = ''			# defaults to the computer name
					# (change requires restart)

# - TCP settings -
# see "man tcp" for details

#tcp_keepalives_idle = 0		# TCP_KEEPIDLE, in seconds;
					# 0 selects the system default
#tcp_keepalives_interval = 0		# TCP_KEEPINTVL, in seconds;
					# 0 selects the system default
#tcp_keepalives_count = 0		# TCP_KEEPCNT;
					# 0 selects the system default
#tcp_user_timeout = 0			# TCP_USER_TIMEOUT, in milliseconds;
					# 0 selects the system default

#client_connection_check_interval = 0	# time between checks for client
					# disconnection while running queries;
					# 0 for never

# - Authentication -

#authentication_timeout = 1min		# 1s-600s
#password_encryption = scram-sha-256	# scram-sha-256 or md5
#db_user_namespace = off

# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off

# - SSL -

ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL'	# allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off


#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------

# - Memory -

shared_buffers = 1GB			# min 128kB
					# (change requires restart)
#huge_pages = try			# on, off, or try
					# (change requires restart)
#huge_page_size = 0			# zero for system default
					# (change requires restart)
#temp_buffers = 8MB			# min 800kB
#max_prepared_transactions = 0		# zero disables the feature
					# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB				# min 64kB
#hash_mem_multiplier = 1.0		# 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB		# min 1MB
#autovacuum_work_mem = -1		# min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB	# min 64kB
#max_stack_depth = 2MB			# min 100kB
#shared_memory_type = mmap		# the default is the first option
					# supported by the operating system:
					#   mmap
					#   sysv
					#   windows
					# (change requires restart)
dynamic_shared_memory_type = posix	# the default is the first option
					# supported by the operating system:
					#   posix
					#   sysv
					#   windows
					#   mmap
					# (change requires restart)
#min_dynamic_shared_memory = 0MB	# (change requires restart)

# - Disk -

#temp_file_limit = -1			# limits per-process temp file space
					# in kilobytes, or -1 for no limit

# - Kernel Resources -

#max_files_per_process = 1000		# min 64
					# (change requires restart)

# - Cost-Based Vacuum Delay -

#vacuum_cost_delay = 0			# 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1		# 0-10000 credits
#vacuum_cost_page_miss = 2		# 0-10000 credits
#vacuum_cost_page_dirty = 20		# 0-10000 credits
#vacuum_cost_limit = 200		# 1-10000 credits

# - Background Writer -

#bgwriter_delay = 200ms			# 10-10000ms between rounds
#bgwriter_lru_maxpages = 100		# max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0		# 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB		# measured in pages, 0 disables

# - Asynchronous Behavior -

#backend_flush_after = 0		# measured in pages, 0 disables
effective_io_concurrency = 1000		# 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10	# 1-1000; 0 disables prefetching
#max_worker_processes = 8		# (change requires restart)
#max_parallel_workers_per_gather = 2	# taken from max_parallel_workers
#max_parallel_maintenance_workers = 2	# taken from max_parallel_workers
#max_parallel_workers = 8		# maximum number of max_worker_processes that
					# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate
					# (change requires restart)


#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------

# - Settings -

wal_level = minimal			# minimal, replica, or logical
					# (change requires restart)
fsync = off				# flush data to disk for crash safety
					# (turning this off can cause
					# unrecoverable data corruption)
synchronous_commit = off		# synchronization level;
					# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync		# the default is the first option
					# supported by the operating system:
					#   open_datasync
					#   fdatasync (default on Linux and FreeBSD)
					#   fsync
					#   fsync_writethrough
					#   open_sync
full_page_writes = off			# recover from partial page writes
#wal_log_hints = off			# also do full page writes of non-critical updates
					# (change requires restart)
#wal_compression = off			# enable compression of full-page writes
#wal_init_zero = on			# zero-fill new WAL files
#wal_recycle = on			# recycle WAL files
#wal_buffers = -1			# min 32kB, -1 sets based on shared_buffers
					# (change requires restart)
#wal_writer_delay = 200ms		# 1-10000 milliseconds
#wal_writer_flush_after = 1MB		# measured in pages, 0 disables
#wal_skip_threshold = 2MB

#commit_delay = 0			# range 0-100000, in microseconds
#commit_siblings = 5			# range 1-1000

# - Checkpoints -

#checkpoint_timeout = 5min		# range 30s-1d
#checkpoint_completion_target = 0.9	# checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB		# measured in pages, 0 disables
#checkpoint_warning = 30s		# 0 disables
max_wal_size = 1GB
min_wal_size = 80MB

# - Archiving -

#archive_mode = off		# enables archiving; off, on, or always
				# (change requires restart)
#archive_command = ''		# command to use to archive a logfile segment
				# placeholders: %p = path of file to archive
				#               %f = file name only
				# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0		# force a logfile segment switch after this
				# number of seconds; 0 disables

# - Archive Recovery -

# These are only used in recovery mode.

#restore_command = ''		# command to use to restore an archived logfile segment
				# placeholders: %p = path of file to restore
				#               %f = file name only
				# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = ''	# command to execute at every restartpoint
#recovery_end_command = ''	# command to execute at completion of recovery

# - Recovery Target -

# Set these only when performing a targeted recovery.

#recovery_target = ''		# 'immediate' to end recovery as soon as a
				# consistent state is reached
				# (change requires restart)
#recovery_target_name = ''	# the named restore point to which recovery will proceed
				# (change requires restart)
#recovery_target_time = ''	# the time stamp up to which recovery will proceed
				# (change requires restart)
#recovery_target_xid = ''	# the transaction ID up to which recovery will proceed
				# (change requires restart)
#recovery_target_lsn = ''	# the WAL LSN up to which recovery will proceed
				# (change requires restart)
#recovery_target_inclusive = on	# Specifies whether to stop:
				# just after the specified recovery target (on)
				# just before the recovery target (off)
				# (change requires restart)
#recovery_target_timeline = 'latest'	# 'current', 'latest', or timeline ID
				# (change requires restart)
#recovery_target_action = 'pause'	# 'pause', 'promote', 'shutdown'
				# (change requires restart)


#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------

# - Sending Servers -

# Set these on the primary and on any standby that will send replication data.

max_wal_senders = 0		# max number of walsender processes
				# (change requires restart)
#max_replication_slots = 10	# max number of replication slots
				# (change requires restart)
#wal_keep_size = 0		# in megabytes; 0 disables
#max_slot_wal_keep_size = -1	# in megabytes; -1 disables
#wal_sender_timeout = 60s	# in milliseconds; 0 disables
#track_commit_timestamp = off	# collect timestamp of transaction commit
				# (change requires restart)

# - Primary Server -

# These settings are ignored on a standby server.

#synchronous_standby_names = ''	# standby servers that provide sync rep
				# method to choose sync standbys, number of sync standbys,
				# and comma-separated list of application_name
				# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0	# number of xacts by which cleanup is delayed

# - Standby Servers -

# These settings are ignored on a primary server.

#primary_conninfo = ''			# connection string to sending server
#primary_slot_name = ''			# replication slot on sending server
#promote_trigger_file = ''		# file name whose presence ends recovery
#hot_standby = on			# "off" disallows queries during recovery
					# (change requires restart)
#max_standby_archive_delay = 30s	# max delay before canceling queries
					# when reading WAL from archive;
					# -1 allows indefinite delay
#max_standby_streaming_delay = 30s	# max delay before canceling queries
					# when reading streaming WAL;
					# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off	# create temp slot if primary_slot_name
					# is not set
#wal_receiver_status_interval = 10s	# send replies at least this often
					# 0 disables
#hot_standby_feedback = off		# send info from standby to prevent
					# query conflicts
#wal_receiver_timeout = 60s		# time that receiver waits for
					# communication from primary
					# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s	# time to wait before retrying to
					# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0		# minimum delay for applying changes during recovery

# - Subscribers -

# These settings are ignored on a publisher.

#max_logical_replication_workers = 4	# taken from max_worker_processes
					# (change requires restart)
#max_sync_workers_per_subscription = 2	# taken from max_logical_replication_workers


#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------

# - Planner Method Configuration -

#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on

# - Planner Cost Constants -

#seq_page_cost = 1.0			# measured on an arbitrary scale
#random_page_cost = 4.0			# same scale as above
#cpu_tuple_cost = 0.01			# same scale as above
#cpu_index_tuple_cost = 0.005		# same scale as above
#cpu_operator_cost = 0.0025		# same scale as above
#parallel_setup_cost = 1000.0		# same scale as above
#parallel_tuple_cost = 0.1		# same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB

#jit_above_cost = 100000		# perform JIT compilation if available
					# and query more expensive than this;
					# -1 disables
#jit_inline_above_cost = 500000		# inline small functions if query is
					# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000	# use expensive JIT optimizations if
					# query is more expensive than this;
					# -1 disables

# - Genetic Query Optimizer -

#geqo = on
#geqo_threshold = 12
#geqo_effort = 5			# range 1-10
#geqo_pool_size = 0			# selects default based on effort
#geqo_generations = 0			# selects default based on effort
#geqo_selection_bias = 2.0		# range 1.5-2.0
#geqo_seed = 0.0			# range 0.0-1.0

# - Other Planner Options -

#default_statistics_target = 100	# range 1-10000
#constraint_exclusion = partition	# on, off, or partition
#cursor_tuple_fraction = 0.1		# range 0.0-1.0
#from_collapse_limit = 8
#jit = on				# allow JIT compilation
#join_collapse_limit = 8		# 1 disables collapsing of explicit
					# JOIN clauses
#plan_cache_mode = auto			# auto, force_generic_plan or
					# force_custom_plan


#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------

# - Where to Log -

#log_destination = 'stderr'		# Valid values are combinations of
					# stderr, csvlog, syslog, and eventlog,
					# depending on platform.  csvlog
					# requires logging_collector to be on.

# This is used when logging to stderr:
#logging_collector = off		# Enable capturing of stderr and csvlog
					# into log files. Required to be on for
					# csvlogs.
					# (change requires restart)

# These are only used if logging_collector is on:
#log_directory = 'log'			# directory where log files are written,
					# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
					# can include strftime() escapes
#log_file_mode = 0600			# creation mode for log files,
					# begin with 0 to use octal notation
#log_rotation_age = 1d			# Automatic rotation of logfiles will
					# happen after that time.  0 disables.
#log_rotation_size = 10MB		# Automatic rotation of logfiles will
					# happen after that much log output.
					# 0 disables.
#log_truncate_on_rotation = off		# If on, an existing log file with the
					# same name as the new log file will be
					# truncated rather than appended to.
					# But such truncation only occurs on
					# time-driven rotation, not on restarts
					# or size-driven rotation.  Default is
					# off, meaning append to existing files
					# in all cases.

# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on

# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'

# - When to Log -

#log_min_messages = warning		# values in order of decreasing detail:
					#   debug5
					#   debug4
					#   debug3
					#   debug2
					#   debug1
					#   info
					#   notice
					#   warning
					#   error
					#   log
					#   fatal
					#   panic

#log_min_error_statement = error	# values in order of decreasing detail:
					#   debug5
					#   debug4
					#   debug3
					#   debug2
					#   debug1
					#   info
					#   notice
					#   warning
					#   error
					#   log
					#   fatal
					#   panic (effectively off)

#log_min_duration_statement = -1	# -1 is disabled, 0 logs all statements
					# and their durations, > 0 logs only
					# statements running at least this number
					# of milliseconds

#log_min_duration_sample = -1		# -1 is disabled, 0 logs a sample of statements
					# and their durations, > 0 logs only a sample of
					# statements running at least this number
					# of milliseconds;
					# sample fraction is determined by log_statement_sample_rate

#log_statement_sample_rate = 1.0	# fraction of logged statements exceeding
					# log_min_duration_sample to be logged;
					# 1.0 logs all such statements, 0.0 never logs


#log_transaction_sample_rate = 0.0	# fraction of transactions whose statements
					# are logged regardless of their duration; 1.0 logs all
					# statements from all transactions, 0.0 never logs

# - What to Log -

#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1	# log autovacuum activity;
					# -1 disables, 0 logs all actions and
					# their durations, > 0 logs only
					# actions running at least this number
					# of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default		# terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d '	# special values:
					#   %a = application name
					#   %u = user name
					#   %d = database name
					#   %r = remote host and port
					#   %h = remote host
					#   %b = backend type
					#   %p = process ID
					#   %P = process ID of parallel group leader
					#   %t = timestamp without milliseconds
					#   %m = timestamp with milliseconds
					#   %n = timestamp with milliseconds (as a Unix epoch)
					#   %Q = query ID (0 if none or not computed)
					#   %i = command tag
					#   %e = SQL state
					#   %c = session ID
					#   %l = session line number
					#   %s = session start timestamp
					#   %v = virtual transaction ID
					#   %x = transaction ID (0 if none)
					#   %q = stop here in non-session
					#        processes
					#   %% = '%'
					# e.g. '<%u%%%d> '
#log_lock_waits = off			# log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off	# log standby recovery conflict waits
					# >= deadlock_timeout
#log_parameter_max_length = -1		# when logging statements, limit logged
					# bind-parameter values to N bytes;
					# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0	# when logging an error, limit logged
					# bind-parameter values to N bytes;
					# -1 means print in full, 0 disables
#log_statement = 'none'			# none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1			# log temporary files equal or larger
					# than the specified size in kilobytes;
					# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'


#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------

cluster_name = '14/main'		# added to process titles if nonempty
					# (change requires restart)
#update_process_title = on


#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------

# - Query and Index Statistics Collector -

#track_activities = on
#track_activity_query_size = 1024	# (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none			# none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'


# - Monitoring -

#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off


#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------

#autovacuum = on			# Enable autovacuum subprocess?  'on'
					# requires track_counts to also be on.
#autovacuum_max_workers = 3		# max number of autovacuum subprocesses
					# (change requires restart)
#autovacuum_naptime = 1min		# time between autovacuum runs
#autovacuum_vacuum_threshold = 50	# min number of row updates before
					# vacuum
#autovacuum_vacuum_insert_threshold = 1000	# min number of row inserts
					# before vacuum; -1 disables insert
					# vacuums
#autovacuum_analyze_threshold = 50	# min number of row updates before
					# analyze
#autovacuum_vacuum_scale_factor = 0.2	# fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2	# fraction of inserts over table
					# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1	# fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000	# maximum XID age before forced vacuum
					# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000	# maximum multixact age
					# before forced vacuum
					# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms	# default vacuum cost delay for
					# autovacuum, in milliseconds;
					# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1	# default vacuum cost limit for
					# autovacuum, -1 means use
					# vacuum_cost_limit


#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------

# - Statement Behavior -

#client_min_messages = notice		# values in order of decreasing detail:
					#   debug5
					#   debug4
					#   debug3
					#   debug2
					#   debug1
					#   log
					#   notice
					#   warning
					#   error
#search_path = '"$user", public'	# schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = ''		# a tablespace name, '' uses the default
#default_toast_compression = 'pglz'	# 'pglz' or 'lz4'
#temp_tablespaces = ''			# a list of tablespace names, '' uses
					# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0			# in milliseconds, 0 is disabled
#lock_timeout = 0			# in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0	# in milliseconds, 0 is disabled
#idle_session_timeout = 0		# in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex'			# hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB

# - Locale and Formatting -

datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default'	# Select the set of available time zone
					# abbreviations.  Currently, there are
					#   Default
					#   Australia (historical usage)
					#   India
					# You can create your own file in
					# share/timezonesets/.
#extra_float_digits = 1			# min -15, max 3; any value >0 actually
					# selects precise output mode
#client_encoding = sql_ascii		# actually, defaults to database
					# encoding

# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8'			# locale for system error message
					# strings
lc_monetary = 'C.UTF-8'			# locale for monetary formatting
lc_numeric = 'C.UTF-8'			# locale for number formatting
lc_time = 'C.UTF-8'			# locale for time formatting

# default configuration for text search
default_text_search_config = 'pg_catalog.english'

# - Shared Library Preloading -

#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = ''		# (change requires restart)
#jit_provider = 'llvmjit'		# JIT library to use

# - Other Defaults -

#dynamic_library_path = '$libdir'
#extension_destdir = ''			# prepend path when loading extensions
					# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0


#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------

#deadlock_timeout = 1s
#max_locks_per_transaction = 64		# min 10
					# (change requires restart)
#max_pred_locks_per_transaction = 64	# min 10
					# (change requires restart)
#max_pred_locks_per_relation = -2	# negative values mean
					# (max_pred_locks_per_transaction
					#  / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2		# min 0


#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------

# - Previous PostgreSQL Versions -

#array_nulls = on
#backslash_quote = safe_encoding	# on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on

# - Other Platforms and Clients -

#transform_null_equals = off


#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------

#exit_on_error = off			# terminate session on any error?
#restart_after_crash = on		# reinitialize after backend crash?
#data_sync_retry = off			# retry or panic on failure to fsync
					# data?
					# (change requires restart)
#recovery_init_sync_method = fsync	# fsync, syncfs (Linux 5.8+)


#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------

# These options allow settings to be loaded from files other than the
# default postgresql.conf.  Note that these are directives, not variable
# assignments, so they can usefully be given more than once.

include_dir = 'conf.d'			# include files ending in '.conf' from
					# a directory, e.g., 'conf.d'
#include_if_exists = '...'		# include file only if it exists
#include = '...'			# include file
796 | 
797 | 
798 | #------------------------------------------------------------------------------
799 | # CUSTOMIZED OPTIONS
800 | #------------------------------------------------------------------------------
801 | 
802 | # Add settings for extensions here
--------------------------------------------------------------------------------
/src/accounts_selector.rs:
--------------------------------------------------------------------------------
 1 | use {log::*, std::collections::HashSet};
 2 | 
 3 | #[derive(Debug)]
 4 | pub(crate) struct AccountsSelector {
 5 |     pub accounts: HashSet<Vec<u8>>,
 6 |     pub owners: HashSet<Vec<u8>>,
 7 |     pub select_all_accounts: bool,
 8 | }
 9 | 
10 | impl AccountsSelector {
11 |     pub fn default() -> Self {
12 |         AccountsSelector {
13 |             accounts: HashSet::default(),
14 |             owners: HashSet::default(),
15 |             select_all_accounts: true,
16 |         }
17 |     }
18 | 
19 |     pub fn new(accounts: &[String], owners: &[String]) -> Self {
20 |         info!(
21 |             "Creating AccountsSelector from accounts: {:?}, owners: {:?}",
22 |             accounts, owners
23 |         );
24 | 
25 |         let select_all_accounts = accounts.iter().any(|key| key == "*");
26 |         if select_all_accounts {
27 |             return AccountsSelector {
28 |                 accounts: HashSet::default(),
29 |                 owners: HashSet::default(),
30 |                 select_all_accounts,
31 |             };
32 |         }
33 |         let accounts = accounts
34 |             .iter()
35 |             .map(|key| bs58::decode(key).into_vec().unwrap())
36 |             .collect();
37 |         let owners = owners
38 |             .iter()
39 |             .map(|key| bs58::decode(key).into_vec().unwrap())
40 |             .collect();
41 |         AccountsSelector {
42 |             accounts,
43 |             owners,
44 |             select_all_accounts,
45 |         }
46 |     }
47 | 
48 |     pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
49 |         self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
50 |     }
51 | 
52 |     /// Check if any account is of interest at all
53 |     pub fn is_enabled(&self) -> bool {
54 |         self.select_all_accounts || !self.accounts.is_empty() || !self.owners.is_empty()
55 |     }
56 | }
57 | 
58 | #[cfg(test)]
59 | pub(crate) mod tests {
60 |     use super::*;
61 | 
62 |     #[test]
63 |     fn test_create_accounts_selector() {
64 |         AccountsSelector::new(
65 |             &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
66 |             &[],
67 |         );
68 | 
69 |         AccountsSelector::new(
70 |             &[],
71 |             &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
72 |         );
73 |     }
74 | }
75 | 
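A short usage sketch of the selector above; this is not part of the crate, just an illustration of how base58 keys from the config map onto the byte-level matching. Since `AccountsSelector` is `pub(crate)`, a snippet like this would have to live inside the crate, next to the test module shown above:

```
#[test]
fn test_selection_sketch() {
    let owner = "9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin";
    let selector = AccountsSelector::new(&[], &[owner.to_string()]);
    assert!(selector.is_enabled());

    // Matching happens on raw 32-byte keys, not on base58 strings.
    let owner_bytes = bs58::decode(owner).into_vec().unwrap();
    assert!(selector.is_account_selected(&[0u8; 32], &owner_bytes));

    // The "*" wildcard short-circuits both key sets and selects everything.
    let all = AccountsSelector::new(&["*".to_string()], &[]);
    assert!(all.is_account_selected(&[0u8; 32], &[1u8; 32]));
}
```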
--------------------------------------------------------------------------------
/src/geyser_plugin_postgres.rs:
--------------------------------------------------------------------------------
  1 | /// Main entry for the PostgreSQL plugin
  2 | use {
  3 |     crate::{
  4 |         accounts_selector::AccountsSelector,
  5 |         postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
  6 |         transaction_selector::TransactionSelector,
  7 |     },
  8 |     bs58,
  9 |     log::*,
 10 |     serde_derive::{Deserialize, Serialize},
 11 |     serde_json,
 12 |     solana_geyser_plugin_interface::geyser_plugin_interface::{
 13 |         GeyserPlugin, GeyserPluginError, ReplicaAccountInfoVersions, ReplicaBlockInfoVersions,
 14 |         ReplicaTransactionInfoVersions, Result, SlotStatus,
 15 |     },
 16 |     solana_measure::measure::Measure,
 17 |     solana_metrics::*,
 18 |     std::{fs::File, io::Read},
 19 |     thiserror::Error,
 20 | };
 21 | 
 22 | #[derive(Default)]
 23 | pub struct GeyserPluginPostgres {
 24 |     client: Option<ParallelPostgresClient>,
 25 |     accounts_selector: Option<AccountsSelector>,
 26 |     transaction_selector: Option<TransactionSelector>,
 27 |     batch_starting_slot: Option<u64>,
 28 | }
 29 | 
 30 | impl std::fmt::Debug for GeyserPluginPostgres {
 31 |     fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 32 |         Ok(())
 33 |     }
 34 | }
 35 | 
 36 | /// The Configuration for the PostgreSQL plugin
 37 | #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
 38 | pub struct GeyserPluginPostgresConfig {
 39 |     /// The host name or IP of the PostgreSQL server
 40 |     pub host: Option<String>,
 41 | 
 42 |     /// The user name of the PostgreSQL server.
 43 |     pub user: Option<String>,
 44 | 
 45 |     /// The port number of the PostgreSQL database, the default is 5432
 46 |     pub port: Option<u16>,
 47 | 
 48 |     /// The connection string of PostgreSQL database, if this is set
 49 |     /// `host`, `user` and `port` will be ignored.
 50 |     pub connection_str: Option<String>,
 51 | 
 52 |     /// Controls the number of threads establishing connections to
 53 |     /// the PostgreSQL server. The default is 100.
 54 |     pub threads: Option<usize>,
 55 | 
 56 |     /// Controls the batch size when bulk loading accounts.
 57 |     /// The default is 10.
 58 |     pub batch_size: Option<usize>,
 59 | 
 60 |     /// Controls whether to panic the validator in case of errors
 61 |     /// writing to PostgreSQL server. The default is false
 62 |     pub panic_on_db_errors: Option<bool>,
 63 | 
 64 |     /// Indicates whether to store historical data for accounts
 65 |     pub store_account_historical_data: Option<bool>,
 66 | 
 67 |     /// Controls whether to use SSL based connection to the database server.
 68 |     /// The default is false
 69 |     pub use_ssl: Option<bool>,
 70 | 
 71 |     /// Specify the path to PostgreSQL server's certificate file
 72 |     pub server_ca: Option<String>,
 73 | 
 74 |     /// Specify the path to the local client's certificate file
 75 |     pub client_cert: Option<String>,
 76 | 
 77 |     /// Specify the path to the local client's private PEM key file.
 78 |     pub client_key: Option<String>,
 79 | 
 80 |     /// Controls whether to index the token owners. The default is false
 81 |     pub index_token_owner: Option<bool>,
 82 | 
 83 |     /// Controls whether to index the token mints. The default is false
 84 |     pub index_token_mint: Option<bool>,
 85 | 
 86 |     /// Controls if this plugin can read the database on_load() to find the highest slot
 87 |     /// and ignore upsert accounts (at_startup) that should already exist in the DB
 88 |     #[serde(default)]
 89 |     pub skip_upsert_existing_accounts_at_startup: bool,
 90 | }
 91 | 
 92 | #[derive(Error, Debug)]
 93 | pub enum GeyserPluginPostgresError {
 94 |     #[error("Error connecting to the backend data store. Error message: ({msg})")]
 95 |     DataStoreConnectionError { msg: String },
 96 | 
 97 |     #[error("Error preparing data store schema. Error message: ({msg})")]
 98 |     DataSchemaError { msg: String },
 99 | 
100 |     #[error("Error in plugin configuration. Error message: ({msg})")]
101 |     ConfigurationError { msg: String },
102 | 
103 |     #[error("Replica account V0.0.1 not supported anymore")]
104 |     ReplicaAccountV001NotSupported,
105 | }
106 | 
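Because every connection-related field above is an `Option`, a config file only needs the keys it wants to override. A minimal sketch of how this struct deserializes (values illustrative only; `serde` ignores unknown keys such as `libpath` and the selector sections):

```
#[test]
fn test_minimal_config_sketch() {
    let json = r#"{ "host": "postgres-server", "user": "solana", "port": 5433 }"#;
    let config: GeyserPluginPostgresConfig = serde_json::from_str(json).unwrap();
    assert_eq!(config.port, Some(5433));
    assert_eq!(config.connection_str, None); // unset Option fields come back as None
    assert!(!config.skip_upsert_existing_accounts_at_startup); // #[serde(default)]
}
```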
107 | impl GeyserPlugin for GeyserPluginPostgres {
108 |     fn name(&self) -> &'static str {
109 |         "GeyserPluginPostgres"
110 |     }
111 | 
112 |     /// Do initialization for the PostgreSQL plugin.
113 |     ///
114 |     /// # Format of the config file:
115 |     /// * The `accounts_selector` section allows the user to control account selection.
116 |     /// "accounts_selector" : {
117 |     ///     "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
118 |     /// }
119 |     /// or:
120 |     /// "accounts_selector" = {
121 |     ///     "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
122 |     /// }
123 |     /// Accounts satisfying either the accounts condition or the owners condition will be selected.
124 |     /// When only owners is specified,
125 |     /// all accounts belonging to the owners will be streamed.
126 |     /// The accounts field supports a wildcard to select all accounts:
127 |     /// "accounts_selector" : {
128 |     ///     "accounts" : \["*"\],
129 |     /// }
130 |     /// * "host", optional, specifies the PostgreSQL server.
131 |     /// * "user", optional, specifies the PostgreSQL user.
132 |     /// * "port", optional, specifies the PostgreSQL server's port.
133 |     /// * "connection_str", optional, the custom PostgreSQL connection string.
134 |     /// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
135 |     /// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
136 |     /// `host` and `user` must be given.
137 |     /// * "store_account_historical_data", optional, set it to 'true' to store historical account data in the account_audit
138 |     /// table.
139 |     /// * "threads" optional, specifies the number of worker threads for the plugin. A thread
140 |     /// maintains a PostgreSQL connection to the server. The default is '100'.
141 |     /// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
142 |     /// from restoring a snapshot. The default is '10'.
143 |     /// * "panic_on_db_errors", optional, controls whether to panic when there are errors replicating data to the
144 |     /// PostgreSQL database. The default is 'false'.
145 |     /// * "transaction_selector", optional, controls if and what transactions to store. If this field is missing,
146 |     /// none of the transactions are stored.
147 |     /// "transaction_selector" : {
148 |     ///     "mentions" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
149 |     /// }
150 |     /// The `mentions` field supports a wildcard to select all transactions or all 'vote' transactions:
151 |     /// For example, to select all transactions:
152 |     /// "transaction_selector" : {
153 |     ///     "mentions" : \["*"\],
154 |     /// }
155 |     /// To select all vote transactions:
156 |     /// "transaction_selector" : {
157 |     ///     "mentions" : \["all_votes"\],
158 |     /// }
159 |     /// # Examples
160 |     ///
161 |     /// {
162 |     ///    "libpath": "/home/solana/target/release/libsolana_geyser_plugin_postgres.so",
163 |     ///    "host": "host_foo",
164 |     ///    "user": "solana",
165 |     ///    "threads": 10,
166 |     ///    "accounts_selector" : {
167 |     ///        "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
168 |     ///    }
169 |     /// }
170 | 
171 |     fn on_load(&mut self, config_file: &str) -> Result<()> {
172 |         solana_logger::setup_with_default("info");
173 |         info!(
174 |             "Loading plugin {:?} from config_file {:?}",
175 |             self.name(),
176 |             config_file
177 |         );
178 |         let mut file = File::open(config_file)?;
179 |         let mut contents = String::new();
180 |         file.read_to_string(&mut contents)?;
181 | 
182 |         let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
183 |         self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
184 |         self.transaction_selector = Some(Self::create_transaction_selector_from_config(&result));
185 | 
186 |         let config: GeyserPluginPostgresConfig =
187 |             serde_json::from_str(&contents).map_err(|err| {
188 |                 GeyserPluginError::ConfigFileReadError {
189 |                     msg: format!(
190 |                         "The config file is not in the JSON format expected: {:?}",
191 |                         err
192 |                     ),
193 |                 }
194 |             })?;
195 | 
196 |         let (client, batch_optimize_by_skiping_older_slots) =
197 |             PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
198 |         self.client = Some(client);
199 |         self.batch_starting_slot = batch_optimize_by_skiping_older_slots;
200 | 
201 |         Ok(())
202 |     }
203 | 
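Note that `on_load()` above reads the same file twice: once loosely as a `serde_json::Value` for the selector sections, and once strictly into `GeyserPluginPostgresConfig`. A condensed sketch of that flow (in-crate, since the helper functions are private; error handling elided):

```
fn parse_config_sketch(contents: &str) {
    let loose: serde_json::Value = serde_json::from_str(contents).unwrap();
    let accounts = GeyserPluginPostgres::create_accounts_selector_from_config(&loose);
    let transactions = GeyserPluginPostgres::create_transaction_selector_from_config(&loose);
    // The selector sections are unknown keys to the typed struct and are simply
    // ignored by serde, which is why the two-pass parse works on a single file.
    let typed: GeyserPluginPostgresConfig = serde_json::from_str(contents).unwrap();
    let _ = (accounts, transactions, typed);
}
```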
204 |     fn on_unload(&mut self) {
205 |         info!("Unloading plugin: {:?}", self.name());
206 | 
207 |         match &mut self.client {
208 |             None => {}
209 |             Some(client) => {
210 |                 client.join().unwrap();
211 |             }
212 |         }
213 |     }
214 | 
215 |     fn update_account(
216 |         &self,
217 |         account: ReplicaAccountInfoVersions,
218 |         slot: u64,
219 |         is_startup: bool,
220 |     ) -> Result<()> {
221 |         // skip updating account on startup if batch_optimize_by_skiping_older_slots
222 |         // is configured
223 |         if is_startup
224 |             && self
225 |                 .batch_starting_slot
226 |                 .map(|slot_limit| slot < slot_limit)
227 |                 .unwrap_or(false)
228 |         {
229 |             return Ok(());
230 |         }
231 | 
232 |         let mut measure_all = Measure::start("geyser-plugin-postgres-update-account-main");
233 |         match account {
234 |             ReplicaAccountInfoVersions::V0_0_1(_) => {
235 |                 return Err(GeyserPluginError::Custom(Box::new(
236 |                     GeyserPluginPostgresError::ReplicaAccountV001NotSupported,
237 |                 )));
238 |             }
239 |             ReplicaAccountInfoVersions::V0_0_2(_) => {
240 |                 return Err(GeyserPluginError::Custom(Box::new(
241 |                     GeyserPluginPostgresError::ReplicaAccountV001NotSupported,
242 |                 )));
243 |             }
244 |             ReplicaAccountInfoVersions::V0_0_3(account) => {
245 |                 let mut measure_select =
246 |                     Measure::start("geyser-plugin-postgres-update-account-select");
247 |                 if let Some(accounts_selector) = &self.accounts_selector {
248 |                     if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
249 |                         return Ok(());
250 |                     }
251 |                 } else {
252 |                     return Ok(());
253 |                 }
254 |                 measure_select.stop();
255 |                 inc_new_counter_debug!(
256 |                     "geyser-plugin-postgres-update-account-select-us",
257 |                     measure_select.as_us() as usize,
258 |                     100000,
259 |                     100000
260 |                 );
261 | 
262 |                 debug!(
263 |                     "Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
264 |                     bs58::encode(account.pubkey).into_string(),
265 |                     bs58::encode(account.owner).into_string(),
266 |                     slot,
267 |                     self.accounts_selector.as_ref().unwrap()
268 |                 );
269 | 
270 |                 match &self.client {
271 |                     None => {
272 |                         return Err(GeyserPluginError::Custom(Box::new(
273 |                             GeyserPluginPostgresError::DataStoreConnectionError {
274 |                                 msg: "There is no connection to the PostgreSQL database."
275 |                                     .to_string(),
276 |                             },
277 |                         )));
278 |                     }
279 |                     Some(client) => {
280 |                         let mut measure_update =
281 |                             Measure::start("geyser-plugin-postgres-update-account-client");
282 |                         let result = { client.update_account(account, slot, is_startup) };
283 |                         measure_update.stop();
284 | 
285 |                         inc_new_counter_debug!(
286 |                             "geyser-plugin-postgres-update-account-client-us",
287 |                             measure_update.as_us() as usize,
288 |                             100000,
289 |                             100000
290 |                         );
291 | 
292 |                         if let Err(err) = result {
293 |                             return Err(GeyserPluginError::AccountsUpdateError {
294 |                                 msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
Error: {:?}", err) 295 | }); 296 | } 297 | } 298 | } 299 | } 300 | } 301 | 302 | measure_all.stop(); 303 | 304 | inc_new_counter_debug!( 305 | "geyser-plugin-postgres-update-account-main-us", 306 | measure_all.as_us() as usize, 307 | 100000, 308 | 100000 309 | ); 310 | 311 | Ok(()) 312 | } 313 | 314 | fn update_slot_status(&self, slot: u64, parent: Option, status: SlotStatus) -> Result<()> { 315 | info!("Updating slot {:?} at with status {:?}", slot, status); 316 | 317 | match &self.client { 318 | None => { 319 | return Err(GeyserPluginError::Custom(Box::new( 320 | GeyserPluginPostgresError::DataStoreConnectionError { 321 | msg: "There is no connection to the PostgreSQL database.".to_string(), 322 | }, 323 | ))); 324 | } 325 | Some(client) => { 326 | let result = client.update_slot_status(slot, parent, status); 327 | 328 | if let Err(err) = result { 329 | return Err(GeyserPluginError::SlotStatusUpdateError{ 330 | msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err) 331 | }); 332 | } 333 | } 334 | } 335 | 336 | Ok(()) 337 | } 338 | 339 | fn notify_end_of_startup(&self) -> Result<()> { 340 | info!("Notifying the end of startup for accounts notifications"); 341 | match &self.client { 342 | None => { 343 | return Err(GeyserPluginError::Custom(Box::new( 344 | GeyserPluginPostgresError::DataStoreConnectionError { 345 | msg: "There is no connection to the PostgreSQL database.".to_string(), 346 | }, 347 | ))); 348 | } 349 | Some(client) => { 350 | let result = client.notify_end_of_startup(); 351 | 352 | if let Err(err) = result { 353 | return Err(GeyserPluginError::SlotStatusUpdateError{ 354 | msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err) 355 | }); 356 | } 357 | } 358 | } 359 | Ok(()) 360 | } 361 | 362 | fn notify_transaction( 363 | &self, 364 | transaction_info: ReplicaTransactionInfoVersions, 365 | slot: u64, 366 | ) -> Result<()> { 367 | match &self.client { 368 | None => { 369 | return Err(GeyserPluginError::Custom(Box::new( 370 | GeyserPluginPostgresError::DataStoreConnectionError { 371 | msg: "There is no connection to the PostgreSQL database.".to_string(), 372 | }, 373 | ))); 374 | } 375 | Some(client) => match transaction_info { 376 | ReplicaTransactionInfoVersions::V0_0_2(transaction_info) => { 377 | if let Some(transaction_selector) = &self.transaction_selector { 378 | if !transaction_selector.is_transaction_selected( 379 | transaction_info.is_vote, 380 | Box::new(transaction_info.transaction.message().account_keys().iter()), 381 | ) { 382 | return Ok(()); 383 | } 384 | } else { 385 | return Ok(()); 386 | } 387 | 388 | let result = client.log_transaction_info(transaction_info, slot); 389 | 390 | if let Err(err) = result { 391 | return Err(GeyserPluginError::SlotStatusUpdateError{ 392 | msg: format!("Failed to persist the transaction info to the PostgreSQL database. Error: {:?}", err) 393 | }); 394 | } 395 | } 396 | _ => { 397 | return Err(GeyserPluginError::SlotStatusUpdateError{ 398 | msg: "Failed to persist the transaction info to the PostgreSQL database. 
Unsupported format.".to_string() 399 | }); 400 | } 401 | }, 402 | } 403 | 404 | Ok(()) 405 | } 406 | 407 | fn notify_block_metadata(&self, block_info: ReplicaBlockInfoVersions) -> Result<()> { 408 | match &self.client { 409 | None => { 410 | return Err(GeyserPluginError::Custom(Box::new( 411 | GeyserPluginPostgresError::DataStoreConnectionError { 412 | msg: "There is no connection to the PostgreSQL database.".to_string(), 413 | }, 414 | ))); 415 | } 416 | Some(client) => match block_info { 417 | ReplicaBlockInfoVersions::V0_0_3(block_info) => { 418 | let result = client.update_block_metadata(block_info); 419 | 420 | if let Err(err) = result { 421 | return Err(GeyserPluginError::SlotStatusUpdateError{ 422 | msg: format!("Failed to persist the update of block metadata to the PostgreSQL database. Error: {:?}", err) 423 | }); 424 | } 425 | } 426 | ReplicaBlockInfoVersions::V0_0_2(_block_info) => { 427 | return Err(GeyserPluginError::SlotStatusUpdateError{ 428 | msg: "Failed to persist the transaction info to the PostgreSQL database. Unsupported format.".to_string() 429 | }); 430 | } 431 | ReplicaBlockInfoVersions::V0_0_1(_) => { 432 | return Err(GeyserPluginError::SlotStatusUpdateError{ 433 | msg: "Failed to persist the transaction info to the PostgreSQL database. Unsupported format.".to_string() 434 | }); 435 | } 436 | }, 437 | } 438 | 439 | Ok(()) 440 | } 441 | 442 | /// Check if the plugin is interested in account data 443 | /// Default is true -- if the plugin is not interested in 444 | /// account data, please return false. 445 | fn account_data_notifications_enabled(&self) -> bool { 446 | self.accounts_selector 447 | .as_ref() 448 | .map_or_else(|| false, |selector| selector.is_enabled()) 449 | } 450 | 451 | /// Check if the plugin is interested in transaction data 452 | fn transaction_notifications_enabled(&self) -> bool { 453 | self.transaction_selector 454 | .as_ref() 455 | .map_or_else(|| false, |selector| selector.is_enabled()) 456 | } 457 | } 458 | 459 | impl GeyserPluginPostgres { 460 | fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector { 461 | let accounts_selector = &config["accounts_selector"]; 462 | 463 | if accounts_selector.is_null() { 464 | AccountsSelector::default() 465 | } else { 466 | let accounts = &accounts_selector["accounts"]; 467 | let accounts: Vec = if accounts.is_array() { 468 | accounts 469 | .as_array() 470 | .unwrap() 471 | .iter() 472 | .map(|val| val.as_str().unwrap().to_string()) 473 | .collect() 474 | } else { 475 | Vec::default() 476 | }; 477 | let owners = &accounts_selector["owners"]; 478 | let owners: Vec = if owners.is_array() { 479 | owners 480 | .as_array() 481 | .unwrap() 482 | .iter() 483 | .map(|val| val.as_str().unwrap().to_string()) 484 | .collect() 485 | } else { 486 | Vec::default() 487 | }; 488 | AccountsSelector::new(&accounts, &owners) 489 | } 490 | } 491 | 492 | fn create_transaction_selector_from_config(config: &serde_json::Value) -> TransactionSelector { 493 | let transaction_selector = &config["transaction_selector"]; 494 | 495 | if transaction_selector.is_null() { 496 | TransactionSelector::default() 497 | } else { 498 | let accounts = &transaction_selector["mentions"]; 499 | let accounts: Vec = if accounts.is_array() { 500 | accounts 501 | .as_array() 502 | .unwrap() 503 | .iter() 504 | .map(|val| val.as_str().unwrap().to_string()) 505 | .collect() 506 | } else { 507 | Vec::default() 508 | }; 509 | TransactionSelector::new(&accounts) 510 | } 511 | } 512 | 513 | pub fn new() -> Self { 514 | 
514 |         Self::default()
515 |     }
516 | }
517 | 
518 | #[no_mangle]
519 | #[allow(improper_ctypes_definitions)]
520 | /// # Safety
521 | ///
522 | /// This function returns the GeyserPluginPostgres pointer as trait GeyserPlugin.
523 | pub unsafe extern "C" fn _create_plugin() -> *mut dyn GeyserPlugin {
524 |     let plugin = GeyserPluginPostgres::new();
525 |     let plugin: Box<dyn GeyserPlugin> = Box::new(plugin);
526 |     Box::into_raw(plugin)
527 | }
528 | 
529 | #[cfg(test)]
530 | pub(crate) mod tests {
531 |     use {super::*, serde_json};
532 | 
533 |     #[test]
534 |     fn test_accounts_selector_from_config() {
535 |         let config = "{\"accounts_selector\" : { \
536 |            \"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
537 |         }}";
538 | 
539 |         let config: serde_json::Value = serde_json::from_str(config).unwrap();
540 |         GeyserPluginPostgres::create_accounts_selector_from_config(&config);
541 |     }
542 | }
543 | 
--------------------------------------------------------------------------------
/src/inline_spl_token.rs:
--------------------------------------------------------------------------------
 1 | /// Partial SPL Token declarations inlined to avoid an external dependency on the spl-token crate
 2 | /// Copied from solana-runtime
 3 | use solana_sdk::pubkey::{Pubkey, PUBKEY_BYTES};
 4 | 
 5 | solana_sdk::declare_id!("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA");
 6 | 
 7 | pub(crate) mod new_token_program {
 8 |     solana_sdk::declare_id!("nTok2oJvx1CgbYA2SznfJLmnKLEL6sYdh2ypZms2nhm");
 9 | }
10 | 
11 | /*
12 |     /// The SPL token definition -- we care about only the mint and owner fields for now,
13 |     /// at offset 0 and 32 respectively.
14 |     spl_token::state::Account {
15 |         mint: Pubkey,
16 |         owner: Pubkey,
17 |         amount: u64,
18 |         delegate: COption<Pubkey>,
19 |         state: AccountState,
20 |         is_native: COption<u64>,
21 |         delegated_amount: u64,
22 |         close_authority: COption<Pubkey>,
23 |     }
24 | */
25 | pub const SPL_TOKEN_ACCOUNT_MINT_OFFSET: usize = 0;
26 | pub const SPL_TOKEN_ACCOUNT_OWNER_OFFSET: usize = 32;
27 | const SPL_TOKEN_ACCOUNT_LENGTH: usize = 165;
28 | 
29 | pub(crate) trait GenericTokenAccount {
30 |     fn valid_account_data(account_data: &[u8]) -> bool;
31 | 
32 |     // Call after account length has already been verified
33 |     fn unpack_account_owner_unchecked(account_data: &[u8]) -> &Pubkey {
34 |         Self::unpack_pubkey_unchecked(account_data, SPL_TOKEN_ACCOUNT_OWNER_OFFSET)
35 |     }
36 | 
37 |     // Call after account length has already been verified
38 |     fn unpack_account_mint_unchecked(account_data: &[u8]) -> &Pubkey {
39 |         Self::unpack_pubkey_unchecked(account_data, SPL_TOKEN_ACCOUNT_MINT_OFFSET)
40 |     }
41 | 
42 |     // Call after account length has already been verified
43 |     fn unpack_pubkey_unchecked(account_data: &[u8], offset: usize) -> &Pubkey {
44 |         bytemuck::from_bytes(&account_data[offset..offset + PUBKEY_BYTES])
45 |     }
46 | 
47 |     fn unpack_account_owner(account_data: &[u8]) -> Option<&Pubkey> {
48 |         if Self::valid_account_data(account_data) {
49 |             Some(Self::unpack_account_owner_unchecked(account_data))
50 |         } else {
51 |             None
52 |         }
53 |     }
54 | 
55 |     fn unpack_account_mint(account_data: &[u8]) -> Option<&Pubkey> {
56 |         if Self::valid_account_data(account_data) {
57 |             Some(Self::unpack_account_mint_unchecked(account_data))
58 |         } else {
59 |             None
60 |         }
61 |     }
62 | }
63 | 
64 | pub struct Account;
65 | impl Account {
66 |     pub fn get_packed_len() -> usize {
67 |         SPL_TOKEN_ACCOUNT_LENGTH
68 |     }
69 | }
70 | 
71 | impl GenericTokenAccount for Account {
72 |     fn valid_account_data(account_data: &[u8]) -> bool {
73 |         account_data.len() == SPL_TOKEN_ACCOUNT_LENGTH
74 |     }
75 | }
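A sketch of how the offset-based accessors above behave on a synthetic 165-byte token-account image (hypothetical in-crate test; the trait is `pub(crate)`, and the values are illustrative only):

```
#[test]
fn test_unpack_sketch() {
    let mut data = vec![0u8; Account::get_packed_len()]; // 165 bytes
    data[SPL_TOKEN_ACCOUNT_MINT_OFFSET..32].copy_from_slice(&[1u8; 32]);
    data[SPL_TOKEN_ACCOUNT_OWNER_OFFSET..64].copy_from_slice(&[2u8; 32]);

    let owner = Account::unpack_account_owner(&data).expect("length is valid");
    assert_eq!(owner.as_ref(), &[2u8; 32][..]);

    // Any other length fails valid_account_data() and yields None.
    assert!(Account::unpack_account_owner(&data[..164]).is_none());
}
```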
76 | 
77 | pub mod native_mint {
78 |     solana_sdk::declare_id!("So11111111111111111111111111111111111111112");
79 | 
80 |     /*
81 |         Mint {
82 |             mint_authority: COption::None,
83 |             supply: 0,
84 |             decimals: 9,
85 |             is_initialized: true,
86 |             freeze_authority: COption::None,
87 |         }
88 |     */
89 |     pub const ACCOUNT_DATA: [u8; 82] = [
90 |         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
91 |         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
92 |         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
93 |     ];
94 | }
--------------------------------------------------------------------------------
/src/inline_spl_token_2022.rs:
--------------------------------------------------------------------------------
 1 | /// Partial SPL Token declarations inlined to avoid an external dependency on the spl-token-2022 crate
 2 | /// Copied from solana-runtime
 3 | use crate::inline_spl_token::{self, GenericTokenAccount};
 4 | 
 5 | solana_sdk::declare_id!("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb");
 6 | 
 7 | // `spl_token_program_2022::extension::AccountType::Account` ordinal value
 8 | const ACCOUNTTYPE_ACCOUNT: u8 = 2;
 9 | 
10 | pub struct Account;
11 | impl GenericTokenAccount for Account {
12 |     fn valid_account_data(account_data: &[u8]) -> bool {
13 |         inline_spl_token::Account::valid_account_data(account_data)
14 |             || ACCOUNTTYPE_ACCOUNT
15 |                 == *account_data
16 |                     .get(inline_spl_token::Account::get_packed_len())
17 |                     .unwrap_or(&0)
18 |     }
19 | }
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod accounts_selector;
2 | pub mod geyser_plugin_postgres;
3 | pub mod inline_spl_token;
4 | pub mod inline_spl_token_2022;
5 | pub mod postgres_client;
6 | pub mod transaction_selector;
--------------------------------------------------------------------------------
/src/postgres_client.rs:
--------------------------------------------------------------------------------
 1 | #![allow(clippy::arithmetic_side_effects)]
 2 | 
 3 | mod postgres_client_account_index;
 4 | mod postgres_client_block_metadata;
 5 | mod postgres_client_transaction;
 6 | 
 7 | /// A concurrent implementation for writing accounts into PostgreSQL in parallel.
 8 | use {
 9 |     crate::{
10 |         geyser_plugin_postgres::{GeyserPluginPostgresConfig, GeyserPluginPostgresError},
11 |         postgres_client::postgres_client_account_index::TokenSecondaryIndexEntry,
12 |     },
13 |     chrono::Utc,
14 |     crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
15 |     log::*,
16 |     openssl::ssl::{SslConnector, SslFiletype, SslMethod},
17 |     postgres::{Client, NoTls, Statement},
18 |     postgres_client_block_metadata::DbBlockInfo,
19 |     postgres_client_transaction::LogTransactionRequest,
20 |     postgres_openssl::MakeTlsConnector,
21 |     solana_geyser_plugin_interface::geyser_plugin_interface::{
22 |         GeyserPluginError, ReplicaAccountInfoV3, ReplicaBlockInfoV3, SlotStatus,
23 |     },
24 |     solana_measure::measure::Measure,
25 |     solana_metrics::*,
26 |     solana_sdk::timing::AtomicInterval,
27 |     std::{
28 |         collections::HashSet,
29 |         sync::{
30 |             atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
31 |             Arc, Mutex,
32 |         },
33 |         thread::{self, sleep, Builder, JoinHandle},
34 |         time::Duration,
35 |     },
36 |     tokio_postgres::types,
37 | };
38 | 
39 | /// The maximum number of asynchronous requests allowed in the channel to avoid excessive
40 | /// memory usage. The downside: calls after this threshold is reached can get blocked.
41 | const MAX_ASYNC_REQUESTS: usize = 40960;
42 | const SAFE_BATCH_STARTING_SLOT_CUSHION: u64 = 2 * 40960;
43 | const DEFAULT_POSTGRES_PORT: u16 = 5432;
44 | const DEFAULT_THREADS_COUNT: usize = 100;
45 | const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
46 | const ACCOUNT_COLUMN_COUNT: usize = 10;
47 | const DEFAULT_PANIC_ON_DB_ERROR: bool = false;
48 | const DEFAULT_STORE_ACCOUNT_HISTORICAL_DATA: bool = false;
49 | 
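A standalone sketch of the backpressure behavior noted above: with a bounded crossbeam channel, producers block (or, with `try_send`, fail fast) once the queue holds its capacity of items; the capacity is shrunk from `MAX_ASYNC_REQUESTS` to 2 here for illustration:

```
fn backpressure_sketch() {
    // bounded() is the same constructor used by ParallelPostgresClient::new().
    let (sender, receiver) = crossbeam_channel::bounded::<u64>(2);
    sender.send(1).unwrap();
    sender.send(2).unwrap();
    // A plain send() would now block until a worker drains the queue;
    // try_send() reports the full queue as an error instead of blocking.
    assert!(sender.try_send(3).is_err());
    receiver.recv().unwrap();
    assert!(sender.try_send(3).is_ok());
}
```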
 50 | struct PostgresSqlClientWrapper {
 51 |     client: Client,
 52 |     update_account_stmt: Statement,
 53 |     bulk_account_insert_stmt: Statement,
 54 |     update_slot_with_parent_stmt: Statement,
 55 |     update_slot_without_parent_stmt: Statement,
 56 |     update_transaction_log_stmt: Statement,
 57 |     update_block_metadata_stmt: Statement,
 58 |     insert_account_audit_stmt: Option<Statement>,
 59 |     insert_token_owner_index_stmt: Option<Statement>,
 60 |     insert_token_mint_index_stmt: Option<Statement>,
 61 |     bulk_insert_token_owner_index_stmt: Option<Statement>,
 62 |     bulk_insert_token_mint_index_stmt: Option<Statement>,
 63 | }
 64 | 
 65 | pub struct SimplePostgresClient {
 66 |     batch_size: usize,
 67 |     slots_at_startup: HashSet<u64>,
 68 |     pending_account_updates: Vec<DbAccountInfo>,
 69 |     index_token_owner: bool,
 70 |     index_token_mint: bool,
 71 |     pending_token_owner_index: Vec<TokenSecondaryIndexEntry>,
 72 |     pending_token_mint_index: Vec<TokenSecondaryIndexEntry>,
 73 |     client: Mutex<PostgresSqlClientWrapper>,
 74 | }
 75 | 
 76 | struct PostgresClientWorker {
 77 |     client: SimplePostgresClient,
 78 |     /// Indicating if accounts notification during startup is done.
 79 |     is_startup_done: bool,
 80 | }
 81 | 
 82 | impl Eq for DbAccountInfo {}
 83 | 
 84 | #[derive(Clone, PartialEq, Debug)]
 85 | pub struct DbAccountInfo {
 86 |     pub pubkey: Vec<u8>,
 87 |     pub lamports: i64,
 88 |     pub owner: Vec<u8>,
 89 |     pub executable: bool,
 90 |     pub rent_epoch: i64,
 91 |     pub data: Vec<u8>,
 92 |     pub slot: i64,
 93 |     pub write_version: i64,
 94 |     pub txn_signature: Option<Vec<u8>>,
 95 | }
 96 | 
 97 | pub(crate) fn abort() -> ! {
 98 |     #[cfg(not(test))]
 99 |     {
100 |         // standard error is usually redirected to a log file, cry for help on standard output as
101 |         // well
102 |         eprintln!("Validator process aborted. The validator log may contain further details");
103 |         std::process::exit(1);
104 |     }
105 | 
106 |     #[cfg(test)]
107 |     panic!("process::exit(1) is intercepted for friendly test failure...");
108 | }
109 | 
110 | impl DbAccountInfo {
111 |     fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
112 |         let data = account.data().to_vec();
113 |         Self {
114 |             pubkey: account.pubkey().to_vec(),
115 |             lamports: account.lamports(),
116 |             owner: account.owner().to_vec(),
117 |             executable: account.executable(),
118 |             rent_epoch: account.rent_epoch(),
119 |             data,
120 |             slot: slot as i64,
121 |             write_version: account.write_version(),
122 |             txn_signature: account.txn_signature().map(|v| v.to_vec()),
123 |         }
124 |     }
125 | }
126 | 
127 | pub trait ReadableAccountInfo: Sized {
128 |     fn pubkey(&self) -> &[u8];
129 |     fn owner(&self) -> &[u8];
130 |     fn lamports(&self) -> i64;
131 |     fn executable(&self) -> bool;
132 |     fn rent_epoch(&self) -> i64;
133 |     fn data(&self) -> &[u8];
134 |     fn write_version(&self) -> i64;
135 |     fn txn_signature(&self) -> Option<&[u8]>;
136 | }
137 | 
138 | impl ReadableAccountInfo for DbAccountInfo {
139 |     fn pubkey(&self) -> &[u8] {
140 |         &self.pubkey
141 |     }
142 | 
143 |     fn owner(&self) -> &[u8] {
144 |         &self.owner
145 |     }
146 | 
147 |     fn lamports(&self) -> i64 {
148 |         self.lamports
149 |     }
150 | 
151 |     fn executable(&self) -> bool {
152 |         self.executable
153 |     }
154 | 
155 |     fn rent_epoch(&self) -> i64 {
156 |         self.rent_epoch
157 |     }
158 | 
159 |     fn data(&self) -> &[u8] {
160 |         &self.data
161 |     }
162 | 
163 |     fn write_version(&self) -> i64 {
164 |         self.write_version
165 |     }
166 | 
167 |     fn txn_signature(&self) -> Option<&[u8]> {
168 |         self.txn_signature.as_deref()
169 |     }
170 | }
171 | 
172 | impl<'a> ReadableAccountInfo for ReplicaAccountInfoV3<'a> {
173 |     fn pubkey(&self) -> &[u8] {
174 |         self.pubkey
175 |     }
176 | 
177 |     fn owner(&self) -> &[u8] {
178 |         self.owner
179 |     }
180 | 
181 |     fn lamports(&self) -> i64 {
182 |         self.lamports as i64
183 |     }
184 | 
185 |     fn executable(&self) -> bool {
186 |         self.executable
187 |     }
188 | 
189 |     fn rent_epoch(&self) -> i64 {
190 |         self.rent_epoch as i64
191 |     }
192 | 
193 |     fn data(&self) -> &[u8] {
194 |         self.data
195 |     }
196 | 
197 |     fn write_version(&self) -> i64 {
198 |         self.write_version as i64
199 |     }
200 | 
201 |     fn txn_signature(&self) -> Option<&[u8]> {
202 |         self.txn.map(|v| v.signature().as_ref())
203 |     }
204 | }
205 | 
206 | pub trait PostgresClient {
207 |     fn join(&mut self) -> thread::Result<()> {
208 |         Ok(())
209 |     }
210 | 
211 |     fn update_account(
212 |         &mut self,
213 |         account: DbAccountInfo,
214 |         is_startup: bool,
215 |     ) -> Result<(), GeyserPluginError>;
216 | 
217 |     fn update_slot_status(
218 |         &mut self,
219 |         slot: u64,
220 |         parent: Option<u64>,
221 |         status: SlotStatus,
222 |     ) -> Result<(), GeyserPluginError>;
223 | 
224 |     fn notify_end_of_startup(&mut self) -> Result<(), GeyserPluginError>;
225 | 
226 |     fn log_transaction(
227 |         &mut self,
228 |         transaction_log_info: LogTransactionRequest,
229 |     ) -> Result<(), GeyserPluginError>;
230 | 
231 |     fn update_block_metadata(
232 |         &mut self,
233 |         block_info: UpdateBlockMetadataRequest,
234 |     ) -> Result<(), GeyserPluginError>;
235 | }
236 | 
237 | impl SimplePostgresClient {
238 |     pub fn connect_to_db(config: &GeyserPluginPostgresConfig) -> Result<Client, GeyserPluginError> {
239 |         let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);
240 | 
241 |         let connection_str = if let Some(connection_str) = &config.connection_str {
242 |             connection_str.clone()
243 |         } else {
244 |             if config.host.is_none() || config.user.is_none() {
245 |                 let msg = format!(
246 |                     "\"connection_str\": {:?}, or \"host\": {:?} \"user\": {:?} must be specified",
247 |                     config.connection_str, config.host, config.user
248 |                 );
249 |                 return Err(GeyserPluginError::Custom(Box::new(
250 |                     GeyserPluginPostgresError::ConfigurationError { msg },
251 |                 )));
252 |             }
253 |             format!(
254 |                 "host={} user={} port={}",
255 |                 config.host.as_ref().unwrap(),
256 |                 config.user.as_ref().unwrap(),
257 |                 port
258 |             )
259 |         };
260 | 
261 |         let result = if let Some(true) = config.use_ssl {
262 |             if config.server_ca.is_none() {
263 |                 let msg = "\"server_ca\" must be specified when \"use_ssl\" is set".to_string();
264 |                 return Err(GeyserPluginError::Custom(Box::new(
265 |                     GeyserPluginPostgresError::ConfigurationError { msg },
266 |                 )));
267 |             }
268 |             if config.client_cert.is_none() {
269 |                 let msg = "\"client_cert\" must be specified when \"use_ssl\" is set".to_string();
270 |                 return Err(GeyserPluginError::Custom(Box::new(
271 |                     GeyserPluginPostgresError::ConfigurationError { msg },
272 |                 )));
273 |             }
274 |             if config.client_key.is_none() {
275 |                 let msg = "\"client_key\" must be specified when \"use_ssl\" is set".to_string();
276 |                 return Err(GeyserPluginError::Custom(Box::new(
277 |                     GeyserPluginPostgresError::ConfigurationError { msg },
278 |                 )));
279 |             }
280 |             let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
281 |             if let Err(err) = builder.set_ca_file(config.server_ca.as_ref().unwrap()) {
282 |                 let msg = format!(
283 |                     "Failed to set the server certificate specified by \"server_ca\": {}. Error: ({})",
284 |                     config.server_ca.as_ref().unwrap(), err);
285 |                 return Err(GeyserPluginError::Custom(Box::new(
286 |                     GeyserPluginPostgresError::ConfigurationError { msg },
287 |                 )));
288 |             }
289 |             if let Err(err) =
290 |                 builder.set_certificate_file(config.client_cert.as_ref().unwrap(), SslFiletype::PEM)
291 |             {
292 |                 let msg = format!(
293 |                     "Failed to set the client certificate specified by \"client_cert\": {}. Error: ({})",
294 |                     config.client_cert.as_ref().unwrap(), err);
295 |                 return Err(GeyserPluginError::Custom(Box::new(
296 |                     GeyserPluginPostgresError::ConfigurationError { msg },
297 |                 )));
298 |             }
299 |             if let Err(err) =
300 |                 builder.set_private_key_file(config.client_key.as_ref().unwrap(), SslFiletype::PEM)
301 |             {
302 |                 let msg = format!(
303 |                     "Failed to set the client key specified by \"client_key\": {}. Error: ({})",
Error: ({})", 304 | config.client_key.as_ref().unwrap(), 305 | err 306 | ); 307 | return Err(GeyserPluginError::Custom(Box::new( 308 | GeyserPluginPostgresError::ConfigurationError { msg }, 309 | ))); 310 | } 311 | 312 | let mut connector = MakeTlsConnector::new(builder.build()); 313 | connector.set_callback(|connect_config, _domain| { 314 | connect_config.set_verify_hostname(false); 315 | Ok(()) 316 | }); 317 | Client::connect(&connection_str, connector) 318 | } else { 319 | Client::connect(&connection_str, NoTls) 320 | }; 321 | 322 | match result { 323 | Err(err) => { 324 | let msg = format!( 325 | "Error in connecting to the PostgreSQL database: {:?} connection_str: {:?}", 326 | err, connection_str 327 | ); 328 | error!("{}", msg); 329 | Err(GeyserPluginError::Custom(Box::new( 330 | GeyserPluginPostgresError::DataStoreConnectionError { msg }, 331 | ))) 332 | } 333 | Ok(client) => Ok(client), 334 | } 335 | } 336 | 337 | fn build_bulk_account_insert_statement( 338 | client: &mut Client, 339 | config: &GeyserPluginPostgresConfig, 340 | ) -> Result { 341 | let batch_size = config 342 | .batch_size 343 | .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE); 344 | let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on, txn_signature) VALUES"); 345 | for j in 0..batch_size { 346 | let row = j * ACCOUNT_COLUMN_COUNT; 347 | let val_str = format!( 348 | "(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})", 349 | row + 1, 350 | row + 2, 351 | row + 3, 352 | row + 4, 353 | row + 5, 354 | row + 6, 355 | row + 7, 356 | row + 8, 357 | row + 9, 358 | row + 10, 359 | ); 360 | 361 | if j == 0 { 362 | stmt = format!("{} {}", &stmt, val_str); 363 | } else { 364 | stmt = format!("{}, {}", &stmt, val_str); 365 | } 366 | } 367 | 368 | let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \ 369 | data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on, txn_signature=excluded.txn_signature WHERE acct.slot < excluded.slot OR (\ 370 | acct.slot = excluded.slot AND acct.write_version < excluded.write_version)"; 371 | 372 | stmt = format!("{} {}", stmt, handle_conflict); 373 | 374 | info!("{}", stmt); 375 | let bulk_stmt = client.prepare(&stmt); 376 | 377 | match bulk_stmt { 378 | Err(err) => { 379 | Err(GeyserPluginError::Custom(Box::new(GeyserPluginPostgresError::DataSchemaError { 380 | msg: format!( 381 | "Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}", 382 | err, config.host, config.user, config 383 | ), 384 | }))) 385 | } 386 | Ok(update_account_stmt) => Ok(update_account_stmt), 387 | } 388 | } 389 | 390 | fn build_single_account_upsert_statement( 391 | client: &mut Client, 392 | config: &GeyserPluginPostgresConfig, 393 | ) -> Result { 394 | let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on, txn_signature) \ 395 | VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) \ 396 | ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \ 397 | data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on, txn_signature=excluded.txn_signature WHERE acct.slot < excluded.slot OR (\ 398 | acct.slot = 
390 |     fn build_single_account_upsert_statement(
391 |         client: &mut Client,
392 |         config: &GeyserPluginPostgresConfig,
393 |     ) -> Result<Statement, GeyserPluginError> {
394 |         let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on, txn_signature) \
395 |         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) \
396 |         ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
397 |         data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on, txn_signature=excluded.txn_signature WHERE acct.slot < excluded.slot OR (\
398 |         acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
399 | 
400 |         let stmt = client.prepare(stmt);
401 | 
402 |         match stmt {
403 |             Err(err) => {
404 |                 Err(GeyserPluginError::Custom(Box::new(GeyserPluginPostgresError::DataSchemaError {
405 |                     msg: format!(
406 |                         "Error in preparing the accounts update statement for the PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
407 |                         err, config.host, config.user, config
408 |                     ),
409 |                 })))
410 |             }
411 |             Ok(update_account_stmt) => Ok(update_account_stmt),
412 |         }
413 |     }
414 | 
415 |     fn prepare_query_statement(
416 |         client: &mut Client,
417 |         config: &GeyserPluginPostgresConfig,
418 |         stmt: &str,
419 |     ) -> Result<Statement, GeyserPluginError> {
420 |         let statement = client.prepare(stmt);
421 | 
422 |         match statement {
423 |             Err(err) => {
424 |                 Err(GeyserPluginError::Custom(Box::new(GeyserPluginPostgresError::DataSchemaError {
425 |                     msg: format!(
426 |                         "Error in preparing the statement {} for the PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
427 |                         stmt, err, config.host, config.user, config
428 |                     ),
429 |                 })))
430 |             }
431 |             Ok(statement) => Ok(statement),
432 |         }
433 |     }
434 | 
435 |     fn build_account_audit_insert_statement(
436 |         client: &mut Client,
437 |         config: &GeyserPluginPostgresConfig,
438 |     ) -> Result<Statement, GeyserPluginError> {
439 |         let stmt = "INSERT INTO account_audit (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on, txn_signature) \
440 |         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)";
441 | 
442 |         let stmt = client.prepare(stmt);
443 | 
444 |         match stmt {
445 |             Err(err) => {
446 |                 Err(GeyserPluginError::Custom(Box::new(GeyserPluginPostgresError::DataSchemaError {
447 |                     msg: format!(
448 |                         "Error in preparing the account_audit insert statement for the PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
449 |                         err, config.host, config.user, config
450 |                     ),
451 |                 })))
452 |             }
453 |             Ok(stmt) => Ok(stmt),
454 |         }
455 |     }
456 | 
457 |     fn build_slot_upsert_statement_with_parent(
458 |         client: &mut Client,
459 |         config: &GeyserPluginPostgresConfig,
460 |     ) -> Result<Statement, GeyserPluginError> {
461 |         let stmt = "INSERT INTO slot (slot, parent, status, updated_on) \
462 |         VALUES ($1, $2, $3, $4) \
463 |         ON CONFLICT (slot) DO UPDATE SET parent=excluded.parent, status=excluded.status, updated_on=excluded.updated_on";
464 | 
465 |         let stmt = client.prepare(stmt);
466 | 
467 |         match stmt {
468 |             Err(err) => {
469 |                 Err(GeyserPluginError::Custom(Box::new(GeyserPluginPostgresError::DataSchemaError {
470 |                     msg: format!(
471 |                         "Error in preparing the slot update statement for the PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
472 |                         err, config.host, config.user, config
473 |                     ),
474 |                 })))
475 |             }
476 |             Ok(stmt) => Ok(stmt),
477 |         }
478 |     }
479 | 
480 |     fn build_slot_upsert_statement_without_parent(
481 |         client: &mut Client,
482 |         config: &GeyserPluginPostgresConfig,
483 |     ) -> Result<Statement, GeyserPluginError> {
484 |         let stmt = "INSERT INTO slot (slot, status, updated_on) \
485 |         VALUES ($1, $2, $3) \
486 |         ON CONFLICT (slot) DO UPDATE SET status=excluded.status, updated_on=excluded.updated_on";
487 | 
488 |         let stmt = client.prepare(stmt);
489 | 
490 |         match stmt {
491 |             Err(err) => {
492 |                 Err(GeyserPluginError::Custom(Box::new(GeyserPluginPostgresError::DataSchemaError {
493 |                     msg: format!(
494 |                         "Error in preparing the slot update statement for the PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
495 |                         err, config.host, config.user, config
496 |                     ),
497 |                 })))
498 |             }
499 |             Ok(stmt) => Ok(stmt),
500 |         }
501 |     }
502 | 
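The ON CONFLICT guard used by both account upsert statements only replaces a row when the incoming version is strictly newer. A tiny sketch of the predicate with a worked example (this mirrors the SQL WHERE clause; it is not code from the crate):

```
/// (slot, write_version) ordering used by the upserts: a newer slot wins,
/// and within the same slot a higher write_version wins.
fn incoming_is_newer(existing: (i64, i64), incoming: (i64, i64)) -> bool {
    existing.0 < incoming.0 || (existing.0 == incoming.0 && existing.1 < incoming.1)
}

// Worked example: with (slot=10, write_version=5) stored,
//   incoming (10, 4) -> false: 0 rows modified, which is exactly the stale
//                       case upsert_account_internal diverts to account_audit;
//   incoming (10, 6) -> true:  row updated;
//   incoming (11, 0) -> true:  newer slot wins regardless of write_version.
```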
503 |     /// Internal function for inserting an account into the account_audit table.
504 |     fn insert_account_audit(
505 |         account: &DbAccountInfo,
506 |         statement: &Statement,
507 |         client: &mut Client,
508 |     ) -> Result<(), GeyserPluginError> {
509 |         let lamports = account.lamports();
510 |         let rent_epoch = account.rent_epoch();
511 |         let updated_on = Utc::now().naive_utc();
512 |         let result = client.execute(
513 |             statement,
514 |             &[
515 |                 &account.pubkey(),
516 |                 &account.slot,
517 |                 &account.owner(),
518 |                 &lamports,
519 |                 &account.executable(),
520 |                 &rent_epoch,
521 |                 &account.data(),
522 |                 &account.write_version(),
523 |                 &updated_on,
524 |                 &account.txn_signature(),
525 |             ],
526 |         );
527 | 
528 |         if let Err(err) = result {
529 |             let msg = format!(
530 |                 "Failed to persist the insert of account_audit to the PostgreSQL database. Error: {:?}",
531 |                 err
532 |             );
533 |             error!("{}", msg);
534 |             return Err(GeyserPluginError::AccountsUpdateError { msg });
535 |         }
536 |         Ok(())
537 |     }
538 | 
539 |     /// Internal function for updating or inserting a single account
540 |     fn upsert_account_internal(
541 |         account: &DbAccountInfo,
542 |         statement: &Statement,
543 |         client: &mut Client,
544 |         insert_account_audit_stmt: &Option<Statement>,
545 |         insert_token_owner_index_stmt: &Option<Statement>,
546 |         insert_token_mint_index_stmt: &Option<Statement>,
547 |     ) -> Result<(), GeyserPluginError> {
548 |         let lamports = account.lamports();
549 |         let rent_epoch = account.rent_epoch();
550 |         let updated_on = Utc::now().naive_utc();
551 |         let result = client.execute(
552 |             statement,
553 |             &[
554 |                 &account.pubkey(),
555 |                 &account.slot,
556 |                 &account.owner(),
557 |                 &lamports,
558 |                 &account.executable(),
559 |                 &rent_epoch,
560 |                 &account.data(),
561 |                 &account.write_version(),
562 |                 &updated_on,
563 |                 &account.txn_signature(),
564 |             ],
565 |         );
566 | 
567 |         if let Err(err) = result {
568 |             let msg = format!(
569 |                 "Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
570 |                 err
571 |             );
572 |             error!("{}", msg);
573 |             return Err(GeyserPluginError::AccountsUpdateError { msg });
574 |         } else if result.unwrap() == 0 && insert_account_audit_stmt.is_some() {
575 |             // If no records were modified (inserted or updated), the account was updated
576 |             // at an older slot; insert the record directly into the account_audit table.
577 |             let statement = insert_account_audit_stmt.as_ref().unwrap();
578 |             Self::insert_account_audit(account, statement, client)?;
579 |         }
580 | 
581 |         if let Some(insert_token_owner_index_stmt) = insert_token_owner_index_stmt {
582 |             Self::update_token_owner_index(client, insert_token_owner_index_stmt, account)?;
583 |         }
584 | 
585 |         if let Some(insert_token_mint_index_stmt) = insert_token_mint_index_stmt {
586 |             Self::update_token_mint_index(client, insert_token_mint_index_stmt, account)?;
587 |         }
588 | 
589 |         Ok(())
590 |     }
591 | 
592 |     /// Update or insert a single account
593 |     fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), GeyserPluginError> {
594 |         let client = self.client.get_mut().unwrap();
595 |         let insert_account_audit_stmt = &client.insert_account_audit_stmt;
596 |         let statement = &client.update_account_stmt;
597 |         let insert_token_owner_index_stmt = &client.insert_token_owner_index_stmt;
598 |         let insert_token_mint_index_stmt = &client.insert_token_mint_index_stmt;
599 |         let client = &mut client.client;
600 |         Self::upsert_account_internal(
601 |             account,
602 |             statement,
603 |             client,
604 |             insert_account_audit_stmt,
605 |             insert_token_owner_index_stmt,
606 |             insert_token_mint_index_stmt,
607 |         )?;
608 | 
609 |         Ok(())
610 |     }
611 | 
612 |     /// Insert accounts in batch to reduce network overhead
613 |     fn insert_accounts_in_batch(
614 |         &mut self,
615 |         account: DbAccountInfo,
616 |     ) -> Result<(), GeyserPluginError> {
617 |         self.queue_secondary_indexes(&account);
618 |         self.pending_account_updates.push(account);
619 | 
620 |         self.bulk_insert_accounts()?;
621 |         self.bulk_insert_token_owner_index()?;
622 |         self.bulk_insert_token_mint_index()
623 |     }
624 | 
625 |     fn bulk_insert_accounts(&mut self) -> Result<(), GeyserPluginError> {
626 |         if self.pending_account_updates.len() == self.batch_size {
627 |             let mut measure = Measure::start("geyser-plugin-postgres-prepare-values");
628 | 
629 |             let mut values: Vec<&(dyn types::ToSql + Sync)> =
630 |                 Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
631 |             let updated_on = Utc::now().naive_utc();
632 |             for j in 0..self.batch_size {
633 |                 let account = &self.pending_account_updates[j];
634 | 
635 |                 values.push(&account.pubkey);
636 |                 values.push(&account.slot);
637 |                 values.push(&account.owner);
638 |                 values.push(&account.lamports);
639 |                 values.push(&account.executable);
640 |                 values.push(&account.rent_epoch);
641 |                 values.push(&account.data);
642 |                 values.push(&account.write_version);
643 |                 values.push(&updated_on);
644 |                 values.push(&account.txn_signature);
645 |             }
646 |             measure.stop();
647 |             inc_new_counter_debug!(
648 |                 "geyser-plugin-postgres-prepare-values-us",
649 |                 measure.as_us() as usize,
650 |                 10000,
651 |                 10000
652 |             );
653 | 
654 |             let mut measure = Measure::start("geyser-plugin-postgres-update-account");
655 |             let client = self.client.get_mut().unwrap();
656 |             let result = client
657 |                 .client
658 |                 .query(&client.bulk_account_insert_stmt, &values);
659 | 
660 |             self.pending_account_updates.clear();
661 | 
662 |             if let Err(err) = result {
663 |                 let msg = format!(
664 |                     "Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
Error: {:?}", 665 | err 666 | ); 667 | error!("{}", msg); 668 | return Err(GeyserPluginError::AccountsUpdateError { msg }); 669 | } 670 | 671 | measure.stop(); 672 | inc_new_counter_debug!( 673 | "geyser-plugin-postgres-update-account-us", 674 | measure.as_us() as usize, 675 | 10000, 676 | 10000 677 | ); 678 | inc_new_counter_debug!( 679 | "geyser-plugin-postgres-update-account-count", 680 | self.batch_size, 681 | 10000, 682 | 10000 683 | ); 684 | } 685 | Ok(()) 686 | } 687 | 688 | /// Flush any left over accounts in batch which are not processed in the last batch 689 | fn flush_buffered_writes(&mut self) -> Result<(), GeyserPluginError> { 690 | let client = self.client.get_mut().unwrap(); 691 | let insert_account_audit_stmt = &client.insert_account_audit_stmt; 692 | let statement = &client.update_account_stmt; 693 | let insert_token_owner_index_stmt = &client.insert_token_owner_index_stmt; 694 | let insert_token_mint_index_stmt = &client.insert_token_mint_index_stmt; 695 | let insert_slot_stmt = &client.update_slot_without_parent_stmt; 696 | let client = &mut client.client; 697 | 698 | for account in self.pending_account_updates.drain(..) { 699 | Self::upsert_account_internal( 700 | &account, 701 | statement, 702 | client, 703 | insert_account_audit_stmt, 704 | insert_token_owner_index_stmt, 705 | insert_token_mint_index_stmt, 706 | )?; 707 | } 708 | 709 | let mut measure = Measure::start("geyser-plugin-postgres-flush-slots-us"); 710 | 711 | for slot in &self.slots_at_startup { 712 | Self::upsert_slot_status_internal( 713 | *slot, 714 | None, 715 | SlotStatus::Rooted, 716 | client, 717 | insert_slot_stmt, 718 | )?; 719 | } 720 | measure.stop(); 721 | 722 | datapoint_info!( 723 | "geyser_plugin_notify_account_restore_from_snapshot_summary", 724 | ("flush_slots-us", measure.as_us(), i64), 725 | ("flush-slots-counts", self.slots_at_startup.len(), i64), 726 | ); 727 | 728 | self.slots_at_startup.clear(); 729 | self.clear_buffered_indexes(); 730 | Ok(()) 731 | } 732 | 733 | fn upsert_slot_status_internal( 734 | slot: u64, 735 | parent: Option, 736 | status: SlotStatus, 737 | client: &mut Client, 738 | statement: &Statement, 739 | ) -> Result<(), GeyserPluginError> { 740 | let slot = slot as i64; // postgres only supports i64 741 | let parent = parent.map(|parent| parent as i64); 742 | let updated_on = Utc::now().naive_utc(); 743 | let status_str = status.as_str(); 744 | 745 | let result = match parent { 746 | Some(parent) => client.execute(statement, &[&slot, &parent, &status_str, &updated_on]), 747 | None => client.execute(statement, &[&slot, &status_str, &updated_on]), 748 | }; 749 | 750 | match result { 751 | Err(err) => { 752 | let msg = format!( 753 | "Failed to persist the update of slot to the PostgreSQL database. 
Error: {:?}", 754 | err 755 | ); 756 | error!("{:?}", msg); 757 | return Err(GeyserPluginError::SlotStatusUpdateError { msg }); 758 | } 759 | Ok(rows) => { 760 | assert_eq!(1, rows, "Expected one rows to be updated a time"); 761 | } 762 | } 763 | 764 | Ok(()) 765 | } 766 | 767 | pub fn new(config: &GeyserPluginPostgresConfig) -> Result { 768 | info!("Creating SimplePostgresClient..."); 769 | let mut client = Self::connect_to_db(config)?; 770 | let bulk_account_insert_stmt = 771 | Self::build_bulk_account_insert_statement(&mut client, config)?; 772 | let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?; 773 | 774 | let update_slot_with_parent_stmt = 775 | Self::build_slot_upsert_statement_with_parent(&mut client, config)?; 776 | let update_slot_without_parent_stmt = 777 | Self::build_slot_upsert_statement_without_parent(&mut client, config)?; 778 | let update_transaction_log_stmt = 779 | Self::build_transaction_info_upsert_statement(&mut client, config)?; 780 | let update_block_metadata_stmt = 781 | Self::build_block_metadata_upsert_statement(&mut client, config)?; 782 | 783 | let batch_size = config 784 | .batch_size 785 | .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE); 786 | 787 | let store_account_historical_data = config 788 | .store_account_historical_data 789 | .unwrap_or(DEFAULT_STORE_ACCOUNT_HISTORICAL_DATA); 790 | 791 | let insert_account_audit_stmt = if store_account_historical_data { 792 | let stmt = Self::build_account_audit_insert_statement(&mut client, config)?; 793 | Some(stmt) 794 | } else { 795 | None 796 | }; 797 | 798 | let bulk_insert_token_owner_index_stmt = if let Some(true) = config.index_token_owner { 799 | let stmt = Self::build_bulk_token_owner_index_insert_statement(&mut client, config)?; 800 | Some(stmt) 801 | } else { 802 | None 803 | }; 804 | 805 | let bulk_insert_token_mint_index_stmt = if let Some(true) = config.index_token_mint { 806 | let stmt = Self::build_bulk_token_mint_index_insert_statement(&mut client, config)?; 807 | Some(stmt) 808 | } else { 809 | None 810 | }; 811 | 812 | let insert_token_owner_index_stmt = if let Some(true) = config.index_token_owner { 813 | Some(Self::build_single_token_owner_index_upsert_statement( 814 | &mut client, 815 | config, 816 | )?) 817 | } else { 818 | None 819 | }; 820 | 821 | let insert_token_mint_index_stmt = if let Some(true) = config.index_token_mint { 822 | Some(Self::build_single_token_mint_index_upsert_statement( 823 | &mut client, 824 | config, 825 | )?) 
826 |         } else {
827 |             None
828 |         };
829 | 
830 |         info!("Created SimplePostgresClient.");
831 |         Ok(Self {
832 |             batch_size,
833 |             pending_account_updates: Vec::with_capacity(batch_size),
834 |             client: Mutex::new(PostgresSqlClientWrapper {
835 |                 client,
836 |                 update_account_stmt,
837 |                 bulk_account_insert_stmt,
838 |                 update_slot_with_parent_stmt,
839 |                 update_slot_without_parent_stmt,
840 |                 update_transaction_log_stmt,
841 |                 update_block_metadata_stmt,
842 |                 insert_account_audit_stmt,
843 |                 insert_token_owner_index_stmt,
844 |                 insert_token_mint_index_stmt,
845 |                 bulk_insert_token_owner_index_stmt,
846 |                 bulk_insert_token_mint_index_stmt,
847 |             }),
848 |             index_token_owner: config.index_token_owner.unwrap_or_default(),
849 |             index_token_mint: config.index_token_mint.unwrap_or(false),
850 |             pending_token_owner_index: Vec::with_capacity(batch_size),
851 |             pending_token_mint_index: Vec::with_capacity(batch_size),
852 |             slots_at_startup: HashSet::default(),
853 |         })
854 |     }
855 | 
856 |     fn get_highest_available_slot(&mut self) -> Result<u64, GeyserPluginError> {
857 |         let client = self.client.get_mut().unwrap();
858 | 
859 |         let last_slot_query = "SELECT slot FROM slot ORDER BY slot DESC LIMIT 1;";
860 | 
861 |         let result = client.client.query_opt(last_slot_query, &[]);
862 |         match result {
863 |             Ok(opt_slot) => Ok(opt_slot
864 |                 .map(|row| {
865 |                     let raw_slot: i64 = row.get(0);
866 |                     raw_slot as u64
867 |                 })
868 |                 .unwrap_or(0)),
869 |             Err(err) => {
870 |                 let msg = format!(
871 |                     "Failed to receive last slot from PostgreSQL database. Error: {:?}",
872 |                     err
873 |                 );
874 |                 error!("{}", msg);
875 |                 Err(GeyserPluginError::AccountsUpdateError { msg })
876 |             }
877 |         }
878 |     }
879 | }
880 | 
881 | impl PostgresClient for SimplePostgresClient {
882 |     fn update_account(
883 |         &mut self,
884 |         account: DbAccountInfo,
885 |         is_startup: bool,
886 |     ) -> Result<(), GeyserPluginError> {
887 |         trace!(
888 |             "Updating account {} with owner {} at slot {}",
889 |             bs58::encode(account.pubkey()).into_string(),
890 |             bs58::encode(account.owner()).into_string(),
891 |             account.slot,
892 |         );
893 |         if !is_startup {
894 |             return self.upsert_account(&account);
895 |         }
896 | 
897 |         self.slots_at_startup.insert(account.slot as u64);
898 |         self.insert_accounts_in_batch(account)
899 |     }
900 | 
901 |     fn update_slot_status(
902 |         &mut self,
903 |         slot: u64,
904 |         parent: Option<u64>,
905 |         status: SlotStatus,
906 |     ) -> Result<(), GeyserPluginError> {
907 |         info!("Updating slot {:?} with status {:?}", slot, status);
908 | 
909 |         let client = self.client.get_mut().unwrap();
910 | 
911 |         let statement = match parent {
912 |             Some(_) => &client.update_slot_with_parent_stmt,
913 |             None => &client.update_slot_without_parent_stmt,
914 |         };
915 | 
916 |         Self::upsert_slot_status_internal(slot, parent, status, &mut client.client, statement)
917 |     }
918 | 
919 |     fn notify_end_of_startup(&mut self) -> Result<(), GeyserPluginError> {
920 |         self.flush_buffered_writes()
921 |     }
922 | 
923 |     fn log_transaction(
924 |         &mut self,
925 |         transaction_log_info: LogTransactionRequest,
926 |     ) -> Result<(), GeyserPluginError> {
927 |         self.log_transaction_impl(transaction_log_info)
928 |     }
929 | 
930 |     fn update_block_metadata(
931 |         &mut self,
932 |         block_info: UpdateBlockMetadataRequest,
933 |     ) -> Result<(), GeyserPluginError> {
934 |         self.update_block_metadata_impl(block_info)
935 |     }
936 | }
937 | 
938 | struct UpdateAccountRequest {
939 |     account: DbAccountInfo,
940 |     is_startup: bool,
941 | }
942 | 
943 | struct UpdateSlotRequest {
944 |     slot: u64,
945 |     parent: Option<u64>,
946 |     slot_status: SlotStatus,
947 | }
948 | 
949 | pub struct UpdateBlockMetadataRequest {
950 |     pub block_info: DbBlockInfo,
951 | }
952 | 
953 | #[warn(clippy::large_enum_variant)]
954 | enum DbWorkItem {
955 |     UpdateAccount(Box<UpdateAccountRequest>),
956 |     UpdateSlot(Box<UpdateSlotRequest>),
957 |     LogTransaction(Box<LogTransactionRequest>),
958 |     UpdateBlockMetadata(Box<UpdateBlockMetadataRequest>),
959 | }
960 | 
961 | impl PostgresClientWorker {
962 |     fn new(config: GeyserPluginPostgresConfig) -> Result<Self, GeyserPluginError> {
963 |         let result = SimplePostgresClient::new(&config);
964 |         match result {
965 |             Ok(client) => Ok(PostgresClientWorker {
966 |                 client,
967 |                 is_startup_done: false,
968 |             }),
969 |             Err(err) => {
970 |                 error!("Error in creating SimplePostgresClient: {}", err);
971 |                 Err(err)
972 |             }
973 |         }
974 |     }
975 | 
976 |     fn do_work(
977 |         &mut self,
978 |         receiver: Receiver<DbWorkItem>,
979 |         exit_worker: Arc<AtomicBool>,
980 |         is_startup_done: Arc<AtomicBool>,
981 |         startup_done_count: Arc<AtomicUsize>,
982 |         panic_on_db_errors: bool,
983 |     ) -> Result<(), GeyserPluginError> {
984 |         while !exit_worker.load(Ordering::Relaxed) {
985 |             let mut measure = Measure::start("geyser-plugin-postgres-worker-recv");
986 |             let work = receiver.recv_timeout(Duration::from_millis(500));
987 |             measure.stop();
988 |             inc_new_counter_debug!(
989 |                 "geyser-plugin-postgres-worker-recv-us",
990 |                 measure.as_us() as usize,
991 |                 100000,
992 |                 100000
993 |             );
994 |             match work {
995 |                 Ok(work) => match work {
996 |                     DbWorkItem::UpdateAccount(request) => {
997 |                         if let Err(err) = self
998 |                             .client
999 |                             .update_account(request.account, request.is_startup)
1000 |                         {
1001 |                             error!("Failed to update account: ({})", err);
1002 |                             if panic_on_db_errors {
1003 |                                 abort();
1004 |                             }
1005 |                         }
1006 |                     }
1007 |                     DbWorkItem::UpdateSlot(request) => {
1008 |                         if let Err(err) = self.client.update_slot_status(
1009 |                             request.slot,
1010 |                             request.parent,
1011 |                             request.slot_status,
1012 |                         ) {
1013 |                             error!("Failed to update slot: ({})", err);
1014 |                             if panic_on_db_errors {
1015 |                                 abort();
1016 |                             }
1017 |                         }
1018 |                     }
1019 |                     DbWorkItem::LogTransaction(transaction_log_info) => {
1020 |                         if let Err(err) = self.client.log_transaction(*transaction_log_info) {
1021 |                             error!("Failed to update transaction: ({})", err);
1022 |                             if panic_on_db_errors {
1023 |                                 abort();
1024 |                             }
1025 |                         }
1026 |                     }
1027 |                     DbWorkItem::UpdateBlockMetadata(block_info) => {
1028 |                         if let Err(err) = self.client.update_block_metadata(*block_info) {
1029 |                             error!("Failed to update block metadata: ({})", err);
1030 |                             if panic_on_db_errors {
1031 |                                 abort();
1032 |                             }
1033 |                         }
1034 |                     }
1035 |                 },
1036 |                 Err(err) => match err {
1037 |                     RecvTimeoutError::Timeout => {
1038 |                         if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
1039 |                             if let Err(err) = self.client.notify_end_of_startup() {
1040 |                                 error!("Error in notifying end of startup: ({})", err);
1041 |                                 if panic_on_db_errors {
1042 |                                     abort();
1043 |                                 }
1044 |                             }
1045 |                             self.is_startup_done = true;
1046 |                             startup_done_count.fetch_add(1, Ordering::Relaxed);
1047 |                         }
1048 | 
1049 |                         continue;
1050 |                     }
1051 |                     _ => {
1052 |                         error!("Error in receiving the item {:?}", err);
1053 |                         if panic_on_db_errors {
1054 |                             abort();
1055 |                         }
1056 |                         break;
1057 |                     }
1058 |                 },
1059 |             }
1060 |         }
1061 |         Ok(())
1062 |     }
1063 | }
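`do_work()` above and `notify_end_of_startup()` (below) coordinate end-of-startup with two counters: every worker that finishes flushing bumps `startup_done_count`, and the coordinator spins until it matches `initialized_worker_count`. A standalone sketch of that rendezvous, stripped of the database work:

```
fn rendezvous_sketch() {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;

    let initialized = Arc::new(AtomicUsize::new(0));
    let done = Arc::new(AtomicUsize::new(0));

    let workers: Vec<_> = (0..4)
        .map(|_| {
            let (initialized, done) = (initialized.clone(), done.clone());
            std::thread::spawn(move || {
                initialized.fetch_add(1, Ordering::Relaxed); // worker is up
                // ... flush buffered writes here ...
                done.fetch_add(1, Ordering::Relaxed); // startup flush finished
            })
        })
        .collect();

    // The real coordinator sleeps and re-checks in a loop; joining achieves
    // the same "all initialized workers have checked in" condition here.
    for w in workers {
        w.join().unwrap();
    }
    assert_eq!(
        done.load(Ordering::Relaxed),
        initialized.load(Ordering::Relaxed)
    );
}
```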
&GeyserPluginPostgresConfig) -> Result<Self, GeyserPluginError> { 1077 | info!("Creating ParallelPostgresClient..."); 1078 | let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS); 1079 | let exit_worker = Arc::new(AtomicBool::new(false)); 1080 | let mut workers = Vec::default(); 1081 | let is_startup_done = Arc::new(AtomicBool::new(false)); 1082 | let startup_done_count = Arc::new(AtomicUsize::new(0)); 1083 | let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT); 1084 | let initialized_worker_count = Arc::new(AtomicUsize::new(0)); 1085 | for i in 0..worker_count { 1086 | let cloned_receiver = receiver.clone(); 1087 | let exit_clone = exit_worker.clone(); 1088 | let is_startup_done_clone = is_startup_done.clone(); 1089 | let startup_done_count_clone = startup_done_count.clone(); 1090 | let initialized_worker_count_clone = initialized_worker_count.clone(); 1091 | let config = config.clone(); 1092 | let worker = Builder::new() 1093 | .name(format!("worker-{}", i)) 1094 | .spawn(move || -> Result<(), GeyserPluginError> { 1095 | let panic_on_db_errors = *config 1096 | .panic_on_db_errors 1097 | .as_ref() 1098 | .unwrap_or(&DEFAULT_PANIC_ON_DB_ERROR); 1099 | let result = PostgresClientWorker::new(config); 1100 | 1101 | match result { 1102 | Ok(mut worker) => { 1103 | initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed); 1104 | worker.do_work( 1105 | cloned_receiver, 1106 | exit_clone, 1107 | is_startup_done_clone, 1108 | startup_done_count_clone, 1109 | panic_on_db_errors, 1110 | )?; 1111 | Ok(()) 1112 | } 1113 | Err(err) => { 1114 | error!("Error when making connection to database: ({})", err); 1115 | if panic_on_db_errors { 1116 | abort(); 1117 | } 1118 | Err(err) 1119 | } 1120 | } 1121 | }) 1122 | .unwrap(); 1123 | 1124 | workers.push(worker); 1125 | } 1126 | 1127 | info!("Created ParallelPostgresClient."); 1128 | Ok(Self { 1129 | last_report: AtomicInterval::default(), 1130 | workers, 1131 | exit_worker, 1132 | is_startup_done, 1133 | startup_done_count, 1134 | initialized_worker_count, 1135 | sender, 1136 | transaction_write_version: AtomicU64::default(), 1137 | }) 1138 | } 1139 | 1140 | pub fn join(&mut self) -> thread::Result<()> { 1141 | self.exit_worker.store(true, Ordering::Relaxed); 1142 | while !self.workers.is_empty() { 1143 | let worker = self.workers.pop(); 1144 | if worker.is_none() { 1145 | break; 1146 | } 1147 | let worker = worker.unwrap(); 1148 | let result = worker.join().unwrap(); 1149 | if result.is_err() { 1150 | error!("The worker thread has failed: {:?}", result); 1151 | } 1152 | } 1153 | 1154 | Ok(()) 1155 | } 1156 | 1157 | pub fn update_account( 1158 | &self, 1159 | account: &ReplicaAccountInfoV3, 1160 | slot: u64, 1161 | is_startup: bool, 1162 | ) -> Result<(), GeyserPluginError> { 1163 | if !is_startup && account.txn.is_none() { 1164 | // we are not interested in accountsdb internal bookkeeping updates 1165 | return Ok(()); 1166 | } 1167 | 1168 | if self.last_report.should_update(30000) { 1169 | datapoint_debug!( 1170 | "postgres-plugin-stats", 1171 | ("message-queue-length", self.sender.len() as i64, i64), 1172 | ); 1173 | } 1174 | let mut measure = Measure::start("geyser-plugin-postgres-create-work-item"); 1175 | let wrk_item = DbWorkItem::UpdateAccount(Box::new(UpdateAccountRequest { 1176 | account: DbAccountInfo::new(account, slot), 1177 | is_startup, 1178 | })); 1179 | 1180 | measure.stop(); 1181 | 1182 | inc_new_counter_debug!( 1183 | "geyser-plugin-postgres-create-work-item-us", 1184 | measure.as_us() as usize, 1185 | 100000, 1186 | 100000 1187 | ); 1188 | 1189 |
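// Note: the `send` below goes over the bounded channel created in
// `ParallelPostgresClient::new` with capacity MAX_ASYNC_REQUESTS. Because the
// channel is bounded, `send` blocks when all workers are busy and the queue is
// full, so a slow database applies backpressure to the caller rather than
// letting the work queue grow without bound.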
let mut measure = Measure::start("geyser-plugin-postgres-send-msg"); 1190 | 1191 | if let Err(err) = self.sender.send(wrk_item) { 1192 | return Err(GeyserPluginError::AccountsUpdateError { 1193 | msg: format!( 1194 | "Failed to update the account {:?}, error: {:?}", 1195 | bs58::encode(account.pubkey()).into_string(), 1196 | err 1197 | ), 1198 | }); 1199 | } 1200 | 1201 | measure.stop(); 1202 | inc_new_counter_debug!( 1203 | "geyser-plugin-postgres-send-msg-us", 1204 | measure.as_us() as usize, 1205 | 100000, 1206 | 100000 1207 | ); 1208 | 1209 | Ok(()) 1210 | } 1211 | 1212 | pub fn update_slot_status( 1213 | &self, 1214 | slot: u64, 1215 | parent: Option<u64>, 1216 | status: SlotStatus, 1217 | ) -> Result<(), GeyserPluginError> { 1218 | if let Err(err) = self 1219 | .sender 1220 | .send(DbWorkItem::UpdateSlot(Box::new(UpdateSlotRequest { 1221 | slot, 1222 | parent, 1223 | slot_status: status, 1224 | }))) 1225 | { 1226 | return Err(GeyserPluginError::SlotStatusUpdateError { 1227 | msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err), 1228 | }); 1229 | } 1230 | Ok(()) 1231 | } 1232 | 1233 | pub fn update_block_metadata( 1234 | &self, 1235 | block_info: &ReplicaBlockInfoV3, 1236 | ) -> Result<(), GeyserPluginError> { 1237 | if let Err(err) = self.sender.send(DbWorkItem::UpdateBlockMetadata(Box::new( 1238 | UpdateBlockMetadataRequest { 1239 | block_info: DbBlockInfo::from(block_info), 1240 | }, 1241 | ))) { 1242 | return Err(GeyserPluginError::SlotStatusUpdateError { 1243 | msg: format!( 1244 | "Failed to update the block metadata at slot {:?}, error: {:?}", 1245 | block_info.slot, err 1246 | ), 1247 | }); 1248 | } 1249 | Ok(()) 1250 | } 1251 | 1252 | pub fn notify_end_of_startup(&self) -> Result<(), GeyserPluginError> { 1253 | info!("Notifying the end of startup"); 1254 | // Ensure all items in the queue have been received by the workers 1255 | while !self.sender.is_empty() { 1256 | sleep(Duration::from_millis(100)); 1257 | } 1258 | self.is_startup_done.store(true, Ordering::Relaxed); 1259 | 1260 | // Wait for all worker threads to be done with flushing 1261 | while self.startup_done_count.load(Ordering::Relaxed) 1262 | != self.initialized_worker_count.load(Ordering::Relaxed) 1263 | { 1264 | info!( 1265 | "Startup done count: {}, good worker thread count: {}", 1266 | self.startup_done_count.load(Ordering::Relaxed), 1267 | self.initialized_worker_count.load(Ordering::Relaxed) 1268 | ); 1269 | sleep(Duration::from_millis(100)); 1270 | } 1271 | 1272 | info!("Done with notifying the end of startup"); 1273 | Ok(()) 1274 | } 1275 | } 1276 | 1277 | pub struct PostgresClientBuilder {} 1278 | 1279 | impl PostgresClientBuilder { 1280 | pub fn build_pararallel_postgres_client( 1281 | config: &GeyserPluginPostgresConfig, 1282 | ) -> Result<(ParallelPostgresClient, Option<u64>), GeyserPluginError> { 1283 | let batch_optimize_by_skipping_older_slots = 1284 | match config.skip_upsert_existing_accounts_at_startup { 1285 | true => { 1286 | let mut on_load_client = SimplePostgresClient::new(config)?; 1287 | 1288 | // database is populated concurrently, so we need to move some number of slots 1289 | // below the highest available slot to make sure we do not skip anything that was already in the DB. 1290 | let batch_slot_bound = on_load_client 1291 | .get_highest_available_slot()?
1292 | .saturating_sub(SAFE_BATCH_STARTING_SLOT_CUSHION); 1293 | info!( 1294 | "Set batch_optimize_by_skipping_older_slots to {}", 1295 | batch_slot_bound 1296 | ); 1297 | Some(batch_slot_bound) 1298 | } 1299 | false => None, 1300 | }; 1301 | 1302 | ParallelPostgresClient::new(config).map(|v| (v, batch_optimize_by_skipping_older_slots)) 1303 | } 1304 | } 1305 | -------------------------------------------------------------------------------- /src/postgres_client/postgres_client_account_index.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::{ 3 | DbAccountInfo, ReadableAccountInfo, SimplePostgresClient, 4 | DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE, 5 | }, 6 | crate::{ 7 | geyser_plugin_postgres::{GeyserPluginPostgresConfig, GeyserPluginPostgresError}, 8 | inline_spl_token::{self, GenericTokenAccount}, 9 | inline_spl_token_2022, 10 | }, 11 | log::*, 12 | postgres::{Client, Statement}, 13 | solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPluginError, 14 | solana_measure::measure::Measure, 15 | solana_metrics::*, 16 | solana_sdk::pubkey::Pubkey, 17 | tokio_postgres::types, 18 | }; 19 | 20 | const TOKEN_INDEX_COLUMN_COUNT: usize = 3; 21 | /// Struct for a secondary index entry, used for both the token account owner and mint indexes. 22 | pub struct TokenSecondaryIndexEntry { 23 | /// For the token owner index, the secondary key is the Pubkey of the owner; for the 24 | /// token mint index, the secondary key is the Pubkey of the mint. 25 | secondary_key: Vec<u8>, 26 | 27 | /// The Pubkey of the account 28 | account_key: Vec<u8>, 29 | 30 | /// Record the slot at which the index entry is created. 31 | slot: i64, 32 | } 33 | 34 | impl SimplePostgresClient { 35 | pub fn build_single_token_owner_index_upsert_statement( 36 | client: &mut Client, 37 | config: &GeyserPluginPostgresConfig, 38 | ) -> Result<Statement, GeyserPluginError> { 39 | const BULK_OWNER_INDEX_INSERT_STATEMENT: &str = 40 | "INSERT INTO spl_token_owner_index AS owner_index (owner_key, account_key, slot) \ 41 | VALUES ($1, $2, $3) \ 42 | ON CONFLICT (owner_key, account_key) \ 43 | DO UPDATE SET slot=excluded.slot \ 44 | WHERE owner_index.slot < excluded.slot"; 45 | 46 | Self::prepare_query_statement(client, config, BULK_OWNER_INDEX_INSERT_STATEMENT) 47 | } 48 | 49 | pub fn build_single_token_mint_index_upsert_statement( 50 | client: &mut Client, 51 | config: &GeyserPluginPostgresConfig, 52 | ) -> Result<Statement, GeyserPluginError> { 53 | const BULK_MINT_INDEX_INSERT_STATEMENT: &str = 54 | "INSERT INTO spl_token_mint_index AS mint_index (mint_key, account_key, slot) \ 55 | VALUES ($1, $2, $3) \ 56 | ON CONFLICT (mint_key, account_key) \ 57 | DO UPDATE SET slot=excluded.slot \ 58 | WHERE mint_index.slot < excluded.slot"; 59 | 60 | Self::prepare_query_statement(client, config, BULK_MINT_INDEX_INSERT_STATEMENT) 61 | } 62 | 63 | /// Common helper that builds the bulk insert statement for a token index table.
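///
/// For illustration, with a hypothetical `batch_size` of 2 and
/// `source_key_name = "owner_key"`, the statement built below expands to:
///
/// ```sql
/// INSERT INTO spl_token_owner_index AS index (owner_key, account_key, slot) VALUES
///     ($1, $2, $3), ($4, $5, $6)
/// ON CONFLICT (owner_key, account_key)
/// DO UPDATE SET slot=excluded.slot WHERE index.slot < excluded.slot
/// ```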
64 | pub fn build_bulk_token_index_insert_statement_common( 65 | client: &mut Client, 66 | table: &str, 67 | source_key_name: &str, 68 | config: &GeyserPluginPostgresConfig, 69 | ) -> Result<Statement, GeyserPluginError> { 70 | let batch_size = config 71 | .batch_size 72 | .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE); 73 | let mut stmt = format!( 74 | "INSERT INTO {} AS index ({}, account_key, slot) VALUES", 75 | table, source_key_name 76 | ); 77 | for j in 0..batch_size { 78 | let row = j * TOKEN_INDEX_COLUMN_COUNT; 79 | let val_str = format!("(${}, ${}, ${})", row + 1, row + 2, row + 3); 80 | 81 | if j == 0 { 82 | stmt = format!("{} {}", &stmt, val_str); 83 | } else { 84 | stmt = format!("{}, {}", &stmt, val_str); 85 | } 86 | } 87 | 88 | let handle_conflict = format!( 89 | "ON CONFLICT ({}, account_key) DO UPDATE SET slot=excluded.slot WHERE index.slot < excluded.slot", 90 | source_key_name); 91 | 92 | stmt = format!("{} {}", stmt, handle_conflict); 93 | 94 | info!("{}", stmt); 95 | let bulk_stmt = client.prepare(&stmt); 96 | 97 | match bulk_stmt { 98 | Err(err) => { 99 | Err(GeyserPluginError::Custom(Box::new(GeyserPluginPostgresError::DataSchemaError { 100 | msg: format!( 101 | "Error in preparing the {} index update statement for the PostgreSQL database: {} host: {:?} user: {:?} config: {:?}", 102 | table, err, config.host, config.user, config 103 | ), 104 | }))) 105 | } 106 | Ok(statement) => Ok(statement), 107 | } 108 | } 109 | 110 | /// Build the token owner index bulk insert statement. 111 | pub fn build_bulk_token_owner_index_insert_statement( 112 | client: &mut Client, 113 | config: &GeyserPluginPostgresConfig, 114 | ) -> Result<Statement, GeyserPluginError> { 115 | Self::build_bulk_token_index_insert_statement_common( 116 | client, 117 | "spl_token_owner_index", 118 | "owner_key", 119 | config, 120 | ) 121 | } 122 | 123 | /// Build the token mint index bulk insert statement. 124 | pub fn build_bulk_token_mint_index_insert_statement( 125 | client: &mut Client, 126 | config: &GeyserPluginPostgresConfig, 127 | ) -> Result<Statement, GeyserPluginError> { 128 | Self::build_bulk_token_index_insert_statement_common( 129 | client, 130 | "spl_token_mint_index", 131 | "mint_key", 132 | config, 133 | ) 134 | } 135 | 136 | /// Execute the common token bulk insert query. 137 | fn bulk_insert_token_index_common( 138 | batch_size: usize, 139 | client: &mut Client, 140 | index_entries: &mut Vec<TokenSecondaryIndexEntry>, 141 | query: &Statement, 142 | ) -> Result<(), GeyserPluginError> { 143 | if index_entries.len() == batch_size { 144 | let mut measure = Measure::start("geyser-plugin-postgres-prepare-index-values"); 145 | 146 | let mut values: Vec<&(dyn types::ToSql + Sync)> = 147 | Vec::with_capacity(batch_size * TOKEN_INDEX_COLUMN_COUNT); 148 | for index in index_entries.iter().take(batch_size) { 149 | values.push(&index.secondary_key); 150 | values.push(&index.account_key); 151 | values.push(&index.slot); 152 | } 153 | measure.stop(); 154 | inc_new_counter_debug!( 155 | "geyser-plugin-postgres-prepare-index-values-us", 156 | measure.as_us() as usize, 157 | 10000, 158 | 10000 159 | ); 160 | 161 | let mut measure = Measure::start("geyser-plugin-postgres-update-index-account"); 162 | let result = client.query(query, &values); 163 | 164 | index_entries.clear(); 165 | 166 | if let Err(err) = result { 167 | let msg = format!( 168 | "Failed to persist the token index update to the PostgreSQL database.
Error: {:?}", 169 | err 170 | ); 171 | error!("{}", msg); 172 | return Err(GeyserPluginError::AccountsUpdateError { msg }); 173 | } 174 | 175 | measure.stop(); 176 | inc_new_counter_debug!( 177 | "geyser-plugin-postgres-update-index-us", 178 | measure.as_us() as usize, 179 | 10000, 180 | 10000 181 | ); 182 | inc_new_counter_debug!( 183 | "geyser-plugin-postgres-update-index-count", 184 | batch_size, 185 | 10000, 186 | 10000 187 | ); 188 | } 189 | Ok(()) 190 | } 191 | 192 | /// Execute the token owner bulk insert query. 193 | pub fn bulk_insert_token_owner_index(&mut self) -> Result<(), GeyserPluginError> { 194 | let client = self.client.get_mut().unwrap(); 195 | if client.bulk_insert_token_owner_index_stmt.is_none() { 196 | return Ok(()); 197 | } 198 | let query = client.bulk_insert_token_owner_index_stmt.as_ref().unwrap(); 199 | Self::bulk_insert_token_index_common( 200 | self.batch_size, 201 | &mut client.client, 202 | &mut self.pending_token_owner_index, 203 | query, 204 | ) 205 | } 206 | 207 | /// Execute the token mint index bulk insert query. 208 | pub fn bulk_insert_token_mint_index(&mut self) -> Result<(), GeyserPluginError> { 209 | let client = self.client.get_mut().unwrap(); 210 | if client.bulk_insert_token_mint_index_stmt.is_none() { 211 | return Ok(()); 212 | } 213 | let query = client.bulk_insert_token_mint_index_stmt.as_ref().unwrap(); 214 | Self::bulk_insert_token_index_common( 215 | self.batch_size, 216 | &mut client.client, 217 | &mut self.pending_token_mint_index, 218 | query, 219 | ) 220 | } 221 | 222 | /// Generic function to queue the token owner index for bulk insert. 223 | fn queue_token_owner_index_generic<G: GenericTokenAccount>( 224 | &mut self, 225 | token_id: &Pubkey, 226 | account: &DbAccountInfo, 227 | ) { 228 | if account.owner() == token_id.as_ref() { 229 | if let Some(owner_key) = G::unpack_account_owner(account.data()) { 230 | let owner_key = owner_key.as_ref().to_vec(); 231 | let pubkey = account.pubkey(); 232 | self.pending_token_owner_index 233 | .push(TokenSecondaryIndexEntry { 234 | secondary_key: owner_key, 235 | account_key: pubkey.to_vec(), 236 | slot: account.slot, 237 | }); 238 | } 239 | } 240 | } 241 | 242 | /// Generic function to queue the token mint index for bulk insert. 243 | fn queue_token_mint_index_generic<G: GenericTokenAccount>( 244 | &mut self, 245 | token_id: &Pubkey, 246 | account: &DbAccountInfo, 247 | ) { 248 | if account.owner() == token_id.as_ref() { 249 | if let Some(mint_key) = G::unpack_account_mint(account.data()) { 250 | let mint_key = mint_key.as_ref().to_vec(); 251 | let pubkey = account.pubkey(); 252 | self.pending_token_mint_index 253 | .push(TokenSecondaryIndexEntry { 254 | secondary_key: mint_key, 255 | account_key: pubkey.to_vec(), 256 | slot: account.slot, 257 | }) 258 | } 259 | } 260 | } 261 | 262 | /// Queue bulk insert secondary indexes: token owner and token mint indexes. 263 | pub fn queue_secondary_indexes(&mut self, account: &DbAccountInfo) { 264 | if self.index_token_owner { 265 | self.queue_token_owner_index_generic::<inline_spl_token::Account>( 266 | &inline_spl_token::id(), 267 | account, 268 | ); 269 | self.queue_token_owner_index_generic::<inline_spl_token_2022::Account>( 270 | &inline_spl_token_2022::id(), 271 | account, 272 | ); 273 | } 274 | 275 | if self.index_token_mint { 276 | self.queue_token_mint_index_generic::<inline_spl_token::Account>( 277 | &inline_spl_token::id(), 278 | account, 279 | ); 280 | self.queue_token_mint_index_generic::<inline_spl_token_2022::Account>( 281 | &inline_spl_token_2022::id(), 282 | account, 283 | ); 284 | } 285 | } 286 | 287 | /// Generic function to update a single token owner index.
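///
/// The prepared `statement` is expected to bind three parameters
/// ($1 = owner key, $2 = account key, $3 = slot), matching the single-row
/// upsert built in `build_single_token_owner_index_upsert_statement`. Its
/// `ON CONFLICT ... WHERE owner_index.slot < excluded.slot` clause only moves
/// an existing entry forward to a newer slot, so replaying an older update is
/// a no-op.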
288 | fn update_token_owner_index_generic<G: GenericTokenAccount>( 289 | client: &mut Client, 290 | statement: &Statement, 291 | token_id: &Pubkey, 292 | account: &DbAccountInfo, 293 | ) -> Result<(), GeyserPluginError> { 294 | if account.owner() == token_id.as_ref() { 295 | if let Some(owner_key) = G::unpack_account_owner(account.data()) { 296 | let owner_key = owner_key.as_ref().to_vec(); 297 | let pubkey = account.pubkey(); 298 | let slot = account.slot; 299 | let result = client.execute(statement, &[&owner_key, &pubkey, &slot]); 300 | if let Err(err) = result { 301 | let msg = format!( 302 | "Failed to update the token owner index to the PostgreSQL database. Error: {:?}", 303 | err 304 | ); 305 | error!("{}", msg); 306 | return Err(GeyserPluginError::AccountsUpdateError { msg }); 307 | } 308 | } 309 | } 310 | 311 | Ok(()) 312 | } 313 | 314 | /// Generic function to update a single token mint index. 315 | fn update_token_mint_index_generic<G: GenericTokenAccount>( 316 | client: &mut Client, 317 | statement: &Statement, 318 | token_id: &Pubkey, 319 | account: &DbAccountInfo, 320 | ) -> Result<(), GeyserPluginError> { 321 | if account.owner() == token_id.as_ref() { 322 | if let Some(mint_key) = G::unpack_account_mint(account.data()) { 323 | let mint_key = mint_key.as_ref().to_vec(); 324 | let pubkey = account.pubkey(); 325 | let slot = account.slot; 326 | let result = client.execute(statement, &[&mint_key, &pubkey, &slot]); 327 | if let Err(err) = result { 328 | let msg = format!( 329 | "Failed to update the token mint index to the PostgreSQL database. Error: {:?}", 330 | err 331 | ); 332 | error!("{}", msg); 333 | return Err(GeyserPluginError::AccountsUpdateError { msg }); 334 | } 335 | } 336 | } 337 | 338 | Ok(()) 339 | } 340 | 341 | /// Function for updating a single token owner index. 342 | pub fn update_token_owner_index( 343 | client: &mut Client, 344 | statement: &Statement, 345 | account: &DbAccountInfo, 346 | ) -> Result<(), GeyserPluginError> { 347 | Self::update_token_owner_index_generic::<inline_spl_token::Account>( 348 | client, 349 | statement, 350 | &inline_spl_token::id(), 351 | account, 352 | )?; 353 | 354 | Self::update_token_owner_index_generic::<inline_spl_token_2022::Account>( 355 | client, 356 | statement, 357 | &inline_spl_token_2022::id(), 358 | account, 359 | ) 360 | } 361 | 362 | /// Function for updating a single token mint index. 363 | pub fn update_token_mint_index( 364 | client: &mut Client, 365 | statement: &Statement, 366 | account: &DbAccountInfo, 367 | ) -> Result<(), GeyserPluginError> { 368 | Self::update_token_mint_index_generic::<inline_spl_token::Account>( 369 | client, 370 | statement, 371 | &inline_spl_token::id(), 372 | account, 373 | )?; 374 | 375 | Self::update_token_mint_index_generic::<inline_spl_token_2022::Account>( 376 | client, 377 | statement, 378 | &inline_spl_token_2022::id(), 379 | account, 380 | ) 381 | } 382 | 383 | /// Clean up the buffered indexes -- we do not need to 384 | /// write them to disk individually as they have already been handled 385 | /// when the accounts were flushed out individually in `upsert_account_internal`.
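///
/// Contrast with `bulk_insert_token_owner_index` and
/// `bulk_insert_token_mint_index`, which write the pending entries before
/// clearing them; here the entries are simply dropped because the single-row
/// upsert path has already persisted them.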
386 | pub fn clear_buffered_indexes(&mut self) { 387 | self.pending_token_owner_index.clear(); 388 | self.pending_token_mint_index.clear(); 389 | } 390 | } 391 | -------------------------------------------------------------------------------- /src/postgres_client/postgres_client_block_metadata.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | geyser_plugin_postgres::{GeyserPluginPostgresConfig, GeyserPluginPostgresError}, 4 | postgres_client::{ 5 | postgres_client_transaction::DbReward, SimplePostgresClient, UpdateBlockMetadataRequest, 6 | }, 7 | }, 8 | chrono::Utc, 9 | log::*, 10 | postgres::{Client, Statement}, 11 | solana_geyser_plugin_interface::geyser_plugin_interface::{ 12 | GeyserPluginError, ReplicaBlockInfoV3, 13 | }, 14 | }; 15 | 16 | #[derive(Clone, Debug)] 17 | pub struct DbBlockInfo { 18 | pub slot: i64, 19 | pub blockhash: String, 20 | pub rewards: Vec<DbReward>, 21 | pub block_time: Option<i64>, 22 | pub block_height: Option<i64>, 23 | } 24 | 25 | impl<'a> From<&ReplicaBlockInfoV3<'a>> for DbBlockInfo { 26 | fn from(block_info: &ReplicaBlockInfoV3) -> Self { 27 | Self { 28 | slot: block_info.slot as i64, 29 | blockhash: block_info.blockhash.to_string(), 30 | rewards: block_info.rewards.iter().map(DbReward::from).collect(), 31 | block_time: block_info.block_time, 32 | block_height: block_info 33 | .block_height 34 | .map(|block_height| block_height as i64), 35 | } 36 | } 37 | } 38 | 39 | impl SimplePostgresClient { 40 | pub(crate) fn build_block_metadata_upsert_statement( 41 | client: &mut Client, 42 | config: &GeyserPluginPostgresConfig, 43 | ) -> Result<Statement, GeyserPluginError> { 44 | let stmt = 45 | "INSERT INTO block (slot, blockhash, rewards, block_time, block_height, updated_on) \ 46 | VALUES ($1, $2, $3, $4, $5, $6) \ 47 | ON CONFLICT (slot) DO UPDATE SET blockhash=excluded.blockhash, rewards=excluded.rewards, \ 48 | block_time=excluded.block_time, block_height=excluded.block_height, updated_on=excluded.updated_on"; 49 | 50 | let stmt = client.prepare(stmt); 51 | 52 | match stmt { 53 | Err(err) => { 54 | Err(GeyserPluginError::Custom(Box::new(GeyserPluginPostgresError::DataSchemaError { 55 | msg: format!( 56 | "Error in preparing the block metadata update statement for the PostgreSQL database: ({}) host: {:?} user: {:?} config: {:?}", 57 | err, config.host, config.user, config 58 | ), 59 | }))) 60 | } 61 | Ok(stmt) => Ok(stmt), 62 | } 63 | } 64 | 65 | pub(crate) fn update_block_metadata_impl( 66 | &mut self, 67 | block_info: UpdateBlockMetadataRequest, 68 | ) -> Result<(), GeyserPluginError> { 69 | let client = self.client.get_mut().unwrap(); 70 | let statement = &client.update_block_metadata_stmt; 71 | let client = &mut client.client; 72 | let updated_on = Utc::now().naive_utc(); 73 | 74 | let block_info = block_info.block_info; 75 | let result = client.query( 76 | statement, 77 | &[ 78 | &block_info.slot, 79 | &block_info.blockhash, 80 | &block_info.rewards, 81 | &block_info.block_time, 82 | &block_info.block_height, 83 | &updated_on, 84 | ], 85 | ); 86 | 87 | if let Err(err) = result { 88 | let msg = format!( 89 | "Failed to persist the update of block metadata to the PostgreSQL database.
Error: {:?}", 90 | err); 91 | error!("{}", msg); 92 | return Err(GeyserPluginError::AccountsUpdateError { msg }); 93 | } 94 | 95 | Ok(()) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/transaction_selector.rs: -------------------------------------------------------------------------------- 1 | /// The transaction selector is responsible for filtering transactions 2 | /// in the plugin framework. 3 | use {log::*, solana_sdk::pubkey::Pubkey, std::collections::HashSet}; 4 | 5 | pub(crate) struct TransactionSelector { 6 | pub mentioned_addresses: HashSet<Vec<u8>>, 7 | pub select_all_transactions: bool, 8 | pub select_all_vote_transactions: bool, 9 | } 10 | 11 | #[allow(dead_code)] 12 | impl TransactionSelector { 13 | pub fn default() -> Self { 14 | Self { 15 | mentioned_addresses: HashSet::default(), 16 | select_all_transactions: false, 17 | select_all_vote_transactions: false, 18 | } 19 | } 20 | 21 | /// Create a selector based on the mentioned addresses 22 | /// To select all transactions use ["*"] or ["all"] 23 | /// To select all vote transactions, use ["all_votes"] 24 | /// To select transactions mentioning specific addresses use ["<pubkey1>", "<pubkey2>", ...] 25 | pub fn new(mentioned_addresses: &[String]) -> Self { 26 | info!( 27 | "Creating TransactionSelector from addresses: {:?}", 28 | mentioned_addresses 29 | ); 30 | 31 | let select_all_transactions = mentioned_addresses 32 | .iter() 33 | .any(|key| key == "*" || key == "all"); 34 | if select_all_transactions { 35 | return Self { 36 | mentioned_addresses: HashSet::default(), 37 | select_all_transactions, 38 | select_all_vote_transactions: true, 39 | }; 40 | } 41 | let select_all_vote_transactions = mentioned_addresses.iter().any(|key| key == "all_votes"); 42 | if select_all_vote_transactions { 43 | return Self { 44 | mentioned_addresses: HashSet::default(), 45 | select_all_transactions, 46 | select_all_vote_transactions: true, 47 | }; 48 | } 49 | 50 | let mentioned_addresses = mentioned_addresses 51 | .iter() 52 | .map(|key| bs58::decode(key).into_vec().unwrap()) 53 | .collect(); 54 | 55 | Self { 56 | mentioned_addresses, 57 | select_all_transactions: false, 58 | select_all_vote_transactions: false, 59 | } 60 | } 61 | 62 | /// Check if a transaction is of interest.
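///
/// `mentioned_addresses` is an iterator over the addresses referenced by the
/// transaction. A disabled selector rejects everything; the wildcard and
/// all-votes selectors accept before scanning addresses; otherwise the first
/// mentioned address found in `self.mentioned_addresses` selects the
/// transaction.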
63 | pub fn is_transaction_selected( 64 | &self, 65 | is_vote: bool, 66 | mentioned_addresses: Box<dyn Iterator<Item = &Pubkey> + '_>, 67 | ) -> bool { 68 | if !self.is_enabled() { 69 | return false; 70 | } 71 | 72 | if self.select_all_transactions || (self.select_all_vote_transactions && is_vote) { 73 | return true; 74 | } 75 | for address in mentioned_addresses { 76 | if self.mentioned_addresses.contains(address.as_ref()) { 77 | return true; 78 | } 79 | } 80 | false 81 | } 82 | 83 | /// Check if any transaction is of interest at all 84 | pub fn is_enabled(&self) -> bool { 85 | self.select_all_transactions 86 | || self.select_all_vote_transactions 87 | || !self.mentioned_addresses.is_empty() 88 | } 89 | } 90 | 91 | #[cfg(test)] 92 | pub(crate) mod tests { 93 | use super::*; 94 | 95 | #[test] 96 | fn test_select_transaction() { 97 | let pubkey1 = Pubkey::new_unique(); 98 | let pubkey2 = Pubkey::new_unique(); 99 | 100 | let selector = TransactionSelector::new(&[pubkey1.to_string()]); 101 | 102 | assert!(selector.is_enabled()); 103 | 104 | let addresses = [pubkey1]; 105 | 106 | assert!(selector.is_transaction_selected(false, Box::new(addresses.iter()))); 107 | 108 | let addresses = [pubkey2]; 109 | assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter()))); 110 | 111 | let addresses = [pubkey1, pubkey2]; 112 | assert!(selector.is_transaction_selected(false, Box::new(addresses.iter()))); 113 | } 114 | 115 | #[test] 116 | fn test_select_all_transaction_using_wildcard() { 117 | let pubkey1 = Pubkey::new_unique(); 118 | let pubkey2 = Pubkey::new_unique(); 119 | 120 | let selector = TransactionSelector::new(&["*".to_string()]); 121 | 122 | assert!(selector.is_enabled()); 123 | 124 | let addresses = [pubkey1]; 125 | 126 | assert!(selector.is_transaction_selected(false, Box::new(addresses.iter()))); 127 | 128 | let addresses = [pubkey2]; 129 | assert!(selector.is_transaction_selected(false, Box::new(addresses.iter()))); 130 | 131 | let addresses = [pubkey1, pubkey2]; 132 | assert!(selector.is_transaction_selected(false, Box::new(addresses.iter()))); 133 | } 134 | 135 | #[test] 136 | fn test_select_all_transaction_all() { 137 | let pubkey1 = Pubkey::new_unique(); 138 | let pubkey2 = Pubkey::new_unique(); 139 | 140 | let selector = TransactionSelector::new(&["all".to_string()]); 141 | 142 | assert!(selector.is_enabled()); 143 | 144 | let addresses = [pubkey1]; 145 | 146 | assert!(selector.is_transaction_selected(false, Box::new(addresses.iter()))); 147 | 148 | let addresses = [pubkey2]; 149 | assert!(selector.is_transaction_selected(false, Box::new(addresses.iter()))); 150 | 151 | let addresses = [pubkey1, pubkey2]; 152 | assert!(selector.is_transaction_selected(false, Box::new(addresses.iter()))); 153 | } 154 | 155 | #[test] 156 | fn test_select_all_vote_transaction() { 157 | let pubkey1 = Pubkey::new_unique(); 158 | let pubkey2 = Pubkey::new_unique(); 159 | 160 | let selector = TransactionSelector::new(&["all_votes".to_string()]); 161 | 162 | assert!(selector.is_enabled()); 163 | 164 | let addresses = [pubkey1]; 165 | 166 | assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter()))); 167 | 168 | let addresses = [pubkey2]; 169 | assert!(selector.is_transaction_selected(true, Box::new(addresses.iter()))); 170 | 171 | let addresses = [pubkey1, pubkey2]; 172 | assert!(selector.is_transaction_selected(true, Box::new(addresses.iter()))); 173 | } 174 | 175 | #[test] 176 | fn test_select_no_transaction() { 177 | let pubkey1 = Pubkey::new_unique(); 178 | let pubkey2 = Pubkey::new_unique(); 179 | 180
| let selector = TransactionSelector::new(&[]); 181 | 182 | assert!(!selector.is_enabled()); 183 | 184 | let addresses = [pubkey1]; 185 | 186 | assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter()))); 187 | 188 | let addresses = [pubkey2]; 189 | assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter()))); 190 | 191 | let addresses = [pubkey1, pubkey2]; 192 | assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter()))); 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /tests/test_postgres_plugin.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::arithmetic_side_effects)] 2 | 3 | use serde_json::json; 4 | 5 | /// Integration testing for the PostgreSQL plugin 6 | /// This requires a PostgreSQL database named 'solana' to be set up at localhost on port 5432. 7 | /// This is set up automatically in the CI environment. 8 | /// To set it up manually on Ubuntu Linux, do the following: 9 | /// sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' 10 | /// wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - 11 | /// apt install -y postgresql-14 12 | /// sudo /etc/init.d/postgresql start 13 | /// 14 | /// sudo -u postgres psql --command "CREATE USER solana WITH SUPERUSER PASSWORD 'solana';" 15 | /// sudo -u postgres createdb -O solana solana 16 | /// PGPASSWORD=solana psql -U solana -p 5432 -h localhost -w -d solana -f scripts/create_schema.sql 17 | /// 18 | /// The test covers transmitting accounts, transactions, slots, and 19 | /// block metadata. 20 | /// 21 | /// To clean up the database, run the following; otherwise you may run into duplicate key violations: 22 | /// PGPASSWORD=solana psql -U solana -p 5432 -h localhost -w -d solana -f scripts/drop_schema.sql 23 | /// 24 | /// Before running 'cargo test', please run 'cargo build'. 25 | use { 26 | libloading::Library, 27 | log::*, 28 | serial_test::serial, 29 | solana_core::validator::ValidatorConfig, 30 | solana_geyser_plugin_postgres::{ 31 | geyser_plugin_postgres::GeyserPluginPostgresConfig, postgres_client::SimplePostgresClient, 32 | }, 33 | solana_local_cluster::{ 34 | cluster::Cluster, 35 | local_cluster::{ClusterConfig, LocalCluster}, 36 | validator_configs::*, 37 | }, 38 | solana_runtime::{ 39 | snapshot_archive_info::SnapshotArchiveInfoGetter, snapshot_config::SnapshotConfig, 40 | snapshot_hash::SnapshotHash, snapshot_utils, 41 | }, 42 | solana_sdk::{ 43 | client::SyncClient, clock::Slot, commitment_config::CommitmentConfig, 44 | epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, 45 | }, 46 | solana_streamer::socket::SocketAddrSpace, 47 | std::{ 48 | fs::{self, File}, 49 | io::Read, 50 | io::Write, 51 | path::{Path, PathBuf}, 52 | thread::sleep, 53 | time::Duration, 54 | }, 55 | tempfile::TempDir, 56 | }; 57 | 58 | const RUST_LOG_FILTER: &str = 59 | "info,solana_core::replay_stage=warn,solana_local_cluster=info,local_cluster=info,solana_ledger=info"; 60 | 61 | fn wait_for_next_snapshot( 62 | cluster: &LocalCluster, 63 | snapshot_archives_dir: &Path, 64 | ) -> (PathBuf, (Slot, SnapshotHash)) { 65 | // Get slot after which this was generated 66 | let client = cluster 67 | .get_validator_client(cluster.entry_point_info.pubkey()) 68 | .unwrap(); 69 | let last_slot = client 70 | .get_slot_with_commitment(CommitmentConfig::processed()) 71 | .expect("Couldn't get slot"); 72 | 73 | // Wait for a snapshot
for a bank >= last_slot to be made so we know that the snapshot 74 | // must include the transactions just pushed 75 | trace!( 76 | "Waiting for snapshot archive to be generated with slot > {}", 77 | last_slot 78 | ); 79 | loop { 80 | if let Some(full_snapshot_archive_info) = 81 | snapshot_utils::get_highest_full_snapshot_archive_info(snapshot_archives_dir) 82 | { 83 | trace!( 84 | "full snapshot for slot {} exists", 85 | full_snapshot_archive_info.slot() 86 | ); 87 | if full_snapshot_archive_info.slot() >= last_slot { 88 | return ( 89 | full_snapshot_archive_info.path().clone(), 90 | ( 91 | full_snapshot_archive_info.slot(), 92 | *full_snapshot_archive_info.hash(), 93 | ), 94 | ); 95 | } 96 | trace!( 97 | "full snapshot slot {} < last_slot {}", 98 | full_snapshot_archive_info.slot(), 99 | last_slot 100 | ); 101 | } 102 | sleep(Duration::from_millis(1000)); 103 | } 104 | } 105 | 106 | fn farf_dir() -> PathBuf { 107 | let dir: String = std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()); 108 | fs::create_dir_all(dir.clone()).unwrap(); 109 | PathBuf::from(dir) 110 | } 111 | 112 | fn generate_account_paths(num_account_paths: usize) -> (Vec<TempDir>, Vec<PathBuf>) { 113 | let account_storage_dirs: Vec<TempDir> = (0..num_account_paths) 114 | .map(|_| tempfile::tempdir_in(farf_dir()).unwrap()) 115 | .collect(); 116 | let account_storage_paths: Vec<_> = account_storage_dirs 117 | .iter() 118 | .map(|a| a.path().to_path_buf()) 119 | .collect(); 120 | (account_storage_dirs, account_storage_paths) 121 | } 122 | 123 | fn generate_geyser_plugin_config() -> (TempDir, PathBuf) { 124 | let tmp_dir = tempfile::tempdir_in(farf_dir()).unwrap(); 125 | let mut path = tmp_dir.path().to_path_buf(); 126 | path.push("accounts_db_plugin.json"); 127 | let mut config_file = File::create(path.clone()).unwrap(); 128 | 129 | // Need to specify the absolute path of the dynamic library 130 | // as the framework is looking for the library relative to the 131 | // config file otherwise.
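// For example, when the test binary runs from the workspace root, the
// resulting path is along the lines of
// "<workspace>/target/debug/libsolana_geyser_plugin_postgres.so"
// (".dylib" on macOS); "<workspace>" here is illustrative.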
132 | let lib_name = if std::env::consts::OS == "macos" { 133 | "libsolana_geyser_plugin_postgres.dylib" 134 | } else { 135 | "libsolana_geyser_plugin_postgres.so" 136 | }; 137 | 138 | let mut lib_path = path.clone(); 139 | 140 | lib_path.pop(); 141 | lib_path.pop(); 142 | lib_path.pop(); 143 | lib_path.push("target"); 144 | lib_path.push("debug"); 145 | lib_path.push(lib_name); 146 | 147 | let lib_path = lib_path.as_os_str().to_str().unwrap(); 148 | let config_content = json!({ 149 | "libpath": lib_path, 150 | "connection_str": "host=localhost user=solana password=solana port=5432", 151 | "threads": 20, 152 | "batch_size": 20, 153 | "panic_on_db_errors": true, 154 | "accounts_selector" : { 155 | "accounts" : ["*"] 156 | }, 157 | "transaction_selector" : { 158 | "mentions" : ["*"] 159 | } 160 | }); 161 | 162 | write!(config_file, "{}", config_content).unwrap(); 163 | (tmp_dir, path) 164 | } 165 | 166 | #[allow(dead_code)] 167 | struct SnapshotValidatorConfig { 168 | snapshot_dir: TempDir, 169 | snapshot_archives_dir: TempDir, 170 | account_storage_dirs: Vec<TempDir>, 171 | validator_config: ValidatorConfig, 172 | plugin_config_dir: TempDir, 173 | } 174 | 175 | fn setup_snapshot_validator_config( 176 | snapshot_interval_slots: u64, 177 | num_account_paths: usize, 178 | ) -> SnapshotValidatorConfig { 179 | // Create the snapshot config 180 | let bank_snapshots_dir = tempfile::tempdir_in(farf_dir()).unwrap(); 181 | let snapshot_archives_dir = tempfile::tempdir_in(farf_dir()).unwrap(); 182 | let snapshot_config = SnapshotConfig { 183 | full_snapshot_archive_interval_slots: snapshot_interval_slots, 184 | incremental_snapshot_archive_interval_slots: Slot::MAX, 185 | full_snapshot_archives_dir: snapshot_archives_dir.path().to_path_buf(), 186 | bank_snapshots_dir: bank_snapshots_dir.path().to_path_buf(), 187 | ..SnapshotConfig::default() 188 | }; 189 | 190 | // Create the account paths 191 | let (account_storage_dirs, account_storage_paths) = generate_account_paths(num_account_paths); 192 | 193 | let (plugin_config_dir, path) = generate_geyser_plugin_config(); 194 | 195 | let on_start_geyser_plugin_config_files = Some(vec![path]); 196 | 197 | // Create the validator config 198 | let validator_config = ValidatorConfig { 199 | snapshot_config, 200 | account_paths: account_storage_paths, 201 | accounts_hash_interval_slots: snapshot_interval_slots, 202 | on_start_geyser_plugin_config_files, 203 | enforce_ulimit_nofile: false, 204 | ..ValidatorConfig::default() 205 | }; 206 | 207 | SnapshotValidatorConfig { 208 | snapshot_dir: bank_snapshots_dir, 209 | snapshot_archives_dir, 210 | account_storage_dirs, 211 | validator_config, 212 | plugin_config_dir, 213 | } 214 | } 215 | 216 | fn test_local_cluster_start_and_exit_with_config(socket_addr_space: SocketAddrSpace) { 217 | const NUM_NODES: usize = 1; 218 | let config = ValidatorConfig { 219 | enforce_ulimit_nofile: false, 220 | ..ValidatorConfig::default() 221 | }; 222 | let mut config = ClusterConfig { 223 | validator_configs: make_identical_validator_configs(&config, NUM_NODES), 224 | node_stakes: vec![3; NUM_NODES], 225 | cluster_lamports: 100, 226 | ticks_per_slot: 8, 227 | slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH, 228 | stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH, 229 | ..ClusterConfig::default() 230 | }; 231 | let cluster = LocalCluster::new(&mut config, socket_addr_space); 232 | assert_eq!(cluster.validators.len(), NUM_NODES); 233 | } 234 | 235 | #[test] 236 | #[serial] 237 | fn test_without_plugin() { 238 | let socket_addr_space =
SocketAddrSpace::new(true); 239 | test_local_cluster_start_and_exit_with_config(socket_addr_space); 240 | } 241 | 242 | #[test] 243 | #[serial] 244 | fn test_postgres_plugin() { 245 | solana_logger::setup_with_default(RUST_LOG_FILTER); 246 | 247 | unsafe { 248 | let filename = match std::env::consts::OS { 249 | "macos" => "libsolana_geyser_plugin_postgres.dylib", 250 | _ => "libsolana_geyser_plugin_postgres.so", 251 | }; 252 | 253 | let lib = Library::new(filename); 254 | if lib.is_err() { 255 | info!("Failed to load the dynamic library {} {:?}", filename, lib); 256 | return; 257 | } 258 | } 259 | 260 | let socket_addr_space = SocketAddrSpace::new(true); 261 | 262 | // First set up the cluster with 1 node 263 | let snapshot_interval_slots = 50; 264 | let num_account_paths = 3; 265 | 266 | let leader_snapshot_test_config = 267 | setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths); 268 | 269 | let mut file = File::open( 270 | &leader_snapshot_test_config 271 | .validator_config 272 | .on_start_geyser_plugin_config_files 273 | .as_ref() 274 | .unwrap()[0], 275 | ) 276 | .unwrap(); 277 | let mut contents = String::new(); 278 | file.read_to_string(&mut contents).unwrap(); 279 | let plugin_config: GeyserPluginPostgresConfig = serde_json::from_str(&contents).unwrap(); 280 | 281 | let result = SimplePostgresClient::connect_to_db(&plugin_config); 282 | if result.is_err() { 283 | info!("Failed to connect to the PostgreSQL database. Please set up the database to run the integration tests. {:?}", result.err()); 284 | return; 285 | } 286 | 287 | let stake = 10_000; 288 | let mut config = ClusterConfig { 289 | node_stakes: vec![stake], 290 | cluster_lamports: 1_000_000, 291 | validator_configs: make_identical_validator_configs( 292 | &leader_snapshot_test_config.validator_config, 293 | 1, 294 | ), 295 | ..ClusterConfig::default() 296 | }; 297 | 298 | let cluster = LocalCluster::new(&mut config, socket_addr_space); 299 | 300 | assert_eq!(cluster.validators.len(), 1); 301 | let contact_info = &cluster.entry_point_info; 302 | 303 | info!( 304 | "Contact info: {:?} {:?}", 305 | contact_info, 306 | leader_snapshot_test_config 307 | .validator_config 308 | .enforce_ulimit_nofile 309 | ); 310 | 311 | // Get slot after which this was generated 312 | let snapshot_archives_dir = &leader_snapshot_test_config 313 | .validator_config 314 | .snapshot_config 315 | .full_snapshot_archives_dir; 316 | info!("Waiting for snapshot"); 317 | let (archive_filename, archive_snapshot_hash) = 318 | wait_for_next_snapshot(&cluster, snapshot_archives_dir); 319 | info!("Found: {:?} {:?}", archive_filename, archive_snapshot_hash); 320 | } 321 | --------------------------------------------------------------------------------