├── src
├── lib.rs
├── agent
│   ├── utils.rs
│   ├── services.rs
│   ├── pyth
│   │   ├── rpc
│   │   │   ├── get_all_products.rs
│   │   │   ├── get_product_list.rs
│   │   │   ├── get_product.rs
│   │   │   ├── update_price.rs
│   │   │   ├── subscribe_price.rs
│   │   │   └── subscribe_price_sched.rs
│   │   └── rpc.rs
│   ├── services
│   │   ├── notifier.rs
│   │   ├── oracle.rs
│   │   ├── keypairs.rs
│   │   └── exporter.rs
│   ├── state
│   │   ├── keypairs.rs
│   │   ├── local.rs
│   │   ├── transactions.rs
│   │   └── global.rs
│   ├── pyth.rs
│   ├── config.rs
│   ├── solana.rs
│   ├── metrics.rs
│   └── utils
│   │   └── rpc_multi_client.rs
├── bin
│   └── agent.rs
└── agent.rs
├── integration-tests
├── tests
│   └── __init__.py
├── integration_tests
│   └── __init__.py
├── message_buffer_client_codegen
│   ├── __init__.py
│   ├── program_id.py
│   ├── accounts
│   │   ├── __init__.py
│   │   ├── message_buffer.py
│   │   └── whitelist.py
│   ├── instructions
│   │   ├── __init__.py
│   │   ├── initialize.py
│   │   ├── update_whitelist_admin.py
│   │   ├── delete_buffer.py
│   │   ├── set_allowed_programs.py
│   │   ├── create_buffer.py
│   │   ├── put_all.py
│   │   └── resize_buffer.py
│   └── errors
│   │   ├── __init__.py
│   │   └── custom.py
├── program-binaries
│   ├── oracle.so
│   ├── message_buffer.so
│   └── message_buffer_idl.json
├── agent_conf.toml
├── Dockerfile
└── pyproject.toml
├── .dockerignore
├── rust-toolchain.toml
├── clippy.toml
├── tests.sh
├── canary.md5sum
├── .gitignore
├── shell.nix
├── .github
└── workflows
│   ├── rustfmt.yaml
│   ├── rust.yaml
│   └── image-push.yaml
├── Dockerfile
├── rustfmt.toml
├── proptest-regressions
└── agent
│   └── market_schedule.txt
├── .pre-commit-config.yaml
├── config
├── config.sample.pythnet.toml
├── config.sample.pythtest.toml
└── config.toml
├── flake.nix
├── flake.lock
├── Cargo.toml
├── README.md
└── LICENSE

/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod agent;
2 |
--------------------------------------------------------------------------------
/integration-tests/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | target
2 | Dockerfile
3 |
--------------------------------------------------------------------------------
/integration-tests/integration_tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/agent/utils.rs:
--------------------------------------------------------------------------------
1 | pub mod rpc_multi_client;
2 |
--------------------------------------------------------------------------------
/integration-tests/message_buffer_client_codegen/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
1 | [toolchain]
2 | channel = "1.88.0"
3 | profile = "minimal"
4 | components = ["rustfmt", "clippy"]
5 |
--------------------------------------------------------------------------------
/integration-tests/program-binaries/oracle.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyth-network/pyth-agent/HEAD/integration-tests/program-binaries/oracle.so
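Editor's note: the `src/agent/pyth/rpc/` modules listed in the tree above implement the pythd-compatible JSON-RPC API (`get_product`, `update_price`, `subscribe_price`, ...). As a rough client-side illustration, not code from this repository, an `update_price` request could be built as in the sketch below; the parameter names follow `UpdatePriceParams` as used in `update_price.rs` further down, while the account value, id, and status string are placeholders.

// Illustrative only: building a pythd-style JSON-RPC `update_price` request.
use serde_json::json;

fn main() {
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "update_price",
        "params": {
            // Placeholder price account pubkey (base58), not a real account.
            "account": "11111111111111111111111111111111",
            "price": 42_000_000i64, // `Price` is an i64 (see src/agent/pyth.rs)
            "conf": 15_000u64,      // `Conf` is a u64
            "status": "trading"
        }
    });
    println!("{request}");
}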
-------------------------------------------------------------------------------- /clippy.toml: -------------------------------------------------------------------------------- 1 | allow-unwrap-in-tests = true 2 | allow-expect-in-tests = true 3 | allow-indexing-slicing-in-tests = true 4 | allow-panic-in-tests = true 5 | -------------------------------------------------------------------------------- /integration-tests/program-binaries/message_buffer.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyth-network/pyth-agent/HEAD/integration-tests/program-binaries/message_buffer.so -------------------------------------------------------------------------------- /integration-tests/message_buffer_client_codegen/program_id.py: -------------------------------------------------------------------------------- 1 | from solana.publickey import PublicKey 2 | 3 | PROGRAM_ID = PublicKey("Vbmv1jt4vyuqBZcpYPpnVhrqVe5e6ZPb6JxDcffRHUM") 4 | -------------------------------------------------------------------------------- /integration-tests/message_buffer_client_codegen/accounts/__init__.py: -------------------------------------------------------------------------------- 1 | from .message_buffer import MessageBuffer, MessageBufferJSON 2 | from .whitelist import Whitelist, WhitelistJSON 3 | -------------------------------------------------------------------------------- /integration-tests/agent_conf.toml: -------------------------------------------------------------------------------- 1 | [metrics_server] 2 | bind_address="0.0.0.0:8888" 3 | 4 | [primary_network] 5 | oracle.poll_interval_duration = "1s" 6 | exporter.transaction_monitor.poll_interval_duration = "1s" 7 | -------------------------------------------------------------------------------- /tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -ev 2 | 3 | # Run Rust unit tests 4 | cargo test --workspace 5 | 6 | # Run Python integration tests 7 | cd integration-tests 8 | poetry install 9 | poetry run pytest -s --log-cli-level=debug 10 | -------------------------------------------------------------------------------- /canary.md5sum: -------------------------------------------------------------------------------- 1 | b213ae5b2a4137238c47bdc5951fc95d integration-tests/program-binaries/message_buffer_idl.json 2 | 1d5b5e43be31e10f6e747b20ef77f4e9 integration-tests/program-binaries/message_buffer.so 3 | 7c2782f6f58e9c91a95ce7c310a47927 integration-tests/program-binaries/oracle.so 4 | -------------------------------------------------------------------------------- /src/agent/services.rs: -------------------------------------------------------------------------------- 1 | pub mod exporter; 2 | pub mod keypairs; 3 | pub mod lazer_exporter; 4 | pub mod notifier; 5 | pub mod oracle; 6 | 7 | pub use { 8 | exporter::exporter, 9 | keypairs::keypairs, 10 | lazer_exporter::lazer_exporter, 11 | notifier::notifier, 12 | oracle::oracle, 13 | }; 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Generated by Nix 6 | # will contain the output of `nix build` 7 | result 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | __pycache__ 12 | keystore 13 | 14 | # Mac OS 15 | .DS_Store 16 | 
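Editor's note: the entry points re-exported by `src/agent/services.rs` above are plain async functions. A minimal sketch of how such a service can be driven under tokio follows; `DemoState` and the `main` scaffolding are assumptions standing in for the agent's real `State` and startup logic (compare the `notifier` signature in `src/agent/services/notifier.rs` further down).

// Assumed scaffolding, not the agent's real startup code: shows the shape
// in which services (notifier, oracle, exporter, ...) run as tokio tasks.
use std::sync::Arc;

// Stand-in for the agent's State, which implements the `Prices` trait
// required by the real `notifier<S>(state: Arc<S>)`.
struct DemoState;

async fn notifier(_state: Arc<DemoState>) {
    // The real service loops on an interval, sending `notify_price_sched`
    // notifications until a shutdown signal arrives.
}

#[tokio::main]
async fn main() {
    let state = Arc::new(DemoState);
    // Spawn the service as its own task, mirroring how the agent runs
    // several services concurrently over shared state.
    let handle = tokio::spawn(notifier(Arc::clone(&state)));
    handle.await.expect("notifier task failed");
}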
--------------------------------------------------------------------------------
/shell.nix:
--------------------------------------------------------------------------------
1 | { pkgs ? import <nixpkgs> {}
2 | , ...
3 | }:
4 |
5 | with pkgs; mkShell {
6 |   buildInputs = [
7 |     clang
8 |     llvmPackages.libclang
9 |     openssl
10 |     pkgconfig
11 |     rustup
12 |   ];
13 |
14 |   shellHook = ''
15 |     export LIBCLANG_PATH="${llvmPackages.libclang.lib}/lib";
16 |   '';
17 | }
18 |
--------------------------------------------------------------------------------
/src/agent/pyth/rpc/get_all_products.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     crate::agent::state,
3 |     anyhow::Result,
4 |     tracing::instrument,
5 | };
6 |
7 | #[instrument(skip_all)]
8 | pub async fn get_all_products<S>(state: &S) -> Result<serde_json::Value>
9 | where
10 |     S: state::Prices,
11 | {
12 |     let products = state.get_all_products().await?;
13 |     Ok(serde_json::to_value(products)?)
14 | }
15 |
--------------------------------------------------------------------------------
/src/agent/pyth/rpc/get_product_list.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     crate::agent::state,
3 |     anyhow::Result,
4 |     tracing::instrument,
5 | };
6 |
7 | #[instrument(skip_all)]
8 | pub async fn get_product_list<S>(state: &S) -> Result<serde_json::Value>
9 | where
10 |     S: state::Prices,
11 | {
12 |     let product_list = state.get_product_list().await?;
13 |     Ok(serde_json::to_value(product_list)?)
14 | }
15 |
--------------------------------------------------------------------------------
/.github/workflows/rustfmt.yaml:
--------------------------------------------------------------------------------
1 | name: Check Rustfmt
2 |
3 | on:
4 |   pull_request:
5 |   push:
6 |     branches: [main]
7 |
8 | jobs:
9 |   pre-commit:
10 |     runs-on: ubuntu-latest
11 |     steps:
12 |       - uses: actions/checkout@v2
13 |       - uses: actions/setup-python@v2
14 |       - uses: actions-rs/toolchain@v1
15 |         with:
16 |           profile: minimal
17 |           toolchain: nightly
18 |           components: rustfmt
19 |       - uses: pre-commit/action@v3.0.1
20 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rust:slim-bookworm as builder
2 |
3 | RUN apt update && apt install -y curl libssl-dev pkg-config build-essential && apt clean all
4 |
5 | ADD . /agent
6 | WORKDIR /agent
7 |
8 | RUN cargo build --release
9 |
10 | FROM debian:12-slim
11 |
12 | RUN apt update && apt install -y libssl-dev ca-certificates && apt clean all
13 |
14 | COPY --from=builder /agent/target/release/agent /agent/
15 | COPY --from=builder /agent/config/* /agent/config/
16 |
17 | ENTRYPOINT ["/agent/agent"]
18 |
--------------------------------------------------------------------------------
/.github/workflows/rust.yaml:
--------------------------------------------------------------------------------
1 | name: Rust
2 |
3 | on:
4 |   push:
5 |     branches:
6 |       - main
7 |
8 |   pull_request:
9 |     branches:
10 |       - main
11 |
12 | jobs:
13 |   build:
14 |     runs-on: ubuntu-22.04
15 |     steps:
16 |       - name: Check-out code
17 |         uses: actions/checkout@v2
18 |       - name: Build image
19 |         run: docker build -f integration-tests/Dockerfile -t agent .
20 | - name: Run tests 21 | run: docker run -v $PWD:/agent agent ./tests.sh 22 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Merge all imports into a clean vertical list of module imports. 2 | imports_granularity = "One" 3 | group_imports = "One" 4 | imports_layout = "Vertical" 5 | 6 | # Better grep-ability. 7 | empty_item_single_line = false 8 | 9 | # Consistent pipe layout. 10 | match_arm_leading_pipes = "Preserve" 11 | 12 | # Align Fields 13 | enum_discrim_align_threshold = 80 14 | struct_field_align_threshold = 80 15 | 16 | # Allow up to two blank lines for visual grouping. 17 | blank_lines_upper_bound = 2 18 | -------------------------------------------------------------------------------- /proptest-regressions/agent/market_schedule.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc 173b9a862e3ad1149b0fdef292a11164ecab5b67b395857178f63294c3c9c0b7 # shrinks to s = "0000-0060" 8 | cc 6cf32e18287cb6de4b40f4326d1e9fd3be409086af3ccf75eac6f980c1f67052 # shrinks to s = TimeRange(00:00:00, 00:00:01) 9 | -------------------------------------------------------------------------------- /integration-tests/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim-bullseye 2 | 3 | # Install Rust 4 | RUN apt update && apt install -y curl pkg-config libssl-dev build-essential 5 | RUN curl https://sh.rustup.rs -sSf | bash -s -- -y 6 | ENV PATH="/root/.cargo/bin:${PATH}" 7 | RUN rustup toolchain install nightly 8 | 9 | # Install poetry 10 | RUN pip install poetry 11 | ENV PATH="${PATH}:/root/.local/bin" 12 | RUN poetry config virtualenvs.in-project true 13 | 14 | # Install Solana Tool Suite 15 | RUN sh -c "$(curl -sSfL https://release.anza.xyz/v2.2.1/install)" 16 | ENV PATH="${PATH}:/root/.local/share/solana/install/active_release/bin" 17 | 18 | ADD . 
/agent
19 | WORKDIR /agent
20 |
21 | RUN cargo build --release
22 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 |   - repo: https://github.com/pre-commit/pre-commit-hooks
3 |     rev: v3.2.0
4 |     hooks:
5 |       - id: trailing-whitespace
6 |       - id: end-of-file-fixer
7 |       - id: check-added-large-files
8 |   - repo: local
9 |     hooks:
10 |       - id: cargo-fmt-nightly
11 |         name: rustfmt
12 |         language: "rust"
13 |         entry: cargo +nightly fmt
14 |         pass_filenames: false
15 |       - id: integration-test-checksums
16 |         name: Integration Test Artifact Checksums
17 |         language: "system"
18 |         files: integration-tests/program-binaries/.*\.(json|so|md5sum)$
19 |         entry: md5sum --check canary.md5sum
20 |         pass_filenames: false
21 |
--------------------------------------------------------------------------------
/integration-tests/message_buffer_client_codegen/instructions/__init__.py:
--------------------------------------------------------------------------------
1 | from .initialize import initialize, InitializeArgs, InitializeAccounts
2 | from .set_allowed_programs import (
3 |     set_allowed_programs,
4 |     SetAllowedProgramsArgs,
5 |     SetAllowedProgramsAccounts,
6 | )
7 | from .update_whitelist_admin import (
8 |     update_whitelist_admin,
9 |     UpdateWhitelistAdminArgs,
10 |     UpdateWhitelistAdminAccounts,
11 | )
12 | from .put_all import put_all, PutAllArgs, PutAllAccounts
13 | from .create_buffer import create_buffer, CreateBufferArgs, CreateBufferAccounts
14 | from .resize_buffer import resize_buffer, ResizeBufferArgs, ResizeBufferAccounts
15 | from .delete_buffer import delete_buffer, DeleteBufferArgs, DeleteBufferAccounts
16 |
--------------------------------------------------------------------------------
/integration-tests/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "integration-tests"
3 | version = "0.1.0"
4 | description = ""
5 | authors = ["Pyth Data Association "]
6 | readme = "../README.md"
7 | packages = [{ include = "integration_tests" }]
8 |
9 | [tool.poetry.dependencies]
10 | python = "~3.10"
11 |
12 | [tool.poetry.dev-dependencies]
13 | anchorpy = "0.14.0"
14 | program-admin = { git = "https://github.com/pyth-network/program-admin.git", branch = "main" }
15 | pytest = "^7.2"
16 | pytest-asyncio = "^0.20.0"
17 | pre-commit = "^2.21.0"
18 | requests = "^2.28.2"
19 | jsonrpc_websocket = "^3.1.4"
20 |
21 | [build-system]
22 | requires = ["poetry-core"]
23 | build-backend = "poetry.core.masonry.api"
24 |
25 | [tool.pytest.ini_options]
26 | asyncio_mode = "auto"
27 |
--------------------------------------------------------------------------------
/src/agent/pyth/rpc/get_product.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     super::{
3 |         GetProductParams,
4 |         Method,
5 |     },
6 |     crate::agent::state,
7 |     anyhow::{
8 |         Result,
9 |         anyhow,
10 |     },
11 |     jrpc::{
12 |         Request,
13 |         Value,
14 |     },
15 |     tracing::instrument,
16 | };
17 |
18 | #[instrument(skip_all, fields(account))]
19 | pub async fn get_product<S>(
20 |     state: &S,
21 |     request: &Request<Method, Value>,
22 | ) -> Result<Value>
23 | where
24 |     S: state::Prices,
25 | {
26 |     let params: GetProductParams = {
27 |         let value = request.params.clone();
28 |         serde_json::from_value(value.ok_or_else(|| anyhow!("Missing request parameters"))?)
29 |     }?;
30 |
31 |     let account = params.account.parse::<solana_sdk::pubkey::Pubkey>()?;
32 |     tracing::Span::current().record("account", account.to_string());
33 |
34 |     let product = state.get_product(&account).await?;
35 |     Ok(serde_json::to_value(product)?)
36 | }
37 |
--------------------------------------------------------------------------------
/integration-tests/message_buffer_client_codegen/errors/__init__.py:
--------------------------------------------------------------------------------
1 | import typing
2 | import re
3 | from solders.transaction_status import (
4 |     InstructionErrorCustom,
5 |     TransactionErrorInstructionError,
6 | )
7 | from solana.rpc.core import RPCException
8 | from solders.rpc.errors import SendTransactionPreflightFailureMessage
9 | from anchorpy.error import extract_code_and_logs
10 | from ..program_id import PROGRAM_ID
11 | from . import anchor
12 | from . import custom
13 |
14 |
15 | def from_code(code: int) -> typing.Union[custom.CustomError, anchor.AnchorError, None]:
16 |     return custom.from_code(code) if code >= 6000 else anchor.from_code(code)
17 |
18 |
19 | error_re = re.compile(r"Program (\w+) failed: custom program error: (\w+)")
20 |
21 |
22 | def from_tx_error(
23 |     error: RPCException,
24 | ) -> typing.Union[anchor.AnchorError, custom.CustomError, None]:
25 |     err_info = error.args[0]
26 |     extracted = extract_code_and_logs(err_info, PROGRAM_ID)
27 |     if extracted is None:
28 |         return None
29 |     return from_code(extracted[0])
30 |
--------------------------------------------------------------------------------
/src/agent/services/notifier.rs:
--------------------------------------------------------------------------------
1 | //! Notifier
2 | //!
3 | //! The notifier is responsible for notifying subscribers who have registered
4 | //! for price sched updates.
5 |
6 | use {
7 |     crate::agent::state::Prices,
8 |     std::sync::Arc,
9 |     tracing::instrument,
10 | };
11 |
12 | #[instrument(skip(state))]
13 | pub async fn notifier<S>(state: Arc<S>)
14 | where
15 |     S: Prices,
16 | {
17 |     let mut interval = tokio::time::interval(state.notify_interval_duration());
18 |     let mut exit = crate::agent::EXIT.subscribe();
19 |     loop {
20 |         Prices::drop_closed_subscriptions(&*state).await;
21 |         tokio::select! {
22 |             _ = exit.changed() => {
23 |                 tracing::info!("Shutdown signal received.");
24 |                 return;
25 |             }
26 |             _ = interval.tick() => {
27 |                 if let Err(err) = state.send_notify_price_sched().await {
28 |                     tracing::error!(err = ?err, "Notifier: failed to send notify price sched.");
29 |                 }
30 |             }
31 |         }
32 |     }
33 | }
34 |
--------------------------------------------------------------------------------
/src/agent/pyth/rpc/update_price.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     super::{
3 |         Method,
4 |         UpdatePriceParams,
5 |     },
6 |     crate::agent::state,
7 |     anyhow::{
8 |         Result,
9 |         anyhow,
10 |     },
11 |     jrpc::{
12 |         Request,
13 |         Value,
14 |     },
15 |     tracing::instrument,
16 | };
17 |
18 | #[instrument(skip_all, fields(account))]
19 | pub async fn update_price<S>(
20 |     state: &S,
21 |     request: &Request<Method, Value>,
22 | ) -> Result<Value>
23 | where
24 |     S: state::Prices,
25 | {
26 |     let params: UpdatePriceParams = serde_json::from_value(
27 |         request
28 |             .params
29 |             .clone()
30 |             .ok_or_else(|| anyhow!("Missing request parameters"))?,
31 |     )?;
32 |
33 |     tracing::Span::current().record("account", params.account.to_string());
34 |
35 |     state
36 |         .update_local_price(
37 |             &params.account.parse::<solana_sdk::pubkey::Pubkey>()?,
38 |             params.price,
39 |             params.conf,
40 |             params.status,
41 |         )
42 |         .await?;
43 |
44 |     Ok(serde_json::to_value(0)?)
45 | }
46 |
--------------------------------------------------------------------------------
/src/agent/pyth/rpc/subscribe_price.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     super::{
3 |         Method,
4 |         NotifyPrice,
5 |         SubscribePriceParams,
6 |         SubscribeResult,
7 |     },
8 |     crate::agent::state,
9 |     anyhow::{
10 |         Result,
11 |         anyhow,
12 |     },
13 |     jrpc::{
14 |         Request,
15 |         Value,
16 |     },
17 |     tokio::sync::mpsc,
18 |     tracing::instrument,
19 | };
20 |
21 | #[instrument(skip_all, fields(account))]
22 | pub async fn subscribe_price<S>(
23 |     state: &S,
24 |     notify_price_tx: &mpsc::Sender<NotifyPrice>,
25 |     request: &Request<Method, Value>,
26 | ) -> Result<Value>
27 | where
28 |     S: state::Prices,
29 | {
30 |     let params: SubscribePriceParams = serde_json::from_value(
31 |         request
32 |             .params
33 |             .clone()
34 |             .ok_or_else(|| anyhow!("Missing request parameters"))?,
35 |     )?;
36 |
37 |     let account = params.account.parse::<solana_sdk::pubkey::Pubkey>()?;
38 |     tracing::Span::current().record("account", account.to_string());
39 |
40 |     let subscription = state
41 |         .subscribe_price(&account, notify_price_tx.clone())
42 |         .await;
43 |
44 |     Ok(serde_json::to_value(SubscribeResult { subscription })?)
45 | }
46 |
--------------------------------------------------------------------------------
/src/agent/pyth/rpc/subscribe_price_sched.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     super::{
3 |         Method,
4 |         NotifyPriceSched,
5 |         SubscribePriceSchedParams,
6 |         SubscribeResult,
7 |     },
8 |     crate::agent::state,
9 |     anyhow::{
10 |         Result,
11 |         anyhow,
12 |     },
13 |     jrpc::{
14 |         Request,
15 |         Value,
16 |     },
17 |     tokio::sync::mpsc,
18 |     tracing::instrument,
19 | };
20 |
21 | #[instrument(skip_all, fields(account))]
22 | pub async fn subscribe_price_sched<S>(
23 |     state: &S,
24 |     notify_price_sched_tx: &mpsc::Sender<NotifyPriceSched>,
25 |     request: &Request<Method, Value>,
26 | ) -> Result<Value>
27 | where
28 |     S: state::Prices,
29 | {
30 |     let params: SubscribePriceSchedParams = serde_json::from_value(
31 |         request
32 |             .params
33 |             .clone()
34 |             .ok_or_else(|| anyhow!("Missing request parameters"))?,
35 |     )?;
36 |
37 |     let account = params.account.parse::<solana_sdk::pubkey::Pubkey>()?;
38 |     tracing::Span::current().record("account", account.to_string());
39 |
40 |     let subscription = state
41 |         .subscribe_price_sched(&account, notify_price_sched_tx.clone())
42 |         .await;
43 |
44 |     Ok(serde_json::to_value(SubscribeResult { subscription })?)
45 | }
46 |
--------------------------------------------------------------------------------
/.github/workflows/image-push.yaml:
--------------------------------------------------------------------------------
1 | name: Build and Push Pyth Agent Image
2 | on:
3 |   push:
4 |     tags:
5 |       - v*
6 |   workflow_dispatch:
7 |     inputs:
8 |       dispatch_description:
9 |         description: "Dispatch description"
10 |         required: true
11 |         type: string
12 | jobs:
13 |   agent-push-image:
14 |     runs-on: ubuntu-latest
15 |     permissions:
16 |       id-token: write
17 |       contents: read
18 |     steps:
19 |       - uses: actions/checkout@v2
20 |       - uses: aws-actions/configure-aws-credentials@8a84b07f2009032ade05a88a28750d733cc30db1
21 |         with:
22 |           role-to-assume: arn:aws:iam::192824654885:role/github-actions-ecr
23 |           aws-region: eu-west-2
24 |       - uses: docker/login-action@v2
25 |         with:
26 |           registry: public.ecr.aws
27 |         env:
28 |           AWS_REGION: us-east-1
29 |       - run: docker context create builders
30 |       - uses: docker/setup-buildx-action@v2
31 |         with:
32 |           version: latest
33 |           endpoint: builders
34 |       - uses: haya14busa/action-cond@v1
35 |         id: image_tag
36 |         with:
37 |           cond: ${{ startsWith(github.ref, 'refs/tags/') }}
38 |           if_true: ${{ github.ref_name }}
39 |           if_false: ${{ github.sha }}
40 |       - uses: docker/build-push-action@v2
41 |         with:
42 |           push: true
43 |           tags: public.ecr.aws/pyth-network/agent:${{ steps.image_tag.outputs.value }}
44 |
--------------------------------------------------------------------------------
/config/config.sample.pythnet.toml:
--------------------------------------------------------------------------------
1 | [pythd_api_server]
2 | listen_address = "127.0.0.1:8910"
3 |
4 | [primary_network]
5 |
6 | # HTTP(S) endpoints of the RPC node. Public Pythnet RPC endpoints are usually
7 | # rate-limited, so a private endpoint should be used in most cases.
8 | # API calls will cycle through each on failure.
9 | rpc_urls = ["https://api2.pythnet.pyth.network"]
10 |
11 | # WS(S) endpoint of the RPC node. This is used to subscribe to account changes on the network.
12 | # This can be omitted when oracle.subscriber_enabled is set to false.
13 | wss_urls = ["wss://api2.pythnet.pyth.network"]
14 |
15 | # Path to your publishing keypair.
16 | key_store.publish_keypair_path = "/path/to/keypair.json"
17 |
18 | # Oracle program pubkey
19 | key_store.pyth_oracle_program_key = "FsJ3A3u2vn5cTVofAjvy6y5kwABJAqYWpe4975bi2epH"
20 |
21 | # The price store program key
22 | key_store.pyth_price_store_program_key = "3m6sv6HGqEbuyLV84mD7rJn4MAC9LhUa1y1AUNVqcPfr"
23 |
24 | # Set the max price updates per transaction to 47
25 | exporter.max_batch_size = 47
26 |
27 | # Compute unit per price update.
28 | exporter.compute_unit_limit = 5000
29 |
30 | # Configuration for the JRPC API
31 | [pythd_adapter]
32 |
33 | # The duration of the interval at which `notify_price_sched` notifications will be sent.
34 | # Note that this doesn't affect the rate at which transactions are published:
35 | # this is solely a backwards-compatibility API feature.
36 | notify_price_sched_interval_duration = "400ms"
37 |
--------------------------------------------------------------------------------
/integration-tests/message_buffer_client_codegen/instructions/initialize.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import typing
3 | from solana.publickey import PublicKey
4 | from solana.system_program import SYS_PROGRAM_ID
5 | from solana.transaction import TransactionInstruction, AccountMeta
6 | from anchorpy.borsh_extension import BorshPubkey
7 | import borsh_construct as borsh
8 | from ..program_id import PROGRAM_ID
9 |
10 |
11 | class InitializeArgs(typing.TypedDict):
12 |     admin: PublicKey
13 |
14 |
15 | layout = borsh.CStruct("admin" / BorshPubkey)
16 | INITIALIZE_ACCOUNTS_WHITELIST = PublicKey.find_program_address(
17 |     seeds=[b"message", b"whitelist"],
18 |     program_id=PROGRAM_ID,
19 | )[0]
20 |
21 |
22 | class InitializeAccounts(typing.TypedDict):
23 |     payer: PublicKey
24 |
25 |
26 | def initialize(
27 |     args: InitializeArgs,
28 |     accounts: InitializeAccounts,
29 |     program_id: PublicKey = PROGRAM_ID,
30 |     remaining_accounts: typing.Optional[typing.List[AccountMeta]] = None,
31 | ) -> TransactionInstruction:
32 |     keys: list[AccountMeta] = [
33 |         AccountMeta(pubkey=accounts["payer"], is_signer=True, is_writable=True),
34 |         AccountMeta(
35 |             pubkey=INITIALIZE_ACCOUNTS_WHITELIST, is_signer=False, is_writable=True
36 |         ),
37 |         AccountMeta(pubkey=SYS_PROGRAM_ID, is_signer=False, is_writable=False),
38 |     ]
39 |     if remaining_accounts is not None:
40 |         keys += remaining_accounts
41 |     identifier = b"\xaf\xafm\x1f\r\x98\x9b\xed"
42 |     encoded_args = layout.build(
43 |         {
44 |             "admin": args["admin"],
45 |         }
46 |     )
47 |     data = identifier + encoded_args
48 |     return TransactionInstruction(keys, program_id, data)
49 |
--------------------------------------------------------------------------------
/config/config.sample.pythtest.toml:
--------------------------------------------------------------------------------
1 | [pythd_api_server]
2 | listen_address = "127.0.0.1:8910"
3 |
4 | [primary_network]
5 |
6 | # HTTP(S) endpoints of the RPC node.
7 | # API calls will cycle through each on failure.
8 | rpc_urls = ["https://api.pythtest.pyth.network"]
9 |
10 | # WS(S) endpoint of the RPC node. This is used to subscribe to account changes
11 | # on the network. This can be omitted when oracle.subscriber_enabled is set to
12 | # false.
13 | wss_urls = ["wss://api.pythtest.pyth.network"]
14 |
15 | # Path to your publishing keypair.
16 | key_store.publish_keypair_path = "/path/to/keypair.json"
17 |
18 | # Oracle program pubkey
19 | key_store.pyth_oracle_program_key = "8tfDNiaEyrV6Q1U4DEXrEigs9DoDtkugzFbybENEbCDz" # conformance
20 | # key_store.pyth_oracle_program_key = "gSbePebfvPy7tRqimPoVecS2UsBvYv46ynrzWocc92s" # cross-chain
21 |
22 | # Pythtest accumulator key (only for the cross-chain oracle)
23 | # key_store.accumulator_key = "7Vbmv1jt4vyuqBZcpYPpnVhrqVe5e6ZPb6JxDcffRHUM"
24 |
25 | # The price store program key (only for the cross-chain oracle)
26 | # key_store.pyth_price_store_program_key = "3m6sv6HGqEbuyLV84mD7rJn4MAC9LhUa1y1AUNVqcPfr"
27 |
28 | # Set the max price updates per transaction to 47 (only for the cross-chain oracle)
29 | # exporter.max_batch_size = 47
30 |
31 | # Duration of the interval at which to publish updates
32 | exporter.publish_interval_duration = "400ms"
33 |
34 | # Configuration for the JRPC API
35 | [pythd_adapter]
36 |
37 | # The duration of the interval at which `notify_price_sched` notifications will be sent.
38 | # Note that this doesn't affect the rate at which transactions are published:
39 | # this is solely a backwards-compatibility API feature.
40 | notify_price_sched_interval_duration = "400ms"
41 |
--------------------------------------------------------------------------------
/integration-tests/message_buffer_client_codegen/instructions/update_whitelist_admin.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import typing
3 | from solana.publickey import PublicKey
4 | from solana.transaction import TransactionInstruction, AccountMeta
5 | from anchorpy.borsh_extension import BorshPubkey
6 | import borsh_construct as borsh
7 | from ..program_id import PROGRAM_ID
8 |
9 |
10 | class UpdateWhitelistAdminArgs(typing.TypedDict):
11 |     new_admin: PublicKey
12 |
13 |
14 | layout = borsh.CStruct("new_admin" / BorshPubkey)
15 | UPDATE_WHITELIST_ADMIN_ACCOUNTS_WHITELIST = PublicKey.find_program_address(
16 |     seeds=[b"message", b"whitelist"],
17 |     program_id=PROGRAM_ID,
18 | )[0]
19 |
20 |
21 | class UpdateWhitelistAdminAccounts(typing.TypedDict):
22 |     payer: PublicKey
23 |     admin: PublicKey
24 |
25 |
26 | def update_whitelist_admin(
27 |     args: UpdateWhitelistAdminArgs,
28 |     accounts: UpdateWhitelistAdminAccounts,
29 |     program_id: PublicKey = PROGRAM_ID,
30 |     remaining_accounts: typing.Optional[typing.List[AccountMeta]] = None,
31 | ) -> TransactionInstruction:
32 |     keys: list[AccountMeta] = [
33 |         AccountMeta(pubkey=accounts["payer"], is_signer=True, is_writable=True),
34 |         AccountMeta(pubkey=accounts["admin"], is_signer=True, is_writable=False),
35 |         AccountMeta(
36 |             pubkey=UPDATE_WHITELIST_ADMIN_ACCOUNTS_WHITELIST,
37 |             is_signer=False,
38 |             is_writable=True,
39 |         ),
40 |     ]
41 |     if remaining_accounts is not None:
42 |         keys += remaining_accounts
43 |     identifier = b"r\x1e\xf9\x9a\xb6\x17\x94b"
44 |     encoded_args = layout.build(
45 |         {
46 |             "new_admin": args["new_admin"],
47 |         }
48 |     )
49 |     data = identifier + encoded_args
50 |     return TransactionInstruction(keys, program_id, data)
51 |
--------------------------------------------------------------------------------
/flake.nix:
--------------------------------------------------------------------------------
1 | # Note:
2 | #
3 | # This file provides a Flake for Nix/NixOs users. It allows users to
4 | # build or work with the project without having to worry about which
5 | # dependencies are required.
It is not required to build the project 6 | # and can be ignored by users who do not use Nix/NixOs. 7 | # 8 | # See README for instructions on building the project. 9 | # 10 | # 11 | # Usage: 12 | # 13 | # ```bash 14 | # $ nix build # Build 15 | # $ nix develop # Instant Dev Environment 16 | # $ nix run . -- # Run pyth-agent without installing. 17 | # ``` 18 | # 19 | # You can still run `nix-shell` if you prefer to not use flakes. 20 | 21 | { 22 | description = "Pyth Agent"; 23 | nixConfig.bash-prompt = "\[nix@pyth-agent\]$ "; 24 | inputs.nixpkgs.url = "nixpkgs/nixos-unstable"; 25 | inputs.flake-utils.url = "github:numtide/flake-utils"; 26 | inputs.fenix.url = "github:nix-community/fenix"; 27 | inputs.fenix.inputs.nixpkgs.follows = "nixpkgs"; 28 | 29 | outputs = 30 | { self 31 | , nixpkgs 32 | , fenix 33 | , flake-utils 34 | }: 35 | 36 | # Generate a Flake Configuration for each supported system. 37 | flake-utils.lib.eachDefaultSystem (system: 38 | let 39 | pkgs = nixpkgs.legacyPackages.${system}; 40 | shell = import ./shell.nix { inherit pkgs; }; 41 | rust = pkgs.makeRustPlatform { 42 | inherit (fenix.packages.${system}.minimal) 43 | rustc 44 | cargo; 45 | }; 46 | 47 | in 48 | { 49 | devShells.default = shell; 50 | packages.default = rust.buildRustPackage { 51 | pname = "pyth-agent"; 52 | version = "0.0.1"; 53 | src = ./.; 54 | cargoLock = { lockFile = ./Cargo.lock; }; 55 | }; 56 | } 57 | ); 58 | } 59 | -------------------------------------------------------------------------------- /integration-tests/message_buffer_client_codegen/instructions/delete_buffer.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import typing 3 | from solana.publickey import PublicKey 4 | from solana.transaction import TransactionInstruction, AccountMeta 5 | from anchorpy.borsh_extension import BorshPubkey 6 | import borsh_construct as borsh 7 | from ..program_id import PROGRAM_ID 8 | 9 | 10 | class DeleteBufferArgs(typing.TypedDict): 11 | allowed_program_auth: PublicKey 12 | base_account_key: PublicKey 13 | buffer_bump: int 14 | 15 | 16 | layout = borsh.CStruct( 17 | "allowed_program_auth" / BorshPubkey, 18 | "base_account_key" / BorshPubkey, 19 | "buffer_bump" / borsh.U8, 20 | ) 21 | DELETE_BUFFER_ACCOUNTS_WHITELIST = PublicKey.find_program_address( 22 | seeds=[b"message", b"whitelist"], 23 | program_id=PROGRAM_ID, 24 | )[0] 25 | 26 | 27 | class DeleteBufferAccounts(typing.TypedDict): 28 | admin: PublicKey 29 | 30 | 31 | def delete_buffer( 32 | args: DeleteBufferArgs, 33 | accounts: DeleteBufferAccounts, 34 | program_id: PublicKey = PROGRAM_ID, 35 | remaining_accounts: typing.Optional[typing.List[AccountMeta]] = None, 36 | ) -> TransactionInstruction: 37 | keys: list[AccountMeta] = [ 38 | AccountMeta( 39 | pubkey=DELETE_BUFFER_ACCOUNTS_WHITELIST, is_signer=False, is_writable=False 40 | ), 41 | AccountMeta(pubkey=accounts["admin"], is_signer=True, is_writable=True), 42 | ] 43 | if remaining_accounts is not None: 44 | keys += remaining_accounts 45 | identifier = b"\xb8\xc2\xe5a\xdc\n\xe7Z" 46 | encoded_args = layout.build( 47 | { 48 | "allowed_program_auth": args["allowed_program_auth"], 49 | "base_account_key": args["base_account_key"], 50 | "buffer_bump": args["buffer_bump"], 51 | } 52 | ) 53 | data = identifier + encoded_args 54 | return TransactionInstruction(keys, program_id, data) 55 | -------------------------------------------------------------------------------- 
/integration-tests/message_buffer_client_codegen/instructions/set_allowed_programs.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import typing 3 | from solana.publickey import PublicKey 4 | from solana.transaction import TransactionInstruction, AccountMeta 5 | from anchorpy.borsh_extension import BorshPubkey 6 | from construct import Construct 7 | import borsh_construct as borsh 8 | from ..program_id import PROGRAM_ID 9 | 10 | 11 | class SetAllowedProgramsArgs(typing.TypedDict): 12 | allowed_programs: list[PublicKey] 13 | 14 | 15 | layout = borsh.CStruct( 16 | "allowed_programs" / borsh.Vec(typing.cast(Construct, BorshPubkey)) 17 | ) 18 | SET_ALLOWED_PROGRAMS_ACCOUNTS_WHITELIST = PublicKey.find_program_address( 19 | seeds=[b"message", b"whitelist"], 20 | program_id=PROGRAM_ID, 21 | )[0] 22 | 23 | 24 | class SetAllowedProgramsAccounts(typing.TypedDict): 25 | payer: PublicKey 26 | admin: PublicKey 27 | 28 | 29 | def set_allowed_programs( 30 | args: SetAllowedProgramsArgs, 31 | accounts: SetAllowedProgramsAccounts, 32 | program_id: PublicKey = PROGRAM_ID, 33 | remaining_accounts: typing.Optional[typing.List[AccountMeta]] = None, 34 | ) -> TransactionInstruction: 35 | keys: list[AccountMeta] = [ 36 | AccountMeta(pubkey=accounts["payer"], is_signer=True, is_writable=True), 37 | AccountMeta(pubkey=accounts["admin"], is_signer=True, is_writable=False), 38 | AccountMeta( 39 | pubkey=SET_ALLOWED_PROGRAMS_ACCOUNTS_WHITELIST, 40 | is_signer=False, 41 | is_writable=True, 42 | ), 43 | ] 44 | if remaining_accounts is not None: 45 | keys += remaining_accounts 46 | identifier = b"\xf2\xf0\xb2\x03\xbc!\xa1\xb6" 47 | encoded_args = layout.build( 48 | { 49 | "allowed_programs": args["allowed_programs"], 50 | } 51 | ) 52 | data = identifier + encoded_args 53 | return TransactionInstruction(keys, program_id, data) 54 | -------------------------------------------------------------------------------- /integration-tests/message_buffer_client_codegen/instructions/create_buffer.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import typing 3 | from solana.publickey import PublicKey 4 | from solana.system_program import SYS_PROGRAM_ID 5 | from solana.transaction import TransactionInstruction, AccountMeta 6 | from anchorpy.borsh_extension import BorshPubkey 7 | import borsh_construct as borsh 8 | from ..program_id import PROGRAM_ID 9 | 10 | 11 | class CreateBufferArgs(typing.TypedDict): 12 | allowed_program_auth: PublicKey 13 | base_account_key: PublicKey 14 | target_size: int 15 | 16 | 17 | layout = borsh.CStruct( 18 | "allowed_program_auth" / BorshPubkey, 19 | "base_account_key" / BorshPubkey, 20 | "target_size" / borsh.U32, 21 | ) 22 | CREATE_BUFFER_ACCOUNTS_WHITELIST = PublicKey.find_program_address( 23 | seeds=[b"message", b"whitelist"], 24 | program_id=PROGRAM_ID, 25 | )[0] 26 | 27 | 28 | class CreateBufferAccounts(typing.TypedDict): 29 | admin: PublicKey 30 | 31 | 32 | def create_buffer( 33 | args: CreateBufferArgs, 34 | accounts: CreateBufferAccounts, 35 | program_id: PublicKey = PROGRAM_ID, 36 | remaining_accounts: typing.Optional[typing.List[AccountMeta]] = None, 37 | ) -> TransactionInstruction: 38 | keys: list[AccountMeta] = [ 39 | AccountMeta( 40 | pubkey=CREATE_BUFFER_ACCOUNTS_WHITELIST, is_signer=False, is_writable=False 41 | ), 42 | AccountMeta(pubkey=accounts["admin"], is_signer=True, is_writable=True), 43 | AccountMeta(pubkey=SYS_PROGRAM_ID, 
is_signer=False, is_writable=False),
44 |     ]
45 |     if remaining_accounts is not None:
46 |         keys += remaining_accounts
47 |     identifier = b"\xafLeJ\xe0\xf9h\xaa"
48 |     encoded_args = layout.build(
49 |         {
50 |             "allowed_program_auth": args["allowed_program_auth"],
51 |             "base_account_key": args["base_account_key"],
52 |             "target_size": args["target_size"],
53 |         }
54 |     )
55 |     data = identifier + encoded_args
56 |     return TransactionInstruction(keys, program_id, data)
57 |
--------------------------------------------------------------------------------
/integration-tests/message_buffer_client_codegen/instructions/put_all.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import typing
3 | from solana.publickey import PublicKey
4 | from solana.transaction import TransactionInstruction, AccountMeta
5 | from anchorpy.borsh_extension import BorshPubkey
6 | from construct import Construct
7 | import borsh_construct as borsh
8 | from ..program_id import PROGRAM_ID
9 |
10 |
11 | class PutAllArgs(typing.TypedDict):
12 |     base_account_key: PublicKey
13 |     messages: list[bytes]
14 |
15 |
16 | layout = borsh.CStruct(
17 |     "base_account_key" / BorshPubkey,
18 |     "messages" / borsh.Vec(typing.cast(Construct, borsh.Bytes)),
19 | )
20 | WHITELIST_VERIFIER_NESTED_WHITELIST = PublicKey.find_program_address(
21 |     seeds=[b"message", b"whitelist"],
22 |     program_id=PROGRAM_ID,
23 | )[0]
24 |
25 |
26 | class PutAllAccounts(typing.TypedDict):
27 |     whitelist_verifier: WhitelistVerifierNested
28 |
29 |
30 | class WhitelistVerifierNested(typing.TypedDict):
31 |     cpi_caller_auth: PublicKey
32 |
33 |
34 | def put_all(
35 |     args: PutAllArgs,
36 |     accounts: PutAllAccounts,
37 |     program_id: PublicKey = PROGRAM_ID,
38 |     remaining_accounts: typing.Optional[typing.List[AccountMeta]] = None,
39 | ) -> TransactionInstruction:
40 |     keys: list[AccountMeta] = [
41 |         AccountMeta(
42 |             pubkey=WHITELIST_VERIFIER_NESTED_WHITELIST,
43 |             is_signer=False,
44 |             is_writable=False,
45 |         ),
46 |         AccountMeta(
47 |             pubkey=accounts["whitelist_verifier"]["cpi_caller_auth"],
48 |             is_signer=True,
49 |             is_writable=False,
50 |         ),
51 |     ]
52 |     if remaining_accounts is not None:
53 |         keys += remaining_accounts
54 |     identifier = b"\xd4\xe1\xc1[\x97\xee\x14]"
55 |     encoded_args = layout.build(
56 |         {
57 |             "base_account_key": args["base_account_key"],
58 |             "messages": args["messages"],
59 |         }
60 |     )
61 |     data = identifier + encoded_args
62 |     return TransactionInstruction(keys, program_id, data)
63 |
--------------------------------------------------------------------------------
/src/agent/state/keypairs.rs:
--------------------------------------------------------------------------------
1 | //! Keypair Management API
2 | //!
3 | //! The Keypair Manager allows hotloading keypairs via a HTTP request.
4 |
5 | use {
6 |     super::State,
7 |     crate::agent::solana::network::Network,
8 |     anyhow::Result,
9 |     solana_sdk::signature::Keypair,
10 |     tokio::sync::RwLock,
11 |     tracing::instrument,
12 | };
13 |
14 | #[derive(Default)]
15 | pub struct KeypairState {
16 |     primary_current_keypair: RwLock<Option<Keypair>>,
17 |     secondary_current_keypair: RwLock<Option<Keypair>>,
18 | }
19 |
20 | #[async_trait::async_trait]
21 | pub trait Keypairs {
22 |     async fn request_keypair(&self, network: Network) -> Result<Keypair>;
23 |     async fn update_keypair(&self, network: Network, new_keypair: Keypair);
24 | }
25 |
26 | // Allow downcasting State into Keypairs for functions that depend on the `Keypairs` service.
27 | impl<'a> From<&'a State> for &'a KeypairState {
28 |     fn from(state: &'a State) -> &'a KeypairState {
29 |         &state.keypairs
30 |     }
31 | }
32 |
33 | #[async_trait::async_trait]
34 | impl<T> Keypairs for T
35 | where
36 |     for<'a> &'a T: Into<&'a KeypairState>,
37 |     T: Sync,
38 | {
39 |     #[instrument(skip(self))]
40 |     async fn request_keypair(&self, network: Network) -> Result<Keypair> {
41 |         let keypair = match network {
42 |             Network::Primary => &self.into().primary_current_keypair,
43 |             Network::Secondary => &self.into().secondary_current_keypair,
44 |         }
45 |         .read()
46 |         .await;
47 |
48 |         Ok(Keypair::from_bytes(
49 |             &keypair
50 |                 .as_ref()
51 |                 .ok_or_else(|| anyhow::anyhow!("Keypair not available"))?
52 |                 .to_bytes(),
53 |         )?)
54 |     }
55 |
56 |     #[instrument(skip(self, new_keypair))]
57 |     async fn update_keypair(&self, network: Network, new_keypair: Keypair) {
58 |         *match network {
59 |             Network::Primary => self.into().primary_current_keypair.write().await,
60 |             Network::Secondary => self.into().secondary_current_keypair.write().await,
61 |         } = Some(new_keypair);
62 |     }
63 | }
64 |
--------------------------------------------------------------------------------
/integration-tests/message_buffer_client_codegen/instructions/resize_buffer.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import typing
3 | from solana.publickey import PublicKey
4 | from solana.system_program import SYS_PROGRAM_ID
5 | from solana.transaction import TransactionInstruction, AccountMeta
6 | from anchorpy.borsh_extension import BorshPubkey
7 | import borsh_construct as borsh
8 | from ..program_id import PROGRAM_ID
9 |
10 |
11 | class ResizeBufferArgs(typing.TypedDict):
12 |     allowed_program_auth: PublicKey
13 |     base_account_key: PublicKey
14 |     buffer_bump: int
15 |     target_size: int
16 |
17 |
18 | layout = borsh.CStruct(
19 |     "allowed_program_auth" / BorshPubkey,
20 |     "base_account_key" / BorshPubkey,
21 |     "buffer_bump" / borsh.U8,
22 |     "target_size" / borsh.U32,
23 | )
24 | RESIZE_BUFFER_ACCOUNTS_WHITELIST = PublicKey.find_program_address(
25 |     seeds=[b"message", b"whitelist"],
26 |     program_id=PROGRAM_ID,
27 | )[0]
28 |
29 |
30 | class ResizeBufferAccounts(typing.TypedDict):
31 |     admin: PublicKey
32 |
33 |
34 | def resize_buffer(
35 |     args: ResizeBufferArgs,
36 |     accounts: ResizeBufferAccounts,
37 |     program_id: PublicKey = PROGRAM_ID,
38 |     remaining_accounts: typing.Optional[typing.List[AccountMeta]] = None,
39 | ) -> TransactionInstruction:
40 |     keys: list[AccountMeta] = [
41 |         AccountMeta(
42 |             pubkey=RESIZE_BUFFER_ACCOUNTS_WHITELIST, is_signer=False, is_writable=False
43 |         ),
44 |         AccountMeta(pubkey=accounts["admin"], is_signer=True, is_writable=True),
45 |         AccountMeta(pubkey=SYS_PROGRAM_ID, is_signer=False, is_writable=False),
46 |     ]
47 |     if remaining_accounts is not None:
48 |         keys += remaining_accounts
49 |     identifier = b"\x05\xb5\x15\xbf\xd8N\xf5/"
50 |     encoded_args = layout.build(
51 |         {
52 |             "allowed_program_auth": args["allowed_program_auth"],
53 |             "base_account_key": args["base_account_key"],
54 |             "buffer_bump": args["buffer_bump"],
55 |             "target_size": args["target_size"],
56 |         }
57 |     )
58 |     data = identifier + encoded_args
59 |     return TransactionInstruction(keys, program_id, data)
60 |
--------------------------------------------------------------------------------
/flake.lock:
--------------------------------------------------------------------------------
1 | {
2 |   "nodes": {
3 |     "fenix": {
4 |       "inputs": {
5 |         "nixpkgs": [
6 |           "nixpkgs"
7 |         ],
8 |         "rust-analyzer-src": "rust-analyzer-src"
9 |       },
10 |       "locked": {
11 |         "lastModified": 1662447736,
12 |         "narHash": "sha256-HMnv/9V5JekVlBCdb4k9CzhW3UHJ1fi41bI5Kwi19EE=",
13 |         "owner": "nix-community",
14 |         "repo": "fenix",
15 |         "rev": "1bdc5042356c21bdf8075dd63f291cf77bb77b92",
16 |         "type": "github"
17 |       },
18 |       "original": {
19 |         "owner": "nix-community",
20 |         "repo": "fenix",
21 |         "type": "github"
22 |       }
23 |     },
24 |     "flake-utils": {
25 |       "locked": {
26 |         "lastModified": 1659877975,
27 |         "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
28 |         "owner": "numtide",
29 |         "repo": "flake-utils",
30 |         "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
31 |         "type": "github"
32 |       },
33 |       "original": {
34 |         "owner": "numtide",
35 |         "repo": "flake-utils",
36 |         "type": "github"
37 |       }
38 |     },
39 |     "nixpkgs": {
40 |       "locked": {
41 |         "lastModified": 1662019588,
42 |         "narHash": "sha256-oPEjHKGGVbBXqwwL+UjsveJzghWiWV0n9ogo1X6l4cw=",
43 |         "owner": "NixOS",
44 |         "repo": "nixpkgs",
45 |         "rev": "2da64a81275b68fdad38af669afeda43d401e94b",
46 |         "type": "github"
47 |       },
48 |       "original": {
49 |         "id": "nixpkgs",
50 |         "ref": "nixos-unstable",
51 |         "type": "indirect"
52 |       }
53 |     },
54 |     "root": {
55 |       "inputs": {
56 |         "fenix": "fenix",
57 |         "flake-utils": "flake-utils",
58 |         "nixpkgs": "nixpkgs"
59 |       }
60 |     },
61 |     "rust-analyzer-src": {
62 |       "flake": false,
63 |       "locked": {
64 |         "lastModified": 1662377094,
65 |         "narHash": "sha256-0bvOQxEe8nzk/VlhHBrUn/Mz3DlE92Us7JqveVjTe0A=",
66 |         "owner": "rust-lang",
67 |         "repo": "rust-analyzer",
68 |         "rev": "6dfd8aebdfa1ee1824446f01daf5bdb229b32f92",
69 |         "type": "github"
70 |       },
71 |       "original": {
72 |         "owner": "rust-lang",
73 |         "ref": "nightly",
74 |         "repo": "rust-analyzer",
75 |         "type": "github"
76 |       }
77 |     }
78 |   },
79 |   "root": "root",
80 |   "version": 7
81 | }
82 |
--------------------------------------------------------------------------------
/src/agent/pyth.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     serde::{
3 |         Deserialize,
4 |         Serialize,
5 |     },
6 |     smol_str::SmolStr,
7 |     std::{
8 |         collections::BTreeMap,
9 |         sync::Arc,
10 |     },
11 | };
12 |
13 | pub mod rpc;
14 |
15 | pub type Pubkey = SmolStr;
16 | pub type Attrs = BTreeMap<SmolStr, SmolStr>;
17 |
18 | pub type Price = i64;
19 | pub type Exponent = i64;
20 | pub type Conf = u64;
21 | pub type Slot = u64;
22 |
23 | #[derive(Serialize, Deserialize, Debug, Ord, PartialOrd, PartialEq, Eq)]
24 | pub struct ProductAccountMetadata {
25 |     pub account: Pubkey,
26 |     pub attr_dict: Attrs,
27 |     pub price: Arc<[PriceAccountMetadata]>,
28 | }
29 |
30 | #[derive(Serialize, Deserialize, Debug, Ord, PartialOrd, PartialEq, Eq)]
31 | pub struct PriceAccountMetadata {
32 |     pub account: Pubkey,
33 |     pub price_type: SmolStr,
34 |     pub price_exponent: Exponent,
35 | }
36 |
37 | #[derive(Serialize, Deserialize, Debug, Ord, PartialOrd, PartialEq, Eq)]
38 | pub struct ProductAccount {
39 |     pub account: Pubkey,
40 |     pub attr_dict: Attrs,
41 |     pub price_accounts: Arc<[PriceAccount]>,
42 | }
43 |
44 | #[derive(Serialize, Deserialize, Debug, Ord, PartialOrd, PartialEq, Eq)]
45 | pub struct PriceAccount {
46 |     pub account: Pubkey,
47 |     pub price_type: SmolStr,
48 |     pub price_exponent: Exponent,
49 |     pub status: SmolStr,
50 |     pub price: Price,
51 |     pub conf: Conf,
52 |     pub twap: Price,
53 |     pub twac: Price,
54 |     pub valid_slot: Slot,
55 |     pub pub_slot: Slot,
56 |     pub prev_slot: Slot,
57 |     pub prev_price: Price,
58 |     pub prev_conf: Conf,
59 |     pub publisher_accounts:
Arc<[PublisherAccount]>, 60 | } 61 | 62 | #[derive(Serialize, Deserialize, Debug, Ord, PartialOrd, PartialEq, Eq)] 63 | pub struct PublisherAccount { 64 | pub account: Pubkey, 65 | pub status: SmolStr, 66 | pub price: Price, 67 | pub conf: Conf, 68 | pub slot: Slot, 69 | } 70 | 71 | #[derive(Serialize, Deserialize, Debug, Ord, PartialOrd, PartialEq, Eq)] 72 | pub struct NotifyPrice { 73 | pub subscription: SubscriptionID, 74 | pub result: PriceUpdate, 75 | } 76 | 77 | #[derive(Serialize, Deserialize, Debug, Ord, PartialOrd, PartialEq, Eq)] 78 | pub struct NotifyPriceSched { 79 | pub subscription: SubscriptionID, 80 | } 81 | 82 | pub type SubscriptionID = i64; 83 | 84 | #[derive(Serialize, Deserialize, Debug, Ord, PartialOrd, PartialEq, Eq)] 85 | pub struct PriceUpdate { 86 | pub price: Price, 87 | pub conf: Conf, 88 | pub status: SmolStr, 89 | pub valid_slot: Slot, 90 | pub pub_slot: Slot, 91 | } 92 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pyth-agent" 3 | version = "3.0.6" 4 | edition = "2024" 5 | 6 | [[bin]] 7 | name = "agent" 8 | path = "src/bin/agent.rs" 9 | 10 | [dependencies] 11 | anyhow = "1.0.81" 12 | backoff = "0.4.0" 13 | base64 = "0.22.1" 14 | ed25519-dalek = "2.1.1" 15 | serde = { version = "1.0.197", features = ["derive", "rc"] } 16 | async-trait = "0.1.79" 17 | warp = { version = "0.3.6", features = ["websocket"] } 18 | tokio = { version = "1.37.0", features = ["full"] } 19 | tokio-stream = "0.1.15" 20 | futures = { version = "0.3.30" } 21 | futures-util = { version = "0.3.30", default-features = false, features = [ 22 | "sink", 23 | ] } 24 | jrpc = "0.4.1" 25 | serde_json = "1.0.115" 26 | chrono = "0.4.37" 27 | chrono-tz = "0.10.3" 28 | pyth-sdk = "0.8.0" 29 | pyth-sdk-solana = "0.10.4" 30 | solana-account-decoder = "2.2.1" 31 | solana-client = "2.2.1" 32 | solana-pubkey = "2.2.1" 33 | solana-sdk = "2.2.1" 34 | solana-transaction-status = "2.2.1" 35 | bincode = { version = "2.0.1", features = ["serde"] } 36 | config = "0.15.11" 37 | thiserror = "2.0.12" 38 | clap = { version = "4.5.4", features = ["derive"] } 39 | humantime-serde = "1.1.1" 40 | serde-this-or-that = "0.5.0" 41 | prometheus-client = "0.23.1" 42 | lazy_static = "1.4.0" 43 | winnow = "0.7.7" 44 | proptest = "1.4.0" 45 | reqwest = { version = "0.12.0", features = ["json"] } 46 | smol_str = { version="0.3.2", features=["serde"] } 47 | tracing = { version = "0.1.40", features = ["log"] } 48 | tracing-subscriber = { version = "0.3.18", features = ["env-filter", "json"] } 49 | tracing-opentelemetry = "0.24.0" 50 | opentelemetry = "0.23.0" 51 | opentelemetry_sdk = { version = "0.23.0", features = ["rt-tokio"] } 52 | opentelemetry-otlp = { version = "0.16.0" } 53 | protobuf = "3.7.2" 54 | pyth-price-store = "0.1.0" 55 | bytemuck = "1.13.0" 56 | tokio-tungstenite = { version = "0.26.2", features = ["native-tls", "url"] } 57 | http = "1.3.1" 58 | url = { version = "2.5.4", features = ["serde"] } 59 | pyth-lazer-publisher-sdk = "0.1.5" 60 | 61 | [dev-dependencies] 62 | tempfile = "3.20.0" 63 | 64 | [profile.release] 65 | panic = 'abort' 66 | 67 | [profile.dev] 68 | panic = 'abort' 69 | 70 | [lints.rust] 71 | unsafe_code = "deny" 72 | 73 | [lints.clippy] 74 | wildcard_dependencies = "deny" 75 | 76 | collapsible_if = "allow" 77 | collapsible_else_if = "allow" 78 | 79 | allow_attributes_without_reason = "warn" 80 | 81 | # Panics 82 | expect_used = "warn" 83 | 
fallible_impl_from = "warn" 84 | indexing_slicing = "warn" 85 | panic = "warn" 86 | panic_in_result_fn = "warn" 87 | string_slice = "warn" 88 | todo = "warn" 89 | unchecked_duration_subtraction = "warn" 90 | unreachable = "warn" 91 | unwrap_in_result = "warn" 92 | unwrap_used = "warn" 93 | 94 | # Correctness 95 | cast_lossless = "warn" 96 | cast_possible_truncation = "warn" 97 | cast_possible_wrap = "warn" 98 | cast_sign_loss = "warn" 99 | collection_is_never_read = "warn" 100 | match_wild_err_arm = "warn" 101 | path_buf_push_overwrite = "warn" 102 | read_zero_byte_vec = "warn" 103 | same_name_method = "warn" 104 | suspicious_operation_groupings = "warn" 105 | suspicious_xor_used_as_pow = "warn" 106 | unused_self = "warn" 107 | used_underscore_binding = "warn" 108 | while_float = "warn" 109 | -------------------------------------------------------------------------------- /integration-tests/message_buffer_client_codegen/accounts/message_buffer.py: -------------------------------------------------------------------------------- 1 | import typing 2 | from dataclasses import dataclass 3 | from solana.publickey import PublicKey 4 | from solana.rpc.async_api import AsyncClient 5 | from solana.rpc.commitment import Commitment 6 | import borsh_construct as borsh 7 | from anchorpy.coder.accounts import ACCOUNT_DISCRIMINATOR_SIZE 8 | from anchorpy.error import AccountInvalidDiscriminator 9 | from anchorpy.utils.rpc import get_multiple_accounts 10 | from ..program_id import PROGRAM_ID 11 | 12 | 13 | class MessageBufferJSON(typing.TypedDict): 14 | bump: int 15 | version: int 16 | header_len: int 17 | end_offsets: list[int] 18 | 19 | 20 | @dataclass 21 | class MessageBuffer: 22 | discriminator: typing.ClassVar = b"\x19\xf4\x03\x05\xe1\xa5\x1d\xfa" 23 | layout: typing.ClassVar = borsh.CStruct( 24 | "bump" / borsh.U8, 25 | "version" / borsh.U8, 26 | "header_len" / borsh.U16, 27 | "end_offsets" / borsh.U16[255], 28 | ) 29 | bump: int 30 | version: int 31 | header_len: int 32 | end_offsets: list[int] 33 | 34 | @classmethod 35 | async def fetch( 36 | cls, 37 | conn: AsyncClient, 38 | address: PublicKey, 39 | commitment: typing.Optional[Commitment] = None, 40 | program_id: PublicKey = PROGRAM_ID, 41 | ) -> typing.Optional["MessageBuffer"]: 42 | resp = await conn.get_account_info(address, commitment=commitment) 43 | info = resp.value 44 | if info is None: 45 | return None 46 | if info.owner != program_id.to_solders(): 47 | raise ValueError("Account does not belong to this program") 48 | bytes_data = info.data 49 | return cls.decode(bytes_data) 50 | 51 | @classmethod 52 | async def fetch_multiple( 53 | cls, 54 | conn: AsyncClient, 55 | addresses: list[PublicKey], 56 | commitment: typing.Optional[Commitment] = None, 57 | program_id: PublicKey = PROGRAM_ID, 58 | ) -> typing.List[typing.Optional["MessageBuffer"]]: 59 | infos = await get_multiple_accounts(conn, addresses, commitment=commitment) 60 | res: typing.List[typing.Optional["MessageBuffer"]] = [] 61 | for info in infos: 62 | if info is None: 63 | res.append(None) 64 | continue 65 | if info.account.owner != program_id: 66 | raise ValueError("Account does not belong to this program") 67 | res.append(cls.decode(info.account.data)) 68 | return res 69 | 70 | @classmethod 71 | def decode(cls, data: bytes) -> "MessageBuffer": 72 | if data[:ACCOUNT_DISCRIMINATOR_SIZE] != cls.discriminator: 73 | raise AccountInvalidDiscriminator( 74 | "The discriminator for this account is invalid" 75 | ) 76 | dec = MessageBuffer.layout.parse(data[ACCOUNT_DISCRIMINATOR_SIZE:]) 77 
| return cls(
78 |             bump=dec.bump,
79 |             version=dec.version,
80 |             header_len=dec.header_len,
81 |             end_offsets=dec.end_offsets,
82 |         )
83 |
84 |     def to_json(self) -> MessageBufferJSON:
85 |         return {
86 |             "bump": self.bump,
87 |             "version": self.version,
88 |             "header_len": self.header_len,
89 |             "end_offsets": self.end_offsets,
90 |         }
91 |
92 |     @classmethod
93 |     def from_json(cls, obj: MessageBufferJSON) -> "MessageBuffer":
94 |         return cls(
95 |             bump=obj["bump"],
96 |             version=obj["version"],
97 |             header_len=obj["header_len"],
98 |             end_offsets=obj["end_offsets"],
99 |         )
100 |
--------------------------------------------------------------------------------
/src/agent/config.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     super::{
3 |         metrics,
4 |         pyth,
5 |         services,
6 |         solana::network,
7 |         state,
8 |     },
9 |     anyhow::Result,
10 |     config as config_rs,
11 |     config_rs::{
12 |         Environment,
13 |         File,
14 |     },
15 |     serde::Deserialize,
16 |     std::{
17 |         path::Path,
18 |         time::Duration,
19 |     },
20 | };
21 |
22 | /// Configuration for all components of the Agent
23 | #[derive(Deserialize, Debug)]
24 | pub struct Config {
25 |     #[serde(default)]
26 |     pub channel_capacities: ChannelCapacities,
27 |     pub primary_network: network::Config,
28 |     pub secondary_network: Option<network::Config>,
29 |     #[serde(default)]
30 |     #[serde(rename = "pythd_adapter")]
31 |     pub state: state::Config,
32 |     #[serde(default)]
33 |     pub pythd_api_server: pyth::rpc::Config,
34 |     #[serde(default)]
35 |     pub metrics_server: metrics::Config,
36 |     #[serde(default)]
37 |     pub remote_keypair_loader: services::keypairs::Config,
38 |     pub opentelemetry: Option<OpenTelemetryConfig>,
39 |     pub pyth_lazer: Option<services::lazer_exporter::Config>,
40 | }
41 |
42 | impl Config {
43 |     pub fn new(config_file: impl AsRef<Path>) -> Result<Self> {
44 |         // Build a new configuration object, allowing the default values to be
45 |         // overridden by those in the config_file or "AGENT_"-prefixed environment
46 |         // variables.
47 |         config_rs::Config::builder()
48 |             .add_source(File::from(config_file.as_ref()))
49 |             .add_source(Environment::with_prefix("agent"))
50 |             .build()?
51 |             .try_deserialize()
52 |             .map_err(|e| e.into())
53 |     }
54 | }
55 |
56 | /// Capacities of the channels top-level components use to communicate
57 | #[derive(Deserialize, Debug)]
58 | pub struct ChannelCapacities {
59 |     /// Capacity of the channel used to broadcast shutdown events to all components
60 |     pub shutdown: usize,
61 |     /// Capacity of the channel used to send updates from the primary Oracle to the Global Store
62 |     pub primary_oracle_updates: usize,
63 |     /// Capacity of the channel used to send updates from the secondary Oracle to the Global Store
64 |     pub secondary_oracle_updates: usize,
65 |     /// Capacity of the channel the Pythd API Adapter uses to send lookup requests to the Global Store
66 |     pub global_store_lookup: usize,
67 |     /// Capacity of the channel the Pythd API Adapter uses to communicate with the Local Store
68 |     pub local_store_lookup: usize,
69 |     /// Capacity of the channel on which the Local Store receives messages
70 |     pub local_store: usize,
71 |     /// Capacity of the channel on which the Pythd API Adapter receives messages
72 |     pub pythd_adapter: usize,
73 |     /// Capacity of the slog logging channel.
Adjust this value if you see complaints about channel capacity from slog 74 | pub logger_buffer: usize, 75 | } 76 | 77 | impl Default for ChannelCapacities { 78 | fn default() -> Self { 79 | Self { 80 | shutdown: 10000, 81 | primary_oracle_updates: 10000, 82 | secondary_oracle_updates: 10000, 83 | global_store_lookup: 10000, 84 | local_store_lookup: 10000, 85 | local_store: 10000, 86 | pythd_adapter: 10000, 87 | logger_buffer: 10000, 88 | } 89 | } 90 | } 91 | 92 | 93 | #[derive(Deserialize, Debug)] 94 | pub struct OpenTelemetryConfig { 95 | #[serde(with = "humantime_serde")] 96 | pub exporter_timeout_duration: Duration, 97 | pub exporter_endpoint: String, 98 | } 99 | -------------------------------------------------------------------------------- /integration-tests/message_buffer_client_codegen/accounts/whitelist.py: -------------------------------------------------------------------------------- 1 | import typing 2 | from dataclasses import dataclass 3 | from construct import Construct 4 | from solana.publickey import PublicKey 5 | from solana.rpc.async_api import AsyncClient 6 | from solana.rpc.commitment import Commitment 7 | import borsh_construct as borsh 8 | from anchorpy.coder.accounts import ACCOUNT_DISCRIMINATOR_SIZE 9 | from anchorpy.error import AccountInvalidDiscriminator 10 | from anchorpy.utils.rpc import get_multiple_accounts 11 | from anchorpy.borsh_extension import BorshPubkey 12 | from ..program_id import PROGRAM_ID 13 | 14 | 15 | class WhitelistJSON(typing.TypedDict): 16 | bump: int 17 | admin: str 18 | allowed_programs: list[str] 19 | 20 | 21 | @dataclass 22 | class Whitelist: 23 | discriminator: typing.ClassVar = b"\xcc\xb04O\x92y6\xf7" 24 | layout: typing.ClassVar = borsh.CStruct( 25 | "bump" / borsh.U8, 26 | "admin" / BorshPubkey, 27 | "allowed_programs" / borsh.Vec(typing.cast(Construct, BorshPubkey)), 28 | ) 29 | bump: int 30 | admin: PublicKey 31 | allowed_programs: list[PublicKey] 32 | 33 | @classmethod 34 | async def fetch( 35 | cls, 36 | conn: AsyncClient, 37 | address: PublicKey, 38 | commitment: typing.Optional[Commitment] = None, 39 | program_id: PublicKey = PROGRAM_ID, 40 | ) -> typing.Optional["Whitelist"]: 41 | resp = await conn.get_account_info(address, commitment=commitment) 42 | info = resp.value 43 | if info is None: 44 | return None 45 | if info.owner != program_id.to_solders(): 46 | raise ValueError("Account does not belong to this program") 47 | bytes_data = info.data 48 | return cls.decode(bytes_data) 49 | 50 | @classmethod 51 | async def fetch_multiple( 52 | cls, 53 | conn: AsyncClient, 54 | addresses: list[PublicKey], 55 | commitment: typing.Optional[Commitment] = None, 56 | program_id: PublicKey = PROGRAM_ID, 57 | ) -> typing.List[typing.Optional["Whitelist"]]: 58 | infos = await get_multiple_accounts(conn, addresses, commitment=commitment) 59 | res: typing.List[typing.Optional["Whitelist"]] = [] 60 | for info in infos: 61 | if info is None: 62 | res.append(None) 63 | continue 64 | if info.account.owner != program_id: 65 | raise ValueError("Account does not belong to this program") 66 | res.append(cls.decode(info.account.data)) 67 | return res 68 | 69 | @classmethod 70 | def decode(cls, data: bytes) -> "Whitelist": 71 | if data[:ACCOUNT_DISCRIMINATOR_SIZE] != cls.discriminator: 72 | raise AccountInvalidDiscriminator( 73 | "The discriminator for this account is invalid" 74 | ) 75 | dec = Whitelist.layout.parse(data[ACCOUNT_DISCRIMINATOR_SIZE:]) 76 | return cls( 77 | bump=dec.bump, 78 | admin=dec.admin, 79 | allowed_programs=dec.allowed_programs, 
80 |         )
81 | 
82 |     def to_json(self) -> WhitelistJSON:
83 |         return {
84 |             "bump": self.bump,
85 |             "admin": str(self.admin),
86 |             "allowed_programs": list(
87 |                 map(lambda item: str(item), self.allowed_programs)
88 |             ),
89 |         }
90 | 
91 |     @classmethod
92 |     def from_json(cls, obj: WhitelistJSON) -> "Whitelist":
93 |         return cls(
94 |             bump=obj["bump"],
95 |             admin=PublicKey(obj["admin"]),
96 |             allowed_programs=list(
97 |                 map(lambda item: PublicKey(item), obj["allowed_programs"])
98 |             ),
99 |         )
100 | 
--------------------------------------------------------------------------------
/src/agent/state/local.rs:
--------------------------------------------------------------------------------
1 | // The Local Store stores a copy of all the price information this local publisher
2 | // is contributing to the network. The Exporters will then take this data and publish
3 | // it to the networks.
4 | use {
5 |     super::State,
6 |     crate::agent::metrics::PriceLocalMetrics,
7 |     anyhow::{
8 |         Result,
9 |         anyhow,
10 |     },
11 |     chrono::NaiveDateTime,
12 |     prometheus_client::registry::Registry,
13 |     pyth_sdk_solana::state::PriceStatus,
14 |     solana_sdk::bs58,
15 |     std::collections::HashMap,
16 |     tokio::sync::RwLock,
17 | };
18 | 
19 | #[derive(Copy, Clone, Debug)]
20 | pub struct PriceInfo {
21 |     pub status: PriceStatus,
22 |     pub price: i64,
23 |     pub conf: u64,
24 |     pub timestamp: NaiveDateTime,
25 | }
26 | 
27 | impl PriceInfo {
28 |     /// Returns false if any non-timestamp fields differ with `other`. Used for last published state comparison in exporter.
29 |     pub fn cmp_no_timestamp(&self, other: &Self) -> bool {
30 |         // Prevent forgetting to use a new field if we expand the type.
31 |         #[deny(unused_variables)]
32 |         let Self {
33 |             status,
34 |             price,
35 |             conf,
36 |             timestamp: _,
37 |         } = self;
38 | 
39 |         status == &other.status && price == &other.price && conf == &other.conf
40 |     }
41 | }
42 | 
43 | pub struct Store {
44 |     prices: RwLock<HashMap<pyth_sdk::Identifier, PriceInfo>>,
45 |     metrics: PriceLocalMetrics,
46 | }
47 | 
48 | impl Store {
49 |     pub fn new(registry: &mut Registry) -> Self {
50 |         Store {
51 |             prices: RwLock::new(HashMap::new()),
52 |             metrics: PriceLocalMetrics::new(registry),
53 |         }
54 |     }
55 | }
56 | 
57 | #[async_trait::async_trait]
58 | pub trait LocalStore {
59 |     async fn update(
60 |         &self,
61 |         price_identifier: pyth_sdk::Identifier,
62 |         price_info: PriceInfo,
63 |     ) -> Result<()>;
64 |     async fn get_all_price_infos(&self) -> HashMap<pyth_sdk::Identifier, PriceInfo>;
65 | }
66 | 
67 | // Allow downcasting State into Store for functions that depend on the `LocalStore` service.
68 | impl<'a> From<&'a State> for &'a Store {
69 |     fn from(state: &'a State) -> &'a Store {
70 |         &state.local_store
71 |     }
72 | }
73 | 
74 | #[async_trait::async_trait]
75 | impl<T> LocalStore for T
76 | where
77 |     for<'a> &'a T: Into<&'a Store>,
78 |     T: Sync,
79 | {
80 |     async fn update(
81 |         &self,
82 |         price_identifier: pyth_sdk::Identifier,
83 |         price_info: PriceInfo,
84 |     ) -> Result<()> {
85 |         tracing::debug!(
86 |             identifier = bs58::encode(price_identifier.to_bytes()).into_string(),
87 |             price_update = ?price_info,
88 |             "Local store received price update."
89 |         );
90 | 
91 |         // Drop the update if it is older than the current one stored for the price
92 |         if let Some(current_price_info) = self.into().prices.read().await.get(&price_identifier) {
93 |             if current_price_info.timestamp > price_info.timestamp {
94 |                 return Err(anyhow!(
95 |                     "Received stale timestamp for price {}",
96 |                     price_identifier
97 |                 ));
98 |             }
99 |         }
100 | 
101 |         self.into().metrics.update(&price_identifier, &price_info);
102 |         self.into()
103 |             .prices
104 |             .write()
105 |             .await
106 |             .insert(price_identifier, price_info);
107 | 
108 |         Ok(())
109 |     }
110 | 
111 |     async fn get_all_price_infos(&self) -> HashMap<pyth_sdk::Identifier, PriceInfo> {
112 |         self.into().prices.read().await.clone()
113 |     }
114 | }
115 | 
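Because of the blanket implementation above, any type convertible to `&Store` gets this API. A minimal, hypothetical usage sketch (assuming an async context with access to a `state: Arc<State>` like the one built in `agent.rs`; the identifier and numeric values are placeholders, not taken from this repository):

```rust
use pyth_sdk_solana::state::PriceStatus;

// Hypothetical call site: push one price update into the Local Store.
let info = PriceInfo {
    status: PriceStatus::Trading,
    price: 42_000_000_000, // price in the feed's fixed-point representation
    conf: 1_500_000,       // confidence interval, same units as price
    timestamp: chrono::Utc::now().naive_utc(),
};
LocalStore::update(&*state, pyth_sdk::Identifier::new([0u8; 32]), info).await?;
```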
--------------------------------------------------------------------------------
/src/bin/agent.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     anyhow::{
3 |         Context,
4 |         Result,
5 |         anyhow,
6 |     },
7 |     clap::Parser,
8 |     opentelemetry::KeyValue,
9 |     opentelemetry_otlp::WithExportConfig,
10 |     pyth_agent::agent::{
11 |         Agent,
12 |         config::Config,
13 |     },
14 |     std::{
15 |         io::IsTerminal,
16 |         path::PathBuf,
17 |     },
18 |     tracing_subscriber::{
19 |         EnvFilter,
20 |         prelude::*,
21 |     },
22 | };
23 | 
24 | #[derive(Parser, Debug)]
25 | #[clap(author = "Pyth Data Association", version)]
26 | /// Pyth Agent - publish data to the Pyth Network
27 | struct Arguments {
28 |     #[clap(short, long, default_value = "config/config.toml")]
29 |     /// Path to configuration file
30 |     config: PathBuf,
31 | 
32 |     #[clap(short = 'L', long)]
33 |     /// Whether to print file:line info for each log statement
34 |     log_locations: bool,
35 | }
36 | 
37 | #[tokio::main]
38 | async fn main() -> Result<()> {
39 |     let args = Arguments::parse();
40 | 
41 |     if !args.config.as_path().exists() {
42 |         return Err(anyhow!("No config found under {:?}", args.config.to_str()));
43 |     }
44 | 
45 |     println!("Loading config from {:?}", args.config.display());
46 | 
47 |     // Parse config early for logging channel capacity
48 |     let config = Config::new(args.config).context("Could not parse config")?;
49 | 
50 |     let env_filter = EnvFilter::from_default_env();
51 | 
52 |     // Initialize a Tracing Subscriber
53 |     let fmt_layer = tracing_subscriber::fmt::layer()
54 |         .with_file(false)
55 |         .with_line_number(true)
56 |         .with_thread_ids(true)
57 |         .with_ansi(std::io::stderr().is_terminal());
58 | 
59 |     let mut layers = Vec::new();
60 | 
61 |     // Set up OpenTelemetry only if it's configured
62 |     if let Some(opentelemetry_config) = &config.opentelemetry {
63 |         // Set up the OpenTelemetry exporter
64 |         let otlp_exporter = opentelemetry_otlp::new_exporter()
65 |             .tonic()
66 |             .with_endpoint(&opentelemetry_config.exporter_endpoint)
67 |             .with_timeout(opentelemetry_config.exporter_timeout_duration);
68 | 
69 |         // Set up the OpenTelemetry tracer
70 |         let tracer = opentelemetry_otlp::new_pipeline()
71 |             .tracing()
72 |             .with_exporter(otlp_exporter)
73 |             .with_trace_config(opentelemetry_sdk::trace::config().with_resource(
74 |                 opentelemetry_sdk::Resource::new(vec![KeyValue::new("service.name", "pyth-agent")]),
75 |             ))
76 |             .install_batch(opentelemetry_sdk::runtime::Tokio)
77 |             .map_err(|e| anyhow::anyhow!("Error initializing open telemetry: {}", e))?;
78 | 
79 |         // Set up the telemetry layer
80 |         let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
81 |         layers.push(telemetry.boxed());
82 |     }
83 |     // Use the compact formatter if we're in a terminal, otherwise use the JSON formatter.
84 |     if std::io::stderr().is_terminal() {
85 |         layers.push(fmt_layer.compact().boxed());
86 |     } else {
87 |         layers.push(fmt_layer.json().boxed());
88 |     }
89 | 
90 |     tracing_subscriber::registry()
91 |         .with(env_filter)
92 |         .with(layers)
93 |         .init();
94 | 
95 |     // Launch the application. If it fails, print the full backtrace and exit. RUST_BACKTRACE
96 |     // should be set to 1 for this otherwise it will only print the top-level error.
97 |     if let Err(err) = start(config).await {
98 |         eprintln!("{}", err.backtrace());
99 |         err.chain().for_each(|cause| eprintln!("{cause}"));
100 |         return Err(err);
101 |     }
102 | 
103 |     Ok(())
104 | }
105 | 
106 | async fn start(config: Config) -> Result<()> {
107 |     Agent::new(config).start().await;
108 |     Ok(())
109 | }
110 | 
--------------------------------------------------------------------------------
/src/agent/state/transactions.rs:
--------------------------------------------------------------------------------
1 | use {
2 |     super::State,
3 |     crate::agent::utils::rpc_multi_client::RpcMultiClient,
4 |     anyhow::Result,
5 |     solana_sdk::{
6 |         commitment_config::CommitmentConfig,
7 |         signature::Signature,
8 |     },
9 |     std::collections::VecDeque,
10 |     tokio::sync::RwLock,
11 |     tracing::instrument,
12 | };
13 | 
14 | #[derive(Default)]
15 | pub struct TransactionsState {
16 |     sent_transactions: RwLock<VecDeque<Signature>>,
17 |     max_transactions: usize,
18 | }
19 | 
20 | impl TransactionsState {
21 |     pub fn new(max_transactions: usize) -> Self {
22 |         Self {
23 |             sent_transactions: Default::default(),
24 |             max_transactions,
25 |         }
26 |     }
27 | }
28 | 
29 | #[async_trait::async_trait]
30 | pub trait Transactions {
31 |     async fn add_transaction(&self, signature: Signature);
32 |     async fn poll_transactions_status(&self, rpc_multi_client: &RpcMultiClient) -> Result<()>;
33 | }
34 | 
35 | /// Allow downcasting State into TransactionsState for functions that depend on the `Transactions` service.
36 | impl<'a> From<&'a State> for &'a TransactionsState {
37 |     fn from(state: &'a State) -> &'a TransactionsState {
38 |         &state.transactions
39 |     }
40 | }
41 | 
42 | #[async_trait::async_trait]
43 | impl<T> Transactions for T
44 | where
45 |     for<'a> &'a T: Into<&'a TransactionsState>,
46 |     T: Sync + Send + 'static,
47 | {
48 |     #[instrument(skip(self))]
49 |     async fn add_transaction(&self, signature: Signature) {
50 |         tracing::debug!(
51 |             signature = signature.to_string(),
52 |             "Monitoring new transaction.",
53 |         );
54 | 
55 |         // Add the new transaction to the list
56 |         let mut txs = self.into().sent_transactions.write().await;
57 |         txs.push_back(signature);
58 | 
59 |         // Pop off the oldest transaction if necessary
60 |         if txs.len() > self.into().max_transactions {
61 |             txs.pop_front();
62 |         }
63 |     }
64 | 
65 |     #[instrument(skip(self, rpc_multi_client))]
66 |     async fn poll_transactions_status(&self, rpc_multi_client: &RpcMultiClient) -> Result<()> {
67 |         let mut txs = self.into().sent_transactions.write().await;
68 |         if txs.is_empty() {
69 |             return Ok(());
70 |         }
71 | 
72 |         let signatures_contiguous = txs.make_contiguous();
73 | 
74 |         // Poll the status of each transaction, in a single RPC request
75 |         let statuses = rpc_multi_client
76 |             .get_signature_statuses(signatures_contiguous)
77 |             .await?;
78 | 
79 |         tracing::debug!(
80 |             statuses = ?statuses,
81 |             "Processing Signature Statuses",
82 |         );
83 | 
84 |         // Determine the percentage of the recently sent transactions that have successfully been committed
85 |         // TODO: expose as metric
86 |         let confirmed = statuses
87 |             .into_iter()
88 |             .zip(signatures_contiguous)
89 |             .filter_map(|(status, sig)| status.map(|some_status| (some_status, sig)))
90 |             .filter(|(status, sig)| {
91 |                 if let Some(err) = status.err.as_ref() {
92 |                     tracing::warn!(
93 |                         error = err.to_string(),
94 |                         tx_signature = sig.to_string(),
95 |                         "TX status has err value",
96 |                     );
97 |                 }
98 | 
99 |                 status.satisfies_commitment(CommitmentConfig::confirmed())
100 |             })
101 |             .count();
102 | 
103 |         let percentage_confirmed = ((confirmed as f64) / (txs.len() as f64)) * 100.0;
104 | 
105 |         tracing::info!(
106 |             percentage_confirmed = format!("{:.}", percentage_confirmed),
107 |             "monitoring transaction hit rate",
108 |         );
109 | 
110 |         Ok(())
111 |     }
112 | }
113 | 
--------------------------------------------------------------------------------
/integration-tests/message_buffer_client_codegen/errors/custom.py:
--------------------------------------------------------------------------------
1 | import typing
2 | from anchorpy.error import ProgramError
3 | 
4 | 
5 | class CallerNotAllowed(ProgramError):
6 |     def __init__(self) -> None:
7 |         super().__init__(6000, "CPI Caller not allowed")
8 | 
9 |     code = 6000
10 |     name = "CallerNotAllowed"
11 |     msg = "CPI Caller not allowed"
12 | 
13 | 
14 | class DuplicateAllowedProgram(ProgramError):
15 |     def __init__(self) -> None:
16 |         super().__init__(6001, "Whitelist already contains program")
17 | 
18 |     code = 6001
19 |     name = "DuplicateAllowedProgram"
20 |     msg = "Whitelist already contains program"
21 | 
22 | 
23 | class ConversionError(ProgramError):
24 |     def __init__(self) -> None:
25 |         super().__init__(6002, "Conversion Error")
26 | 
27 |     code = 6002
28 |     name = "ConversionError"
29 |     msg = "Conversion Error"
30 | 
31 | 
32 | class SerializeError(ProgramError):
33 |     def __init__(self) -> None:
34 |         super().__init__(6003, "Serialization Error")
35 | 
36 |     code = 6003
37 |     name = "SerializeError"
38 |     msg = "Serialization Error"
39 | 
40 | 
41 | class 
WhitelistAdminRequired(ProgramError): 42 | def __init__(self) -> None: 43 | super().__init__(6004, "Whitelist admin required on initialization") 44 | 45 | code = 6004 46 | name = "WhitelistAdminRequired" 47 | msg = "Whitelist admin required on initialization" 48 | 49 | 50 | class InvalidAllowedProgram(ProgramError): 51 | def __init__(self) -> None: 52 | super().__init__(6005, "Invalid allowed program") 53 | 54 | code = 6005 55 | name = "InvalidAllowedProgram" 56 | msg = "Invalid allowed program" 57 | 58 | 59 | class MaximumAllowedProgramsExceeded(ProgramError): 60 | def __init__(self) -> None: 61 | super().__init__(6006, "Maximum number of allowed programs exceeded") 62 | 63 | code = 6006 64 | name = "MaximumAllowedProgramsExceeded" 65 | msg = "Maximum number of allowed programs exceeded" 66 | 67 | 68 | class InvalidPDA(ProgramError): 69 | def __init__(self) -> None: 70 | super().__init__(6007, "Invalid PDA") 71 | 72 | code = 6007 73 | name = "InvalidPDA" 74 | msg = "Invalid PDA" 75 | 76 | 77 | class CurrentDataLengthExceeded(ProgramError): 78 | def __init__(self) -> None: 79 | super().__init__(6008, "Update data exceeds current length") 80 | 81 | code = 6008 82 | name = "CurrentDataLengthExceeded" 83 | msg = "Update data exceeds current length" 84 | 85 | 86 | class MessageBufferNotProvided(ProgramError): 87 | def __init__(self) -> None: 88 | super().__init__(6009, "Message Buffer not provided") 89 | 90 | code = 6009 91 | name = "MessageBufferNotProvided" 92 | msg = "Message Buffer not provided" 93 | 94 | 95 | class MessageBufferTooSmall(ProgramError): 96 | def __init__(self) -> None: 97 | super().__init__(6010, "Message Buffer is not sufficiently large") 98 | 99 | code = 6010 100 | name = "MessageBufferTooSmall" 101 | msg = "Message Buffer is not sufficiently large" 102 | 103 | 104 | class FundBumpNotFound(ProgramError): 105 | def __init__(self) -> None: 106 | super().__init__(6011, "Fund Bump not found") 107 | 108 | code = 6011 109 | name = "FundBumpNotFound" 110 | msg = "Fund Bump not found" 111 | 112 | 113 | class ReallocFailed(ProgramError): 114 | def __init__(self) -> None: 115 | super().__init__(6012, "Reallocation failed") 116 | 117 | code = 6012 118 | name = "ReallocFailed" 119 | msg = "Reallocation failed" 120 | 121 | 122 | class TargetSizeDeltaExceeded(ProgramError): 123 | def __init__(self) -> None: 124 | super().__init__( 125 | 6013, 126 | "Target size too large for reallocation/initialization. Max delta is 10240", 127 | ) 128 | 129 | code = 6013 130 | name = "TargetSizeDeltaExceeded" 131 | msg = "Target size too large for reallocation/initialization. 
Max delta is 10240"
132 | 
133 | 
134 | class MessageBufferUninitialized(ProgramError):
135 |     def __init__(self) -> None:
136 |         super().__init__(6014, "MessageBuffer Uninitialized")
137 | 
138 |     code = 6014
139 |     name = "MessageBufferUninitialized"
140 |     msg = "MessageBuffer Uninitialized"
141 | 
142 | 
143 | CustomError = typing.Union[
144 |     CallerNotAllowed,
145 |     DuplicateAllowedProgram,
146 |     ConversionError,
147 |     SerializeError,
148 |     WhitelistAdminRequired,
149 |     InvalidAllowedProgram,
150 |     MaximumAllowedProgramsExceeded,
151 |     InvalidPDA,
152 |     CurrentDataLengthExceeded,
153 |     MessageBufferNotProvided,
154 |     MessageBufferTooSmall,
155 |     FundBumpNotFound,
156 |     ReallocFailed,
157 |     TargetSizeDeltaExceeded,
158 |     MessageBufferUninitialized,
159 | ]
160 | CUSTOM_ERROR_MAP: dict[int, CustomError] = {
161 |     6000: CallerNotAllowed(),
162 |     6001: DuplicateAllowedProgram(),
163 |     6002: ConversionError(),
164 |     6003: SerializeError(),
165 |     6004: WhitelistAdminRequired(),
166 |     6005: InvalidAllowedProgram(),
167 |     6006: MaximumAllowedProgramsExceeded(),
168 |     6007: InvalidPDA(),
169 |     6008: CurrentDataLengthExceeded(),
170 |     6009: MessageBufferNotProvided(),
171 |     6010: MessageBufferTooSmall(),
172 |     6011: FundBumpNotFound(),
173 |     6012: ReallocFailed(),
174 |     6013: TargetSizeDeltaExceeded(),
175 |     6014: MessageBufferUninitialized(),
176 | }
177 | 
178 | 
179 | def from_code(code: int) -> typing.Optional[CustomError]:
180 |     maybe_err = CUSTOM_ERROR_MAP.get(code)
181 |     if maybe_err is None:
182 |         return None
183 |     return maybe_err
184 | 
--------------------------------------------------------------------------------
/src/agent/solana.rs:
--------------------------------------------------------------------------------
1 | /// This module encapsulates all the interaction with a single Solana network:
2 | /// - The Oracle, which reads data from the network
3 | /// - The Exporter, which publishes data to the network
4 | pub mod network {
5 |     use {
6 |         super::key_store::{
7 |             self,
8 |         },
9 |         crate::agent::{
10 |             services::exporter,
11 |             state::oracle::{
12 |                 self,
13 |             },
14 |         },
15 |         serde::{
16 |             Deserialize,
17 |             Serialize,
18 |         },
19 |         std::time::Duration,
20 |         url::Url,
21 |     };
22 | 
23 |     #[derive(Clone, Copy, Serialize, Deserialize, Debug)]
24 |     pub enum Network {
25 |         Primary,
26 |         Secondary,
27 |     }
28 | 
29 |     #[allow(clippy::unwrap_used, reason = "hardcoded value valid")]
30 |     pub fn default_rpc_urls() -> Vec<Url> {
31 |         vec![Url::parse("http://localhost:8899").unwrap()]
32 |     }
33 | 
34 |     #[allow(clippy::unwrap_used, reason = "hardcoded value valid")]
35 |     pub fn default_wss_urls() -> Vec<Url> {
36 |         vec![Url::parse("http://localhost:8900").unwrap()]
37 |     }
38 | 
39 |     pub fn default_rpc_timeout() -> Duration {
40 |         Duration::from_secs(10)
41 |     }
42 | 
43 |     /// Configuration for a network
44 |     #[derive(Clone, Serialize, Deserialize, Debug)]
45 |     pub struct Config {
46 |         /// HTTP RPC endpoint list
47 |         #[serde(default = "default_rpc_urls")]
48 |         pub rpc_urls: Vec<Url>,
49 |         /// WSS RPC endpoint list
50 |         #[serde(default = "default_wss_urls")]
51 |         pub wss_urls: Vec<Url>,
52 |         /// Timeout for the requests to the RPC
53 |         #[serde(with = "humantime_serde", default = "default_rpc_timeout")]
54 |         pub rpc_timeout: Duration,
55 |         /// Keystore
56 |         pub key_store: key_store::Config,
57 |         /// Configuration for the Oracle reading data from this network
58 |         #[serde(default)]
59 |         pub oracle: oracle::Config,
60 |         /// Configuration for the Exporter publishing data to this network
61 |         #[serde(default)]
62 |         pub exporter: exporter::Config,
63 |     }
64 | }
65 | 
66 | /// The key_store module is responsible for parsing the pythd key store.
67 | pub mod key_store {
68 |     use {
69 |         anyhow::Result,
70 |         serde::{
71 |             Deserialize,
72 |             Deserializer,
73 |             Serialize,
74 |             Serializer,
75 |             de::Error,
76 |         },
77 |         solana_sdk::{
78 |             pubkey::Pubkey,
79 |             signature::Keypair,
80 |             signer::keypair,
81 |         },
82 |         std::{
83 |             path::PathBuf,
84 |             str::FromStr,
85 |         },
86 |     };
87 | 
88 |     #[derive(Clone, Serialize, Deserialize, Debug)]
89 |     pub struct Config {
90 |         /// Path to the keypair used to publish price updates. If set
91 |         /// to a non-existent file path, the system expects a keypair
92 |         /// to be loaded via the remote keypair loader. If the path is
93 |         /// valid, the remote keypair loading is disabled.
94 |         pub publish_keypair_path: PathBuf,
95 |         /// The public key of the Oracle program
96 |         #[serde(
97 |             serialize_with = "pubkey_string_ser",
98 |             deserialize_with = "pubkey_string_de",
99 |             alias = "program_key" // for compatibility
100 |         )]
101 |         pub pyth_oracle_program_key: Pubkey,
102 |         /// The public key of the pyth-price-store program
103 |         #[serde(
104 |             serialize_with = "opt_pubkey_string_ser",
105 |             deserialize_with = "opt_pubkey_string_de",
106 |             default
107 |         )]
108 |         pub pyth_price_store_program_key: Option<Pubkey>,
109 |         /// The public key of the accumulator program.
110 |         #[serde(
111 |             serialize_with = "opt_pubkey_string_ser",
112 |             deserialize_with = "opt_pubkey_string_de",
113 |             default
114 |         )]
115 |         pub accumulator_key: Option<Pubkey>,
116 |     }
117 | 
118 |     pub struct KeyStore {
119 |         /// The keypair used to publish price updates. When None,
120 |         /// publishing will not start until a new keypair is supplied
121 |         /// via the remote loading endpoint
122 |         pub publish_keypair: Option<Keypair>,
123 |         /// Public key of the Oracle program
124 |         pub pyth_oracle_program_key: Pubkey,
125 |         /// Public key of the pyth-price-store program
126 |         pub pyth_price_store_program_key: Option<Pubkey>,
127 |         /// Public key of the accumulator program (if provided)
128 |         pub accumulator_key: Option<Pubkey>,
129 |     }
130 | 
131 |     impl KeyStore {
132 |         pub fn new(config: Config) -> Result<Self> {
133 |             let publish_keypair = match keypair::read_keypair_file(&config.publish_keypair_path) {
134 |                 Ok(k) => Some(k),
135 |                 Err(e) => {
136 |                     tracing::warn!(
137 |                         error = ?e,
138 |                         publish_keypair_path = config.publish_keypair_path.display().to_string(),
139 |                         "Reading publish keypair returned an error. Waiting for a remote-loaded key before publishing.",
140 |                     );
141 |                     None
142 |                 }
143 |             };
144 | 
145 |             Ok(KeyStore {
146 |                 publish_keypair,
147 |                 pyth_oracle_program_key: config.pyth_oracle_program_key,
148 |                 pyth_price_store_program_key: config.pyth_price_store_program_key,
149 |                 accumulator_key: config.accumulator_key,
150 |             })
151 |         }
152 |     }
153 | 
154 |     // Helper methods for stringified SOL addresses
155 | 
156 |     fn pubkey_string_ser<S>(k: &Pubkey, ser: S) -> Result<S::Ok, S::Error>
157 |     where
158 |         S: Serializer,
159 |     {
160 |         ser.serialize_str(&k.to_string())
161 |     }
162 | 
163 |     fn pubkey_string_de<'de, D>(de: D) -> Result<Pubkey, D::Error>
164 |     where
165 |         D: Deserializer<'de>,
166 |     {
167 |         let pubkey_string = String::deserialize(de)?;
168 |         let pubkey = Pubkey::from_str(&pubkey_string).map_err(D::Error::custom)?;
169 |         Ok(pubkey)
170 |     }
171 | 
172 |     fn opt_pubkey_string_ser<S>(k_opt: &Option<Pubkey>, ser: S) -> Result<S::Ok, S::Error>
173 |     where
174 |         S: Serializer,
175 |     {
176 |         let k_str_opt = (*k_opt).map(|k| k.to_string());
177 | 
178 |         Option::<String>::serialize(&k_str_opt, ser)
179 |     }
180 | 
181 |     fn opt_pubkey_string_de<'de, D>(de: D) -> Result<Option<Pubkey>, D::Error>
182 |     where
183 |         D: Deserializer<'de>,
184 |     {
185 |         match Option::<String>::deserialize(de)? {
186 |             Some(k) => Ok(Some(Pubkey::from_str(&k).map_err(D::Error::custom)?)),
187 |             None => Ok(None),
188 |         }
189 |     }
190 | }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Pyth Agent
2 | Publish data to the [Pyth Network](https://pyth.network/).
3 | 
4 | ## Overview
5 | This software runs a JRPC API server, which data providers should use to publish data. Publishing using this intermediate API server provides greater reliability, usability and security than sending transactions directly to an RPC node.
6 | 
7 | Note that only permissioned publishers can publish data to the network. Please read the [publisher guidelines](https://docs.pyth.network/documentation/publish-data) before getting started.
8 | 
9 | ## Build
10 | 
11 | Prerequisites: Rust 1.68 or higher. A Unix system is recommended.
12 | 
13 | ```shell
14 | # Install dependencies (Debian-based systems)
15 | $ apt install libssl-dev build-essential
16 | 
17 | # Install Rust
18 | $ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
19 | $ rustup default 1.68 # Optional
20 | 
21 | # Build the project. This will produce a binary at target/release/agent
22 | $ cargo build --release
23 | ```
24 | 
25 | ## Configure
26 | The agent takes a single `--config` CLI option, pointing at
27 | `config/config.toml` by default. An example configuration is provided
28 | there, containing a minimal set of mandatory options and documentation
29 | comments for optional settings. **The config file must exist.**
30 | 
31 | ### Logging
32 | The logging level can be configured at runtime
33 | through the `RUST_LOG` environment variable using the standard
34 | `error|warn|info|debug|trace`.
35 | 
36 | #### Plain/JSON logging
37 | The Pyth agent prints logs as plaintext when running in a terminal and as JSON in non-terminal environments (e.g. when writing to a file).
38 | 
39 | ## Run
40 | ### From Source
41 | The preferred way to run Pyth Agent is by compiling from source. You can run the command below to build and run the agent in a single step.
42 | 
43 | ```bash
44 | cargo run --release -- --config <path/to/config.toml>
45 | ```
46 | 
47 | ### Container
48 | For convenience, a minimal container image is also published to [ECR Public](https://gallery.ecr.aws/pyth-network/agent). An example command for running this container can be found below. Make sure to update the image version to the latest release of Pyth Agent.
49 | 
50 | ```bash
51 | docker run -v /path/to/configdir:/config:z,ro public.ecr.aws/pyth-network/agent:v2.12.0-minimal
52 | ```
53 | 
54 | ## Publishing API
55 | A running agent will expose a WebSocket serving the JRPC publishing API documented [here](https://docs.pyth.network/documentation/publish-data/pyth-client-websocket-api). See `config/config.toml` for related settings.
56 | 
57 | ## Best practices
58 | If your publisher is publishing updates to more than 50 price feeds, it is recommended that you do the following to reduce the connection overhead to the agent:
59 | - Batch your messages together and send them as a single request to the agent (as an array of messages; see the sketch after this list). The agent will respond to the batch
60 | with a single response containing an array of individual responses (in the same order). If batching is not possible, you can disable the `instant_flush` option
61 | in the configuration file to let the agent send the responses every `flush_interval_duration` seconds.
62 | - Do not subscribe to the price schedule. Instead, define a schedule on the client side and send the messages based on your own schedule. Ideally
63 | you should send price updates as soon as you have them to reduce the latency of the data on the Pyth Network.
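
For illustration, a batched request could look like the following (the method and parameter names follow the publishing API documentation linked above; the account keys and numeric values here are placeholders):

```json
[
  {"jsonrpc": "2.0", "method": "update_price", "params": {"account": "<price account pubkey>", "price": 4276250000, "conf": 1500000, "status": "trading"}, "id": 1},
  {"jsonrpc": "2.0", "method": "update_price", "params": {"account": "<price account pubkey>", "price": 99980000, "conf": 52000, "status": "trading"}, "id": 2}
]
```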
64 | 
65 | # Development
66 | ## Unit Testing
67 | A collection of Rust unit tests is provided, run with `cargo test`.
68 | 
69 | ## Integration Testing
70 | In `integration-tests`, we provide end-to-end tests for the Pyth
71 | `agent` binary against a running `solana-test-validator` with the Pyth
72 | oracle deployed to it. Optionally, the accumulator message buffer program
73 | can be deployed and used to validate accumulator CPI correctness
74 | end-to-end (see configuration options below). Prebuilt binaries are
75 | provided manually in `integration-tests/program-binaries` - see below
76 | for more context.
77 | 
78 | ### Running Integration Tests
79 | The tests are implemented as a Python package containing a `pytest`
80 | test suite, managed with [Poetry](https://python-poetry.org/) under
81 | Python >3.10. Use the following commands to install and run them:
82 | 
83 | ```bash
84 | cd integration-tests/
85 | poetry install
86 | poetry run pytest -s --log-cli-level=debug
87 | ```
88 | 
89 | ### Optional Integration Test Configuration
90 | * `USE_ACCUMULATOR`, off by default - when this env is set, the test
91 |   framework also deploys the accumulator program
92 |   (`message_buffer.so`), initializes it and configures the agent to
93 |   make accumulator-enabled calls into the oracle
94 | * `SOLANA_TEST_VALIDATOR`, systemwide `solana-test-validator` by
95 |   default - when this env is set, the specified binary is used as the
96 |   test validator. This is especially useful with `USE_ACCUMULATOR`,
97 |   enabling lifelike accumulator output from the `pythnet` validator.
98 | 
99 | ### Testing Setup Overview
100 | For each test's setup in `integration-tests/tests/test_integration.py`, we:
101 | * Start `solana-test-validator` with prebuilt Solana programs deployed
102 | * Generate and fund test Solana keypairs
103 | * Initialize the oracle program - allocate test price feeds, assign
104 |   publishing permissions. This is done using the dedicated [`program-admin`](https://github.com/pyth-network/program-admin) Python package.
105 | * (Optionally) Initialize the accumulator message buffer program -
106 |   initialize test authority, preallocate message buffers, assign
107 |   allowed program permissions to the oracle - this is done using a
108 |   generated client package in
109 |   `integration-tests/message_buffer_client_codegen`, created using
110 |   [AnchorPy](https://github.com/kevinheavey/anchorpy).
111 | * Build and run the agent
112 | 
113 | This is followed by a specific test scenario,
114 | e.g. `test_update_price_simple` - a couple publishing attempts with
115 | assertions of expected on-chain state.
116 | 
117 | ### Prebuilt Artifact Safety
118 | In `integration-tests/program-binaries` we store the oracle and
119 | accumulator `*.so`s as well as the accumulator program's Anchor IDL JSON
120 | file. These artifacts are guarded against unexpected updates with a
121 | commit hook verifying `md5sum --check canary.md5sum`. Changes to the
122 | `integration-tests/message_buffer_client_codegen` package are much
123 | harder to miss in review and are tracked manually.
124 | 
125 | ### Updating Artifacts
126 | While you are free to experiment with the contents of
127 | `program-binaries`, commits for new or changed artifacts must include
128 | updated checksums in `canary.md5sum`. This can be done
129 | by running `md5sum` in the repository root:
130 | ```shell
131 | $ md5sum integration-tests/program-binaries/*.json > canary.md5sum
132 | $ md5sum integration-tests/program-binaries/*.so >> canary.md5sum # NOTE: Mind the ">>" for appending
133 | ```
134 | 
135 | ### Updating `message_buffer_client_codegen`
136 | After obtaining an updated `message_buffer.so` and `message_buffer_idl.json`, run:
137 | ```shell
138 | $ cd integration-tests/
139 | $ poetry install # If you haven't run this already
140 | $ poetry run anchorpy client-gen --pdas program-binaries/message_buffer_idl.json message_buffer_client_codegen
141 | ```
--------------------------------------------------------------------------------
/src/agent/services/oracle.rs:
--------------------------------------------------------------------------------
1 | //! Oracle
2 | //!
3 | //! The Oracle service is responsible for reacting to all remote/on-chain events.
4 | 
5 | use {
6 |     crate::agent::{
7 |         solana::{
8 |             key_store::KeyStore,
9 |             network::{
10 |                 Config,
11 |                 Network,
12 |             },
13 |         },
14 |         state::oracle::Oracle,
15 |         utils::rpc_multi_client::RpcMultiClient,
16 |     },
17 |     anyhow::Result,
18 |     solana_account_decoder::UiAccountEncoding,
19 |     solana_client::{
20 |         nonblocking::pubsub_client::PubsubClient,
21 |         rpc_config::{
22 |             RpcAccountInfoConfig,
23 |             RpcProgramAccountsConfig,
24 |         },
25 |     },
26 |     solana_sdk::{
27 |         account::Account,
28 |         commitment_config::CommitmentConfig,
29 |         pubkey::Pubkey,
30 |         signature::Keypair,
31 |     },
32 |     std::{
33 |         sync::Arc,
34 |         time::Instant,
35 |     },
36 |     tokio::task::JoinHandle,
37 |     tokio_stream::StreamExt,
38 |     tracing::instrument,
39 |     url::Url,
40 | };
41 | 
42 | #[instrument(skip(config, state))]
43 | pub fn oracle<S>(config: Config, network: Network, state: Arc<S>) -> Vec<JoinHandle<()>>
44 | where
45 |     S: Oracle,
46 |     S: Send + Sync + 'static,
47 | {
48 |     let mut handles = Vec::new();
49 | 
50 |     let Ok(key_store) = KeyStore::new(config.key_store.clone()) else {
51 |         tracing::warn!("Key store not available, Oracle won't start.");
52 |         return handles;
53 |     };
54 | 
55 |     handles.push(tokio::spawn(poller(
56 |         config.clone(),
57 |         network,
58 |         state.clone(),
59 |         key_store.pyth_oracle_program_key,
60 |         key_store.publish_keypair,
61 |         key_store.pyth_price_store_program_key,
62 |         config.oracle.max_lookup_batch_size,
63 |     )));
64 | 
65 |     if config.oracle.subscriber_enabled {
66 |         let min_elapsed_time = config.oracle.subscriber_finished_min_time;
67 |         let sleep_time = config.oracle.subscriber_finished_sleep_time;
68 |         let mut wss_url_index: usize = 0;
69 | 
70 |         #[allow(
71 |             clippy::indexing_slicing,
72 |             reason = "index will always be valid unless wss_urls is empty"
73 |         )]
74 |         handles.push(tokio::spawn(async move {
75 |             loop {
76 |                 let current_time = Instant::now();
77 |                 if let Err(ref err) = subscriber(
78 |                     config.clone(),
79 |                     &config.wss_urls[wss_url_index],
80 |                     network,
81 |                     state.clone(),
82 |                     key_store.pyth_oracle_program_key,
83 |                 )
84 |                 .await
85 |                 {
86 |                     tracing::error!(
87 |                         ?err,
88 |                         "Subscriber url: {} exited unexpectedly",
89 |                         config.wss_urls[wss_url_index]
90 |                     );
91 |                     if current_time.elapsed() < min_elapsed_time {
92 |                         tracing::warn!(?sleep_time, "Subscriber restarting too quickly. Sleeping");
93 |                         tokio::time::sleep(sleep_time).await;
94 |                     }
95 | 
96 |                     // Round robin to the next WSS provider
97 |                     wss_url_index += 1;
98 |                     if wss_url_index >= config.wss_urls.len() {
99 |                         wss_url_index = 0;
100 |                     }
101 |                 }
102 |             }
103 |         }));
104 |     }
105 | 
106 |     handles
107 | }
108 | 
109 | /// When an account RPC Subscription update is received.
110 | ///
111 | /// We check if the account is one we're aware of and tracking, and if so, spawn
112 | /// a small background task that handles that update. We only do this for price
113 | /// accounts, all other accounts are handled below in the poller.
114 | #[instrument(skip(config, wss_url, state))]
115 | async fn subscriber<S>(
116 |     config: Config,
117 |     wss_url: &Url,
118 |     network: Network,
119 |     state: Arc<S>,
120 |     program_key: Pubkey,
121 | ) -> Result<()>
122 | where
123 |     S: Oracle,
124 |     S: Send + Sync + 'static,
125 | {
126 |     // Setup PubsubClient to listen for account changes on the Oracle program.
127 |     let client = PubsubClient::new(wss_url.as_str()).await?;
128 | 
129 |     let (mut notifier, _unsub) = {
130 |         let commitment = config.oracle.commitment;
131 |         let config = RpcProgramAccountsConfig {
132 |             account_config: RpcAccountInfoConfig {
133 |                 commitment: Some(CommitmentConfig { commitment }),
134 |                 encoding: Some(UiAccountEncoding::Base64Zstd),
135 |                 ..Default::default()
136 |             },
137 |             filters: None,
138 |             with_context: Some(true),
139 |             sort_results: None,
140 |         };
141 |         client.program_subscribe(&program_key, Some(config)).await
142 |     }?;
143 | 
144 |     while let Some(update) = notifier.next().await {
145 |         match update.value.account.decode::<Account>() {
146 |             Some(account) => {
147 |                 let pubkey: Pubkey = update.value.pubkey.as_str().try_into()?;
148 |                 let state = state.clone();
149 |                 tokio::spawn(async move {
150 |                     if let Err(err) =
151 |                         Oracle::handle_price_account_update(&*state, network, &pubkey, &account)
152 |                             .await
153 |                     {
154 |                         tracing::error!(?err, "Failed to handle account update");
155 |                     }
156 |                 });
157 |             }
158 | 
159 |             None => {
160 |                 tracing::error!(
161 |                     update = ?update,
162 |                     "Failed to decode account from update.",
163 |                 );
164 |             }
165 |         }
166 |     }
167 | 
168 |     tracing::debug!("Subscriber closed connection.");
169 |     Ok(())
170 | }
171 | 
172 | /// On poll lookup all Pyth Product/Price accounts and sync.
173 | #[instrument(skip(config, publish_keypair, state))]
174 | async fn poller<S>(
175 |     config: Config,
176 |     network: Network,
177 |     state: Arc<S>,
178 |     oracle_program_key: Pubkey,
179 |     publish_keypair: Option<Keypair>,
180 |     pyth_price_store_program_key: Option<Pubkey>,
181 |     max_lookup_batch_size: usize,
182 | ) where
183 |     S: Oracle,
184 |     S: Send + Sync + 'static,
185 | {
186 |     // Setup an RpcClient for manual polling.
187 |     let mut tick = tokio::time::interval(config.oracle.poll_interval_duration);
188 |     let rpc_multi_client = Arc::new(RpcMultiClient::new_with_timeout_and_commitment(
189 |         config.rpc_urls.clone(),
190 |         config.rpc_timeout,
191 |         CommitmentConfig {
192 |             commitment: config.oracle.commitment,
193 |         },
194 |     ));
195 | 
196 |     loop {
197 |         if let Err(err) = async {
198 |             tick.tick().await;
199 |             tracing::debug!("Polling for updates.");
200 |             Oracle::poll_updates(
201 |                 &*state,
202 |                 network,
203 |                 oracle_program_key,
204 |                 publish_keypair.as_ref(),
205 |                 pyth_price_store_program_key,
206 |                 &rpc_multi_client,
207 |                 max_lookup_batch_size,
208 |             )
209 |             .await?;
210 |             Oracle::sync_global_store(&*state, network).await
211 |         }
212 |         .await
213 |         {
214 |             tracing::error!(err = ?err, "Failed to handle poll updates.");
215 |         }
216 |     }
217 | }
--------------------------------------------------------------------------------
/src/agent/services/keypairs.rs:
--------------------------------------------------------------------------------
1 | //! Keypairs
2 | //!
3 | //! The Keypairs Service allows hotloading keys for the running agent.
4 | 
5 | use {
6 |     crate::agent::{
7 |         solana::network::Network,
8 |         state::keypairs::Keypairs,
9 |         utils::rpc_multi_client::RpcMultiClient,
10 |     },
11 |     anyhow::{
12 |         Result,
13 |         bail,
14 |     },
15 |     serde::Deserialize,
16 |     solana_sdk::{
17 |         commitment_config::CommitmentConfig,
18 |         signature::Keypair,
19 |         signer::Signer,
20 |     },
21 |     std::{
22 |         net::SocketAddr,
23 |         sync::Arc,
24 |     },
25 |     tokio::task::JoinHandle,
26 |     url::Url,
27 |     warp::{
28 |         Filter,
29 |         hyper::StatusCode,
30 |         reject::Rejection,
31 |         reply::{
32 |             self,
33 |             WithStatus,
34 |         },
35 |     },
36 | };
37 | 
38 | const DEFAULT_MIN_KEYPAIR_BALANCE_SOL: u64 = 1;
39 | 
40 | pub fn default_bind_address() -> SocketAddr {
41 |     #[allow(clippy::expect_used, reason = "hardcoded value valid")]
42 |     "127.0.0.1:9001"
43 |         .parse()
44 |         .expect("INTERNAL: Could not build default remote keypair loader bind address")
45 | }
46 | 
47 | #[derive(Clone, Debug, Deserialize)]
48 | #[serde(default)]
49 | pub struct Config {
50 |     primary_min_keypair_balance_sol: u64,
51 |     secondary_min_keypair_balance_sol: u64,
52 |     bind_address: SocketAddr,
53 | }
54 | 
55 | impl Default for Config {
56 |     fn default() -> Self {
57 |         Self {
58 |             primary_min_keypair_balance_sol: DEFAULT_MIN_KEYPAIR_BALANCE_SOL,
59 |             secondary_min_keypair_balance_sol: DEFAULT_MIN_KEYPAIR_BALANCE_SOL,
60 |             bind_address: default_bind_address(),
61 |         }
62 |     }
63 | }
64 | 
65 | pub async fn keypairs<S>(
66 |     primary_rpc_urls: Vec<Url>,
67 |     secondary_rpc_urls: Option<Vec<Url>>,
68 |     config: Config,
69 |     state: Arc<S>,
70 | ) -> Vec<JoinHandle<()>>
71 | where
72 |     S: Keypairs,
73 |     S: Send + Sync + 'static,
74 | {
75 |     let ip = config.bind_address.ip();
76 | 
77 |     if !ip.is_loopback() {
78 |         tracing::warn!(
79 |             bind_address = ?config.bind_address,
80 |             "Remote key loader: bind address is not localhost. Make sure the access on the selected address is secure.",
81 |         );
82 |     }
83 | 
84 |     let primary_upload_route = {
85 |         let state = state.clone();
86 |         let rpc_urls = primary_rpc_urls.clone();
87 |         let min_balance = config.primary_min_keypair_balance_sol;
88 |         warp::path!("primary" / "load_keypair")
89 |             .and(warp::post())
90 |             .and(warp::body::content_length_limit(1024))
91 |             .and(warp::body::json())
92 |             .and(warp::path::end())
93 |             .and_then(move |kp: Vec<u8>| {
94 |                 let state = state.clone();
95 |                 let rpc_urls = rpc_urls.clone();
96 |                 async move {
97 |                     let response = handle_new_keypair(
98 |                         state,
99 |                         Network::Primary,
100 |                         kp,
101 |                         min_balance,
102 |                         rpc_urls,
103 |                         "primary",
104 |                     )
105 |                     .await;
106 |                     Result::<WithStatus<&'static str>, Rejection>::Ok(response)
107 |                 }
108 |             })
109 |     };
110 | 
111 |     let secondary_upload_route = warp::path!("secondary" / "load_keypair")
112 |         .and(warp::post())
113 |         .and(warp::body::content_length_limit(1024))
114 |         .and(warp::body::json())
115 |         .and(warp::path::end())
116 |         .and_then(move |kp: Vec<u8>| {
117 |             let state = state.clone();
118 |             let rpc_urls = secondary_rpc_urls.clone();
119 |             async move {
120 |                 if let Some(rpc_urls) = rpc_urls {
121 |                     let min_balance = config.secondary_min_keypair_balance_sol;
122 |                     let response = handle_new_keypair(
123 |                         state,
124 |                         Network::Secondary,
125 |                         kp,
126 |                         min_balance,
127 |                         rpc_urls,
128 |                         "secondary",
129 |                     )
130 |                     .await;
131 |                     Result::<WithStatus<&'static str>, Rejection>::Ok(response)
132 |                 } else {
133 |                     Result::<WithStatus<&'static str>, Rejection>::Ok(reply::with_status(
134 |                         "Secondary network is not active",
135 |                         StatusCode::SERVICE_UNAVAILABLE,
136 |                     ))
137 |                 }
138 |             }
139 |         });
140 | 
141 |     let http_api_jh = {
142 |         let (_, serve) = warp::serve(primary_upload_route.or(secondary_upload_route))
143 |             .bind_with_graceful_shutdown(config.bind_address, async {
144 |                 let _ = crate::agent::EXIT.subscribe().changed().await;
145 |             });
146 |         tokio::spawn(serve)
147 |     };
148 | 
149 |     // WARNING: All jobs spawned here must report their join handles in this vec
150 |     vec![http_api_jh]
151 | }
152 | 
153 | /// Validate and apply a keypair to the specified mut reference,
154 | /// hiding errors in logs.
155 | ///
156 | /// Returns the appropriate HTTP response depending on checks success.
157 | ///
158 | /// NOTE(2023-03-22): Lifetime bounds are currently necessary
159 | /// because of https://github.com/rust-lang/rust/issues/63033
160 | async fn handle_new_keypair<'a, 'b: 'a, S>(
161 |     state: Arc<S>,
162 |     network: Network,
163 |     new_keypair_bytes: Vec<u8>,
164 |     min_keypair_balance_sol: u64,
165 |     rpc_urls: Vec<Url>,
166 |     network_name: &'b str,
167 | ) -> WithStatus<&'static str>
168 | where
169 |     S: Keypairs,
170 | {
171 |     let mut upload_ok = true;
172 |     match Keypair::from_bytes(&new_keypair_bytes) {
173 |         Ok(kp) => match validate_keypair(&kp, min_keypair_balance_sol, rpc_urls.clone()).await {
174 |             Ok(()) => {
175 |                 Keypairs::update_keypair(&*state, network, kp).await;
176 |             }
177 |             Err(e) => {
178 |                 tracing::warn!(
179 |                     network = network_name,
180 |                     error = e.to_string(),
181 |                     "Remote keypair loader: Keypair failed validation",
182 |                 );
183 |                 upload_ok = false;
184 |             }
185 |         },
186 |         Err(e) => {
187 |             tracing::warn!(
188 |                 network = network_name,
189 |                 error = e.to_string(),
190 |                 "Remote keypair loader: Keypair failed validation",
191 |             );
192 |             upload_ok = false;
193 |         }
194 |     }
195 | 
196 |     if upload_ok {
197 |         reply::with_status("keypair upload OK", StatusCode::OK)
198 |     } else {
199 |         reply::with_status(
200 |             "Could not upload keypair. 
See logs for details.", 201 | StatusCode::BAD_REQUEST, 202 | ) 203 | } 204 | } 205 | 206 | /// Validate keypair balance before using it in transactions. 207 | pub async fn validate_keypair( 208 | kp: &Keypair, 209 | min_keypair_balance_sol: u64, 210 | rpc_urls: Vec, 211 | ) -> Result<()> { 212 | let rpc_multi_client = 213 | RpcMultiClient::new_with_commitment(rpc_urls, CommitmentConfig::confirmed()); 214 | let balance_lamports = match rpc_multi_client.get_balance(kp).await { 215 | Ok(balance_lamports) => balance_lamports, 216 | Err(_) => bail!("Could not check keypair's balance"), 217 | }; 218 | 219 | let lamports_in_sol = 1_000_000_000; 220 | 221 | if balance_lamports > min_keypair_balance_sol * lamports_in_sol { 222 | Ok(()) 223 | } else { 224 | Err(anyhow::anyhow!(format!( 225 | "Keypair {} balance of {} SOL below threshold of {} SOL", 226 | kp.pubkey(), 227 | balance_lamports as f64 / lamports_in_sol as f64, 228 | min_keypair_balance_sol 229 | ))) 230 | } 231 | } 232 | -------------------------------------------------------------------------------- /src/agent.rs: -------------------------------------------------------------------------------- 1 | /* ###################################################### System Architecture ####################################################### 2 | 3 | +--------------------------------+ +--------------------------------+ 4 | | RPC Node, e.g. Pythnet | | RPC Node, e.g. Solana Mainnet | 5 | +--------------------------------+ +--------------------------------+ 6 | | ^ ^ | 7 | +---|---------------------|------+ +---|---------------------|------+ 8 | | | Primary Network | | | Secondary Network | | 9 | | v | | | | v | 10 | | +--------+ +----------+ | | +----------+ +--------+ | 11 | | | Oracle | | Exporter | | | | Exporter | | Oracle | | 12 | | +--------+ +----------+ | | +----------+ +--------+ | 13 | | | ^ ^ ^ | | ^ ^ ^ | | 14 | +------|-----------|--|--|-------+ +-----|--|--|----------|---------+ 15 | | | | | | | | | +------------------------+ 16 | | +--|-----|---------------|-----|---+ | | Pythd Websocket API | 17 | | | | Local Store | | |<---------------+-------+ +------+ | 18 | | +--|-----|---------------|-----|---+ | | | |<--|JRPC | | 19 | v | | | | | v | |Adapter| | WS | | 20 | +--------------------|---------------|--------|-----------+ | | |-->|Server| | 21 | | | Global Store | | |---->+-------+ +------+ | 22 | +--------------------|---------------|--------|-----------+ | ^ | | 23 | | | | | +---------------|----|---+ 24 | | | v v | | 25 | +---------------------+ +--------------+ | | 26 | |Remote Keypair Loader| |Metrics Server| | | 27 | +---------------------+ +--------------+ | | 28 | ^ | | | 29 | | v | v 30 | +-------------------------------------------------------------------+ 31 | | User | 32 | +-------------------------------------------------------------------+ 33 | Generated with textik.com on 2023-04-03 34 | 35 | The arrows on the diagram above represent the direction of data flow. 36 | 37 | Publisher data write path: 38 | - The user submits fresh price data to the system using the Pythd JRPC Websocket API. 39 | - The Adapter then transforms this into the Pyth SDK data structures and sends it to the Local Store. 40 | - The Local Store holds the latest price data the user has submitted for each price feed. 41 | - The Exporters periodically query the Local Store for the latest user-submitted data, 42 | and send it to the RPC node. 
They query the Global Store to get the on-chain status to dynamically
43 |   adjust the compute unit price (if enabled).
44 | 
45 | Publisher data read path:
46 | - The Oracles continually fetch data from the RPC node, and pass this to the Global Store.
47 | - The Global Store holds a unified view of the latest observed data from both networks, in the Pyth SDK data structures.
48 | - When a user queries for this data using the Pythd JRPC Websocket API, the Adapter fetches
49 |   the latest data from the Global Store. It transforms this from the Pyth SDK data structures into the
50 |   Pythd JRPC Websocket API data structures.
51 | - The Pythd JRPC Websocket API then sends this data to the user.
52 | 
53 | Remote Keypair Loading:
54 | - If no keypair is found at startup, Exporters poll the Remote Keypair Loader for a signing keypair
55 | - When the keypair is uploaded, it is given to the Exporters
56 | 
57 | Metrics Server:
58 | - Every update in global and local store is reflected in the metrics
59 | - Metrics are served using Prometheus
60 | 
61 | Note that there is an Oracle and Exporter for each network, but only one Local Store and Global Store.
62 | 
63 | ################################################################################################################################## */
64 | use {
65 |     self::{
66 |         pyth::rpc,
67 |         solana::network,
68 |     },
69 |     anyhow::Result,
70 |     config::Config,
71 |     futures_util::future::join_all,
72 |     lazy_static::lazy_static,
73 |     std::sync::Arc,
74 |     tokio::sync::watch,
75 |     tracing::instrument,
76 | };
77 | 
78 | pub mod config;
79 | pub mod legacy_schedule;
80 | pub mod market_schedule;
81 | pub mod metrics;
82 | pub mod pyth;
83 | pub mod services;
84 | pub mod solana;
85 | pub mod state;
86 | pub mod utils;
87 | 
88 | lazy_static! {
89 |     /// A static exit flag to indicate to running threads that we're shutting down. This is used to
90 |     /// gracefully shut down the application.
91 |     ///
92 |     /// We make this global based on the fact that:
93 |     /// - The `Sender` side does not rely on any async runtime.
94 |     /// - Exit logic doesn't really require carefully threading this value through the app.
95 |     /// - The `Receiver` side of a watch channel performs the detection based on if the change
96 |     ///   happened after the subscribe, so it means all listeners should always be notified
97 |     ///   correctly.
98 |     pub static ref EXIT: watch::Sender<bool> = watch::channel(false).0;
99 | }
100 | 
101 | pub struct Agent {
102 |     config: Config,
103 | }
104 | 
105 | impl Agent {
106 |     pub fn new(config: Config) -> Self {
107 |         Agent { config }
108 |     }
109 | 
110 |     pub async fn start(&self) {
111 |         tracing::info!(
112 |             config = format!("{:?}", &self.config),
113 |             version = env!("CARGO_PKG_VERSION"),
114 |             cwd = std::env::current_dir()
115 |                 .map(|p| format!("{}", p.display()))
116 |                 .unwrap_or("".to_owned()),
117 |             "Starting {}",
118 |             env!("CARGO_PKG_NAME"),
119 |         );
120 | 
121 |         if let Err(err) = self.spawn().await {
122 |             tracing::error!(err = ?err, "Agent spawn failed.");
123 |         };
124 |     }
125 | 
126 |     #[instrument(skip(self))]
127 |     async fn spawn(&self) -> Result<()> {
128 |         // job handles
129 |         let mut handles = vec![];
130 | 
131 |         // Create the Application State.
132 |         let state = Arc::new(state::State::new(&self.config).await);
133 | 
134 |         // Spawn the primary network Oracle.
135 |         handles.extend(services::oracle(
136 |             self.config.primary_network.clone(),
137 |             network::Network::Primary,
138 |             state.clone(),
139 |         ));
140 | 
141 |         handles.extend(services::exporter(
142 |             self.config.primary_network.clone(),
143 |             network::Network::Primary,
144 |             state.clone(),
145 |         ));
146 | 
147 |         // Spawn the secondary network Oracle, if needed.
148 |         if let Some(config) = &self.config.secondary_network {
149 |             handles.extend(services::oracle(
150 |                 config.clone(),
151 |                 network::Network::Secondary,
152 |                 state.clone(),
153 |             ));
154 | 
155 |             handles.extend(services::exporter(
156 |                 config.clone(),
157 |                 network::Network::Secondary,
158 |                 state.clone(),
159 |             ));
160 |         }
161 | 
162 |         // Spawn the Lazer exporter
163 |         if let Some(lazer_config) = &self.config.pyth_lazer {
164 |             handles.extend(services::lazer_exporter(
165 |                 lazer_config.clone(),
166 |                 state.clone(),
167 |             ));
168 |         }
169 | 
170 |         // Create the Notifier task for the Pythd RPC.
171 |         handles.push(tokio::spawn(services::notifier(state.clone())));
172 | 
173 |         // Spawn the Pythd API Server
174 |         handles.push(tokio::spawn(rpc::run(
175 |             self.config.pythd_api_server.clone(),
176 |             state.clone(),
177 |         )));
178 | 
179 |         // Spawn the metrics server
180 |         handles.push(tokio::spawn(metrics::spawn(
181 |             self.config.metrics_server.bind_address,
182 |         )));
183 | 
184 |         // Spawn the remote keypair loader endpoint for both networks
185 |         handles.extend(
186 |             services::keypairs(
187 |                 self.config.primary_network.rpc_urls.clone(),
188 |                 self.config
189 |                     .secondary_network
190 |                     .as_ref()
191 |                     .map(|c| c.rpc_urls.clone()),
192 |                 self.config.remote_keypair_loader.clone(),
193 |                 state,
194 |             )
195 |             .await,
196 |         );
197 | 
198 |         // Wait for all tasks to complete
199 |         join_all(handles).await;
200 | 
201 |         Ok(())
202 |     }
203 | }
--------------------------------------------------------------------------------
/config/config.toml:
--------------------------------------------------------------------------------
1 | # Configuration for the JRPC API Websocket Server
2 | [pythd_api_server]
3 | # The address on which the websocket API server will listen.
4 | #
5 | # NOTE: non-loopback addresses must be used carefully, making sure the
6 | # connection is not exposed for unauthorized access.
7 | listen_address = "127.0.0.1:8910"
8 | 
9 | # Size of the buffer of each Server's channel on which `notify_price` events are
10 | # received from the Price state.
11 | # notify_price_tx_buffer = 10000
12 | 
13 | # Size of the buffer of each Server's channel on which `notify_price_sched` events are
14 | # received from the Price state.
15 | # notify_price_sched_tx_buffer = 10000
16 | 
17 | # Whether to flush messages and responses to the client immediately. Once disabled, the
18 | # messages will be flushed every `flush_interval_duration`. Disabling it is useful if
19 | # there are many messages to be sent between the client and the server, to avoid overloading
20 | # the connection.
21 | # instant_flush = true
22 | 
23 | # Flush interval for responses and notifications. This is the maximum time the
24 | # server will wait before flushing the messages to the client. It will have no
25 | # effect if `instant_flush` is set to true.
26 | # flush_interval_duration = "50ms"
27 | 
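# Illustrative usage (not a config option): once the agent is running, the
# API above can be exercised with any websocket client, e.g.:
#   wscat -c ws://127.0.0.1:8910
#   > {"jsonrpc": "2.0", "method": "get_product_list", "id": 1}
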
28 | # Configuration for the primary network this agent will publish data to. In most cases this should be a Pythnet endpoint.
29 | [primary_network]
30 | ### Required fields ###
31 | 
32 | # HTTP(S) endpoints of the RPC node. Public RPC endpoints are usually
33 | # rate-limited for Pythnet, and so a private endpoint should be used in most
34 | # cases. For Pythtest, the public endpoint can be used.
35 | # API calls will cycle through each on failure.
36 | rpc_urls = ["https://api.pythtest.pyth.network"]
37 | 
38 | # WS(S) endpoints of the RPC node. These are used to subscribe to account changes on the network.
39 | # This can be omitted when oracle.subscriber_enabled is set to false.
40 | wss_urls = ["wss://api.pythtest.pyth.network"]
41 | 
42 | # Path to the keypair used to publish price updates. If set to a
43 | # non-existent file path, the system expects a keypair to be loaded
44 | # via the remote keypair loader. If the path is valid, the remote
45 | # keypair loading is disabled.
46 | key_store.publish_keypair_path = "/path/to/keypair.json"
47 | 
48 | # Public key of the oracle program
49 | key_store.pyth_oracle_program_key = "RelevantOracleProgramAddress"
50 | 
51 | # The price store program key
52 | key_store.pyth_price_store_program_key = "3m6sv6HGqEbuyLV84mD7rJn4MAC9LhUa1y1AUNVqcPfr"
53 | 
54 | ### Optional fields of primary/secondary network config ###
55 | 
56 | # Pubkey of accumulator message buffer program ID. Setting this
57 | # value enables accumulator support on publishing transactions.
58 | # key_store.accumulator_key = 
59 | #
60 | # IMPORTANT: When publishing with accumulator_key defined,
61 | # max_batch_size must be decreased to 7
62 | # exporter.max_batch_size = 7
63 | 
64 | # The interval with which to poll account information.
65 | # oracle.poll_interval_duration = "2m"
66 | 
67 | # Whether subscribing to account updates over websocket is enabled
68 | # oracle.subscriber_enabled = true
69 | 
70 | # Ask the Solana RPC for up to this many product/price accounts in a
71 | # single request. Tune this setting if you're experiencing timeouts on
72 | # data fetching. In order to keep concurrent open socket count at bay,
73 | # the batches are looked up sequentially, trading off overall time it
74 | # takes to fetch all symbols.
75 | # oracle.max_lookup_batch_size = 100
76 | 
77 | # Minimum time for a subscriber to run
78 | # oracle.subscriber_finished_min_time = "30s"
79 | # Time to sleep if the subscriber does not run for more than the minimum time
80 | # oracle.subscriber_finished_sleep_time = "1s"
81 | 
82 | # How often to refresh the cached network state (current slot and blockhash).
83 | # It is recommended to set this to slightly less than the network's block time,
84 | # as the slot fetched will be used as the time of the price update.
85 | # exporter.refresh_network_state_interval_duration = "200ms"
86 | 
87 | # Duration of the interval at which to publish updates
88 | # exporter.publish_interval_duration = "1s"
89 | 
90 | # Age after which a price update is considered stale and not published
91 | # exporter.staleness_threshold = "5s"
92 | 
93 | # Wait at least this long before publishing an unchanged price
94 | # state; unchanged price state means only timestamp has changed
95 | # with other state identical to last published state.
96 | # exporter.unchanged_publish_threshold = "3s"
97 | 
98 | # Maximum size of a batch.
99 | # IMPORTANT: the maximum size of 47 only works when the price store program key is passed;
100 | # otherwise, the maximum batch size is 12
101 | exporter.max_batch_size = 47
102 | 
103 | # Number of compute units requested per update_price instruction within the transaction.
104 | # exporter.compute_unit_limit = 60000 105 | 106 | # Price per compute unit offered for update_price transactions 107 | # exporter.compute_unit_price_micro_lamports = 108 | 109 | # Whether the dynamic compute unit pricing is enabled. When enabled, the compute unit price is 110 | # calculated based on the network's previous prioritization fees. 111 | # exporter.dynamic_compute_unit_pricing_enabled = false 112 | 113 | # Maximum compute unit price offered for update_price transactions. Defaults to 114 | # 1 million microlamports. This is a safety measure while using dynamic compute 115 | # unit pricing to prevent the exporter from paying too much for a single transaction. 116 | # exporter.maximum_compute_unit_price_micro_lamports = 1000000 117 | 118 | # Maximum slot gap between the current slot and the oldest slot amongst all the accounts in 119 | # the batch. This is used to calculate the dynamic price per compute unit. When the slot gap 120 | # reaches this number we will use the maximum total_compute_fee for the transaction. 121 | # exporter.maximum_slot_gap_for_dynamic_compute_unit_price = 25 122 | 123 | # Duration of the interval with which to poll the status of transactions. 124 | # It is recommended to set this to a value close to exporter.publish_interval_duration 125 | # exporter.transaction_monitor.poll_interval_duration = "4s" 126 | 127 | # Maximum number of recent transactions to monitor. When this number is exceeded, 128 | # the oldest transactions are no longer monitored. It is recommended to set this to 129 | # a value at least as large as (number of products published / number of products in a batch). 130 | # exporter.transaction_monitor.max_transactions = 100 131 | 132 | ### Optional config sections ### 133 | 134 | ## Metrics server section ## 135 | 136 | # [metrics_server] 137 | # 138 | # Where to serve metrics. Metrics live under "/metrics" 139 | # NOTE: non-loopback addresses must be used carefully, making sure the 140 | # connection is not exposed for unauthorized access. 141 | # bind_address = "127.0.0.1:8888" 142 | 143 | ## Remote keypair loader section. ## 144 | 145 | # [remote_keypair_loader] 146 | # Where to serve the remote keypair loading endpoint, under 147 | # "/primary/load_keypair" and "/secondary/load_keypair". 148 | # 149 | # NOTE: non-loopback addresses must be used carefully, making sure the 150 | # connection is not exposed for unauthorized access. 151 | # bind_address = "127.0.0.1:9001" 152 | 153 | # How many whole SOL a keypair must hold to be considered valid for 154 | # use on a given network. Disabled with 0. 155 | # primary_min_keypair_balance_sol = 1 156 | # secondary_min_keypair_balance_sol = 1 157 | 158 | 159 | ## Channel capacities section.
## 160 | 161 | # These refer to async messaging channels 162 | # internally used by the agent's subroutines 163 | 164 | # [channel_capacities] 165 | # Capacity of the channel used to broadcast shutdown events to all 166 | # components 167 | # shutdown = 10000 168 | 169 | # Capacity of the channel used to send updates from the primary Oracle 170 | # to the Global Store 171 | # primary_oracle_updates = 10000 172 | 173 | # Capacity of the channel used to send updates from the secondary 174 | # Oracle to the Global Store 175 | # secondary_oracle_updates = 10000 176 | 177 | # Capacity of the channel the Pythd API Adapter uses to send lookup 178 | # requests to the Global Store 179 | # global_store_lookup = 10000 180 | 181 | # Capacity of the channel the Pythd API Adapter uses to communicate 182 | # with the Local Store 183 | # local_store_lookup = 10000 184 | 185 | # Capacity of the channel on which the Local Store receives messages 186 | # local_store = 10000 187 | 188 | # Capacity of the channel on which the Pythd API Adapter receives 189 | # messages 190 | # pythd_adapter = 10000 191 | 192 | # Capacity of the slog logging channel. Adjust this value if you see 193 | # complaints about channel capacity from slog 194 | # logger_buffer = 10000 195 | 196 | 197 | ## JRPC API config section. ## 198 | 199 | # [pythd_adapter] 200 | # The duration of the interval at which `notify_price_sched` notifications will be sent. 201 | # Note that this doesn't affect the rate at which transactions are published: 202 | # this is solely a backwards-compatibility API feature. 203 | # notify_price_sched_interval_duration = "1s" 204 | 205 | ## Optional secondary network section ## 206 | 207 | # Configuration for the optional secondary network this agent will 208 | # publish data to. In most cases this should be a Solana endpoint. The 209 | # options correspond to the ones in primary_network 210 | # [secondary_network] 211 | 212 | 213 | ## Configuration for OpenTelemetry ## 214 | [opentelemetry] 215 | 216 | # Timeout duration for the OpenTelemetry exporter 217 | exporter_timeout_duration = "3s" 218 | 219 | # Endpoint URL for the OpenTelemetry exporter 220 | exporter_endpoint = "http://127.0.0.1:4317" 221 | 222 | ## Configuration for Pyth Lazer ## 223 | 224 | # [pyth_lazer] 225 | # URL for the history service 226 | # history_url = "https://pyth-lazer-staging.dourolabs.app" 227 | 228 | # URLs for the Lazer relayers to connect to 229 | # relayer_urls = ["wss://pyth-lazer-staging.dourolabs.app"] 230 | 231 | # Unique identifier for this publisher 232 | # publisher_id = 1 233 | 234 | # Authorization token for connecting to relayers 235 | # authorization_token = "your-auth-token" 236 | 237 | # Path to the publisher's secret key file 238 | # publish_keypair_path = "/path/to/publisher-key.json" 239 | 240 | # Duration between price updates (defaults to 200ms if not specified) 241 | # publish_interval_duration = "200ms" 242 | -------------------------------------------------------------------------------- /src/agent/state/global.rs: -------------------------------------------------------------------------------- 1 | // The Global Store keeps a copy of all the product and price information held in the Pyth 2 | // on-chain aggregation contracts, across both the primary and secondary networks. 3 | // This enables this data to be easily queried by other components.
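// Editorial usage sketch (not part of the original source): the `GlobalStore`
// trait below is blanket-implemented for any type that converts to `&Store`,
// including `State`, so a caller holding `state: Arc<State>` and a freshly
// decoded `update: Update` could write, assuming both are in scope:
//
//     use crate::agent::solana::network::Network;
//
//     state.update(Network::Primary, &update).await?;
//     let data = state.accounts_data(Network::Primary).await?;
//     let metadata = state.accounts_metadata().await?;
//
// Account data is tracked separately per network, while metadata is stored
// only once, since it is expected to be consistent across both networks.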
4 | use { 5 | super::{ 6 | State, 7 | oracle::{ 8 | PriceEntry, 9 | ProductEntry, 10 | }, 11 | }, 12 | crate::agent::{ 13 | metrics::{ 14 | PriceGlobalMetrics, 15 | ProductGlobalMetrics, 16 | }, 17 | solana::network::Network, 18 | }, 19 | anyhow::{ 20 | Result, 21 | anyhow, 22 | }, 23 | prometheus_client::registry::Registry, 24 | smol_str::SmolStr, 25 | solana_sdk::pubkey::Pubkey, 26 | std::{ 27 | collections::{ 28 | BTreeMap, 29 | HashMap, 30 | HashSet, 31 | }, 32 | sync::Arc, 33 | }, 34 | tokio::sync::RwLock, 35 | }; 36 | 37 | /// AllAccountsData contains the full data for the price and product accounts, sourced 38 | /// from the primary network. 39 | #[derive(Debug, Clone, Default)] 40 | pub struct AllAccountsData { 41 | pub product_accounts: HashMap<Pubkey, Arc<ProductEntry>>, 42 | pub price_accounts: HashMap<Pubkey, Arc<PriceEntry>>, 43 | } 44 | 45 | /// AllAccountsMetadata contains the metadata for all the price and product accounts. 46 | /// 47 | /// Important: this relies on the metadata for all accounts being consistent across both networks. 48 | #[derive(Debug, Clone, Default)] 49 | pub struct AllAccountsMetadata { 50 | pub product_accounts_metadata: HashMap<Pubkey, ProductAccountMetadata>, 51 | pub price_accounts_metadata: HashMap<Pubkey, PriceAccountMetadata>, 52 | } 53 | 54 | /// ProductAccountMetadata contains the metadata for a product account. 55 | #[derive(Debug, Clone, Default)] 56 | pub struct ProductAccountMetadata { 57 | /// Attribute dictionary 58 | pub attr_dict: BTreeMap<SmolStr, SmolStr>, 59 | /// Price accounts associated with this product 60 | pub price_accounts: Vec<Pubkey>, 61 | } 62 | 63 | impl From<&ProductEntry> for ProductAccountMetadata { 64 | fn from(product_account: &ProductEntry) -> Self { 65 | ProductAccountMetadata { 66 | attr_dict: product_account 67 | .account_data 68 | .iter() 69 | .map(|(key, val)| (key.into(), val.into())) 70 | .collect(), 71 | price_accounts: product_account.price_accounts.clone(), 72 | } 73 | } 74 | } 75 | 76 | /// PriceAccountMetadata contains the metadata for a price account. 77 | #[derive(Debug, Clone)] 78 | pub struct PriceAccountMetadata { 79 | /// Exponent 80 | pub expo: i32, 81 | } 82 | 83 | impl From<&PriceEntry> for PriceAccountMetadata { 84 | fn from(price_account: &PriceEntry) -> Self { 85 | PriceAccountMetadata { 86 | expo: price_account.expo, 87 | } 88 | } 89 | } 90 | 91 | #[derive(Debug)] 92 | pub enum Update { 93 | ProductAccountUpdate { 94 | account_key: Pubkey, 95 | account: Arc<ProductEntry>, 96 | }, 97 | PriceAccountUpdate { 98 | account_key: Pubkey, 99 | account: Arc<PriceEntry>, 100 | }, 101 | } 102 | 103 | pub struct Store { 104 | /// The actual data on primary network 105 | account_data_primary: RwLock<AllAccountsData>, 106 | 107 | /// The actual data on secondary network 108 | /// This data is not necessarily consistent across both networks, so we need to store it 109 | /// separately. 110 | account_data_secondary: RwLock<AllAccountsData>, 111 | 112 | /// The account metadata for both networks 113 | /// The metadata is consistent across both networks, so we only need to store it once.
114 | account_metadata: RwLock<AllAccountsMetadata>, 115 | 116 | /// Prometheus metrics for products 117 | product_metrics: ProductGlobalMetrics, 118 | 119 | /// Prometheus metrics for prices 120 | price_metrics: PriceGlobalMetrics, 121 | } 122 | 123 | impl Store { 124 | pub fn new(registry: &mut Registry) -> Self { 125 | Store { 126 | account_data_primary: Default::default(), 127 | account_data_secondary: Default::default(), 128 | account_metadata: Default::default(), 129 | product_metrics: ProductGlobalMetrics::new(registry), 130 | price_metrics: PriceGlobalMetrics::new(registry), 131 | } 132 | } 133 | } 134 | 135 | #[cfg(test)] 136 | impl Store { 137 | // Allow Setting Fields during Tests. 138 | pub async fn _account_data_primary(&self, data: AllAccountsData) { 139 | *self.account_data_primary.write().await = data; 140 | } 141 | 142 | pub async fn _account_data_secondary(&self, data: AllAccountsData) { 143 | *self.account_data_secondary.write().await = data; 144 | } 145 | 146 | pub async fn _account_metadata(&self, data: AllAccountsMetadata) { 147 | *self.account_metadata.write().await = data; 148 | } 149 | } 150 | 151 | #[async_trait::async_trait] 152 | pub trait GlobalStore { 153 | async fn update(&self, network: Network, update: &Update) -> Result<()>; 154 | async fn accounts_metadata(&self) -> Result<AllAccountsMetadata>; 155 | async fn accounts_data(&self, network: Network) -> Result<AllAccountsData>; 156 | async fn price_accounts( 157 | &self, 158 | network: Network, 159 | price_ids: HashSet<Pubkey>, 160 | ) -> Result<HashMap<Pubkey, Arc<PriceEntry>>>; 161 | } 162 | 163 | // Allow downcasting State into GlobalStore for functions that depend on the `GlobalStore` service. 164 | impl<'a> From<&'a State> for &'a Store { 165 | fn from(state: &'a State) -> &'a Store { 166 | &state.global_store 167 | } 168 | } 169 | 170 | #[async_trait::async_trait] 171 | impl<T> GlobalStore for T 172 | where 173 | for<'a> &'a T: Into<&'a Store>, 174 | T: Sync, 175 | { 176 | async fn update(&self, network: Network, update: &Update) -> Result<()> { 177 | update_data(self, network, update).await?; 178 | update_metadata(self, update).await?; 179 | Ok(()) 180 | } 181 | 182 | async fn accounts_metadata(&self) -> Result<AllAccountsMetadata> { 183 | Ok(self.into().account_metadata.read().await.clone()) 184 | } 185 | 186 | async fn accounts_data(&self, network: Network) -> Result<AllAccountsData> { 187 | match network { 188 | Network::Primary => Ok(self.into().account_data_primary.read().await.clone()), 189 | Network::Secondary => Ok(self.into().account_data_secondary.read().await.clone()), 190 | } 191 | } 192 | 193 | async fn price_accounts( 194 | &self, 195 | network: Network, 196 | price_ids: HashSet<Pubkey>, 197 | ) -> Result<HashMap<Pubkey, Arc<PriceEntry>>> { 198 | let account_data = match network { 199 | Network::Primary => &self.into().account_data_primary, 200 | Network::Secondary => &self.into().account_data_secondary, 201 | } 202 | .read() 203 | .await; 204 | 205 | price_ids 206 | .into_iter() 207 | .map(|id| { 208 | account_data 209 | .price_accounts 210 | .get(&id) 211 | .cloned() 212 | .map(|v| (id, v)) 213 | .ok_or(anyhow!("price id not found")) 214 | }) 215 | .collect() 216 | } 217 | } 218 | 219 | async fn update_data<S>(state: &S, network: Network, update: &Update) -> Result<()> 220 | where 221 | for<'a> &'a S: Into<&'a Store>, 222 | { 223 | let store: &Store = state.into(); 224 | 225 | // Choose the right account data to update 226 | let account_data = match network { 227 | Network::Primary => &store.account_data_primary, 228 | Network::Secondary => &store.account_data_secondary, 229 | }; 230 | 231 | match update { 232 | Update::ProductAccountUpdate { 233 | account_key, 234
| account, 235 | } => { 236 | let attr_dict = ProductAccountMetadata::from(account.as_ref()).attr_dict; 237 | let maybe_symbol = attr_dict.get("symbol").cloned(); 238 | store.product_metrics.update(account_key, maybe_symbol); 239 | 240 | // Update the stored data 241 | account_data 242 | .write() 243 | .await 244 | .product_accounts 245 | .insert(*account_key, account.clone()); 246 | } 247 | Update::PriceAccountUpdate { 248 | account_key, 249 | account, 250 | } => { 251 | // Sanity-check that we are updating with more recent data 252 | if let Some(existing_price) = account_data.read().await.price_accounts.get(account_key) 253 | { 254 | if existing_price.timestamp > account.timestamp { 255 | // This message is not an error. It is common 256 | // for the primary and secondary networks to have 257 | // a slight difference in their timestamps. 258 | tracing::debug!( 259 | price_key = account_key.to_string(), 260 | existing_timestamp = existing_price.timestamp, 261 | new_timestamp = account.timestamp, 262 | "Global store: ignoring stale update of an existing newer price" 263 | ); 264 | return Ok(()); 265 | } 266 | } 267 | 268 | // Update metrics 269 | store.price_metrics.update(account_key, account); 270 | 271 | // Update the stored data 272 | account_data 273 | .write() 274 | .await 275 | .price_accounts 276 | .insert(*account_key, account.clone()); 277 | } 278 | } 279 | 280 | Ok(()) 281 | } 282 | 283 | async fn update_metadata<S>(state: &S, update: &Update) -> Result<()> 284 | where 285 | for<'a> &'a S: Into<&'a Store>, 286 | { 287 | let store: &Store = state.into(); 288 | 289 | match update { 290 | Update::ProductAccountUpdate { 291 | account_key, 292 | account, 293 | } => { 294 | store 295 | .account_metadata 296 | .write() 297 | .await 298 | .product_accounts_metadata 299 | .insert(*account_key, account.as_ref().into()); 300 | 301 | Ok(()) 302 | } 303 | Update::PriceAccountUpdate { 304 | account_key, 305 | account, 306 | } => { 307 | store 308 | .account_metadata 309 | .write() 310 | .await 311 | .price_accounts_metadata 312 | .insert(*account_key, account.as_ref().into()); 313 | 314 | Ok(()) 315 | } 316 | } 317 | } 318 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License.
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /src/agent/metrics.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::state::local::PriceInfo, 3 | crate::agent::state::oracle::PriceEntry, 4 | lazy_static::lazy_static, 5 | prometheus_client::{ 6 | encoding::{ 7 | EncodeLabelSet, 8 | text::encode, 9 | }, 10 | metrics::{ 11 | counter::Counter, 12 | family::Family, 13 | gauge::Gauge, 14 | }, 15 | registry::Registry, 16 | }, 17 | serde::Deserialize, 18 | smol_str::SmolStr, 19 | solana_sdk::pubkey::Pubkey, 20 | std::{ 21 | net::SocketAddr, 22 | sync::{ 23 | Arc, 24 | atomic::AtomicU64, 25 | }, 26 | }, 27 | tokio::sync::Mutex, 28 | warp::{ 29 | Filter, 30 | Rejection, 31 | Reply, 32 | hyper::StatusCode, 33 | reply, 34 | }, 35 | }; 36 | 37 | pub fn default_bind_address() -> SocketAddr { 38 | #[allow(clippy::unwrap_used, reason = "hardcoded value valid")] 39 | "127.0.0.1:8888".parse().unwrap() 40 | } 41 | 42 | #[derive(Deserialize, Debug)] 43 | pub struct Config { 44 | #[serde(default = "default_bind_address")] 45 | pub bind_address: SocketAddr, 46 | } 47 | 48 | impl Default for Config { 49 | fn default() -> Self { 50 | Self { 51 | bind_address: default_bind_address(), 52 | } 53 | } 54 | } 55 | 56 | lazy_static! { 57 | pub static ref PROMETHEUS_REGISTRY: Arc<Mutex<Registry>> = 58 | Arc::new(Mutex::new(<Registry>::default())); 59 | } 60 | 61 | /// Instantiate a metrics API. 62 | pub async fn spawn(addr: impl Into<SocketAddr> + 'static) { 63 | let metrics_route = warp::path("metrics") 64 | .and(warp::path::end()) 65 | .and_then(move || async move { 66 | let mut buf = String::new(); 67 | #[allow(clippy::needless_borrow, reason = "false positive")] 68 | let response = encode(&mut buf, &&PROMETHEUS_REGISTRY.lock().await) 69 | .map_err(|e| -> Box<dyn std::error::Error> { e.into() }) 70 | .map(|_| Box::new(reply::with_status(buf, StatusCode::OK))) 71 | .unwrap_or_else(|e| { 72 | tracing::error!(err = ?e, "Metrics: Could not gather metrics from registry"); 73 | Box::new(reply::with_status( 74 | "Could not gather metrics. See logs for details".to_string(), 75 | StatusCode::INTERNAL_SERVER_ERROR, 76 | )) 77 | }); 78 | 79 | Result::<Box<dyn Reply>, Rejection>::Ok(response) 80 | }); 81 | 82 | let (_, serve) = warp::serve(metrics_route).bind_with_graceful_shutdown(addr, async { 83 | let _ = crate::agent::EXIT.subscribe().changed().await; 84 | }); 85 | 86 | serve.await 87 | } 88 | 89 | #[derive(Clone, Debug, Hash, PartialEq, Eq, EncodeLabelSet)] 90 | pub struct ProductGlobalLabels { 91 | pubkey: String, 92 | /// Set to "unknown_<product_key>" if not found in the attribute set 93 | symbol: String, 94 | } 95 | 96 | /// Product account global store metrics.
97 | #[derive(Default)] 98 | pub struct ProductGlobalMetrics { 99 | /// How many times the global store has updated this product 100 | update_count: Family<ProductGlobalLabels, Counter>, 101 | } 102 | 103 | impl ProductGlobalMetrics { 104 | pub fn new(registry: &mut Registry) -> Self { 105 | let metrics = Default::default(); 106 | 107 | #[deny(unused_variables)] 108 | let Self { update_count } = &metrics; 109 | 110 | registry.register( 111 | "global_prod_update_count", 112 | "The global store's update count for a product account", 113 | update_count.clone(), 114 | ); 115 | 116 | metrics 117 | } 118 | 119 | pub fn update(&self, product_key: &Pubkey, maybe_symbol: Option<SmolStr>) { 120 | let symbol_string = maybe_symbol 121 | .map(|x| x.into()) 122 | .unwrap_or(format!("unknown_{product_key}")); 123 | 124 | #[deny(unused_variables)] 125 | let Self { update_count } = self; 126 | 127 | update_count 128 | .get_or_create(&ProductGlobalLabels { 129 | pubkey: product_key.to_string(), 130 | symbol: symbol_string, 131 | }) 132 | .inc(); 133 | } 134 | } 135 | 136 | #[derive(Clone, Debug, Hash, PartialEq, Eq, EncodeLabelSet)] 137 | pub struct PriceGlobalLabels { 138 | pubkey: String, 139 | } 140 | 141 | /// Price account global store metrics. Most fields correspond with a subset of PriceEntry fields. 142 | #[derive(Default)] 143 | pub struct PriceGlobalMetrics { 144 | /// Note: the exponent is not applied to this metric 145 | price: Family<PriceGlobalLabels, Gauge>, 146 | 147 | expo: Family<PriceGlobalLabels, Gauge>, 148 | 149 | /// f64 is used to get u64 support. Official docs: 150 | /// https://docs.rs/prometheus-client/latest/prometheus_client/metrics/gauge/struct.Gauge.html#using-atomicu64-as-storage-and-f64-on-the-interface 151 | conf: Family<PriceGlobalLabels, Gauge<f64, AtomicU64>>, 152 | timestamp: Family<PriceGlobalLabels, Gauge>, 153 | 154 | /// Note: the exponent is not applied to this metric 155 | prev_price: Family<PriceGlobalLabels, Gauge>, 156 | prev_conf: Family<PriceGlobalLabels, Gauge<f64, AtomicU64>>, 157 | prev_timestamp: Family<PriceGlobalLabels, Gauge>, 158 | 159 | /// How many times this Price was updated in the global store 160 | update_count: Family<PriceGlobalLabels, Counter>, 161 | } 162 | 163 | impl PriceGlobalMetrics { 164 | pub fn new(registry: &mut Registry) -> Self { 165 | let metrics = Default::default(); 166 | 167 | #[deny(unused_variables)] 168 | let Self { 169 | price, 170 | expo, 171 | conf, 172 | timestamp, 173 | prev_price, 174 | prev_conf, 175 | prev_timestamp, 176 | update_count, 177 | } = &metrics; 178 | 179 | registry.register( 180 | "global_price_price", 181 | "The global store's price value for a price account", 182 | price.clone(), 183 | ); 184 | 185 | registry.register( 186 | "global_price_expo", 187 | "The global store's exponent value for a price account", 188 | expo.clone(), 189 | ); 190 | 191 | registry.register( 192 | "global_price_conf", 193 | "The global store's confidence interval value for a price account", 194 | conf.clone(), 195 | ); 196 | 197 | registry.register( 198 | "global_price_timestamp", 199 | "The global store's publish timestamp value for a price account", 200 | timestamp.clone(), 201 | ); 202 | 203 | registry.register( 204 | "global_price_prev_price", 205 | "The global store's prev_price value for a price account", 206 | prev_price.clone(), 207 | ); 208 | 209 | registry.register( 210 | "global_price_prev_conf", 211 | "The global store's prev_conf (previous confidence interval) value for a price account", 212 | prev_conf.clone(), 213 | ); 214 | 215 | registry.register( 216 | "global_price_prev_timestamp", 217 | "The global store's prev_timestamp (last publish timestamp with status 'trading') value for a price account", 218 | prev_timestamp.clone(), 219 | ); 220 | 221 | registry.register( 222 |
"global_price_update_count", 223 | "The global store's update count for a price account", 224 | update_count.clone(), 225 | ); 226 | 227 | metrics 228 | } 229 | 230 | pub fn update(&self, price_key: &Pubkey, price_account: &PriceEntry) { 231 | #[deny(unused_variables)] 232 | let Self { 233 | price, 234 | expo, 235 | conf, 236 | timestamp, 237 | prev_price, 238 | prev_conf, 239 | prev_timestamp, 240 | update_count, 241 | } = self; 242 | 243 | price 244 | .get_or_create(&PriceGlobalLabels { 245 | pubkey: price_key.to_string(), 246 | }) 247 | .set(price_account.agg.price); 248 | 249 | expo.get_or_create(&PriceGlobalLabels { 250 | pubkey: price_key.to_string(), 251 | }) 252 | .set(i64::from(price_account.expo)); 253 | 254 | conf.get_or_create(&PriceGlobalLabels { 255 | pubkey: price_key.to_string(), 256 | }) 257 | .set(price_account.agg.conf as f64); 258 | 259 | timestamp 260 | .get_or_create(&PriceGlobalLabels { 261 | pubkey: price_key.to_string(), 262 | }) 263 | .set(price_account.timestamp); 264 | 265 | prev_price 266 | .get_or_create(&PriceGlobalLabels { 267 | pubkey: price_key.to_string(), 268 | }) 269 | .set(price_account.prev_price); 270 | 271 | prev_conf 272 | .get_or_create(&PriceGlobalLabels { 273 | pubkey: price_key.to_string(), 274 | }) 275 | .set(price_account.prev_conf as f64); 276 | 277 | prev_timestamp 278 | .get_or_create(&PriceGlobalLabels { 279 | pubkey: price_key.to_string(), 280 | }) 281 | .set(price_account.prev_timestamp); 282 | 283 | update_count 284 | .get_or_create(&PriceGlobalLabels { 285 | pubkey: price_key.to_string(), 286 | }) 287 | .inc(); 288 | } 289 | } 290 | 291 | #[derive(Clone, Debug, Hash, PartialEq, Eq, EncodeLabelSet)] 292 | pub struct PriceLocalLabels { 293 | pubkey: String, 294 | } 295 | 296 | /// Metrics exposed to Prometheus by the local store for each price 297 | #[derive(Default)] 298 | pub struct PriceLocalMetrics { 299 | price: Family, 300 | /// f64 is used to get u64 support. 
Official docs: 301 | /// https://docs.rs/prometheus-client/latest/prometheus_client/metrics/gauge/struct.Gauge.html#using-atomicu64-as-storage-and-f64-on-the-interface 302 | conf: Family<PriceLocalLabels, Gauge<f64, AtomicU64>>, 303 | timestamp: Family<PriceLocalLabels, Gauge>, 304 | 305 | /// How many times this price was updated in the local store 306 | update_count: Family<PriceLocalLabels, Counter>, 307 | } 308 | impl PriceLocalMetrics { 309 | pub fn new(registry: &mut Registry) -> Self { 310 | let metrics = Self::default(); 311 | 312 | #[deny(unused_variables)] 313 | let PriceLocalMetrics { 314 | price, 315 | conf, 316 | timestamp, 317 | update_count, 318 | } = &metrics; 319 | 320 | registry.register( 321 | "local_store_price", 322 | "Price value from the local store", 323 | price.clone(), 324 | ); 325 | registry.register( 326 | "local_store_conf", 327 | "Confidence interval value from the local store", 328 | conf.clone(), 329 | ); 330 | registry.register( 331 | "local_store_timestamp", 332 | "Publish timestamp value from the local store", 333 | timestamp.clone(), 334 | ); 335 | registry.register( 336 | "local_store_update_count", 337 | "How many times we've seen an update for this price in the local store", 338 | update_count.clone(), 339 | ); 340 | 341 | metrics 342 | } 343 | 344 | pub fn update(&self, price_id: &pyth_sdk::Identifier, price_info: &PriceInfo) { 345 | #[deny(unused_variables)] 346 | let Self { 347 | price, 348 | conf, 349 | timestamp, 350 | update_count, 351 | } = self; 352 | 353 | let price_key = Pubkey::from(price_id.to_bytes()); 354 | 355 | price 356 | .get_or_create(&PriceLocalLabels { 357 | pubkey: price_key.to_string(), 358 | }) 359 | .set(price_info.price); 360 | conf.get_or_create(&PriceLocalLabels { 361 | pubkey: price_key.to_string(), 362 | }) 363 | .set(price_info.conf as f64); 364 | timestamp 365 | .get_or_create(&PriceLocalLabels { 366 | pubkey: price_key.to_string(), 367 | }) 368 | .set(price_info.timestamp.and_utc().timestamp()); 369 | update_count 370 | .get_or_create(&PriceLocalLabels { 371 | pubkey: price_key.to_string(), 372 | }) 373 | .inc(); 374 | } 375 | } 376 | -------------------------------------------------------------------------------- /src/agent/utils/rpc_multi_client.rs: -------------------------------------------------------------------------------- 1 | use { 2 | anyhow::bail, 3 | solana_client::{ 4 | nonblocking::rpc_client::RpcClient, 5 | rpc_config::RpcSendTransactionConfig, 6 | rpc_response::RpcPrioritizationFee, 7 | }, 8 | solana_sdk::{ 9 | account::Account, 10 | commitment_config::CommitmentConfig, 11 | pubkey::Pubkey, 12 | signature::{ 13 | Keypair, 14 | Signature, 15 | Signer, 16 | }, 17 | transaction::Transaction, 18 | }, 19 | solana_transaction_status::TransactionStatus, 20 | std::{ 21 | future::Future, 22 | pin::Pin, 23 | sync::Arc, 24 | time::{ 25 | Duration, 26 | Instant, 27 | }, 28 | }, 29 | tokio::sync::Mutex, 30 | url::Url, 31 | }; 32 | 33 | #[derive(Debug, Clone)] 34 | struct EndpointState { 35 | last_failure: Option<Instant>, 36 | is_healthy: bool, 37 | } 38 | 39 | #[derive(Debug)] 40 | struct RoundRobinState { 41 | current_index: usize, 42 | endpoint_states: Vec<EndpointState>, 43 | cooldown_duration: Duration, 44 | } 45 | 46 | impl RoundRobinState { 47 | fn new(endpoint_count: usize, cooldown_duration: Duration) -> Self { 48 | Self { 49 | current_index: 0, 50 | endpoint_states: vec![ 51 | EndpointState { 52 | last_failure: None, 53 | is_healthy: true, 54 | }; 55 | endpoint_count 56 | ], 57 | cooldown_duration, 58 | } 59 | } 60 | } 61 | 62 | pub struct RpcMultiClient { 63 | rpc_clients: Vec<RpcClient>, 64 | round_robin_state: Arc<Mutex<RoundRobinState>>, 65 | } 66 |
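// Editorial construction sketch (not part of the original source): the URLs
// below are placeholders and `keypair` is assumed to be in scope. Calls
// rotate through the endpoints; an endpoint that errors is marked unhealthy
// and is skipped until its cooldown (30s with the plain constructors) has
// elapsed.
//
//     use std::time::Duration;
//     use url::Url;
//
//     let urls = vec![
//         Url::parse("https://rpc-1.example.com").unwrap(),
//         Url::parse("https://rpc-2.example.com").unwrap(),
//     ];
//     let client = RpcMultiClient::new_with_timeout(urls, Duration::from_secs(5));
//     let lamports = client.get_balance(&keypair).await?;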
67 | impl RpcMultiClient { 68 | async fn retry_with_round_robin<'a, T, F>( 69 | &'a self, 70 | operation_name: &str, 71 | operation: F, 72 | ) -> anyhow::Result<T> 73 | where 74 | F: Fn(&'a RpcClient) -> Pin<Box<dyn Future<Output = anyhow::Result<T>> + Send + 'a>>, 75 | { 76 | if self.rpc_clients.is_empty() { 77 | bail!("No RPC clients available for operation: {}", operation_name); 78 | } 79 | 80 | let mut attempts = 0; 81 | // Try all endpoints twice in the worst case. 82 | let max_attempts = self.rpc_clients.len() * 2; 83 | 84 | while attempts < max_attempts { 85 | let index_option = self.get_next_endpoint().await; 86 | 87 | if let Some(index) = index_option { 88 | let future = operation( 89 | self.rpc_clients 90 | .get(index) 91 | .ok_or(anyhow::anyhow!("Index out of bounds"))?, 92 | ); 93 | match future.await { 94 | Ok(result) => { 95 | let mut state = self.round_robin_state.lock().await; 96 | 97 | #[allow(clippy::indexing_slicing, reason = "index is checked")] 98 | if index < state.endpoint_states.len() { 99 | state.endpoint_states[index].is_healthy = true; 100 | state.endpoint_states[index].last_failure = None; 101 | } 102 | return Ok(result); 103 | } 104 | Err(e) => { 105 | #[allow(clippy::indexing_slicing, reason = "index is checked")] 106 | let client = &self.rpc_clients[index]; 107 | tracing::warn!( 108 | "{} error for rpc endpoint {}: {}", 109 | operation_name, 110 | client.url(), 111 | e 112 | ); 113 | let mut state = self.round_robin_state.lock().await; 114 | 115 | #[allow(clippy::indexing_slicing, reason = "index is checked")] 116 | if index < state.endpoint_states.len() { 117 | state.endpoint_states[index].last_failure = Some(Instant::now()); 118 | state.endpoint_states[index].is_healthy = false; 119 | } 120 | } 121 | } 122 | } 123 | attempts += 1; 124 | } 125 | 126 | bail!( 127 | "{} failed for all RPC endpoints after {} attempts", 128 | operation_name, 129 | attempts 130 | ) 131 | } 132 | 133 | async fn get_next_endpoint(&self) -> Option<usize> { 134 | let mut state = self.round_robin_state.lock().await; 135 | let now = Instant::now(); 136 | let start_index = state.current_index; 137 | 138 | let mut found_index = None; 139 | for _ in 0..state.endpoint_states.len() { 140 | let index = state.current_index; 141 | state.current_index = 142 | (state.current_index + 1).checked_rem(state.endpoint_states.len())?; 143 | 144 | // Choose the next endpoint that is either healthy or has waited out the cooldown period. 145 | #[allow(clippy::indexing_slicing, reason = "index is checked")] 146 | let endpoint_state = &state.endpoint_states[index]; 147 | if endpoint_state.is_healthy 148 | || endpoint_state.last_failure.is_none_or(|failure_time| { 149 | now.duration_since(failure_time) >= state.cooldown_duration 150 | }) 151 | { 152 | found_index = Some(index); 153 | break; 154 | } 155 | } 156 | 157 | // If all endpoints have failed, simply move on to the next one.
158 | if found_index.is_none() { 159 | let index = start_index; 160 | state.current_index = (start_index + 1).checked_rem(state.endpoint_states.len())?; 161 | found_index = Some(index); 162 | } 163 | found_index 164 | } 165 | 166 | pub fn new_with_timeout(rpc_urls: Vec<Url>, timeout: Duration) -> Self { 167 | Self::new_with_timeout_and_cooldown(rpc_urls, timeout, Duration::from_secs(30)) 168 | } 169 | 170 | pub fn new_with_timeout_and_cooldown( 171 | rpc_urls: Vec<Url>, 172 | timeout: Duration, 173 | cooldown_duration: Duration, 174 | ) -> Self { 175 | let clients: Vec<RpcClient> = rpc_urls 176 | .iter() 177 | .map(|rpc_url| RpcClient::new_with_timeout(rpc_url.to_string(), timeout)) 178 | .collect(); 179 | let round_robin_state = Arc::new(Mutex::new(RoundRobinState::new( 180 | clients.len(), 181 | cooldown_duration, 182 | ))); 183 | Self { 184 | rpc_clients: clients, 185 | round_robin_state, 186 | } 187 | } 188 | 189 | pub fn new_with_commitment(rpc_urls: Vec<Url>, commitment_config: CommitmentConfig) -> Self { 190 | Self::new_with_commitment_and_cooldown(rpc_urls, commitment_config, Duration::from_secs(30)) 191 | } 192 | 193 | pub fn new_with_commitment_and_cooldown( 194 | rpc_urls: Vec<Url>, 195 | commitment_config: CommitmentConfig, 196 | cooldown_duration: Duration, 197 | ) -> Self { 198 | let clients: Vec<RpcClient> = rpc_urls 199 | .iter() 200 | .map(|rpc_url| RpcClient::new_with_commitment(rpc_url.to_string(), commitment_config)) 201 | .collect(); 202 | let round_robin_state = Arc::new(Mutex::new(RoundRobinState::new( 203 | clients.len(), 204 | cooldown_duration, 205 | ))); 206 | Self { 207 | rpc_clients: clients, 208 | round_robin_state, 209 | } 210 | } 211 | 212 | pub fn new_with_timeout_and_commitment( 213 | rpc_urls: Vec<Url>, 214 | timeout: Duration, 215 | commitment_config: CommitmentConfig, 216 | ) -> Self { 217 | Self::new_with_timeout_commitment_and_cooldown( 218 | rpc_urls, 219 | timeout, 220 | commitment_config, 221 | Duration::from_secs(30), 222 | ) 223 | } 224 | 225 | pub fn new_with_timeout_commitment_and_cooldown( 226 | rpc_urls: Vec<Url>, 227 | timeout: Duration, 228 | commitment_config: CommitmentConfig, 229 | cooldown_duration: Duration, 230 | ) -> Self { 231 | let clients: Vec<RpcClient> = rpc_urls 232 | .iter() 233 | .map(|rpc_url| { 234 | RpcClient::new_with_timeout_and_commitment( 235 | rpc_url.to_string(), 236 | timeout, 237 | commitment_config, 238 | ) 239 | }) 240 | .collect(); 241 | let round_robin_state = Arc::new(Mutex::new(RoundRobinState::new( 242 | clients.len(), 243 | cooldown_duration, 244 | ))); 245 | Self { 246 | rpc_clients: clients, 247 | round_robin_state, 248 | } 249 | } 250 | 251 | pub async fn get_balance(&self, kp: &Keypair) -> anyhow::Result<u64> { 252 | let pubkey = kp.pubkey(); 253 | self.retry_with_round_robin("getBalance", |client| { 254 | Box::pin(async move { 255 | client 256 | .get_balance(&pubkey) 257 | .await 258 | .map_err(anyhow::Error::from) 259 | }) 260 | }) 261 | .await 262 | } 263 | 264 | pub async fn send_transaction_with_config( 265 | &self, 266 | transaction: &Transaction, 267 | ) -> anyhow::Result<Signature> { 268 | self.retry_with_round_robin("sendTransactionWithConfig", |client| { 269 | Box::pin(async move { 270 | client 271 | .send_transaction_with_config( 272 | transaction, 273 | RpcSendTransactionConfig { 274 | skip_preflight: true, 275 | ..RpcSendTransactionConfig::default() 276 | }, 277 | ) 278 | .await 279 | .map_err(anyhow::Error::from) 280 | }) 281 | }) 282 | .await 283 | } 284 | 285 | pub async fn get_signature_statuses( 286 | &self, 287 | signatures_contiguous: &[Signature], 288 | ) ->
anyhow::Result<Vec<Option<TransactionStatus>>> { 289 | self.retry_with_round_robin("getSignatureStatuses", |client| { 290 | Box::pin(async move { 291 | client 292 | .get_signature_statuses(signatures_contiguous) 293 | .await 294 | .map(|statuses| statuses.value) 295 | .map_err(anyhow::Error::from) 296 | }) 297 | }) 298 | .await 299 | } 300 | 301 | pub async fn get_recent_prioritization_fees( 302 | &self, 303 | price_accounts: &[Pubkey], 304 | ) -> anyhow::Result<Vec<RpcPrioritizationFee>> { 305 | self.retry_with_round_robin("getRecentPrioritizationFees", |client| { 306 | Box::pin(async move { 307 | client 308 | .get_recent_prioritization_fees(price_accounts) 309 | .await 310 | .map_err(anyhow::Error::from) 311 | }) 312 | }) 313 | .await 314 | } 315 | 316 | pub async fn get_program_accounts( 317 | &self, 318 | oracle_program_key: Pubkey, 319 | ) -> anyhow::Result<Vec<(Pubkey, Account)>> { 320 | self.retry_with_round_robin("getProgramAccounts", |client| { 321 | Box::pin(async move { 322 | client 323 | .get_program_accounts(&oracle_program_key) 324 | .await 325 | .map_err(anyhow::Error::from) 326 | }) 327 | }) 328 | .await 329 | } 330 | 331 | pub async fn get_account_data(&self, publisher_config_key: &Pubkey) -> anyhow::Result<Vec<u8>> { 332 | self.retry_with_round_robin("getAccountData", |client| { 333 | Box::pin(async move { 334 | client 335 | .get_account_data(publisher_config_key) 336 | .await 337 | .map_err(anyhow::Error::from) 338 | }) 339 | }) 340 | .await 341 | } 342 | 343 | pub async fn get_slot_with_commitment( 344 | &self, 345 | commitment_config: CommitmentConfig, 346 | ) -> anyhow::Result<u64> { 347 | self.retry_with_round_robin("getSlotWithCommitment", |client| { 348 | Box::pin(async move { 349 | client 350 | .get_slot_with_commitment(commitment_config) 351 | .await 352 | .map_err(anyhow::Error::from) 353 | }) 354 | }) 355 | .await 356 | } 357 | 358 | pub async fn get_latest_blockhash(&self) -> anyhow::Result<solana_sdk::hash::Hash> { 359 | self.retry_with_round_robin("getLatestBlockhash", |client| { 360 | Box::pin(async move { 361 | client 362 | .get_latest_blockhash() 363 | .await 364 | .map_err(anyhow::Error::from) 365 | }) 366 | }) 367 | .await 368 | } 369 | } 370 | -------------------------------------------------------------------------------- /src/agent/services/exporter.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::agent::{ 3 | solana::network::{ 4 | self, 5 | Network, 6 | }, 7 | state::{ 8 | exporter::Exporter, 9 | transactions::Transactions, 10 | }, 11 | utils::rpc_multi_client::RpcMultiClient, 12 | }, 13 | anyhow::Result, 14 | futures_util::future, 15 | serde::{ 16 | Deserialize, 17 | Serialize, 18 | }, 19 | solana_sdk::commitment_config::CommitmentConfig, 20 | std::{ 21 | sync::Arc, 22 | time::Duration, 23 | }, 24 | tokio::{ 25 | sync::watch, 26 | task::JoinHandle, 27 | time::Interval, 28 | }, 29 | tracing::instrument, 30 | url::Url, 31 | }; 32 | 33 | #[derive(Clone, Serialize, Deserialize, Debug)] 34 | #[serde(default)] 35 | pub struct Config { 36 | /// Duration of the interval at which to refresh the cached network state (current slot and blockhash). 37 | /// It is recommended to set this to slightly less than the network's block time, 38 | /// as the slot fetched will be used as the time of the price update.
39 | #[serde(with = "humantime_serde")] 40 | pub refresh_network_state_interval_duration: Duration, 41 | /// Duration of the interval at which to publish updates 42 | #[serde(with = "humantime_serde")] 43 | pub publish_interval_duration: Duration, 44 | /// Age after which a price update is considered stale and not published 45 | #[serde(with = "humantime_serde")] 46 | pub staleness_threshold: Duration, 47 | /// Wait at least this long before publishing an unchanged price 48 | /// state; unchanged price state means only timestamp has changed 49 | /// with other state identical to last published state. 50 | #[serde(with = "humantime_serde")] 51 | pub unchanged_publish_threshold: Duration, 52 | /// Maximum size of a batch 53 | pub max_batch_size: usize, 54 | /// Capacity of the channel between the Exporter and the Transaction Monitor 55 | pub inflight_transactions_channel_capacity: usize, 56 | /// Configuration for the Transaction Monitor 57 | pub transaction_monitor: transaction_monitor::Config, 58 | /// Number of compute units requested per update_price instruction within the transaction 59 | /// (i.e., requested units equals `n * compute_unit_limit`, where `n` is the number of update_price 60 | /// instructions) 61 | pub compute_unit_limit: u32, 62 | /// Price per compute unit offered for update_price transactions. If dynamic compute unit pricing is 63 | /// enabled and this value is set, the actual price per compute unit will be the maximum of the 64 | /// network dynamic price and this value. 65 | pub compute_unit_price_micro_lamports: Option<u64>, 66 | /// Enable using dynamic price per compute unit based on the network's previous prioritization 67 | /// fees. 68 | pub dynamic_compute_unit_pricing_enabled: bool, 69 | /// Maximum total compute unit fee paid for a single transaction. Defaults to 0.001 SOL. This 70 | /// is a safety measure while using dynamic compute price to prevent the exporter from paying 71 | /// too much for a single transaction 72 | pub maximum_compute_unit_price_micro_lamports: u64, 73 | /// Maximum slot gap between the current slot and the oldest slot amongst all the accounts in 74 | /// the batch. This is used to calculate the dynamic price per compute unit. When the slot gap 75 | /// reaches this number we will use the maximum total_compute_fee for the transaction. 76 | pub maximum_slot_gap_for_dynamic_compute_unit_price: u64, 77 | } 78 | 79 | impl Default for Config { 80 | fn default() -> Self { 81 | Self { 82 | refresh_network_state_interval_duration: Duration::from_millis(200), 83 | publish_interval_duration: Duration::from_secs(1), 84 | staleness_threshold: Duration::from_secs(5), 85 | unchanged_publish_threshold: Duration::from_secs(3), 86 | max_batch_size: 12, 87 | inflight_transactions_channel_capacity: 10000, 88 | transaction_monitor: Default::default(), 89 | // The largest transactions without accumulator spend around 38k compute units 90 | // and accumulator cpi costs around 10k compute units. We set the limit to 60k 91 | // to have some buffer. 92 | compute_unit_limit: 60000, 93 | compute_unit_price_micro_lamports: None, 94 | dynamic_compute_unit_pricing_enabled: false, 95 | // Maximum compute unit price (as a cap on the dynamic price) 96 | maximum_compute_unit_price_micro_lamports: 1_000_000, 97 | // A publisher update is not included if it is 25 slots behind the current slot. 98 | // Due to the delay in the network (until a block gets confirmed) and potential 99 | // ws issues we add 15 slots to make sure we do not overpay.
100 | maximum_slot_gap_for_dynamic_compute_unit_price: 40, 101 | } 102 | } 103 | } 104 | 105 | #[derive(Debug, Clone, Copy, Default)] 106 | pub struct NetworkState { 107 | pub blockhash: solana_sdk::hash::Hash, 108 | pub current_slot: u64, 109 | } 110 | 111 | /// NetworkStateQuerier periodically queries the current state of the network, 112 | /// fetching the blockhash and slot number. 113 | struct NetworkStateQuerier { 114 | /// The RPC client 115 | rpc_multi_client: RpcMultiClient, 116 | 117 | /// The interval with which to query the network state 118 | query_interval: Interval, 119 | 120 | /// Channel the current network state is sent on 121 | network_state_tx: watch::Sender<NetworkState>, 122 | } 123 | 124 | impl NetworkStateQuerier { 125 | #[instrument( 126 | skip(rpc_urls, rpc_timeout, query_interval), 127 | fields( 128 | rpc_timeout = rpc_timeout.as_millis(), 129 | query_interval = query_interval.period().as_millis(), 130 | ) 131 | )] 132 | pub fn new( 133 | rpc_urls: &[Url], 134 | rpc_timeout: Duration, 135 | query_interval: Interval, 136 | network_state_tx: watch::Sender<NetworkState>, 137 | ) -> Self { 138 | let rpc_multi_client = RpcMultiClient::new_with_timeout(rpc_urls.to_vec(), rpc_timeout); 139 | NetworkStateQuerier { 140 | rpc_multi_client, 141 | query_interval, 142 | network_state_tx, 143 | } 144 | } 145 | 146 | pub async fn run(&mut self) { 147 | loop { 148 | self.query_interval.tick().await; 149 | if let Err(err) = self.query_network_state().await { 150 | tracing::error!(err = ?err, "Network state query failed"); 151 | } 152 | } 153 | } 154 | 155 | #[instrument(skip(self))] 156 | async fn query_network_state(&mut self) -> Result<()> { 157 | // Fetch the blockhash and current slot in parallel 158 | let current_slot_future = self 159 | .rpc_multi_client 160 | .get_slot_with_commitment(CommitmentConfig::confirmed()); 161 | let latest_blockhash_future = self.rpc_multi_client.get_latest_blockhash(); 162 | 163 | let (current_slot_result, latest_blockhash_result) = 164 | future::join(current_slot_future, latest_blockhash_future).await; 165 | 166 | // Send the result on the channel 167 | self.network_state_tx.send(NetworkState { 168 | blockhash: latest_blockhash_result?, 169 | current_slot: current_slot_result?, 170 | })?; 171 | 172 | Ok(()) 173 | } 174 | } 175 | 176 | #[instrument(skip(config, state))] 177 | pub fn exporter<S>(config: network::Config, network: Network, state: Arc<S>) -> Vec<JoinHandle<()>> 178 | where 179 | S: Exporter, 180 | S: Transactions, 181 | S: Send + Sync + 'static, 182 | { 183 | let mut handles = Vec::new(); 184 | 185 | // Create and spawn the network state querier 186 | let (network_state_tx, network_state_rx) = watch::channel(Default::default()); 187 | let mut network_state_querier = NetworkStateQuerier::new( 188 | &config.rpc_urls, 189 | config.rpc_timeout, 190 | tokio::time::interval(config.exporter.refresh_network_state_interval_duration), 191 | network_state_tx, 192 | ); 193 | 194 | handles.push(tokio::spawn(transaction_monitor::transaction_monitor( 195 | config.clone(), 196 | state.clone(), 197 | ))); 198 | 199 | handles.push(tokio::spawn(exporter::exporter( 200 | config, 201 | network, 202 | state, 203 | network_state_rx, 204 | ))); 205 | 206 | handles.push(tokio::spawn( 207 | async move { network_state_querier.run().await }, 208 | )); 209 | 210 | handles 211 | } 212 | 213 | #[allow(clippy::module_inception, reason = "")] 214 | mod exporter { 215 | use { 216 | super::NetworkState, 217 | crate::agent::{ 218 | solana::{ 219 | key_store::KeyStore, 220 | network::{ 221 | Config, 222 | Network,
223 | }, 224 | }, 225 | state::exporter::{ 226 | Exporter, 227 | get_publish_keypair, 228 | publish_batches, 229 | }, 230 | utils::rpc_multi_client::RpcMultiClient, 231 | }, 232 | solana_sdk::commitment_config::CommitmentConfig, 233 | std::sync::Arc, 234 | tokio::sync::watch, 235 | }; 236 | 237 | pub async fn exporter<S>( 238 | config: Config, 239 | network: Network, 240 | state: Arc<S>, 241 | network_state_rx: watch::Receiver<NetworkState>, 242 | ) where 243 | S: Exporter, 244 | S: Send + Sync + 'static, 245 | { 246 | let mut publish_interval = tokio::time::interval(config.exporter.publish_interval_duration); 247 | let mut dynamic_compute_unit_price_update_interval = 248 | tokio::time::interval(config.exporter.publish_interval_duration); 249 | 250 | let rpc_multi_client: Arc<RpcMultiClient> = 251 | Arc::new(RpcMultiClient::new_with_timeout_and_commitment( 252 | config.rpc_urls.clone(), 253 | config.rpc_timeout, 254 | CommitmentConfig { 255 | commitment: config.oracle.commitment, 256 | }, 257 | )); 258 | let Ok(key_store) = KeyStore::new(config.key_store.clone()) else { 259 | tracing::warn!("Key store not available, Exporter won't start."); 260 | return; 261 | }; 262 | 263 | loop { 264 | tokio::select! { 265 | _ = publish_interval.tick() => { 266 | if let Ok(publish_keypair) = get_publish_keypair(&*state, network, key_store.publish_keypair.as_ref()).await { 267 | if let Ok(permissioned_updates) = Exporter::get_permissioned_updates( 268 | &*state, 269 | &publish_keypair, 270 | config.exporter.staleness_threshold, 271 | config.exporter.unchanged_publish_threshold, 272 | ).await { 273 | let publisher_buffer_key = Exporter::get_publisher_buffer_key(&*state).await; 274 | if let Err(err) = publish_batches( 275 | state.clone(), 276 | rpc_multi_client.clone(), 277 | network, 278 | &network_state_rx, 279 | key_store.accumulator_key, 280 | &publish_keypair, 281 | key_store.pyth_oracle_program_key, 282 | key_store.pyth_price_store_program_key, 283 | publisher_buffer_key, 284 | config.exporter.max_batch_size, 285 | config.exporter.staleness_threshold, 286 | config.exporter.compute_unit_limit, 287 | config.exporter.compute_unit_price_micro_lamports, 288 | config.exporter.maximum_compute_unit_price_micro_lamports, 289 | config.exporter.maximum_slot_gap_for_dynamic_compute_unit_price, 290 | config.exporter.dynamic_compute_unit_pricing_enabled, 291 | permissioned_updates, 292 | ).await { 293 | tracing::error!(err = ?err, "Exporter failed to publish."); 294 | } 295 | } 296 | } 297 | } 298 | _ = dynamic_compute_unit_price_update_interval.tick() => { 299 | if config.exporter.dynamic_compute_unit_pricing_enabled { 300 | if let Ok(publish_keypair) = get_publish_keypair(&*state, network, key_store.publish_keypair.as_ref()).await { 301 | if let Err(err) = Exporter::update_recent_compute_unit_price( 302 | &*state, 303 | &publish_keypair, 304 | &rpc_multi_client, 305 | config.exporter.staleness_threshold, 306 | config.exporter.unchanged_publish_threshold, 307 | ).await { 308 | tracing::error!(err = ?err, "Exporter failed to compute unit price."); 309 | } 310 | } 311 | } 312 | } 313 | } 314 | } 315 | } 316 | } 317 | 318 | mod transaction_monitor { 319 | use { 320 | crate::agent::{ 321 | solana::network, 322 | state::transactions::Transactions, 323 | utils::rpc_multi_client::RpcMultiClient, 324 | }, 325 | serde::{ 326 | Deserialize, 327 | Serialize, 328 | }, 329 | std::{ 330 | sync::Arc, 331 | time::Duration, 332 | }, 333 | tracing::instrument, 334 | }; 335 | 336 | #[derive(Clone, Serialize, Deserialize, Debug)] 337 | #[serde(default)] 338 | pub
336 | #[derive(Clone, Serialize, Deserialize, Debug)] 337 | #[serde(default)] 338 | pub struct Config { 339 | /// Duration of the interval with which to poll the status of transactions. 340 | /// It is recommended to set this to a value close to the Exporter's publish_interval. 341 | #[serde(with = "humantime_serde")] 342 | pub poll_interval_duration: Duration, 343 | /// Maximum number of recent transactions to monitor. When this number is exceeded, 344 | /// the oldest transactions are no longer monitored. It is recommended to set this to 345 | /// a value at least as large as (number of products published / number of products in a batch). 346 | pub max_transactions: usize, 347 | } 348 | 349 | impl Default for Config { 350 | fn default() -> Self { 351 | Self { 352 | poll_interval_duration: Duration::from_secs(4), 353 | max_transactions: 100, 354 | } 355 | } 356 | } 357 | 358 | #[instrument(skip(config, state))] 359 | pub async fn transaction_monitor<S>(config: network::Config, state: Arc<S>) 360 | where 361 | S: Transactions, 362 | { 363 | let rpc_multi_client = 364 | RpcMultiClient::new_with_timeout(config.rpc_urls.clone(), config.rpc_timeout); 365 | let mut poll_interval = 366 | tokio::time::interval(config.exporter.transaction_monitor.poll_interval_duration); 367 | 368 | loop { 369 | poll_interval.tick().await; 370 | if let Err(err) = 371 | Transactions::poll_transactions_status(&*state, &rpc_multi_client).await 372 | { 373 | tracing::error!(err = ?err, "Transaction monitor failed."); 374 | } 375 | } 376 | } 377 | } 378 | -------------------------------------------------------------------------------- /src/agent/pyth/rpc.rs: -------------------------------------------------------------------------------- 1 | // This module is responsible for exposing the JRPC-esque websocket API 2 | // documented at https://docs.pyth.network/publish-data/pyth-client-websocket-api 3 | // 4 | // It does not implement the business logic, only exposes a websocket server which 5 | // accepts messages and can return responses in the expected format.
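// For illustration only (the values are invented; the field shapes follow the params and result structs defined below), an update_price call on this API looks like: {"jsonrpc": "2.0", "method": "update_price", "params": {"account": "<price account pubkey>", "price": 4200, "conf": 15, "status": "trading"}, "id": 1}, and a successful subscribe_price call returns a result of the form {"subscription": <id>} (see SubscribeResult), after which the server pushes notify_price notifications for that subscription.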
6 | 7 | use { 8 | super::{ 9 | Conf, 10 | NotifyPrice, 11 | NotifyPriceSched, 12 | Price, 13 | Pubkey, 14 | SubscriptionID, 15 | }, 16 | crate::agent::state, 17 | anyhow::{ 18 | Result, 19 | anyhow, 20 | }, 21 | futures::future::OptionFuture, 22 | futures_util::{ 23 | SinkExt, 24 | stream::{ 25 | SplitSink, 26 | SplitStream, 27 | StreamExt, 28 | }, 29 | }, 30 | jrpc::{ 31 | ErrorCode, 32 | Id, 33 | IdReq, 34 | Request, 35 | Response, 36 | Value, 37 | parse_request, 38 | }, 39 | serde::{ 40 | Deserialize, 41 | Serialize, 42 | de::DeserializeOwned, 43 | }, 44 | serde_this_or_that::{ 45 | as_i64, 46 | as_u64, 47 | }, 48 | std::{ 49 | fmt::Debug, 50 | net::SocketAddr, 51 | sync::Arc, 52 | time::Duration, 53 | }, 54 | tokio::{ 55 | sync::mpsc, 56 | time::Interval, 57 | }, 58 | tracing::instrument, 59 | warp::{ 60 | Filter, 61 | ws::{ 62 | Message, 63 | WebSocket, 64 | Ws, 65 | }, 66 | }, 67 | }; 68 | 69 | #[derive(Serialize, Deserialize, Debug)] 70 | #[serde(rename_all = "snake_case")] 71 | enum Method { 72 | GetProductList, 73 | GetProduct, 74 | GetAllProducts, 75 | SubscribePrice, 76 | NotifyPrice, 77 | SubscribePriceSched, 78 | NotifyPriceSched, 79 | UpdatePrice, 80 | } 81 | 82 | #[derive(Serialize, Deserialize, Debug)] 83 | struct GetProductParams { 84 | account: Pubkey, 85 | } 86 | 87 | #[derive(Serialize, Deserialize, Debug)] 88 | struct SubscribePriceParams { 89 | account: Pubkey, 90 | } 91 | 92 | #[derive(Serialize, Deserialize, Debug)] 93 | struct SubscribePriceSchedParams { 94 | account: Pubkey, 95 | } 96 | 97 | #[derive(Serialize, Deserialize, Debug, Clone)] 98 | struct UpdatePriceParams { 99 | account: Pubkey, 100 | #[serde(deserialize_with = "as_i64")] 101 | price: Price, 102 | #[serde(deserialize_with = "as_u64")] 103 | conf: Conf, 104 | status: String, 105 | } 106 | 107 | #[derive(Serialize, Deserialize, Debug, PartialEq)] 108 | struct SubscribeResult { 109 | subscription: SubscriptionID, 110 | } 111 | 112 | #[derive(thiserror::Error, Debug)] 113 | enum ConnectionError { 114 | #[error("websocket connection closed")] 115 | WebsocketConnectionClosed, 116 | } 117 | 118 | #[derive(Debug)] 119 | enum FlushStrategy { 120 | Instant, 121 | Interval(Interval), 122 | } 123 | 124 | async fn handle_connection<S>( 125 | ws_conn: WebSocket, 126 | state: Arc<S>, 127 | notify_price_tx_buffer: usize, 128 | notify_price_sched_tx_buffer: usize, 129 | instant_flush: bool, 130 | flush_interval_duration: Duration, 131 | ) where 132 | S: state::Prices, 133 | S: Send, 134 | S: Sync, 135 | S: 'static, 136 | { 137 | // Create the channels 138 | let (mut ws_tx, mut ws_rx) = ws_conn.split(); 139 | let (mut notify_price_tx, mut notify_price_rx) = mpsc::channel(notify_price_tx_buffer); 140 | let (mut notify_price_sched_tx, mut notify_price_sched_rx) = 141 | mpsc::channel(notify_price_sched_tx_buffer); 142 | 143 | let mut flush_strategy = match instant_flush { 144 | true => FlushStrategy::Instant, 145 | false => FlushStrategy::Interval(tokio::time::interval(flush_interval_duration)), 146 | }; 147 | 148 | loop { 149 | if let Err(err) = handle_next( 150 | &*state, 151 | &mut ws_tx, 152 | &mut ws_rx, 153 | &mut notify_price_tx, 154 | &mut notify_price_rx, 155 | &mut notify_price_sched_tx, 156 | &mut notify_price_sched_rx, 157 | &mut flush_strategy, 158 | ) 159 | .await 160 | { 161 | if let Some(ConnectionError::WebsocketConnectionClosed) = 162 | err.downcast_ref::<ConnectionError>() 163 | { 164 | tracing::info!("Websocket connection closed."); 165 | return; 166 | } 167 | 168 | tracing::error!(err = ?err, "RPC failed to handle WebSocket message."); 169 | } 170 | } 171 | } 172 |
message."); 169 | } 170 | } 171 | } 172 | 173 | #[allow(clippy::too_many_arguments, reason = "")] 174 | async fn handle_next( 175 | state: &S, 176 | ws_tx: &mut SplitSink, 177 | ws_rx: &mut SplitStream, 178 | notify_price_tx: &mut mpsc::Sender, 179 | notify_price_rx: &mut mpsc::Receiver, 180 | notify_price_sched_tx: &mut mpsc::Sender, 181 | notify_price_sched_rx: &mut mpsc::Receiver, 182 | flush_strategy: &mut FlushStrategy, 183 | ) -> Result<()> 184 | where 185 | S: state::Prices, 186 | { 187 | let optional_flush_tick: OptionFuture<_> = match flush_strategy { 188 | FlushStrategy::Instant => None, 189 | FlushStrategy::Interval(interval) => Some(interval.tick()), 190 | } 191 | .into(); 192 | 193 | tokio::select! { 194 | msg = ws_rx.next() => { 195 | match msg { 196 | Some(body) => match body { 197 | Ok(msg) => { 198 | handle( 199 | ws_tx, 200 | state, 201 | notify_price_tx, 202 | notify_price_sched_tx, 203 | msg, 204 | ) 205 | .await 206 | } 207 | Err(e) => send_error(ws_tx, e.into(), None).await, 208 | }, 209 | None => Err(ConnectionError::WebsocketConnectionClosed)?, 210 | } 211 | } 212 | Some(notify_price) = notify_price_rx.recv() => { 213 | feed_notification(ws_tx, Method::NotifyPrice, Some(notify_price)) 214 | .await 215 | } 216 | Some(notify_price_sched) = notify_price_sched_rx.recv() => { 217 | feed_notification(ws_tx, Method::NotifyPriceSched, Some(notify_price_sched)) 218 | .await 219 | } 220 | Some(_) = optional_flush_tick => { 221 | flush(ws_tx).await 222 | } 223 | }?; 224 | 225 | match flush_strategy { 226 | FlushStrategy::Interval(_) => Ok(()), 227 | FlushStrategy::Instant => flush(ws_tx).await, 228 | } 229 | } 230 | 231 | async fn handle( 232 | ws_tx: &mut SplitSink, 233 | state: &S, 234 | notify_price_tx: &mpsc::Sender, 235 | notify_price_sched_tx: &mpsc::Sender, 236 | msg: Message, 237 | ) -> Result<()> 238 | where 239 | S: state::Prices, 240 | { 241 | // Ignore control and binary messages 242 | if !msg.is_text() { 243 | tracing::debug!("JSON RPC API: skipped non-text message"); 244 | return Ok(()); 245 | } 246 | 247 | // Parse and dispatch the message 248 | match parse(msg).await { 249 | Ok((requests, is_batch)) => { 250 | let mut responses = Vec::with_capacity(requests.len()); 251 | 252 | // Perform requests in sequence and gather responses 253 | for request in requests { 254 | let response = dispatch_and_catch_error( 255 | state, 256 | notify_price_tx, 257 | notify_price_sched_tx, 258 | &request, 259 | ) 260 | .await; 261 | responses.push(response) 262 | } 263 | 264 | // Send an array if we're handling a batch 265 | // request, single response object otherwise 266 | if is_batch { 267 | feed_text(ws_tx, &serde_json::to_string(&responses)?).await?; 268 | } else { 269 | #[allow( 270 | clippy::indexing_slicing, 271 | reason = "single response guaranteed to have one item" 272 | )] 273 | feed_text(ws_tx, &serde_json::to_string(&responses[0])?).await?; 274 | } 275 | } 276 | // The top-level parsing errors are fine to share with client 277 | Err(e) => { 278 | send_error(ws_tx, e, None).await?; 279 | } 280 | } 281 | 282 | Ok(()) 283 | } 284 | 285 | /// Parse a JSONRPC request object or a batch of them. The 286 | /// bool in result informs request handling whether it needs 287 | /// to respond with a single object or an array, to prevent 288 | /// sending unexpected 289 | /// `[{}]` 290 | /// array payloads. 
291 | async fn parse(msg: Message) -> Result<(Vec<Request<Method, Value>>, bool)> { 292 | let s = msg 293 | .to_str() 294 | .map_err(|_| anyhow!("Could not parse message as text"))?; 295 | 296 | let json_value: Value = serde_json::from_str(s)?; 297 | if let Some(array) = json_value.as_array() { 298 | // Interpret request as JSON-RPC 2.0 batch if value is an array 299 | let mut requests = Vec::with_capacity(array.len()); 300 | for maybe_request in array { 301 | // Re-serialize for parse_request(), it's the only 302 | // jrpc parsing function available and it takes 303 | // &str. 304 | let maybe_request_string = serde_json::to_string(maybe_request)?; 305 | requests.push( 306 | parse_request::<Method>(&maybe_request_string) 307 | .map_err(|e| anyhow!("Could not parse message: {}", e.error.message))?, 308 | ); 309 | } 310 | 311 | Ok((requests, true)) 312 | } else { 313 | // Base single request case 314 | let single = parse_request::<Method>(s) 315 | .map_err(|e| anyhow!("Could not parse message: {}", e.error.message))?; 316 | Ok((vec![single], false)) 317 | } 318 | } 319 | 320 | async fn dispatch_and_catch_error<S>( 321 | state: &S, 322 | notify_price_tx: &mpsc::Sender<NotifyPrice>, 323 | notify_price_sched_tx: &mpsc::Sender<NotifyPriceSched>, 324 | request: &Request<Method, Value>, 325 | ) -> Response<serde_json::Value> 326 | where 327 | S: state::Prices, 328 | { 329 | tracing::debug!( 330 | method = ?request.method, 331 | "JSON RPC API: handling request", 332 | ); 333 | 334 | let result = match request.method { 335 | Method::GetProductList => get_product_list(state).await, 336 | Method::GetProduct => get_product(state, request).await, 337 | Method::GetAllProducts => get_all_products(state).await, 338 | Method::UpdatePrice => update_price(state, request).await, 339 | Method::SubscribePrice => subscribe_price(state, notify_price_tx, request).await, 340 | Method::SubscribePriceSched => { 341 | subscribe_price_sched(state, notify_price_sched_tx, request).await 342 | } 343 | Method::NotifyPrice | Method::NotifyPriceSched => { 344 | Err(anyhow!("unsupported method: {:?}", request.method)) 345 | } 346 | }; 347 | 348 | // Consider errors internal, print details to logs.
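// A failed call is reported to the client as a generic JSON-RPC internal error object, e.g. {"jsonrpc": "2.0", "error": {"code": -32603, "message": "<error text>"}, "id": 0} (an illustrative payload: ErrorCode::InternalError corresponds to code -32603, and the id falls back to 0 below when the request did not carry a usable id).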
349 | match result { 350 | Ok(payload) => { 351 | Response::success(request.id.clone().to_id().unwrap_or(Id::from(0)), payload) 352 | } 353 | Err(e) => { 354 | tracing::warn!( 355 | request = ?request, 356 | error = e.to_string(), 357 | "Error handling JSON RPC request", 358 | ); 359 | 360 | Response::error( 361 | request.id.clone().to_id().unwrap_or(Id::from(0)), 362 | ErrorCode::InternalError, 363 | e.to_string(), 364 | None, 365 | ) 366 | } 367 | } 368 | } 369 | 370 | mod get_all_products; 371 | mod get_product; 372 | mod get_product_list; 373 | mod subscribe_price; 374 | mod subscribe_price_sched; 375 | mod update_price; 376 | use { 377 | get_all_products::*, 378 | get_product::*, 379 | get_product_list::*, 380 | subscribe_price::*, 381 | subscribe_price_sched::*, 382 | update_price::*, 383 | }; 384 | 385 | async fn send_error( 386 | ws_tx: &mut SplitSink<WebSocket, Message>, 387 | error: anyhow::Error, 388 | id: Option<Id>, 389 | ) -> Result<()> { 390 | let response: Response<Value> = Response::error( 391 | id.unwrap_or_else(|| Id::from(0)), 392 | ErrorCode::InternalError, 393 | error.to_string(), 394 | None, 395 | ); 396 | feed_text(ws_tx, &response.to_string()).await 397 | } 398 | 399 | async fn feed_notification<T>( 400 | ws_tx: &mut SplitSink<WebSocket, Message>, 401 | method: Method, 402 | params: Option<T>, 403 | ) -> Result<()> 404 | where 405 | T: Sized + Serialize + DeserializeOwned, 406 | { 407 | feed_request(ws_tx, IdReq::Notification, method, params).await 408 | } 409 | 410 | async fn feed_request<I, T>( 411 | ws_tx: &mut SplitSink<WebSocket, Message>, 412 | id: I, 413 | method: Method, 414 | params: Option<T>, 415 | ) -> Result<()> 416 | where 417 | I: Into<IdReq>, 418 | T: Sized + Serialize + DeserializeOwned, 419 | { 420 | let request = Request::with_params(id, method, params); 421 | feed_text(ws_tx, &request.to_string()).await 422 | } 423 | 424 | async fn feed_text(ws_tx: &mut SplitSink<WebSocket, Message>, msg: &str) -> Result<()> { 425 | ws_tx 426 | .feed(Message::text(msg.to_string())) 427 | .await 428 | .map_err(|e| e.into()) 429 | } 430 | 431 | async fn flush(ws_tx: &mut SplitSink<WebSocket, Message>) -> Result<()> { 432 | ws_tx.flush().await.map_err(|e| e.into()) 433 | } 434 | 435 | #[derive(Clone, Debug, Serialize, Deserialize)] 436 | #[serde(default)] 437 | pub struct Config { 438 | /// The address which the websocket API server will listen on. 439 | pub listen_address: String, 440 | /// Size of the buffer of each Server's channel on which `notify_price` events are 441 | /// received from the Price state. 442 | pub notify_price_tx_buffer: usize, 443 | /// Size of the buffer of each Server's channel on which `notify_price_sched` events are 444 | /// received from the Price state. 445 | pub notify_price_sched_tx_buffer: usize, 446 | /// Whether to flush immediately after sending a message or notification. 447 | pub instant_flush: bool, 448 | /// Flush interval duration for the notifications.
449 | #[serde(with = "humantime_serde")] 450 | pub flush_interval_duration: Duration, 451 | } 452 | 453 | impl Default for Config { 454 | fn default() -> Self { 455 | Self { 456 | listen_address: "127.0.0.1:8910".to_string(), 457 | notify_price_tx_buffer: 10000, 458 | notify_price_sched_tx_buffer: 10000, 459 | instant_flush: true, 460 | flush_interval_duration: Duration::from_millis(50), 461 | } 462 | } 463 | } 464 | 465 | #[instrument(skip_all)] 466 | pub async fn run<S>(config: Config, state: Arc<S>) 467 | where 468 | S: state::Prices, 469 | S: Send, 470 | S: Sync, 471 | S: 'static, 472 | { 473 | if let Err(err) = serve(config, state).await { 474 | tracing::error!(err = ?err, "RPC server failed."); 475 | } 476 | } 477 | 478 | async fn serve<S>(config: Config, state: Arc<S>) -> Result<()> 479 | where 480 | S: state::Prices, 481 | S: Send, 482 | S: Sync, 483 | S: 'static, 484 | { 485 | let config = config.clone(); 486 | 487 | let index = { 488 | let config = config.clone(); 489 | warp::path::end() 490 | .and(warp::ws()) 491 | .and(warp::any().map(move || state.clone())) 492 | .and(warp::any().map(move || config.clone())) 493 | .map(|ws: Ws, state: Arc<S>, config: Config| { 494 | ws.on_upgrade(move |conn| async move { 495 | tracing::info!("Websocket user connected."); 496 | handle_connection( 497 | conn, 498 | state, 499 | config.notify_price_tx_buffer, 500 | config.notify_price_sched_tx_buffer, 501 | config.instant_flush, 502 | config.flush_interval_duration, 503 | ) 504 | .await 505 | }) 506 | }) 507 | }; 508 | 509 | let (_, serve) = warp::serve(index).bind_with_graceful_shutdown( 510 | config.listen_address.as_str().parse::<SocketAddr>()?, 511 | async { 512 | let _ = crate::agent::EXIT.subscribe().changed().await; 513 | }, 514 | ); 515 | 516 | tracing::info!( 517 | listen_address = config.listen_address.clone(), 518 | "Starting api server.", 519 | ); 520 | 521 | tokio::task::spawn(serve).await.map_err(|e| e.into()) 522 | } 523 | -------------------------------------------------------------------------------- /integration-tests/program-binaries/message_buffer_idl.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.1.0", 3 | "name": "message_buffer", 4 | "instructions": [ 5 | { 6 | "name": "initialize", 7 | "docs": [ 8 | "Initializes the whitelist and sets its admin to the provided pubkey", 9 | "Once initialized, the authority must sign all further changes to the whitelist."
10 | ], 11 | "accounts": [ 12 | { 13 | "name": "payer", 14 | "isMut": true, 15 | "isSigner": true 16 | }, 17 | { 18 | "name": "whitelist", 19 | "isMut": true, 20 | "isSigner": false, 21 | "pda": { 22 | "seeds": [ 23 | { 24 | "kind": "const", 25 | "type": "string", 26 | "value": "message" 27 | }, 28 | { 29 | "kind": "const", 30 | "type": "string", 31 | "value": "whitelist" 32 | } 33 | ] 34 | } 35 | }, 36 | { 37 | "name": "systemProgram", 38 | "isMut": false, 39 | "isSigner": false 40 | } 41 | ], 42 | "args": [ 43 | { 44 | "name": "admin", 45 | "type": "publicKey" 46 | } 47 | ] 48 | }, 49 | { 50 | "name": "setAllowedPrograms", 51 | "docs": [ 52 | "Sets the programs that are allowed to invoke this program through CPI", 53 | "", 54 | "* `allowed_programs` - Entire list of programs that are allowed to", 55 | "invoke this program through CPI" 56 | ], 57 | "accounts": [ 58 | { 59 | "name": "payer", 60 | "isMut": true, 61 | "isSigner": true 62 | }, 63 | { 64 | "name": "admin", 65 | "isMut": false, 66 | "isSigner": true 67 | }, 68 | { 69 | "name": "whitelist", 70 | "isMut": true, 71 | "isSigner": false, 72 | "pda": { 73 | "seeds": [ 74 | { 75 | "kind": "const", 76 | "type": "string", 77 | "value": "message" 78 | }, 79 | { 80 | "kind": "const", 81 | "type": "string", 82 | "value": "whitelist" 83 | } 84 | ] 85 | }, 86 | "relations": [ 87 | "admin" 88 | ] 89 | } 90 | ], 91 | "args": [ 92 | { 93 | "name": "allowedPrograms", 94 | "type": { 95 | "vec": "publicKey" 96 | } 97 | } 98 | ] 99 | }, 100 | { 101 | "name": "updateWhitelistAdmin", 102 | "docs": [ 103 | "Sets the new admin for the whitelist" 104 | ], 105 | "accounts": [ 106 | { 107 | "name": "payer", 108 | "isMut": true, 109 | "isSigner": true 110 | }, 111 | { 112 | "name": "admin", 113 | "isMut": false, 114 | "isSigner": true 115 | }, 116 | { 117 | "name": "whitelist", 118 | "isMut": true, 119 | "isSigner": false, 120 | "pda": { 121 | "seeds": [ 122 | { 123 | "kind": "const", 124 | "type": "string", 125 | "value": "message" 126 | }, 127 | { 128 | "kind": "const", 129 | "type": "string", 130 | "value": "whitelist" 131 | } 132 | ] 133 | }, 134 | "relations": [ 135 | "admin" 136 | ] 137 | } 138 | ], 139 | "args": [ 140 | { 141 | "name": "newAdmin", 142 | "type": "publicKey" 143 | } 144 | ] 145 | }, 146 | { 147 | "name": "putAll", 148 | "docs": [ 149 | "Put messages into the Accumulator. All messages put for the same", 150 | "`base_account_key` go into the same buffer PDA. The PDA's address is", 151 | "`[allowed_program_auth, MESSAGE, base_account_key]`, where `allowed_program_auth`", 152 | "is the whitelisted pubkey who authorized this call.", 153 | "", 154 | "* `base_account_key` - Pubkey of the original account the", 155 | "`MessageBuffer` is derived from", 156 | "(e.g. pyth price account)", 157 | "* `messages` - Vec of vec of bytes, each representing a message", 158 | "to be hashed and accumulated", 159 | "", 160 | "This ix will write as many of the messages up to the length", 161 | "of the `accumulator_input.data`.", 162 | "If `accumulator_input.data.len() < messages.map(|x| x.len()).sum()`", 163 | "then the remaining messages will be ignored.", 164 | "", 165 | "The current implementation assumes that each invocation of this", 166 | "ix is independent of any previous invocations. 
It will overwrite", 167 | "any existing contents.", 168 | "", 169 | "TODO:", 170 | "- handle updates (\"paging/batches of messages\")", 171 | "" 172 | ], 173 | "accounts": [ 174 | { 175 | "name": "whitelistVerifier", 176 | "accounts": [ 177 | { 178 | "name": "whitelist", 179 | "isMut": false, 180 | "isSigner": false, 181 | "pda": { 182 | "seeds": [ 183 | { 184 | "kind": "const", 185 | "type": "string", 186 | "value": "message" 187 | }, 188 | { 189 | "kind": "const", 190 | "type": "string", 191 | "value": "whitelist" 192 | } 193 | ] 194 | } 195 | }, 196 | { 197 | "name": "cpiCallerAuth", 198 | "isMut": false, 199 | "isSigner": true, 200 | "docs": [ 201 | "PDA representing authorized cpi caller" 202 | ] 203 | } 204 | ] 205 | } 206 | ], 207 | "args": [ 208 | { 209 | "name": "baseAccountKey", 210 | "type": "publicKey" 211 | }, 212 | { 213 | "name": "messages", 214 | "type": { 215 | "vec": "bytes" 216 | } 217 | } 218 | ] 219 | }, 220 | { 221 | "name": "createBuffer", 222 | "docs": [ 223 | "Initializes the buffer account with the `target_size`", 224 | "", 225 | "*`allowed_program_auth` - The whitelisted pubkey representing an", 226 | "allowed program. Used as one of the seeds", 227 | "for deriving the `MessageBuffer` PDA.", 228 | "* `base_account_key` - Pubkey of the original account the", 229 | "`MessageBuffer` is derived from", 230 | "(e.g. pyth price account)", 231 | "*`target_size` - Initial size to allocate for the", 232 | "`MessageBuffer` PDA. `target_size`", 233 | "must be >= HEADER_LEN && <= 10240" 234 | ], 235 | "accounts": [ 236 | { 237 | "name": "whitelist", 238 | "isMut": false, 239 | "isSigner": false, 240 | "pda": { 241 | "seeds": [ 242 | { 243 | "kind": "const", 244 | "type": "string", 245 | "value": "message" 246 | }, 247 | { 248 | "kind": "const", 249 | "type": "string", 250 | "value": "whitelist" 251 | } 252 | ] 253 | }, 254 | "relations": [ 255 | "admin" 256 | ] 257 | }, 258 | { 259 | "name": "admin", 260 | "isMut": true, 261 | "isSigner": true 262 | }, 263 | { 264 | "name": "systemProgram", 265 | "isMut": false, 266 | "isSigner": false 267 | } 268 | ], 269 | "args": [ 270 | { 271 | "name": "allowedProgramAuth", 272 | "type": "publicKey" 273 | }, 274 | { 275 | "name": "baseAccountKey", 276 | "type": "publicKey" 277 | }, 278 | { 279 | "name": "targetSize", 280 | "type": "u32" 281 | } 282 | ] 283 | }, 284 | { 285 | "name": "resizeBuffer", 286 | "docs": [ 287 | "Resizes the buffer account to the `target_size`", 288 | "", 289 | "*`allowed_program_auth` - The whitelisted pubkey representing an", 290 | "allowed program. Used as one of the seeds", 291 | "for deriving the `MessageBuffer` PDA.", 292 | "* `base_account_key` - Pubkey of the original account the", 293 | "`MessageBuffer` is derived from", 294 | "(e.g. pyth price account)", 295 | "*`target_size` - Size to re-allocate for the", 296 | "`MessageBuffer` PDA. 
If increasing the size,", 297 | "max delta of current_size & target_size is 10240", 298 | "*`buffer_bump` - Bump seed for the `MessageBuffer` PDA" 299 | ], 300 | "accounts": [ 301 | { 302 | "name": "whitelist", 303 | "isMut": false, 304 | "isSigner": false, 305 | "pda": { 306 | "seeds": [ 307 | { 308 | "kind": "const", 309 | "type": "string", 310 | "value": "message" 311 | }, 312 | { 313 | "kind": "const", 314 | "type": "string", 315 | "value": "whitelist" 316 | } 317 | ] 318 | }, 319 | "relations": [ 320 | "admin" 321 | ] 322 | }, 323 | { 324 | "name": "admin", 325 | "isMut": true, 326 | "isSigner": true 327 | }, 328 | { 329 | "name": "systemProgram", 330 | "isMut": false, 331 | "isSigner": false 332 | } 333 | ], 334 | "args": [ 335 | { 336 | "name": "allowedProgramAuth", 337 | "type": "publicKey" 338 | }, 339 | { 340 | "name": "baseAccountKey", 341 | "type": "publicKey" 342 | }, 343 | { 344 | "name": "bufferBump", 345 | "type": "u8" 346 | }, 347 | { 348 | "name": "targetSize", 349 | "type": "u32" 350 | } 351 | ] 352 | }, 353 | { 354 | "name": "deleteBuffer", 355 | "docs": [ 356 | "Closes the buffer account and transfers the remaining lamports to the", 357 | "`admin` account", 358 | "", 359 | "*`allowed_program_auth` - The whitelisted pubkey representing an", 360 | "allowed program. Used as one of the seeds", 361 | "for deriving the `MessageBuffer` PDA.", 362 | "* `base_account_key` - Pubkey of the original account the", 363 | "`MessageBuffer` is derived from", 364 | "(e.g. pyth price account)", 365 | "*`buffer_bump` - Bump seed for the `MessageBuffer` PDA" 366 | ], 367 | "accounts": [ 368 | { 369 | "name": "whitelist", 370 | "isMut": false, 371 | "isSigner": false, 372 | "pda": { 373 | "seeds": [ 374 | { 375 | "kind": "const", 376 | "type": "string", 377 | "value": "message" 378 | }, 379 | { 380 | "kind": "const", 381 | "type": "string", 382 | "value": "whitelist" 383 | } 384 | ] 385 | }, 386 | "relations": [ 387 | "admin" 388 | ] 389 | }, 390 | { 391 | "name": "admin", 392 | "isMut": true, 393 | "isSigner": true 394 | } 395 | ], 396 | "args": [ 397 | { 398 | "name": "allowedProgramAuth", 399 | "type": "publicKey" 400 | }, 401 | { 402 | "name": "baseAccountKey", 403 | "type": "publicKey" 404 | }, 405 | { 406 | "name": "bufferBump", 407 | "type": "u8" 408 | } 409 | ] 410 | } 411 | ], 412 | "accounts": [ 413 | { 414 | "name": "MessageBuffer", 415 | "docs": [ 416 | "A MessageBuffer will have the following structure", 417 | "```ignore", 418 | "struct MessageBuffer {", 419 | "header: BufferHeader,", 420 | "messages: [u8; accountInfo.data.len - header.header_len]", 421 | "}", 422 | "```", 423 | "", 424 | "where `MESSAGES_LEN` can be dynamic. There is actually", 425 | "no messages field in the `MessageBuffer` struct definition due to messages", 426 | "needing to be a dynamic length while supporting zero_copy", 427 | "at the same time.", 428 | "", 429 | "A `MessageBuffer` AccountInfo.data will look like:", 430 | "[ <discriminator>, <header>, <messages> ]", 431 | "(0..8) (8..header_len) (header_len...accountInfo.data.len)", 432 | "
", 434 | "", 435 | "NOTE: The defined fields are read as *Little Endian*. The actual messages", 436 | "are read as *Big Endian*. The MessageBuffer fields are only ever read", 437 | "by the Pythnet validator & Hermes so don't need to be in Big Endian", 438 | "for cross-platform compatibility." 439 | ], 440 | "type": { 441 | "kind": "struct", 442 | "fields": [ 443 | { 444 | "name": "bump", 445 | "type": "u8" 446 | }, 447 | { 448 | "name": "version", 449 | "type": "u8" 450 | }, 451 | { 452 | "name": "headerLen", 453 | "type": "u16" 454 | }, 455 | { 456 | "name": "endOffsets", 457 | "docs": [ 458 | "endpoints of every message.", 459 | "ex: [10, 14]", 460 | "=> msg1 = account_info.data[(header_len + 0)..(header_len + 10)]", 461 | "=> msg2 = account_info.data[(header_len + 10)..(header_len + 14)]" 462 | ], 463 | "type": { 464 | "array": [ 465 | "u16", 466 | 255 467 | ] 468 | } 469 | } 470 | ] 471 | } 472 | }, 473 | { 474 | "name": "Whitelist", 475 | "type": { 476 | "kind": "struct", 477 | "fields": [ 478 | { 479 | "name": "bump", 480 | "type": "u8" 481 | }, 482 | { 483 | "name": "admin", 484 | "type": "publicKey" 485 | }, 486 | { 487 | "name": "allowedPrograms", 488 | "type": { 489 | "vec": "publicKey" 490 | } 491 | } 492 | ] 493 | } 494 | } 495 | ], 496 | "errors": [ 497 | { 498 | "code": 6000, 499 | "name": "CallerNotAllowed", 500 | "msg": "CPI Caller not allowed" 501 | }, 502 | { 503 | "code": 6001, 504 | "name": "DuplicateAllowedProgram", 505 | "msg": "Whitelist already contains program" 506 | }, 507 | { 508 | "code": 6002, 509 | "name": "ConversionError", 510 | "msg": "Conversion Error" 511 | }, 512 | { 513 | "code": 6003, 514 | "name": "SerializeError", 515 | "msg": "Serialization Error" 516 | }, 517 | { 518 | "code": 6004, 519 | "name": "WhitelistAdminRequired", 520 | "msg": "Whitelist admin required on initialization" 521 | }, 522 | { 523 | "code": 6005, 524 | "name": "InvalidAllowedProgram", 525 | "msg": "Invalid allowed program" 526 | }, 527 | { 528 | "code": 6006, 529 | "name": "MaximumAllowedProgramsExceeded", 530 | "msg": "Maximum number of allowed programs exceeded" 531 | }, 532 | { 533 | "code": 6007, 534 | "name": "InvalidPDA", 535 | "msg": "Invalid PDA" 536 | }, 537 | { 538 | "code": 6008, 539 | "name": "CurrentDataLengthExceeded", 540 | "msg": "Update data exceeds current length" 541 | }, 542 | { 543 | "code": 6009, 544 | "name": "MessageBufferNotProvided", 545 | "msg": "Message Buffer not provided" 546 | }, 547 | { 548 | "code": 6010, 549 | "name": "MessageBufferTooSmall", 550 | "msg": "Message Buffer is not sufficiently large" 551 | }, 552 | { 553 | "code": 6011, 554 | "name": "FundBumpNotFound", 555 | "msg": "Fund Bump not found" 556 | }, 557 | { 558 | "code": 6012, 559 | "name": "ReallocFailed", 560 | "msg": "Reallocation failed" 561 | }, 562 | { 563 | "code": 6013, 564 | "name": "TargetSizeDeltaExceeded", 565 | "msg": "Target size too large for reallocation/initialization. Max delta is 10240" 566 | }, 567 | { 568 | "code": 6014, 569 | "name": "MessageBufferUninitialized", 570 | "msg": "MessageBuffer Uninitialized" 571 | } 572 | ] 573 | } 574 | --------------------------------------------------------------------------------