├── .dockerignore ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── README.md ├── analyze.py ├── ansible ├── add_github_key.sh ├── ansible.cfg ├── inventory └── setup_playbook.yml ├── docker-compose.yaml ├── run-shredstream.sh ├── rustfmt.toml ├── shrink_raydium.py └── src ├── app.rs ├── arb.rs ├── benchmark.rs ├── constants.rs ├── entry_processor.rs ├── graduates_processor.rs ├── lib.rs ├── listener.rs ├── logger.rs ├── main.rs ├── pump.rs ├── raydium.rs ├── recovery.rs ├── service.rs ├── shred.rs ├── shred_processor.rs ├── structs.rs └── util.rs /.dockerignore: -------------------------------------------------------------------------------- 1 | **/packets.json 2 | **/shredstream-proxy 3 | target 4 | shredstream-proxy/target 5 | shredstream-proxy/**/**/*.rs.bk 6 | shredstream-proxy/** 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | logs 2 | *.log 3 | .env 4 | context.txt 5 | *.json 6 | packets.json 7 | shredstream-proxy 8 | /target 9 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "shreds" 3 | version = "1.0.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | anyhow = "1.0.86" 8 | bincode = "1.3.3" 9 | env_logger = "0.11.5" 10 | log = "0.4.22" 11 | borsh = { version = "=1.5.1", features = ["derive"] } 12 | reed-solomon-erasure = "6.0.0" 13 | serde = "1.0.208" 14 | serde_json = "1.0.125" 15 | solana-entry = "=1.16.27" 16 | solana-ledger = "=1.16.27" 17 | solana-program = "=1.16.27" 18 | solana-sdk = "=1.16.27" 19 | solana-client = "=1.16.27" 20 | timed = "0.2.1" 21 | tokio = { version = "1.39.2", features = ["full"] } 22 | raydium_amm = { version = "0.3.0", git = "https://github.com/piotrostr/raydium-amm" } 23 | raydium-library = { git = "https://github.com/piotrostr/raydium-library" } 24 | anchor-lang = "=0.29.0" 25 | anchor-client = "=0.29.0" 26 | indicatif = "0.17" 27 | reqwest = { version = "0.12.4", features = ["json", "stream"] } 28 | futures-util = "0.3.30" 29 | dotenv = "0.15.0" 30 | once_cell = "1.19.0" 31 | clap = { version = "4.5.13", features = ["derive"] } 32 | chrono = "0.4.38" 33 | hex = "0.4.3" 34 | rayon = "1.10.0" 35 | lazy_static = "1.5.0" 36 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.79 AS builder 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | build-essential \ 5 | pkg-config \ 6 | libssl-dev \ 7 | curl \ 8 | unzip \ 9 | libclang-dev \ 10 | clang \ 11 | && rm -rf /var/lib/apt/lists/* 12 | 13 | # Install latest protoc 14 | RUN PROTOC_VERSION=$(curl -s https://api.github.com/repos/protocolbuffers/protobuf/releases/latest | grep -oP '"tag_name": "\K(.*)(?=")') && \ 15 | PROTOC_VERSION=${PROTOC_VERSION#v} && \ 16 | curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip" && \ 17 | unzip "protoc-${PROTOC_VERSION}-linux-x86_64.zip" -d /usr/local && \ 18 | rm "protoc-${PROTOC_VERSION}-linux-x86_64.zip" 19 | 20 | # Copy manifests and build only the dependencies to cache them 21 | RUN USER=root cargo new --bin shreds 22 | WORKDIR /shreds 23 | COPY ./Cargo.toml ./Cargo.toml 24 | COPY ./Cargo.lock ./Cargo.lock 25 | 26 | RUN cargo build --release 27 | RUN 
rm -f src/*.rs ./target/release/deps/shreds*
28 | # (stale dummy-build artifacts removed above so the real sources get rebuilt)
29 | # Copy over source
30 | COPY ./src ./src
31 | 
32 | # Build for release
33 | RUN cargo build --release
34 | 
35 | FROM ubuntu:22.04 AS runner
36 | 
37 | RUN apt-get update && apt-get install -y \
38 |     ca-certificates \
39 |     openssl \
40 |     libssl-dev \
41 |     && rm -rf /var/lib/apt/lists/*
42 | 
43 | COPY --from=builder /shreds/target/release/shreds .
44 | 
45 | EXPOSE 8001/udp
46 | # a subcommand is required by the CLI; graduates-mode matches the docker-compose default
47 | CMD ["./shreds", "--bind", "0.0.0.0:8001", "graduates-mode"]
48 | 
49 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # shreds
2 | 
3 | >[!NOTE]
4 | >This is now officially available at Jito: https://github.com/jito-labs/shredstream-proxy/blob/master/examples/deshred.rs
5 | 
6 | 
7 | use the solana shredstream (via jito's shredstream proxy) to get transactions as fast as humanly possible
8 | 
9 | this works pretty well; it has some minor issues and drops some shred
10 | entries, but it is good enough to snipe stuff
11 | 
12 | ## TODOs
13 | 
14 | - safe math sometimes fails with overflow when calculating the swap amount; generally there will have to
15 |   be some check whether a given transaction can succeed
16 | - ```
17 |   1724687831476 INFO [shreds::listener] metrics: "{\n \"fec_set_failure_count\": 2,\n
18 |   \"fec_set_success_count\": 2836,\n \"fec_sets_remaining\": 1756,\n \"fec_sets_summary\": {\n
19 |   \"incomplete_count\": 1754,\n \"total_count\": 1756\n },\n \"total_collected_coding\":
20 |   102021,\n \"total_collected_data\": 106572,\n \"total_processed_data\": 100661\n}"
21 |   ```
22 |   ^ a lot of the fec sets are hanging: those receive 0 data shreds and only a few coding shreds. TODO: check whether these are fec sets that have already been processed and continue to receive coding shreds
23 | - take volume into account when calculating profit and best size (flash loans might be an
24 |   option)
25 | - there is missing data, likely due to an error somewhere; it could be the coding shreds that are
26 |   yet to be used
27 | - it might be useful to receive a single data tick and inspect how the shreds are forwarded;
28 |   technically, shreds alone could be used to maintain the ledger altogether, they are the only thing that is needed
29 | - pool calculation might be a bit off, some of the operations are unsupported too
30 | - the account keys in `update_pool_state_swap` matter: swap base in can come
31 |   with a flipped user account source
32 |   and destination, and then it swaps the token in and out
33 |   * when swapping PC2Coin it flips; this might not matter as much as the accounts
34 | - orca is yet to be implemented, this is to be done after raydium is working
35 | 
36 | * this bug:
37 | 
38 | ```
39 | 
40 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 97/130: io error: failed to fill whole buffer
41 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 98/130: io error: failed to fill whole buffer
42 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 99/130: io error: failed to fill whole buffer
43 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 100/130: io error: failed to fill whole buffer
44 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 101/130: io error: failed to fill whole buffer
45 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 102/130: io error: failed to fill whole buffer
46 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 103/130: io error: failed to fill whole buffer
47 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 104/130: io error: failed to fill whole buffer
48 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 105/130: io error: failed to fill whole buffer
49 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 106/130: io error: failed to fill whole buffer
50 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 107/130: io error: failed to fill whole buffer
51 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 108/130: io error: failed to fill whole buffer
52 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 109/130: io error: failed to fill whole buffer
53 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 110/130: io error: failed to fill whole buffer
54 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 111/130: io error: failed to fill whole buffer
55 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 112/130: io error: failed to fill whole buffer
56 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 113/130: io error: failed to fill whole buffer
57 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 114/130: io error: failed to fill whole buffer
58 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 115/130: io error: failed to fill whole buffer
59 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 116/130: io error: failed to fill whole buffer
60 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 117/130: io error: failed to fill whole buffer
61 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 118/130: io error: failed to fill whole buffer
62 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 119/130: io error: failed to fill whole buffer
63 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 120/130: io error: failed to fill whole buffer
64 | 1724789637115 ERROR [shreds::shred] Failed to deserialize entry 121/130: io error: failed to fill whole buffer
65 | 
66 | ```
67 | 
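## Running

a rough sketch of how the pieces fit together (based on `run-shredstream.sh`
and `docker-compose.yaml`; the keypair path and regions are assumptions,
adjust them to your setup):

```sh
# 1) start the jito shredstream proxy, forwarding shreds to localhost:8001
./run-shredstream.sh

# 2) consume the shreds in one of the modes (subcommands are kebab-case)
cargo run --release -- --bind 0.0.0.0:8001 pump-mode
```
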
68 | ## In the future
69 | 
70 | - in the algo, ensure that ATAs are already created; this saves some ixs
--------------------------------------------------------------------------------
/analyze.py:
--------------------------------------------------------------------------------
1 | import re
2 | from datetime import datetime
3 | 
4 | pubsub_pattern = r"\[(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z).*pubsub: (\d+)"
5 | shreds_pattern = r"\[(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z).*algo: (\d+)"
6 | 
7 | pubsub_transactions = []
8 | shreds_transactions = []
9 | 
10 | 
11 | # Function to parse the timestamp
12 | def parse_timestamp(ts_string):
13 |     return datetime.strptime(ts_string, "%Y-%m-%dT%H:%M:%S.%fZ")
14 | 
15 | log_streams = open("shreds.log").read()  # path to the captured logs; adjust as needed
16 | # Process the log streams
17 | for line in log_streams.split("\n"):
18 |     pubsub_match = re.search(pubsub_pattern, line)
19 |     shreds_match = re.search(shreds_pattern, line)
20 | 
21 |     if pubsub_match:
22 |         timestamp, transaction_id = pubsub_match.groups()
23 |         pubsub_transactions.append((parse_timestamp(timestamp), int(transaction_id)))
24 |     elif shreds_match:
25 |         timestamp, transaction_id = shreds_match.groups()
26 |         shreds_transactions.append((parse_timestamp(timestamp), int(transaction_id)))
27 | 
28 | # Count transactions
29 | pubsub_count = len(pubsub_transactions)
30 | shreds_count = len(shreds_transactions)
31 | 
32 | print(f"Number of pubsub transactions: {pubsub_count}")
33 | print(f"Number of shreds transactions: {shreds_count}")
34 | 
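# NOTE (assumption): the regexes above expect a bracketed ISO timestamp at the
# start of each line, e.g.
#   [2024-08-26T19:33:57.115Z] INFO [shreds::listener] algo: 1724787237115 <sig>
# the in-repo logger itself prints epoch millis, so these logs presumably come
# from a collector that prepends ISO timestamps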
35 | # Compare timing if both types of transactions exist
36 | if pubsub_count > 0 and shreds_count > 0:
37 |     pubsub_start = min(t[0] for t in pubsub_transactions)
38 |     pubsub_end = max(t[0] for t in pubsub_transactions)
39 |     shreds_start = min(t[0] for t in shreds_transactions)
40 |     shreds_end = max(t[0] for t in shreds_transactions)
41 | 
42 |     pubsub_duration = (pubsub_end - pubsub_start).total_seconds()
43 |     shreds_duration = (shreds_end - shreds_start).total_seconds()
44 | 
45 |     print(f"Pubsub duration: {pubsub_duration:.3f} seconds")
46 |     print(f"Shreds duration: {shreds_duration:.3f} seconds")
47 | 
48 |     if shreds_duration < pubsub_duration:
49 |         print("Shreds was faster")
50 |     elif shreds_duration > pubsub_duration:
51 |         print("Pubsub was faster")
52 |     else:
53 |         print("Pubsub and Shreds had the same duration")
54 | else:
55 |     print("Not enough data to compare timing")
56 | 
--------------------------------------------------------------------------------
/ansible/add_github_key.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | if [ "$#" -ne 1 ]; then
4 |     echo "Usage: $0 <ssh_public_key>"
5 |     exit 1
6 | fi
7 | 
8 | SSH_KEY="$1"
9 | 
10 | response=$(gh api \
11 |     --method POST \
12 |     -H "Accept: application/vnd.github+json" \
13 |     -H "X-GitHub-Api-Version: 2022-11-28" \
14 |     /user/keys \
15 |     -f "title=Ansible-generated SSH Key" \
16 |     -f "key=$SSH_KEY")
17 | 
18 | if [ $? -eq 0 ]; then
19 |     echo "SSH key successfully added to GitHub"
20 | else
21 |     echo "Failed to add SSH key to GitHub"
22 |     echo "Response: $response"
23 |     exit 1
24 | fi
25 | 
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory = ./inventory
3 | host_key_checking = False
--------------------------------------------------------------------------------
/ansible/inventory:
--------------------------------------------------------------------------------
1 | [your_target_hosts]
2 | 84.32.188.15 ansible_user=root
--------------------------------------------------------------------------------
/ansible/setup_playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install Docker, setup SSH, clone repo, and build app
3 |   hosts: your_target_hosts
4 |   become: yes
5 | 
6 |   vars:
7 |     github_repo: "shreds"
8 |     github_username: "piotrostr"
9 |     ssh_key_dir: "/{{ ansible_user }}/.ssh"
10 |     ssh_key_path: "/{{ ansible_user }}/.ssh/id_rsa"
11 | 
12 |   tasks:
13 |     - name: Update apt cache
14 |       apt:
15 |         update_cache: yes
16 | 
17 |     - name: Install required packages
18 |       apt:
19 |         name:
20 |           - apt-transport-https
21 |           - ca-certificates
22 |           - curl
23 |           - software-properties-common
24 |           - openssl
25 |         state: present
26 | 
27 |     - name: Add Docker GPG apt Key
28 |       apt_key:
29 |         url: https://download.docker.com/linux/ubuntu/gpg
30 |         state: present
31 | 
32 |     - name: Add Docker Repository
33 |       apt_repository:
34 |         repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable
35 |         state: present
36 | 
37 |     - name: Install Docker
38 |       apt:
39 |         name: docker-ce
40 |         state: present
41 | 
42 |     - name: Ensure Docker service is running
43 |       service:
44 |         name: docker
45 |         state: started
46 |         enabled: yes
47 | 
48 |     - name: Ensure .ssh directory exists
49 |       file:
50 |         path: "{{ ssh_key_dir }}"
51 |         state: directory
52 |         mode: "0700"
53 |       become: yes
54 | 
55 |     - name: Generate SSH key
56 |       openssh_keypair:
57 |         path: "{{ ssh_key_path }}"
58 |         type: rsa
59 |         size: 4096
60 |       become: yes
61 | 
62 |     - name: Get SSH public key
63 |       command: "cat {{ ssh_key_path }}.pub"
64 |       register: public_key
65 | 
66 |     - name: Display public key and wait for user
67 |       pause:
68 |         prompt: |
69 |           Please add the following public key to your GitHub account:
70 | 
71 |           {{ public_key.stdout }}
72 | 
73 |           Use the provided bash script to add the key:
74 |           ./add_github_key.sh "{{ public_key.stdout }}"
75 | 
76 |           Press ENTER when you've added the key to continue...
77 | 
78 |     - name: Clone GitHub repository
79 |       git:
80 |         repo: "git@github.com:{{ github_username }}/{{ github_repo }}.git"
81 |         dest: /{{ ansible_user }}/{{ github_repo }}
82 |         accept_hostkey: yes
83 |         key_file: "{{ ssh_key_path }}"
84 | 
85 |     - name: Clone pump-rs repository
86 |       git:
87 |         repo: "git@github.com:{{ github_username }}/pump-rs.git"
88 |         dest: /{{ ansible_user }}/pump-rs
89 |         accept_hostkey: yes
90 |         key_file: "{{ ssh_key_path }}"
91 | 
92 |     - name: Copy over auth.json
93 |       copy:
94 |         src: ../auth.json
95 |         dest: /{{ ansible_user }}/{{ github_repo }}/auth.json
96 | 
97 |     - name: Copy over FuckU.json
98 |       copy:
99 |         src: ../FuckU.json
100 |         dest: /{{ ansible_user }}/{{ github_repo }}/FuckU.json
101 | 
102 |     - name: Copy over .env
103 |       copy:
104 |         src: ../.env
105 |         dest: /{{ ansible_user }}/{{ github_repo }}/.env
106 | 
107 |     - name: Copy over raydium.json
108 |       copy:
109 |         src: ../raydium.json
110 |         dest: /{{ ansible_user }}/{{ github_repo }}/raydium.json
111 | 
112 |     - name: Print SSH instructions
113 |       debug:
114 |         msg: "all gucci, `ssh {{ ansible_user }}@{{ inventory_hostname }}`"
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 |   proxy:
3 |     platform: linux/amd64
4 |     image: jitolabs/jito-shredstream-proxy
5 |     command: shredstream
6 |     environment:
7 |       - RUST_LOG=info
8 |       - BLOCK_ENGINE_URL=https://amsterdam.mainnet.block-engine.jito.wtf
9 |       - AUTH_KEYPAIR=auth.json
10 |       - DESIRED_REGIONS=amsterdam,ny,tokyo,frankfurt,slc
11 |       - DEST_IP_PORTS=0.0.0.0:8001
12 |     volumes:
13 |       - ./auth.json:/app/auth.json
14 |     network_mode: host
15 | 
16 |   shreds:
17 |     image: piotrostr/shreds
18 |     platform: linux/amd64
19 |     build:
20 |       context: .
21 |       dockerfile: Dockerfile
22 |     container_name: shreds
23 |     command: ["./shreds", "graduates-mode"]
24 |     volumes:
25 |       - type: bind
26 |         source: ./raydium.json
27 |         target: /raydium.json
28 |       - type: bind
29 |         source: ./shreds.log
30 |         target: /shreds.log
31 |       - .env:/.env
32 |       - ./FAST.json:/FAST.json
33 |     ports:
34 |       - "8001:8001/udp"
35 |     network_mode: host
36 | 
37 |   cli:
38 |     image: piotrostr/shreds
39 |     platform: linux/amd64
40 |     build:
41 |       context: .
42 |       dockerfile: Dockerfile
43 |     volumes:
44 |       - type: bind
45 |         source: ./raydium.json
46 |         target: /raydium.json
47 |       - type: bind
48 |         source: ./shreds.log
49 |         target: /shreds.log
50 |       - type: bind
51 |         source: ./packets.json
52 |         target: /packets.json
53 |       - .env:/.env
54 |       - ./FAST.json:/FAST.json
55 |     command: ["./shreds", "--help"]
56 |     ports:
57 |       - "8001:8001/udp"
58 | 
--------------------------------------------------------------------------------
/run-shredstream.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | RUST_LOG=info cargo run --release --bin jito-shredstream-proxy -- shredstream \
4 |     --block-engine-url https://amsterdam.mainnet.block-engine.jito.wtf \
5 |     --auth-keypair $HOME/solana/keys/auth.json \
6 |     --desired-regions amsterdam \
7 |     --dest-ip-ports 0.0.0.0:8001
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | max_width = 78
--------------------------------------------------------------------------------
/shrink_raydium.py:
--------------------------------------------------------------------------------
1 | import json
2 | 
3 | if __name__ == "__main__":
4 |     want_mints = [
5 |         "3S8qX1MsMqRbiwKg2cQyx7nis1oHMgaCuc9c4VfvVdPN",
6 |         "3B5wuUrMEi5yATD7on46hKfej3pfmd7t1RKgrsN3pump",
7 |         "CTg3ZgYx79zrE1MteDVkmkcGniiFrK1hJ6yiabropump",
8 |         "GiG7Hr61RVm4CSUxJmgiCoySFQtdiwxtqf64MsRppump",
9 |         "EbZh3FDVcgnLNbh1ooatcDL1RCRhBgTKirFKNoGPpump",
10 |         "GYKmdfcUmZVrqfcH1g579BGjuzSRijj3LBuwv79rpump",
11 |         "8Ki8DpuWNxu9VsS3kQbarsCWMcFGWkzzA8pUPto9zBd5",
12 |         "HiHULk2EEF6kGfMar19QywmaTJLUr3LA1em8DyW1pump",
13 |     ]
14 | 
15 |     with open("./raydium.json", "r") as f:
16 |         data = json.load(f)
17 |         del data["official"]
18 |         data["unOfficial"] = [
19 |             i
20 |             for i in data["unOfficial"]
21 |             if i["baseMint"] in want_mints or i["quoteMint"] in want_mints
22 |         ]
23 | 
24 |     with open("./raydium.json", "w") as f:
25 |         json.dump(data, f, indent=2)
--------------------------------------------------------------------------------
/src/app.rs:
--------------------------------------------------------------------------------
1 | use clap::{arg, Parser};
2 | use serde::Deserialize;
3 | 
4 | #[derive(Parser, Debug)]
5 | pub struct App {
6 |     #[clap(flatten)]
7 |     pub args: Args,
8 | 
9 |     #[clap(subcommand)]
10 |     pub command: Command,
11 | }
12 | 
13 | #[derive(Parser, Debug, Deserialize)]
14 | #[command(name = "shreds", version = "1.0", author = "piotrostr")]
15 | pub struct Args {
16 |     /// Sets the bind address
17 |     #[arg(short, long, default_value = "0.0.0.0:8001")]
18 |     pub bind: Option<String>,
19 | 
20 |     /// URL to send webhooks to
21 |     #[arg(long, default_value = "http://0.0.0.0:6969")]
22 |     pub post_url: Option<String>,
23 | 
24 |     #[arg(short, long, default_value = "stdout")]
25 |     pub log_target: Option<String>,
26 | }
27 | 
28 | #[derive(Debug, Parser)]
29 | pub enum Command {
30 |     /// Run in save mode (dump packets to file)
31 |     Save,
32 | 
33 |     /// Download Raydium JSON
34 |     Download,
35 | 
36 |     /// Run benchmark
37 |     Benchmark,
38 | 
39 |     /// Run in pubsub mode
40 |     Pubsub,
41 | 
42 |     /// Run in service mode (sends pump webhooks to `post_url`)
43 |     PumpMode,
44 | 
45 |     /// Run in arb mode (listens for raydium txs)
46 |     ArbMode,
47 | 
48 |     /// Raydium new listings
49 |     GraduatesMode,
50 | }
51 | 
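// Example invocations implied by the clap definitions above (illustrative,
// not part of the original source; clap renders subcommands in kebab-case,
// cf. `graduates-mode` in docker-compose.yaml):
//
//   shreds --bind 0.0.0.0:8001 save
//   shreds --bind 0.0.0.0:8001 --post-url http://0.0.0.0:6969 pump-mode
//   shreds benchmark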
--------------------------------------------------------------------------------
/src/arb.rs:
--------------------------------------------------------------------------------
1 | use crate::constants;
2 | use crate::raydium::{
3 |     calculate_price, initialize_raydium_amm_pools, parse_amm_instruction,
4 |     swap_exact_amount, ParsedAccounts, ParsedAmmInstruction, RaydiumAmmPool,
5 | };
6 | use crate::util::env;
7 | use log::{error, info, warn};
8 | use solana_client::nonblocking::rpc_client::RpcClient;
9 | use solana_sdk::instruction::CompiledInstruction;
10 | use solana_sdk::message::VersionedMessage;
11 | use solana_sdk::pubkey::Pubkey;
12 | use solana_sdk::signature::Signature;
13 | use solana_sdk::transaction::VersionedTransaction;
14 | use std::collections::HashMap;
15 | use std::str::FromStr;
16 | use std::sync::Arc;
17 | use tokio::sync::RwLock;
18 | 
19 | pub fn get_mints_of_interest() -> Vec<Pubkey> {
20 |     [
21 |         "3S8qX1MsMqRbiwKg2cQyx7nis1oHMgaCuc9c4VfvVdPN", // mother
22 |         "EbZh3FDVcgnLNbh1ooatcDL1RCRhBgTKirFKNoGPpump", // gringo
23 |         "GYKmdfcUmZVrqfcH1g579BGjuzSRijj3LBuwv79rpump", // wdog
24 |         "8Ki8DpuWNxu9VsS3kQbarsCWMcFGWkzzA8pUPto9zBd5", // lockin
25 |         "HiHULk2EEF6kGfMar19QywmaTJLUr3LA1em8DyW1pump", // ddc
26 |         "GiG7Hr61RVm4CSUxJmgiCoySFQtdiwxtqf64MsRppump", // scf
27 |         "3B5wuUrMEi5yATD7on46hKfej3pfmd7t1RKgrsN3pump", // billy
28 |         "CTg3ZgYx79zrE1MteDVkmkcGniiFrK1hJ6yiabropump", // neiro
29 |     ]
30 |     .iter()
31 |     .map(|p| Pubkey::from_str(p).unwrap())
32 |     .collect::<Vec<Pubkey>>()
33 | }
34 | 
35 | #[derive(Debug, Default)]
36 | pub struct PoolsState {
37 |     pub raydium_cp_count: u64,
38 |     pub raydium_amm_count: u64,
39 |     pub orca_count: u64,
40 |     pub orca_token_to_pool: HashMap<Pubkey, Vec<OrcaPool>>,
41 |     // program_id to pool
42 |     pub raydium_pools: HashMap<Pubkey, Arc<RwLock<RaydiumAmmPool>>>,
43 |     // mint to program_id vec
44 |     pub raydium_pools_by_mint: HashMap<Pubkey, Vec<Pubkey>>,
45 |     pub raydium_pool_ids: Vec<Pubkey>,
46 |     pub signatures: Vec<Signature>,
47 | }
48 | 
49 | #[derive(Debug, Default)]
50 | pub struct OrcaPool {}
51 | 
52 | impl PoolsState {
53 |     /// Initialize the state of the pools, this has to be called every time
54 |     /// after the struct is created for arb
55 |     pub async fn initialize(&mut self) {
56 |         initialize_raydium_amm_pools(
57 |             &RpcClient::new(env("RPC_URL").to_string()),
58 |             self,
59 |             get_mints_of_interest(),
60 |         )
61 |         .await;
62 |         info!(
63 |             "Initialized Raydium AMM pools: {}",
64 |             self.raydium_pools.len()
65 |         );
66 | 
67 |         // TODO orca etc
68 |     }
69 | 
70 |     pub fn reduce_orca_tx(&mut self, _tx: VersionedTransaction) {
71 |         // TODO: Implement Orca transaction processing
72 |     }
73 | 
74 |     pub async fn reduce_raydium_amm_tx(
75 |         &mut self,
76 |         tx: Arc<VersionedTransaction>,
77 |     ) {
78 |         let raydium_amm_program_id = Pubkey::from_str(constants::RAYDIUM_AMM)
79 |             .expect("Failed to parse Raydium AMM program ID");
80 | 
81 |         for (idx, instruction) in tx.message.instructions().iter().enumerate()
82 |         {
83 |             let program_id = tx.message.static_account_keys()
84 |                 [instruction.program_id_index as usize];
85 | 
86 |             if program_id == raydium_amm_program_id {
87 |                 match parse_amm_instruction(&instruction.data) {
88 |                     Ok(parsed_instruction) => {
89 |                         self.process_raydium_instruction(
90 |                             &parsed_instruction,
91 |                             instruction,
92 |                             &tx.message,
93 |                             &tx.signatures[0],
94 |                         )
95 |                         .await;
96 |                     }
97 |                     Err(e) => {
98 |                         warn!("Error parsing instruction {}: {:?}", idx, e);
99 |                     }
100 |                 }
101 |             }
102 |         }
103 |     }
104 | 
105 |     pub fn reduce_raydium_cp_tx(&mut self, _tx: VersionedTransaction) {
106 |         panic!("Not implemented yet");
107 |     }
108 | 
109 |     async fn process_raydium_instruction(
110 |         &mut self,
111 |         parsed_instruction: &ParsedAmmInstruction,
112 |         instruction: &CompiledInstruction,
113
| message: &VersionedMessage, 114 | signature: &Signature, 115 | ) { 116 | let amm_id_index = 1; // Amm account index 117 | let pool_coin_token_account_index = 5; // Pool Coin Token Account index 118 | let pool_pc_token_account_index = 6; // Pool Pc Token Account index 119 | 120 | let amm_id = 121 | get_account_key_safely(message, instruction, amm_id_index); 122 | let pool_coin_vault = get_account_key_safely( 123 | message, 124 | instruction, 125 | pool_coin_token_account_index, 126 | ); 127 | let pool_pc_vault = get_account_key_safely( 128 | message, 129 | instruction, 130 | pool_pc_token_account_index, 131 | ); 132 | if amm_id.is_none() 133 | || pool_coin_vault.is_none() 134 | || pool_pc_vault.is_none() 135 | { 136 | warn!( 137 | "{} Failed to get account keys for Raydium AMM instruction", 138 | signature.to_string() 139 | ); 140 | return; 141 | } 142 | let amm_id = amm_id.unwrap(); 143 | let pool_coin_vault = pool_coin_vault.unwrap(); 144 | let pool_pc_vault = pool_pc_vault.unwrap(); 145 | 146 | match parsed_instruction { 147 | ParsedAmmInstruction::SwapBaseOut(swap_instruction) => { 148 | self.update_pool_state_swap( 149 | &ParsedAccounts { 150 | amm_id, 151 | pool_coin_vault, 152 | pool_pc_vault, 153 | }, 154 | swap_instruction.max_amount_in, 155 | swap_instruction.amount_out, 156 | false, 157 | signature, 158 | ) 159 | .await; 160 | } 161 | ParsedAmmInstruction::SwapBaseIn(swap_instruction) => { 162 | self.update_pool_state_swap( 163 | &ParsedAccounts { 164 | amm_id, 165 | pool_coin_vault, 166 | pool_pc_vault, 167 | }, 168 | swap_instruction.amount_in, 169 | swap_instruction.minimum_amount_out, 170 | true, 171 | signature, 172 | ) 173 | .await; 174 | } 175 | // Handle other instruction types... 176 | _ => { 177 | warn!("Unhandled instruction type: {:?}", parsed_instruction) 178 | } 179 | } 180 | } 181 | 182 | async fn update_pool_state_swap( 183 | &mut self, 184 | parsed_accounts: &ParsedAccounts, 185 | amount_specified: u64, 186 | other_amount_threshold: u64, 187 | is_swap_base_in: bool, 188 | signature: &Signature, 189 | ) { 190 | if let Some(pool) = self.raydium_pools.get(&parsed_accounts.amm_id) { 191 | let mut pool = pool.write().await; 192 | if !(pool.amm_keys.amm_coin_vault 193 | == parsed_accounts.pool_coin_vault 194 | && pool.amm_keys.amm_pc_vault 195 | == parsed_accounts.pool_pc_vault) 196 | { 197 | error!( 198 | "Vault mismatch: {} {} {} {} {}", 199 | pool.amm_keys.amm_pool, 200 | pool.amm_keys.amm_coin_vault, 201 | parsed_accounts.pool_coin_vault, 202 | pool.amm_keys.amm_pc_vault, 203 | parsed_accounts.pool_pc_vault, 204 | ); 205 | return; 206 | }; 207 | 208 | let swap_direction = if is_swap_base_in { 209 | raydium_amm::math::SwapDirection::Coin2PC 210 | } else { 211 | raydium_amm::math::SwapDirection::PC2Coin 212 | }; 213 | 214 | let (pc_amount, coin_amount) = if is_swap_base_in { 215 | let swap_amount_out = swap_exact_amount( 216 | pool.state.pool_pc_vault_amount, 217 | pool.state.pool_coin_vault_amount, 218 | pool.state.swap_fee_numerator, 219 | pool.state.swap_fee_denominator, 220 | swap_direction, 221 | amount_specified, 222 | true, 223 | ); 224 | 225 | if is_swap_base_in { 226 | ( 227 | pool.state 228 | .pool_pc_vault_amount 229 | .saturating_add(swap_amount_out), 230 | pool.state 231 | .pool_coin_vault_amount 232 | .saturating_sub(amount_specified), 233 | ) 234 | } else { 235 | ( 236 | pool.state 237 | .pool_pc_vault_amount 238 | .saturating_sub(amount_specified), 239 | pool.state 240 | .pool_coin_vault_amount 241 | .saturating_add(swap_amount_out), 242 | ) 243 | } 
244 |             } else {
245 |                 let swap_amount_in = swap_exact_amount(
246 |                     pool.state.pool_pc_vault_amount,
247 |                     pool.state.pool_coin_vault_amount,
248 |                     pool.state.swap_fee_numerator,
249 |                     pool.state.swap_fee_denominator,
250 |                     swap_direction,
251 |                     other_amount_threshold,
252 |                     false,
253 |                 );
254 | 
255 |                 if is_swap_base_in {
256 |                     (
257 |                         pool.state
258 |                             .pool_pc_vault_amount
259 |                             .saturating_add(other_amount_threshold),
260 |                         pool.state
261 |                             .pool_coin_vault_amount
262 |                             .saturating_sub(swap_amount_in),
263 |                     )
264 |                 } else {
265 |                     (
266 |                         pool.state
267 |                             .pool_pc_vault_amount
268 |                             .saturating_sub(swap_amount_in),
269 |                         pool.state
270 |                             .pool_coin_vault_amount
271 |                             .saturating_add(other_amount_threshold),
272 |                     )
273 |                 }
274 |             };
275 | 
276 |             let sol_is_pc = pool.amm_keys.amm_pc_mint
277 |                 == Pubkey::from_str(constants::WSOL).expect("pubkey");
278 |             let sol_is_coin = pool.amm_keys.amm_coin_mint
279 |                 == Pubkey::from_str(constants::WSOL).expect("pubkey");
280 | 
281 |             let sol_amount = if sol_is_coin {
282 |                 if is_swap_base_in {
283 |                     amount_specified
284 |                 } else {
285 |                     other_amount_threshold
286 |                 }
287 |             } else if sol_is_pc {
288 |                 if is_swap_base_in {
289 |                     other_amount_threshold
290 |                 } else {
291 |                     amount_specified
292 |                 }
293 |             } else {
294 |                 0
295 |             } as f64
296 |                 / 10u64.pow(9u32) as f64;
297 | 
298 |             // capture the pre-swap price before mutating the cached vault amounts
299 |             let initial_price = calculate_price(&pool.state, &pool.decimals);
300 |             pool.state.pool_pc_vault_amount = pc_amount;
301 |             pool.state.pool_coin_vault_amount = coin_amount;
302 | 
303 |             let new_price = calculate_price(&pool.state, &pool.decimals);
304 | 
305 |             if sol_amount > 10. {
306 |                 info!(
307 |                     "large swap: ({}) {}",
308 |                     sol_amount,
309 |                     serde_json::to_string_pretty(&serde_json::json!({
310 |                         "event": "Swap",
311 |                         "signature": signature.to_string(),
312 |                         "swap_direction": format!("{:?}", swap_direction),
313 |                         "is_swap_base_in": is_swap_base_in,
314 |                         "amm_id": parsed_accounts.amm_id.to_string(),
315 |                         "pc_mint": pool.amm_keys.amm_pc_mint.to_string(),
316 |                         "amount_specified": amount_specified,
317 |                         "coin_mint": pool.amm_keys.amm_coin_mint.to_string(),
318 |                         "other_amount_threshold": other_amount_threshold,
319 |                         "initial_price": initial_price,
320 |                         "new_price": new_price,
321 |                     }))
322 |                     .unwrap()
323 |                 );
324 |             }
325 |         }
326 |     }
327 | 
328 |     fn _check_arbitrage_opportunity(
329 |         &self,
330 |         _pool: RaydiumAmmPool,
331 |     ) -> Option<f64> {
332 |         None
333 |     }
334 | }
335 | 
336 | pub fn get_account_key_safely(
337 |     message: &VersionedMessage,
338 |     instruction: &CompiledInstruction,
339 |     account_index: usize,
340 | ) -> Option<Pubkey> {
341 |     instruction
342 |         .accounts
343 |         .get(account_index)
344 |         .and_then(|&index| message.static_account_keys().get(index as usize))
345 |         .copied()
346 | }
--------------------------------------------------------------------------------
/src/benchmark.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::ops::Div;
3 | use std::sync::Arc;
4 | use tokio::sync::RwLock;
5 | 
6 | use futures_util::stream::StreamExt;
7 | use log::info;
8 | use solana_client::nonblocking::pubsub_client::PubsubClient;
9 | use solana_client::rpc_config::{
10 |     RpcTransactionLogsConfig, RpcTransactionLogsFilter,
11 | };
12 | use solana_sdk::commitment_config::CommitmentConfig;
13 | 
14 | use crate::util::env;
15 | 
16 | pub type Sigs = Arc<RwLock<Vec<(u64, String)>>>;
17 | 
18 | pub async fn listen_pubsub(
19 |     pubkeys: Vec<String>,
20 |     sigs: Sigs,
21 | ) -> Result<(), Box<dyn std::error::Error>> {
22 |     let pubsub_client = PubsubClient::new(&env("WS_URL")).await?;
23 |     let (mut stream, unsub) = pubsub_client
24 |         .logs_subscribe(
25 |             RpcTransactionLogsFilter::Mentions(pubkeys),
26 |             RpcTransactionLogsConfig {
27 |                 commitment: Some(CommitmentConfig::processed()),
28 |             },
29 |         )
30 |         .await?;
31 | 
32 |     while let Some(data) = stream.next().await {
33 |         let timestamp = chrono::Utc::now().timestamp_millis();
34 |         let mut sigs = sigs.write().await;
35 |         info!("pubsub: {} {}", timestamp, data.value.signature);
36 |         sigs.push((timestamp as u64, data.value.signature));
37 |     }
38 | 
39 |     unsub().await;
40 | 
41 |     Ok(())
42 | }
43 | 
44 | pub fn compare_results(
45 |     pubsub_sigs: Vec<(u64, String)>,
46 |     shreds_sigs: Vec<(u64, String)>,
47 | ) {
48 |     let mut miss_count = 0;
49 |     let mut slower_count = 0;
50 |     let mut faster_count = 0;
51 |     let mut shreds_sigs_map: HashMap<String, u64> = HashMap::new();
52 |     for (timestamp, sig) in shreds_sigs.iter() {
53 |         shreds_sigs_map.insert(sig.clone(), *timestamp);
54 |     }
55 | 
56 |     let mut average_diff = 0f64;
57 |     let mut count = 0;
58 | 
59 |     for (pubsub_timestamp, sig) in pubsub_sigs.iter() {
60 |         if let Some(shreds_timestamp) = shreds_sigs_map.remove(sig) {
61 |             let diff = shreds_timestamp as f64 - *pubsub_timestamp as f64;
62 |             info!("{} diff: {}", sig, diff);
63 |             average_diff += diff;
64 |             count += 1;
65 |             match shreds_timestamp.cmp(pubsub_timestamp) {
66 |                 std::cmp::Ordering::Equal => {}
67 |                 std::cmp::Ordering::Less => faster_count += 1,
68 |                 std::cmp::Ordering::Greater => slower_count += 1,
69 |             }
70 |         } else {
71 |             miss_count += 1;
72 |         }
73 |     }
74 | 
75 |     info!("Benchmark results:");
76 |     info!("Pubsub sigs: {}", pubsub_sigs.len());
77 |     info!("Shreds sigs: {}", shreds_sigs.len());
78 |     info!("Miss count: {}", miss_count);
79 |     info!("Slower count: {}", slower_count);
80 |     info!("Faster count: {}", faster_count);
81 |     info!("Average diff: {}", average_diff.div(count as f64));
82 | }
--------------------------------------------------------------------------------
/src/constants.rs:
--------------------------------------------------------------------------------
1 | pub const WHIRLPOOL: &str = "whirLbMiicVdio4qvUfM5KAg6Ct8VwpYzGff3uctyCc";
2 | pub const RAYDIUM_CP: &str = "CPMMoo8L3F4NbTegBCKVNunggL7H1ZpdTHKxQB5qKP1C";
3 | pub const RAYDIUM_AMM: &str = "675kPX9MHTjS2zt1qfr1NYHuzeLXfQM9H24wFSUt1Mp8";
4 | pub const WSOL: &str = "So11111111111111111111111111111111111111112";
5 | pub const PUMP_FUN_MINT_AUTHORITY: &str =
6 |     "TSLvdd1pWpHVjahSpsvCXUbgwsL3JAcvokwaKt1eokM";
--------------------------------------------------------------------------------
/src/entry_processor.rs:
--------------------------------------------------------------------------------
1 | use borsh::BorshDeserialize;
2 | use rayon::prelude::*;
3 | use serde::{Deserialize, Serialize};
4 | use solana_sdk::clock::Slot;
5 | use std::str::FromStr;
6 | use std::sync::Arc;
7 | use tokio::sync::RwLock;
8 | 
9 | use log::{debug, error, info};
10 | use solana_entry::entry::Entry;
11 | use solana_sdk::pubkey::Pubkey;
12 | use tokio::sync::mpsc;
13 | 
14 | use crate::arb::PoolsState;
15 | use crate::constants;
16 | use crate::pump::{PumpCreateIx, PumpSwapIx};
17 | use crate::util::{pubkey_to_string, string_to_pubkey};
18 | 
19 | // those are virtual btw
20 | // Initial SOL reserves: 30,000,000,000 lamports (30 SOL)
21 | // Initial token reserves: 1,073,000,000,000,000 tokens
22 | pub const DEFAULT_SOL_INITIAL_RESERVES: u64 = 30_000_000_000;
23 | pub const DEFAULT_TOKEN_INITIAL_RESERVES: u64 = 1_073_000_000_000_000;
24 | 
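// Illustrative sketch (an assumption, not from the original source): pump.fun
// treats these virtual reserves as a constant-product curve, x * y = k with
// x = virtual SOL and y = virtual tokens, so the starting spot price is
//   30_000_000_000 / 1_073_000_000_000_000 lamports per token base unit,
//   i.e. roughly 2.8e-8 SOL per whole token assuming 9 SOL / 6 token decimals,
// and a buy of `sol_in` lamports yields approximately
//   tokens_out = y - k / (x + sol_in)
// which is why the processor below adds the fee-adjusted SOL to the virtual
// SOL reserves and subtracts the bought amount from the token reserves.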
25 | pub struct EntriesWithMeta {
26 |     pub entries: Vec<Entry>,
27 |     pub slot: Slot,
28 | }
29 | 
30 | pub struct ArbEntryProcessor {
31 |     entry_rx: mpsc::Receiver<EntriesWithMeta>,
32 |     error_rx: mpsc::Receiver<String>,
33 |     pools_state: Arc<RwLock<PoolsState>>,
34 |     sig_tx: mpsc::Sender<String>,
35 | }
36 | 
37 | impl ArbEntryProcessor {
38 |     pub fn new(
39 |         entry_rx: mpsc::Receiver<EntriesWithMeta>,
40 |         error_rx: mpsc::Receiver<String>,
41 |         pools_state: Arc<RwLock<PoolsState>>,
42 |         sig_tx: mpsc::Sender<String>,
43 |     ) -> Self {
44 |         ArbEntryProcessor {
45 |             entry_rx,
46 |             error_rx,
47 |             pools_state,
48 |             sig_tx,
49 |         }
50 |     }
51 | 
52 |     pub async fn receive_entries(&mut self) {
53 |         loop {
54 |             tokio::select! {
55 |                 Some(entries) = self.entry_rx.recv() => {
56 |                     self.process_entries(entries).await;
57 |                 }
58 |                 Some(error) = self.error_rx.recv() => {
59 |                     error!("{}", error);
60 |                 }
61 |             }
62 |         }
63 |     }
64 | 
65 |     pub async fn process_entries(
66 |         &mut self,
67 |         entries_with_meta: EntriesWithMeta,
68 |     ) {
69 |         let mut pools_state = self.pools_state.write().await;
70 |         debug!(
71 |             "OK: entries {} txs: {}",
72 |             entries_with_meta.entries.len(),
73 |             entries_with_meta
74 |                 .entries
75 |                 .iter()
76 |                 .map(|e| e.transactions.len())
77 |                 .sum::<usize>(),
78 |         );
79 |         for entry in entries_with_meta.entries {
80 |             for tx in entry.transactions {
81 |                 if tx.message.static_account_keys().contains(
82 |                     &Pubkey::from_str(constants::WHIRLPOOL)
83 |                         .expect("Failed to parse pubkey"),
84 |                 ) {
85 |                     pools_state.orca_count += 1;
86 |                     pools_state.reduce_orca_tx(tx);
87 |                 } else if tx.message.static_account_keys().contains(
88 |                     &Pubkey::from_str(constants::RAYDIUM_CP)
89 |                         .expect("Failed to parse pubkey"),
90 |                 ) {
91 |                     pools_state.raydium_cp_count += 1;
92 |                     // pools_state.reduce_raydium_cp_tx(tx);
93 |                 } else if tx.message.static_account_keys().contains(
94 |                     &Pubkey::from_str(constants::RAYDIUM_AMM)
95 |                         .expect("Failed to parse pubkey"),
96 |                 ) {
97 |                     pools_state.raydium_amm_count += 1;
98 |                     self.sig_tx
99 |                         .send(tx.signatures[0].to_string())
100 |                         .await
101 |                         .unwrap();
102 |                     pools_state.reduce_raydium_amm_tx(Arc::new(tx)).await;
103 |                 };
104 |             }
105 |             debug!(
106 |                 "orca: {}, raydium cp: {}, raydium amm: {}",
107 |                 pools_state.orca_count,
108 |                 pools_state.raydium_cp_count,
109 |                 pools_state.raydium_amm_count
110 |             );
111 |         }
112 |     }
113 | }
114 | 
115 | pub struct PumpEntryProcessor {
116 |     entry_rx: mpsc::Receiver<EntriesWithMeta>,
117 |     error_rx: mpsc::Receiver<String>,
118 |     sig_tx: mpsc::Sender<String>,
119 |     post_url: String,
120 |     client: reqwest::Client,
121 | }
122 | 
123 | #[derive(Debug, Clone, Serialize, Deserialize)]
124 | pub struct CreatePumpTokenEvent {
125 |     pub sig: String,
126 |     pub slot: Slot,
127 |     #[serde(
128 |         serialize_with = "pubkey_to_string",
129 |         deserialize_with = "string_to_pubkey"
130 |     )]
131 |     pub mint: Pubkey,
132 |     #[serde(
133 |         serialize_with = "pubkey_to_string",
134 |         deserialize_with = "string_to_pubkey"
135 |     )]
136 |     pub bounding_curve: Pubkey,
137 |     #[serde(
138 |         serialize_with = "pubkey_to_string",
139 |         deserialize_with = "string_to_pubkey"
140 |     )]
141 |     pub associated_bounding_curve: Pubkey,
142 |     pub name: String,
143 |     pub symbol: String,
144 |     pub uri: String,
145 |     pub dev_bought_amount: u64,
146 |     pub dev_max_sol_cost: u64,
147 |     pub num_dev_buy_txs: u64,
148 |     pub virtual_sol_reserves: u64,
149 |     pub virtual_token_reserves: u64,
150 | }
151 | 
152 | impl Default for CreatePumpTokenEvent {
153 |     fn default() -> Self {
154 |         CreatePumpTokenEvent {
155 |             sig: "".to_string(),
156 |             slot: 0,
157 |             mint: Pubkey::default(),
158 |             bounding_curve: Pubkey::default(),
159 |             associated_bounding_curve: Pubkey::default(),
160 |             name: "".to_string(),
161 |             symbol: "".to_string(),
162 |             uri: "".to_string(),
163 |             dev_bought_amount: 0,
164 |             dev_max_sol_cost: 0,
165 |             num_dev_buy_txs: 0,
166 |             virtual_sol_reserves: DEFAULT_SOL_INITIAL_RESERVES,
167 |             virtual_token_reserves: DEFAULT_TOKEN_INITIAL_RESERVES,
168 |         }
169 |     }
170 | }
171 | 
172 | impl PumpEntryProcessor {
173 |     pub fn new(
174 |         entry_rx: mpsc::Receiver<EntriesWithMeta>,
175 |         error_rx: mpsc::Receiver<String>,
176 |         sig_tx: mpsc::Sender<String>,
177 |         post_url: String,
178 |     ) -> Self {
179 |         PumpEntryProcessor {
180 |             entry_rx,
181 |             error_rx,
182 |             sig_tx,
183 |             post_url,
184 |             client: reqwest::Client::new(),
185 |         }
186 |     }
187 | 
188 |     pub async fn receive_entries(&mut self) {
189 |         loop {
190 |             tokio::select! {
191 |                 Some(entries) = self.entry_rx.recv() => {
192 |                     self.process_entries(entries).await;
193 |                 }
194 |                 Some(error) = self.error_rx.recv() => {
195 |                     error!("{}", error);
196 |                 }
197 |             }
198 |         }
199 |     }
200 | 
201 |     /// TODO each vec of entries should include metadata about the slot of the deshred
202 |     pub async fn process_entries(&self, entries_with_meta: EntriesWithMeta) {
203 |         let events = entries_with_meta
204 |             .entries
205 |             .par_iter()
206 |             .map(|entry| {
207 |                 entry
208 |                     .transactions
209 |                     .par_iter()
210 |                     .filter_map(|tx| {
211 |                         let mut event = CreatePumpTokenEvent::default();
212 |                         let account_keys = tx.message.static_account_keys();
213 |                         if account_keys.len() == 18
214 |                             && account_keys.contains(
215 |                                 &Pubkey::from_str(
216 |                                     constants::PUMP_FUN_MINT_AUTHORITY,
217 |                                 )
218 |                                 .expect("Failed to parse pubkey"),
219 |                             )
220 |                         {
221 |                             println!("Found pump tx: {:#?}", tx);
222 |                             event.mint = account_keys[1];
223 |                             event.bounding_curve = account_keys[3];
224 |                             event.associated_bounding_curve = account_keys[4];
225 |                             tx.message.instructions().iter().for_each(|ix| {
226 |                                 if let Ok(swap) =
227 |                                     PumpSwapIx::try_from_slice(&ix.data)
228 |                                 {
229 |                                     event.dev_bought_amount += swap.amount;
230 |                                     event.dev_max_sol_cost +=
231 |                                         swap.max_sol_cost;
232 |                                     event.num_dev_buy_txs += 1;
233 |                                     event.virtual_sol_reserves +=
234 |                                         deduct_fee(swap.max_sol_cost);
235 |                                     event.virtual_token_reserves -=
236 |                                         swap.amount;
237 |                                 } else if let Ok(token_metadata) =
238 |                                     PumpCreateIx::try_from_slice(&ix.data)
239 |                                 {
240 |                                     event.name = token_metadata.name;
241 |                                     event.symbol = token_metadata.symbol;
242 |                                     event.uri = token_metadata.uri;
243 |                                 }
244 |                             });
245 |                         } else {
246 |                             return None;
247 |                         }
248 |                         event.sig = tx.signatures[0].to_string();
249 |                         event.slot = entries_with_meta.slot;
250 |                         Some(event)
251 |                     })
252 |                     .collect::<Vec<CreatePumpTokenEvent>>()
253 |             })
254 |             .flatten()
255 |             .collect::<Vec<CreatePumpTokenEvent>>();
256 | 
257 |         // this might be a tiny bit blocking
258 |         for event in events {
259 |             info!(
260 |                 "Sending webhook: {}",
261 |                 serde_json::to_string_pretty(&event).expect("pretty")
262 |             );
263 |             self.post_webhook(event.clone()).await;
264 |             self.sig_tx.send(event.sig.clone()).await.unwrap();
265 |         }
266 |     }
267 | 
268 |     async fn post_webhook(&self, event: CreatePumpTokenEvent) {
269 |         let url = self.post_url.clone() + "/v2/pump-buy";
270 |         match self.client.post(url.clone()).json(&event).send().await {
271 |             Ok(resp) => {
272 |                 if resp.status().is_success() {
273 |                     info!("Webhook sent: {}", event.sig);
274 |                 } else {
275 |                     error!("Failed to send webhook to {}: {:?}", url, event);
276 |                 }
277 |             }
278 |             Err(e) => {
279 |                 error!("Failed to send webhook: {:?}", e);
280 |             }
281 |         }
282 |     }
283 | }
284 | 
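// Illustrative shape of the webhook POSTed to `{post_url}/v2/pump-buy`
// (fields follow CreatePumpTokenEvent and its serde attributes; the values
// here are made up):
//
// {
//   "sig": "<base58 signature>",
//   "slot": 287512345,
//   "mint": "<base58 pubkey>",
//   "bounding_curve": "<base58 pubkey>",
//   "associated_bounding_curve": "<base58 pubkey>",
//   "name": "...", "symbol": "...", "uri": "...",
//   "dev_bought_amount": 0, "dev_max_sol_cost": 0, "num_dev_buy_txs": 0,
//   "virtual_sol_reserves": 30000000000,
//   "virtual_token_reserves": 1073000000000000
// }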
285 | /// deduct_fee takes the 1% fee from the amount of SOL out
286 | /// e.g. if you buy 1 sol worth of the token at start, the max_sol_cost will
287 | /// amount to 1.01 sol; only 1 sol goes to the pool, 0.01 is the fee
288 | pub fn deduct_fee(sol_amount: u64) -> u64 {
289 |     (sol_amount * 100) / 101
290 | }
291 | 
292 | #[cfg(test)]
293 | mod tests {
294 |     use super::*;
295 | 
296 |     #[test]
297 |     fn test_deduct_fee() {
298 |         assert_eq!(deduct_fee(1010000000), 1000000000);
299 |         assert_eq!(deduct_fee(2020000000), 2000000000);
300 |     }
301 | }
--------------------------------------------------------------------------------
/src/graduates_processor.rs:
--------------------------------------------------------------------------------
1 | use log::{error, info};
2 | use solana_program::pubkey::Pubkey;
3 | use solana_sdk::message::VersionedMessage;
4 | use solana_sdk::transaction::VersionedTransaction;
5 | use std::str::FromStr;
6 | use tokio::sync::mpsc;
7 | 
8 | use crate::entry_processor::EntriesWithMeta;
9 | 
10 | const PUMP_MIGRATION_PROGRAM: &str =
11 |     "39azUYFWPz3VHgKCf3VChUwbpURdCHRxjWVowf5jUJjg";
12 | const RAYDIUM_LP_PROGRAM: &str =
13 |     "675kPX9MHTjS2zt1qfr1NYHuzeLXfQM9H24wFSUt1Mp8";
14 | 
15 | lazy_static::lazy_static! {
16 |     static ref PUMP_MIGRATION_PUBKEY: Pubkey = Pubkey::from_str(PUMP_MIGRATION_PROGRAM).unwrap();
17 |     static ref RAYDIUM_LP_PUBKEY: Pubkey = Pubkey::from_str(RAYDIUM_LP_PROGRAM).unwrap();
18 | }
19 | 
20 | pub struct GraduatesProcessor {
21 |     entry_rx: mpsc::Receiver<EntriesWithMeta>,
22 |     error_rx: mpsc::Receiver<String>,
23 |     sig_tx: mpsc::Sender<String>,
24 | }
25 | 
26 | fn filter_transaction(transaction: &VersionedTransaction) -> bool {
27 |     if let VersionedMessage::V0(message) = &transaction.message {
28 |         // Check if the Pump migration account appears among the account keys
29 |         let is_signed_by_pump = message
30 |             .account_keys
31 |             .iter()
32 |             .any(|key| key == &*PUMP_MIGRATION_PUBKEY);
33 | 
34 |         // Check if any instruction uses RAYDIUM_LP_PROGRAM
35 |         let uses_raydium = message.instructions.iter().any(|instruction| {
36 |             let program_id =
37 |                 &message.account_keys[instruction.program_id_index as usize];
38 |             program_id == &*RAYDIUM_LP_PUBKEY
39 |         });
40 | 
41 |         return is_signed_by_pump && uses_raydium;
42 |     }
43 |     false // legacy (non-v0) transactions are not inspected
44 | }
45 | 
46 | impl GraduatesProcessor {
47 |     pub fn new(
48 |         entry_rx: mpsc::Receiver<EntriesWithMeta>,
49 |         error_rx: mpsc::Receiver<String>,
50 |         sig_tx: mpsc::Sender<String>,
51 |     ) -> Self {
52 |         Self {
53 |             entry_rx,
54 |             error_rx,
55 |             sig_tx,
56 |         }
57 |     }
58 | 
59 |     pub async fn receive_entries(&mut self) {
60 |         loop {
61 |             tokio::select! {
62 |                 Some(entries) = self.entry_rx.recv() => {
63 |                     self.process_entries(entries).await;
64 |                 }
65 |                 Some(error) = self.error_rx.recv() => {
66 |                     error!("{}", error);
67 |                 }
68 |             }
69 |         }
70 |     }
71 | 
72 |     pub async fn process_entries(
73 |         &mut self,
74 |         entries_with_meta: EntriesWithMeta,
75 |     ) {
76 |         for entry in entries_with_meta.entries {
77 |             for tx in entry.transactions {
78 |                 if filter_transaction(&tx) {
79 |                     info!("Found matching transaction: {:?}", tx.signatures);
80 |                     if let Some(sig) = tx.signatures.first() {
81 |                         if let Err(e) =
82 |                             self.sig_tx.send(sig.to_string()).await
83 |                         {
84 |                             error!("Failed to send signature: {}", e);
85 |                         }
86 |                     }
87 |                 }
88 |             }
89 |         }
90 |     }
91 | }
92 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod app;
2 | pub mod arb;
3 | pub mod benchmark;
4 | pub mod constants;
5 | pub mod entry_processor;
6 | pub mod graduates_processor;
7 | pub mod listener;
8 | pub mod logger;
9 | pub mod pump;
10 | pub mod raydium;
11 | pub mod recovery;
12 | pub mod service;
13 | pub mod shred;
14 | pub mod shred_processor;
15 | pub mod structs;
16 | pub mod util;
--------------------------------------------------------------------------------
/src/listener.rs:
--------------------------------------------------------------------------------
1 | use log::{error, info};
2 | use std::io::Write;
3 | use std::sync::Arc;
4 | use tokio::net::UdpSocket;
5 | use tokio::signal;
6 | use tokio::sync::{Mutex, RwLock};
7 | use tokio::time::{sleep, Duration};
8 | 
9 | use crate::arb::PoolsState;
10 | use crate::benchmark::Sigs;
11 | use crate::entry_processor::{ArbEntryProcessor, PumpEntryProcessor};
12 | use crate::service::Mode;
13 | use crate::shred_processor::ShredProcessor;
14 | 
15 | pub const PACKET_SIZE: usize = 1280 - 40 - 8; // IPv6 minimum MTU minus IPv6 (40) and UDP (8) headers = 1232 bytes
16 | 
17 | pub async fn listen(
18 |     socket: Arc<UdpSocket>,
19 |     received_packets: Arc<Mutex<Vec<Vec<u8>>>>,
20 | ) {
21 |     let mut buf = [0u8; PACKET_SIZE]; // max shred size
22 |     loop {
23 |         match socket.recv_from(&mut buf).await {
24 |             Ok((received, _)) => {
25 |                 let packet = Vec::from(&buf[..received]);
26 |                 received_packets.lock().await.push(packet);
27 |             }
28 |             Err(e) => {
29 |                 error!("Error receiving packet: {:?}", e);
30 |             }
31 |         }
32 |     }
33 | }
34 | 
35 | pub async fn dump_to_file(received_packets: Arc<Mutex<Vec<Vec<u8>>>>) {
36 |     let packets = received_packets.lock().await;
37 |     let mut file =
38 |         std::fs::File::create("packets.json").expect("Couldn't create file");
39 |     let as_json = serde_json::to_string(&packets.clone())
40 |         .expect("Couldn't serialize to json");
41 |     file.write_all(as_json.as_bytes())
42 |         .expect("Couldn't write to file");
43 |     info!("Packets dumped to packets.json");
44 | }
45 | 
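// Data flow of the listener below (descriptive summary of this module):
//   UDP socket -> ShredProcessor::collect (deshreds packets, tracks FEC sets)
//     -> entry_tx (EntriesWithMeta) -> Arb/Pump entry processor
//     -> sig_tx (signatures) -> benchmark bookkeeping when enabled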
46 | pub async fn run_listener_with_algo(
47 |     bind_addr: &str,
48 |     shreds_sigs: Option<Sigs>,
49 |     mode: Mode,
50 |     post_url: String,
51 |     benchmark: bool,
52 | ) -> Result<(), Box<dyn std::error::Error>> {
53 |     let socket = Arc::new(
54 |         UdpSocket::bind(bind_addr)
55 |             .await
56 |             .expect("Couldn't bind to address"),
57 |     );
58 |     let (entry_tx, entry_rx) = tokio::sync::mpsc::channel(2000);
59 |     let (error_tx, error_rx) = tokio::sync::mpsc::channel(2000);
60 |     let (sig_tx, mut sig_rx) = tokio::sync::mpsc::channel(2000);
61 |     let shred_processor =
62 |         Arc::new(RwLock::new(ShredProcessor::new(entry_tx, error_tx)));
63 | 
64 |     info!("Listening on {}", bind_addr);
65 | 
66 |     // metrics loop
67 |     info!("Starting metrics loop");
68 |     let shred_processor_clone = shred_processor.clone();
69 |     tokio::spawn(async move {
70 |         loop {
71 |             sleep(Duration::from_secs(6)).await;
72 |             {
73 |                 let metrics = shred_processor_clone.read().await.metrics();
74 |                 info!("metrics: {}", metrics);
75 |                 drop(metrics);
76 |             }
77 |         }
78 |     });
79 | 
80 |     info!("Starting shred processor");
81 |     let mut buf = [0u8; PACKET_SIZE]; // max shred size
82 |     let shred_processor = shred_processor.clone();
83 |     tokio::spawn(async move {
84 |         loop {
85 |             match socket.recv_from(&mut buf).await {
86 |                 Ok((received, _)) => {
87 |                     let packet = Vec::from(&buf[..received]);
88 |                     shred_processor
89 |                         .write()
90 |                         .await
91 |                         .collect(Arc::new(packet))
92 |                         .await;
93 |                 }
94 |                 Err(e) => {
95 |                     error!("Error receiving packet: {:?}", e);
96 |                 }
97 |             }
98 |         }
99 |     });
100 | 
101 |     info!("Starting entry processor");
102 |     match mode {
103 |         Mode::Arb => tokio::spawn(async move {
104 |             let pools_state = Arc::new(RwLock::new(PoolsState::default()));
105 |             pools_state.write().await.initialize().await;
106 |             let mut entry_processor = ArbEntryProcessor::new(
107 |                 entry_rx,
108 |                 error_rx,
109 |                 pools_state.clone(),
110 |                 sig_tx,
111 |             );
112 |             entry_processor.receive_entries().await;
113 |         }),
114 |         Mode::Pump => {
115 |             info!("Starting entries rx (<=> webhook tx) pump mode");
116 |             tokio::spawn(async move {
117 |                 let mut entry_processor = PumpEntryProcessor::new(
118 |                     entry_rx, error_rx, sig_tx, post_url,
119 |                 );
120 |                 entry_processor.receive_entries().await;
121 |             })
122 |         }
123 |         Mode::Graduates => {
124 |             info!("Starting entries rx (<=> webhook tx) graduates mode");
125 |             tokio::spawn(async move {
126 |                 let mut entry_processor = PumpEntryProcessor::new(
127 |                     entry_rx, error_rx, sig_tx, post_url,
128 |                 );
129 |                 entry_processor.receive_entries().await;
130 |             })
131 |         }
132 |     };
133 | 
134 |     info!("Starting sigs loop");
135 |     tokio::spawn({
136 |         let shreds_sigs = shreds_sigs.clone();
137 |         async move {
138 |             while let Some(sig) = sig_rx.recv().await {
139 |                 if benchmark {
140 |                     if let Some(shreds_sigs) = &shreds_sigs {
141 |                         let timestamp = chrono::Utc::now().timestamp_millis();
142 |                         info!("algo: {} {}", timestamp, sig);
143 |                         shreds_sigs
144 |                             .write()
145 |                             .await
146 |                             .push((timestamp as u64, sig.clone()));
147 |                     }
148 |                 }
149 |             }
150 |         }
151 |     });
152 | 
153 |     signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
154 | 
155 |     Ok(())
156 | }
157 | 
158 | pub async fn run_listener_with_save(
159 |     bind_addr: &str,
160 | ) -> Result<(), Box<dyn std::error::Error>> {
161 |     let socket = Arc::new(
162 |         UdpSocket::bind(bind_addr)
163 |             .await
164 |             .expect("Couldn't bind to address"),
165 |     );
166 |     let received_packets = Arc::new(Mutex::new(Vec::new()));
167 | 
168 |     info!("Listening on {}", bind_addr);
169 |     let rx = received_packets.clone();
170 |     let socket_clone = socket.clone();
171 | 
172 |     tokio::spawn(async move {
173 |         listen(socket_clone, rx).await;
174 |     });
175 | 
176 |     loop {
177 |         let packets = received_packets.lock().await;
178 |         info!("Total packets received: {}", packets.len());
179 |         if packets.len() > 100_000 {
180 |             info!("Dumping packets to file");
181 |             break;
182 |         }
183 |         drop(packets);
184 |         sleep(Duration::from_secs(1)).await;
185 |     }
186 |     dump_to_file(received_packets).await;
187 |     Ok(())
188 | }
--------------------------------------------------------------------------------
/src/logger.rs:
--------------------------------------------------------------------------------
1 | use std::fs::File;
2 | use std::io::{Read, Write};
3 | 
4 | #[derive(Debug)]
5 | pub enum Target {
6 |     File,
7 |     Stdout,
8 | }
9 | 
10 | pub fn setup(target: Target) -> Result<(), Box<dyn std::error::Error>> {
11 |     // let random = grab_random_bytes();
12 |     // let log_file = File::create(format!("shreds-{}.log", hex::encode(random)))?;
13 |     let log_file = File::create("shreds.log")?;
14 |     println!("Logging to: {:?}", target);
15 |     if let Target::File = target {
16 |         println!("File: {:?}", log_file);
17 |     }
18 |     env_logger::Builder::default()
19 |         .format_module_path(false)
20 |         .filter_level(log::LevelFilter::Info)
21 |         .format(|buf, record| {
22 |             writeln!(
23 |                 buf,
24 |                 "{} {} [{}] {}",
25 |                 std::time::SystemTime::now()
26 |                     .duration_since(std::time::UNIX_EPOCH)
27 |                     .unwrap()
28 |                     .as_millis(),
29 |                 record.level(),
30 |                 record.target(),
31 |                 record.args()
32 |             )
33 |         })
34 |         .target(if let Target::Stdout = target {
35 |             env_logger::Target::Stdout
36 |         } else {
37 |             env_logger::Target::Pipe(Box::new(log_file))
38 |         })
39 |         .init();
40 | 
41 |     Ok(())
42 | }
43 | 
44 | pub fn grab_random_bytes() -> [u8; 5] {
45 |     let mut random = [0u8; 5];
46 |     File::open("/dev/urandom")
47 |         .expect("asdf")
48 |         .read_exact(&mut random)
49 |         .expect("asdf");
50 |     random
51 | }
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | use clap::Parser;
2 | use shreds::app::{App, Command};
3 | use shreds::service::{self, Mode};
4 | use std::sync::Arc;
5 | 
6 | use log::info;
7 | use shreds::benchmark::compare_results;
8 | use shreds::raydium::download_raydium_json;
9 | use shreds::{benchmark, listener, logger};
10 | use tokio::sync::RwLock;
11 | 
12 | use shreds::constants;
13 | 
14 | #[tokio::main(flavor = "multi_thread")]
15 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
16 |     dotenv::dotenv().ok();
17 | 
18 |     let app = App::parse();
19 | 
20 |     let log_target = app.args.log_target.unwrap();
21 |     logger::setup(if log_target == "file" {
22 |         logger::Target::File
23 |     } else if log_target == "stdout" {
24 |         logger::Target::Stdout
25 |     } else {
26 |         panic!("Invalid log target")
27 |     })?;
28 | 
29 |     match app.command {
30 |         Command::Save => {
31 |             let bind = app.args.bind.unwrap();
32 |             info!("Binding to address: {}", bind);
33 | 
34 |             info!("Running in save mode");
35 |             listener::run_listener_with_save(&bind).await?;
36 |         }
37 |         Command::Download => {
38 |             download_raydium_json(true).await?;
39 |         }
40 |         Command::Benchmark => {
41 |             benchmark_cmd(app.args.bind.unwrap()).await?;
42 |         }
43 |         Command::Pubsub => {
44 |             let pubsub_sigs = Arc::new(RwLock::new(Vec::new()));
45 | 
46 |             let pubsub_handle = tokio::spawn({
47 |                 let pubsub_sigs = pubsub_sigs.clone();
48 |                 async move {
49 |                     tokio::time::sleep(tokio::time::Duration::from_secs(3))
50 |                         .await;
51 |                     benchmark::listen_pubsub(
52 |                         vec![constants::RAYDIUM_AMM.to_string()],
53 |                         pubsub_sigs,
54 |                     )
55 |                     .await
56 |                     .expect("pubsub")
57 |                 }
58 |             });
59 | 
60 |             tokio::signal::ctrl_c()
61 |                 .await
62 |                 .expect("Failed to listen for Ctrl+C");
63 | 
64 |             pubsub_handle.await?;
65 |         }
66 |         Command::ArbMode => {
67 |             let bind = app.args.bind.unwrap();
68 |             let post = app.args.post_url.unwrap();
69 |             health_check(post.clone()).await?;
70 |             info!("Binding to address: {}, posting to: {}", bind, post);
71 |             service::run(bind, post, Mode::Arb).await?;
72 |         }
73 |         Command::PumpMode => {
74 |             let bind = app.args.bind.unwrap();
75 |             let post = app.args.post_url.unwrap();
76 |             health_check(post.clone()).await?;
77 |             info!("Binding to address: {}, posting to: {}", bind, post);
78 |             service::run(bind, post, Mode::Pump).await?;
79 |         }
80 |         Command::GraduatesMode => {
81 |             let bind = app.args.bind.unwrap();
82 |             let post = app.args.post_url.unwrap();
83 |             // health_check(post.clone()).await?;
84 |             info!("Binding to address: {}", bind);
85 |             service::run(bind, post, Mode::Graduates).await?;
86 |         }
87 |     }
88 | 
89 |     Ok(())
90 | }
91 | 
92 | pub async fn health_check(
93 |     post_url: String,
94 | ) -> Result<(), Box<dyn std::error::Error>> {
95 |     info!("Running health check on: {}", post_url);
96 | 
97 |     let client = reqwest::Client::new();
98 |     let response = client
99 |         .get(post_url + "/healthz")
100 |         .send()
101 |         .await
102 |         .expect("Failed to send request");
103 | 
104 |     if response.status().is_success() {
105 |         Ok(())
106 |     } else {
107 |         Err("Failed health check".into())
108 |     }
109 | }
110 | 
111 | pub async fn benchmark_cmd(
112 |     bind_addr: String,
113 | ) -> Result<(), Box<dyn std::error::Error>> {
114 |     info!("Binding to address: {}", bind_addr);
115 | 
116 |     let pubsub_sigs = Arc::new(RwLock::new(Vec::new()));
117 |     let shreds_sigs = Arc::new(RwLock::new(Vec::new()));
118 | 
119 |     let pubsub_handle = tokio::spawn({
120 |         let pubsub_sigs = pubsub_sigs.clone();
121 |         async move {
122 |             tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
123 |             benchmark::listen_pubsub(
124 |                 vec![constants::RAYDIUM_AMM.to_string()],
125 |                 pubsub_sigs,
126 |             )
127 |             .await
128 |             .expect("pubsub")
129 |         }
130 |     });
131 |     let shreds_handle = tokio::spawn({
132 |         let shreds_sigs = shreds_sigs.clone();
133 |         async move {
134 |             listener::run_listener_with_algo(
135 |                 &bind_addr,
136 |                 Some(shreds_sigs),
137 |                 Mode::Arb,
138 |                 "".to_string(),
139 |                 true,
140 |             )
141 |             .await
142 |             .expect("shreds")
143 |         }
144 |     });
145 | 
146 |     info!("Sleeping for 10 seconds...");
147 |     tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
148 | 
149 |     pubsub_handle.abort();
150 |     shreds_handle.abort();
151 | 
152 |     compare_results(
153 |         pubsub_sigs.read().await.clone(),
154 |         shreds_sigs.read().await.clone(),
155 |     );
156 | 
157 |     Ok(())
158 | }
--------------------------------------------------------------------------------
/src/pump.rs:
--------------------------------------------------------------------------------
1 | use borsh::{BorshDeserialize, BorshSerialize};
2 | use serde::{Deserialize, Serialize};
3 | 
4 | #[derive(
5 |     BorshDeserialize, BorshSerialize, Serialize, Deserialize, Clone, Default,
6 | )]
7 | pub struct PumpCreateIx {
8 |     pub method_id: [u8; 8],
9 |     pub name: String,
10 |     pub symbol: String,
11 |     pub uri: String,
12 | }
13 | 
14 | impl std::fmt::Debug for PumpCreateIx {
15 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
16 |         f.debug_struct("PumpCreateIx")
17 |             .field("name", &self.name)
18 |             .field("symbol", &self.symbol)
19 |             .field("uri", &self.uri)
20 |             .finish()
21 |     }
22 | }
23 | 
24 | #[derive(
25 |     BorshDeserialize,
26 |     BorshSerialize,
27 |     Serialize,
28 |     Deserialize,
29 |     Clone,
30 |     Default,
31 |     Copy,
32 | )]
33 | pub struct PumpSwapIx {
34 |     pub method_id: [u8; 8],
35 |     pub amount: u64,
36 |     pub max_sol_cost: u64,
37 | }
38 | 
39 | impl std::fmt::Debug for PumpSwapIx {
40 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
41 |         f.debug_struct("PumpSwapIx")
42 |             .field("amount", &self.amount)
43 |             .field("max_sol_cost", &self.max_sol_cost)
44 |             .finish()
45 |     }
46 | }
--------------------------------------------------------------------------------
/src/raydium.rs:
--------------------------------------------------------------------------------
1 | use futures_util::future::join_all;
futures_util::future::join_all; 2 | use futures_util::StreamExt; 3 | use indicatif::{ProgressBar, ProgressStyle}; 4 | use log::{info, warn}; 5 | use once_cell::sync::Lazy; 6 | use raydium_amm::math::{CheckedCeilDiv, SwapDirection, U128}; 7 | use raydium_library::amm::{self, openbook, AmmKeys, CalculateResult}; 8 | use reqwest::Client; 9 | use serde_json::Value; 10 | use solana_client::nonblocking::rpc_client::RpcClient; 11 | use solana_sdk::pubkey::Pubkey; 12 | use solana_sdk::signature::Keypair; 13 | use solana_sdk::signer::{EncodableKey, Signer}; 14 | use std::collections::HashMap; 15 | use std::fs::File; 16 | use std::io::Write; 17 | use std::path::Path; 18 | use std::str::FromStr; 19 | use std::sync::Arc; 20 | use tokio::sync::RwLock; 21 | 22 | use raydium_amm::instruction::{ 23 | AdminCancelOrdersInstruction, ConfigArgs, DepositInstruction, 24 | InitializeInstruction, InitializeInstruction2, MonitorStepInstruction, 25 | PreInitializeInstruction, SetParamsInstruction, SimulateInstruction, 26 | SwapInstructionBaseIn, SwapInstructionBaseOut, WithdrawInstruction, 27 | WithdrawSrmInstruction, 28 | }; 29 | use solana_program::program_error::ProgramError; 30 | 31 | use crate::arb::PoolsState; 32 | use crate::constants; 33 | use crate::util::env; 34 | 35 | pub struct ParsedAccounts { 36 | pub amm_id: Pubkey, 37 | pub pool_coin_vault: Pubkey, 38 | pub pool_pc_vault: Pubkey, 39 | } 40 | 41 | #[derive(Debug, Clone, Copy)] 42 | pub struct RaydiumDecimals { 43 | pub coin_decimals: u8, 44 | pub pc_decimals: u8, 45 | pub lp_decimals: u8, 46 | } 47 | 48 | // TODO there might be sol-token or token-sol both versions, gotta be able to handle both 49 | #[derive(Debug, Clone)] 50 | pub struct RaydiumAmmPool { 51 | pub token: Pubkey, 52 | pub amm_keys: AmmKeys, 53 | pub state: CalculateResult, 54 | pub decimals: RaydiumDecimals, 55 | } 56 | 57 | pub async fn initialize_raydium_amm_pools( 58 | rpc_client: &RpcClient, 59 | pools_state: &mut PoolsState, 60 | mints_of_interest: Vec, 61 | ) { 62 | info!("Reading in raydium.json (large file)"); 63 | let amm_keys_map = 64 | parse_raydium_json(RAYDIUM_JSON.clone(), mints_of_interest.clone()) 65 | .expect("parse raydium json"); 66 | let amm_program = 67 | Pubkey::from_str(constants::RAYDIUM_AMM).expect("pubkey"); 68 | let payer = Keypair::read_from_file(env("FUND_KEYPAIR_PATH")) 69 | .expect("Failed to read keypair"); 70 | let fee_payer = payer.pubkey(); 71 | 72 | // Fetch results 73 | let futures = mints_of_interest.iter().map(|mint| { 74 | let amm_keys_map = amm_keys_map.clone(); 75 | async move { 76 | let amm_keys_vec = amm_keys_map.get(mint).unwrap(); // bound to exist 77 | let mut results = Vec::new(); 78 | for (amm_keys, decimals) in amm_keys_vec.iter() { 79 | info!("Loading AMM keys for pool: {:?}", amm_keys.amm_pool); 80 | let market_keys = openbook::get_keys_for_market( 81 | rpc_client, 82 | &amm_keys.market_program, 83 | &amm_keys.market, 84 | ) 85 | .await 86 | .expect("get market keys"); 87 | let state = amm::calculate_pool_vault_amounts( 88 | rpc_client, 89 | &amm_program, 90 | &amm_keys.amm_pool, 91 | amm_keys, 92 | &market_keys, 93 | amm::utils::CalculateMethod::Simulate(fee_payer), 94 | ) 95 | .await 96 | .expect("calculate pool vault amounts"); 97 | results.push((*mint, *amm_keys, state, *decimals)); 98 | } 99 | results 100 | } 101 | }); 102 | 103 | // Join all futures 104 | let all_results = join_all(futures).await; 105 | 106 | // Update pools_state 107 | for results in all_results { 108 | for (mint, amm_keys, state, decimals) in results { 109 | 
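// Each pool is indexed twice below: once in the primary map keyed by
// the AMM pool id (for direct lookups when an instruction references
// the pool), and once in a per-mint index so every pool that trades a
// given token can be found without a scan.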
pools_state.raydium_pools.insert( 110 | amm_keys.amm_pool, 111 | Arc::new(RwLock::new(RaydiumAmmPool { 112 | token: mint, 113 | amm_keys, 114 | state, 115 | decimals, 116 | })), 117 | ); 118 | pools_state 119 | .raydium_pools_by_mint 120 | .entry(mint) 121 | .or_default() 122 | .push(amm_keys.amm_pool); 123 | } 124 | } 125 | } 126 | 127 | static RAYDIUM_JSON: Lazy<Arc<Value>> = Lazy::new(|| { 128 | if !std::path::Path::new("raydium.json").exists() { 129 | panic!("raydium.json not found, download it first"); 130 | } 131 | let json_str = std::fs::read_to_string("raydium.json") 132 | .expect("Failed to read raydium.json"); 133 | let json_value: Value = serde_json::from_str(&json_str) 134 | .expect("Failed to parse raydium.json"); 135 | Arc::new(json_value) 136 | }); 137 | 138 | pub fn calculate_price( 139 | state: &CalculateResult, 140 | decimals: &RaydiumDecimals, 141 | ) -> Option<u64> { 142 | let pc_decimals = 10u64.pow(decimals.pc_decimals as u32); 143 | let coin_decimals = 10u64.pow(decimals.coin_decimals as u32); 144 | let coin_scaled = state.pool_coin_vault_amount / coin_decimals; 145 | if coin_scaled == 0 { 146 | return None; 147 | } 148 | 149 | // Whole-unit price in pc per coin; integer division truncates, so 150 | // pools priced below one pc unit per coin unit return Some(0) 151 | let price = (state.pool_pc_vault_amount / pc_decimals) / coin_scaled; 152 | Some(price) 153 | } 154 | 155 | #[derive(Debug)] 156 | pub enum ParsedAmmInstruction { 157 | Initialize(InitializeInstruction), 158 | Initialize2(InitializeInstruction2), 159 | MonitorStep(MonitorStepInstruction), 160 | Deposit(DepositInstruction), 161 | Withdraw(WithdrawInstruction), 162 | MigrateToOpenBook, 163 | SetParams(SetParamsInstruction), 164 | WithdrawPnl, 165 | WithdrawSrm(WithdrawSrmInstruction), 166 | SwapBaseIn(SwapInstructionBaseIn), 167 | PreInitialize(PreInitializeInstruction), 168 | SwapBaseOut(SwapInstructionBaseOut), 169 | SimulateInfo(SimulateInstruction), 170 | AdminCancelOrders(AdminCancelOrdersInstruction), 171 | CreateConfigAccount, 172 | UpdateConfigAccount(ConfigArgs), 173 | } 174 | 175 | pub fn parse_amm_instruction( 176 | data: &[u8], 177 | ) -> Result<ParsedAmmInstruction, ProgramError> { 178 | let (&tag, rest) = data 179 | .split_first() 180 | .ok_or(ProgramError::InvalidInstructionData)?; 181 | 182 | match tag { 183 | 0 => { 184 | let (nonce, rest) = unpack_u8(rest)?; 185 | let (open_time, _) = unpack_u64(rest)?; 186 | Ok(ParsedAmmInstruction::Initialize(InitializeInstruction { 187 | nonce, 188 | open_time, 189 | })) 190 | } 191 | 1 => { 192 | let (nonce, rest) = unpack_u8(rest)?; 193 | let (open_time, rest) = unpack_u64(rest)?; 194 | let (init_pc_amount, rest) = unpack_u64(rest)?; 195 | let (init_coin_amount, _) = unpack_u64(rest)?; 196 | Ok(ParsedAmmInstruction::Initialize2(InitializeInstruction2 { 197 | nonce, 198 | open_time, 199 | init_pc_amount, 200 | init_coin_amount, 201 | })) 202 | } 203 | 9 => { 204 | let (amount_in, rest) = unpack_u64(rest)?; 205 | let (minimum_amount_out, _) = unpack_u64(rest)?; 206 | Ok(ParsedAmmInstruction::SwapBaseIn(SwapInstructionBaseIn { 207 | amount_in, 208 | minimum_amount_out, 209 | })) 210 | } 211 | 11 => { 212 | let (max_amount_in, rest) = unpack_u64(rest)?; 213 | let (amount_out, _) = unpack_u64(rest)?; 214 | Ok(ParsedAmmInstruction::SwapBaseOut(SwapInstructionBaseOut { 215 | max_amount_in, 216 | amount_out, 217 | })) 218 | } 219 | // Add other instruction parsing here... 
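// A Withdraw arm, for example, would follow the same pattern. Sketch
// only: tag 4 matches Withdraw's position in the enum above, and the
// single u64 `amount` field is assumed from the raydium-amm crate:
//
// 4 => {
//     let (amount, _) = unpack_u64(rest)?;
//     Ok(ParsedAmmInstruction::Withdraw(WithdrawInstruction {
//         amount,
//     }))
// }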
220 | _ => Err(ProgramError::InvalidInstructionData), 221 | } 222 | } 223 | 224 | fn unpack_u8(input: &[u8]) -> Result<(u8, &[u8]), ProgramError> { 225 | if !input.is_empty() { 226 | let (amount, rest) = input.split_at(1); 227 | let amount = amount[0]; 228 | Ok((amount, rest)) 229 | } else { 230 | Err(ProgramError::InvalidInstructionData) 231 | } 232 | } 233 | 234 | fn unpack_u64(input: &[u8]) -> Result<(u64, &[u8]), ProgramError> { 235 | if input.len() >= 8 { 236 | let (amount, rest) = input.split_at(8); 237 | let amount = u64::from_le_bytes(amount.try_into().unwrap()); 238 | Ok((amount, rest)) 239 | } else { 240 | Err(ProgramError::InvalidInstructionData) 241 | } 242 | } 243 | 244 | pub async fn download_raydium_json( 245 | update: bool, 246 | ) -> Result<(), Box> { 247 | if Path::new("raydium.json").exists() && !update { 248 | warn!("raydium.json already exists. Skipping download."); 249 | return Ok(()); 250 | } 251 | info!("Downloading raydium.json"); 252 | 253 | let url = "https://api.raydium.io/v2/sdk/liquidity/mainnet.json"; 254 | let client = Client::new(); 255 | let res = client.get(url).send().await?; 256 | let total_size = res.content_length().unwrap_or(0); 257 | 258 | let pb = ProgressBar::new(total_size); 259 | pb.set_style(ProgressStyle::default_bar() 260 | .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({eta})") 261 | .unwrap() 262 | .progress_chars("#>-")); 263 | 264 | let mut file = File::create("raydium.json")?; 265 | let mut downloaded: u64 = 0; 266 | let mut stream = res.bytes_stream(); 267 | 268 | while let Some(item) = stream.next().await { 269 | let chunk = item?; 270 | file.write_all(&chunk)?; 271 | let new = 272 | std::cmp::min(downloaded + (chunk.len() as u64), total_size); 273 | downloaded = new; 274 | pb.set_position(new); 275 | } 276 | 277 | pb.finish_with_message("Download completed"); 278 | Ok(()) 279 | } 280 | 281 | type Amm = (AmmKeys, RaydiumDecimals); 282 | 283 | // this takes long, possibly could make it so that it uses a search index it 284 | // returns all of the 285 | // pools for a given token, to be filtered later for relevant 286 | pub fn parse_raydium_json( 287 | raydium_json: Arc, 288 | mints_of_interest: Vec, 289 | ) -> Result>, Box> { 290 | let mut result = HashMap::new(); 291 | info!("Parsing relevant pools"); 292 | 293 | let pools = raydium_json["unOfficial"].as_array().unwrap(); 294 | let total_pools = pools.len(); 295 | 296 | let pb = ProgressBar::new(total_pools as u64); 297 | pb.set_style(ProgressStyle::default_bar() 298 | .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta})") 299 | .unwrap() 300 | .progress_chars("#>-")); 301 | 302 | for (index, json) in pools.iter().enumerate() { 303 | let base_mint = 304 | Pubkey::from_str(json["baseMint"].as_str().unwrap()).unwrap(); 305 | let quote_mint = 306 | Pubkey::from_str(json["quoteMint"].as_str().unwrap()).unwrap(); 307 | if mints_of_interest.contains(&base_mint) { 308 | let amm_keys = json_to_amm(json); 309 | result.entry(base_mint).or_insert(vec![]).push(amm_keys); 310 | } else if mints_of_interest.contains("e_mint) { 311 | let amm_keys = json_to_amm(json); 312 | result.entry(quote_mint).or_insert(vec![]).push(amm_keys); 313 | } 314 | 315 | pb.set_position((index + 1) as u64); 316 | } 317 | 318 | pb.finish_with_message("Parsing completed"); 319 | 320 | if mints_of_interest.len() != result.len() { 321 | warn!("Not all mints found in raydium.json"); 322 | } 323 | Ok(result) 324 | } 325 | 326 | pub fn 
json_to_amm(json: &Value) -> (AmmKeys, RaydiumDecimals) { 327 | ( 328 | AmmKeys { 329 | amm_pool: Pubkey::from_str(json["id"].as_str().unwrap()).unwrap(), 330 | amm_coin_mint: Pubkey::from_str( 331 | json["baseMint"].as_str().unwrap(), 332 | ) 333 | .unwrap(), 334 | amm_pc_mint: Pubkey::from_str( 335 | json["quoteMint"].as_str().unwrap(), 336 | ) 337 | .unwrap(), 338 | amm_authority: Pubkey::from_str( 339 | json["authority"].as_str().unwrap(), 340 | ) 341 | .unwrap(), 342 | amm_target: Pubkey::from_str( 343 | json["targetOrders"].as_str().unwrap(), 344 | ) 345 | .unwrap(), 346 | amm_coin_vault: Pubkey::from_str( 347 | json["baseVault"].as_str().unwrap(), 348 | ) 349 | .unwrap(), 350 | amm_pc_vault: Pubkey::from_str( 351 | json["quoteVault"].as_str().unwrap(), 352 | ) 353 | .unwrap(), 354 | amm_lp_mint: Pubkey::from_str(json["lpMint"].as_str().unwrap()) 355 | .unwrap(), 356 | amm_open_order: Pubkey::from_str( 357 | json["openOrders"].as_str().unwrap(), 358 | ) 359 | .unwrap(), 360 | market_program: Pubkey::from_str( 361 | json["marketProgramId"].as_str().unwrap(), 362 | ) 363 | .unwrap(), 364 | market: Pubkey::from_str(json["marketId"].as_str().unwrap()) 365 | .unwrap(), 366 | nonce: u8::default(), // not relevant 367 | }, 368 | RaydiumDecimals { 369 | coin_decimals: json["baseDecimals"].as_u64().unwrap() as u8, 370 | pc_decimals: json["quoteDecimals"].as_u64().unwrap() as u8, 371 | lp_decimals: json["lpDecimals"].as_u64().unwrap() as u8, 372 | }, 373 | ) 374 | } 375 | 376 | pub fn swap_exact_amount( 377 | pc_vault_amount: u64, 378 | coin_vault_amount: u64, 379 | swap_fee_numerator: u64, 380 | swap_fee_denominator: u64, 381 | swap_direction: SwapDirection, 382 | amount_specified: u64, 383 | swap_base_in: bool, 384 | ) -> u64 { 385 | if swap_base_in { 386 | let swap_fee = U128::from(amount_specified) 387 | .checked_mul(swap_fee_numerator.into()) 388 | .unwrap() 389 | .checked_ceil_div(swap_fee_denominator.into()) 390 | .unwrap() 391 | .0; 392 | let swap_in_after_deduct_fee = 393 | U128::from(amount_specified).checked_sub(swap_fee).unwrap(); 394 | raydium_amm::math::Calculator::swap_token_amount_base_in( 395 | swap_in_after_deduct_fee, 396 | pc_vault_amount.into(), 397 | coin_vault_amount.into(), 398 | swap_direction, 399 | ) 400 | .as_u64() 401 | } else { 402 | let swap_in_before_add_fee = 403 | raydium_amm::math::Calculator::swap_token_amount_base_out( 404 | amount_specified.into(), 405 | pc_vault_amount.into(), 406 | coin_vault_amount.into(), 407 | swap_direction, 408 | ); 409 | swap_in_before_add_fee 410 | .checked_mul(swap_fee_denominator.into()) 411 | .unwrap() 412 | .checked_ceil_div( 413 | (swap_fee_denominator 414 | .checked_sub(swap_fee_numerator) 415 | .unwrap()) 416 | .into(), 417 | ) 418 | .unwrap() 419 | .0 420 | .as_u64() 421 | } 422 | } 423 | 424 | pub fn unpack(data: &[u8]) -> Option 425 | where 426 | T: Clone, 427 | { 428 | let ret = unsafe { &*(&data[0] as *const u8 as *const T) }; 429 | Some(ret.clone()) 430 | } 431 | -------------------------------------------------------------------------------- /src/recovery.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use crate::shred_processor::FecSet; 4 | #[test] 5 | fn recovery_works() { 6 | let contents = std::fs::read_to_string("hanging_fec_sets.json") 7 | .expect("Failed to read hanging_fec_sets.json"); 8 | let fec_sets: Vec = serde_json::from_str(&contents).unwrap(); 9 | println!("FecSets hanging: {}", fec_sets.len()); 10 | 11 | 
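// hanging_fec_sets.json is written by ShredProcessor::dump_hanging_fec_sets
// once more than 1000 FEC sets sit incomplete; this test replays that
// dump so stuck sets can be inspected offline.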
fec_sets.iter().for_each(|fec_set| { 12 | println!("FecSet: {:?}", fec_set); 13 | }); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/service.rs: -------------------------------------------------------------------------------- 1 | use crate::arb::PoolsState; 2 | use crate::entry_processor::ArbEntryProcessor; 3 | use crate::entry_processor::PumpEntryProcessor; 4 | use crate::graduates_processor::GraduatesProcessor; 5 | use crate::listener::PACKET_SIZE; 6 | use crate::shred_processor::ShredProcessor; 7 | use log::{error, info}; 8 | use reqwest::Url; 9 | use std::sync::Arc; 10 | use tokio::net::UdpSocket; 11 | use tokio::sync::mpsc; 12 | use tokio::sync::RwLock; 13 | use tokio::time::{sleep, Duration}; 14 | 15 | pub enum Mode { 16 | Arb, 17 | Pump, 18 | Graduates, 19 | } 20 | 21 | pub async fn run( 22 | bind_address: String, 23 | post_url: String, 24 | mode: Mode, 25 | ) -> Result<(), Box> { 26 | Url::parse(&post_url)?; 27 | 28 | info!( 29 | "Starting listener on {}, sending to {}", 30 | bind_address, post_url 31 | ); 32 | 33 | let socket = Arc::new( 34 | UdpSocket::bind(bind_address) 35 | .await 36 | .expect("Couldn't bind to address"), 37 | ); 38 | let (entry_tx, entry_rx) = mpsc::channel(2000); 39 | let (error_tx, error_rx) = mpsc::channel(2000); 40 | let (sig_tx, mut sig_rx) = mpsc::channel(2000); 41 | 42 | let shred_processor = 43 | Arc::new(RwLock::new(ShredProcessor::new(entry_tx, error_tx))); 44 | 45 | // metrics loop 46 | info!("Starting metrics loop"); 47 | let shred_processor_clone = shred_processor.clone(); 48 | let metrics_handle = tokio::spawn(async move { 49 | loop { 50 | sleep(Duration::from_secs(6)).await; 51 | { 52 | let metrics = shred_processor_clone.read().await.metrics(); 53 | info!("metrics: {}", metrics); 54 | drop(metrics); 55 | } 56 | } 57 | }); 58 | 59 | info!("Starting sigs rx"); 60 | let sigs_handle = tokio::spawn(async move { 61 | while let Some(sig) = sig_rx.recv().await { 62 | let timestamp = chrono::Utc::now().timestamp_millis(); 63 | log::debug!("shreds: {} {}", timestamp, sig); 64 | } 65 | }); 66 | 67 | info!("Starting shred processor"); 68 | let mut buf = [0u8; PACKET_SIZE]; // max shred size 69 | let shred_processor = shred_processor.clone(); 70 | let shred_processor_handle = tokio::spawn(async move { 71 | loop { 72 | match socket.recv_from(&mut buf).await { 73 | Ok((received, _)) => { 74 | let packet = Vec::from(&buf[..received]); 75 | shred_processor 76 | .write() 77 | .await 78 | .collect(Arc::new(packet)) 79 | .await; 80 | } 81 | Err(e) => { 82 | error!("Error receiving packet: {:?}", e); 83 | } 84 | } 85 | } 86 | }); 87 | 88 | info!("Starting entry processor"); 89 | let entry_processor_handle = match mode { 90 | Mode::Arb => tokio::spawn(async move { 91 | info!("Arb mode"); 92 | let pools_state = Arc::new(RwLock::new(PoolsState::default())); 93 | pools_state.write().await.initialize().await; 94 | let mut entry_processor = ArbEntryProcessor::new( 95 | entry_rx, 96 | error_rx, 97 | pools_state.clone(), 98 | sig_tx, 99 | ); 100 | entry_processor.receive_entries().await; 101 | }), 102 | Mode::Pump => { 103 | info!("Pump mode"); 104 | tokio::spawn(async move { 105 | let mut entry_processor = PumpEntryProcessor::new( 106 | entry_rx, error_rx, sig_tx, post_url, 107 | ); 108 | entry_processor.receive_entries().await; 109 | }) 110 | } 111 | Mode::Graduates => { 112 | info!("Graduates mode"); 113 | tokio::spawn(async move { 114 | let mut entry_processor = 115 | GraduatesProcessor::new(entry_rx, error_rx, 
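// (same channel wiring as the Arb and Pump processors above:
// entries in, decode errors in, signatures out)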
sig_tx); 116 | entry_processor.receive_entries().await; 117 | }) 118 | } 119 | }; 120 | 121 | tokio::signal::ctrl_c().await?; 122 | 123 | info!("Shutting down"); 124 | 125 | for handle in [ 126 | metrics_handle, 127 | sigs_handle, 128 | shred_processor_handle, 129 | entry_processor_handle, 130 | ] { 131 | handle.abort(); 132 | } 133 | 134 | Ok(()) 135 | } 136 | -------------------------------------------------------------------------------- /src/shred.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use solana_ledger::shred::{ReedSolomonCache, ShredFlags, Shredder}; 3 | use std::collections::{HashMap, HashSet}; 4 | 5 | use crate::structs::ShredVariant; 6 | use log::{debug, error, info, trace, warn}; 7 | use solana_entry::entry::Entry; 8 | use solana_ledger::shred::{Error, Shred}; 9 | use solana_sdk::signature::SIGNATURE_BYTES; 10 | 11 | pub fn get_shred_variant(shred: &[u8]) -> Result { 12 | let Some(&shred_variant) = shred.get(OFFSET_OF_SHRED_VARIANT) else { 13 | return Err(Error::InvalidPayloadSize(shred.len())); 14 | }; 15 | ShredVariant::try_from(shred_variant) 16 | .map_err(|_| Error::InvalidShredVariant) 17 | } 18 | 19 | pub fn is_shred_data(raw_shred: &[u8]) -> bool { 20 | matches!( 21 | get_shred_variant(raw_shred), 22 | Ok(ShredVariant::LegacyData) | Ok(ShredVariant::MerkleData { .. }) 23 | ) 24 | } 25 | 26 | pub fn get_fec_set_index( 27 | raw_shred: &[u8], 28 | ) -> Result> { 29 | Ok(u32::from_le_bytes(raw_shred[0x4f..0x4f + 4].try_into()?)) 30 | } 31 | 32 | pub fn get_last_in_slot(raw_shred: &[u8]) -> bool { 33 | if is_shred_data(raw_shred) { 34 | let flags = raw_shred[0x55]; 35 | ShredFlags::from_bits(flags) 36 | .map(|f| f.contains(ShredFlags::LAST_SHRED_IN_SLOT)) 37 | .unwrap_or(false) 38 | } else { 39 | false 40 | } 41 | } 42 | 43 | pub struct CodingShredHeader { 44 | pub num_data_shreds: u16, 45 | pub num_coding_shreds: u16, 46 | pub position: u16, 47 | } 48 | 49 | pub fn get_coding_shred_header( 50 | raw_shred: &[u8], 51 | ) -> Result> { 52 | if is_shred_data(raw_shred) { 53 | return Err("Not a coding shred".into()); 54 | } 55 | 56 | Ok(CodingShredHeader { 57 | num_data_shreds: u16::from_le_bytes( 58 | raw_shred[0x53..0x55].try_into()?, 59 | ), 60 | num_coding_shreds: u16::from_le_bytes( 61 | raw_shred[0x55..0x57].try_into()?, 62 | ), 63 | position: u16::from_le_bytes(raw_shred[0x57..0x59].try_into()?), 64 | }) 65 | } 66 | 67 | pub fn get_shred_debug_string(shred: Shred) -> String { 68 | let (block_complete, batch_complete, batch_tick) = 69 | get_shred_data_flags(shred.payload()); 70 | format!( 71 | "{} {} shred: {} {} {} {}", 72 | shred.slot(), 73 | shred.index(), 74 | u16::from_le_bytes([shred.payload()[0x56], shred.payload()[0x57]]) 75 | as usize, // size 76 | block_complete, 77 | batch_complete, 78 | batch_tick 79 | ) 80 | } 81 | 82 | pub fn debug_shred(shred: Shred) { 83 | let size_in_header = 84 | u16::from_le_bytes([shred.payload()[0x56], shred.payload()[0x57]]) 85 | as usize; 86 | info!( 87 | "index: {}: payload: {}, size in header: {} zeros: {} variant: {:?}", 88 | shred.index(), 89 | shred.payload().len(), 90 | size_in_header, 91 | shred.payload().iter().filter(|&&b| b == 0).count(), 92 | get_shred_variant(shred.payload()).expect("shred variant"), 93 | ); 94 | } 95 | 96 | pub fn deserialize_shred(data: Vec) -> Result { 97 | Shred::new_from_serialized_shred(data) 98 | } 99 | 100 | pub fn deserialize_entries( 101 | payload: &[u8], 102 | ) -> Result, Box> { 103 | if payload.len() < 8 { 104 | 
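// The first 8 bytes of a deshredded payload are a little-endian u64
// entry count; a shorter buffer cannot even hold that prefix.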
error!("Payload too short: {} bytes", payload.len()); 105 | return Ok(Vec::new()); 106 | } 107 | 108 | let entry_count = u64::from_le_bytes( 109 | payload[0..8].try_into().expect("entry count parse"), 110 | ); 111 | if entry_count > 10_000 { 112 | return Err(format!("entry count: {}", entry_count).into()); 113 | } 114 | trace!("Entry count prefix: {}", entry_count); 115 | trace!("First 16 bytes of payload: {:?}", &payload[..16]); 116 | 117 | // SUPER CRUCIAL 118 | // you cannot just Ok(bincode::deserialize(&payload[8..])?) 119 | // since the entries are not serialized as a vec, just separate entries 120 | // each next to the other, took me too long to figure this out :P 121 | let mut entries = Vec::new(); 122 | let mut cursor = std::io::Cursor::new(&payload[8..]); 123 | for i in 0..entry_count { 124 | match bincode::deserialize_from::<_, Entry>(&mut cursor) { 125 | Ok(entry) => { 126 | entries.push(entry); 127 | } 128 | Err(e) => { 129 | error!( 130 | "Failed to deserialize entry {}/{}: {}", 131 | i, entry_count, e 132 | ); 133 | } 134 | } 135 | } 136 | 137 | Ok(entries) 138 | } 139 | 140 | const OFFSET_OF_SHRED_VARIANT: usize = SIGNATURE_BYTES; 141 | 142 | pub fn shred_data(shred: &Shred) -> Result<&[u8], Error> { 143 | let variant = 144 | ShredVariant::try_from(shred.payload()[OFFSET_OF_SHRED_VARIANT])?; 145 | let (data_start, size) = match variant { 146 | ShredVariant::MerkleData { .. } => { 147 | let size = u16::from_le_bytes([ 148 | shred.payload()[0x56], 149 | shred.payload()[0x57], 150 | ]) as usize; 151 | (0x58usize, size.saturating_sub(0x58)) 152 | } 153 | ShredVariant::LegacyData => { 154 | (0x56, shred.payload().len().saturating_sub(0x56)) 155 | } 156 | _ => return Err(Error::InvalidShredVariant), 157 | }; 158 | 159 | let data_end = data_start.saturating_add(size); 160 | if data_end > shred.payload().len() { 161 | return Err(Error::InvalidPayloadSize(shred.payload().len())); 162 | } 163 | Ok(&shred.payload()[data_start..data_end]) 164 | } 165 | 166 | pub fn load_shreds(raw_shreds: Vec>) -> HashMap> { 167 | let mut shreds_by_slot: HashMap> = HashMap::new(); 168 | for raw_shred in raw_shreds { 169 | if raw_shred.len() == 29 170 | || raw_shred.len() == 28 171 | || raw_shred.len() == 21 172 | { 173 | continue; 174 | } 175 | let shred = 176 | Shred::new_from_serialized_shred(raw_shred).expect("new shred"); 177 | shreds_by_slot.entry(shred.slot()).or_default().push(shred); 178 | } 179 | shreds_by_slot 180 | } 181 | 182 | pub fn preprocess_shreds(shreds: Vec) -> (Vec, Vec) { 183 | // split shreds into data and code shreds, coding are only used for recovery 184 | // only data shreds are later decoded 185 | let mut data_shreds = Vec::new(); 186 | let mut code_shreds = Vec::new(); 187 | for shred in shreds { 188 | if shred.is_data() { 189 | data_shreds.push(shred); 190 | } else if shred.is_code() { 191 | code_shreds.push(shred); 192 | } 193 | } 194 | // deduplicate data_shreads and sort by key 195 | let mut seen = HashSet::new(); 196 | data_shreds.retain(|shred| seen.insert(shred.index())); 197 | data_shreds.sort_by_key(|shred| shred.index()); 198 | (data_shreds, code_shreds) 199 | } 200 | 201 | pub fn debug_shred_sizes(raw_shreds: Vec>) { 202 | let mut shred_sizes = HashMap::new(); 203 | for shred in raw_shreds.iter() { 204 | *shred_sizes.entry(shred.len()).or_insert(0) += 1; 205 | } 206 | debug!("shred sizes {:?}", shred_sizes); 207 | } 208 | 209 | pub fn deshred(data_shreds: &[Shred]) -> Vec { 210 | data_shreds 211 | .iter() 212 | .flat_map(|shred| { 213 | shred_data(shred) 214 | 
.map(|data| data.to_vec()) 215 | .unwrap_or_default() 216 | }) 217 | .collect() 218 | } 219 | 220 | pub fn validate_and_try_repair( 221 | data_shreds: &[Shred], 222 | code_shreds: &[Shred], 223 | ) -> Result, Box> { 224 | let index = data_shreds.first().expect("first shred").index(); 225 | let aligned = 226 | data_shreds.iter().zip(index..).all(|(s, i)| s.index() == i); 227 | let data_complete = { 228 | let shred = data_shreds.last().expect("last shred"); 229 | shred.data_complete() || shred.last_in_slot() 230 | }; 231 | if !aligned || !data_complete { 232 | if data_shreds.is_empty() { 233 | return Err("No data shreds".into()); 234 | } 235 | if code_shreds.is_empty() { 236 | return Err("No code shreds".into()); 237 | } 238 | // find the missing indices 239 | let mut missing_indices = Vec::new(); 240 | let mut expected_index = index; 241 | for shred in data_shreds.iter() { 242 | while expected_index < shred.index() { 243 | missing_indices.push(expected_index); 244 | expected_index += 1; 245 | } 246 | expected_index += 1; 247 | } 248 | match missing_indices.len() <= code_shreds.len() { 249 | true => { 250 | warn!( 251 | "Missing indices: {:?}, trying to repair", 252 | missing_indices 253 | ); 254 | } 255 | false => { 256 | return Err(format!( 257 | "Too many missing indices: {:?}", 258 | missing_indices 259 | ) 260 | .into()); 261 | } 262 | } 263 | info!("code shreds len: {}", code_shreds.len()); 264 | let data_shreds = data_shreds.to_vec(); 265 | // TODO stupid clone for now 266 | let all_shreds = data_shreds 267 | .iter() 268 | .chain(code_shreds.iter()) 269 | .cloned() 270 | .collect::>(); 271 | let data_shreds = match Shredder::try_recovery( 272 | all_shreds, 273 | &ReedSolomonCache::default(), 274 | ) { 275 | Ok(data_shreds) => data_shreds, 276 | Err(e) => { 277 | error!("Failed to repair shreds: {}", e); 278 | return Err(e.into()); 279 | } 280 | }; 281 | let aligned = 282 | data_shreds.iter().zip(index..).all(|(s, i)| s.index() == i); 283 | let data_complete = { 284 | let shred = data_shreds.last().expect("last shred"); 285 | shred.data_complete() || shred.last_in_slot() 286 | }; 287 | if !aligned || !data_complete { 288 | return Err(format!( 289 | "Shreds aligned: {} complete: {}, repair no workerino", 290 | aligned, data_complete 291 | ) 292 | .into()); 293 | } 294 | } 295 | 296 | Ok(data_shreds.to_vec()) 297 | } 298 | 299 | pub fn get_shred_index( 300 | raw_shred: &[u8], 301 | ) -> Result> { 302 | Ok(u32::from_le_bytes(raw_shred[0x49..0x49 + 4].try_into()?)) 303 | } 304 | 305 | /// get_shred_is_last works for data shreds only 306 | pub fn get_shred_is_last( 307 | raw_shred: &[u8], 308 | ) -> Result> { 309 | match raw_shred.get(0x55) { 310 | Some(flags) => { 311 | let flags = ShredFlags::from_bits(*flags).expect("parse flags"); 312 | if flags.contains(ShredFlags::DATA_COMPLETE_SHRED) 313 | || flags.contains(ShredFlags::LAST_SHRED_IN_SLOT) 314 | { 315 | Ok(true) 316 | } else { 317 | Ok(false) 318 | } 319 | } 320 | None => Err("Error getting flags".into()), 321 | } 322 | } 323 | 324 | pub fn get_shred_data_flags(raw_shred: &[u8]) -> (bool, bool, u8) { 325 | let flags = raw_shred[0x55]; 326 | // Extract block_complete (bit 7) 327 | let block_complete = (flags & 0b1000_0000) != 0; 328 | 329 | // Extract batch_complete (bit 6) 330 | let batch_complete = (flags & 0b0100_0000) != 0; 331 | 332 | // Extract batch_tick (bits 0-5) 333 | let batch_tick = flags & 0b0011_1111; 334 | 335 | (block_complete, batch_complete, batch_tick) 336 | } 337 | 338 | pub fn get_expected_coding_shreds(n: usize) 
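// maps the data-shred count of an FEC set to the coding-shred count
// this codebase expects alongside it; sets with more than 32 data
// shreds are treated as symmetric (n data : n coding)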
-> usize { 339 | match n { 340 | 1 => 17, 341 | 2 => 18, 342 | 3 => 19, 343 | 4 => 19, 344 | 5 => 20, 345 | 6 => 21, 346 | 7 => 21, 347 | 8 => 22, 348 | 9 => 23, 349 | 10 => 23, 350 | 11 => 24, 351 | 12 => 24, 352 | 13 => 25, 353 | 14 => 25, 354 | 15 => 26, 355 | 16 => 26, 356 | 17 => 26, 357 | 18 => 27, 358 | 19 => 27, 359 | 20 => 28, 360 | 21 => 28, 361 | 22 => 29, 362 | 23 => 29, 363 | 24 => 29, 364 | 25 => 30, 365 | 26 => 30, 366 | 27 => 31, 367 | 28 => 31, 368 | 29 => 31, 369 | 30 => 32, 370 | 31 => 32, 371 | 32 => 32, 372 | n if n > 32 && n <= 67 => n, 373 | _ => 0, // Invalid case 374 | } 375 | } 376 | 377 | #[cfg(test)] 378 | mod tests { 379 | 380 | use super::*; 381 | use log::debug; 382 | 383 | #[test] 384 | fn deserialize_shreds() { 385 | crate::logger::setup(crate::logger::Target::Stdout).expect("logger"); 386 | 387 | let data = std::fs::read_to_string("packets.json") 388 | .expect("Failed to read packets.json"); 389 | let raw_shreds: Vec> = 390 | serde_json::from_str(&data).expect("Failed to parse JSON"); 391 | 392 | // debugging, useful 393 | { 394 | let shreds = raw_shreds 395 | .iter() 396 | .filter(|shred| shred.len() > 29) 397 | .map(|shred| deserialize_shred(shred.clone()).expect("shred")) 398 | .collect::>(); 399 | let mut shreds_by_slot = HashMap::new(); 400 | for shred in shreds.iter() { 401 | shreds_by_slot 402 | .entry(shred.slot()) 403 | .or_insert_with(Vec::new) 404 | .push(shred.clone()); 405 | } 406 | for (_, mut shreds) in shreds_by_slot { 407 | shreds = shreds 408 | .iter() 409 | .filter(|shred| shred.is_data()) 410 | .cloned() 411 | .collect::>(); 412 | shreds.sort_by_key(|shred| shred.index()); 413 | shreds.dedup_by_key(|shred| shred.index()); 414 | for shred in shreds { 415 | debug!("{}", get_shred_debug_string(shred)); 416 | } 417 | } 418 | } 419 | 420 | // Group shreds by slot 421 | let shreds_by_slot = load_shreds(raw_shreds); 422 | 423 | for (slot, shreds) in &shreds_by_slot { 424 | debug!("slot: {} shreds: {}", slot, shreds.len()); 425 | } 426 | 427 | // Process shreds for each slot 428 | for (slot, slot_shreds) in shreds_by_slot { 429 | let (data_shreds, code_shreds) = preprocess_shreds(slot_shreds); 430 | info!( 431 | "Processing slot: {} (data: {}, code: {})", 432 | slot, 433 | data_shreds.len(), 434 | code_shreds.len() 435 | ); 436 | let data_shreds = 437 | match validate_and_try_repair(&data_shreds, &code_shreds) { 438 | Ok(data_shreds) => data_shreds, 439 | Err(e) => { 440 | error!("Failed to validate and repair shreds: {}", e); 441 | continue; 442 | } 443 | }; 444 | 445 | assert!(!data_shreds.is_empty()); 446 | 447 | let deshredded_data = deshred(&data_shreds); 448 | 449 | debug!("Deshredded data size: {}", deshredded_data.len()); 450 | match deserialize_entries(&deshredded_data) { 451 | Ok(entries) => { 452 | info!( 453 | "Successfully deserialized {} entries", 454 | entries.len() 455 | ); 456 | } 457 | Err(e) => error!("Failed to deserialize entries: {}", e), 458 | } 459 | } 460 | } 461 | } 462 | -------------------------------------------------------------------------------- /src/shred_processor.rs: -------------------------------------------------------------------------------- 1 | use log::{error, info, warn}; 2 | use serde_json::json; 3 | use std::collections::{HashMap, HashSet}; 4 | use std::sync::Arc; 5 | use tokio::sync::mpsc; 6 | 7 | use solana_ledger::shred::{ 8 | layout, ReedSolomonCache, Shred, ShredId, Shredder, 9 | }; 10 | use solana_sdk::clock::Slot; 11 | 12 | use crate::entry_processor::EntriesWithMeta; 13 | use crate::shred::{ 
14 | deserialize_entries, deshred, get_coding_shred_header, get_fec_set_index, 15 | get_last_in_slot, get_shred_index, is_shred_data, CodingShredHeader, 16 | }; 17 | use serde::{Deserialize, Serialize}; 18 | 19 | pub const MAX_SHREDS_PER_SLOT: usize = 32_768 / 2; 20 | 21 | pub struct FecSetSuccess { 22 | pub slot: Slot, 23 | pub fec_set_index: u32, 24 | } 25 | 26 | #[derive(Serialize, Deserialize)] 27 | pub struct FecSet { 28 | pub data_shreds: HashMap>>, 29 | pub coding_shreds: HashMap>>, 30 | pub num_expected_data: Option, 31 | pub num_expected_coding: Option, 32 | pub is_last_in_slot: bool, 33 | pub processed: bool, 34 | } 35 | 36 | impl std::fmt::Debug for FecSet { 37 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 38 | f.debug_struct("FecSet") 39 | .field("data_shreds_count", &self.data_shreds.len()) 40 | .field("coding_shreds_count", &self.coding_shreds.len()) 41 | .field("num_expected_data", &self.num_expected_data) 42 | .field("num_expected_coding", &self.num_expected_coding) 43 | .field("is_last_in_slot", &self.is_last_in_slot) 44 | .field("processed", &self.processed) 45 | .finish() 46 | } 47 | } 48 | 49 | #[derive(Debug)] 50 | pub struct ShredProcessor { 51 | fec_sets: HashMap<(Slot, u32), FecSet>, // (slot, fec_set_index) -> FecSet 52 | uniqueness: HashSet, 53 | _handles: Vec>, 54 | entry_tx: mpsc::Sender, 55 | _error_tx: mpsc::Sender, 56 | total_collected_data: u128, 57 | total_processed_data: u128, 58 | total_collected_coding: u128, 59 | fec_set_success: u128, 60 | fec_set_failure: u128, 61 | } 62 | 63 | impl ShredProcessor { 64 | pub fn new( 65 | entry_tx: mpsc::Sender, 66 | error_tx: mpsc::Sender, 67 | ) -> Self { 68 | ShredProcessor { 69 | fec_sets: HashMap::new(), 70 | uniqueness: HashSet::new(), 71 | _handles: Vec::new(), 72 | entry_tx, 73 | _error_tx: error_tx, 74 | total_collected_data: 0, 75 | total_processed_data: 0, 76 | total_collected_coding: 0, 77 | fec_set_success: 0, 78 | fec_set_failure: 0, 79 | } 80 | } 81 | 82 | fn dump_hanging_fec_sets(&self) { 83 | let start = std::time::Instant::now(); 84 | let file = std::fs::File::create("hanging_fec_sets.json").unwrap(); 85 | let incomplete = self 86 | .fec_sets 87 | .values() 88 | .filter(|set| !Self::is_fec_set_complete(set)) 89 | .collect::>(); 90 | let serialized = serde_json::to_string_pretty(&incomplete).unwrap(); 91 | std::io::Write::write_all( 92 | &mut std::io::BufWriter::new(file), 93 | serialized.as_bytes(), 94 | ) 95 | .unwrap(); 96 | info!( 97 | "Dumped {} incomplete FEC sets in {}ms", 98 | incomplete.len(), 99 | start.elapsed().as_millis() 100 | ); 101 | } 102 | 103 | pub fn metrics(&self) -> String { 104 | let incomplete_count = self 105 | .fec_sets 106 | .values() 107 | .filter(|set| !Self::is_fec_set_complete(set)) 108 | .count(); 109 | let metrics = json!({ 110 | "total_collected_data": self.total_collected_data, 111 | "total_collected_coding": self.total_collected_coding, 112 | "total_processed_data": self.total_processed_data, 113 | "fec_set_success_count": self.fec_set_success, 114 | "fec_set_failure_count": self.fec_set_failure, 115 | "fec_sets_remaining": self.fec_sets.len(), 116 | "fec_sets_summary": { 117 | "total_count": self.fec_sets.len(), 118 | "incomplete_count": incomplete_count, 119 | } 120 | }); 121 | 122 | // dump only once (fishing for more testing data) 123 | if incomplete_count > 1000 124 | && !std::path::Path::new("hanging_fec_sets.json").exists() 125 | { 126 | self.dump_hanging_fec_sets(); 127 | } 128 | 129 | serde_json::to_string_pretty(&metrics) 130 | 
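// this is the blob the 6-second metrics loop in service::run logs;
// the fallback string keeps that loop alive if serialization fails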
.unwrap_or_else(|_| "Error serializing metrics".to_string()) 131 | } 132 | 133 | pub async fn insert(&mut self, slot: Slot, raw_shred: Arc>) { 134 | let is_data = is_shred_data(&raw_shred); 135 | let index = get_shred_index(&raw_shred).expect("get index"); 136 | let fec_set_index = 137 | get_fec_set_index(&raw_shred).expect("get fec set index"); 138 | 139 | let fec_set = self 140 | .fec_sets 141 | .entry((slot, fec_set_index)) 142 | .or_insert_with(|| FecSet { 143 | processed: false, 144 | data_shreds: HashMap::new(), 145 | coding_shreds: HashMap::new(), 146 | num_expected_data: None, 147 | num_expected_coding: None, 148 | is_last_in_slot: false, 149 | }); 150 | 151 | if is_data { 152 | self.total_collected_data += 1; 153 | fec_set.data_shreds.insert(index, raw_shred.clone()); 154 | fec_set.is_last_in_slot |= get_last_in_slot(&raw_shred); 155 | } else { 156 | self.total_collected_coding += 1; 157 | fec_set.coding_shreds.insert(index, raw_shred.clone()); 158 | // Update expected counts from coding shred 159 | if fec_set.num_expected_data.is_none() 160 | || fec_set.num_expected_coding.is_none() 161 | { 162 | if let Ok(CodingShredHeader { 163 | num_data_shreds, 164 | num_coding_shreds, 165 | .. 166 | }) = get_coding_shred_header(&raw_shred) 167 | { 168 | fec_set.num_expected_data = Some(num_data_shreds); 169 | fec_set.num_expected_coding = Some(num_coding_shreds); 170 | } 171 | } 172 | } 173 | 174 | if Self::is_fec_set_complete(fec_set) { 175 | self.process_fec_set(slot, fec_set_index).await; 176 | } 177 | } 178 | 179 | fn is_fec_set_complete(fec_set: &FecSet) -> bool { 180 | if let (Some(expected_data), Some(expected_coding)) = 181 | (fec_set.num_expected_data, fec_set.num_expected_coding) 182 | { 183 | let total_shreds = 184 | fec_set.data_shreds.len() + fec_set.coding_shreds.len(); 185 | let total_expected = 186 | expected_data as usize + expected_coding as usize; 187 | 188 | fec_set.data_shreds.len() == expected_data as usize 189 | || total_shreds >= total_expected 190 | } else { 191 | false 192 | } 193 | } 194 | 195 | async fn process_fec_set(&mut self, slot: Slot, fec_set_index: u32) { 196 | let fec_set = match self.fec_sets.get_mut(&(slot, fec_set_index)) { 197 | Some(set) => set, 198 | None => return, 199 | }; 200 | 201 | if fec_set.processed { 202 | return; 203 | } 204 | 205 | let expected_data_shreds = 206 | fec_set.num_expected_data.unwrap_or(1) as usize; 207 | let mut data_shreds: Vec = fec_set 208 | .data_shreds 209 | .values() 210 | .filter_map(|raw_shred| { 211 | Shred::new_from_serialized_shred(raw_shred.to_vec()).ok() 212 | }) 213 | .collect(); 214 | 215 | if data_shreds.len() < expected_data_shreds { 216 | let coding_shreds: Vec = fec_set 217 | .coding_shreds 218 | .values() 219 | .filter_map(|raw_shred| { 220 | Shred::new_from_serialized_shred(raw_shred.to_vec()).ok() 221 | }) 222 | .collect(); 223 | 224 | info!("Attempting to recover missing data shreds for slot {} FEC set {}", slot, fec_set_index); 225 | match Shredder::try_recovery( 226 | data_shreds 227 | .iter() 228 | .chain(coding_shreds.iter()) 229 | .cloned() 230 | .collect(), 231 | &ReedSolomonCache::default(), 232 | ) { 233 | Ok(recovered_shreds) => { 234 | info!( 235 | "Recovered {} data shreds for slot {} FEC set {}", 236 | recovered_shreds.len(), 237 | slot, 238 | fec_set_index 239 | ); 240 | data_shreds.extend( 241 | recovered_shreds.into_iter().filter(|s| s.is_data()), 242 | ); 243 | } 244 | Err(e) => { 245 | warn!("Failed to recover data shreds for slot {} FEC set {}: {:?}", 246 | slot, fec_set_index, e); 
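// recovery failure is not fatal: processing continues with the
// data shreds that did arrive, and the emptiness check below
// decides whether this FEC set is abandoned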
247 | } 248 | } 249 | } 250 | 251 | if data_shreds.is_empty() { 252 | error!( 253 | "No valid data shreds found for slot {} FEC set {}", 254 | slot, fec_set_index 255 | ); 256 | return; 257 | } 258 | 259 | data_shreds.sort_by_key(|shred| shred.index()); 260 | let deshredded_data = deshred(&data_shreds); 261 | 262 | match deserialize_entries(&deshredded_data) { 263 | Ok(entries) => { 264 | self.fec_set_success += 1; 265 | self.total_processed_data += data_shreds.len() as u128; 266 | fec_set.processed = true; 267 | self.fec_sets.remove(&(slot, fec_set_index)); 268 | if let Err(e) = self 269 | .entry_tx 270 | .send(EntriesWithMeta { entries, slot }) 271 | .await 272 | { 273 | error!( 274 | "Failed to send entries for slot {} FEC set {}: {:?}", 275 | slot, fec_set_index, e 276 | ); 277 | } 278 | } 279 | Err(e) => { 280 | self.fec_set_failure += 1; 281 | error!("Failed to deserialize entries for slot {} FEC set {}: {:?}", 282 | slot, fec_set_index, e); 283 | } 284 | } 285 | } 286 | 287 | pub async fn collect(&mut self, raw_shred: Arc>) { 288 | if raw_shred.len() < 0x58 { 289 | return; 290 | } 291 | match layout::get_shred_id(&raw_shred) { 292 | Some(shred_id) => { 293 | if !self.uniqueness.insert(shred_id) { 294 | return; 295 | } 296 | self.insert(shred_id.slot(), raw_shred.clone()).await; 297 | } 298 | None => { 299 | error!("Error getting shred id"); 300 | } 301 | } 302 | } 303 | } 304 | 305 | #[cfg(test)] 306 | mod tests { 307 | use super::*; 308 | use crate::arb::PoolsState; 309 | use crate::entry_processor::ArbEntryProcessor; 310 | use crate::pump::PumpCreateIx; 311 | use borsh::BorshDeserialize; 312 | use log::info; 313 | use tokio::sync::RwLock; 314 | 315 | #[tokio::test] 316 | async fn processor_works() { 317 | dotenv::dotenv().ok(); 318 | // env_logger::Builder::default() 319 | // .filter_level(log::LevelFilter::Info) 320 | // .init(); 321 | 322 | let data = std::fs::read_to_string("packets.json") 323 | .expect("Failed to read packets.json"); 324 | let raw_shreds: Vec> = 325 | serde_json::from_str(&data).expect("Failed to parse JSON"); 326 | 327 | let (entry_tx, entry_rx) = mpsc::channel(2000); 328 | let (error_tx, error_rx) = mpsc::channel(2000); 329 | let (sig_tx, mut sig_rx) = mpsc::channel(2000); 330 | 331 | tokio::spawn(async move { 332 | while let Some(sig) = sig_rx.recv().await { 333 | let timestamp = chrono::Utc::now().timestamp_millis(); 334 | log::debug!("shreds: {} {}", timestamp, sig); 335 | } 336 | }); 337 | 338 | let mut processor = ShredProcessor::new(entry_tx, error_tx); 339 | for raw_shred in raw_shreds { 340 | processor.collect(Arc::new(raw_shred)).await; 341 | } 342 | 343 | tokio::spawn(async move { 344 | let pools_state = Arc::new(RwLock::new(PoolsState::default())); 345 | pools_state.write().await.initialize().await; 346 | let mut entry_processor = ArbEntryProcessor::new( 347 | entry_rx, 348 | error_rx, 349 | pools_state.clone(), 350 | sig_tx, 351 | ); 352 | entry_processor.receive_entries().await; 353 | }); 354 | 355 | for handle in processor._handles.drain(..) 
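// note: ShredProcessor::new never pushes into _handles, so this
// drain loop is currently a no-op kept for future spawned workers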
{ 356 | handle.await.expect("Failed to process batch"); 357 | } 358 | 359 | info!("{}", processor.metrics()); 360 | } 361 | 362 | #[test] 363 | fn deserialize_pump_create_ix() { 364 | let bytes = vec![ 365 | 0x18, 0x1e, 0xc8, 0x28, 0x05, 0x1c, 0x07, 0x77, 0x05, 0x00, 0x00, 366 | 0x00, 0x42, 0x52, 0x4f, 0x4b, 0x45, 0x03, 0x00, 0x00, 0x00, 0x42, 367 | 0x52, 0x4b, 0x43, 0x00, 0x00, 0x00, 0x68, 0x74, 0x74, 0x70, 0x73, 368 | 0x3a, 0x2f, 0x2f, 0x69, 0x70, 0x66, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 369 | 0x69, 0x70, 0x66, 0x73, 0x2f, 0x51, 0x6d, 0x5a, 0x77, 0x65, 0x7a, 370 | 0x4d, 0x7a, 0x43, 0x63, 0x35, 0x63, 0x35, 0x35, 0x71, 0x68, 0x71, 371 | 0x72, 0x55, 0x71, 0x34, 0x63, 0x31, 0x4e, 0x41, 0x4c, 0x6e, 0x78, 372 | 0x78, 0x31, 0x67, 0x32, 0x5a, 0x76, 0x4b, 0x36, 0x33, 0x64, 0x46, 373 | 0x38, 0x35, 0x6b, 0x32, 0x77, 0x31, 0x50, 374 | ]; 375 | println!("size: {}", bytes.len()); 376 | let create = PumpCreateIx::try_from_slice(&bytes).unwrap(); 377 | println!("{:?}", create); 378 | } 379 | } 380 | -------------------------------------------------------------------------------- /src/structs.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use solana_ledger::shred::{Error, ShredType}; 3 | use std::convert::TryFrom; 4 | 5 | #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] 6 | #[serde(into = "u8", try_from = "u8")] 7 | pub enum ShredVariant { 8 | LegacyCode, // 0b0101_1010 9 | LegacyData, // 0b1010_0101 10 | // proof_size is the number of Merkle proof entries, and is encoded in the 11 | // lowest 4 bits of the binary representation. The first 4 bits identify 12 | // the shred variant: 13 | // 0b0100_???? MerkleCode 14 | // 0b0110_???? MerkleCode chained 15 | // 0b0111_???? MerkleCode chained resigned 16 | // 0b1000_???? MerkleData 17 | // 0b1001_???? MerkleData chained 18 | // 0b1011_???? MerkleData chained resigned 19 | MerkleCode { 20 | proof_size: u8, 21 | chained: bool, 22 | resigned: bool, 23 | }, // 0b01??_???? 24 | MerkleData { 25 | proof_size: u8, 26 | chained: bool, 27 | resigned: bool, 28 | }, // 0b10??_???? 29 | } 30 | 31 | impl From for ShredType { 32 | #[inline] 33 | fn from(shred_variant: ShredVariant) -> Self { 34 | match shred_variant { 35 | ShredVariant::LegacyCode => ShredType::Code, 36 | ShredVariant::LegacyData => ShredType::Data, 37 | ShredVariant::MerkleCode { .. } => ShredType::Code, 38 | ShredVariant::MerkleData { .. 
} => ShredType::Data, 39 | } 40 | } 41 | } 42 | 43 | impl From for u8 { 44 | fn from(shred_variant: ShredVariant) -> u8 { 45 | match shred_variant { 46 | ShredVariant::LegacyCode => 0x5a, 47 | ShredVariant::LegacyData => 0xa5, 48 | ShredVariant::MerkleCode { 49 | proof_size, 50 | chained: false, 51 | resigned: false, 52 | } => proof_size | 0x40, 53 | ShredVariant::MerkleCode { 54 | proof_size, 55 | chained: true, 56 | resigned: false, 57 | } => proof_size | 0x60, 58 | ShredVariant::MerkleCode { 59 | proof_size, 60 | chained: true, 61 | resigned: true, 62 | } => proof_size | 0x70, 63 | ShredVariant::MerkleData { 64 | proof_size, 65 | chained: false, 66 | resigned: false, 67 | } => proof_size | 0x80, 68 | ShredVariant::MerkleData { 69 | proof_size, 70 | chained: true, 71 | resigned: false, 72 | } => proof_size | 0x90, 73 | ShredVariant::MerkleData { 74 | proof_size, 75 | chained: true, 76 | resigned: true, 77 | } => proof_size | 0xb0, 78 | ShredVariant::MerkleCode { 79 | proof_size: _, 80 | chained: false, 81 | resigned: true, 82 | } 83 | | ShredVariant::MerkleData { 84 | proof_size: _, 85 | chained: false, 86 | resigned: true, 87 | } => panic!("Invalid shred variant: {shred_variant:?}"), 88 | } 89 | } 90 | } 91 | 92 | impl TryFrom for ShredVariant { 93 | type Error = Error; 94 | fn try_from(shred_variant: u8) -> Result { 95 | if shred_variant == u8::from(ShredType::Code) { 96 | Ok(ShredVariant::LegacyCode) 97 | } else if shred_variant == u8::from(ShredType::Data) { 98 | Ok(ShredVariant::LegacyData) 99 | } else { 100 | let proof_size = shred_variant & 0x0F; 101 | match shred_variant & 0xF0 { 102 | 0x40 => Ok(ShredVariant::MerkleCode { 103 | proof_size, 104 | chained: false, 105 | resigned: false, 106 | }), 107 | 0x60 => Ok(ShredVariant::MerkleCode { 108 | proof_size, 109 | chained: true, 110 | resigned: false, 111 | }), 112 | 0x70 => Ok(ShredVariant::MerkleCode { 113 | proof_size, 114 | chained: true, 115 | resigned: true, 116 | }), 117 | 0x80 => Ok(ShredVariant::MerkleData { 118 | proof_size, 119 | chained: false, 120 | resigned: false, 121 | }), 122 | 0x90 => Ok(ShredVariant::MerkleData { 123 | proof_size, 124 | chained: true, 125 | resigned: false, 126 | }), 127 | 0xb0 => Ok(ShredVariant::MerkleData { 128 | proof_size, 129 | chained: true, 130 | resigned: true, 131 | }), 132 | other => { 133 | println!("unknown: {:#x}", other); 134 | Err(Error::InvalidShredVariant) 135 | } 136 | } 137 | } 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | use solana_sdk::pubkey::Pubkey; 3 | use std::str::FromStr; 4 | 5 | pub fn env(key: &str) -> String { 6 | std::env::var(key).unwrap_or_else(|_| { 7 | panic!("{} env var not set", key); 8 | }) 9 | } 10 | /// Helper function for pubkey serialize 11 | pub fn pubkey_to_string( 12 | pubkey: &Pubkey, 13 | serializer: S, 14 | ) -> Result 15 | where 16 | S: serde::Serializer, 17 | { 18 | serializer.serialize_str(&pubkey.to_string()) 19 | } 20 | 21 | /// Helper function for pubkey deserialize 22 | pub fn string_to_pubkey<'de, D>(deserializer: D) -> Result 23 | where 24 | D: serde::Deserializer<'de>, 25 | { 26 | let s = String::deserialize(deserializer)?; 27 | Pubkey::from_str(&s).map_err(serde::de::Error::custom) 28 | } 29 | --------------------------------------------------------------------------------
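The pubkey helpers above are meant to be plugged into serde's field attributes. A minimal sketch of the intended usage, assuming a caller-side struct (`PoolRef` here is hypothetical, made up for illustration):

use serde::{Deserialize, Serialize};
use solana_sdk::pubkey::Pubkey;

use crate::util::{pubkey_to_string, string_to_pubkey};

// Hypothetical struct: the pubkey round-trips through its base58
// string form in JSON instead of its default serde representation.
#[derive(Serialize, Deserialize)]
struct PoolRef {
    #[serde(
        serialize_with = "pubkey_to_string",
        deserialize_with = "string_to_pubkey"
    )]
    amm_pool: Pubkey,
}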