├── .env.example ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── diesel.toml ├── migrations ├── .keep ├── 2024-08-11-151101_create_miners │ ├── down.sql │ └── up.sql ├── 2024-08-11-235837_create_pools │ ├── down.sql │ └── up.sql ├── 2024-08-12-000144_create_challenges │ ├── down.sql │ └── up.sql ├── 2024-08-12-002816_create_submissions │ ├── down.sql │ └── up.sql ├── 2024-08-12-003544_create_txns │ ├── down.sql │ └── up.sql ├── 2024-08-12-004131_create_claims │ ├── down.sql │ └── up.sql ├── 2024-08-13-014749_create_rewards │ ├── down.sql │ └── up.sql ├── 2024-08-14-194732_add_digest_to_submissions │ ├── down.sql │ └── up.sql ├── 2024-08-14-235001_create_earnings │ ├── down.sql │ └── up.sql ├── 2024-08-15-041437_idx_challenge │ ├── down.sql │ └── up.sql ├── 2024-08-15-041901_idx_miner_pubkey │ ├── down.sql │ └── up.sql ├── 2024-08-15-042349_idx_submissions_nonce │ ├── down.sql │ └── up.sql ├── 2024-08-15-234722_challenges_challenge_unique │ ├── down.sql │ └── up.sql ├── 2024-08-22-223217_drop_redundent_idx_challenge │ ├── down.sql │ └── up.sql ├── 2024-09-18-045831_idx_submissions_miner_id │ ├── down.sql │ └── up.sql ├── 2024-09-18-053220_idx_submissions_challenge_id │ ├── down.sql │ └── up.sql ├── 2024-09-18-053428_idx_submissions_miner_challenge │ ├── down.sql │ └── up.sql ├── 2024-09-20-162626_submissions_id_to_bigint │ ├── down.sql │ └── up.sql ├── 2024-09-21-202059_earnings_add_created_at_day_generated_column │ ├── down.sql │ └── up.sql ├── 2024-09-21-203337_idx_earnings_miner_id_created_at_day │ ├── down.sql │ └── up.sql ├── 2024-10-06-044121_idx_rewards_miner_id │ ├── down.sql │ └── up.sql ├── 2024-10-06-185750_miner_pubkey_unique │ ├── down.sql │ └── up.sql ├── 2024-10-06-190014_reward_miner_id_unique │ ├── down.sql │ └── up.sql ├── 2024-10-18-203518_create_stake_accounts │ ├── down.sql │ └── up.sql ├── 2024-10-19-235653_stake_accounts_stake_pda_unique │ ├── down.sql │ └── up.sql ├── 2024-12-03-162732_earnings_id_to_bigint │ ├── down.sql │ └── up.sql ├── 
2025-03-01-234736_submissions_2 │ ├── down.sql │ └── up.sql ├── 2025-03-02-015151_drop_submissions │ ├── down.sql │ └── up.sql └── 2025-03-02-015415_drop_earnings │ ├── down.sql │ └── up.sql ├── rust-toolchain.toml ├── src ├── app_database.rs ├── app_metrics.rs ├── app_rr_database.rs ├── global_boost_util.rs ├── main.rs ├── message.rs ├── models.rs ├── ore_utils.rs ├── proof_migration.rs ├── routes.rs ├── schema.rs ├── scripts.rs └── systems │ ├── app_metrics_system.rs │ ├── cache_update_system.rs │ ├── claim_system.rs │ ├── client_message_handler_system.rs │ ├── client_submissions_handler.rs │ ├── handle_ready_clients_system.rs │ ├── message_text_all_clients_system.rs │ ├── mod.rs │ ├── pong_tracking_system.rs │ ├── pool_mine_success_system.rs │ ├── pool_submission_system.rs │ └── proof_tracking_system.rs └── whitelist.txt.example /.env.example: -------------------------------------------------------------------------------- 1 | WALLET_PATH = "~/.config/solana/id.json" 2 | FEE_WALLET_PATH = "~/.config/solana/fee_id.json" 3 | RPC_URL = "RPC_URL_HERE" 4 | RPC_2_URL = "RPC_2_URL_HERE" 5 | RPC_WS_URL = "RPC_WS_URL_HERE" 6 | PASSWORD = "password" 7 | DATABASE_URL = "DATABASE_URL_HERE" 8 | DATABASE_RR_URL = "DATABASE_READ_REPLICA_URL_HERE" 9 | METRICS_URL = "" 10 | METRICS_TOKEN = "" 11 | METRICS_ORG = "" 12 | METRICS_BUCKET = "" 13 | METRICS_HOST = "" 14 | 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 3 | .env 4 | 5 | whitelist.txt 6 | 7 | /logs 8 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ore-hq-server" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | axum = { version = "0.8.3", features = ["ws"] } 8 | axum-extra = { version = "0.10.1", 
features = ["typed-header"] } 9 | bytemuck = "1.14.3" 10 | drillx = "2.0.0" 11 | futures = "0.3.30" 12 | futures-util = { version = "0.3.30", default-features = false, features = ["sink", "std"] } 13 | ore-api = "3.6.0" 14 | ore-boost-api = "4.0.0" 15 | ore-miner-delegation = { version = "0.10.0", features = ["no-entrypoint"]} 16 | serde = { version = "1.0.204", features = ["derive"] } 17 | serde_json = "1.0.122" 18 | solana-sdk = "=2.1" 19 | solana-client = "=2.1" 20 | solana-program = "=2.1" 21 | solana-account-decoder = "=2.1" 22 | solana-transaction-status = "=2.1" 23 | spl-associated-token-account = { version = "^6", features = ["no-entrypoint"] } 24 | spl-token = { version = "^4", features = ["no-entrypoint"] } 25 | tokio = { version = "1.44.2", features = ["full", "rt-multi-thread"] } 26 | tower = { version = "0.5.2", features = ["util"] } 27 | tower-http = { version = "0.6.2", features = ["trace", "cors"] } 28 | tracing = "0.1.40" 29 | tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } 30 | dotenv = "0.15.0" 31 | bincode = "1.3.3" 32 | crossbeam-channel = "0.5.13" 33 | rand = "0.8.5" 34 | clap = { version = "4.5.14", features = ["derive"] } 35 | diesel = { version = "2.2.3", features = ["mysql", "chrono"] } 36 | chrono = "0.4.38" 37 | deadpool-diesel = { version = "0.6.1", features = ["mysql"] } 38 | base64 = "0.22.1" 39 | tracing-appender = "0.2.3" 40 | uuid = { version = "1.10.0", features = ["v4"] } 41 | steel = { features = ["spl"], version = "4.0" } 42 | reqwest = "0.11" 43 | sysinfo = "0.32.0" 44 | 45 | -------------------------------------------------------------------------------- /diesel.toml: -------------------------------------------------------------------------------- 1 | # For documentation on how to configure this file, 2 | # see https://diesel.rs/guides/configuring-diesel-cli 3 | 4 | [print_schema] 5 | file = "src/schema.rs" 6 | custom_type_derives = ["diesel::query_builder::QueryId", "Clone"] 7 | 8 | 
[migrations_directory] 9 | dir = "./migrations" 10 | -------------------------------------------------------------------------------- /migrations/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kriptikz/ore-hq-server/1620c4fb3c1287907aa5147937af01c17e54f1a5/migrations/.keep -------------------------------------------------------------------------------- /migrations/2024-08-11-151101_create_miners/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE miners 2 | -------------------------------------------------------------------------------- /migrations/2024-08-11-151101_create_miners/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE miners ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | pubkey VARCHAR(44) NOT NULL, 4 | enabled BOOL DEFAULT false NOT NULL, 5 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 6 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 7 | ) 8 | -------------------------------------------------------------------------------- /migrations/2024-08-11-235837_create_pools/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE pools 2 | -------------------------------------------------------------------------------- /migrations/2024-08-11-235837_create_pools/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE pools ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | proof_pubkey VARCHAR(44) NOT NULL, 4 | authority_pubkey VARCHAR(44) NOT NULL, 5 | total_rewards BIGINT UNSIGNED DEFAULT 0 NOT NULL, 6 | claimed_rewards BIGINT UNSIGNED DEFAULT 0 NOT NULL, 7 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 8 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 9 | ) 10 | 
-------------------------------------------------------------------------------- /migrations/2024-08-12-000144_create_challenges/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE challenges 2 | -------------------------------------------------------------------------------- /migrations/2024-08-12-000144_create_challenges/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE challenges ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | pool_id INT NOT NULL, 4 | submission_id INT, 5 | challenge BINARY(32) NOT NULL, 6 | rewards_earned BIGINT UNSIGNED DEFAULT 0, 7 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 8 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 9 | ) 10 | -------------------------------------------------------------------------------- /migrations/2024-08-12-002816_create_submissions/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE submissions 2 | -------------------------------------------------------------------------------- /migrations/2024-08-12-002816_create_submissions/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE submissions ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | miner_id INT NOT NULL, 4 | challenge_id INT NOT NULL, 5 | difficulty TINYINT NOT NULL, 6 | nonce BIGINT UNSIGNED NOT NULL, 7 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 8 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 9 | ) 10 | -------------------------------------------------------------------------------- /migrations/2024-08-12-003544_create_txns/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE txns 2 | -------------------------------------------------------------------------------- 
/migrations/2024-08-12-003544_create_txns/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE txns ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | txn_type VARCHAR(15) NOT NULL, 4 | signature VARCHAR(200) NOT NULL, 5 | priority_fee INT UNSIGNED DEFAULT 0 NOT NULL, 6 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 7 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 8 | ) 9 | -------------------------------------------------------------------------------- /migrations/2024-08-12-004131_create_claims/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE claims 2 | -------------------------------------------------------------------------------- /migrations/2024-08-12-004131_create_claims/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE claims ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | miner_id INT NOT NULL, 4 | pool_id INT NOT NULL, 5 | txn_id INT NOT NULL, 6 | amount BIGINT UNSIGNED NOT NULL, 7 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 8 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 9 | ) 10 | -------------------------------------------------------------------------------- /migrations/2024-08-13-014749_create_rewards/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE rewards 2 | -------------------------------------------------------------------------------- /migrations/2024-08-13-014749_create_rewards/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE rewards ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | miner_id INT NOT NULL, 4 | pool_id INT NOT NULL, 5 | balance BIGINT UNSIGNED DEFAULT 0 NOT NULL, 6 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 7 | updated_at 
TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 8 | ) 9 | -------------------------------------------------------------------------------- /migrations/2024-08-14-194732_add_digest_to_submissions/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE submissions DROP COLUMN digest 2 | -------------------------------------------------------------------------------- /migrations/2024-08-14-194732_add_digest_to_submissions/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE submissions ADD COLUMN digest BINARY(16) 2 | -------------------------------------------------------------------------------- /migrations/2024-08-14-235001_create_earnings/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE earnings 2 | -------------------------------------------------------------------------------- /migrations/2024-08-14-235001_create_earnings/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE earnings ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | miner_id INT NOT NULL, 4 | pool_id INT NOT NULL, 5 | challenge_id INT NOT NULL, 6 | amount BIGINT UNSIGNED DEFAULT 0 NOT NULL, 7 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 8 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 9 | ) 10 | -------------------------------------------------------------------------------- /migrations/2024-08-15-041437_idx_challenge/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE challenges DROP INDEX idx_challenge 2 | -------------------------------------------------------------------------------- /migrations/2024-08-15-041437_idx_challenge/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX idx_challenge ON 
challenges (challenge) 2 | -------------------------------------------------------------------------------- /migrations/2024-08-15-041901_idx_miner_pubkey/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE miners DROP INDEX idx_miner_pubkey 2 | -------------------------------------------------------------------------------- /migrations/2024-08-15-041901_idx_miner_pubkey/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX idx_miner_pubkey ON miners (pubkey) 2 | -------------------------------------------------------------------------------- /migrations/2024-08-15-042349_idx_submissions_nonce/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE submissions DROP INDEX idx_submissions_nonce 2 | -------------------------------------------------------------------------------- /migrations/2024-08-15-042349_idx_submissions_nonce/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX idx_submissions_nonce ON submissions (nonce) 2 | -------------------------------------------------------------------------------- /migrations/2024-08-15-234722_challenges_challenge_unique/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE challenges DROP INDEX uc_challenges 2 | -------------------------------------------------------------------------------- /migrations/2024-08-15-234722_challenges_challenge_unique/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE challenges ADD CONSTRAINT uc_challenges UNIQUE (challenge) 2 | -------------------------------------------------------------------------------- /migrations/2024-08-22-223217_drop_redundent_idx_challenge/down.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX 
idx_challenge ON challenges (challenge) 2 | -------------------------------------------------------------------------------- /migrations/2024-08-22-223217_drop_redundent_idx_challenge/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE challenges DROP INDEX idx_challenge 2 | -------------------------------------------------------------------------------- /migrations/2024-09-18-045831_idx_submissions_miner_id/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE submissions DROP INDEX idx_submissions_miner_id 2 | -------------------------------------------------------------------------------- /migrations/2024-09-18-045831_idx_submissions_miner_id/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX idx_submissions_miner_id ON submissions (miner_id) 2 | -------------------------------------------------------------------------------- /migrations/2024-09-18-053220_idx_submissions_challenge_id/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE submissions DROP INDEX idx_submissions_challenge_id 2 | -------------------------------------------------------------------------------- /migrations/2024-09-18-053220_idx_submissions_challenge_id/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX idx_submissions_challenge_id ON submissions (challenge_id) 2 | -------------------------------------------------------------------------------- /migrations/2024-09-18-053428_idx_submissions_miner_challenge/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE submissions DROP INDEX idx_submissions_miner_challenge 2 | -------------------------------------------------------------------------------- /migrations/2024-09-18-053428_idx_submissions_miner_challenge/up.sql: 
-------------------------------------------------------------------------------- 1 | CREATE INDEX idx_submissions_miner_challenge ON submissions (miner_id, challenge_id) 2 | -------------------------------------------------------------------------------- /migrations/2024-09-20-162626_submissions_id_to_bigint/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE submissions MODIFY COLUMN id INT NOT NULL AUTO_INCREMENT 2 | -------------------------------------------------------------------------------- /migrations/2024-09-20-162626_submissions_id_to_bigint/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE submissions MODIFY COLUMN id BIGINT NOT NULL AUTO_INCREMENT 2 | -------------------------------------------------------------------------------- /migrations/2024-09-21-202059_earnings_add_created_at_day_generated_column/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE earnings DROP COLUMN created_at_day 2 | -------------------------------------------------------------------------------- /migrations/2024-09-21-202059_earnings_add_created_at_day_generated_column/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE earnings ADD COLUMN created_at_day DATE generated always AS (DATE(created_at)) 2 | -------------------------------------------------------------------------------- /migrations/2024-09-21-203337_idx_earnings_miner_id_created_at_day/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE earnings DROP INDEX idx_earnings_miner_id_created_at_day 2 | -------------------------------------------------------------------------------- /migrations/2024-09-21-203337_idx_earnings_miner_id_created_at_day/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX 
idx_earnings_miner_id_created_at_day ON earnings (miner_id, created_at_day) 2 | -------------------------------------------------------------------------------- /migrations/2024-10-06-044121_idx_rewards_miner_id/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rewards DROP INDEX idx_rewards_miner_id 2 | -------------------------------------------------------------------------------- /migrations/2024-10-06-044121_idx_rewards_miner_id/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX idx_rewards_miner_id ON rewards (miner_id) 2 | -------------------------------------------------------------------------------- /migrations/2024-10-06-185750_miner_pubkey_unique/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE miners DROP INDEX uc_miner_pubkey 2 | -------------------------------------------------------------------------------- /migrations/2024-10-06-185750_miner_pubkey_unique/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE miners ADD CONSTRAINT uc_miner_pubkey UNIQUE (pubkey) 2 | -------------------------------------------------------------------------------- /migrations/2024-10-06-190014_reward_miner_id_unique/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rewards DROP INDEX uc_reward_miner_id 2 | -------------------------------------------------------------------------------- /migrations/2024-10-06-190014_reward_miner_id_unique/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rewards ADD CONSTRAINT uc_reward_miner_id UNIQUE (miner_id) 2 | -------------------------------------------------------------------------------- /migrations/2024-10-18-203518_create_stake_accounts/down.sql: 
-------------------------------------------------------------------------------- 1 | DROP TABLE stake_accounts 2 | -------------------------------------------------------------------------------- /migrations/2024-10-18-203518_create_stake_accounts/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE stake_accounts ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | pool_id INT NOT NULL, 4 | mint_pubkey VARCHAR(44) NOT NULL, 5 | staker_pubkey VARCHAR(44) NOT NULL, 6 | stake_pda VARCHAR(44) NOT NULL, 7 | rewards_balance BIGINT UNSIGNED DEFAULT 0 NOT NULL, 8 | staked_balance BIGINT UNSIGNED DEFAULT 0 NOT NULL, 9 | total_rewards_earned BIGINT UNSIGNED DEFAULT 0 NOT NULL, 10 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 11 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 12 | ) 13 | -------------------------------------------------------------------------------- /migrations/2024-10-19-235653_stake_accounts_stake_pda_unique/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE stake_accounts DROP INDEX u_stake_pda 2 | -------------------------------------------------------------------------------- /migrations/2024-10-19-235653_stake_accounts_stake_pda_unique/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE stake_accounts ADD CONSTRAINT u_stake_pda UNIQUE (stake_pda) 2 | -------------------------------------------------------------------------------- /migrations/2024-12-03-162732_earnings_id_to_bigint/down.sql: -------------------------------------------------------------------------------- 1 | -- Revert the up migration: restore earnings.id to its original INT type.
2 | -- (Was previously a copy-paste of up.sql using BIGINT, making rollback a no-op.)
3 | ALTER TABLE earnings MODIFY COLUMN id INT NOT NULL AUTO_INCREMENT 4 | -------------------------------------------------------------------------------- /migrations/2024-12-03-162732_earnings_id_to_bigint/up.sql:
-------------------------------------------------------------------------------- 1 | ALTER TABLE earnings MODIFY COLUMN id BIGINT NOT NULL AUTO_INCREMENT 2 | -------------------------------------------------------------------------------- /migrations/2025-03-01-234736_submissions_2/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE submissions_2 2 | -------------------------------------------------------------------------------- /migrations/2025-03-01-234736_submissions_2/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE submissions_2 ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | miner_id INT NOT NULL, 4 | challenge_id INT NOT NULL, 5 | difficulty TINYINT NOT NULL, 6 | nonce BIGINT UNSIGNED NOT NULL, 7 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 8 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 9 | ) 10 | -------------------------------------------------------------------------------- /migrations/2025-03-02-015151_drop_submissions/down.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE submissions ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | miner_id INT NOT NULL, 4 | challenge_id INT NOT NULL, 5 | difficulty TINYINT NOT NULL, 6 | nonce BIGINT UNSIGNED NOT NULL, 7 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 8 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 9 | ) 10 | -------------------------------------------------------------------------------- /migrations/2025-03-02-015151_drop_submissions/up.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE submissions 2 | -------------------------------------------------------------------------------- /migrations/2025-03-02-015415_drop_earnings/down.sql: 
-------------------------------------------------------------------------------- 1 | CREATE TABLE earnings ( 2 | id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 3 | miner_id INT NOT NULL, 4 | pool_id INT NOT NULL, 5 | challenge_id INT NOT NULL, 6 | amount BIGINT UNSIGNED DEFAULT 0 NOT NULL, 7 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, 8 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL 9 | ) 10 | -------------------------------------------------------------------------------- /migrations/2025-03-02-015415_drop_earnings/up.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE earnings 2 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.81.0" 3 | components = [ "rustfmt", "rust-analyzer" ] 4 | profile = "minimal" 5 | -------------------------------------------------------------------------------- /src/app_metrics.rs: -------------------------------------------------------------------------------- 1 | use reqwest::Client; 2 | 3 | use crate::ore_utils::{MineEventWithBoosts, MineEventWithGlobalBoosts}; 4 | 5 | #[derive(Debug)] 6 | pub enum AppMetricsError { 7 | FailedToSendMetrics(String), 8 | } 9 | 10 | #[derive(Debug)] 11 | pub struct MetricsClaimEventData { 12 | duration_ms: u64, 13 | timestamp_ns: u64, 14 | has_error: bool, 15 | error: String, 16 | } 17 | 18 | #[derive(Debug)] 19 | pub struct MetricsProcessingClaimsEventData { 20 | pub claims_queue_length: usize, 21 | } 22 | 23 | #[derive(Debug)] 24 | pub struct MetricsRouteEventData { 25 | pub route: String, 26 | pub method: String, 27 | pub status_code: u32, 28 | pub request: u128, 29 | pub response: u128, 30 | pub latency: u128, 31 | pub ts_ns: u128, 32 | } 33 | 34 | #[derive(Debug)] 35 | pub enum AppMetricsMineEvent { 36 | V1(MineEventWithBoosts), 37 | 
V2(MineEventWithGlobalBoosts), 38 | } 39 | 40 | #[derive(Debug)] 41 | pub enum AppMetricsEvent { 42 | MineEvent(AppMetricsMineEvent), 43 | ClaimEvent(MetricsClaimEventData), 44 | ProcessingClaimsEvent(MetricsProcessingClaimsEventData), 45 | RouteEvent(MetricsRouteEventData), 46 | } 47 | 48 | pub struct AppMetrics { 49 | client: Client, 50 | url: String, 51 | token: String, 52 | org: String, 53 | bucket: String, 54 | pub hostname: String, 55 | } 56 | 57 | impl AppMetrics { 58 | pub fn new(url: String, token: String, org: String, bucket: String, hostname: String) -> Self { 59 | let client = reqwest::Client::new(); 60 | AppMetrics { 61 | client, 62 | url, 63 | token: format!("Token {}", token), 64 | org, 65 | bucket, 66 | hostname, 67 | } 68 | } 69 | 70 | pub async fn send_data_to_influxdb( 71 | &self, 72 | data: String, 73 | ) -> Result<(), AppMetricsError> { 74 | match self.client.post(format!( 75 | "{}/api/v2/write?org={}&bucket={}&precision=ns", 76 | self.url, 77 | self.org, 78 | self.bucket 79 | )).header("Authorization", self.token.clone()) 80 | .body(data) 81 | .send() 82 | .await { 83 | Ok(res) => { 84 | let status = res.status(); 85 | if !res.status().is_success() { 86 | let error_body = res.text().await.unwrap_or_else(|_| "Failed to get error body".to_string()); 87 | Err(AppMetricsError::FailedToSendMetrics(format!("Status Code: {}.\nError: {}", status, error_body))) 88 | } else { 89 | Ok(()) 90 | } 91 | }, 92 | Err(e) => { 93 | tracing::error!(target: "server_log", "Failed to send metrics data to influxdb.\nError: {:?}", e); 94 | Err(AppMetricsError::FailedToSendMetrics(format!("{:?}", e))) 95 | } 96 | } 97 | 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/app_rr_database.rs: -------------------------------------------------------------------------------- 1 | use deadpool_diesel::mysql::{Manager, Pool}; 2 | use diesel::{sql_types::Text, MysqlConnection, RunQueryDsl}; 3 | use tracing::error; 4 | 5 | 
use crate::{ 6 | app_database::AppDatabaseError, models, ChallengeWithDifficulty, Submission, 7 | SubmissionWithPubkey, Txn, 8 | }; 9 | 10 | pub struct AppRRDatabase { 11 | connection_pool: Pool, 12 | } 13 | 14 | impl AppRRDatabase { 15 | pub fn new(url: String) -> Self { 16 | let manager = Manager::new(url, deadpool_diesel::Runtime::Tokio1); 17 | 18 | let pool = Pool::builder(manager).build().unwrap(); 19 | 20 | AppRRDatabase { 21 | connection_pool: pool, 22 | } 23 | } 24 | 25 | pub async fn get_miner_rewards( 26 | &self, 27 | miner_pubkey: String, 28 | ) -> Result { 29 | if let Ok(db_conn) = self.connection_pool.get().await { 30 | let res = db_conn.interact(move |conn: &mut MysqlConnection| { 31 | diesel::sql_query("SELECT r.id, r.balance, r.miner_id FROM miners m JOIN rewards r ON m.id = r.miner_id WHERE m.pubkey = ?") 32 | .bind::(miner_pubkey) 33 | .get_result::(conn) 34 | }).await; 35 | 36 | match res { 37 | Ok(interaction) => match interaction { 38 | Ok(query) => { 39 | return Ok(query); 40 | } 41 | Err(e) => { 42 | error!("get_miner_rewards: {:?}", e); 43 | return Err(AppDatabaseError::QueryFailed); 44 | } 45 | }, 46 | Err(e) => { 47 | error!("{:?}", e); 48 | return Err(AppDatabaseError::InteractionFailed); 49 | } 50 | } 51 | } else { 52 | return Err(AppDatabaseError::FailedToGetConnectionFromPool); 53 | }; 54 | } 55 | 56 | pub async fn get_last_challenge_submissions( 57 | &self, 58 | ) -> Result, AppDatabaseError> { 59 | if let Ok(db_conn) = self.connection_pool.get().await { 60 | let res = db_conn 61 | .interact(move |conn: &mut MysqlConnection| { 62 | 63 | diesel::sql_query("SELECT s.*, m.pubkey FROM submissions_2 s JOIN miners m ON s.miner_id = m.id JOIN challenges c ON s.challenge_id = c.id WHERE c.id = (SELECT id from challenges WHERE rewards_earned IS NOT NULL ORDER BY id DESC LIMIT 1)") 64 | .load::(conn) 65 | }) 66 | .await; 67 | 68 | match res { 69 | Ok(interaction) => match interaction { 70 | Ok(query) => { 71 | return Ok(query); 72 | } 73 | 
Err(e) => { 74 | error!("{:?}", e); 75 | return Err(AppDatabaseError::QueryFailed); 76 | } 77 | }, 78 | Err(e) => { 79 | error!("{:?}", e); 80 | return Err(AppDatabaseError::InteractionFailed); 81 | } 82 | } 83 | } else { 84 | return Err(AppDatabaseError::FailedToGetConnectionFromPool); 85 | }; 86 | } 87 | 88 | pub async fn get_miner_submissions( 89 | &self, 90 | pubkey: String, 91 | ) -> Result, AppDatabaseError> { 92 | if let Ok(db_conn) = self.connection_pool.get().await { 93 | let res = db_conn 94 | .interact(move |conn: &mut MysqlConnection| { 95 | diesel::sql_query("SELECT s.* FROM submissions_2 s JOIN miners m ON s.miner_id = m.id WHERE m.pubkey = ? ORDER BY s.id DESC LIMIT 100") 96 | .bind::(pubkey) 97 | .load::(conn) 98 | }) 99 | .await; 100 | 101 | match res { 102 | Ok(interaction) => match interaction { 103 | Ok(query) => { 104 | return Ok(query); 105 | } 106 | Err(e) => { 107 | error!("{:?}", e); 108 | return Err(AppDatabaseError::QueryFailed); 109 | } 110 | }, 111 | Err(e) => { 112 | error!("{:?}", e); 113 | return Err(AppDatabaseError::InteractionFailed); 114 | } 115 | } 116 | } else { 117 | return Err(AppDatabaseError::FailedToGetConnectionFromPool); 118 | }; 119 | } 120 | 121 | pub async fn get_challenges(&self) -> Result, AppDatabaseError> { 122 | if let Ok(db_conn) = self.connection_pool.get().await { 123 | let res = db_conn 124 | .interact(move |conn: &mut MysqlConnection| { 125 | 126 | diesel::sql_query("SELECT c.id, c.rewards_earned, c.updated_at, s.difficulty FROM challenges c JOIN submissions_2 s ON c.submission_id = s.id WHERE c.submission_id IS NOT NULL ORDER BY c.id DESC LIMIT 1440") 127 | .load::(conn) 128 | }) 129 | .await; 130 | 131 | match res { 132 | Ok(interaction) => match interaction { 133 | Ok(query) => { 134 | return Ok(query); 135 | } 136 | Err(e) => { 137 | error!("{:?}", e); 138 | return Err(AppDatabaseError::QueryFailed); 139 | } 140 | }, 141 | Err(e) => { 142 | error!("{:?}", e); 143 | return 
Err(AppDatabaseError::InteractionFailed); 144 | } 145 | } 146 | } else { 147 | return Err(AppDatabaseError::FailedToGetConnectionFromPool); 148 | }; 149 | } 150 | 151 | pub async fn get_pool_by_authority_pubkey( 152 | &self, 153 | pool_pubkey: String, 154 | ) -> Result { 155 | if let Ok(db_conn) = self.connection_pool.get().await { 156 | let res = db_conn.interact(move |conn: &mut MysqlConnection| { 157 | diesel::sql_query("SELECT id, proof_pubkey, authority_pubkey, total_rewards, claimed_rewards FROM pools WHERE pools.authority_pubkey = ?") 158 | .bind::(pool_pubkey) 159 | .get_result::(conn) 160 | }).await; 161 | 162 | match res { 163 | Ok(interaction) => match interaction { 164 | Ok(query) => { 165 | return Ok(query); 166 | } 167 | Err(e) => { 168 | error!("{:?}", e); 169 | return Err(AppDatabaseError::QueryFailed); 170 | } 171 | }, 172 | Err(e) => { 173 | error!("{:?}", e); 174 | return Err(AppDatabaseError::InteractionFailed); 175 | } 176 | } 177 | } else { 178 | return Err(AppDatabaseError::FailedToGetConnectionFromPool); 179 | }; 180 | } 181 | 182 | pub async fn get_latest_mine_txn(&self) -> Result { 183 | if let Ok(db_conn) = self.connection_pool.get().await { 184 | let res = db_conn 185 | .interact(move |conn: &mut MysqlConnection| { 186 | diesel::sql_query( 187 | "SELECT * FROM txns WHERE txn_type = ? 
ORDER BY id DESC LIMIT 1", 188 | ) 189 | .bind::("mine") 190 | .get_result::(conn) 191 | }) 192 | .await; 193 | 194 | match res { 195 | Ok(interaction) => match interaction { 196 | Ok(query) => { 197 | return Ok(query); 198 | } 199 | Err(e) => { 200 | error!("{:?}", e); 201 | return Err(AppDatabaseError::QueryFailed); 202 | } 203 | }, 204 | Err(e) => { 205 | error!("{:?}", e); 206 | return Err(AppDatabaseError::InteractionFailed); 207 | } 208 | } 209 | } else { 210 | return Err(AppDatabaseError::FailedToGetConnectionFromPool); 211 | }; 212 | } 213 | 214 | pub async fn get_last_claim_by_pubkey( 215 | &self, 216 | pubkey: String, 217 | ) -> Result { 218 | if let Ok(db_conn) = self.connection_pool.get().await { 219 | let res = db_conn 220 | .interact(move |conn: &mut MysqlConnection| { 221 | diesel::sql_query("SELECT c.created_at FROM claims c JOIN miners m ON c.miner_id = m.id WHERE m.pubkey = ? ORDER BY c.id DESC LIMIT 1") 222 | .bind::(pubkey) 223 | .get_result::(conn) 224 | }) 225 | .await; 226 | 227 | match res { 228 | Ok(interaction) => match interaction { 229 | Ok(query) => { 230 | return Ok(query); 231 | } 232 | Err(e) => { 233 | error!(target: "server_log", "{:?}", e); 234 | return Err(AppDatabaseError::QueryFailed); 235 | } 236 | }, 237 | Err(e) => { 238 | error!(target: "server_log", "{:?}", e); 239 | return Err(AppDatabaseError::InteractionFailed); 240 | } 241 | } 242 | } else { 243 | return Err(AppDatabaseError::FailedToGetConnectionFromPool); 244 | }; 245 | } 246 | } 247 | -------------------------------------------------------------------------------- /src/global_boost_util.rs: -------------------------------------------------------------------------------- 1 | use ore_api::consts::{CONFIG_ADDRESS, BUS_ADDRESSES}; 2 | use ore_api::state::{Proof, Config}; 3 | use solana_client::nonblocking::rpc_client::RpcClient; 4 | use solana_sdk::account::ReadableAccount; 5 | use solana_sdk::pubkey::Pubkey; 6 | use steel::AccountDeserialize; 7 | 8 | use 
crate::ore_utils::{get_proof_pda, proof_pubkey}; 9 | 10 | pub async fn get_proof(client: &RpcClient, authority: Pubkey) -> Result { 11 | let proof_address = crate::ore_utils::get_proof_pda(authority); 12 | let data = client.get_account_data(&proof_address).await; 13 | match data { 14 | Ok(data) => { 15 | let proof = Proof::try_from_bytes(&data); 16 | if let Ok(proof) = proof { 17 | return Ok(*proof); 18 | } else { 19 | return Err("Failed to parse proof account".to_string()); 20 | } 21 | } 22 | Err(_) => return Err("Failed to get proof account".to_string()), 23 | } 24 | } 25 | 26 | pub async fn get_proof_and_config_with_busses( 27 | client: &RpcClient, 28 | authority: Pubkey, 29 | ) -> ( 30 | Result, 31 | Result, 32 | Result>, ()>, 33 | ) { 34 | let account_pubkeys = vec![ 35 | get_proof_pda(authority), 36 | CONFIG_ADDRESS, 37 | BUS_ADDRESSES[0], 38 | BUS_ADDRESSES[1], 39 | BUS_ADDRESSES[2], 40 | BUS_ADDRESSES[3], 41 | BUS_ADDRESSES[4], 42 | BUS_ADDRESSES[5], 43 | BUS_ADDRESSES[6], 44 | BUS_ADDRESSES[7], 45 | ]; 46 | let datas = client.get_multiple_accounts(&account_pubkeys).await; 47 | if let Ok(datas) = datas { 48 | let proof = if let Some(data) = &datas[0] { 49 | Ok(*Proof::try_from_bytes(data.data()).expect("Failed to parse treasury account")) 50 | } else { 51 | Err(()) 52 | }; 53 | 54 | let treasury_config = if let Some(data) = &datas[1] { 55 | Ok(*ore_api::state::Config::try_from_bytes(data.data()) 56 | .expect("Failed to parse config account")) 57 | } else { 58 | Err(()) 59 | }; 60 | let bus_1 = if let Some(data) = &datas[2] { 61 | Ok(*ore_api::state::Bus::try_from_bytes(data.data()) 62 | .expect("Failed to parse bus1 account")) 63 | } else { 64 | Err(()) 65 | }; 66 | let bus_2 = if let Some(data) = &datas[3] { 67 | Ok(*ore_api::state::Bus::try_from_bytes(data.data()) 68 | .expect("Failed to parse bus2 account")) 69 | } else { 70 | Err(()) 71 | }; 72 | let bus_3 = if let Some(data) = &datas[4] { 73 | Ok(*ore_api::state::Bus::try_from_bytes(data.data()) 74 | 
.expect("Failed to parse bus3 account")) 75 | } else { 76 | Err(()) 77 | }; 78 | let bus_4 = if let Some(data) = &datas[5] { 79 | Ok(*ore_api::state::Bus::try_from_bytes(data.data()) 80 | .expect("Failed to parse bus4 account")) 81 | } else { 82 | Err(()) 83 | }; 84 | let bus_5 = if let Some(data) = &datas[6] { 85 | Ok(*ore_api::state::Bus::try_from_bytes(data.data()) 86 | .expect("Failed to parse bus5 account")) 87 | } else { 88 | Err(()) 89 | }; 90 | let bus_6 = if let Some(data) = &datas[7] { 91 | Ok(*ore_api::state::Bus::try_from_bytes(data.data()) 92 | .expect("Failed to parse bus6 account")) 93 | } else { 94 | Err(()) 95 | }; 96 | let bus_7 = if let Some(data) = &datas[8] { 97 | Ok(*ore_api::state::Bus::try_from_bytes(data.data()) 98 | .expect("Failed to parse bus7 account")) 99 | } else { 100 | Err(()) 101 | }; 102 | let bus_8 = if let Some(data) = &datas[9] { 103 | Ok(*ore_api::state::Bus::try_from_bytes(data.data()) 104 | .expect("Failed to parse bus1 account")) 105 | } else { 106 | Err(()) 107 | }; 108 | 109 | ( 110 | proof, 111 | treasury_config, 112 | Ok(vec![bus_1, bus_2, bus_3, bus_4, bus_5, bus_6, bus_7, bus_8]), 113 | ) 114 | } else { 115 | (Err(()), Err(()), Err(())) 116 | } 117 | } 118 | 119 | pub async fn get_original_proof(client: &RpcClient, authority: Pubkey) -> Result { 120 | let proof_address = proof_pubkey(authority); 121 | let data = client.get_account_data(&proof_address).await; 122 | match data { 123 | Ok(data) => { 124 | let proof = Proof::try_from_bytes(&data); 125 | if let Ok(proof) = proof { 126 | return Ok(*proof); 127 | } else { 128 | return Err("Failed to parse proof account".to_string()); 129 | } 130 | } 131 | Err(_) => return Err("Failed to get proof account".to_string()), 132 | } 133 | } 134 | 135 | 136 | pub async fn get_config(client: &RpcClient) -> Result { 137 | let data = client.get_account_data(&CONFIG_ADDRESS).await; 138 | match data { 139 | Ok(data) => { 140 | let config = Config::try_from_bytes(&data); 141 | if let 
/// Binary-serializable "start mining" message pushed to a connected client.
pub struct ServerMessageStartMining {
    challenge: [u8; 32],
    cutoff: i64,
    nonce_range_start: u64,
    nonce_range_end: u64,
}

impl ServerMessageStartMining {
    /// Bundle a challenge hash, the mining cutoff, and the nonce range
    /// assigned to this client.
    pub fn new(
        challenge: [u8; 32],
        cutoff: i64,
        nonce_range_start: u64,
        nonce_range_end: u64,
    ) -> Self {
        Self {
            challenge,
            cutoff,
            nonce_range_start,
            nonce_range_end,
        }
    }

    /// Wire format: tag byte `0`, challenge (32 bytes), cutoff (i64 LE),
    /// nonce range start and end (u64 LE each) — 57 bytes total.
    pub fn to_message_binary(&self) -> Vec<u8> {
        let mut out = Vec::with_capacity(1 + 32 + 8 + 8 + 8);
        out.push(0u8);
        out.extend_from_slice(&self.challenge);
        out.extend_from_slice(&self.cutoff.to_le_bytes());
        out.extend_from_slice(&self.nonce_range_start.to_le_bytes());
        out.extend_from_slice(&self.nonce_range_end.to_le_bytes());
        out
    }
}
ServerMessagePoolSubmissionResult { 64 | difficulty, 65 | total_balance, 66 | total_rewards, 67 | top_stake, 68 | multiplier, 69 | active_miners, 70 | challenge, 71 | best_nonce, 72 | miner_supplied_difficulty, 73 | miner_earned_rewards, 74 | miner_percentage, 75 | } 76 | } 77 | 78 | pub fn to_message_binary(&self) -> Vec { 79 | let mut bin_data = Vec::new(); 80 | bin_data.push(1u8); 81 | bin_data.extend_from_slice(&self.difficulty.to_le_bytes()); 82 | bin_data.extend_from_slice(&self.total_balance.to_le_bytes()); 83 | bin_data.extend_from_slice(&self.total_rewards.to_le_bytes()); 84 | bin_data.extend_from_slice(&self.top_stake.to_le_bytes()); 85 | bin_data.extend_from_slice(&self.multiplier.to_le_bytes()); 86 | bin_data.extend_from_slice(&self.active_miners.to_le_bytes()); 87 | bin_data.extend_from_slice(&self.challenge); 88 | bin_data.extend_from_slice(&self.best_nonce.to_le_bytes()); 89 | bin_data.extend_from_slice(&self.miner_supplied_difficulty.to_le_bytes()); 90 | bin_data.extend_from_slice(&self.miner_earned_rewards.to_le_bytes()); 91 | bin_data.extend_from_slice(&self.miner_percentage.to_le_bytes()); 92 | 93 | bin_data 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/models.rs: -------------------------------------------------------------------------------- 1 | use chrono::NaiveDateTime; 2 | use diesel::prelude::*; 3 | use diesel::sql_types::{BigInt, Integer, Nullable, Text, Timestamp, TinyInt, Unsigned}; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 7 | #[diesel(table_name = crate::schema::challenges)] 8 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 9 | pub struct Challenge { 10 | pub id: i32, 11 | pub pool_id: i32, 12 | pub submission_id: Option, 13 | pub challenge: Vec, 14 | pub rewards_earned: Option, 15 | } 16 | 17 | #[derive(Debug, Clone, Deserialize, Serialize, QueryableByName)] 18 | pub struct 
ChallengeWithDifficulty { 19 | #[diesel(sql_type = Integer)] 20 | pub id: i32, 21 | #[diesel(sql_type = Nullable>)] 22 | pub rewards_earned: Option, 23 | #[diesel(sql_type = TinyInt)] 24 | pub difficulty: i8, 25 | #[diesel(sql_type = Timestamp)] 26 | pub updated_at: NaiveDateTime, 27 | } 28 | 29 | #[derive(Debug, Clone, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 30 | #[diesel(table_name = crate::schema::challenges)] 31 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 32 | pub struct InsertChallenge { 33 | pub pool_id: i32, 34 | pub challenge: Vec, 35 | pub rewards_earned: Option, 36 | } 37 | 38 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 39 | #[diesel(table_name = crate::schema::challenges)] 40 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 41 | pub struct UpdateChallengeRewards { 42 | pub rewards_earned: Option, 43 | } 44 | 45 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 46 | #[diesel(table_name = crate::schema::claims)] 47 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 48 | pub struct Claim { 49 | pub miner_id: i32, 50 | pub pool_id: i32, 51 | pub txn_id: i32, 52 | pub amount: u64, 53 | } 54 | 55 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 56 | #[diesel(table_name = crate::schema::claims)] 57 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 58 | pub struct LastClaim { 59 | pub created_at: NaiveDateTime, 60 | } 61 | 62 | #[derive(Debug, Copy, Clone, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 63 | #[diesel(table_name = crate::schema::claims)] 64 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 65 | pub struct InsertClaim { 66 | pub miner_id: i32, 67 | pub pool_id: i32, 68 | pub txn_id: i32, 69 | pub amount: u64, 70 | } 71 | 72 | #[derive(Debug, Clone, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 73 | #[diesel(table_name = crate::schema::miners)] 74 | 
#[diesel(check_for_backend(diesel::mysql::Mysql))] 75 | pub struct Miner { 76 | pub id: i32, 77 | pub pubkey: String, 78 | pub enabled: bool, 79 | } 80 | 81 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 82 | #[diesel(table_name = crate::schema::pools)] 83 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 84 | pub struct Pool { 85 | pub id: i32, 86 | pub proof_pubkey: String, 87 | pub authority_pubkey: String, 88 | pub total_rewards: u64, 89 | pub claimed_rewards: u64, 90 | } 91 | 92 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 93 | #[diesel(table_name = crate::schema::submissions_2)] 94 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 95 | pub struct Submission { 96 | pub id: i32, 97 | pub miner_id: i32, 98 | pub challenge_id: i32, 99 | pub nonce: u64, 100 | pub difficulty: i8, 101 | pub created_at: NaiveDateTime, 102 | } 103 | #[derive(Debug, Clone, Deserialize, Serialize, QueryableByName)] 104 | pub struct SubmissionWithPubkey { 105 | #[diesel(sql_type = BigInt)] 106 | pub id: i64, 107 | #[diesel(sql_type = Integer)] 108 | pub miner_id: i32, 109 | #[diesel(sql_type = Integer)] 110 | pub challenge_id: i32, 111 | #[diesel(sql_type = Unsigned)] 112 | pub nonce: u64, 113 | #[diesel(sql_type = TinyInt)] 114 | pub difficulty: i8, 115 | #[diesel(sql_type = Timestamp)] 116 | pub created_at: NaiveDateTime, 117 | #[diesel(sql_type = Text)] 118 | pub pubkey: String, 119 | } 120 | 121 | #[derive( 122 | Debug, Clone, Serialize, Deserialize, Insertable, Queryable, Selectable, QueryableByName, 123 | )] 124 | #[diesel(table_name = crate::schema::submissions_2)] 125 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 126 | pub struct InsertSubmission { 127 | pub miner_id: i32, 128 | pub challenge_id: i32, 129 | pub nonce: u64, 130 | pub difficulty: i8, 131 | } 132 | 133 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 134 | #[diesel(table_name = 
crate::schema::submissions_2)] 135 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 136 | pub struct SubmissionWithId { 137 | pub id: i32, 138 | } 139 | 140 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 141 | #[diesel(table_name = crate::schema::txns)] 142 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 143 | pub struct Txn { 144 | pub id: i32, 145 | pub txn_type: String, 146 | pub signature: String, 147 | pub priority_fee: u32, 148 | pub created_at: NaiveDateTime, 149 | } 150 | 151 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 152 | #[diesel(table_name = crate::schema::txns)] 153 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 154 | pub struct TxnId { 155 | pub id: i32, 156 | } 157 | 158 | #[derive(Debug, Clone, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 159 | #[diesel(table_name = crate::schema::txns)] 160 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 161 | pub struct InsertTxn { 162 | pub txn_type: String, 163 | pub signature: String, 164 | pub priority_fee: u32, 165 | } 166 | 167 | #[derive(Debug, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 168 | #[diesel(table_name = crate::schema::rewards)] 169 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 170 | pub struct InsertReward { 171 | pub miner_id: i32, 172 | pub pool_id: i32, 173 | } 174 | 175 | #[derive(Debug, Clone, Copy, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 176 | #[diesel(table_name = crate::schema::rewards)] 177 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 178 | pub struct UpdateReward { 179 | pub miner_id: i32, 180 | pub balance: u64, 181 | } 182 | 183 | #[derive(Debug, Serialize, Clone, Copy, Deserialize, Queryable, Selectable, QueryableByName)] 184 | #[diesel(table_name = crate::schema::rewards)] 185 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 186 | pub struct Reward { 187 | pub id: i32, 188 | pub balance: u64, 189 | pub 
miner_id: i32, 190 | } 191 | 192 | #[derive(Debug, Clone, Deserialize, Insertable)] 193 | #[diesel(table_name = crate::schema::stake_accounts)] 194 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 195 | pub struct InsertStakeAccount { 196 | pub pool_id: i32, 197 | pub mint_pubkey: String, 198 | pub staker_pubkey: String, 199 | pub stake_pda: String, 200 | pub staked_balance: u64, 201 | } 202 | 203 | #[derive(Debug, Clone, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 204 | #[diesel(table_name = crate::schema::stake_accounts)] 205 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 206 | pub struct StakeAccount { 207 | pub id: i32, 208 | pub pool_id: i32, 209 | pub mint_pubkey: String, 210 | pub staker_pubkey: String, 211 | pub stake_pda: String, 212 | pub rewards_balance: u64, 213 | pub staked_balance: u64, 214 | } 215 | 216 | #[derive(Debug, Clone, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 217 | #[diesel(table_name = crate::schema::stake_accounts)] 218 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 219 | pub struct UpdateStakeAccount { 220 | pub stake_pda: String, 221 | pub staked_balance: u64, 222 | } 223 | 224 | #[derive(Debug, Clone, Serialize, Deserialize, Queryable, Selectable, QueryableByName)] 225 | #[diesel(table_name = crate::schema::stake_accounts)] 226 | #[diesel(check_for_backend(diesel::mysql::Mysql))] 227 | pub struct UpdateStakeAccountRewards { 228 | pub stake_pda: String, 229 | pub rewards_balance: u64, 230 | } 231 | -------------------------------------------------------------------------------- /src/ore_utils.rs: -------------------------------------------------------------------------------- 1 | use std::{str::FromStr, time::{SystemTime, UNIX_EPOCH}}; 2 | 3 | use bytemuck::{Pod, Zeroable}; 4 | use drillx::Solution; 5 | use ore_api::{ 6 | consts::{BUS_ADDRESSES, CONFIG_ADDRESS, MINT_ADDRESS, PROOF, TOKEN_DECIMALS}, state::{Config, Proof}, ID as ORE_ID 7 | }; 8 | use 
ore_boost_api::state::{boost_pda, stake_pda}; 9 | use ore_miner_delegation::{instruction, state::{DelegatedBoost, DelegatedBoostV2, DelegatedStake}, utils::AccountDeserializeV1, pda::managed_proof_pda}; 10 | use solana_client::nonblocking::rpc_client::RpcClient; 11 | use solana_sdk::{account::ReadableAccount, instruction::Instruction, pubkey::Pubkey}; 12 | use spl_associated_token_account::get_associated_token_address; 13 | use steel::event; 14 | 15 | pub const ORE_TOKEN_DECIMALS: u8 = TOKEN_DECIMALS; 16 | 17 | #[repr(C)] 18 | #[derive(Clone, Copy, Debug, PartialEq, Pod, Zeroable)] 19 | pub struct MineEventWithBoosts { 20 | pub balance: u64, 21 | pub difficulty: u64, 22 | pub last_hash_at: i64, 23 | pub timing: i64, 24 | pub reward: u64, 25 | pub boost_1: u64, 26 | pub boost_2: u64, 27 | pub boost_3: u64, 28 | } 29 | 30 | event!(MineEventWithBoosts); 31 | 32 | #[repr(C)] 33 | #[derive(Clone, Copy, Debug, PartialEq, Pod, Zeroable)] 34 | pub struct MineEventWithGlobalBoosts { 35 | pub balance: u64, 36 | pub difficulty: u64, 37 | pub last_hash_at: i64, 38 | pub timing: i64, 39 | pub net_reward: u64, 40 | pub net_base_reward: u64, 41 | pub net_miner_boost_reward: u64, 42 | pub net_staker_boost_reward: u64, 43 | } 44 | 45 | event!(MineEventWithGlobalBoosts); 46 | 47 | pub fn get_auth_ix(signer: Pubkey) -> Instruction { 48 | let proof = get_proof_pda(signer); 49 | 50 | ore_api::prelude::auth(proof) 51 | } 52 | 53 | pub fn get_mine_with_global_boost_ix(signer: Pubkey, solution: Solution, bus: usize) -> Instruction { 54 | instruction::mine_with_boost(signer, BUS_ADDRESSES[bus], solution) 55 | } 56 | 57 | pub fn get_rotate_ix(signer: Pubkey) -> Instruction { 58 | instruction::rotate_global_boost(signer) 59 | } 60 | 61 | pub fn get_register_ix(signer: Pubkey) -> Instruction { 62 | instruction::open_managed_proof(signer) 63 | } 64 | 65 | pub fn get_reset_ix(signer: Pubkey) -> Instruction { 66 | ore_api::prelude::reset(signer) 67 | } 68 | 69 | pub fn get_claim_ix(signer: 
Pubkey, beneficiary: Pubkey, claim_amount: u64) -> Instruction { 70 | instruction::undelegate_stake(signer, signer, beneficiary, claim_amount) 71 | } 72 | 73 | pub fn get_stake_ix(signer: Pubkey, sender: Pubkey, stake_amount: u64) -> Instruction { 74 | instruction::delegate_stake(sender, signer, stake_amount) 75 | } 76 | 77 | pub fn get_ore_mint() -> Pubkey { 78 | MINT_ADDRESS 79 | } 80 | 81 | pub fn get_managed_proof_token_ata(miner: Pubkey) -> Pubkey { 82 | let managed_proof = Pubkey::find_program_address( 83 | &[b"managed-proof-account", miner.as_ref()], 84 | &ore_miner_delegation::id(), 85 | ); 86 | 87 | get_associated_token_address(&managed_proof.0, &ore_api::consts::MINT_ADDRESS) 88 | } 89 | 90 | pub fn get_proof_pda(miner: Pubkey) -> Pubkey { 91 | let managed_proof = Pubkey::find_program_address( 92 | &[b"managed-proof-account", miner.as_ref()], 93 | &ore_miner_delegation::id(), 94 | ); 95 | 96 | proof_pubkey(managed_proof.0) 97 | } 98 | 99 | pub async fn get_delegated_stake_account( 100 | client: &RpcClient, 101 | staker: Pubkey, 102 | miner: Pubkey, 103 | ) -> Result { 104 | let data = client 105 | .get_account_data(&get_delegated_stake_pda(staker, miner)) 106 | .await; 107 | match data { 108 | Ok(data) => { 109 | let delegated_stake = DelegatedStake::try_from_bytes(&data); 110 | if let Ok(delegated_stake) = delegated_stake { 111 | return Ok(*delegated_stake); 112 | } else { 113 | return Err("Failed to parse delegated stake account".to_string()); 114 | } 115 | } 116 | Err(_) => return Err("Failed to get delegated stake account".to_string()), 117 | } 118 | } 119 | 120 | pub async fn get_delegated_boost_account( 121 | client: &RpcClient, 122 | staker: Pubkey, 123 | miner: Pubkey, 124 | mint: Pubkey, 125 | ) -> Result { 126 | let data = client 127 | .get_account_data(&get_delegated_boost_pda(staker, miner, mint)) 128 | .await; 129 | match data { 130 | Ok(data) => { 131 | let delegated_boost = DelegatedBoost::try_from_bytes(&data); 132 | if let 
Ok(delegated_boost) = delegated_boost { 133 | return Ok(*delegated_boost); 134 | } else { 135 | return Err("Failed to parse delegated boost account".to_string()); 136 | } 137 | } 138 | Err(_) => return Err("Failed to get delegated boost account".to_string()), 139 | } 140 | } 141 | 142 | pub async fn get_delegated_boost_account_v2( 143 | client: &RpcClient, 144 | staker: Pubkey, 145 | miner: Pubkey, 146 | mint: Pubkey, 147 | ) -> Result { 148 | let data = client 149 | .get_account_data(&get_delegated_boost_v2_pda(staker, miner, mint)) 150 | .await; 151 | match data { 152 | Ok(data) => { 153 | let delegated_boost = DelegatedBoostV2::try_from_bytes(&data); 154 | if let Ok(delegated_boost) = delegated_boost { 155 | return Ok(*delegated_boost); 156 | } else { 157 | return Err("Failed to parse delegated boost v2 account".to_string()); 158 | } 159 | } 160 | Err(_) => return Err("Failed to get delegated boost v2 account".to_string()), 161 | } 162 | } 163 | 164 | pub fn get_delegated_stake_pda(staker: Pubkey, miner: Pubkey) -> Pubkey { 165 | let managed_proof = Pubkey::find_program_address( 166 | &[b"managed-proof-account", miner.as_ref()], 167 | &ore_miner_delegation::id(), 168 | ); 169 | 170 | Pubkey::find_program_address( 171 | &[ 172 | b"delegated-stake", 173 | staker.as_ref(), 174 | managed_proof.0.as_ref(), 175 | ], 176 | &ore_miner_delegation::id(), 177 | ) 178 | .0 179 | } 180 | 181 | pub fn get_delegated_boost_pda(staker: Pubkey, miner: Pubkey, mint: Pubkey) -> Pubkey { 182 | let managed_proof = Pubkey::find_program_address( 183 | &[b"managed-proof-account", miner.as_ref()], 184 | &ore_miner_delegation::id(), 185 | ); 186 | 187 | Pubkey::find_program_address( 188 | &[ 189 | ore_miner_delegation::consts::DELEGATED_BOOST, 190 | staker.as_ref(), 191 | mint.as_ref(), 192 | managed_proof.0.as_ref(), 193 | ], 194 | &ore_miner_delegation::id(), 195 | ) 196 | .0 197 | } 198 | 199 | pub fn get_delegated_boost_v2_pda(staker: Pubkey, miner: Pubkey, mint: Pubkey) -> Pubkey { 
200 | let managed_proof = Pubkey::find_program_address( 201 | &[b"managed-proof-account", miner.as_ref()], 202 | &ore_miner_delegation::id(), 203 | ); 204 | 205 | Pubkey::find_program_address( 206 | &[ 207 | ore_miner_delegation::consts::DELEGATED_BOOST_V2, 208 | staker.as_ref(), 209 | mint.as_ref(), 210 | managed_proof.0.as_ref(), 211 | ], 212 | &ore_miner_delegation::id(), 213 | ) 214 | .0 215 | } 216 | 217 | 218 | 219 | 220 | pub async fn get_pool_boost_stake(rpc_client: &RpcClient, authority: Pubkey) -> Vec { 221 | let managed_proof = Pubkey::find_program_address( 222 | &[b"managed-proof-account", authority.as_ref()], 223 | &ore_miner_delegation::id(), 224 | ); 225 | 226 | let boost_mints = vec![ 227 | Pubkey::from_str("oreoU2P8bN6jkk3jbaiVxYnG1dCXcYxwhwyK9jSybcp").unwrap(), 228 | Pubkey::from_str("DrSS5RM7zUd9qjUEdDaf31vnDUSbCrMto6mjqTrHFifN").unwrap(), 229 | Pubkey::from_str("meUwDp23AaxhiNKaQCyJ2EAF2T4oe1gSkEkGXSRVdZb").unwrap() 230 | ]; 231 | 232 | // Get pools boost stake accounts 233 | let mut boost_stake_acct_pdas = vec![]; 234 | 235 | for boost_mint in boost_mints { 236 | let boost_account_pda = boost_pda(boost_mint); 237 | let boost_stake_pda = stake_pda(managed_proof.0, boost_account_pda.0); 238 | boost_stake_acct_pdas.push(boost_stake_pda.0); 239 | } 240 | 241 | let mut stake_acct = vec![]; 242 | if let Ok(accounts) = rpc_client.get_multiple_accounts(&boost_stake_acct_pdas).await { 243 | for account in accounts { 244 | } 245 | } else { 246 | tracing::error!(target: "server_log", "Failed to get pool boost accounts.") 247 | } 248 | 249 | return stake_acct; 250 | } 251 | 252 | 253 | pub fn proof_pubkey(authority: Pubkey) -> Pubkey { 254 | Pubkey::find_program_address(&[PROOF, authority.as_ref()], &ORE_ID).0 255 | } 256 | 257 | pub fn get_cutoff(proof: Proof, buffer_time: u64) -> i64 { 258 | let now = SystemTime::now() 259 | .duration_since(UNIX_EPOCH) 260 | .expect("Failed to get time") 261 | .as_secs() as i64; 262 | proof 263 | .last_hash_at 264 | 
.saturating_add(60) 265 | .saturating_sub(buffer_time as i64) 266 | .saturating_sub(now) 267 | } 268 | -------------------------------------------------------------------------------- /src/proof_migration.rs: -------------------------------------------------------------------------------- 1 | use solana_client::nonblocking::rpc_client::RpcClient; 2 | use solana_sdk::{ 3 | commitment_config::{CommitmentConfig, CommitmentLevel}, 4 | compute_budget::ComputeBudgetInstruction, 5 | signature::Keypair, 6 | signer::Signer, 7 | transaction::Transaction, 8 | }; 9 | use spl_associated_token_account::get_associated_token_address; 10 | use tracing::{error, info}; 11 | 12 | pub async fn migrate( 13 | rpc_client: &RpcClient, 14 | wallet: &Keypair, 15 | original_proof_balance: u64, 16 | ore_token_account_balance: u64, 17 | ) -> Result<(), String> { 18 | let miner_ore_token_account_addr = 19 | get_associated_token_address(&wallet.pubkey(), &ore_api::consts::MINT_ADDRESS); 20 | 21 | // Claim from original ore proof 22 | let mut ixs = Vec::new(); 23 | let prio_fee_ix = ComputeBudgetInstruction::set_compute_unit_price(20_000); 24 | ixs.push(prio_fee_ix); 25 | let claim_ix = ore_api::prelude::claim( 26 | wallet.pubkey(), 27 | miner_ore_token_account_addr, 28 | original_proof_balance, 29 | ); 30 | 31 | ixs.push(claim_ix); 32 | let mut tx = Transaction::new_with_payer(&ixs, Some(&wallet.pubkey())); 33 | 34 | let blockhash = rpc_client 35 | .get_latest_blockhash() 36 | .await 37 | .expect("should get latest blockhash"); 38 | 39 | tx.sign(&[&wallet], blockhash); 40 | 41 | info!("Claiming from original proof."); 42 | match rpc_client 43 | .send_and_confirm_transaction_with_spinner_and_commitment( 44 | &tx, 45 | CommitmentConfig { 46 | commitment: CommitmentLevel::Confirmed, 47 | }, 48 | ) 49 | .await 50 | { 51 | Ok(_) => { 52 | info!("Successfully claimed from original proof."); 53 | } 54 | Err(e) => { 55 | error!("Failed to send and confirm tx.\n E: {:?}", e); 56 | return Err("Failed to 
claim from original proof".to_string()); 57 | } 58 | } 59 | 60 | // Delegate stake to new proof 61 | let mut ixs = Vec::new(); 62 | let prio_fee_ix = ComputeBudgetInstruction::set_compute_unit_price(20_000); 63 | ixs.push(prio_fee_ix); 64 | let stake_ix = crate::ore_utils::get_stake_ix( 65 | wallet.pubkey(), 66 | wallet.pubkey(), 67 | original_proof_balance.saturating_add(ore_token_account_balance), 68 | ); 69 | 70 | ixs.push(stake_ix); 71 | let mut tx = Transaction::new_with_payer(&ixs, Some(&wallet.pubkey())); 72 | 73 | let blockhash = rpc_client 74 | .get_latest_blockhash() 75 | .await 76 | .expect("should get latest blockhash"); 77 | 78 | tx.sign(&[&wallet], blockhash); 79 | 80 | info!("Staking to new proof."); 81 | match rpc_client 82 | .send_and_confirm_transaction_with_spinner_and_commitment( 83 | &tx, 84 | CommitmentConfig { 85 | commitment: CommitmentLevel::Confirmed, 86 | }, 87 | ) 88 | .await 89 | { 90 | Ok(_) => { 91 | info!("Successfully staked to new proof."); 92 | } 93 | Err(e) => { 94 | error!("Failed to send and confirm tx.\n E: {:?}", e); 95 | return Err("Failed to stake to new proof".to_string()); 96 | } 97 | } 98 | 99 | Ok(()) 100 | } 101 | -------------------------------------------------------------------------------- /src/routes.rs: -------------------------------------------------------------------------------- 1 | use app_rr_database::AppRRDatabase; 2 | use axum::{ 3 | http::{Response, StatusCode}, 4 | response::IntoResponse, 5 | Extension, Json, 6 | }; 7 | use solana_client::nonblocking::rpc_client::RpcClient; 8 | use solana_sdk::pubkey::Pubkey; 9 | use spl_associated_token_account::get_associated_token_address; 10 | use tokio::{sync::{mpsc::UnboundedSender, RwLock}, time::Instant}; 11 | use tracing::error; 12 | 13 | use crate::{ 14 | app_metrics::{AppMetricsEvent, MetricsRouteEventData}, app_rr_database, ore_utils::{get_ore_mint}, ChallengeWithDifficulty, ChallengesCache, Config, Txn, global_boost_util::get_proof 15 | }; 16 | use 
std::{str::FromStr, sync::Arc, time::{SystemTime, UNIX_EPOCH}}; 17 | 18 | pub async fn get_challenges( 19 | Extension(app_config): Extension>, 20 | Extension(app_cache_challenges): Extension>>, 21 | Extension(app_metrics_channel): Extension>, 22 | ) -> Result>, String> { 23 | let metrics_start = SystemTime::now() 24 | .duration_since(UNIX_EPOCH) 25 | .expect("Time went backwards") 26 | .as_millis(); 27 | if app_config.stats_enabled { 28 | let reader = app_cache_challenges.read().await; 29 | let cached_challenges = reader.clone(); 30 | drop(reader); 31 | let metrics_end = SystemTime::now() 32 | .duration_since(UNIX_EPOCH) 33 | .expect("Time went backwards") 34 | .as_millis(); 35 | 36 | let metrics_data = MetricsRouteEventData { 37 | route: "challenges".to_string(), 38 | method: "GET".to_string(), 39 | status_code: 200, 40 | request: metrics_start, 41 | response: metrics_end, 42 | latency: metrics_end - metrics_start, 43 | ts_ns: metrics_end, 44 | 45 | }; 46 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 47 | tracing::error!(target: "server_log", "Failed to send msg down app metrics channel."); 48 | }; 49 | return Ok(Json(cached_challenges.item)); 50 | } else { 51 | let metrics_end = SystemTime::now() 52 | .duration_since(UNIX_EPOCH) 53 | .expect("Time went backwards") 54 | .as_millis(); 55 | 56 | let metrics_data = MetricsRouteEventData { 57 | route: "challenges".to_string(), 58 | method: "GET".to_string(), 59 | status_code: 400, 60 | request: metrics_start, 61 | response: metrics_end, 62 | latency: metrics_end - metrics_start, 63 | ts_ns: metrics_end, 64 | 65 | }; 66 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 67 | tracing::error!(target: "server_log", "Failed to send msg down app metrics channel."); 68 | }; 69 | return Err("Stats not enabled for this server.".to_string()); 70 | } 71 | } 72 | 73 | pub async fn get_latest_mine_txn( 74 | Extension(app_rr_database): Extension>, 75 | 
Extension(app_config): Extension>, 76 | Extension(app_metrics_channel): Extension>, 77 | ) -> Result, String> { 78 | let metrics_start = SystemTime::now() 79 | .duration_since(UNIX_EPOCH) 80 | .expect("Time went backwards") 81 | .as_millis(); 82 | 83 | if app_config.stats_enabled { 84 | let res = app_rr_database.get_latest_mine_txn().await; 85 | 86 | match res { 87 | Ok(txn) => { 88 | let metrics_end = SystemTime::now() 89 | .duration_since(UNIX_EPOCH) 90 | .expect("Time went backwards") 91 | .as_millis(); 92 | 93 | let metrics_data = MetricsRouteEventData { 94 | route: "txns/latest-mine".to_string(), 95 | method: "GET".to_string(), 96 | status_code: 400, 97 | request: metrics_start, 98 | response: metrics_end, 99 | latency: metrics_end - metrics_start, 100 | ts_ns: metrics_end, 101 | 102 | }; 103 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 104 | tracing::error!(target: "server_log", "Failed to send msg down app metrics channel."); 105 | }; 106 | Ok(Json(txn)) 107 | }, 108 | Err(_) => { 109 | let metrics_end = SystemTime::now() 110 | .duration_since(UNIX_EPOCH) 111 | .expect("Time went backwards") 112 | .as_millis(); 113 | 114 | let metrics_data = MetricsRouteEventData { 115 | route: "txns/latest-mine".to_string(), 116 | method: "GET".to_string(), 117 | status_code: 500, 118 | request: metrics_start, 119 | response: metrics_end, 120 | latency: metrics_end - metrics_start, 121 | ts_ns: metrics_end, 122 | 123 | }; 124 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 125 | tracing::error!(target: "server_log", "Failed to send msg down app metrics channel."); 126 | }; 127 | Err("Failed to get latest mine txn".to_string()) 128 | } 129 | } 130 | } else { 131 | let metrics_end = SystemTime::now() 132 | .duration_since(UNIX_EPOCH) 133 | .expect("Time went backwards") 134 | .as_millis(); 135 | 136 | let metrics_data = MetricsRouteEventData { 137 | route: "txns/latest-mine".to_string(), 138 | 
method: "GET".to_string(), 139 | status_code: 400, 140 | request: metrics_start, 141 | response: metrics_end, 142 | latency: metrics_end - metrics_start, 143 | ts_ns: metrics_end, 144 | 145 | }; 146 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 147 | tracing::error!(target: "server_log", "Failed to send msg down app metrics channel."); 148 | }; 149 | return Err("Stats not enabled for this server.".to_string()); 150 | } 151 | } 152 | 153 | pub async fn get_pool( 154 | Extension(app_rr_database): Extension>, 155 | Extension(app_config): Extension>, 156 | ) -> Result, String> { 157 | if app_config.stats_enabled { 158 | let pubkey = Pubkey::from_str("mineXqpDeBeMR8bPQCyy9UneJZbjFywraS3koWZ8SSH").unwrap(); 159 | let res = app_rr_database 160 | .get_pool_by_authority_pubkey(pubkey.to_string()) 161 | .await; 162 | 163 | match res { 164 | Ok(pool) => Ok(Json(pool)), 165 | Err(_) => Err("Failed to get pool data".to_string()), 166 | } 167 | } else { 168 | return Err("Stats not enabled for this server.".to_string()); 169 | } 170 | } 171 | 172 | pub async fn get_pool_staked( 173 | Extension(app_config): Extension>, 174 | Extension(rpc_client): Extension>, 175 | Extension(app_metrics_channel): Extension>, 176 | ) -> impl IntoResponse { 177 | let metrics_start = SystemTime::now() 178 | .duration_since(UNIX_EPOCH) 179 | .expect("Time went backwards") 180 | .as_millis(); 181 | if app_config.stats_enabled { 182 | let pubkey = Pubkey::from_str("mineXqpDeBeMR8bPQCyy9UneJZbjFywraS3koWZ8SSH").unwrap(); 183 | let proof = if let Ok(loaded_proof) = get_proof(&rpc_client, pubkey).await { 184 | loaded_proof 185 | } else { 186 | error!("get_pool_staked: Failed to load proof."); 187 | return Err("Stats not enabled for this server.".to_string()); 188 | }; 189 | 190 | let metrics_end = SystemTime::now() 191 | .duration_since(UNIX_EPOCH) 192 | .expect("Time went backwards") 193 | .as_millis(); 194 | 195 | let metrics_data = MetricsRouteEventData { 196 | 
route: "pool/staked".to_string(), 197 | method: "GET".to_string(), 198 | status_code: 200, 199 | request: metrics_start, 200 | response: metrics_end, 201 | latency: metrics_end - metrics_start, 202 | ts_ns: metrics_end, 203 | 204 | }; 205 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 206 | tracing::error!(target: "server_log", "Failed to send msg down app metrics channel."); 207 | }; 208 | 209 | return Ok(Json(proof.balance)); 210 | } else { 211 | let metrics_end = SystemTime::now() 212 | .duration_since(UNIX_EPOCH) 213 | .expect("Time went backwards") 214 | .as_millis(); 215 | 216 | let metrics_data = MetricsRouteEventData { 217 | route: "pool/staked".to_string(), 218 | method: "GET".to_string(), 219 | status_code: 400, 220 | request: metrics_start, 221 | response: metrics_end, 222 | latency: metrics_end - metrics_start, 223 | ts_ns: metrics_end, 224 | 225 | }; 226 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 227 | tracing::error!(target: "server_log", "Failed to send msg down app metrics channel."); 228 | }; 229 | return Err("Stats not enabled for this server.".to_string()); 230 | } 231 | } 232 | 233 | pub async fn get_pool_balance( 234 | Extension(app_config): Extension>, 235 | Extension(rpc_client): Extension>, 236 | Extension(app_metrics_channel): Extension>, 237 | ) -> impl IntoResponse { 238 | let metrics_start = SystemTime::now() 239 | .duration_since(UNIX_EPOCH) 240 | .expect("Time went backwards") 241 | .as_millis(); 242 | if app_config.stats_enabled { 243 | let pubkey = Pubkey::from_str("mineXqpDeBeMR8bPQCyy9UneJZbjFywraS3koWZ8SSH").unwrap(); 244 | let miner_token_account = get_associated_token_address(&pubkey, &get_ore_mint()); 245 | if let Ok(response) = rpc_client 246 | .get_token_account_balance(&miner_token_account) 247 | .await 248 | { 249 | let metrics_end = SystemTime::now() 250 | .duration_since(UNIX_EPOCH) 251 | .expect("Time went backwards") 252 | 
.as_millis(); 253 | 254 | let metrics_data = MetricsRouteEventData { 255 | route: "pool/balance".to_string(), 256 | method: "GET".to_string(), 257 | status_code: 200, 258 | request: metrics_start, 259 | response: metrics_end, 260 | latency: metrics_end - metrics_start, 261 | ts_ns: metrics_end, 262 | 263 | }; 264 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 265 | tracing::error!(target: "server_log", "Failed to send msg down app metrics channel."); 266 | }; 267 | return Response::builder() 268 | .status(StatusCode::OK) 269 | .body(response.ui_amount_string) 270 | .unwrap(); 271 | } else { 272 | let metrics_end = SystemTime::now() 273 | .duration_since(UNIX_EPOCH) 274 | .expect("Time went backwards") 275 | .as_millis(); 276 | 277 | let metrics_data = MetricsRouteEventData { 278 | route: "pool/balance".to_string(), 279 | method: "GET".to_string(), 280 | status_code: 500, 281 | request: metrics_start, 282 | response: metrics_end, 283 | latency: metrics_end - metrics_start, 284 | ts_ns: metrics_end, 285 | 286 | }; 287 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 288 | tracing::error!(target: "server_log", "Failed to send msg down app metrics channel."); 289 | }; 290 | return Response::builder() 291 | .status(StatusCode::BAD_REQUEST) 292 | .body("Failed to get token account balance".to_string()) 293 | .unwrap(); 294 | } 295 | } else { 296 | let metrics_end = SystemTime::now() 297 | .duration_since(UNIX_EPOCH) 298 | .expect("Time went backwards") 299 | .as_millis(); 300 | 301 | let metrics_data = MetricsRouteEventData { 302 | route: "pool/balance".to_string(), 303 | method: "GET".to_string(), 304 | status_code: 400, 305 | request: metrics_start, 306 | response: metrics_end, 307 | latency: metrics_end - metrics_start, 308 | ts_ns: metrics_end, 309 | 310 | }; 311 | if let Err(e) = app_metrics_channel.send(AppMetricsEvent::RouteEvent(metrics_data)) { 312 | tracing::error!(target: 
"server_log", "Failed to send msg down app metrics channel."); 313 | }; 314 | return Response::builder() 315 | .status(StatusCode::SERVICE_UNAVAILABLE) 316 | .body("Stats not available on this server.".to_string()) 317 | .unwrap(); 318 | } 319 | } 320 | -------------------------------------------------------------------------------- /src/schema.rs: -------------------------------------------------------------------------------- 1 | // @generated automatically by Diesel CLI. 2 | 3 | diesel::table! { 4 | challenges (id) { 5 | id -> Integer, 6 | pool_id -> Integer, 7 | submission_id -> Nullable, 8 | #[max_length = 32] 9 | challenge -> Binary, 10 | rewards_earned -> Nullable>, 11 | created_at -> Timestamp, 12 | updated_at -> Timestamp, 13 | } 14 | } 15 | 16 | diesel::table! { 17 | claims (id) { 18 | id -> Integer, 19 | miner_id -> Integer, 20 | pool_id -> Integer, 21 | txn_id -> Integer, 22 | amount -> Unsigned, 23 | created_at -> Timestamp, 24 | updated_at -> Timestamp, 25 | } 26 | } 27 | 28 | diesel::table! { 29 | miners (id) { 30 | id -> Integer, 31 | #[max_length = 44] 32 | pubkey -> Varchar, 33 | enabled -> Bool, 34 | created_at -> Timestamp, 35 | updated_at -> Timestamp, 36 | } 37 | } 38 | 39 | diesel::table! { 40 | pools (id) { 41 | id -> Integer, 42 | #[max_length = 44] 43 | proof_pubkey -> Varchar, 44 | #[max_length = 44] 45 | authority_pubkey -> Varchar, 46 | total_rewards -> Unsigned, 47 | claimed_rewards -> Unsigned, 48 | created_at -> Timestamp, 49 | updated_at -> Timestamp, 50 | } 51 | } 52 | 53 | diesel::table! { 54 | rewards (id) { 55 | id -> Integer, 56 | miner_id -> Integer, 57 | pool_id -> Integer, 58 | balance -> Unsigned, 59 | created_at -> Timestamp, 60 | updated_at -> Timestamp, 61 | } 62 | } 63 | 64 | diesel::table! 
{ 65 | stake_accounts (id) { 66 | id -> Integer, 67 | pool_id -> Integer, 68 | #[max_length = 44] 69 | mint_pubkey -> Varchar, 70 | #[max_length = 44] 71 | staker_pubkey -> Varchar, 72 | #[max_length = 44] 73 | stake_pda -> Varchar, 74 | rewards_balance -> Unsigned, 75 | staked_balance -> Unsigned, 76 | total_rewards_earned -> Unsigned, 77 | created_at -> Timestamp, 78 | updated_at -> Timestamp, 79 | } 80 | } 81 | 82 | diesel::table! { 83 | submissions_2 (id) { 84 | id -> Integer, 85 | miner_id -> Integer, 86 | challenge_id -> Integer, 87 | difficulty -> Tinyint, 88 | nonce -> Unsigned, 89 | created_at -> Timestamp, 90 | updated_at -> Timestamp, 91 | } 92 | } 93 | 94 | diesel::table! { 95 | txns (id) { 96 | id -> Integer, 97 | #[max_length = 15] 98 | txn_type -> Varchar, 99 | #[max_length = 200] 100 | signature -> Varchar, 101 | priority_fee -> Unsigned, 102 | created_at -> Timestamp, 103 | updated_at -> Timestamp, 104 | } 105 | } 106 | 107 | diesel::allow_tables_to_appear_in_same_query!( 108 | challenges, 109 | claims, 110 | miners, 111 | pools, 112 | rewards, 113 | stake_accounts, 114 | submissions_2, 115 | txns, 116 | ); 117 | -------------------------------------------------------------------------------- /src/scripts.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, str::FromStr, sync::Arc, time::Duration}; 2 | 3 | use ore_miner_delegation::{pda::{delegated_boost_pda, managed_proof_pda}, state::DelegatedBoostV2, utils::AccountDeserializeV1}; 4 | use solana_account_decoder::UiAccountEncoding; 5 | use solana_client::{nonblocking::rpc_client::RpcClient, rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, rpc_filter::{Memcmp, RpcFilterType}}; 6 | use solana_sdk::{ 7 | commitment_config::{CommitmentConfig, CommitmentLevel}, pubkey::Pubkey, signature::read_keypair_file, signer::Signer 8 | }; 9 | use tokio::time::Instant; 10 | 11 | use crate::{app_database::AppDatabase, 
InsertStakeAccount, UpdateStakeAccount}; 12 | 13 | pub async fn update_stake_accounts() -> Result<(), Box> { 14 | println!("Updating stake accounts from on-chain data"); 15 | 16 | // load envs 17 | let wallet_path_str = std::env::var("WALLET_PATH").expect("WALLET_PATH must be set."); 18 | let database_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set."); 19 | let rpc_url = std::env::var("RPC_URL").expect("RPC_URL must be set."); 20 | 21 | let wallet_path = std::path::Path::new(&wallet_path_str); 22 | 23 | if !wallet_path.exists() { 24 | println!("Failed to load wallet at: {}", wallet_path_str); 25 | return Err("Failed to find wallet path.".into()); 26 | } 27 | 28 | let wallet = read_keypair_file(wallet_path) 29 | .expect("Failed to load keypair from file: {wallet_path_str}"); 30 | println!("loaded wallet {}", wallet.pubkey().to_string()); 31 | 32 | let app_database = Arc::new(AppDatabase::new(database_url)); 33 | let rpc_client = RpcClient::new_with_commitment(rpc_url, CommitmentConfig::confirmed()); 34 | 35 | // let pool = match app_database.get_pool_by_authority_pubkey(wallet.pubkey().to_string()).await { 36 | // Ok(p) => { 37 | // p 38 | // }, 39 | // Err(_) => { 40 | // println!("Failed to get pool data from database"); 41 | // return Ok(()); 42 | // } 43 | // }; 44 | 45 | // let boost_mints = vec![ 46 | // Pubkey::from_str("oreoU2P8bN6jkk3jbaiVxYnG1dCXcYxwhwyK9jSybcp").unwrap(), 47 | // Pubkey::from_str("DrSS5RM7zUd9qjUEdDaf31vnDUSbCrMto6mjqTrHFifN").unwrap(), 48 | // Pubkey::from_str("meUwDp23AaxhiNKaQCyJ2EAF2T4oe1gSkEkGXSRVdZb").unwrap() 49 | // ]; 50 | 51 | // println!("Fetching ore stake accounts from db..."); 52 | // let mut ore_stake_accounts = vec![]; 53 | // let mut last_id: i32 = 0; 54 | // loop { 55 | // tokio::time::sleep(Duration::from_millis(400)).await; 56 | // match app_database.get_staker_accounts_for_mint(pool.id, boost_mints[0].to_string(), last_id, 0).await { 57 | // Ok(d) => { 58 | // if d.len() > 0 { 59 | // for ac in 
d.iter() { 60 | // last_id = ac.id; 61 | // ore_stake_accounts.push(ac.clone()); 62 | // } 63 | // } 64 | 65 | // if d.len() < 500 { 66 | // break; 67 | // } 68 | // }, 69 | // Err(e) => { 70 | // println!("Failed to get staker accounts for stake account updates."); 71 | // println!("Error: {:?}", e); 72 | // } 73 | // }; 74 | // } 75 | 76 | // println!("Got {} ore stake accounts.", ore_stake_accounts.len()); 77 | 78 | // println!("Fetching ore-sol stake accounts from db..."); 79 | // let mut ore_sol_stake_accounts = vec![]; 80 | // let mut last_id: i32 = 0; 81 | // loop { 82 | // tokio::time::sleep(Duration::from_millis(400)).await; 83 | // match app_database.get_staker_accounts_for_mint(pool.id, boost_mints[1].to_string(), last_id, 0).await { 84 | // Ok(d) => { 85 | // if d.len() > 0 { 86 | // for ac in d.iter() { 87 | // last_id = ac.id; 88 | // ore_sol_stake_accounts.push(ac.clone()); 89 | // } 90 | // } 91 | 92 | // if d.len() < 500 { 93 | // break; 94 | // } 95 | // }, 96 | // Err(e) => { 97 | // println!("Failed to get staker accounts for stake account updates."); 98 | // println!("Error: {:?}", e); 99 | // } 100 | // }; 101 | // } 102 | // println!("Got {} ore-sol stake accounts.", ore_sol_stake_accounts.len()); 103 | 104 | // println!("Fetching ore-isc stake accounts from db..."); 105 | // let mut ore_isc_stake_accounts = vec![]; 106 | // let mut last_id: i32 = 0; 107 | // loop { 108 | // tokio::time::sleep(Duration::from_millis(400)).await; 109 | // match app_database.get_staker_accounts_for_mint(pool.id, boost_mints[2].to_string(), last_id, 0).await { 110 | // Ok(d) => { 111 | // if d.len() > 0 { 112 | // for ac in d.iter() { 113 | // last_id = ac.id; 114 | // ore_isc_stake_accounts.push(ac.clone()); 115 | // } 116 | // } 117 | 118 | // if d.len() < 500 { 119 | // break; 120 | // } 121 | // }, 122 | // Err(e) => { 123 | // println!("Failed to get staker accounts for stake account updates."); 124 | // println!("Error: {:?}", e); 125 | // } 126 | // }; 127 
| // } 128 | // println!("Got {} ore-isc stake accounts.", ore_isc_stake_accounts.len()); 129 | 130 | 131 | let managed_proof_authority_pda = managed_proof_pda(wallet.pubkey()); 132 | let program_accounts = match rpc_client.get_program_accounts_with_config( 133 | &ore_miner_delegation::id(), 134 | RpcProgramAccountsConfig { 135 | filters: Some(vec![RpcFilterType::DataSize(152), RpcFilterType::Memcmp(Memcmp::new_raw_bytes(16, managed_proof_authority_pda.0.to_bytes().into()))]), 136 | account_config: RpcAccountInfoConfig { 137 | encoding: Some(UiAccountEncoding::Base64), 138 | data_slice: None, 139 | commitment: Some(CommitmentConfig { commitment: CommitmentLevel::Finalized}), 140 | min_context_slot: None, 141 | }, 142 | with_context: None, 143 | sort_results: None, 144 | } 145 | ).await { 146 | Ok(pa) => { 147 | pa 148 | }, 149 | Err(e) => { 150 | println!("Failed to get program_accounts. Error: {:?}", e); 151 | return Ok(()); 152 | } 153 | 154 | }; 155 | 156 | println!("Found {} program accounts", program_accounts.len()); 157 | 158 | let mut delegated_boosts = HashMap::new(); 159 | for program_account in program_accounts.iter() { 160 | if let Ok(delegate_boost_acct) = DelegatedBoostV2::try_from_bytes(&program_account.1.data) { 161 | delegated_boosts.insert(program_account.0, delegate_boost_acct); 162 | } 163 | } 164 | 165 | println!("Found {} delegated_boosts.", delegated_boosts.len()); 166 | let mut updated_stake_accounts = vec![]; 167 | 168 | 169 | let mut total_token_balances = 0; 170 | for delegate_boost in delegated_boosts.iter() { 171 | let updated_stake_account = UpdateStakeAccount { 172 | stake_pda: delegate_boost.0.to_string(), 173 | staked_balance: delegate_boost.1.amount, 174 | }; 175 | total_token_balances += delegate_boost.1.amount; 176 | 177 | updated_stake_accounts.push(updated_stake_account); 178 | } 179 | 180 | println!("Total tokens delegated: {}", total_token_balances); 181 | 182 | let instant = Instant::now(); 183 | let batch_size = 200; 184 | 
println!("Updating stake accounts."); 185 | if updated_stake_accounts.len() > 0 { 186 | for (i, batch) in updated_stake_accounts.chunks(batch_size).enumerate() { 187 | let instant = Instant::now(); 188 | println!("Updating batch {}", i); 189 | while let Err(_) = app_database.update_stake_accounts_staked_balance(batch.to_vec()).await { 190 | println!("Failed to update stake_account staked_balance in db. Retrying..."); 191 | tokio::time::sleep(Duration::from_millis(500)).await; 192 | } 193 | println!("Updated staked_account batch {} in {}ms", i, instant.elapsed().as_millis()); 194 | tokio::time::sleep(Duration::from_millis(200)).await; 195 | } 196 | println!("Successfully updated stake_accounts"); 197 | } 198 | println!("Updated stake_accounts in {}ms", instant.elapsed().as_millis()); 199 | 200 | Ok(()) 201 | } 202 | 203 | pub async fn db_submissions_cleanup() -> Result<(), Box> { 204 | let sleep_secs = 10; 205 | println!("Starting db submissions cleaup script."); 206 | println!("Submissions over 7 days old will be cleared every {} seconds.", sleep_secs); 207 | 208 | // load envs 209 | let database_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set."); 210 | 211 | let app_database = Arc::new(AppDatabase::new(database_url)); 212 | 213 | loop { 214 | let time = Instant::now(); 215 | println!("Starting submissions cleanup."); 216 | match app_database.delete_old_submissions().await { 217 | Ok(_) => { 218 | 219 | println!("Successfully ran submissions cleanup in {}ms", time.elapsed().as_millis()); 220 | }, 221 | Err(e) => { 222 | println!("Error cleaning up submissions.\nError: {:?}", e); 223 | } 224 | } 225 | 226 | println!("Next cleanup in {} seconds.", sleep_secs); 227 | tokio::time::sleep(Duration::from_secs(sleep_secs)).await; 228 | } 229 | } 230 | 231 | 232 | 233 | -------------------------------------------------------------------------------- /src/systems/app_metrics_system.rs: 
-------------------------------------------------------------------------------- 1 | use std::time::{Duration, SystemTime, UNIX_EPOCH}; 2 | 3 | use sysinfo::{CpuRefreshKind, Disks, MemoryRefreshKind, RefreshKind, System}; 4 | use tokio::{sync::mpsc::UnboundedReceiver, time::Instant}; 5 | 6 | use crate::app_metrics::{AppMetrics, AppMetricsEvent}; 7 | 8 | pub async fn metrics_system( 9 | url: String, 10 | token: String, 11 | org: String, 12 | bucket: String, 13 | host: String, 14 | mut metrics_event: UnboundedReceiver 15 | ) { 16 | let app_metrics = AppMetrics::new(url, token, org, bucket, host); 17 | let mut sys = System::new(); 18 | let mut disks = Disks::new_with_refreshed_list(); 19 | 20 | let r_kind = RefreshKind::new() 21 | .with_cpu(CpuRefreshKind::everything().without_frequency()) 22 | .with_memory(MemoryRefreshKind::everything()); 23 | 24 | let mut system_stats_instant = Instant::now(); 25 | 26 | loop { 27 | tick_system_stats_metrics(&app_metrics, &mut sys, &r_kind, &mut system_stats_instant, &mut disks).await; 28 | while let Ok(me) = metrics_event.try_recv() { 29 | match me { 30 | AppMetricsEvent::MineEvent(data) => { 31 | match data { 32 | crate::app_metrics::AppMetricsMineEvent::V1(data) => { 33 | let ts_ns = match SystemTime::now().duration_since(UNIX_EPOCH) { 34 | Ok(d) => { 35 | d.as_nanos() 36 | }, 37 | Err(_d) => { 38 | tracing::error!(target: "server_log", "Time went backwards..."); 39 | continue; 40 | } 41 | }; 42 | let formatted_data = format!("mine_event,host={} balance={}u,difficulty={}u,last_hash_at={}i,timing={}i,reward={}u,boost_1={}u,boost_2={}u,boost_3={}u {}", 43 | app_metrics.hostname, 44 | data.balance, 45 | data.difficulty, 46 | data.last_hash_at, 47 | data.timing, 48 | data.reward, 49 | data.boost_1, 50 | data.boost_2, 51 | data.boost_3, 52 | ts_ns 53 | ); 54 | match app_metrics.send_data_to_influxdb(formatted_data).await { 55 | Ok(_) => { 56 | }, 57 | Err(e) => { 58 | tracing::error!(target: "server_log", "Failed to send metrics data 
to influxdb.\nError: {:?}", e); 59 | } 60 | } 61 | 62 | }, 63 | crate::app_metrics::AppMetricsMineEvent::V2(data) => { 64 | let ts_ns = match SystemTime::now().duration_since(UNIX_EPOCH) { 65 | Ok(d) => { 66 | d.as_nanos() 67 | }, 68 | Err(_d) => { 69 | tracing::error!(target: "server_log", "Time went backwards..."); 70 | continue; 71 | } 72 | }; 73 | let formatted_data = format!("mine_event,host={} balance={}u,difficulty={}u,last_hash_at={}i,timing={}i,reward={}u,boost_1={}u,boost_2={}u,boost_3={}u {}", 74 | app_metrics.hostname, 75 | data.balance, 76 | data.difficulty, 77 | data.last_hash_at, 78 | data.timing, 79 | data.net_reward, 80 | 0.0, 81 | 0.0, 82 | 0.0, 83 | ts_ns 84 | ); 85 | match app_metrics.send_data_to_influxdb(formatted_data).await { 86 | Ok(_) => { 87 | }, 88 | Err(e) => { 89 | tracing::error!(target: "server_log", "Failed to send metrics data to influxdb.\nError: {:?}", e); 90 | } 91 | } 92 | 93 | } 94 | 95 | } 96 | }, 97 | AppMetricsEvent::ClaimEvent(_data) => { 98 | }, 99 | AppMetricsEvent::ProcessingClaimsEvent(data) => { 100 | let ts_ns = match SystemTime::now().duration_since(UNIX_EPOCH) { 101 | Ok(d) => { 102 | d.as_nanos() 103 | }, 104 | Err(_d) => { 105 | tracing::error!(target: "server_log", "Time went backwards..."); 106 | continue; 107 | } 108 | }; 109 | let formatted_data = format!("claim_system_event,host={} queue_length={}u {}", 110 | app_metrics.hostname, 111 | data.claims_queue_length, 112 | ts_ns 113 | ); 114 | match app_metrics.send_data_to_influxdb(formatted_data).await { 115 | Ok(_) => { 116 | }, 117 | Err(e) => { 118 | tracing::error!(target: "server_log", "Failed to send metrics data to influxdb.\nError: {:?}", e); 119 | } 120 | } 121 | 122 | }, 123 | AppMetricsEvent::RouteEvent(data) => { 124 | let ts_ns = match SystemTime::now().duration_since(UNIX_EPOCH) { 125 | Ok(d) => { 126 | d.as_nanos() 127 | }, 128 | Err(_d) => { 129 | tracing::error!(target: "server_log", "Time went backwards..."); 130 | continue; 131 | } 132 | }; 
133 | let formatted_data = format!("route_event,host={} route=\"{}\",method=\"{}\",status_code={}u,request={}u,response={}u,latency={}u {}", 134 | app_metrics.hostname, 135 | data.route, 136 | data.method, 137 | data.status_code, 138 | data.request, 139 | data.response, 140 | data.latency, 141 | ts_ns 142 | ); 143 | match app_metrics.send_data_to_influxdb(formatted_data).await { 144 | Ok(_) => {}, 145 | Err(e) => { 146 | tracing::error!(target: "server_log", "Failed to send metrics data to influxdb.\nError: {:?}", e); 147 | } 148 | } 149 | } 150 | } 151 | } 152 | tokio::time::sleep(Duration::from_millis(100)).await; 153 | } 154 | } 155 | 156 | async fn tick_system_stats_metrics(app_metrics: &AppMetrics, sys: &mut System, r_kind: &RefreshKind, system_stats_instant: &mut Instant, disks: &mut Disks) { 157 | if system_stats_instant.elapsed().as_secs() >= 5 { 158 | // track metrics 159 | sys.refresh_specifics(*r_kind); 160 | disks.refresh_list(); 161 | let ts_ns = match SystemTime::now().duration_since(UNIX_EPOCH) { 162 | Ok(d) => { 163 | d.as_nanos() 164 | }, 165 | Err(_d) => { 166 | tracing::error!(target: "server_log", "Time went backwards..."); 167 | return; 168 | } 169 | }; 170 | let mut cpu_data = String::new(); 171 | let mut total_cpu_usage = 0.; 172 | 173 | for (i, cpu) in sys.cpus().iter().enumerate() { 174 | let formatted_data = format!("cpu,host={},cpu={}i usage={} {}\n", 175 | app_metrics.hostname, 176 | i, 177 | cpu.cpu_usage(), 178 | ts_ns, 179 | ); 180 | total_cpu_usage += cpu.cpu_usage(); 181 | cpu_data.push_str(&formatted_data); 182 | } 183 | 184 | let cpu_total_usage_data = format!("cpu,host={} total_usage={} {}\n", 185 | app_metrics.hostname, 186 | total_cpu_usage, 187 | ts_ns, 188 | ); 189 | 190 | cpu_data.push_str(&cpu_total_usage_data); 191 | 192 | 193 | let mut disk_data = String::new(); 194 | let mut total_disk_used = 0; 195 | let mut total_disk_total = 0; 196 | for (i, disk) in disks.iter().enumerate() { 197 | let available_space = 
disk.available_space(); 198 | let total_space = disk.total_space(); 199 | let used_space = total_space - available_space; 200 | let formatted_data = format!("disk,host={},disk={}i used={},total={} {}\n", 201 | app_metrics.hostname, 202 | i, 203 | used_space, 204 | disk.total_space(), 205 | ts_ns, 206 | ); 207 | total_disk_used += used_space; 208 | total_disk_total += total_space; 209 | disk_data.push_str(&formatted_data); 210 | } 211 | let disk_total_usage_data = format!("disk,host={},disk=all used={},total={} {}\n", 212 | app_metrics.hostname, 213 | total_disk_used, 214 | total_disk_total, 215 | ts_ns, 216 | ); 217 | 218 | disk_data.push_str(&disk_total_usage_data); 219 | 220 | let memory_data = format!( 221 | "memory,host={} total={}i,used={}i,free={}i {}", 222 | app_metrics.hostname, 223 | sys.total_memory(), 224 | sys.used_memory(), 225 | sys.free_memory(), 226 | ts_ns, 227 | ); 228 | 229 | let metrics_data = format!("{}\n{}\n{}\n", 230 | cpu_data, 231 | disk_data, 232 | memory_data, 233 | ); 234 | match app_metrics.send_data_to_influxdb(metrics_data).await { 235 | Ok(_) => { 236 | }, 237 | Err(e) => { 238 | tracing::error!(target: "server_log", "Failed to send metrics data to influxdb.\nError: {:?}", e); 239 | } 240 | } 241 | // reset instant 242 | *system_stats_instant = Instant::now(); 243 | } 244 | } 245 | -------------------------------------------------------------------------------- /src/systems/cache_update_system.rs: -------------------------------------------------------------------------------- 1 | use std::{ops::Div, str::FromStr as _, sync::Arc, time::Duration}; 2 | 3 | use ore_boost_api::state::{boost_pda, stake_pda}; 4 | use solana_client::nonblocking::rpc_client::RpcClient; 5 | use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel}; 6 | use steel::{AccountDeserialize as _, Pubkey}; 7 | use tokio::{sync::RwLock, time::Instant}; 8 | use base64::{prelude::BASE64_STANDARD, Engine}; 9 | 10 | use crate::{app_rr_database::AppRRDatabase, 
ore_utils::ORE_TOKEN_DECIMALS, BoostMultiplierCache, BoostMultiplierData, ChallengesCache, Config, LastChallengeSubmissionsCache, LatestBlockhashCache, WalletExtension}; 11 | 12 | const CACHED_BOOST_MULTIPLIER_UPDATE_INTERVAL: u64 = 15; 13 | const CACHED_LAST_CHALLENGE_SUBMISSIONS_UPDATE_INTERVAL: u64 = 15; 14 | const CACHED_CHALLENGES_UPDATE_INTERVAL: u64 = 15; 15 | const CACHED_LATEST_BLOCKHASH_UPDATE_INTERVAL: u64 = 5; 16 | 17 | 18 | pub async fn cache_update_system( 19 | app_config: Arc, 20 | rpc_client: Arc, 21 | app_rr_database: Arc, 22 | boost_multiplier_cache: Arc>, 23 | last_challenge_submission_cache: Arc>, 24 | challenges_cache: Arc>, 25 | latest_blockhash_cache: Arc>, 26 | ) { 27 | // Cached LatestBlockhash 28 | let cached_item = latest_blockhash_cache.clone(); 29 | let app_rpc_client = rpc_client.clone(); 30 | tokio::spawn(async move { 31 | let latest_blockhash_cache = cached_item; 32 | let rpc_client = app_rpc_client; 33 | loop { 34 | let lbhash = loop { 35 | match rpc_client.get_latest_blockhash_with_commitment(CommitmentConfig { commitment: CommitmentLevel::Finalized }).await { 36 | Ok(lb) => { 37 | //tracing::info!(target: "server_log", "Successfully updated latest blockhash"); 38 | break lb 39 | }, 40 | Err(e) => { 41 | tracing::error!(target: "server_log", "Failed to get latest blockhash in cache system. 
E: {:?}\n Retrying in 2 secs...", e); 42 | tokio::time::sleep(Duration::from_secs(2000)).await; 43 | } 44 | }; 45 | }; 46 | let serialized_blockhash = bincode::serialize(&lbhash).unwrap(); 47 | let encoded_blockhash = BASE64_STANDARD.encode(serialized_blockhash); 48 | let mut writer = latest_blockhash_cache.write().await; 49 | writer.item = encoded_blockhash.clone(); 50 | writer.last_updated_at = Instant::now(); 51 | drop(writer); 52 | 53 | tokio::time::sleep(Duration::from_secs(CACHED_LATEST_BLOCKHASH_UPDATE_INTERVAL)).await; 54 | } 55 | }); 56 | 57 | if app_config.stats_enabled { 58 | // Cached Boost Multiplier 59 | let bm_cache = boost_multiplier_cache.clone(); 60 | let app_rpc_client = rpc_client.clone(); 61 | tokio::spawn(async move { 62 | let boost_multiplier_cache = bm_cache; 63 | let rpc_client = app_rpc_client; 64 | loop { 65 | let mut boost_multiplier_datas = vec![]; 66 | let mut writer = boost_multiplier_cache.write().await; 67 | writer.item = boost_multiplier_datas.clone(); 68 | writer.last_updated_at = Instant::now(); 69 | drop(writer); 70 | 71 | tokio::time::sleep(Duration::from_secs(CACHED_BOOST_MULTIPLIER_UPDATE_INTERVAL)).await; 72 | } 73 | }); 74 | 75 | // Cached Last Challenge Submissions 76 | let cached_item = last_challenge_submission_cache.clone(); 77 | let app_rr_db = app_rr_database.clone(); 78 | tokio::spawn(async move { 79 | let last_challenge_submission_cache = cached_item; 80 | let app_rr_database = app_rr_db; 81 | loop { 82 | let res = app_rr_database.get_last_challenge_submissions().await; 83 | 84 | match res { 85 | Ok(submissions) => { 86 | let mut writer = last_challenge_submission_cache.write().await; 87 | writer.item = submissions.clone(); 88 | writer.last_updated_at = Instant::now(); 89 | drop(writer); 90 | } 91 | Err(_) => {}, 92 | } 93 | 94 | tokio::time::sleep(Duration::from_secs(CACHED_LAST_CHALLENGE_SUBMISSIONS_UPDATE_INTERVAL)).await; 95 | } 96 | }); 97 | 98 | // Cached Challenges 99 | let cached_item = 
challenges_cache.clone(); 100 | let app_rr_db = app_rr_database.clone(); 101 | tokio::spawn(async move { 102 | let challenges_cache = cached_item; 103 | let app_rr_database = app_rr_db; 104 | loop { 105 | let res = app_rr_database.get_challenges().await; 106 | 107 | match res { 108 | Ok(challenges) => { 109 | let mut writer = challenges_cache.write().await; 110 | writer.item = challenges.clone(); 111 | writer.last_updated_at = Instant::now(); 112 | drop(writer); 113 | } 114 | Err(_) => {}, 115 | } 116 | 117 | tokio::time::sleep(Duration::from_secs(CACHED_CHALLENGES_UPDATE_INTERVAL)).await; 118 | } 119 | }); 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/systems/claim_system.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use solana_client::{nonblocking::rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig}; 4 | use solana_sdk::{ 5 | compute_budget::ComputeBudgetInstruction, 6 | signature::{Keypair, Signature}, 7 | signer::Signer, 8 | transaction::Transaction, 9 | }; 10 | use solana_transaction_status::TransactionConfirmationStatus; 11 | use spl_associated_token_account::get_associated_token_address; 12 | use steel::Pubkey; 13 | use tokio::{sync::mpsc::UnboundedSender, time::Instant}; 14 | use tracing::{error, info}; 15 | 16 | use crate::{ 17 | app_database::AppDatabase, app_metrics::{AppMetricsEvent, MetricsProcessingClaimsEventData}, ore_utils::{get_ore_mint, ORE_TOKEN_DECIMALS}, ClaimsQueue, ClaimsQueueItem, InsertClaim, InsertTxn 18 | }; 19 | 20 | const CLAIMS_PROCESSING_AMOUNT: usize = 10; 21 | 22 | pub async fn claim_system( 23 | claims_queue: Arc, 24 | rpc_client: Arc, 25 | wallet: Arc, 26 | app_database: Arc, 27 | app_metrics_sender: UnboundedSender, 28 | ) { 29 | loop { 30 | let mut handles = Vec::new(); 31 | 32 | let reader = claims_queue.queue.read().await; 33 | let claims_queue_len = reader.len(); 34 | 
info!(target: "claim_log", "Claims queue length: {}", claims_queue_len); 35 | 36 | if claims_queue_len > 0 { 37 | let processing_claims_event_data = MetricsProcessingClaimsEventData { 38 | claims_queue_length: claims_queue_len, 39 | }; 40 | match app_metrics_sender.send(AppMetricsEvent::ProcessingClaimsEvent(processing_claims_event_data)) { 41 | Ok(_) => {} 42 | Err(_) => { 43 | tracing::error!(target: "claim_log", "Failed to send AppMetricsEvent down app_metrics_sender mpsc channel."); 44 | } 45 | } 46 | 47 | let mut selected_claims = Vec::with_capacity(CLAIMS_PROCESSING_AMOUNT); 48 | for (i, item) in reader.iter().enumerate() { 49 | if i >= CLAIMS_PROCESSING_AMOUNT { 50 | break; 51 | } 52 | 53 | selected_claims.push((item.0.clone(), item.1.clone())); 54 | } 55 | drop(reader); 56 | 57 | 58 | for ((user_pubkey, _mint_pubkey), claim_queue_item) in selected_claims { 59 | let cq = claims_queue.clone(); 60 | let rpc = rpc_client.clone(); 61 | let w = wallet.clone(); 62 | let adb = app_database.clone(); 63 | handles.push(tokio::spawn(async move { 64 | let claims_queue = cq; 65 | let rpc_client = rpc; 66 | let wallet = w; 67 | let app_database = adb; 68 | process_claim(user_pubkey, claim_queue_item, rpc_client, wallet, app_database, claims_queue).await; 69 | })); 70 | tokio::time::sleep(Duration::from_millis(500)).await; 71 | } 72 | 73 | for handle in handles { 74 | // wait for spawned tasks to finish 75 | let _ = handle.await; 76 | } 77 | } 78 | 79 | tokio::time::sleep(Duration::from_secs(2)).await; 80 | } 81 | } 82 | 83 | async fn process_claim(user_pubkey: Pubkey, claim_queue_item: ClaimsQueueItem, rpc_client: Arc, wallet: Arc, app_database: Arc, claims_queue: Arc) { 84 | if let Some(mint_pubkey) = claim_queue_item.mint { 85 | info!(target: "claim_log", "Processing stakers claim"); 86 | let staker_pubkey = user_pubkey; 87 | let ore_mint = get_ore_mint(); 88 | let receiver_pubkey = claim_queue_item.receiver_pubkey; 89 | let receiver_token_account = 
get_associated_token_address(&receiver_pubkey, &ore_mint); 90 | let amount = claim_queue_item.amount; 91 | 92 | let mut claim_amount = amount; 93 | 94 | if let Ok(stake_account) = app_database 95 | .get_staker_rewards(staker_pubkey.to_string(), mint_pubkey.to_string()) 96 | .await 97 | { 98 | if amount > stake_account.rewards_balance { 99 | let mut writer = claims_queue.queue.write().await; 100 | writer.remove(&(staker_pubkey, Some(mint_pubkey))); 101 | drop(writer); 102 | return; 103 | } 104 | } else { 105 | let mut writer = claims_queue.queue.write().await; 106 | writer.remove(&(staker_pubkey, Some(mint_pubkey))); 107 | drop(writer); 108 | return; 109 | } 110 | 111 | let prio_fee: u32 = 100_000; 112 | 113 | let mut is_creating_ata = false; 114 | let mut ixs = Vec::new(); 115 | let prio_fee_ix = ComputeBudgetInstruction::set_compute_unit_price(prio_fee as u64); 116 | ixs.push(prio_fee_ix); 117 | if let Ok(response) = rpc_client 118 | .get_token_account_balance(&receiver_token_account) 119 | .await 120 | { 121 | if let Some(_amount) = response.ui_amount { 122 | info!(target: "claim_log", "staker claim beneficiary has valid token account."); 123 | } else { 124 | info!(target: "claim_log", "will create token account for staker claim beneficiary"); 125 | ixs.push( 126 | spl_associated_token_account::instruction::create_associated_token_account( 127 | &wallet.pubkey(), 128 | &receiver_pubkey, 129 | &ore_api::consts::MINT_ADDRESS, 130 | &spl_token::id(), 131 | ), 132 | ) 133 | } 134 | } else { 135 | info!(target: "claim_log", "Adding create ata ix for staker claim"); 136 | is_creating_ata = true; 137 | ixs.push( 138 | spl_associated_token_account::instruction::create_associated_token_account( 139 | &wallet.pubkey(), 140 | &receiver_pubkey, 141 | &ore_api::consts::MINT_ADDRESS, 142 | &spl_token::id(), 143 | ), 144 | ) 145 | } 146 | 147 | // 0.00_000_001_000 148 | if is_creating_ata { 149 | claim_amount = amount - 1_000 150 | } 151 | let ix = 152 | 
crate::ore_utils::get_claim_ix(wallet.pubkey(), receiver_token_account, claim_amount); 153 | ixs.push(ix); 154 | 155 | if let Ok((hash, _slot)) = rpc_client 156 | .get_latest_blockhash_with_commitment(rpc_client.commitment()) 157 | .await 158 | { 159 | let expired_timer = Instant::now(); 160 | let mut tx = Transaction::new_with_payer(&ixs, Some(&wallet.pubkey())); 161 | 162 | tx.sign(&[&wallet], hash); 163 | 164 | let rpc_config = RpcSendTransactionConfig { 165 | preflight_commitment: Some(rpc_client.commitment().commitment), 166 | ..RpcSendTransactionConfig::default() 167 | }; 168 | 169 | let signature; 170 | let mut send_attempts = 1; 171 | loop { 172 | match rpc_client 173 | .send_transaction_with_config(&tx, rpc_config) 174 | .await 175 | { 176 | Ok(sig) => { 177 | signature = sig; 178 | break; 179 | }, 180 | Err(e) => { 181 | if send_attempts > 10 { 182 | error!(target: "claim_log", "Failed to send stakers claim transaction.\nError: {:?}.\nRetry Limit Reached. Removing claim from queue.", e); 183 | let mut writer = claims_queue.queue.write().await; 184 | writer.remove(&(staker_pubkey, Some(mint_pubkey))); 185 | drop(writer); 186 | return; 187 | } else { 188 | send_attempts += 1; 189 | error!(target: "claim_log", "Failed to send stakers claim transaction.\nError: {:?}.\nRetrying in 2 seconds...", e); 190 | tokio::time::sleep(Duration::from_millis(2000)).await; 191 | } 192 | } 193 | } 194 | } 195 | 196 | let result: Result = loop { 197 | let elapsed = expired_timer.elapsed().as_secs(); 198 | if elapsed >= 600 { 199 | break Err("Transaction Expired".to_string()); 200 | } 201 | let results = rpc_client.get_signature_statuses(&[signature]).await; 202 | if let Ok(response) = results { 203 | let statuses = response.value; 204 | if let Some(status) = &statuses[0] { 205 | info!(target: "claim_log", "Staker claim for {} -- elapsed: {} -- status: {:?}", user_pubkey.to_string(), elapsed, status); 206 | if status.confirmation_status() 207 | == 
TransactionConfirmationStatus::Finalized 208 | { 209 | if status.err.is_some() { 210 | let e_str = format!("Stake Claim Transaction Failed: {:?}", status.err); 211 | break Err(e_str); 212 | } 213 | break Ok(signature); 214 | } 215 | } else { 216 | info!(target: "claim_log", "Staker claim for {} -- elapsed: {} -- status: None", user_pubkey.to_string(), elapsed); 217 | } 218 | } else { 219 | info!(target: "claim_log", "Staker claim for {} -- elapsed: {} -- status: GET SIG RPC ERROR", user_pubkey.to_string(), elapsed); 220 | } 221 | // Send the txn again 222 | let _ = rpc_client.send_transaction_with_config(&tx, rpc_config).await; 223 | tokio::time::sleep(Duration::from_millis(5000)).await; 224 | }; 225 | 226 | match result { 227 | Ok(sig) => { 228 | let amount_dec = amount as f64 / 10f64.powf(ORE_TOKEN_DECIMALS as f64); 229 | info!(target: "claim_log", "Staker {} successfully claimed {}.\nSig: {}", staker_pubkey.to_string(), amount_dec, sig.to_string()); 230 | 231 | // TODO: use transacions, or at least put them into one query 232 | let db_pool = app_database 233 | .get_pool_by_authority_pubkey(wallet.pubkey().to_string()) 234 | .await 235 | .unwrap(); 236 | let staker = app_database 237 | .get_stake_account_for_staker(db_pool.id, staker_pubkey.to_string(), mint_pubkey.to_string()) 238 | .await 239 | .unwrap(); 240 | while let Err(_) = 241 | app_database.decrease_stakers_rewards(staker.id, amount).await 242 | { 243 | error!(target: "claim_log", "Failed to decrease stakers rewards! Retrying..."); 244 | tokio::time::sleep(Duration::from_millis(2000)).await; 245 | } 246 | while let Err(_) = app_database 247 | .update_pool_claimed(wallet.pubkey().to_string(), amount) 248 | .await 249 | { 250 | error!(target: "claim_log", "Failed to increase pool claimed amount! 
Retrying..."); 251 | tokio::time::sleep(Duration::from_millis(2000)).await; 252 | } 253 | 254 | let itxn = InsertTxn { 255 | txn_type: "staker-claim".to_string(), 256 | signature: sig.to_string(), 257 | priority_fee: prio_fee, 258 | }; 259 | while let Err(_) = app_database.add_new_txn(itxn.clone()).await { 260 | error!(target: "claim_log", "Failed to add new staker-claim txn! Retrying..."); 261 | tokio::time::sleep(Duration::from_millis(2000)).await; 262 | } 263 | 264 | // TODO: InsertStakerClaim 265 | let mut writer = claims_queue.queue.write().await; 266 | writer.remove(&(staker_pubkey, Some(mint_pubkey))); 267 | drop(writer); 268 | 269 | info!(target: "claim_log", "Stake rewards claim successfully processed!"); 270 | } 271 | Err(e) => { 272 | error!(target: "claim_log", "ERROR: {:?}", e); 273 | } 274 | } 275 | } else { 276 | error!(target: "claim_log", "Failed to confirm transaction, will retry on next iteration."); 277 | } 278 | } else { 279 | info!(target: "claim_log", "Processing miners claim"); 280 | let miner_pubkey = user_pubkey; 281 | let ore_mint = get_ore_mint(); 282 | let receiver_pubkey = claim_queue_item.receiver_pubkey; 283 | let receiver_token_account = get_associated_token_address(&receiver_pubkey, &ore_mint); 284 | let amount = claim_queue_item.amount; 285 | 286 | let mut claim_amount = amount; 287 | 288 | if let Ok(miner_rewards) = app_database 289 | .get_miner_rewards(miner_pubkey.to_string()) 290 | .await 291 | { 292 | if amount > miner_rewards.balance { 293 | let mut writer = claims_queue.queue.write().await; 294 | writer.remove(&(miner_pubkey, None)); 295 | drop(writer); 296 | return; 297 | } 298 | } else { 299 | let mut writer = claims_queue.queue.write().await; 300 | writer.remove(&(miner_pubkey, None)); 301 | drop(writer); 302 | return; 303 | } 304 | 305 | let prio_fee: u32 = 100_000; 306 | 307 | let mut is_creating_ata = false; 308 | let mut ixs = Vec::new(); 309 | let prio_fee_ix = 
ComputeBudgetInstruction::set_compute_unit_price(prio_fee as u64); 310 | ixs.push(prio_fee_ix); 311 | if let Ok(response) = rpc_client 312 | .get_token_account_balance(&receiver_token_account) 313 | .await 314 | { 315 | if let Some(_amount) = response.ui_amount { 316 | info!(target: "claim_log", "miner has valid token account."); 317 | } else { 318 | info!(target: "claim_log", "will create token account for miner"); 319 | ixs.push( 320 | spl_associated_token_account::instruction::create_associated_token_account( 321 | &wallet.pubkey(), 322 | &receiver_pubkey, 323 | &ore_api::consts::MINT_ADDRESS, 324 | &spl_token::id(), 325 | ), 326 | ) 327 | } 328 | } else { 329 | info!(target: "claim_log", "Adding create ata ix for miner claim"); 330 | is_creating_ata = true; 331 | ixs.push( 332 | spl_associated_token_account::instruction::create_associated_token_account( 333 | &wallet.pubkey(), 334 | &receiver_pubkey, 335 | &ore_api::consts::MINT_ADDRESS, 336 | &spl_token::id(), 337 | ), 338 | ) 339 | } 340 | 341 | // 0.02_000_000_000 342 | if is_creating_ata { 343 | claim_amount = amount - 2_000_000_000 344 | } 345 | let ix = 346 | crate::ore_utils::get_claim_ix(wallet.pubkey(), receiver_token_account, claim_amount); 347 | ixs.push(ix); 348 | 349 | if let Ok((hash, _slot)) = rpc_client 350 | .get_latest_blockhash_with_commitment(rpc_client.commitment()) 351 | .await 352 | { 353 | let expired_timer = Instant::now(); 354 | let mut tx = Transaction::new_with_payer(&ixs, Some(&wallet.pubkey())); 355 | 356 | tx.sign(&[&wallet], hash); 357 | 358 | let rpc_config = RpcSendTransactionConfig { 359 | preflight_commitment: Some(rpc_client.commitment().commitment), 360 | ..RpcSendTransactionConfig::default() 361 | }; 362 | 363 | let signature; 364 | let mut send_attempts = 1; 365 | loop { 366 | match rpc_client 367 | .send_transaction_with_config(&tx, rpc_config) 368 | .await 369 | { 370 | Ok(sig) => { 371 | signature = sig; 372 | break; 373 | }, 374 | Err(e) => { 375 | if send_attempts > 
10 { 376 | error!(target: "claim_log", "Failed to send claim transaction.\nError: {:?}\nRetry limit reached. Removing claim from queue.", e); 377 | let mut writer = claims_queue.queue.write().await; 378 | writer.remove(&(miner_pubkey, None)); 379 | drop(writer); 380 | return; 381 | } else { 382 | send_attempts += 1; 383 | error!(target: "claim_log", "Failed to send claim transaction.\nError: {:?}.\n retrying in 2 seconds...", e); 384 | tokio::time::sleep(Duration::from_millis(2000)).await; 385 | } 386 | } 387 | } 388 | } 389 | 390 | let result: Result = loop { 391 | let elapsed = expired_timer.elapsed().as_secs(); 392 | if elapsed >= 600 { 393 | break Err("Transaction Expired".to_string()); 394 | } 395 | let results = rpc_client.get_signature_statuses(&[signature]).await; 396 | if let Ok(response) = results { 397 | let statuses = response.value; 398 | if let Some(status) = &statuses[0] { 399 | info!(target: "claim_log", "Miner claim for {} -- elapsed: {} -- status: {:?}", user_pubkey.to_string(), elapsed, status); 400 | if status.confirmation_status() 401 | == TransactionConfirmationStatus::Finalized 402 | { 403 | if status.err.is_some() { 404 | let e_str = format!("Transaction Failed: {:?}", status.err); 405 | break Err(e_str); 406 | } 407 | break Ok(signature); 408 | } 409 | } 410 | } 411 | tokio::time::sleep(Duration::from_millis(5000)).await; 412 | }; 413 | 414 | match result { 415 | Ok(sig) => { 416 | let amount_dec = amount as f64 / 10f64.powf(ORE_TOKEN_DECIMALS as f64); 417 | info!(target: "claim_log", "Miner {} successfully claimed {}.\nSig: {}", miner_pubkey.to_string(), amount_dec, sig.to_string()); 418 | 419 | // TODO: use transacions, or at least put them into one query 420 | let miner = app_database 421 | .get_miner_by_pubkey_str(miner_pubkey.to_string()) 422 | .await 423 | .unwrap(); 424 | let db_pool = app_database 425 | .get_pool_by_authority_pubkey(wallet.pubkey().to_string()) 426 | .await 427 | .unwrap(); 428 | while let Err(_) = 429 | 
app_database.decrease_miner_reward(miner.id, amount).await 430 | { 431 | error!(target: "claim_log", "Failed to decrease stakers rewards! Retrying..."); 432 | tokio::time::sleep(Duration::from_millis(2000)).await; 433 | } 434 | while let Err(_) = app_database 435 | .update_pool_claimed(wallet.pubkey().to_string(), amount) 436 | .await 437 | { 438 | error!(target: "claim_log", "Failed to increase pool claimed amount! Retrying..."); 439 | tokio::time::sleep(Duration::from_millis(2000)).await; 440 | } 441 | 442 | let itxn = InsertTxn { 443 | txn_type: "claim".to_string(), 444 | signature: sig.to_string(), 445 | priority_fee: prio_fee, 446 | }; 447 | while let Err(_) = app_database.add_new_txn(itxn.clone()).await { 448 | error!(target: "claim_log", "Failed to increase pool claimed amount! Retrying..."); 449 | tokio::time::sleep(Duration::from_millis(2000)).await; 450 | } 451 | 452 | let txn_id; 453 | loop { 454 | if let Ok(ntxn) = app_database.get_txn_by_sig(sig.to_string()).await { 455 | txn_id = ntxn.id; 456 | break; 457 | } else { 458 | error!(target: "claim_log", "Failed to get tx by sig! Retrying..."); 459 | tokio::time::sleep(Duration::from_millis(2000)).await; 460 | } 461 | } 462 | 463 | let iclaim = InsertClaim { 464 | miner_id: miner.id, 465 | pool_id: db_pool.id, 466 | txn_id, 467 | amount, 468 | }; 469 | while let Err(_) = app_database.add_new_claim(iclaim).await { 470 | error!(target: "claim_log", "Failed add new claim to db! 
Retrying..."); 471 | tokio::time::sleep(Duration::from_millis(2000)).await; 472 | } 473 | 474 | let mut writer = claims_queue.queue.write().await; 475 | writer.remove(&(miner_pubkey, None)); 476 | drop(writer); 477 | 478 | info!(target: "claim_log", "Claim successfully processed!"); 479 | } 480 | Err(e) => { 481 | error!(target: "claim_log", "ERROR: {:?}", e); 482 | } 483 | } 484 | } else { 485 | error!(target: "claim_log", "Failed to confirm transaction, will retry on next iteration."); 486 | } 487 | } 488 | } 489 | -------------------------------------------------------------------------------- /src/systems/client_message_handler_system.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{HashMap, HashSet}, 3 | net::SocketAddr, 4 | ops::Range, 5 | sync::Arc, 6 | }; 7 | 8 | 9 | use ore_api::state::Proof; 10 | use solana_sdk::pubkey::Pubkey; 11 | use tokio::{ 12 | sync::{mpsc::UnboundedReceiver, Mutex, RwLock}, 13 | time::Instant, 14 | }; 15 | 16 | use crate::{ 17 | AppState, ClientMessage, EpochHashes, LastPong, SubmissionWindow, 18 | }; 19 | 20 | use super::client_submissions_handler::{client_submissions_handler, ClientBestSolution}; 21 | 22 | pub async fn client_message_handler_system( 23 | mut receiver_channel: UnboundedReceiver, 24 | ready_clients: Arc>>, 25 | proof: Arc>, 26 | epoch_hashes: Arc>, 27 | client_nonce_ranges: Arc>>>>, 28 | app_state: Arc>, 29 | app_pongs: Arc>, 30 | app_submission_window: Arc>, 31 | ) { 32 | let (s, r) = tokio::sync::mpsc::unbounded_channel::(); 33 | 34 | let app_proof = proof.clone(); 35 | let app_epoch_hashes = epoch_hashes.clone(); 36 | let app_client_nonce_ranges = client_nonce_ranges.clone(); 37 | let app_app_state = app_state.clone(); 38 | let app_app_submission_window = app_submission_window.clone(); 39 | tokio::spawn(async move { 40 | client_submissions_handler( 41 | r, 42 | app_proof, 43 | app_epoch_hashes, 44 | app_client_nonce_ranges, 45 | app_app_state, 
46 | app_app_submission_window 47 | ).await; 48 | }); 49 | 50 | while let Some(client_message) = receiver_channel.recv().await { 51 | match client_message { 52 | ClientMessage::Pong(addr) => { 53 | let mut writer = app_pongs.write().await; 54 | writer.pongs.insert(addr, Instant::now()); 55 | drop(writer); 56 | } 57 | ClientMessage::Ready(addr) => { 58 | let ready_clients = ready_clients.clone(); 59 | let mut lock = ready_clients.lock().await; 60 | lock.insert(addr); 61 | drop(lock); 62 | } 63 | ClientMessage::Mining(addr) => { 64 | tracing::info!(target: "server_log", "Client {} has started mining!", addr.to_string()); 65 | } 66 | ClientMessage::BestSolution(addr, solution, pubkey) => { 67 | let _ = s.send(ClientBestSolution { 68 | data: (addr, solution, pubkey) 69 | }); 70 | } 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/systems/client_submissions_handler.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{HashMap, HashSet}, 3 | net::SocketAddr, 4 | ops::Range, 5 | sync::Arc, 6 | }; 7 | 8 | use drillx::Solution; 9 | use uuid::Uuid; 10 | 11 | use axum::extract::ws::Message; 12 | use futures::SinkExt; 13 | use ore_api::state::Proof; 14 | use solana_sdk::pubkey::Pubkey; 15 | use tokio::sync::{mpsc::UnboundedReceiver, Mutex, RwLock}; 16 | 17 | use crate::{ 18 | AppState, EpochHashes, InternalMessageSubmission, SubmissionWindow, MAX_CALCULATED_HASHPOWER, MIN_DIFF, MIN_HASHPOWER 19 | }; 20 | 21 | pub struct ClientBestSolution { 22 | pub data: (SocketAddr, Solution, Pubkey), 23 | } 24 | 25 | pub async fn client_submissions_handler( 26 | mut receiver_channel: UnboundedReceiver, 27 | proof: Arc>, 28 | epoch_hashes: Arc>, 29 | client_nonce_ranges: Arc>>>>, 30 | app_state: Arc>, 31 | app_submission_window: Arc>, 32 | ) { 33 | 34 | loop { 35 | let mut msgs = vec![]; 36 | receiver_channel.recv_many(&mut msgs, 100).await; 37 | for msg in 
msgs.iter() { 38 | let (addr, solution, pubkey) = msg.data; 39 | let diff = solution.to_hash().difficulty(); 40 | if diff >= MIN_DIFF { 41 | let reader = app_submission_window.read().await; 42 | let submission_windows_closed = reader.closed; 43 | drop(reader); 44 | 45 | if submission_windows_closed { 46 | //tracing::error!(target: "server_log", "{} submitted after submission window was closed!", pubkey); 47 | 48 | let reader = app_state.read().await; 49 | if let Some(app_client_socket) = reader.sockets.get(&addr) { 50 | let msg = format!("Late submission. Please make sure your hash time is under 60 seconds."); 51 | let _ = app_client_socket 52 | .socket 53 | .lock() 54 | .await 55 | .send(Message::Text(msg.into())) 56 | .await; 57 | } else { 58 | //tracing::error!(target: "server_log", "Failed to get client socket for addr: {}", addr); 59 | continue; 60 | } 61 | drop(reader); 62 | continue; 63 | } 64 | 65 | let reader = client_nonce_ranges.read().await; 66 | let nonce_ranges: Vec> = { 67 | if let Some(nr) = reader.get(&pubkey) { 68 | nr.clone() 69 | } else { 70 | //tracing::error!(target: "server_log", "Client nonce range not set!"); 71 | continue; 72 | } 73 | }; 74 | drop(reader); 75 | 76 | let nonce = u64::from_le_bytes(solution.n); 77 | 78 | let mut in_range = false; 79 | 80 | for nonce_range in nonce_ranges.iter() { 81 | if nonce_range.contains(&nonce) { 82 | in_range = true; 83 | break; 84 | } 85 | } 86 | 87 | if !in_range { 88 | //tracing::error!(target: "server_log", "Client submitted nonce out of assigned range"); 89 | continue; 90 | } 91 | 92 | let reader = app_state.read().await; 93 | let miner_id; 94 | if let Some(app_client_socket) = reader.sockets.get(&addr) { 95 | miner_id = app_client_socket.miner_id; 96 | } else { 97 | //tracing::error!(target: "server_log", "Failed to get client socket for addr: {}", addr); 98 | continue; 99 | } 100 | drop(reader); 101 | 102 | let lock = proof.lock().await; 103 | let challenge = lock.challenge; 104 | drop(lock); 
105 | 106 | if solution.is_valid(&challenge) { 107 | let submission_uuid = Uuid::new_v4(); 108 | //tracing::info!(target: "submission_log", "{} - {} found diff: {}", submission_uuid, pubkey_str, diff); 109 | // calculate rewards 110 | let mut hashpower = MIN_HASHPOWER * 2u64.pow(diff - MIN_DIFF); 111 | if hashpower > MAX_CALCULATED_HASHPOWER { 112 | hashpower = MAX_CALCULATED_HASHPOWER; 113 | } 114 | { 115 | let reader = epoch_hashes.read().await; 116 | let subs = reader.submissions.clone(); 117 | drop(reader); 118 | 119 | if let Some(old_sub) = subs.get(&pubkey) { 120 | if diff > old_sub.supplied_diff { 121 | let mut epoch_hashes = epoch_hashes.write().await; 122 | epoch_hashes.submissions.insert( 123 | pubkey, 124 | InternalMessageSubmission { 125 | miner_id, 126 | supplied_nonce: nonce, 127 | supplied_diff: diff, 128 | hashpower, 129 | }, 130 | ); 131 | if diff > epoch_hashes.best_hash.difficulty { 132 | tracing::info!(target: "server_log", "{} - New best diff: {}", submission_uuid, diff); 133 | //tracing::info!(target: "submission_log", "{} - New best diff: {}", submission_uuid, diff); 134 | epoch_hashes.best_hash.difficulty = diff; 135 | epoch_hashes.best_hash.solution = Some(solution); 136 | } 137 | drop(epoch_hashes); 138 | } 139 | } else { 140 | //tracing::info!(target: "submission_log", "{} - Adding {} submission diff: {} to epoch_hashes submissions.", submission_uuid, pubkey_str, diff); 141 | let mut epoch_hashes = epoch_hashes.write().await; 142 | epoch_hashes.submissions.insert( 143 | pubkey, 144 | InternalMessageSubmission { 145 | miner_id, 146 | supplied_nonce: nonce, 147 | supplied_diff: diff, 148 | hashpower, 149 | }, 150 | ); 151 | if diff > epoch_hashes.best_hash.difficulty { 152 | tracing::info!(target: "server_log", "{} - New best diff: {}", submission_uuid, diff); 153 | //tracing::info!(target: "submission_log", "{} - New best diff: {}", submission_uuid, diff); 154 | epoch_hashes.best_hash.difficulty = diff; 155 | 
epoch_hashes.best_hash.solution = Some(solution); 156 | } 157 | drop(epoch_hashes); 158 | //tracing::info!(target: "submission_log", "{} - Added {} submission diff: {} to epoch_hashes submissions.", submission_uuid, pubkey_str, diff); 159 | } 160 | } 161 | } else { 162 | tracing::error!(target: "server_log", "{} returned an invalid solution!", pubkey); 163 | 164 | let reader = app_state.read().await; 165 | if let Some(app_client_socket) = reader.sockets.get(&addr) { 166 | let _ = app_client_socket.socket.lock().await.send(Message::Text("Invalid solution. If this keeps happening, please contact support.".to_string().into())).await; 167 | } else { 168 | //tracing::error!(target: "server_log", "Failed to get client socket for addr: {}", addr); 169 | continue; 170 | } 171 | drop(reader); 172 | } 173 | } 174 | } 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/systems/handle_ready_clients_system.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{HashMap, HashSet}, 3 | net::SocketAddr, 4 | ops::Range, 5 | sync::Arc, 6 | time::Duration, 7 | }; 8 | 9 | use axum::extract::ws::Message; 10 | use futures::SinkExt; 11 | use ore_api::state::Proof; 12 | use solana_sdk::pubkey::Pubkey; 13 | use tokio::sync::{Mutex, RwLock}; 14 | 15 | use crate::{message::ServerMessageStartMining, ore_utils::get_cutoff, AppState, EpochHashes, SubmissionWindow}; 16 | 17 | const NONCE_RANGE_SIZE: u64 = 40_000_000; 18 | 19 | pub async fn handle_ready_clients_system( 20 | app_state: Arc>, 21 | app_proof: Arc>, 22 | app_epoch_hashes: Arc>, 23 | ready_clients: Arc>>, 24 | app_nonce: Arc>, 25 | app_client_nonce_ranges: Arc>>>>, 26 | app_submission_window: Arc>, 27 | ) { 28 | tracing::info!(target: "server_log", "handle ready clients system started!"); 29 | loop { 30 | let reader = app_state.read().await; 31 | let paused = reader.paused.clone(); 32 | drop(reader); 33 | 34 | if 
/// Periodically distributes mining work (challenge + nonce range) to
/// clients that have reported `Ready`.
///
/// Loops forever: when mining is not paused, snapshots the ready set,
/// computes the remaining cutoff for the current proof, and — unless the
/// submission window is closed or a solution already exists with under
/// 10s left — sends each ready client a start-mining message carrying a
/// freshly carved slice of the global nonce counter. Assigned ranges are
/// appended to `app_client_nonce_ranges` so submissions can be validated.
pub async fn handle_ready_clients_system(
    app_state: Arc<RwLock<AppState>>,
    app_proof: Arc<Mutex<Proof>>,
    app_epoch_hashes: Arc<RwLock<EpochHashes>>,
    ready_clients: Arc<Mutex<HashSet<SocketAddr>>>,
    app_nonce: Arc<Mutex<u64>>,
    app_client_nonce_ranges: Arc<RwLock<HashMap<Pubkey, Vec<Range<u64>>>>>,
    app_submission_window: Arc<RwLock<SubmissionWindow>>,
) {
    tracing::info!(target: "server_log", "handle ready clients system started!");
    loop {
        let reader = app_state.read().await;
        let paused = reader.paused.clone();
        drop(reader);

        if !paused {
            // Snapshot the ready set so the lock is not held while sending.
            let mut clients = Vec::new();
            {
                let ready_clients_lock = ready_clients.lock().await;
                for ready_client in ready_clients_lock.iter() {
                    clients.push(ready_client.clone());
                }
                drop(ready_clients_lock);
            };

            if clients.len() > 0 {
                let lock = app_proof.lock().await;
                let latest_proof = lock.clone();
                drop(lock);

                // NOTE(review): second argument looks like a buffer in
                // seconds subtracted from the deadline — confirm against
                // ore_utils::get_cutoff.
                let cutoff = get_cutoff(latest_proof, 4);
                let mut should_mine = true;

                // only distribute challenge if 10 seconds or more is left
                // or if there is no best_hash yet
                let cutoff = if cutoff < 10 {
                    let solution = app_epoch_hashes.read().await.best_hash.solution;
                    if solution.is_some() {
                        should_mine = false;
                    }
                    0
                } else {
                    cutoff
                };

                let reader = app_submission_window.read().await;
                let is_window_closed = reader.closed;
                drop(reader);

                if should_mine && !is_window_closed {
                    let r_clients_len = clients.len();
                    // Re-read the proof so the distributed challenge is
                    // as fresh as possible.
                    let lock = app_proof.lock().await;
                    let latest_proof = lock.clone();
                    drop(lock);
                    let challenge = latest_proof.challenge;

                    let shared_state = app_state.read().await;
                    let sockets = shared_state.sockets.clone();
                    drop(shared_state);
                    for client in clients {
                        let app_client_nonce_ranges = app_client_nonce_ranges.clone();
                        if let Some(sender) = sockets.get(&client) {
                            // Carve the next slice off the global nonce
                            // counter while holding its lock.
                            let nonce_range = {
                                let mut nonce = app_nonce.lock().await;
                                let start = *nonce;
                                *nonce += NONCE_RANGE_SIZE;
                                drop(nonce);
                                // max hashes possible in 60s for a single client
                                //
                                // NOTE(review): `end` is start + SIZE - 1 and
                                // `Range` is end-exclusive, so the last nonce
                                // of every window is never assigned — confirm
                                // this is intended.
                                let nonce_end = start + NONCE_RANGE_SIZE - 1;
                                let end = nonce_end;
                                start..end
                            };

                            let start_mining_message = ServerMessageStartMining::new(
                                challenge,
                                cutoff,
                                nonce_range.start,
                                nonce_range.end,
                            );
                            let sender = sender.clone();
                            // Send + bookkeeping off-task so one slow socket
                            // does not stall the distribution loop.
                            tokio::spawn(async move {
                                let _ = sender
                                    .socket
                                    .lock()
                                    .await
                                    .send(Message::Binary(start_mining_message.to_message_binary().into()))
                                    .await;
                                // Record the assigned range for later
                                // submission validation.
                                let reader = app_client_nonce_ranges.read().await;
                                let current_nonce_ranges =
                                    if let Some(val) = reader.get(&sender.pubkey) {
                                        Some(val.clone())
                                    } else {
                                        None
                                    };
                                drop(reader);

                                if let Some(nonce_ranges) = current_nonce_ranges {
                                    let mut new_nonce_ranges = nonce_ranges.to_vec();
                                    new_nonce_ranges.push(nonce_range);

                                    app_client_nonce_ranges
                                        .write()
                                        .await
                                        .insert(sender.pubkey, new_nonce_ranges);
                                } else {
                                    let new_nonce_ranges = vec![nonce_range];
                                    app_client_nonce_ranges
                                        .write()
                                        .await
                                        .insert(sender.pubkey, new_nonce_ranges);
                                }
                            });
                        }
                        // remove ready client from list
                        let _ = ready_clients.lock().await.remove(&client);
                    }
                }
            }
        } else {
            tracing::info!(target: "server_log", "Mining is paused");
            tokio::time::sleep(Duration::from_secs(30)).await;
        }

        tokio::time::sleep(Duration::from_millis(400)).await;
    }
}
Arc>, 11 | mut all_clients_receiver: UnboundedReceiver 12 | ) { 13 | loop { 14 | while let Some(msg) = all_clients_receiver.recv().await { 15 | { 16 | let shared_state = app_shared_state.read().await; 17 | let socks = shared_state.sockets.clone(); 18 | drop(shared_state); 19 | for (_socket_addr, socket_sender) in socks.iter() { 20 | let text = msg.text.clone(); 21 | let socket = socket_sender.clone(); 22 | tokio::spawn(async move { 23 | if let Ok(_) = 24 | socket.socket.lock().await.send(Message::Text(text.into())).await 25 | { 26 | } else { 27 | tracing::error!(target: "server_log", "Failed to send client text"); 28 | } 29 | }); 30 | } 31 | } 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/systems/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod claim_system; 2 | pub mod client_message_handler_system; 3 | pub mod handle_ready_clients_system; 4 | pub mod pong_tracking_system; 5 | pub mod pool_submission_system; 6 | pub mod proof_tracking_system; 7 | pub mod pool_mine_success_system; 8 | pub mod message_text_all_clients_system; 9 | pub mod app_metrics_system; 10 | pub mod cache_update_system; 11 | pub mod client_submissions_handler; 12 | -------------------------------------------------------------------------------- /src/systems/pong_tracking_system.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use tokio::sync::RwLock; 4 | 5 | use crate::{AppState, LastPong}; 6 | 7 | pub async fn pong_tracking_system( 8 | app_pongs: Arc>, 9 | app_state: Arc>, 10 | ) { 11 | loop { 12 | let reader = app_pongs.read().await; 13 | let pongs = reader.pongs.clone(); 14 | drop(reader); 15 | 16 | tracing::info!(target: "server_log", "App pongs length: {}", pongs.len()); 17 | 18 | for pong in pongs.iter() { 19 | if pong.1.elapsed().as_secs() > 90 { 20 | //tracing::error!(target: 
"server_log", "Failed to get pong within 45s from client on socket: {}", pong.0); 21 | let mut writer = app_state.write().await; 22 | writer.sockets.remove(pong.0); 23 | drop(writer); 24 | 25 | let mut writer = app_pongs.write().await; 26 | writer.pongs.remove(pong.0); 27 | drop(writer) 28 | } 29 | } 30 | 31 | tokio::time::sleep(Duration::from_secs(45)).await; 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/systems/pool_mine_success_system.rs: -------------------------------------------------------------------------------- 1 | use axum::extract::ws::Message; 2 | use base64::{prelude::BASE64_STANDARD, Engine}; 3 | use futures::SinkExt; 4 | use std::{ 5 | ops::Div, 6 | sync::Arc, 7 | time::Duration, 8 | }; 9 | 10 | use solana_sdk:: 11 | signer::Signer 12 | ; 13 | use tokio::{ 14 | sync::{mpsc::UnboundedReceiver, RwLock}, time::Instant} 15 | ; 16 | use tracing::info; 17 | 18 | use crate::{ 19 | app_database::AppDatabase, message::ServerMessagePoolSubmissionResult, ore_utils:: 20 | ORE_TOKEN_DECIMALS, AppState, ClientVersion, Config, InsertSubmission, MessageInternalMineSuccess, UpdateReward, UpdateStakeAccountRewards, WalletExtension 21 | }; 22 | 23 | pub const ORE_STAKE_PERCENTAGE: u64 = 20; 24 | pub const ORE_SOL_STAKE_PERCENTAGE: u64 = 10; 25 | pub const ORE_ISC_STAKE_PERCENTAGE: u64 = 14; 26 | pub const TOTAL_STAKER_PERCENTAGE: u64 = ORE_STAKE_PERCENTAGE + ORE_SOL_STAKE_PERCENTAGE + ORE_ISC_STAKE_PERCENTAGE; 27 | 28 | pub async fn pool_mine_success_system( 29 | app_shared_state: Arc>, 30 | app_database: Arc, 31 | app_config: Arc, 32 | app_wallet: Arc, 33 | mut mine_success_receiver: UnboundedReceiver 34 | ) { 35 | loop { 36 | while let Some(msg) = mine_success_receiver.recv().await { 37 | let id = uuid::Uuid::new_v4(); 38 | let c = BASE64_STANDARD.encode(msg.challenge); 39 | info!(target: "server_log", "{} - Processing internal mine success for challenge: {}", id, c); 40 | { 41 | let instant = 
/// Long-running system that persists the results of a successful pool mine.
///
/// Drains `MessageInternalMineSuccess` messages and, for each one:
///   1. Splits the pool reward into miner / commission / staker portions
///      (the staker cut is zero while global boosts are active).
///   2. Computes each miner's share proportionally to submitted hashpower and
///      notifies that miner's connected clients (V1 text or V2 binary).
///   3. Persists submissions and miner reward credits to the database in
///      batches, retrying each batch until it succeeds.
///   4. Updates pool-level and challenge-level reward rows.
///   5. When global boosts are inactive, distributes the staker portion via
///      `process_stakers_rewards`.
pub async fn pool_mine_success_system(
    app_shared_state: Arc<RwLock<AppState>>,
    app_database: Arc<AppDatabase>,
    app_config: Arc<Config>,
    app_wallet: Arc<WalletExtension>,
    mut mine_success_receiver: UnboundedReceiver<MessageInternalMineSuccess>
) {
    loop {
        while let Some(msg) = mine_success_receiver.recv().await {
            // Correlation id so the many log lines for one message can be grouped.
            let id = uuid::Uuid::new_v4();
            let c = BASE64_STANDARD.encode(msg.challenge);
            info!(target: "server_log", "{} - Processing internal mine success for challenge: {}", id, c);
            {
                let instant = Instant::now();
                info!(target: "server_log", "{} - Getting sockets.", id);
                // Snapshot the socket map; the read lock is dropped before any awaits.
                let shared_state = app_shared_state.read().await;
                let len = shared_state.sockets.len();
                let socks = shared_state.sockets.clone();
                drop(shared_state);
                info!(target: "server_log", "{} - Got sockets in {}.", id, instant.elapsed().as_millis());

                // Rows accumulated for batched DB writes below.
                let mut i_rewards = Vec::new();
                let mut i_submissions = Vec::new();

                let instant = Instant::now();
                info!(target: "server_log", "{} - Processing submission results for challenge: {}.", id, c);
                // Stakers only receive a cut when global boosts are inactive.
                let staker_rewards = if !msg.global_boosts_active {
                    (msg.rewards as u128).saturating_mul(TOTAL_STAKER_PERCENTAGE as u128).saturating_div(100) as u64
                } else {
                    info!(target: "server_log", "{} - Global Boosts Active, Staking rewards are 0", id);
                    0
                };

                // NOTE(review): plain `u64` subtraction — underflows (panics in
                // debug, wraps in release) if commissions + staker_rewards ever
                // exceed msg.rewards. Confirm the upstream invariant or switch
                // to saturating_sub.
                let total_rewards = msg.rewards - msg.commissions - staker_rewards;
                info!(target: "server_log", "{} - Miners Rewards: {}", id, total_rewards);
                info!(target: "server_log", "{} - Commission: {}", id, msg.commissions);
                info!(target: "server_log", "{} - Staker Rewards: {}", id, staker_rewards);
                let mut total_miners_earned_rewards = 0;
                for (miner_pubkey, msg_submission) in msg.submissions.iter() {
                    let decimals = 10f64.powf(ORE_TOKEN_DECIMALS as f64);
                    // Miner share is proportional to hashpower vs the pool total.
                    let earned_rewards = (total_rewards as u128).saturating_mul(msg_submission.hashpower as u128).saturating_div(msg.total_hashpower as u128) as u64;
                    total_miners_earned_rewards += earned_rewards;

                    let new_submission = InsertSubmission {
                        miner_id: msg_submission.miner_id,
                        challenge_id: msg.challenge_id,
                        nonce: msg_submission.supplied_nonce,
                        difficulty: msg_submission.supplied_diff as i8,
                    };

                    let new_reward = UpdateReward {
                        miner_id: msg_submission.miner_id,
                        balance: earned_rewards,
                    };

                    i_rewards.push(new_reward);
                    i_submissions.push(new_submission);
                    //let _ = app_database.add_new_earning(new_earning).await.unwrap();

                    let earned_rewards_dec = (earned_rewards as f64).div(decimals);
                    let pool_rewards_dec = (msg.rewards as f64).div(decimals);

                    let percentage = if pool_rewards_dec != 0.0 {
                        (earned_rewards_dec / pool_rewards_dec) * 100.0
                    } else {
                        0.0 // Handle the case where pool_rewards_dec is 0 to avoid division by zero
                    };

                    let top_stake = 1.0f64;

                    // Notify every connection belonging to this miner.
                    for (_addr, client_connection) in socks.iter() {
                        if client_connection.pubkey.eq(&miner_pubkey) {
                            let socket_sender = client_connection.socket.clone();

                            match client_connection.client_version {
                                // V1 clients receive a human-readable text summary.
                                ClientVersion::V1 => {
                                    let message = format!(
                                        "Pool Submitted Difficulty: {}\nPool Earned: {:.11} ORE\nPool Balance: {:.11} ORE\nTop Stake: {:.11} ORE\nPool Multiplier: {:.2}x\n----------------------\nActive Miners: {}\n----------------------\nMiner Submitted Difficulty: {}\nMiner Earned: {:.11} ORE\n{:.2}% of total pool reward",
                                        msg.difficulty,
                                        pool_rewards_dec,
                                        msg.total_balance,
                                        top_stake,
                                        msg.multiplier,
                                        len,
                                        msg_submission.supplied_diff,
                                        earned_rewards_dec,
                                        percentage
                                    );
                                    tokio::spawn(async move {
                                        if let Ok(_) = socket_sender
                                            .lock()
                                            .await
                                            .send(Message::Text(message.into()))
                                            .await
                                        {
                                        } else {
                                            tracing::error!(target: "server_log", "Failed to send client text");
                                        }
                                    });
                                }
                                // V2 clients receive a structured binary message.
                                ClientVersion::V2 => {
                                    let server_message = ServerMessagePoolSubmissionResult::new(
                                        msg.difficulty,
                                        msg.total_balance,
                                        pool_rewards_dec,
                                        top_stake,
                                        msg.multiplier,
                                        len as u32,
                                        msg.challenge,
                                        msg.best_nonce,
                                        msg_submission.supplied_diff as u32,
                                        earned_rewards_dec,
                                        percentage,
                                    );
                                    tokio::spawn(async move {
                                        if let Ok(_) = socket_sender
                                            .lock()
                                            .await
                                            .send(Message::Binary(
                                                server_message.to_message_binary().into(),
                                            ))
                                            .await
                                        {
                                        } else {
                                            tracing::error!(target: "server_log", "Failed to send client pool submission result binary message");
                                        }
                                    });
                                }
                            }
                        }
                    }
                }

                info!(target: "server_log", "{} - Finished processing submission results in {}ms for challenge: {}.", id, instant.elapsed().as_millis(), c);

                // Persist submissions in batches; each batch retries until it lands.
                let batch_size = 1000;
                let instant = Instant::now();
                info!(target: "server_log", "{} - Adding submissions", id);
                if i_submissions.len() > 0 {
                    for batch in i_submissions.chunks(batch_size) {
                        info!(target: "server_log", "{} - Submissions batch size: {}", id, batch.len());
                        while let Err(_) =
                            app_database.add_new_submissions_batch(batch.to_vec()).await
                        {
                            tracing::error!(target: "server_log", "{} - Failed to add new submissions batch. Retrying...", id);
                            tokio::time::sleep(Duration::from_millis(500)).await;
                        }
                        tokio::time::sleep(Duration::from_millis(200)).await;
                    }

                    info!(target: "server_log", "{} - Successfully added submissions batch", id);
                }
                info!(target: "server_log", "{} - Added {} submissions in {}ms", id, i_submissions.len(), instant.elapsed().as_millis());

                tokio::time::sleep(Duration::from_millis(500)).await;

                // Credit each miner's earned balance, in smaller batches.
                let batch_size = 400;
                let instant = Instant::now();
                info!(target: "server_log", "{} - Updating rewards", id);
                if i_rewards.len() > 0 {
                    let mut batch_num = 1;
                    for batch in i_rewards.chunks(batch_size) {
                        let instant = Instant::now();
                        info!(target: "server_log", "{} - Updating reward batch {}", id, batch_num);
                        while let Err(_) = app_database.update_rewards(batch.to_vec()).await {
                            tracing::error!(target: "server_log", "{} - Failed to update rewards in db. Retrying...", id);
                            tokio::time::sleep(Duration::from_millis(500)).await;
                        }
                        info!(target: "server_log", "{} - Updated reward batch {} in {}ms", id, batch_num, instant.elapsed().as_millis());
                        batch_num += 1;
                        tokio::time::sleep(Duration::from_millis(200)).await;
                    }
                    info!(target: "server_log", "{} - Successfully updated rewards", id);
                }
                info!(target: "server_log", "{} - Updated rewards in {}ms", id, instant.elapsed().as_millis());

                tokio::time::sleep(Duration::from_millis(500)).await;

                let instant = Instant::now();
                info!(target: "server_log", "{} - Updating pool rewards", id);
                while let Err(_) = app_database
                    .update_pool_rewards(
                        app_wallet.miner_wallet.pubkey().to_string(),
                        msg.rewards,
                    )
                    .await
                {
                    tracing::error!(target: "server_log",
                        "{} - Failed to update pool rewards! Retrying...", id
                    );
                    tokio::time::sleep(Duration::from_millis(1000)).await;
                }
                info!(target: "server_log", "{} - Updated pool rewards in {}ms", id, instant.elapsed().as_millis());

                tokio::time::sleep(Duration::from_millis(200)).await;

                let instant = Instant::now();
                info!(target: "server_log", "{} - Updating challenge rewards", id);
                // The winning submission must have been inserted above for its
                // id to be found by nonce here.
                if let Ok(s) = app_database
                    .get_submission_id_with_nonce(msg.best_nonce)
                    .await
                {
                    if let Err(_) = app_database
                        .update_challenge_rewards(msg.challenge.to_vec(), s, msg.rewards)
                        .await
                    {
                        tracing::error!(target: "server_log", "{} - Failed to update challenge rewards! Skipping! Devs check!", id);
                        let err_str = format!("{} - Challenge UPDATE FAILED - Challenge: {:?}\nSubmission ID: {}\nRewards: {}\n", id, msg.challenge.to_vec(), s, msg.rewards);
                        tracing::error!(target: "server_log", err_str);
                    }
                    info!(target: "server_log", "{} - Updated challenge rewards in {}ms", id, instant.elapsed().as_millis());
                } else {
                    tracing::error!(target: "server_log", "{} - Failed to get submission id with nonce: {} for challenge_id: {}", id, msg.best_nonce, msg.challenge_id);
                    tracing::error!(target: "server_log", "{} - Failed update challenge rewards!", id);
                    // Diagnostic only: was the best nonce at least part of the
                    // batch we tried to insert?
                    let mut found_best_nonce = false;
                    for submission in i_submissions {
                        if submission.nonce == msg.best_nonce {
                            found_best_nonce = true;
                            break;
                        }
                    }

                    if found_best_nonce {
                        info!(target: "server_log", "{} - Found best nonce in i_submissions", id);
                    } else {
                        info!(target: "server_log", "{} - Failed to find best nonce in i_submissions", id);
                    }
                }

                if msg.global_boosts_active {
                    info!(target: "server_log", "{} - Global Boosts Active, skipping processing of staker rewards.", id);
                    info!(target: "server_log", "{} - Skipping unclaimed rewards bonuses.", id);
                } else {
                    info!(target: "server_log", "{} - Processing stakers rewards", id);
                    process_stakers_rewards(msg.rewards, staker_rewards, &app_database, &app_config).await;
                    info!(target: "server_log", "{} - Total Distributed For Miners: {}", id, total_miners_earned_rewards);
                }

                info!(target: "server_log", "{} - Finished processing internal mine success for challenge: {}", id, c);
            }
        }
    }
}
/// Distributes the staker portion of a mine reward across the three boost
/// mints (ore, ore-sol, ore-isc), proportionally to each account's staked
/// balance within its mint, then persists the credits in batches.
///
/// `total_rewards` is the full pool reward for the challenge; `staker_rewards`
/// is the pre-computed cap on what stakers may receive in total. Bails out
/// (logging an error) if the per-mint splits would exceed that cap.
pub async fn process_stakers_rewards(total_rewards: u64, staker_rewards: u64, app_database: &Arc<AppDatabase>, app_config: &Arc<Config>) {
    // Per-mint cuts are taken from the FULL pool reward, not from staker_rewards.
    let ore_rewards = (total_rewards as u128).saturating_mul(ORE_STAKE_PERCENTAGE as u128).saturating_div(100) as u64;
    let ore_sol_rewards = (total_rewards as u128).saturating_mul(ORE_SOL_STAKE_PERCENTAGE as u128).saturating_div(100) as u64;
    let ore_isc_rewards = (total_rewards as u128).saturating_mul(ORE_ISC_STAKE_PERCENTAGE as u128).saturating_div(100) as u64;

    info!(target: "server_log", "Total Rewards: {}", total_rewards);
    info!(target: "server_log", "ore Rewards ({}%): {}", ORE_STAKE_PERCENTAGE, ore_rewards);
    info!(target: "server_log", "ore-sol Rewards ({}%): {}", ORE_SOL_STAKE_PERCENTAGE, ore_sol_rewards);
    info!(target: "server_log", "ore-isc Rewards ({}%): {}", ORE_ISC_STAKE_PERCENTAGE, ore_isc_rewards);

    // Safety check against over-distribution.
    // NOTE(review): the message says "40%" but TOTAL_STAKER_PERCENTAGE is
    // 20 + 10 + 14 = 44 — the message text looks stale; verify.
    if ore_rewards + ore_sol_rewards + ore_isc_rewards > staker_rewards {
        tracing::error!(target: "server_log", "Calculations exceeded max staker rewards of 40%!!!");
        return;
    }

    // get all the stake accounts for ore mint
    // Pages by last seen id; a short page (< 500 rows) signals the end.
    // NOTE(review): the trailing `1` argument's meaning (page size? flag?) is
    // not visible here — confirm against AppDatabase::get_staker_accounts_for_mint.
    // NOTE(review): on Err this loop retries forever with no backoff growth.
    let mut ore_stake_accounts = vec![];
    let mut total_ore_boosted = 0;
    let mut last_id: i32 = 0;
    loop {
        tokio::time::sleep(Duration::from_millis(100)).await;
        match app_database.get_staker_accounts_for_mint(app_config.pool_id, "oreoU2P8bN6jkk3jbaiVxYnG1dCXcYxwhwyK9jSybcp".to_string(), last_id, 1).await {
            Ok(d) => {
                if d.len() > 0 {
                    for ac in d.iter() {
                        last_id = ac.id;
                        total_ore_boosted += ac.staked_balance;
                        ore_stake_accounts.push(ac.clone());
                    }
                }

                if d.len() < 500 {
                    break;
                }
            },
            Err(e) => {
                tracing::error!(target: "server_log", "Failed to get staker accounts for ore");
                tracing::error!(target: "server_log", "Error: {:?}", e);
            }
        };
    }

    tracing::info!(target: "server_log", "Found {} ore stake accounts.", ore_stake_accounts.len());
    tracing::info!(target: "server_log", "Total {} ore boosted.", total_ore_boosted as f64 / 10f64.powf(ORE_TOKEN_DECIMALS as f64));

    // get all the stake accounts for ore-sol mint
    // (same pagination pattern as above)
    let mut ore_sol_stake_accounts = vec![];
    let mut total_ore_sol_boosted = 0;
    let mut last_id: i32 = 0;
    loop {
        tokio::time::sleep(Duration::from_millis(100)).await;
        match app_database.get_staker_accounts_for_mint(app_config.pool_id, "DrSS5RM7zUd9qjUEdDaf31vnDUSbCrMto6mjqTrHFifN".to_string(), last_id, 1).await {
            Ok(d) => {
                if d.len() > 0 {
                    for ac in d.iter() {
                        last_id = ac.id;
                        total_ore_sol_boosted += ac.staked_balance;
                        ore_sol_stake_accounts.push(ac.clone());
                    }
                }

                if d.len() < 500 {
                    break;
                }
            },
            Err(e) => {
                tracing::error!(target: "server_log", "Failed to get staker accounts for ore-sol");
                tracing::error!(target: "server_log", "Error: {:?}", e);
            }
        };
    }

    tracing::info!(target: "server_log", "Found {} ore-sol stake accounts.", ore_sol_stake_accounts.len());
    tracing::info!(target: "server_log", "Total {} ore-sol boosted.", total_ore_sol_boosted as f64 / 10f64.powf(ORE_TOKEN_DECIMALS as f64));

    // get all the stake accounts for ore-isc mint
    // (same pagination pattern as above)
    let mut ore_isc_stake_accounts = vec![];
    let mut total_ore_isc_boosted = 0;
    let mut last_id: i32 = 0;
    loop {
        tokio::time::sleep(Duration::from_millis(100)).await;
        match app_database.get_staker_accounts_for_mint(app_config.pool_id, "meUwDp23AaxhiNKaQCyJ2EAF2T4oe1gSkEkGXSRVdZb".to_string(), last_id, 1).await {
            Ok(d) => {
                if d.len() > 0 {
                    for ac in d.iter() {
                        last_id = ac.id;
                        total_ore_isc_boosted += ac.staked_balance;
                        ore_isc_stake_accounts.push(ac.clone());
                    }
                }

                if d.len() < 500 {
                    break;
                }
            },
            Err(e) => {
                tracing::error!(target: "server_log", "Failed to get staker accounts for ore-isc");
                tracing::error!(target: "server_log", "Error: {:?}", e);
            }
        };
    }

    tracing::info!(target: "server_log", "Found {} ore-isc stake accounts.", ore_isc_stake_accounts.len());
    tracing::info!(target: "server_log", "Total {} ore-isc boosted.", total_ore_isc_boosted as f64 / 10f64.powf(ORE_TOKEN_DECIMALS as f64));

    // Split each mint's cut proportionally to staked balance within that mint.
    // Integer division means dust remainders stay undistributed.
    let mut update_stake_rewards = vec![];
    let mut total_distributed_for_ore = 0;
    if total_ore_boosted > 0 {
        for ore_stake_account in ore_stake_accounts.iter() {
            let rewards_balance = (ore_rewards as u128 * ore_stake_account.staked_balance as u128 / total_ore_boosted as u128) as u64;
            let stake_rewards = UpdateStakeAccountRewards {
                stake_pda: ore_stake_account.stake_pda.clone(),
                rewards_balance,
            };
            total_distributed_for_ore += rewards_balance;
            update_stake_rewards.push(stake_rewards);
        }
    }

    let mut total_distributed_for_ore_sol = 0;
    if total_ore_sol_boosted > 0 {
        for ore_sol_stake_account in ore_sol_stake_accounts.iter() {
            let rewards_balance = (ore_sol_rewards as u128 * ore_sol_stake_account.staked_balance as u128 / total_ore_sol_boosted as u128) as u64;
            let stake_rewards = UpdateStakeAccountRewards {
                stake_pda: ore_sol_stake_account.stake_pda.clone(),
                rewards_balance,
            };
            total_distributed_for_ore_sol += rewards_balance;
            update_stake_rewards.push(stake_rewards);
        }
    }

    let mut total_distributed_for_ore_isc = 0;
    if total_ore_isc_boosted > 0 {
        for ore_isc_stake_account in ore_isc_stake_accounts.iter() {
            let rewards_balance = (ore_isc_rewards as u128 * ore_isc_stake_account.staked_balance as u128 / total_ore_isc_boosted as u128) as u64;
            let stake_rewards = UpdateStakeAccountRewards {
                stake_pda: ore_isc_stake_account.stake_pda.clone(),
                rewards_balance,
            };
            total_distributed_for_ore_isc += rewards_balance;
            update_stake_rewards.push(stake_rewards);
        }
    }



    let instant = Instant::now();
    info!(target: "server_log", "Total distributed to stakers: {}", total_distributed_for_ore + total_distributed_for_ore_sol + total_distributed_for_ore_isc);
    info!(target: "server_log", "Total distributed for ore: {}", total_distributed_for_ore);
    info!(target: "server_log", "Total distributed for ore_sol: {}", total_distributed_for_ore_sol);
    info!(target: "server_log", "Total distributed for ore_isc: {}", total_distributed_for_ore_isc);

    // Persist the stake-account credits in batches; retry each batch until it lands.
    let batch_size = 400;
    info!(target: "server_log", "Updating staking rewards");
    if update_stake_rewards.len() > 0 {
        let mut batch_num = 1;
        for batch in update_stake_rewards.chunks(batch_size) {
            let instant = Instant::now();
            info!(target: "server_log", "Updating stake reward batch {}", batch_num);
            while let Err(_) = app_database.update_stake_accounts_rewards(batch.to_vec()).await {
                tracing::error!(target: "server_log", "Failed to update rewards in db. Retrying...");
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
            info!(target: "server_log", "Updated reward batch {} in {}ms", batch_num, instant.elapsed().as_millis());
            batch_num += 1;
            tokio::time::sleep(Duration::from_millis(200)).await;
        }
        info!(target: "server_log", "Successfully updated rewards");
    }
    info!(target: "server_log", "Updated rewards in {}ms", instant.elapsed().as_millis());
}
/// Distributes a bonus reward pot across all accounts with unclaimed balances
/// (miner reward accounts plus legacy staker accounts), proportionally to each
/// account's share of the total unclaimed amount.
///
/// NOTE(review): `total_rewards` is never used in this function — confirm
/// whether it was meant to factor into the split or can be removed.
/// NOTE(review): miner credits are accumulated into `i_rewards` but NEVER
/// written to the database — only the stake-account updates are persisted
/// below. Either this is intentionally disabled or the miner write was lost;
/// verify before relying on this function.
pub async fn process_unclaimed_bonus_rewards(total_rewards: u64, bonus_rewards: u64, app_database: &Arc<AppDatabase>, app_config: &Arc<Config>) {

    // get all the rewards accounts for miners
    // Pages by last seen id; a short page (< 500 rows) signals the end.
    // NOTE(review): on Err this loop retries forever with a fixed 100ms delay.
    let mut miner_rewards_accounts = vec![];
    let mut total_miner_rewards_unclaimed = 0;
    let mut last_id: i32 = 0;
    loop {
        tokio::time::sleep(Duration::from_millis(100)).await;
        match app_database.get_miner_reward_accounts(last_id).await {
            Ok(d) => {
                if d.len() > 0 {
                    for ac in d.iter() {
                        last_id = ac.id;
                        total_miner_rewards_unclaimed += ac.balance;
                        miner_rewards_accounts.push(ac.clone());
                    }
                }

                if d.len() < 500 {
                    break;
                }
            },
            Err(e) => {
                tracing::error!(target: "server_log", "Failed to get miner reward accounts.");
                tracing::error!(target: "server_log", "Error: {:?}", e);
            }
        };
    }
    // get all the rewards accounts from legacy pool staking
    // (same pagination pattern as above)
    let mut staker_rewards_accounts = vec![];
    let mut total_staker_rewards_unclaimed = 0;
    let mut last_id: i32 = 0;
    loop {
        tokio::time::sleep(Duration::from_millis(100)).await;
        match app_database.get_stake_accounts(app_config.pool_id, last_id).await {
            Ok(d) => {
                if d.len() > 0 {
                    for ac in d.iter() {
                        last_id = ac.id;
                        total_staker_rewards_unclaimed += ac.rewards_balance;
                        staker_rewards_accounts.push(ac.clone());
                    }
                }

                if d.len() < 500 {
                    break;
                }
            },
            Err(e) => {
                tracing::error!(target: "server_log", "Failed to get staker reward accounts.");
                tracing::error!(target: "server_log", "Error: {:?}", e);
            }
        };
    }

    // calculated total unclaimed amount
    let total_unclaimed_amount = total_miner_rewards_unclaimed + total_staker_rewards_unclaimed;

    // distribute bonus_rewards based on % of unclaimed amount
    // Integer division means dust remainders stay undistributed.
    let mut update_stake_rewards = vec![];
    let mut total_distributed_for_stakers = 0;
    if total_staker_rewards_unclaimed > 0 {
        for stake_account in staker_rewards_accounts.iter() {
            let rewards_balance = (bonus_rewards as u128 * stake_account.rewards_balance as u128 / total_unclaimed_amount as u128) as u64;
            let stake_rewards = UpdateStakeAccountRewards {
                stake_pda: stake_account.stake_pda.clone(),
                rewards_balance,
            };
            total_distributed_for_stakers += rewards_balance;
            update_stake_rewards.push(stake_rewards);
        }
    }
    info!(target: "server_log", "Total calculated distribution amount to stakers: {}", total_distributed_for_stakers);
    let mut total_distributed_for_miners = 0;
    let mut i_rewards = Vec::new();
    if total_miner_rewards_unclaimed > 0 {
        for rewards_account in miner_rewards_accounts.iter() {
            let rewards_balance = (bonus_rewards as u128 * rewards_account.balance as u128 / total_unclaimed_amount as u128) as u64;
            let new_reward = UpdateReward {
                miner_id: rewards_account.miner_id,
                balance: rewards_balance,
            };

            total_distributed_for_miners += rewards_balance;
            i_rewards.push(new_reward);
        }
    }
    info!(target: "server_log", "Total calculated distribution amount to miners: {}", total_distributed_for_miners);

    info!(target: "server_log", "Total calculated distribution amount for all: {}", total_distributed_for_stakers + total_distributed_for_miners);

    // Persist ONLY the stake-account credits (see NOTE above re: i_rewards).
    let instant = Instant::now();
    let batch_size = 400;
    info!(target: "server_log", "Updating bonus staking rewards");
    if update_stake_rewards.len() > 0 {
        let mut batch_num = 1;
        for batch in update_stake_rewards.chunks(batch_size) {
            let instant = Instant::now();
            info!(target: "server_log", "Updating stake reward batch {}", batch_num);
            while let Err(_) = app_database.update_stake_accounts_rewards(batch.to_vec()).await {
                tracing::error!(target: "server_log", "Failed to update rewards in db. Retrying...");
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
            info!(target: "server_log", "Updated reward batch {} in {}ms", batch_num, instant.elapsed().as_millis());
            batch_num += 1;
            tokio::time::sleep(Duration::from_millis(200)).await;
        }
        info!(target: "server_log", "Successfully updated rewards");
    }
    info!(target: "server_log", "Updated rewards in {}ms", instant.elapsed().as_millis());
}
Retrying..."); 547 | tokio::time::sleep(Duration::from_millis(500)).await; 548 | } 549 | info!(target: "server_log", "Updated reward batch {} in {}ms", batch_num, instant.elapsed().as_millis()); 550 | batch_num += 1; 551 | tokio::time::sleep(Duration::from_millis(200)).await; 552 | } 553 | info!(target: "server_log", "Successfully updated rewards"); 554 | } 555 | info!(target: "server_log", "Updated rewards in {}ms", instant.elapsed().as_millis()); 556 | } 557 | 558 | 559 | 560 | 561 | 562 | 563 | -------------------------------------------------------------------------------- /src/systems/proof_tracking_system.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use base64::{prelude::BASE64_STANDARD, Engine}; 4 | use futures::StreamExt; 5 | use ore_api::state::Proof; 6 | use steel::AccountDeserialize; 7 | use solana_account_decoder::UiAccountEncoding; 8 | use solana_client::{nonblocking::pubsub_client::PubsubClient, rpc_config::RpcAccountInfoConfig}; 9 | use solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair, signer::Signer}; 10 | use tokio::sync::Mutex; 11 | 12 | use crate::ore_utils::get_proof_pda; 13 | 14 | pub async fn proof_tracking_system( 15 | ws_url: String, 16 | wallet: Arc, 17 | proof: Arc>, 18 | app_last_challenge: Arc>, 19 | ) { 20 | loop { 21 | tracing::info!(target: "server_log", "Establishing rpc websocket connection..."); 22 | let mut ps_client = PubsubClient::new(&ws_url).await; 23 | let mut attempts = 0; 24 | 25 | while ps_client.is_err() && attempts < 3 { 26 | tracing::error!(target: "server_log", "Failed to connect to websocket, retrying..."); 27 | ps_client = PubsubClient::new(&ws_url).await; 28 | tokio::time::sleep(Duration::from_millis(1000)).await; 29 | attempts += 1; 30 | } 31 | tracing::info!(target: "server_log", "RPC WS connection established!"); 32 | 33 | let app_wallet = wallet.clone(); 34 | if let Ok(ps_client) = ps_client { 35 | let 
ps_client = Arc::new(ps_client); 36 | let account_pubkey = get_proof_pda(app_wallet.pubkey()); 37 | let pubsub = ps_client 38 | .account_subscribe( 39 | &account_pubkey, 40 | Some(RpcAccountInfoConfig { 41 | encoding: Some(UiAccountEncoding::Base64), 42 | data_slice: None, 43 | commitment: Some(CommitmentConfig::confirmed()), 44 | min_context_slot: None, 45 | }), 46 | ) 47 | .await; 48 | 49 | tracing::info!(target: "server_log", "Tracking pool proof updates with websocket"); 50 | if let Ok((mut account_sub_notifications, _account_unsub)) = pubsub { 51 | while let Some(response) = account_sub_notifications.next().await { 52 | let data = response.value.data.decode(); 53 | if let Some(data_bytes) = data { 54 | // if let Ok(bus) = Bus::try_from_bytes(&data_bytes) { 55 | // let _ = sender.send(AccountUpdatesData::BusData(*bus)); 56 | // } 57 | // if let Ok(ore_config) = ore_api::state::Config::try_from_bytes(&data_bytes) { 58 | // let _ = sender.send(AccountUpdatesData::TreasuryConfigData(*ore_config)); 59 | // } 60 | if let Ok(new_proof) = Proof::try_from_bytes(&data_bytes) { 61 | tracing::info!(target: "server_log", "Got new proof data"); 62 | tracing::info!(target: "server_log", "Challenge: {}", BASE64_STANDARD.encode(new_proof.challenge)); 63 | 64 | let lock = app_last_challenge.lock().await; 65 | let last_challenge = lock.clone(); 66 | drop(lock); 67 | 68 | 69 | if last_challenge.eq(&new_proof.challenge) { 70 | tracing::error!(target: "server_log", "Websocket tried to update proof with old challenge!"); 71 | } else { 72 | let mut app_proof = proof.lock().await; 73 | *app_proof = *new_proof; 74 | drop(app_proof); 75 | } 76 | } 77 | } 78 | } 79 | } 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /whitelist.txt.example: -------------------------------------------------------------------------------- 1 | EABRhJzpLiavJu3PnbVxWo7SEt7q5KDn7b637evZQ6js 2 | wVPNpqKMcjwvzRHXYMiJ3vmsHY39dSdgputWEYraero 3 | 
--------------------------------------------------------------------------------