├── .cargo └── config.toml ├── .github └── workflows │ ├── build.yml │ └── checks.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── README.md └── src ├── api ├── mod.rs ├── routes.rs ├── stats.rs └── utils.rs ├── config.rs ├── ingress ├── mod.rs └── sv1_ingress.rs ├── jd_client ├── error.rs ├── job_declarator │ ├── message_handler.rs │ ├── mod.rs │ ├── setup_connection.rs │ └── task_manager.rs ├── mining_downstream │ ├── mod.rs │ └── task_manager.rs ├── mining_upstream │ ├── mod.rs │ ├── task_manager.rs │ └── upstream.rs ├── mod.rs ├── task_manager.rs └── template_receiver │ ├── message_handler.rs │ ├── mod.rs │ ├── setup_connection.rs │ └── task_manager.rs ├── main.rs ├── minin_pool_connection ├── errors.rs ├── mod.rs └── task_manager.rs ├── proxy_state.rs ├── router └── mod.rs ├── share_accounter ├── errors.rs ├── mod.rs └── task_manager.rs ├── shared ├── error.rs ├── mod.rs └── utils.rs └── translator ├── downstream ├── accept_connection.rs ├── diff_management.rs ├── downstream.rs ├── mod.rs ├── notify.rs ├── receive_from_downstream.rs ├── send_to_downstream.rs └── task_manager.rs ├── error.rs ├── mod.rs ├── proxy ├── bridge.rs ├── mod.rs ├── next_mining_notify.rs └── task_manager.rs ├── task_manager.rs ├── upstream ├── diff_management.rs ├── mod.rs ├── task_manager.rs └── upstream.rs └── utils.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = ["-Wunused-crate-dependencies"] 3 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build demand-cli binaries for Linux and macOS 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | workflow_dispatch: 11 | 12 | jobs: 13 | build-linux: 14 | name: Build Linux Binary 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout code 18 | uses: 
actions/checkout@v3 19 | 20 | - name: Set up Rust toolchain 21 | uses: dtolnay/rust-toolchain@stable 22 | 23 | - name: Build Linux Binary 24 | run: cargo build --release 25 | 26 | - name: Rename Linux Binary 27 | run: mv target/release/demand-cli demand-cli-linux 28 | 29 | - name: Upload Linux Artifact 30 | uses: actions/upload-artifact@v4 31 | with: 32 | name: linux-artifact 33 | path: demand-cli-linux 34 | 35 | build-macos: 36 | name: Build macOS Binary 37 | runs-on: macos-latest 38 | steps: 39 | - name: Checkout code 40 | uses: actions/checkout@v3 41 | 42 | - name: Set up Rust toolchain 43 | uses: dtolnay/rust-toolchain@stable 44 | 45 | - name: Build macOS Binary 46 | run: cargo build --release 47 | 48 | - name: Rename macOS Binary 49 | run: mv target/release/demand-cli demand-cli-macos 50 | 51 | - name: Upload macOS Artifact 52 | uses: actions/upload-artifact@v4 53 | with: 54 | name: macos-artifact 55 | path: demand-cli-macos 56 | 57 | build-windows: 58 | name: Build Windows Binary 59 | runs-on: windows-latest 60 | steps: 61 | - name: Checkout code 62 | uses: actions/checkout@v3 63 | 64 | - name: Set up Rust toolchain 65 | uses: dtolnay/rust-toolchain@stable 66 | 67 | - name: Build Windows Binary 68 | run: cargo build --release 69 | 70 | - name: Rename Windows Binary 71 | run: move target\release\demand-cli.exe demand-cli-windows.exe 72 | 73 | - name: Upload Windows Artifact 74 | uses: actions/upload-artifact@v4 75 | with: 76 | name: windows-artifact 77 | path: demand-cli-windows.exe 78 | 79 | create-release: 80 | name: Create GitHub Release and Upload Binaries 81 | needs: [build-linux, build-macos, build-windows] 82 | runs-on: ubuntu-latest 83 | # Only run this job for push events to master or manual workflow dispatch 84 | if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' 85 | steps: 86 | - name: Checkout code 87 | uses: actions/checkout@v3 88 | 89 | - name: Set up Rust toolchain 90 | uses: dtolnay/rust-toolchain@stable 91 | 92 | - 
name: Install cargo-release 93 | run: cargo install cargo-release 94 | 95 | - name: Create a new tag based on Cargo.toml version 96 | run: | 97 | VERSION=$(cargo metadata --format-version 1 --no-deps | jq -r .packages[0].version) 98 | 99 | # Check if the tag already exists remotely 100 | if git ls-remote --tags origin | grep -q "refs/tags/v$VERSION"; then 101 | echo "Tag v$VERSION already exists, skipping tag creation." 102 | else 103 | git tag "v$VERSION" 104 | git push origin "v$VERSION" 105 | fi 106 | 107 | echo "TAG_NAME=v$VERSION" >> $GITHUB_ENV 108 | 109 | 110 | - name: Download Linux Artifact 111 | uses: actions/download-artifact@v4 112 | with: 113 | name: linux-artifact 114 | path: . 115 | 116 | - name: Download macOS Artifact 117 | uses: actions/download-artifact@v4 118 | with: 119 | name: macos-artifact 120 | path: . 121 | 122 | - name: Download Windows Artifact 123 | uses: actions/download-artifact@v4 124 | with: 125 | name: windows-artifact 126 | path: . 127 | 128 | - name: Create GitHub Release and Upload Binaries 129 | uses: softprops/action-gh-release@v1 130 | with: 131 | tag_name: ${{ env.TAG_NAME }} 132 | files: | 133 | demand-cli-linux 134 | demand-cli-macos 135 | demand-cli-windows.exe 136 | env: 137 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 138 | -------------------------------------------------------------------------------- /.github/workflows/checks.yml: -------------------------------------------------------------------------------- 1 | name: Rust CI Checks 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | workflow_dispatch: 11 | 12 | jobs: 13 | check: 14 | name: Run Checks (fmt, clippy, test) 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout code 18 | uses: actions/checkout@v3 19 | 20 | - name: Set up Rust toolchain 21 | uses: dtolnay/rust-toolchain@stable 22 | with: 23 | components: rustfmt, clippy 24 | 25 | - name: Run cargo fmt 26 | run: cargo fmt --all -- --check 27 | 28 | - 
name: Run cargo clippy 29 | run: cargo clippy --all-targets --all-features -- -D warnings 30 | 31 | - name: Run cargo test 32 | run: cargo test --all-features 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "demand-cli" 3 | version = "0.1.7" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | dashmap = {version = "6.1.0", features = ["inline"]} 8 | bitcoin = {version = "0.32.5", features = ["serde","rand"]} 9 | serde_json = { version = "1.0.64", default-features = false, features = ["alloc"] } 10 | tokio-util = { version = "*", features = ["codec"] } 11 | nohash-hasher = "*" 12 | futures = "*" 13 | async-recursion = "1.0.0" 14 | lazy_static = "1.4.0" 15 | rand = "0.8.4" 16 | tracing = { version = "0.1" } 17 | tracing-subscriber = { version = "*", features = ["env-filter"]} 18 | tokio = {version="^1.36.0",features = ["full","tracing", "macros","rt-multi-thread"]} 19 | key-utils = "1.0.0" 20 | pid = { version = "4.0.0"} 21 | clap = {version = "4.5.31", features = ["derive"]} 22 | axum = {version = "0.8.1"} 23 | serde = { version = "1.0.219", features = ["derive"] } 24 | sysinfo = {version = "0.33.1"} 25 | primitive-types = { version = "0.13.1" } 26 | toml ={ version = "0.8.22" } 27 | #roles_logic_sv2 = "1.2.1" 28 | #sv1_api = "1.0.1" 29 | #demand-sv2-connection = "0.0.3" 30 | #framing_sv2 = "^2.0.0" 31 | #binary_sv2 = "1.1.0" 32 | #demand-share-accounting-ext = "0.0.10" 33 | 34 | #noise_sv2 = "1.1.0" 35 | #codec_sv2 = { version = "1.2.1", features = ["noise_sv2","with_buffer_pool"]} 36 | 37 | #demand-share-accounting-ext = {path = "../demand-share-accounting-ext"} 38 | #demand-sv2-connection = { path = "../demand-sv2-connection"} 
39 | #roles_logic_sv2 = { path = "../stratum/protocols/v2/roles-logic-sv2"} 40 | #framing_sv2 = { path = "../stratum/protocols/v2/framing-sv2"} 41 | #binary_sv2 = { path = "../stratum/protocols/v2/binary-sv2/binary-sv2"} 42 | #noise_sv2 = {path ="../stratum/protocols/v2/noise-sv2"} 43 | #codec_sv2 = {features = ["noise_sv2","with_buffer_pool"], path = "../stratum/protocols/v2/codec-sv2" } 44 | #sv1_api = {path = "../stratum/protocols/v1" } 45 | 46 | demand-share-accounting-ext = { git = "https://github.com/demand-open-source/share-accounting-ext"} 47 | demand-sv2-connection = {git = "https://github.com/demand-open-source/demand-sv2-connection"} 48 | roles_logic_sv2 = { git = "https://github.com/demand-open-source/stratum", branch ="ImproveCoinbase", subdirectory = "protocols/v2/roles-logic-sv2"} 49 | framing_sv2 = { git = "https://github.com/demand-open-source/stratum", branch = "ImproveCoinbase",subdirectory = "protocols/v2/framing_sv2" } 50 | binary_sv2 = { git = "https://github.com/demand-open-source/stratum", branch ="ImproveCoinbase",subdirectory = "protocols/v2/binary_sv2"} 51 | noise_sv2 = { git = "https://github.com/demand-open-source/stratum", branch="ImproveCoinbase",subdirectory = "protocols/v2/noise-sv2"} 52 | codec_sv2 = { git = "https://github.com/demand-open-source/stratum", branch="ImproveCoinbase",subdirectory = "protocols/v2/codec-sv2", features = ["noise_sv2","with_buffer_pool"]} 53 | sv1_api = { git = "https://github.com/demand-open-source/stratum", branch = "ImproveCoinbase",subdirectory = "protocols/v1"} 54 | 55 | 56 | 57 | 58 | 59 | [dev-dependencies] 60 | rand = "0.8.5" 61 | sha2 = "0.10.8" 62 | 63 | [profile.release] 64 | strip = true 65 | lto = true 66 | panic = 'abort' 67 | 68 | [profile.dev] 69 | panic = 'abort' 70 | 71 | [target.'cfg(not(target_os = "windows"))'.dependencies] 72 | jemallocator = "*" 73 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # Demand-cli 2 | [![Stars](https://img.shields.io/github/stars/demand-open-source/demand-cli?style=social)](https://github.com/demand-open-source/demand-cli) 3 | [![Forks](https://img.shields.io/github/forks/demand-open-source/demand-cli?style=social)](https://github.com/demand-open-source/demand-cli) 4 | ![Release](https://img.shields.io/github/v/release/demand-open-source/demand-cli) 5 | 6 | **Demand CLI** is a proxy that let miners to connect to and mine with [Demand Pool](https://dmnd.work). It serves two primary purposes: 7 | 1. Translation: Enables miners using StratumV1 to connect to the Demand Pool without requiring firmware updates. Sv1 messages gets translated to Sv2. 8 | 2. Job Declaration (JD): Allows miners to declare custom jobs to the pool using StratumV2. 9 | 10 | 11 | ## Features 12 | - **Stratum V2 Support**: Uses the secure and efficient Stratum V2 protocol for communication with the pool. 13 | - **Job Declaration**: Enables miners to propose custom block templates to the pool. This helps make mining more decentralized by allowing miners to pick the transactions they want to include in a block. 14 | - **Stratum V1 Translation**: Acts as a bridge, allowing StratumV1 miners to connect to the Demand Pool without firmware updates. 15 | - **Flexible Configuration**: Provides options for customization. This allows users to optimize the tool for their specific mining environment. 16 | - **Monitoring API**: Provides HTTP endpoints to monitor proxy health, pool connectivity, miner performace, and system resource usage in real-time. 17 | 18 | ## Getting Started 19 | 20 | ### Prerequisites 21 | 22 | - [Rust](https://www.rust-lang.org/tools/install) (version 1.70.0 or higher recommended) 23 | - Cargo (included with Rust) 24 | - [Bitcoin Node](https://github.com/Sjors/bitcoin) (Optional): Required for job declaration. 
25 | 26 | ### Installation 27 | 28 | There are two options to get started: 29 | 30 | #### Option 1: Download Pre-built Binaries 31 | 32 | - Visit the [releases page](https://github.com/demand-open-source/demand-cli/releases/tag/v0.1.1). 33 | - Download the binary for your system. 34 | 35 | #### Option 2: Build from Source 36 | 37 | - Clone the repository and build the project: 38 | ```bash 39 | git clone https://github.com/demand-open-source/demand-cli.git 40 | cd demand-cli 41 | cargo build --release 42 | ``` 43 | 44 | The executable will be located in the `target/release/` directory. 45 | 46 | ## Configuration 47 | 48 | Before running the CLI, set up the necessary environment variables. 49 | 50 | ### Environment Variables 51 | 52 | - **`TOKEN` (Required)**: Your Demand Pool authentication token. 53 | Export it with: 54 | 55 | ```bash 56 | export TOKEN= 57 | ``` 58 | 59 | - **`TP_ADDRESS` (Optional)**: Template Provider address (default: `127.0.0.1:8442`). Set this if you want to use job declaration feature. 60 | 61 | Export it with 62 | ```bash 63 | export TP_ADDRESS= 64 | ``` 65 | Note: if `TP_ADDRESS` id not set, job declaration is disabled, and the proxy uses templates provided by the pool. 66 | 67 | ## Running the CLI 68 | 69 | Depending on whether you built from source or downloaded a binary, the command to run the proxy is slightly different. There are also different options you can use. 70 | Below are two example setups to get you started. 71 | 72 | #### Example 1: Built from Source with Job Declaration 73 | 74 | This example assumes you’ve built `demand-cli` from source and want to enable job declaration. 75 | 76 | ```bash 77 | export TOKEN=abc123xyz 78 | export TP_ADDRESS=192.168.1.100:8442 79 | ./target/release/demand-cli -d 100T --loglevel debug --nc on 80 | ``` 81 | 82 | Connect Miners: 83 | Point your Stratum V1 miners to :32767. 
84 | 85 | #### Example 2: Pre Binary without Job Declaration 86 | This example assumes you downloaded a pre-built binary (for linux in this case) for from release page and wants to connect test endpoint 87 | Set Environment Variable: 88 | ```bash 89 | export TOKEN=xyz789abc 90 | ./demand-cli-linux-x64 -d 10T --test 91 | ``` 92 | 93 | Point your Stratum V1 miners to :32767. 94 | 95 | ### Options 96 | 97 | - **`--test`**: Connects to test endpoint 98 | - **`-d`**: Expected downstream hashrate (e.g., `10T`, `2.5P`, `5E`). Default is `100TH/s` (100 Terahashes/second). 99 | This helps the pool adjust to your hashrate. 100 | - **`--loglevel`**: Logging verbosity (`info`, `debug`, `error`, `warn`). Default is `info`. 101 | - **`--nc`**: Noise connection logging verbosity (`info`, `debug`, `error`, `warn`). Default is `off`. 102 | 103 | 104 | ## Monitoring API: 105 | 106 | The proxy exposes REST API enspoints to monitor its health, pool connectivity, connected mining devices performance and system resource usage. All endpoints are served on `http://0.0.0.0:3001` and return JSON responses in the format: 107 | 108 | ```json 109 | { 110 | "success": boolean, 111 | "message": string | null, 112 | "data": object | string | null 113 | } 114 | ``` 115 | 116 | ### Endpoint Overview 117 | 118 | 119 | | Endpoint | Method | Description | Response Status Codes | 120 | |----------------------|--------|-----------------------------------------------------------------------------|-----------------------| 121 | | `/api/health` | GET | Checks the health status of the proxy. | 200, 503 | 122 | | `/api/pool/info` | GET | Retrieves the current pool address and latency. | 200, 404 | 123 | | `/api/stats/miners` | GET | Returns device_name, hashrate, accepted and rejected shares count, and current_difficulty for all connected downstream devices (empty if none). | 200, 500 | 124 | | `/api/stats/aggregate` | GET | Provides aggregated stats of all connected downstream devices. 
| 200, 500 | 125 | | `/api/stats/system` | GET | Returns system resource usage (CPU and memory) for the proxy process. | 200 | 126 | 127 | 128 | ### Endpoint Details 129 | 130 |
131 | GET /api/health - Retrieves the health of the proxy 132 | 133 | - **Responses**: 134 | - **200 OK** (Healthy): 135 | ```json 136 | { "success": true, "message": null, "data": "Proxy OK" } 137 | ``` 138 | - **503 Service Unavailable** (Unhealthy): 139 | ```json 140 | { "success": false, "message": "", "data": null } 141 | ``` 142 | 143 |
144 | 145 |
146 | GET /api/pool/info - Retrieves the current pool’s address and latency in milliseconds, or an error if not currently connected to a pool 147 | 148 | - **Responses**: 149 | - **200 OK** (Connected to Pool): 150 | ```json 151 | { 152 | "success": true, 153 | "data": { 154 | "address": "", 155 | "latency": "5072" 156 | } 157 | } 158 | ``` 159 | - **404 NOT_FOUND** (Not connected to Pool): 160 | ```json 161 | { "success": false, "message": "Pool information unavailable", "data": null } 162 | ``` 163 |
165 | 166 |
167 | GET /api/stats/miners - Shows stats for connected devices, empty if none. 168 | 169 | - **Responses**: 170 | - **200 OK** (Mining devices connected): 171 | ```json 172 | { 173 | "success":true, 174 | "message":null, 175 | "data":{ 176 | "3":{ 177 | "device_name":"cpuminer/2.5.1", 178 | "hashrate":4721719.5, 179 | "accepted_shares":273, 180 | "rejected_shares":22, 181 | "current_difficulty":0.0065961657 182 | }, 183 | "2":{ 184 | "device_name":"cpuminer/2.5.1", 185 | "hashrate":6464463.0, 186 | "accepted_shares":413, 187 | "rejected_shares":64, 188 | "current_difficulty":0.00903075 189 | } 190 | } 191 | } 192 | ``` 193 | - **200 OK** (No connected devices): 194 | ```json 195 | { "success": true, "message": null,"data": {} } 196 | ``` 197 | - **500 Internal Server Error**: 198 | ```json 199 | { "success": false, "message": "Failed to collect stats: ", "data": null } 200 | ``` 201 | 202 |
203 | 204 |
205 | GET /api/stats/aggregate - Aggregates metrics across all devices 206 | 207 | - **Responses**: 208 | - **200 OK**: 209 | ```json 210 | { 211 | "success":true, 212 | "message":null, 213 | "data":{ 214 | "total_connected_device":2, 215 | "aggregate_hashrate":11186182.5, 216 | "aggregate_accepted_shares":686, 217 | "aggregate_rejected_shares":86, 218 | "aggregate_diff":0.01562692 219 | } 220 | } 221 | ``` 222 | - **500 Internal Server Error**: 223 | ```json 224 | { "success": false, "message": "Failed to collect stats: ", "data": null } 225 | ``` 226 | 227 |
228 | 229 |
230 | GET /api/stats/system - Reports CPU usage (0 - 100%) and memory usage in bytes. 231 | 232 | - **Responses**: 233 | - **200 OK**: 234 | ```json 235 | {"success":true,"message":null,"data":{"cpu_usage_%":"0.565","memory_usage_bytes":25935872}} 236 | ``` 237 | 238 |
239 | 240 | 241 | ## Contributing 242 | 243 | Contributions are welcome. Please refer to our [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines. 244 | 245 | ## Support 246 | 247 | Report issues via [GitHub Issues](https://github.com/demand-open-source/demand-cli/issues). 248 | -------------------------------------------------------------------------------- /src/api/mod.rs: -------------------------------------------------------------------------------- 1 | mod routes; 2 | pub mod stats; 3 | mod utils; 4 | use crate::{router::Router, API_SERVER_PORT}; 5 | use axum::{routing::get, Router as AxumRouter}; 6 | use routes::Api; 7 | use stats::StatsSender; 8 | 9 | // Holds shared state (like the router) that so that it can be accessed in all routes. 10 | #[derive(Clone)] 11 | pub struct AppState { 12 | router: Router, 13 | stats_sender: StatsSender, 14 | } 15 | 16 | pub(crate) async fn start(router: Router, stats_sender: StatsSender) { 17 | let state = AppState { 18 | router, 19 | stats_sender, 20 | }; 21 | let app = AxumRouter::new() 22 | .route("/api/health", get(Api::health_check)) 23 | .route("/api/pool/info", get(Api::get_pool_info)) 24 | .route("/api/stats/miners", get(Api::get_downstream_stats)) 25 | .route("/api/stats/aggregate", get(Api::get_aggregate_stats)) 26 | .route("/api/stats/system", get(Api::system_stats)) 27 | .with_state(state); 28 | 29 | let api_server_addr = format!("0.0.0.0:{}", *API_SERVER_PORT); 30 | let listener = tokio::net::TcpListener::bind(api_server_addr) 31 | .await 32 | .expect("Invalid server address"); 33 | println!("API Server listening on port {}", *API_SERVER_PORT); 34 | axum::serve(listener, app).await.unwrap(); 35 | } 36 | -------------------------------------------------------------------------------- /src/api/routes.rs: -------------------------------------------------------------------------------- 1 | use super::{utils::get_cpu_and_memory_usage, AppState}; 2 | use crate::proxy_state::ProxyState; 3 | use axum::{extract::State, 
http::StatusCode, response::IntoResponse, Json}; 4 | use serde::Serialize; 5 | 6 | pub struct Api {} 7 | 8 | impl Api { 9 | // Retrieves connected donwnstreams stats 10 | pub async fn get_downstream_stats(State(state): State) -> impl IntoResponse { 11 | match state.stats_sender.collect_stats().await { 12 | Ok(stats) => (StatusCode::OK, Json(APIResponse::success(Some(stats)))), 13 | Err(e) => ( 14 | StatusCode::INTERNAL_SERVER_ERROR, 15 | Json(APIResponse::error(Some(format!( 16 | "Failed to collect stats: {}", 17 | e 18 | )))), 19 | ), 20 | } 21 | } 22 | 23 | // Retrieves system stats (CPU and memory usage) 24 | pub async fn system_stats() -> impl IntoResponse { 25 | let (cpu, memory) = get_cpu_and_memory_usage().await; 26 | let cpu_usgae = format!("{:.3}", cpu); 27 | let data = serde_json::json!({"cpu_usage_%": cpu_usgae, "memory_usage_bytes": memory}); 28 | Json(APIResponse::success(Some(data))) 29 | } 30 | 31 | // Returns aggregate stats of all downstream devices 32 | pub async fn get_aggregate_stats(State(state): State) -> impl IntoResponse { 33 | let stats = match state.stats_sender.collect_stats().await { 34 | Ok(stats) => stats, 35 | Err(e) => { 36 | return ( 37 | StatusCode::INTERNAL_SERVER_ERROR, 38 | Json(APIResponse::error(Some(format!( 39 | "Failed to collect stats: {}", 40 | e 41 | )))), 42 | ); 43 | } 44 | }; 45 | let mut total_connected_device = 0; 46 | let mut total_accepted_shares = 0; 47 | let mut total_rejected_shares = 0; 48 | let mut total_hashrate = 0.0; 49 | let mut total_diff = 0.0; 50 | for (_, downstream) in stats { 51 | total_connected_device += 1; 52 | total_accepted_shares += downstream.accepted_shares; 53 | total_rejected_shares += downstream.rejected_shares; 54 | total_hashrate += downstream.hashrate as f64; 55 | total_diff += downstream.current_difficulty as f64 56 | } 57 | let result = AggregateStates { 58 | total_connected_device, 59 | aggregate_hashrate: total_hashrate, 60 | aggregate_accepted_shares: total_accepted_shares, 61 | 
aggregate_rejected_shares: total_rejected_shares, 62 | aggregate_diff: total_diff, 63 | }; 64 | (StatusCode::OK, Json(APIResponse::success(Some(result)))) 65 | } 66 | 67 | // Retrieves the current pool information 68 | pub async fn get_pool_info(State(state): State) -> impl IntoResponse { 69 | let current_pool_address = state.router.current_pool; 70 | let latency = *state.router.latency_rx.borrow(); 71 | 72 | match (current_pool_address, latency) { 73 | (Some(address), Some(latency)) => { 74 | let response_data = serde_json::json!({ 75 | "address": address.to_string(), 76 | "latency": latency.as_millis().to_string() 77 | }); 78 | ( 79 | StatusCode::OK, 80 | Json(APIResponse::success(Some(response_data))), 81 | ) 82 | } 83 | (_, _) => ( 84 | StatusCode::NOT_FOUND, 85 | Json(APIResponse::error(Some( 86 | "Pool information unavailable".to_string(), 87 | ))), 88 | ), 89 | } 90 | } 91 | 92 | // Returns the status of the Proxy 93 | pub async fn health_check() -> impl IntoResponse { 94 | match ProxyState::is_proxy_down() { 95 | (false, None) => ( 96 | StatusCode::OK, 97 | Json(APIResponse::success(Some("Proxy OK".to_string()))), 98 | ), 99 | (true, Some(states)) => ( 100 | StatusCode::SERVICE_UNAVAILABLE, 101 | Json(APIResponse::error(Some(states))), 102 | ), 103 | _ => ( 104 | StatusCode::SERVICE_UNAVAILABLE, 105 | Json(APIResponse::error(Some("Unknown proxy state".to_string()))), 106 | ), 107 | } 108 | } 109 | } 110 | 111 | #[derive(Serialize)] 112 | struct AggregateStates { 113 | total_connected_device: u32, 114 | aggregate_hashrate: f64, // f64 is used here to avoid overflow 115 | aggregate_accepted_shares: u64, 116 | aggregate_rejected_shares: u64, 117 | aggregate_diff: f64, 118 | } 119 | 120 | #[derive(Debug, Serialize)] 121 | struct APIResponse { 122 | success: bool, 123 | message: Option, 124 | data: Option, 125 | } 126 | 127 | impl APIResponse { 128 | fn success(data: Option) -> Self { 129 | APIResponse { 130 | success: true, 131 | message: None, 132 | data, 133 
| } 134 | } 135 | 136 | fn error(message: Option) -> Self { 137 | APIResponse { 138 | success: false, 139 | message, 140 | data: None, 141 | } 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /src/api/stats.rs: -------------------------------------------------------------------------------- 1 | use serde::Serialize; 2 | use std::collections::HashMap; 3 | use tokio::sync::{mpsc, oneshot}; 4 | use tracing::warn; 5 | 6 | #[derive(Debug)] 7 | enum StatsCommand { 8 | SetupStats(u32), 9 | UpdateHashrate(u32, f32), 10 | UpdateDiff(u32, f32), 11 | UpdateAcceptedShares(u32), 12 | UpdateRejectedShares(u32), 13 | UpdateDeviceName(u32, String), 14 | RemoveStats(u32), 15 | GetStats(oneshot::Sender>), 16 | } 17 | 18 | #[derive(Debug, Clone, Serialize)] 19 | pub struct DownstreamConnectionStats { 20 | pub device_name: Option, 21 | pub hashrate: f32, 22 | pub accepted_shares: u64, 23 | pub rejected_shares: u64, 24 | pub current_difficulty: f32, 25 | } 26 | 27 | impl DownstreamConnectionStats { 28 | fn new() -> Self { 29 | Self { 30 | device_name: None, 31 | hashrate: 0.0, 32 | accepted_shares: 0, 33 | rejected_shares: 0, 34 | current_difficulty: 0.0, 35 | } 36 | } 37 | } 38 | 39 | #[derive(Debug, Clone)] 40 | pub struct StatsSender { 41 | sender: mpsc::Sender, 42 | } 43 | 44 | impl StatsSender { 45 | pub fn new() -> Self { 46 | let (tx, rx) = mpsc::channel(100); 47 | tokio::spawn(StatsManager::new(rx).run()); 48 | Self { sender: tx } 49 | } 50 | 51 | fn send(&self, command: StatsCommand) { 52 | if let Err(e) = self.sender.try_send(command) { 53 | warn!("Failed to send command: {:?}", e); 54 | } 55 | } 56 | 57 | pub fn setup_stats(&self, connection_id: u32) { 58 | self.send(StatsCommand::SetupStats(connection_id)); 59 | } 60 | 61 | pub fn update_hashrate(&self, connection_id: u32, hashrate: f32) { 62 | self.send(StatsCommand::UpdateHashrate(connection_id, hashrate)); 63 | } 64 | 65 | pub fn update_diff(&self, connection_id: u32, 
diff: f32) { 66 | self.send(StatsCommand::UpdateDiff(connection_id, diff)); 67 | } 68 | 69 | pub fn update_accepted_shares(&self, connection_id: u32) { 70 | self.send(StatsCommand::UpdateAcceptedShares(connection_id)); 71 | } 72 | 73 | pub fn update_rejected_shares(&self, connection_id: u32) { 74 | self.send(StatsCommand::UpdateRejectedShares(connection_id)); 75 | } 76 | 77 | pub fn update_device_name(&self, connection_id: u32, name: String) { 78 | self.send(StatsCommand::UpdateDeviceName(connection_id, name)); 79 | } 80 | 81 | pub fn remove_stats(&self, connection_id: u32) { 82 | self.send(StatsCommand::RemoveStats(connection_id)); 83 | } 84 | 85 | pub async fn collect_stats(&self) -> Result, String> { 86 | let (tx, rx) = oneshot::channel(); 87 | self.send(StatsCommand::GetStats(tx)); 88 | match rx.await { 89 | Ok(stats) => Ok(stats), 90 | Err(e) => Err(e.to_string()), 91 | } 92 | } 93 | } 94 | 95 | struct StatsManager { 96 | stats: HashMap, 97 | receiver: mpsc::Receiver, 98 | } 99 | 100 | impl StatsManager { 101 | fn new(receiver: mpsc::Receiver) -> Self { 102 | Self { 103 | stats: HashMap::new(), 104 | receiver, 105 | } 106 | } 107 | 108 | async fn run(mut self) { 109 | while let Some(msg) = self.receiver.recv().await { 110 | match msg { 111 | StatsCommand::SetupStats(id) => { 112 | self.stats.insert(id, DownstreamConnectionStats::new()); 113 | } 114 | StatsCommand::UpdateHashrate(id, hashrate) => { 115 | if let Some(stats) = self.stats.get_mut(&id) { 116 | stats.hashrate = hashrate 117 | } 118 | } 119 | StatsCommand::UpdateDiff(id, diff) => { 120 | if let Some(stats) = self.stats.get_mut(&id) { 121 | stats.current_difficulty = diff 122 | } 123 | } 124 | StatsCommand::UpdateAcceptedShares(id) => { 125 | if let Some(stats) = self.stats.get_mut(&id) { 126 | stats.accepted_shares += 1 127 | } 128 | } 129 | StatsCommand::UpdateRejectedShares(id) => { 130 | if let Some(stats) = self.stats.get_mut(&id) { 131 | stats.rejected_shares += 1 132 | } 133 | } 134 | 
StatsCommand::UpdateDeviceName(id, name) => { 135 | if let Some(stats) = self.stats.get_mut(&id) { 136 | stats.device_name = Some(name) 137 | } 138 | } 139 | StatsCommand::RemoveStats(id) => { 140 | self.stats.remove(&id); 141 | } 142 | StatsCommand::GetStats(tx) => { 143 | let _ = tx.send(self.stats.clone()); 144 | } 145 | } 146 | } 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /src/api/utils.rs: -------------------------------------------------------------------------------- 1 | use sysinfo::{Pid, ProcessRefreshKind, ProcessesToUpdate, System}; 2 | 3 | pub(super) async fn get_cpu_and_memory_usage() -> (f32, u64) { 4 | let mut system = System::new_all(); 5 | 6 | // First refresh to get initial values 7 | system.refresh_processes_specifics( 8 | ProcessesToUpdate::All, 9 | true, 10 | ProcessRefreshKind::nothing().with_cpu().with_memory(), 11 | ); 12 | 13 | // Wait for a measurable interval 14 | tokio::time::sleep(sysinfo::MINIMUM_CPU_UPDATE_INTERVAL).await; 15 | 16 | // Second refresh to get the difference 17 | system.refresh_processes_specifics( 18 | ProcessesToUpdate::All, 19 | true, 20 | ProcessRefreshKind::nothing().with_cpu().with_memory(), 21 | ); 22 | 23 | let pid = std::process::id(); 24 | if let Some(process) = system.process(Pid::from_u32(pid)) { 25 | let cpu_usage = process.cpu_usage(); 26 | let cpu_nums = system.cpus().len() as f32; // get the number of cpu 27 | 28 | let normalized_cpu_usage = if cpu_nums > 0.0 { 29 | cpu_usage / cpu_nums 30 | } else { 31 | 0.0 32 | }; 33 | 34 | let memory = process.memory(); 35 | (normalized_cpu_usage, memory) 36 | } else { 37 | (0.0, 0) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use lazy_static::lazy_static; 3 | use serde::{Deserialize, Serialize}; 4 | use std::{ 5 | net::{SocketAddr, 
ToSocketAddrs}, 6 | path::PathBuf, 7 | }; 8 | use tracing::{error, info, warn}; 9 | 10 | use crate::{HashUnit, DEFAULT_SV1_HASHPOWER}; 11 | lazy_static! { 12 | pub static ref CONFIG: Configuration = Configuration::load_config(); 13 | } 14 | #[derive(Parser)] 15 | struct Args { 16 | #[clap(long)] 17 | test: bool, 18 | #[clap(long = "d", short = 'd', value_parser = parse_hashrate)] 19 | downstream_hashrate: Option, 20 | #[clap(long = "loglevel", short = 'l')] 21 | loglevel: Option, 22 | #[clap(long = "nc", short = 'n')] 23 | noise_connection_log: Option, 24 | #[clap(long = "delay")] 25 | delay: Option, 26 | #[clap(long = "interval", short = 'i')] 27 | adjustment_interval: Option, 28 | #[clap(long = "pool", short = 'p', value_delimiter = ',')] 29 | pool_addresses: Option>, 30 | #[clap(long = "test-pool", value_delimiter = ',')] 31 | test_pool_addresses: Option>, 32 | #[clap(long)] 33 | token: Option, 34 | #[clap(long)] 35 | tp_address: Option, 36 | #[clap(long)] 37 | listening_addr: Option, 38 | #[clap(long = "config", short = 'c')] 39 | config_file: Option, 40 | #[clap(long = "api-server-port", short = 's')] 41 | api_server_port: Option, 42 | } 43 | 44 | #[derive(Serialize, Deserialize)] 45 | struct ConfigFile { 46 | token: Option, 47 | tp_address: Option, 48 | pool_addresses: Option>, 49 | test_pool_addresses: Option>, 50 | interval: Option, 51 | delay: Option, 52 | downstream_hashrate: Option, 53 | loglevel: Option, 54 | nc_loglevel: Option, 55 | test: Option, 56 | listening_addr: Option, 57 | api_server_port: Option, 58 | } 59 | 60 | pub struct Configuration { 61 | token: Option, 62 | tp_address: Option, 63 | pool_addresses: Option>, 64 | test_pool_addresses: Option>, 65 | interval: u64, 66 | delay: u64, 67 | downstream_hashrate: f32, 68 | loglevel: String, 69 | nc_loglevel: String, 70 | test: bool, 71 | listening_addr: Option, 72 | api_server_port: String, 73 | } 74 | impl Configuration { 75 | pub fn token() -> Option { 76 | CONFIG.token.clone() 77 | } 78 | 79 | 
pub fn tp_address() -> Option { 80 | CONFIG.tp_address.clone() 81 | } 82 | 83 | pub fn pool_address() -> Option> { 84 | if CONFIG.test { 85 | CONFIG.test_pool_addresses.clone() // Return test pool addresses in test mode 86 | } else { 87 | CONFIG.pool_addresses.clone() 88 | } 89 | } 90 | 91 | pub fn adjustment_interval() -> u64 { 92 | CONFIG.interval 93 | } 94 | 95 | pub fn delay() -> u64 { 96 | CONFIG.delay 97 | } 98 | 99 | pub fn downstream_hashrate() -> f32 { 100 | CONFIG.downstream_hashrate 101 | } 102 | 103 | pub fn downstream_listening_addr() -> Option { 104 | CONFIG.listening_addr.clone() 105 | } 106 | pub fn api_server_port() -> String { 107 | CONFIG.api_server_port.clone() 108 | } 109 | 110 | pub fn loglevel() -> &'static str { 111 | match CONFIG.loglevel.to_lowercase().as_str() { 112 | "trace" | "debug" | "info" | "warn" | "error" | "off" => &CONFIG.loglevel, 113 | _ => { 114 | eprintln!( 115 | "Invalid log level '{}'. Defaulting to 'info'.", 116 | CONFIG.loglevel 117 | ); 118 | "info" 119 | } 120 | } 121 | } 122 | 123 | pub fn nc_loglevel() -> &'static str { 124 | match CONFIG.nc_loglevel.as_str() { 125 | "trace" | "debug" | "info" | "warn" | "error" | "off" => &CONFIG.nc_loglevel, 126 | _ => { 127 | eprintln!( 128 | "Invalid log level for noise_connection '{}' Defaulting to 'off'.", 129 | &CONFIG.nc_loglevel 130 | ); 131 | "off" 132 | } 133 | } 134 | } 135 | 136 | pub fn test() -> bool { 137 | CONFIG.test 138 | } 139 | 140 | // Loads config from CLI, file, or env vars with precedence: CLI > file > env. 
141 | fn load_config() -> Self { 142 | let args = Args::parse(); 143 | let config_path: PathBuf = args.config_file.unwrap_or("config.toml".into()); 144 | let config: ConfigFile = std::fs::read_to_string(&config_path) 145 | .ok() 146 | .and_then(|content| toml::from_str(&content).ok()) 147 | .unwrap_or(ConfigFile { 148 | token: None, 149 | tp_address: None, 150 | pool_addresses: None, 151 | test_pool_addresses: None, 152 | interval: None, 153 | delay: None, 154 | downstream_hashrate: None, 155 | loglevel: None, 156 | nc_loglevel: None, 157 | test: None, 158 | listening_addr: None, 159 | api_server_port: None, 160 | }); 161 | 162 | let token = args 163 | .token 164 | .or(config.token) 165 | .or_else(|| std::env::var("TOKEN").ok()); 166 | 167 | let tp_address = args 168 | .tp_address 169 | .or(config.tp_address) 170 | .or_else(|| std::env::var("TP_ADDRESS").ok()); 171 | 172 | let pool_addresses: Option> = args 173 | .pool_addresses 174 | .map(|addresses| { 175 | addresses 176 | .into_iter() 177 | .map(parse_address) 178 | .collect::>() 179 | }) 180 | .or_else(|| { 181 | config.pool_addresses.map(|addresses| { 182 | addresses 183 | .into_iter() 184 | .map(parse_address) 185 | .collect::>() 186 | }) 187 | }) 188 | .or_else(|| { 189 | std::env::var("POOL_ADDRESSES").ok().map(|s| { 190 | s.split(',') 191 | .map(|s| parse_address(s.trim().to_string())) 192 | .collect::>() 193 | }) 194 | }); 195 | 196 | let test_pool_addresses: Option> = args 197 | .test_pool_addresses 198 | .map(|addresses| { 199 | addresses 200 | .into_iter() 201 | .map(parse_address) 202 | .collect::>() 203 | }) 204 | .or_else(|| { 205 | config.test_pool_addresses.map(|addresses| { 206 | addresses 207 | .into_iter() 208 | .map(parse_address) 209 | .collect::>() 210 | }) 211 | }) 212 | .or_else(|| { 213 | std::env::var("TEST_POOL_ADDRESSES").ok().map(|s| { 214 | s.split(',') 215 | .map(|s| parse_address(s.trim().to_string())) 216 | .collect::>() 217 | }) 218 | }); 219 | 220 | let interval = args 221 | 
.adjustment_interval 222 | .or(config.interval) 223 | .or_else(|| std::env::var("INTERVAL").ok().and_then(|s| s.parse().ok())) 224 | .unwrap_or(120_000); 225 | 226 | let delay = args 227 | .delay 228 | .or(config.delay) 229 | .or_else(|| std::env::var("DELAY").ok().and_then(|s| s.parse().ok())) 230 | .unwrap_or(0); 231 | 232 | let expected_hashrate = args 233 | .downstream_hashrate 234 | .or_else(|| { 235 | config 236 | .downstream_hashrate 237 | .as_deref() 238 | .and_then(|d| parse_hashrate(d).ok()) 239 | }) 240 | .or_else(|| { 241 | std::env::var("DOWNSTREAM_HASHRATE") 242 | .ok() 243 | .and_then(|s| s.parse().ok()) 244 | }); 245 | let downstream_hashrate; 246 | if let Some(hashpower) = expected_hashrate { 247 | downstream_hashrate = hashpower; 248 | info!( 249 | "Using downstream hashrate: {}h/s", 250 | HashUnit::format_value(hashpower) 251 | ); 252 | } else { 253 | downstream_hashrate = DEFAULT_SV1_HASHPOWER; 254 | warn!( 255 | "No downstream hashrate provided, using default value: {}h/s", 256 | HashUnit::format_value(DEFAULT_SV1_HASHPOWER) 257 | ); 258 | } 259 | 260 | let listening_addr = args.listening_addr.or(config.listening_addr).or_else(|| { 261 | std::env::var("DOWNSTREAM_HASHRATE") 262 | .ok() 263 | .and_then(|s| s.parse().ok()) 264 | }); 265 | let api_server_port = args 266 | .api_server_port 267 | .or(config.api_server_port) 268 | .or_else(|| { 269 | std::env::var("API_SERVER_PORT") 270 | .ok() 271 | .and_then(|s| s.parse().ok()) 272 | }) 273 | .unwrap_or("3001".to_string()); 274 | 275 | let loglevel = args 276 | .loglevel 277 | .or(config.loglevel) 278 | .or_else(|| std::env::var("LOGLEVEL").ok()) 279 | .unwrap_or("info".to_string()); 280 | 281 | let nc_loglevel = args 282 | .noise_connection_log 283 | .or(config.nc_loglevel) 284 | .or_else(|| std::env::var("NC_LOGLEVEL").ok()) 285 | .unwrap_or("off".to_string()); 286 | 287 | let test = args.test || config.test.unwrap_or(false) || std::env::var("TEST").is_ok(); 288 | 289 | Configuration { 290 | 
token, 291 | tp_address, 292 | pool_addresses, 293 | test_pool_addresses, 294 | interval, 295 | delay, 296 | downstream_hashrate, 297 | loglevel, 298 | nc_loglevel, 299 | test, 300 | listening_addr, 301 | api_server_port, 302 | } 303 | } 304 | } 305 | 306 | /// Parses a hashrate string (e.g., "10T", "2.5P", "500E") into an f32 value in h/s. 307 | fn parse_hashrate(hashrate_str: &str) -> Result { 308 | let hashrate_str = hashrate_str.trim(); 309 | if hashrate_str.is_empty() { 310 | return Err("Hashrate cannot be empty. Expected format: '' (e.g., '10T', '2.5P', '5E'".to_string()); 311 | } 312 | 313 | let unit = hashrate_str.chars().last().unwrap_or(' ').to_string(); 314 | let num = &hashrate_str[..hashrate_str.len().saturating_sub(1)]; 315 | 316 | let num: f32 = num.parse().map_err(|_| { 317 | format!( 318 | "Invalid number '{}'. Expected format: '' (e.g., '10T', '2.5P', '5E')", 319 | num 320 | ) 321 | })?; 322 | 323 | let multiplier = HashUnit::from_str(&unit) 324 | .map(|unit| unit.multiplier()) 325 | .ok_or_else(|| format!( 326 | "Invalid unit '{}'. Expected 'T' (Terahash), 'P' (Petahash), or 'E' (Exahash). 
Example: '10T', '2.5P', '5E'", 327 | unit 328 | ))?; 329 | 330 | let hashrate = num * multiplier; 331 | 332 | if hashrate.is_infinite() || hashrate.is_nan() { 333 | return Err("Hashrate too large or invalid".to_string()); 334 | } 335 | 336 | Ok(hashrate) 337 | } 338 | 339 | fn parse_address(addr: String) -> SocketAddr { 340 | addr.to_socket_addrs() 341 | .map_err(|e| error!("Invalid socket address: {}", e)) 342 | .expect("Failed to parse socket address") 343 | .next() 344 | .expect("No socket address resolved") 345 | } 346 | -------------------------------------------------------------------------------- /src/ingress/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod sv1_ingress; 2 | //pub mod sv2_up_connection; 3 | //pub mod task_manager; 4 | -------------------------------------------------------------------------------- /src/ingress/sv1_ingress.rs: -------------------------------------------------------------------------------- 1 | use std::net::{IpAddr, SocketAddr}; 2 | 3 | use crate::shared::{error::Sv1IngressError, utils::AbortOnDrop}; 4 | use futures::{ 5 | stream::{SplitSink, SplitStream}, 6 | SinkExt, StreamExt, 7 | }; 8 | use tokio::{ 9 | net::{TcpListener, TcpStream}, 10 | sync::mpsc::{channel, Receiver, Sender}, 11 | }; 12 | use tokio_util::codec::{Framed, LinesCodec}; 13 | use tracing::{error, info, warn}; 14 | 15 | pub fn start_listen_for_downstream( 16 | downstreams: Sender<(Sender, Receiver, IpAddr)>, 17 | ) -> AbortOnDrop { 18 | info!("Starting downstream listner"); 19 | tokio::task::spawn(async move { 20 | let down_addr: String = crate::SV1_DOWN_LISTEN_ADDR.to_string(); 21 | let downstream_addr: SocketAddr = down_addr.parse().expect("Invalid listen address"); 22 | let downstream_listener = TcpListener::bind(downstream_addr) 23 | .await 24 | .expect("impossible to bind downstream"); 25 | while let Ok((stream, addr)) = downstream_listener.accept().await { 26 | info!("Try to connect {:#?}", addr); 27 
| Downstream::initialize( 28 | stream, 29 | crate::MAX_LEN_DOWN_MSG, 30 | addr.ip(), 31 | downstreams.clone(), 32 | ); 33 | } 34 | }) 35 | .into() 36 | } 37 | struct Downstream {} 38 | 39 | impl Downstream { 40 | pub fn initialize( 41 | stream: TcpStream, 42 | max_len_for_downstream_messages: u32, 43 | address: IpAddr, 44 | downstreams: Sender<(Sender, Receiver, IpAddr)>, 45 | ) { 46 | tokio::spawn(async move { 47 | info!("spawning downstream"); 48 | let (send_to_upstream, recv) = channel(10); 49 | let (send, recv_from_upstream) = channel(10); 50 | downstreams 51 | .send((send, recv, address)) 52 | .await 53 | .expect("Translator busy"); 54 | let codec = LinesCodec::new_with_max_length(max_len_for_downstream_messages as usize); 55 | let framed = Framed::new(stream, codec); 56 | Self::start(framed, recv_from_upstream, send_to_upstream).await 57 | }); 58 | } 59 | async fn start( 60 | framed: Framed, 61 | receiver: Receiver, 62 | sender: Sender, 63 | ) { 64 | let (writer, reader) = framed.split(); 65 | let result = tokio::select! 
{ 66 | result1 = Self::receive_from_downstream_and_relay_up(reader, sender) => result1, 67 | result2 = Self::receive_from_upstream_and_relay_down(writer, receiver) => result2, 68 | }; 69 | // upstream disconnected make sure to clean everything before exit 70 | match result { 71 | Sv1IngressError::DownstreamDropped => (), 72 | Sv1IngressError::TranslatorDropped => (), 73 | Sv1IngressError::TaskFailed => (), 74 | } 75 | } 76 | async fn receive_from_downstream_and_relay_up( 77 | mut recv: SplitStream>, 78 | send: Sender, 79 | ) -> Sv1IngressError { 80 | let task = tokio::spawn(async move { 81 | while let Some(Ok(message)) = recv.next().await { 82 | if send.send(message).await.is_err() { 83 | error!("Upstream dropped trying to send"); 84 | return Sv1IngressError::TranslatorDropped; 85 | } 86 | } 87 | warn!("Downstream dropped while trying to send message up"); 88 | Sv1IngressError::DownstreamDropped 89 | }) 90 | .await; 91 | match task { 92 | Ok(err) => err, 93 | Err(_) => Sv1IngressError::TaskFailed, 94 | } 95 | } 96 | async fn receive_from_upstream_and_relay_down( 97 | mut send: SplitSink, String>, 98 | mut recv: Receiver, 99 | ) -> Sv1IngressError { 100 | let task = tokio::spawn(async move { 101 | while let Some(message) = recv.recv().await { 102 | let message = message.replace(['\n', '\r'], ""); 103 | if send.send(message).await.is_err() { 104 | warn!("Downstream dropped while trying to send message down"); 105 | return Sv1IngressError::DownstreamDropped; 106 | }; 107 | } 108 | if send.close().await.is_err() { 109 | error!("Failed to close connection"); 110 | }; 111 | error!("Upstream dropped trying to receive"); 112 | Sv1IngressError::TranslatorDropped 113 | }) 114 | .await; 115 | match task { 116 | Ok(err) => err, 117 | Err(_) => Sv1IngressError::TaskFailed, 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/jd_client/error.rs: 
-------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use bitcoin::error::ParseIntError; 4 | 5 | pub type ProxyResult = core::result::Result; 6 | 7 | #[derive(Debug)] 8 | pub enum Error { 9 | VecToSlice32(Vec), 10 | /// Errors on bad CLI argument input. 11 | BadCliArgs, 12 | /// Errors from `binary_sv2` crate. 13 | BinarySv2(binary_sv2::Error), 14 | /// Errors on bad noise handshake. 15 | CodecNoise(codec_sv2::noise_sv2::Error), 16 | /// Errors from `framing_sv2` crate. 17 | FramingSv2(framing_sv2::Error), 18 | /// Errors on bad `TcpStream` connection. 19 | Io(std::io::Error), 20 | /// Errors on bad `String` to `int` conversion. 21 | ParseInt(std::num::ParseIntError), 22 | /// Errors from `roles_logic_sv2` crate. 23 | RolesSv2Logic(roles_logic_sv2::errors::Error), 24 | UpstreamIncoming(roles_logic_sv2::errors::Error), 25 | #[allow(dead_code)] 26 | SubprotocolMining(String), 27 | // Locking Errors 28 | PoisonLock, 29 | TokioChannelErrorRecv(tokio::sync::broadcast::error::RecvError), 30 | Uint256Conversion(ParseIntError), 31 | Infallible(std::convert::Infallible), 32 | Unrecoverable, 33 | TaskManagerFailed, 34 | JdClientMutexCorrupted, 35 | 36 | // jd_client/job_declarator specific errors 37 | JobDeclaratorMutexCorrupted, 38 | JobDeclaratorTaskManagerFailed, 39 | // jd_client/mining_downstream specific errors 40 | JdClientDownstreamMutexCorrupted, 41 | JdClientDownstreamTaskManagerFailed, 42 | // jd_client/mining_upstream specific errors 43 | JdClientUpstreamMutexCorrupted, 44 | JdClientUpstreamTaskManagerFailed, 45 | JdMissing, 46 | // template_receiver specific errors 47 | TemplateRxMutexCorrupted, 48 | TemplateRxTaskManagerFailed, 49 | TpMissing, 50 | } 51 | 52 | impl fmt::Display for Error { 53 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 54 | use Error::*; 55 | match self { 56 | BadCliArgs => write!(f, "Bad CLI arg input"), 57 | BinarySv2(ref e) => write!(f, "Binary SV2 error: `{:?}`", e), 58 | 
CodecNoise(ref e) => write!(f, "Noise error: `{:?}", e), 59 | FramingSv2(ref e) => write!(f, "Framing SV2 error: `{:?}`", e), 60 | Io(ref e) => write!(f, "I/O error: `{:?}", e), 61 | ParseInt(ref e) => write!(f, "Bad convert from `String` to `int`: `{:?}`", e), 62 | RolesSv2Logic(ref e) => write!(f, "Roles SV2 Logic Error: `{:?}`", e), 63 | SubprotocolMining(ref e) => write!(f, "Subprotocol Mining Error: `{:?}`", e), 64 | UpstreamIncoming(ref e) => write!(f, "Upstream parse incoming error: `{:?}`", e), 65 | PoisonLock => write!(f, "Poison Lock error"), 66 | TokioChannelErrorRecv(ref e) => write!(f, "Channel receive error: `{:?}`", e), 67 | Uint256Conversion(ref e) => write!(f, "U256 Conversion Error: `{:?}`", e), 68 | VecToSlice32(ref e) => write!(f, "Standard Error: `{:?}`", e), 69 | Infallible(ref e) => write!(f, "Infallible Error:`{:?}`", e), 70 | Unrecoverable => write!(f, "Unrecoverable Error"), 71 | JdClientMutexCorrupted => write!(f, "JdClient mutex Corrupted"), 72 | TaskManagerFailed => write!(f, "Failed to add Task in JdClient TaskManager"), 73 | 74 | JobDeclaratorMutexCorrupted => write!(f, "Job Declarator mutex Corrupted"), 75 | JobDeclaratorTaskManagerFailed => { 76 | write!(f, "Failed to add Task in Job Declarator TaskManager") 77 | } 78 | JdMissing => write!(f, "Job declarator is None"), 79 | 80 | JdClientDownstreamMutexCorrupted => { 81 | write!(f, "JdClient Mining Downstream mutex Corrupted") 82 | } 83 | JdClientDownstreamTaskManagerFailed => write!( 84 | f, 85 | "Failed to add Task in JdClient Mining Downstream TaskManager" 86 | ), 87 | JdClientUpstreamMutexCorrupted => write!(f, "JdClient Mining Upstream mutex Corrupted"), 88 | JdClientUpstreamTaskManagerFailed => write!( 89 | f, 90 | "Failed to add Task in JdClient Mining Upstream TaskManager" 91 | ), 92 | TemplateRxMutexCorrupted => write!(f, "TemplateRx mutex Corrupted"), 93 | TemplateRxTaskManagerFailed => { 94 | write!(f, "Failed to add Task in TemplateRx TaskManager") 95 | } 96 | TpMissing 
=> write!(f, "Failed to connect to TP"), 97 | } 98 | } 99 | } 100 | 101 | impl From for Error { 102 | fn from(e: binary_sv2::Error) -> Self { 103 | Error::BinarySv2(e) 104 | } 105 | } 106 | 107 | impl From for Error { 108 | fn from(e: codec_sv2::noise_sv2::Error) -> Self { 109 | Error::CodecNoise(e) 110 | } 111 | } 112 | 113 | impl From for Error { 114 | fn from(e: framing_sv2::Error) -> Self { 115 | Error::FramingSv2(e) 116 | } 117 | } 118 | 119 | impl From for Error { 120 | fn from(e: std::io::Error) -> Self { 121 | Error::Io(e) 122 | } 123 | } 124 | 125 | impl From for Error { 126 | fn from(e: std::num::ParseIntError) -> Self { 127 | Error::ParseInt(e) 128 | } 129 | } 130 | 131 | impl From for Error { 132 | fn from(e: roles_logic_sv2::errors::Error) -> Self { 133 | Error::RolesSv2Logic(e) 134 | } 135 | } 136 | 137 | impl From for Error { 138 | fn from(e: tokio::sync::broadcast::error::RecvError) -> Self { 139 | Error::TokioChannelErrorRecv(e) 140 | } 141 | } 142 | 143 | // *** LOCK ERRORS *** 144 | // impl<'a> From>> for Error<'a> { 145 | // fn from(e: PoisonError>) -> Self { 146 | // Error::PoisonLock( 147 | // LockError::Bridge(e) 148 | // ) 149 | // } 150 | // } 151 | 152 | // impl<'a> From>> for Error<'a> { 153 | // fn from(e: PoisonError>) -> Self { 154 | // Error::PoisonLock( 155 | // LockError::NextMiningNotify(e) 156 | // ) 157 | // } 158 | // } 159 | 160 | impl From> for Error { 161 | fn from(e: Vec) -> Self { 162 | Error::VecToSlice32(e) 163 | } 164 | } 165 | 166 | impl From for Error { 167 | fn from(e: ParseIntError) -> Self { 168 | Error::Uint256Conversion(e) 169 | } 170 | } 171 | 172 | impl From for Error { 173 | fn from(e: std::convert::Infallible) -> Self { 174 | Error::Infallible(e) 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/jd_client/job_declarator/message_handler.rs: -------------------------------------------------------------------------------- 1 | use super::JobDeclarator; 2 | 
use roles_logic_sv2::{ 3 | handlers::{job_declaration::ParseServerJobDeclarationMessages, SendTo_}, 4 | job_declaration_sv2::{ 5 | AllocateMiningJobTokenSuccess, DeclareMiningJobError, DeclareMiningJobSuccess, 6 | ProvideMissingTransactions, ProvideMissingTransactionsSuccess, 7 | }, 8 | parsers::JobDeclaration, 9 | }; 10 | pub type SendTo = SendTo_, ()>; 11 | use roles_logic_sv2::errors::Error; 12 | 13 | impl ParseServerJobDeclarationMessages for JobDeclarator { 14 | fn handle_allocate_mining_job_token_success( 15 | &mut self, 16 | message: AllocateMiningJobTokenSuccess, 17 | ) -> Result { 18 | self.allocated_tokens.push(message.into_static()); 19 | 20 | Ok(SendTo::None(None)) 21 | } 22 | 23 | fn handle_declare_mining_job_success( 24 | &mut self, 25 | message: DeclareMiningJobSuccess, 26 | ) -> Result { 27 | let message = JobDeclaration::DeclareMiningJobSuccess(message.into_static()); 28 | Ok(SendTo::None(Some(message))) 29 | } 30 | 31 | fn handle_declare_mining_job_error( 32 | &mut self, 33 | _message: DeclareMiningJobError, 34 | ) -> Result { 35 | Ok(SendTo::None(None)) 36 | } 37 | 38 | fn handle_provide_missing_transactions( 39 | &mut self, 40 | message: ProvideMissingTransactions, 41 | ) -> Result { 42 | let tx_list = self 43 | .last_declare_mining_jobs_sent 44 | .get(&message.request_id) 45 | .ok_or(Error::UnknownRequestId(message.request_id))? 46 | .clone() 47 | .ok_or(Error::JDSMissingTransactions)? 
48 | .tx_list 49 | .into_inner(); 50 | 51 | let unknown_tx_position_list: Vec = message.unknown_tx_position_list.into_inner(); 52 | let missing_transactions: Vec = unknown_tx_position_list 53 | .iter() 54 | .filter_map(|&pos| tx_list.get(pos as usize).cloned()) 55 | .collect(); 56 | let request_id = message.request_id; 57 | let transaction_list = binary_sv2::Seq064K::new(missing_transactions) 58 | .map_err(|_| Error::JDSMissingTransactions)?; 59 | let message_provide_missing_transactions = ProvideMissingTransactionsSuccess { 60 | request_id, 61 | transaction_list, 62 | }; 63 | let message_enum = 64 | JobDeclaration::ProvideMissingTransactionsSuccess(message_provide_missing_transactions); 65 | Ok(SendTo::Respond(message_enum)) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/jd_client/job_declarator/setup_connection.rs: -------------------------------------------------------------------------------- 1 | use crate::config::Configuration; 2 | use codec_sv2::{StandardEitherFrame, StandardSv2Frame}; 3 | use rand::distributions::{Alphanumeric, DistString}; 4 | use roles_logic_sv2::{ 5 | common_messages_sv2::{Protocol, SetupConnection}, 6 | handlers::common::{ParseUpstreamCommonMessages, SendTo}, 7 | parsers::PoolMessages, 8 | routing_logic::{CommonRoutingLogic, NoRouting}, 9 | utils::Mutex, 10 | }; 11 | use std::{convert::TryInto, net::SocketAddr, sync::Arc}; 12 | use tokio::sync::mpsc::{Receiver as TReceiver, Sender as TSender}; 13 | use tracing::error; 14 | 15 | pub type Message = PoolMessages<'static>; 16 | pub type StdFrame = StandardSv2Frame; 17 | pub type EitherFrame = StandardEitherFrame; 18 | pub struct SetupConnectionHandler {} 19 | 20 | impl SetupConnectionHandler { 21 | fn get_setup_connection_message(proxy_address: SocketAddr) -> SetupConnection<'static> { 22 | let endpoint_host = proxy_address 23 | .ip() 24 | .to_string() 25 | .into_bytes() 26 | .try_into() 27 | .expect("Internal error: this operation 
can not fail because IP addr string can always be converted into Inner"); 28 | let vendor = String::new().try_into().expect("Internal error: this operation can not fail because empty string can always be converted into Inner"); 29 | let hardware_version = String::new().try_into().expect("Internal error: this operation can not fail because empty string can always be converted into Inner"); 30 | let firmware = String::new().try_into().expect("Internal error: this operation can not fail because empty string can always be converted into Inner"); 31 | let token = Configuration::token().expect("Checked at initialization"); 32 | let device_id = Alphanumeric.sample_string(&mut rand::thread_rng(), 16); 33 | let device_id = format!("{}::POOLED::{}", device_id, token) 34 | .to_string() 35 | .try_into() 36 | .expect("Internal error: this operation can not fail because device_id string can always be converted into Inner"); 37 | let mut setup_connection = SetupConnection { 38 | protocol: Protocol::JobDeclarationProtocol, 39 | min_version: 2, 40 | max_version: 2, 41 | flags: 0b0000_0000_0000_0000_0000_0000_0000_0000, 42 | endpoint_host, 43 | endpoint_port: proxy_address.port(), 44 | vendor, 45 | hardware_version, 46 | firmware, 47 | device_id, 48 | }; 49 | setup_connection.set_async_job_nogotiation(); 50 | setup_connection 51 | } 52 | 53 | pub async fn setup( 54 | receiver: &mut TReceiver, 55 | sender: &mut TSender, 56 | proxy_address: SocketAddr, 57 | ) -> Result<(), crate::jd_client::error::Error> { 58 | let setup_connection = Self::get_setup_connection_message(proxy_address); 59 | 60 | let sv2_frame: StdFrame = PoolMessages::Common(setup_connection.into()).try_into()?; 61 | let sv2_frame = sv2_frame.into(); 62 | 63 | sender 64 | .send(sv2_frame) 65 | .await 66 | .map_err(|_| crate::jd_client::error::Error::Unrecoverable)?; 67 | 68 | let mut incoming: StdFrame = match receiver.recv().await { 69 | Some(msg) => msg.try_into()?, 70 | None => { 71 | error!("Failed to receive msg 
from pool"); 72 | return Err(crate::jd_client::error::Error::Unrecoverable); // Better Error to return? 73 | } 74 | }; 75 | 76 | let message_type = incoming 77 | .get_header() 78 | .ok_or(crate::jd_client::error::Error::Unrecoverable)? 79 | .msg_type(); 80 | let payload = incoming.payload(); 81 | ParseUpstreamCommonMessages::handle_message_common( 82 | Arc::new(Mutex::new(SetupConnectionHandler {})), 83 | message_type, 84 | payload, 85 | CommonRoutingLogic::None, 86 | )?; 87 | Ok(()) 88 | } 89 | } 90 | 91 | impl ParseUpstreamCommonMessages for SetupConnectionHandler { 92 | fn handle_setup_connection_success( 93 | &mut self, 94 | _: roles_logic_sv2::common_messages_sv2::SetupConnectionSuccess, 95 | ) -> Result { 96 | Ok(SendTo::None(None)) 97 | } 98 | 99 | fn handle_setup_connection_error( 100 | &mut self, 101 | _: roles_logic_sv2::common_messages_sv2::SetupConnectionError, 102 | ) -> Result { 103 | todo!() 104 | } 105 | 106 | fn handle_channel_endpoint_changed( 107 | &mut self, 108 | _: roles_logic_sv2::common_messages_sv2::ChannelEndpointChanged, 109 | ) -> Result { 110 | todo!() 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /src/jd_client/job_declarator/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[derive(Debug)] 9 | #[allow(dead_code)] 10 | enum Task { 11 | AllocateTokens(AbortOnDrop), 12 | #[allow(clippy::enum_variant_names)] 13 | MainTask(AbortOnDrop), 14 | OnSetNewPrevHash(AbortOnDrop), 15 | } 16 | 17 | #[derive(Debug)] 18 | pub struct TaskManager { 19 | send_task: mpsc::Sender, 20 | abort: Option, 21 | } 22 | 23 | impl TaskManager { 24 | #[allow(unused_variables)] 25 | pub fn initialize() -> Arc> { 26 | let (sender, mut receiver) = mpsc::channel(10); 27 | let handle = 
tokio::task::spawn(async move { 28 | let mut tasks = vec![]; 29 | while let Some(task) = receiver.recv().await { 30 | tasks.push(task); 31 | } 32 | warn!("Share accounter main task manager stopped, keep alive tasks"); 33 | loop { 34 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 35 | } 36 | }); 37 | Arc::new(Mutex::new(Self { 38 | send_task: sender, 39 | abort: Some(handle.into()), 40 | })) 41 | } 42 | 43 | pub fn get_aborter(&mut self) -> Option { 44 | self.abort.take() 45 | } 46 | 47 | pub async fn add_allocate_tokens( 48 | self_: Arc>, 49 | abortable: AbortOnDrop, 50 | ) -> Result<(), ()> { 51 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 52 | send_task 53 | .send(Task::AllocateTokens(abortable)) 54 | .await 55 | .map_err(|_| ()) 56 | } 57 | pub async fn add_main_task(self_: Arc>, abortable: AbortOnDrop) -> Result<(), ()> { 58 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 59 | send_task 60 | .send(Task::MainTask(abortable)) 61 | .await 62 | .map_err(|_| ()) 63 | } 64 | pub async fn add_on_set_new_prev_hash( 65 | self_: Arc>, 66 | abortable: AbortOnDrop, 67 | ) -> Result<(), ()> { 68 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 69 | send_task 70 | .send(Task::OnSetNewPrevHash(abortable)) 71 | .await 72 | .map_err(|_| ()) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/jd_client/mining_downstream/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[derive(Debug)] 9 | #[allow(dead_code)] 10 | enum Task { 11 | SetChannelFactory(AbortOnDrop), 12 | MainTask(AbortOnDrop), 13 | } 14 | 15 | #[derive(Debug)] 16 | pub struct TaskManager { 17 | send_task: mpsc::Sender, 18 | abort: Option, 19 | } 20 | 21 | impl 
TaskManager { 22 | #[allow(unused_variables)] 23 | pub fn initialize() -> Arc> { 24 | let (sender, mut receiver) = mpsc::channel(10); 25 | let handle = tokio::task::spawn(async move { 26 | let mut tasks = vec![]; 27 | while let Some(task) = receiver.recv().await { 28 | tasks.push(task); 29 | } 30 | warn!("Share accounter main task manager stopped, keep alive tasks"); 31 | loop { 32 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 33 | } 34 | }); 35 | Arc::new(Mutex::new(Self { 36 | send_task: sender, 37 | abort: Some(handle.into()), 38 | })) 39 | } 40 | 41 | pub fn get_aborter(&mut self) -> Option { 42 | self.abort.take() 43 | } 44 | 45 | pub async fn add_set_channel_factory( 46 | self_: Arc>, 47 | abortable: AbortOnDrop, 48 | ) -> Result<(), ()> { 49 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 50 | send_task 51 | .send(Task::SetChannelFactory(abortable)) 52 | .await 53 | .map_err(|_| ()) 54 | } 55 | 56 | pub async fn add_main_task(self_: Arc>, abortable: AbortOnDrop) -> Result<(), ()> { 57 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 58 | send_task 59 | .send(Task::MainTask(abortable)) 60 | .await 61 | .map_err(|_| ()) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/jd_client/mining_upstream/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod upstream; 2 | pub use upstream::Upstream; 3 | mod task_manager; 4 | -------------------------------------------------------------------------------- /src/jd_client/mining_upstream/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[derive(Debug)] 9 | #[allow(dead_code)] 10 | enum Task { 11 | MainTask(AbortOnDrop), 12 | } 13 | 14 | 
#[derive(Debug)] 15 | pub struct TaskManager { 16 | send_task: mpsc::Sender, 17 | abort: Option, 18 | } 19 | 20 | impl TaskManager { 21 | #[allow(unused_variables)] 22 | pub fn initialize() -> Arc> { 23 | let (sender, mut receiver) = mpsc::channel(10); 24 | let handle = tokio::task::spawn(async move { 25 | let mut tasks = vec![]; 26 | while let Some(task) = receiver.recv().await { 27 | tasks.push(task); 28 | } 29 | warn!("Share accounter main task manager stopped, keep alive tasks"); 30 | loop { 31 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 32 | } 33 | }); 34 | Arc::new(Mutex::new(Self { 35 | send_task: sender, 36 | abort: Some(handle.into()), 37 | })) 38 | } 39 | 40 | pub fn get_aborter(&mut self) -> Option { 41 | self.abort.take() 42 | } 43 | 44 | pub async fn add_main_task(self_: Arc>, abortable: AbortOnDrop) -> Result<(), ()> { 45 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 46 | send_task 47 | .send(Task::MainTask(abortable)) 48 | .await 49 | .map_err(|_| ()) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/jd_client/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(special_module_name)] 2 | 3 | mod error; 4 | pub mod job_declarator; 5 | pub mod mining_downstream; 6 | pub mod mining_upstream; 7 | mod task_manager; 8 | mod template_receiver; 9 | 10 | use job_declarator::JobDeclarator; 11 | use key_utils::Secp256k1PublicKey; 12 | use mining_downstream::DownstreamMiningNode; 13 | use std::sync::atomic::AtomicBool; 14 | use task_manager::TaskManager; 15 | use template_receiver::TemplateRx; 16 | use tracing::{error, info}; 17 | 18 | /// Is used by the template receiver and the downstream. 
When a NewTemplate is received the context 19 | /// that is running the template receiver set this value to false and then the message is sent to 20 | /// the context that is running the Downstream that do something and then set it back to true. 21 | /// 22 | /// In the meantime if the context that is running the template receiver receives a SetNewPrevHash 23 | /// it wait until the value of this global is true before doing anything. 24 | /// 25 | /// Acuire and Release memory ordering is used. 26 | /// 27 | /// Memory Ordering Explanation: 28 | /// We use Acquire-Release ordering instead of SeqCst or Relaxed for the following reasons: 29 | /// 1. Acquire in template receiver context ensures we see all operations before the Release store 30 | /// the downstream. 31 | /// 2. Within the same execution context (template receiver), a Relaxed store followed by an Acquire 32 | /// load is sufficient. This is because operations within the same context execute in the order 33 | /// they appear in the code. 34 | /// 3. The combination of Release in downstream and Acquire in template receiver contexts establishes 35 | /// a happens-before relationship, guaranteeing that we handle the SetNewPrevHash message after 36 | /// that downstream have finished handling the NewTemplate. 37 | /// 3. SeqCst is overkill we only need to synchronize two contexts, a globally agreed-upon order 38 | /// between all the contexts is not necessary. 
39 | pub static IS_NEW_TEMPLATE_HANDLED: AtomicBool = AtomicBool::new(true); 40 | 41 | pub static IS_CUSTOM_JOB_SET: AtomicBool = AtomicBool::new(true); 42 | 43 | use crate::proxy_state::{DownstreamType, ProxyState, TpState}; 44 | use roles_logic_sv2::{parsers::Mining, utils::Mutex}; 45 | use std::{ 46 | net::{IpAddr, SocketAddr}, 47 | str::FromStr, 48 | sync::Arc, 49 | }; 50 | 51 | use crate::shared::utils::AbortOnDrop; 52 | 53 | pub async fn start( 54 | receiver: tokio::sync::mpsc::Receiver>, 55 | sender: tokio::sync::mpsc::Sender>, 56 | up_receiver: tokio::sync::mpsc::Receiver>, 57 | up_sender: tokio::sync::mpsc::Sender>, 58 | ) -> Option { 59 | // This will not work when we implement support for multiple upstream 60 | IS_CUSTOM_JOB_SET.store(true, std::sync::atomic::Ordering::Release); 61 | IS_NEW_TEMPLATE_HANDLED.store(true, std::sync::atomic::Ordering::Release); 62 | initialize_jd(receiver, sender, up_receiver, up_sender).await 63 | } 64 | 65 | async fn initialize_jd( 66 | receiver: tokio::sync::mpsc::Receiver>, 67 | sender: tokio::sync::mpsc::Sender>, 68 | up_receiver: tokio::sync::mpsc::Receiver>, 69 | up_sender: tokio::sync::mpsc::Sender>, 70 | ) -> Option { 71 | let task_manager = TaskManager::initialize(); 72 | let abortable = match task_manager.safe_lock(|t| t.get_aborter()) { 73 | Ok(abortable) => abortable?, 74 | Err(e) => { 75 | error!("Jdc task manager mutex corrupt: {e}"); 76 | return None; 77 | } 78 | }; 79 | let test_only_do_not_send_solution_to_tp = false; 80 | 81 | // When Downstream receive a share that meets bitcoin target it transformit in a 82 | // SubmitSolution and send it to the TemplateReceiver 83 | let (send_solution, recv_solution) = tokio::sync::mpsc::channel(10); 84 | 85 | // Instantiate a new `Upstream` (SV2 Pool) 86 | let upstream = match mining_upstream::Upstream::new(crate::MIN_EXTRANONCE_SIZE, up_sender).await 87 | { 88 | Ok(upstream) => upstream, 89 | Err(e) => { 90 | error!("Failed to instantiate new Upstream: {e}"); 91 | 
drop(abortable); 92 | return None; 93 | } 94 | }; 95 | 96 | // Initialize JD part 97 | let tp_address = match crate::TP_ADDRESS.safe_lock(|tp| tp.clone()) { 98 | Ok(tp_address) => tp_address 99 | .expect("Unreachable code, jdc is not instantiated when TP_ADDRESS not present"), 100 | Err(e) => { 101 | error!("TP_ADDRESS mutex corrupted: {e}"); 102 | drop(abortable); 103 | return None; 104 | } 105 | }; 106 | 107 | let mut parts = tp_address.split(':'); 108 | let ip_tp = parts.next().expect("The passed value for TP address is not valid. Terminating.... TP_ADDRESS should be in this format `127.0.0.1:8442`").to_string(); 109 | let port_tp = parts.next().expect("The passed value for TP address is not valid. Terminating.... TP_ADDRESS should be in this format `127.0.0.1:8442`").parse::().expect("This operation should not fail because a valid port_tp should always be converted to U16"); 110 | 111 | let auth_pub_k: Secp256k1PublicKey = crate::AUTH_PUB_KEY.parse().expect("Invalid public key"); 112 | let address = match crate::POOL_ADDRESS.safe_lock(|address| *address) { 113 | Ok(Some(address)) => address, 114 | Ok(None) => { 115 | error!("Pool address is missing"); 116 | ProxyState::update_inconsistency(Some(1)); 117 | return None; 118 | } 119 | Err(e) => { 120 | error!("Pool address mutex is poisoned: {e:?}"); 121 | ProxyState::update_inconsistency(Some(1)); 122 | return None; 123 | } 124 | }; 125 | 126 | let (jd, jd_abortable) = 127 | match JobDeclarator::new(address, auth_pub_k.into_bytes(), upstream.clone(), true).await { 128 | Ok(c) => c, 129 | Err(e) => { 130 | error!("Failed to intialize Jd: {e}"); 131 | drop(abortable); 132 | return None; 133 | } 134 | }; 135 | 136 | if TaskManager::add_job_declarator_task(task_manager.clone(), jd_abortable) 137 | .await 138 | .is_err() 139 | { 140 | error!( 141 | "Task manager failed while trying to add job declarator task{}", 142 | error::Error::TaskManagerFailed 143 | ); 144 | drop(abortable); 145 | return None; 146 | }; 147 | 148 
| let donwstream = Arc::new(Mutex::new(DownstreamMiningNode::new( 149 | sender, 150 | Some(upstream.clone()), 151 | send_solution, 152 | false, 153 | vec![], 154 | Some(jd.clone()), 155 | ))); 156 | let downstream_abortable = match DownstreamMiningNode::start(donwstream.clone(), receiver).await 157 | { 158 | Ok(abortable) => abortable, 159 | Err(e) => { 160 | error!("Can not start downstream mining node: {e}"); 161 | ProxyState::update_downstream_state(DownstreamType::JdClientMiningDownstream); 162 | return None; 163 | } 164 | }; 165 | if TaskManager::add_mining_downtream_task(task_manager.clone(), downstream_abortable) 166 | .await 167 | .is_err() 168 | { 169 | error!( 170 | "Task manager failed while trying to add mining downstream task{}", 171 | error::Error::TaskManagerFailed 172 | ); 173 | drop(abortable); 174 | return None; 175 | }; 176 | if upstream 177 | .safe_lock(|u| u.downstream = Some(donwstream.clone())) 178 | .is_err() 179 | { 180 | error!("Upstream mutex failed"); 181 | drop(abortable); // drop all tasks initailzed upto this point 182 | return None; 183 | }; 184 | 185 | // Start receiving messages from the SV2 Upstream role 186 | let upstream_abortable = 187 | match mining_upstream::Upstream::parse_incoming(upstream.clone(), up_receiver).await { 188 | Ok(abortable) => abortable, 189 | Err(e) => { 190 | error!("Failed to get jdc upstream abortable: {e}"); 191 | drop(abortable); // drop all tasks initailzed upto this point 192 | return None; 193 | } 194 | }; 195 | if TaskManager::add_mining_upstream_task(task_manager.clone(), upstream_abortable) 196 | .await 197 | .is_err() 198 | { 199 | error!( 200 | "Task manager failed while trying to add mining upstream task{}", 201 | error::Error::TaskManagerFailed 202 | ); 203 | drop(abortable); // drop all tasks initailzed upto this point 204 | return None; 205 | }; 206 | let ip = IpAddr::from_str(ip_tp.as_str()) 207 | .expect("Infallable Operation: Failed tp can always be converted into IpAddr"); 208 | let 
tp_abortable = match TemplateRx::connect( 209 | SocketAddr::new(ip, port_tp), 210 | recv_solution, 211 | Some(jd.clone()), 212 | donwstream.clone(), 213 | vec![], 214 | None, 215 | test_only_do_not_send_solution_to_tp, 216 | ) 217 | .await 218 | { 219 | Ok(abortable) => abortable, 220 | Err(_) => { 221 | info!("Dropping jd abortable"); 222 | eprintln!("TP is unreachable, the proxy is in not in JD mode"); 223 | drop(abortable); 224 | // Temporaily set TP_ADDRESS to None so that proxy can restart without it. 225 | // that means we will start mining without jd 226 | if crate::TP_ADDRESS.safe_lock(|tp| *tp = None).is_err() { 227 | error!("TP_ADDRESS mutex corrupt"); 228 | return None; 229 | }; 230 | tokio::spawn(retry_connection(tp_address)); 231 | return None; 232 | } 233 | }; 234 | 235 | if TaskManager::add_template_receiver_task(task_manager, tp_abortable) 236 | .await 237 | .is_err() 238 | { 239 | error!( 240 | "Task manager failed while trying to add template receiver task{}", 241 | error::Error::TaskManagerFailed 242 | ); 243 | drop(abortable); 244 | return None; 245 | }; 246 | Some(abortable) 247 | } 248 | 249 | // Used when tp is down or connection was unsuccessful to retry connection. 250 | async fn retry_connection(address: String) { 251 | let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(5)); 252 | loop { 253 | info!("TP Retrying connection...."); 254 | interval.tick().await; 255 | if tokio::net::TcpStream::connect(address.clone()) 256 | .await 257 | .is_ok() 258 | { 259 | info!("Successfully reconnected to TP: Restarting Proxy..."); 260 | if crate::TP_ADDRESS 261 | .safe_lock(|tp| *tp = Some(address)) 262 | .is_err() 263 | { 264 | error!("TP_ADDRESS Mutex failed"); 265 | std::process::exit(1); 266 | }; 267 | // This force the proxy to restart. If we use Up the proxy just ignore it. 
268 | // So updating it to Down and setting the TP_ADDRESS to Some(address) will make the 269 | // proxy restart with TP, the the TpState will be set to Up. 270 | ProxyState::update_tp_state(TpState::Down); 271 | break; 272 | } 273 | } 274 | } 275 | -------------------------------------------------------------------------------- /src/jd_client/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[derive(Debug)] 9 | #[allow(dead_code)] 10 | enum Task { 11 | JobDeclarator(AbortOnDrop), 12 | TemplateReceiver(AbortOnDrop), 13 | MiningUpstream(AbortOnDrop), 14 | MiningDowntream(AbortOnDrop), 15 | } 16 | 17 | #[derive(Debug)] 18 | pub struct TaskManager { 19 | send_task: mpsc::Sender, 20 | abort: Option, 21 | } 22 | 23 | impl TaskManager { 24 | #[allow(unused_variables)] 25 | pub fn initialize() -> Arc> { 26 | let (sender, mut receiver) = mpsc::channel(10); 27 | let handle = tokio::task::spawn(async move { 28 | let mut tasks = vec![]; 29 | while let Some(task) = receiver.recv().await { 30 | tasks.push(task); 31 | } 32 | warn!("Share accounter main task manager stopped, keep alive tasks"); 33 | loop { 34 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 35 | } 36 | }); 37 | Arc::new(Mutex::new(Self { 38 | send_task: sender, 39 | abort: Some(handle.into()), 40 | })) 41 | } 42 | 43 | pub fn get_aborter(&mut self) -> Option { 44 | self.abort.take() 45 | } 46 | 47 | pub async fn add_job_declarator_task( 48 | self_: Arc>, 49 | abortable: AbortOnDrop, 50 | ) -> Result<(), ()> { 51 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 52 | send_task 53 | .send(Task::JobDeclarator(abortable)) 54 | .await 55 | .map_err(|_| ()) 56 | } 57 | pub async fn add_template_receiver_task( 58 | self_: Arc>, 59 | abortable: AbortOnDrop, 60 | ) -> 
Result<(), ()> { 61 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 62 | send_task 63 | .send(Task::TemplateReceiver(abortable)) 64 | .await 65 | .map_err(|_| ()) 66 | } 67 | pub async fn add_mining_upstream_task( 68 | self_: Arc>, 69 | abortable: AbortOnDrop, 70 | ) -> Result<(), ()> { 71 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 72 | send_task 73 | .send(Task::MiningUpstream(abortable)) 74 | .await 75 | .map_err(|_| ()) 76 | } 77 | pub async fn add_mining_downtream_task( 78 | self_: Arc>, 79 | abortable: AbortOnDrop, 80 | ) -> Result<(), ()> { 81 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 82 | send_task 83 | .send(Task::MiningDowntream(abortable)) 84 | .await 85 | .map_err(|_| ()) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/jd_client/template_receiver/message_handler.rs: -------------------------------------------------------------------------------- 1 | use super::TemplateRx; 2 | use roles_logic_sv2::{ 3 | errors::Error, 4 | handlers::template_distribution::{ParseServerTemplateDistributionMessages, SendTo}, 5 | parsers::TemplateDistribution, 6 | template_distribution_sv2::*, 7 | }; 8 | 9 | impl ParseServerTemplateDistributionMessages for TemplateRx { 10 | fn handle_new_template(&mut self, m: NewTemplate) -> Result { 11 | let new_template = m.into_static(); 12 | let new_template = TemplateDistribution::NewTemplate(new_template); 13 | Ok(SendTo::None(Some(new_template))) 14 | } 15 | 16 | fn handle_set_new_prev_hash(&mut self, m: SetNewPrevHash) -> Result { 17 | let new_prev_hash = SetNewPrevHash { 18 | template_id: m.template_id, 19 | prev_hash: m.prev_hash.into_static(), 20 | header_timestamp: m.header_timestamp, 21 | n_bits: m.n_bits, 22 | target: m.target.into_static(), 23 | }; 24 | let new_prev_hash = TemplateDistribution::SetNewPrevHash(new_prev_hash); 25 | Ok(SendTo::None(Some(new_prev_hash))) 26 | } 27 | 28 | fn 
handle_request_tx_data_success( 29 | &mut self, 30 | m: RequestTransactionDataSuccess, 31 | ) -> Result { 32 | let m = RequestTransactionDataSuccess { 33 | transaction_list: m.transaction_list.into_static(), 34 | excess_data: m.excess_data.into_static(), 35 | template_id: m.template_id, 36 | }; 37 | let tx_received = TemplateDistribution::RequestTransactionDataSuccess(m); 38 | Ok(SendTo::None(Some(tx_received))) 39 | } 40 | 41 | fn handle_request_tx_data_error( 42 | &mut self, 43 | _m: RequestTransactionDataError, 44 | ) -> Result { 45 | let m = RequestTransactionDataError { 46 | template_id: _m.template_id, 47 | error_code: _m.error_code.into_static(), 48 | }; 49 | let error_code_string = 50 | std::str::from_utf8(m.error_code.as_ref()).unwrap_or("unknown error code"); 51 | match error_code_string { 52 | "template-id-not-found" => Err(Error::NoValidTemplate(error_code_string.to_string())), 53 | "stale-template-id" => Ok(SendTo::None(Some( 54 | TemplateDistribution::RequestTransactionDataError(m), 55 | ))), 56 | _ => Err(Error::NoValidTemplate(error_code_string.to_string())), 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/jd_client/template_receiver/setup_connection.rs: -------------------------------------------------------------------------------- 1 | use crate::jd_client::error::Error; 2 | use codec_sv2::{StandardEitherFrame, StandardSv2Frame}; 3 | use roles_logic_sv2::{ 4 | common_messages_sv2::{Protocol, SetupConnection}, 5 | handlers::common::{ParseUpstreamCommonMessages, SendTo}, 6 | parsers::PoolMessages, 7 | routing_logic::{CommonRoutingLogic, NoRouting}, 8 | utils::Mutex, 9 | }; 10 | use std::{convert::TryInto, net::SocketAddr, sync::Arc}; 11 | use tokio::sync::mpsc::{Receiver as TReceiver, Sender as TSender}; 12 | use tracing::error; 13 | pub type Message = PoolMessages<'static>; 14 | pub type StdFrame = StandardSv2Frame; 15 | pub type EitherFrame = StandardEitherFrame; 16 | pub struct 
SetupConnectionHandler {} 17 | 18 | impl SetupConnectionHandler { 19 | fn get_setup_connection_message(address: SocketAddr) -> SetupConnection<'static> { 20 | let endpoint_host = address.ip().to_string().into_bytes().try_into().expect("Internal error: this operation can not fail because IP address can always be converted into Inner"); 21 | let vendor = String::new().try_into().expect("Internal error: this operation can not fail empty String can always be converted into Inner"); 22 | let hardware_version = String::new().try_into().expect("Internal error: this operation can not fail empty String can always be converted into Inner"); 23 | let firmware = String::new().try_into().expect("Internal error: this operation can not fail empty String can always be converted into Inner"); 24 | let device_id = String::new().try_into().expect("Internal error: this operation can not fail empty String can always be converted into Inner"); 25 | SetupConnection { 26 | protocol: Protocol::TemplateDistributionProtocol, 27 | min_version: 2, 28 | max_version: 2, 29 | flags: 0b0000_0000_0000_0000_0000_0000_0000_0000, 30 | endpoint_host, 31 | endpoint_port: address.port(), 32 | vendor, 33 | hardware_version, 34 | firmware, 35 | device_id, 36 | } 37 | } 38 | 39 | pub async fn setup( 40 | receiver: &mut TReceiver, 41 | sender: &mut TSender, 42 | address: SocketAddr, 43 | ) -> Result<(), Error> { 44 | let setup_connection = Self::get_setup_connection_message(address); 45 | 46 | let sv2_frame: StdFrame = PoolMessages::Common(setup_connection.into()) 47 | .try_into() 48 | .expect("Internal error: this operation can not fail because PoolMessage::Common can always be converted into StdFrame"); 49 | let sv2_frame = sv2_frame.into(); 50 | sender 51 | .send(sv2_frame) 52 | .await 53 | .map_err(|_| Error::Unrecoverable)?; 54 | 55 | let mut incoming: StdFrame = match receiver.recv().await { 56 | Some(msg) => msg.try_into()?, 57 | None => { 58 | error!("Failed to parse incoming 
SetupConnectionResponse"); 59 | return Err(Error::Unrecoverable); 60 | } 61 | }; 62 | 63 | let message_type = match incoming.get_header() { 64 | Some(header) => header.msg_type(), 65 | None => { 66 | error!("Message header is None"); 67 | return Err(Error::Unrecoverable); 68 | } 69 | }; 70 | let payload = incoming.payload(); 71 | ParseUpstreamCommonMessages::handle_message_common( 72 | Arc::new(Mutex::new(SetupConnectionHandler {})), 73 | message_type, 74 | payload, 75 | CommonRoutingLogic::None, 76 | ) 77 | .map_err(Error::RolesSv2Logic)?; 78 | Ok(()) 79 | } 80 | } 81 | 82 | impl ParseUpstreamCommonMessages for SetupConnectionHandler { 83 | fn handle_setup_connection_success( 84 | &mut self, 85 | _: roles_logic_sv2::common_messages_sv2::SetupConnectionSuccess, 86 | ) -> Result { 87 | Ok(SendTo::None(None)) 88 | } 89 | 90 | fn handle_setup_connection_error( 91 | &mut self, 92 | _: roles_logic_sv2::common_messages_sv2::SetupConnectionError, 93 | ) -> Result { 94 | todo!() 95 | } 96 | 97 | fn handle_channel_endpoint_changed( 98 | &mut self, 99 | _: roles_logic_sv2::common_messages_sv2::ChannelEndpointChanged, 100 | ) -> Result { 101 | todo!() 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/jd_client/template_receiver/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[derive(Debug)] 9 | #[allow(dead_code)] 10 | enum Task { 11 | OnNewSolution(AbortOnDrop), 12 | MainTask(AbortOnDrop), 13 | } 14 | 15 | #[derive(Debug)] 16 | pub struct TaskManager { 17 | send_task: mpsc::Sender, 18 | abort: Option, 19 | } 20 | 21 | impl TaskManager { 22 | #[allow(unused_variables)] 23 | pub fn initialize() -> Arc> { 24 | let (sender, mut receiver) = mpsc::channel(10); 25 | let handle = tokio::task::spawn(async move { 
26 | let mut tasks = vec![]; 27 | while let Some(task) = receiver.recv().await { 28 | tasks.push(task); 29 | } 30 | warn!("Share accounter main task manager stopped, keep alive tasks"); 31 | loop { 32 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 33 | } 34 | }); 35 | Arc::new(Mutex::new(Self { 36 | send_task: sender, 37 | abort: Some(handle.into()), 38 | })) 39 | } 40 | 41 | pub fn get_aborter(&mut self) -> Option { 42 | self.abort.take() 43 | } 44 | 45 | pub async fn add_on_new_solution( 46 | self_: Arc>, 47 | abortable: AbortOnDrop, 48 | ) -> Result<(), ()> { 49 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 50 | send_task 51 | .send(Task::OnNewSolution(abortable)) 52 | .await 53 | .map_err(|_| ()) 54 | } 55 | pub async fn add_main_task(self_: Arc>, abortable: AbortOnDrop) -> Result<(), ()> { 56 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 57 | send_task 58 | .send(Task::MainTask(abortable)) 59 | .await 60 | .map_err(|_| ()) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(target_os = "windows"))] 2 | use jemallocator::Jemalloc; 3 | use router::Router; 4 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; 5 | #[cfg(not(target_os = "windows"))] 6 | #[global_allocator] 7 | static GLOBAL: Jemalloc = Jemalloc; 8 | 9 | use crate::shared::utils::AbortOnDrop; 10 | use config::Configuration; 11 | use key_utils::Secp256k1PublicKey; 12 | use lazy_static::lazy_static; 13 | use proxy_state::{PoolState, ProxyState, TpState, TranslatorState}; 14 | use std::{net::SocketAddr, time::Duration}; 15 | use tokio::sync::mpsc::channel; 16 | use tracing::{error, info, warn}; 17 | mod api; 18 | 19 | mod config; 20 | mod ingress; 21 | pub mod jd_client; 22 | mod minin_pool_connection; 23 | mod proxy_state; 24 | mod router; 25 | mod 
share_accounter; 26 | mod shared; 27 | mod translator; 28 | 29 | const TRANSLATOR_BUFFER_SIZE: usize = 32; 30 | const MIN_EXTRANONCE_SIZE: u16 = 6; 31 | const MIN_EXTRANONCE2_SIZE: u16 = 5; 32 | const UPSTREAM_EXTRANONCE1_SIZE: usize = 15; 33 | const DEFAULT_SV1_HASHPOWER: f32 = 100_000_000_000_000.0; 34 | const SHARE_PER_MIN: f32 = 10.0; 35 | const CHANNEL_DIFF_UPDTATE_INTERVAL: u32 = 10; 36 | const MAX_LEN_DOWN_MSG: u32 = 10000; 37 | const MAIN_AUTH_PUB_KEY: &str = "9bQHWXsQ2J9TRFTaxRh3KjoxdyLRfWVEy25YHtKF8y8gotLoCZZ"; 38 | const TEST_AUTH_PUB_KEY: &str = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72"; 39 | const DEFAULT_LISTEN_ADDRESS: &str = "0.0.0.0:32767"; 40 | 41 | lazy_static! { 42 | static ref SV1_DOWN_LISTEN_ADDR: String = 43 | Configuration::downstream_listening_addr().unwrap_or(DEFAULT_LISTEN_ADDRESS.to_string()); 44 | static ref TP_ADDRESS: roles_logic_sv2::utils::Mutex> = 45 | roles_logic_sv2::utils::Mutex::new(Configuration::tp_address()); 46 | static ref POOL_ADDRESS: roles_logic_sv2::utils::Mutex> = 47 | roles_logic_sv2::utils::Mutex::new(None); // Connected pool address 48 | static ref EXPECTED_SV1_HASHPOWER: f32 = Configuration::downstream_hashrate(); 49 | static ref API_SERVER_PORT: String = Configuration::api_server_port(); 50 | } 51 | 52 | lazy_static! { 53 | pub static ref AUTH_PUB_KEY: &'static str = if Configuration::test() { 54 | TEST_AUTH_PUB_KEY 55 | } else { 56 | MAIN_AUTH_PUB_KEY 57 | }; 58 | } 59 | 60 | #[tokio::main] 61 | async fn main() { 62 | let log_level = Configuration::loglevel(); 63 | 64 | let noise_connection_log_level = Configuration::nc_loglevel(); 65 | 66 | //Disable noise_connection error (for now) because: 67 | // 1. It produce logs that are not very user friendly and also bloat the logs 68 | // 2. The errors resulting from noise_connection are handled. 
E.g if unrecoverable error from noise connection occurs during Pool connection: We either retry connecting immediatley or we update Proxy state to Pool Down 69 | tracing_subscriber::registry() 70 | .with(tracing_subscriber::fmt::layer()) 71 | .with(tracing_subscriber::EnvFilter::new(format!( 72 | "{},demand_sv2_connection::noise_connection_tokio={}", 73 | log_level, noise_connection_log_level 74 | ))) 75 | .init(); 76 | Configuration::token().expect("TOKEN is not set"); 77 | 78 | if Configuration::test() { 79 | info!("Connecting to test endpoint..."); 80 | } 81 | 82 | let auth_pub_k: Secp256k1PublicKey = AUTH_PUB_KEY.parse().expect("Invalid public key"); 83 | 84 | let pool_addresses = Configuration::pool_address() 85 | .filter(|p| !p.is_empty()) 86 | .unwrap_or_else(|| { 87 | if Configuration::test() { 88 | panic!("Test pool address is missing"); 89 | } else { 90 | panic!("Pool address is missing"); 91 | } 92 | }); 93 | 94 | let mut router = router::Router::new(pool_addresses, auth_pub_k, None, None); 95 | let epsilon = Duration::from_millis(30_000); 96 | let best_upstream = router.select_pool_connect().await; 97 | initialize_proxy(&mut router, best_upstream, epsilon).await; 98 | info!("exiting"); 99 | tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; 100 | } 101 | 102 | async fn initialize_proxy( 103 | router: &mut Router, 104 | mut pool_addr: Option, 105 | epsilon: Duration, 106 | ) { 107 | loop { 108 | // Initial setup for the proxy 109 | let stats_sender = api::stats::StatsSender::new(); 110 | 111 | let (send_to_pool, recv_from_pool, pool_connection_abortable) = 112 | match router.connect_pool(pool_addr).await { 113 | Ok(connection) => connection, 114 | Err(_) => { 115 | error!("No upstream available. 
Retrying..."); 116 | warn!("Are you using the correct TOKEN??"); 117 | let mut secs = 10; 118 | while secs > 0 { 119 | tracing::warn!("Retrying in {} seconds...", secs); 120 | tokio::time::sleep(Duration::from_secs(1)).await; 121 | secs -= 1; 122 | } 123 | // Restart loop, esentially restarting proxy 124 | continue; 125 | } 126 | }; 127 | 128 | let (downs_sv1_tx, downs_sv1_rx) = channel(10); 129 | let sv1_ingress_abortable = ingress::sv1_ingress::start_listen_for_downstream(downs_sv1_tx); 130 | 131 | let (translator_up_tx, mut translator_up_rx) = channel(10); 132 | let translator_abortable = 133 | match translator::start(downs_sv1_rx, translator_up_tx, stats_sender.clone()).await { 134 | Ok(abortable) => abortable, 135 | Err(e) => { 136 | error!("Impossible to initialize translator: {e}"); 137 | // Impossible to start the proxy so we restart proxy 138 | ProxyState::update_translator_state(TranslatorState::Down); 139 | ProxyState::update_tp_state(TpState::Down); 140 | return; 141 | } 142 | }; 143 | 144 | let (from_jdc_to_share_accounter_send, from_jdc_to_share_accounter_recv) = channel(10); 145 | let (from_share_accounter_to_jdc_send, from_share_accounter_to_jdc_recv) = channel(10); 146 | let (jdc_to_translator_sender, jdc_from_translator_receiver, _) = translator_up_rx 147 | .recv() 148 | .await 149 | .expect("Translator failed before initialization"); 150 | 151 | let jdc_abortable: Option; 152 | let share_accounter_abortable; 153 | let tp = match TP_ADDRESS.safe_lock(|tp| tp.clone()) { 154 | Ok(tp) => tp, 155 | Err(e) => { 156 | error!("TP_ADDRESS Mutex Corrupted: {e}"); 157 | return; 158 | } 159 | }; 160 | 161 | if let Some(_tp_addr) = tp { 162 | jdc_abortable = jd_client::start( 163 | jdc_from_translator_receiver, 164 | jdc_to_translator_sender, 165 | from_share_accounter_to_jdc_recv, 166 | from_jdc_to_share_accounter_send, 167 | ) 168 | .await; 169 | if jdc_abortable.is_none() { 170 | ProxyState::update_tp_state(TpState::Down); 171 | }; 172 | 
share_accounter_abortable = match share_accounter::start( 173 | from_jdc_to_share_accounter_recv, 174 | from_share_accounter_to_jdc_send, 175 | recv_from_pool, 176 | send_to_pool, 177 | ) 178 | .await 179 | { 180 | Ok(abortable) => abortable, 181 | Err(_) => { 182 | error!("Failed to start share_accounter"); 183 | return; 184 | } 185 | } 186 | } else { 187 | jdc_abortable = None; 188 | 189 | share_accounter_abortable = match share_accounter::start( 190 | jdc_from_translator_receiver, 191 | jdc_to_translator_sender, 192 | recv_from_pool, 193 | send_to_pool, 194 | ) 195 | .await 196 | { 197 | Ok(abortable) => abortable, 198 | Err(_) => { 199 | error!("Failed to start share_accounter"); 200 | return; 201 | } 202 | }; 203 | }; 204 | 205 | // Collecting all abort handles 206 | let mut abort_handles = vec![ 207 | (pool_connection_abortable, "pool_connection".to_string()), 208 | (sv1_ingress_abortable, "sv1_ingress".to_string()), 209 | (translator_abortable, "translator".to_string()), 210 | (share_accounter_abortable, "share_accounter".to_string()), 211 | ]; 212 | if let Some(jdc_handle) = jdc_abortable { 213 | abort_handles.push((jdc_handle, "jdc".to_string())); 214 | } 215 | let server_handle = tokio::spawn(api::start(router.clone(), stats_sender)); 216 | match monitor(router, abort_handles, epsilon, server_handle).await { 217 | Reconnect::NewUpstream(new_pool_addr) => { 218 | ProxyState::update_proxy_state_up(); 219 | pool_addr = Some(new_pool_addr); 220 | continue; 221 | } 222 | Reconnect::NoUpstream => { 223 | ProxyState::update_proxy_state_up(); 224 | pool_addr = None; 225 | continue; 226 | } 227 | }; 228 | } 229 | } 230 | 231 | async fn monitor( 232 | router: &mut Router, 233 | abort_handles: Vec<(AbortOnDrop, std::string::String)>, 234 | epsilon: Duration, 235 | server_handle: tokio::task::JoinHandle<()>, 236 | ) -> Reconnect { 237 | let mut should_check_upstreams_latency = 0; 238 | loop { 239 | // Check if a better upstream exist every 100 seconds 240 | if 
should_check_upstreams_latency == 10 * 100 { 241 | should_check_upstreams_latency = 0; 242 | if let Some(new_upstream) = router.monitor_upstream(epsilon).await { 243 | info!("Faster upstream detected. Reinitializing proxy..."); 244 | drop(abort_handles); 245 | server_handle.abort(); // abort server 246 | 247 | // Needs a little to time to drop 248 | tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; 249 | return Reconnect::NewUpstream(new_upstream); 250 | } 251 | } 252 | 253 | // Monitor finished tasks 254 | if let Some((_handle, name)) = abort_handles 255 | .iter() 256 | .find(|(handle, _name)| handle.is_finished()) 257 | { 258 | error!("Task {:?} finished, Closing connection", name); 259 | for (handle, _name) in abort_handles { 260 | drop(handle); 261 | } 262 | server_handle.abort(); // abort server 263 | 264 | // Check if the proxy state is down, and if so, reinitialize the proxy. 265 | let is_proxy_down = ProxyState::is_proxy_down(); 266 | if is_proxy_down.0 { 267 | error!( 268 | "Status: {:?}. Reinitializing proxy...", 269 | is_proxy_down.1.unwrap_or("Proxy".to_string()) 270 | ); 271 | return Reconnect::NoUpstream; 272 | } else { 273 | return Reconnect::NoUpstream; 274 | } 275 | } 276 | 277 | // Check if the proxy state is down, and if so, reinitialize the proxy. 278 | let is_proxy_down = ProxyState::is_proxy_down(); 279 | if is_proxy_down.0 { 280 | error!( 281 | "{:?} is DOWN. 
Reinitializing proxy...", 282 | is_proxy_down.1.unwrap_or("Proxy".to_string()) 283 | ); 284 | drop(abort_handles); // Drop all abort handles 285 | server_handle.abort(); // abort server 286 | tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; // Needs a little to time to drop 287 | return Reconnect::NoUpstream; 288 | } 289 | 290 | should_check_upstreams_latency += 1; 291 | tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; 292 | } 293 | } 294 | 295 | pub enum Reconnect { 296 | NewUpstream(std::net::SocketAddr), // Reconnecting with a new upstream 297 | NoUpstream, // Reconnecting without upstream 298 | } 299 | 300 | enum HashUnit { 301 | Tera, 302 | Peta, 303 | Exa, 304 | } 305 | 306 | impl HashUnit { 307 | /// Returns the multiplier for each unit in h/s 308 | fn multiplier(&self) -> f32 { 309 | match self { 310 | HashUnit::Tera => 1e12, 311 | HashUnit::Peta => 1e15, 312 | HashUnit::Exa => 1e18, 313 | } 314 | } 315 | 316 | // Converts a unit string (e.g., "T") to a HashUnit variant 317 | fn from_str(s: &str) -> Option { 318 | match s.to_uppercase().as_str() { 319 | "T" => Some(HashUnit::Tera), 320 | "P" => Some(HashUnit::Peta), 321 | "E" => Some(HashUnit::Exa), 322 | _ => None, 323 | } 324 | } 325 | 326 | /// Formats a hashrate value (f32) into a string with the appropriate unit 327 | fn format_value(hashrate: f32) -> String { 328 | if hashrate >= 1e18 { 329 | format!("{:.2}E", hashrate / 1e18) 330 | } else if hashrate >= 1e15 { 331 | format!("{:.2}P", hashrate / 1e15) 332 | } else if hashrate >= 1e12 { 333 | format!("{:.2}T", hashrate / 1e12) 334 | } else { 335 | format!("{:.2}", hashrate) 336 | } 337 | } 338 | } 339 | -------------------------------------------------------------------------------- /src/minin_pool_connection/errors.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | #[derive(Debug)] 4 | pub enum Error { 5 | BinarySv2(binary_sv2::Error), 6 | /// Errors on bad 
noise handshake. 7 | SV2Connection(demand_sv2_connection::Error), 8 | /// Errors from `framing_sv2` crate. 9 | FramingSv2(framing_sv2::Error), 10 | /// Errors on bad `TcpStream` connection. 11 | Io(std::io::Error), 12 | /// Errors on bad `String` to `int` conversion. 13 | RolesSv2Logic(roles_logic_sv2::errors::Error), 14 | UpstreamIncoming(roles_logic_sv2::errors::Error), 15 | Timeout, 16 | Unrecoverable, 17 | UnexpectedMessage, 18 | MiningPoolMutexCorrupted, 19 | MiningPoolTaskManagerFailed, 20 | } 21 | 22 | impl fmt::Display for Error { 23 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 24 | use Error::*; 25 | match self { 26 | BinarySv2(ref e) => write!(f, "Binary SV2 error: `{:?}`", e), 27 | SV2Connection(ref e) => write!(f, "Demand SV2 connectiom error: `{:?}", e), 28 | FramingSv2(ref e) => write!(f, "Framing SV2 error: `{:?}`", e), 29 | Io(ref e) => write!(f, "I/O error: `{:?}", e), 30 | RolesSv2Logic(ref e) => write!(f, "Roles SV2 Logic Error: `{:?}`", e), 31 | UpstreamIncoming(ref e) => write!(f, "Upstream parse incoming error: `{:?}`", e), 32 | Unrecoverable => write!(f, "Unrecoverable error"), 33 | UnexpectedMessage => write!(f, "Unexpected Message Type"), 34 | Timeout => write!(f, "Timeout Elapsed"), 35 | MiningPoolMutexCorrupted => write!(f, "Mining Pool Mutex Corrupted"), 36 | MiningPoolTaskManagerFailed => write!(f, "Mining Pool TaskManager Error"), 37 | } 38 | } 39 | } 40 | 41 | impl From for Error { 42 | fn from(e: binary_sv2::Error) -> Self { 43 | Error::BinarySv2(e) 44 | } 45 | } 46 | 47 | impl From for Error { 48 | fn from(e: demand_sv2_connection::Error) -> Self { 49 | Error::SV2Connection(e) 50 | } 51 | } 52 | 53 | impl From for Error { 54 | fn from(e: framing_sv2::Error) -> Self { 55 | Error::FramingSv2(e) 56 | } 57 | } 58 | 59 | impl From for Error { 60 | fn from(e: std::io::Error) -> Self { 61 | Error::Io(e) 62 | } 63 | } 64 | 65 | impl From for Error { 66 | fn from(e: roles_logic_sv2::errors::Error) -> Self { 67 | 
Error::RolesSv2Logic(e) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/minin_pool_connection/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod errors; 2 | mod task_manager; 3 | 4 | use std::net::SocketAddr; 5 | 6 | use codec_sv2::{HandshakeRole, StandardEitherFrame, StandardSv2Frame}; 7 | use demand_share_accounting_ext::parser::PoolExtMessages; 8 | use demand_sv2_connection::noise_connection_tokio::Connection; 9 | use errors::Error; 10 | use key_utils::Secp256k1PublicKey; 11 | use noise_sv2::Initiator; 12 | use rand::distributions::{Alphanumeric, DistString}; 13 | use roles_logic_sv2::{ 14 | common_messages_sv2::{Protocol, SetupConnection, SetupConnectionSuccess}, 15 | parsers::CommonMessages, 16 | }; 17 | use tokio::net::TcpStream; 18 | use tokio::sync::mpsc::{Receiver, Sender}; 19 | use tracing::{error, info}; 20 | 21 | use crate::{ 22 | config::Configuration, proxy_state::ProxyState, shared::utils::AbortOnDrop, PoolState, 23 | }; 24 | use task_manager::TaskManager; 25 | 26 | pub type Message = PoolExtMessages<'static>; 27 | pub type StdFrame = StandardSv2Frame; 28 | pub type EitherFrame = StandardEitherFrame; 29 | 30 | const DEFAULT_TIMER: std::time::Duration = std::time::Duration::from_secs(5); 31 | 32 | pub async fn connect_pool( 33 | address: SocketAddr, 34 | authority_public_key: Secp256k1PublicKey, 35 | setup_connection_msg: Option>, 36 | timer: Option, 37 | ) -> Result< 38 | ( 39 | Sender>, 40 | Receiver>, 41 | AbortOnDrop, 42 | ), 43 | Error, 44 | > { 45 | let socket = loop { 46 | match TcpStream::connect(address).await { 47 | Ok(socket) => break socket, 48 | Err(e) => { 49 | error!( 50 | "Failed to connect to Upstream role at {}, retrying in 5s: {}", 51 | address, e 52 | ); 53 | tokio::time::sleep(std::time::Duration::from_secs(5)).await 54 | } 55 | } 56 | }; 57 | 58 | let initiator = 59 | 
Initiator::from_raw_k(authority_public_key.into_bytes()).expect("Invalid authority key"); 60 | 61 | info!( 62 | "PROXY SERVER - ACCEPTING FROM UPSTREAM: {}", 63 | socket.peer_addr().expect("Failed to get peer address") 64 | ); 65 | 66 | // Channel to send and receive messages to the SV2 Upstream role 67 | let (mut receiver, mut sender, _, _) = 68 | Connection::new(socket, HandshakeRole::Initiator(initiator)) 69 | .await 70 | .map_err(|e| { 71 | error!("Failed to create connection"); 72 | Error::SV2Connection(e) 73 | })?; 74 | let setup_connection_msg = 75 | setup_connection_msg.unwrap_or(get_mining_setup_connection_msg(true)); 76 | match mining_setup_connection( 77 | &mut receiver, 78 | &mut sender, 79 | setup_connection_msg, 80 | timer.unwrap_or(DEFAULT_TIMER), 81 | ) 82 | .await 83 | { 84 | Ok(_) => { 85 | let task_manager = TaskManager::initialize(); 86 | let abortable = task_manager 87 | .safe_lock(|t| t.get_aborter()) 88 | .map_err(|_| Error::MiningPoolMutexCorrupted)? 89 | .ok_or(Error::MiningPoolTaskManagerFailed)?; 90 | 91 | let (send_to_down, recv_from_down) = tokio::sync::mpsc::channel(10); 92 | let (send_from_down, recv_to_up) = tokio::sync::mpsc::channel(10); 93 | let relay_up_task = relay_up(recv_to_up, sender); 94 | TaskManager::add_sv2_relay_up(task_manager.clone(), relay_up_task) 95 | .await 96 | .map_err(|_| Error::MiningPoolTaskManagerFailed)?; 97 | 98 | let relay_down_task = relay_down(receiver, send_to_down); 99 | TaskManager::add_sv2_relay_down(task_manager.clone(), relay_down_task) 100 | .await 101 | .map_err(|_| Error::MiningPoolTaskManagerFailed)?; 102 | Ok((send_from_down, recv_from_down, abortable)) 103 | } 104 | Err(e) => Err(e), 105 | } 106 | } 107 | 108 | pub fn relay_up( 109 | mut recv: Receiver>, 110 | send: Sender, 111 | ) -> AbortOnDrop { 112 | let task = tokio::spawn(async move { 113 | while let Some(msg) = recv.recv().await { 114 | let std_frame: Result = msg.try_into(); 115 | if let Ok(std_frame) = std_frame { 116 | let 
either_frame: EitherFrame = std_frame.into(); 117 | if send.send(either_frame).await.is_err() { 118 | error!("Mining upstream failed"); 119 | ProxyState::update_pool_state(PoolState::Down); 120 | break; 121 | }; 122 | } else { 123 | panic!("Internal Mining downstream try to send invalid message"); 124 | } 125 | } 126 | }); 127 | task.into() 128 | } 129 | 130 | pub fn relay_down( 131 | mut recv: Receiver, 132 | send: Sender>, 133 | ) -> AbortOnDrop { 134 | let task = tokio::spawn(async move { 135 | while let Some(msg) = recv.recv().await { 136 | let msg: Result = msg.try_into().map_err(|_| ()); 137 | if let Ok(mut msg) = msg { 138 | if let Some(header) = msg.get_header() { 139 | let message_type = header.msg_type(); 140 | let payload = msg.payload(); 141 | let extension = header.ext_type(); 142 | let msg: Result, _> = 143 | (extension, message_type, payload).try_into(); 144 | if let Ok(msg) = msg { 145 | let msg = msg.into_static(); 146 | if send.send(msg).await.is_err() { 147 | error!("Internal Mining downstream not available"); 148 | 149 | // Update Proxy state to reflect Internal inconsistency 150 | ProxyState::update_inconsistency(Some(1)); 151 | } 152 | } else { 153 | error!("Mining Upstream send non Mining message. Disconnecting"); 154 | break; 155 | } 156 | } else { 157 | error!("Mining Upstream send invalid message no header. 
Disconnecting"); 158 | break; 159 | } 160 | } else { 161 | error!("Mining Upstream down."); 162 | break; 163 | } 164 | } 165 | error!("Failed to receive msg from Pool"); 166 | ProxyState::update_pool_state(PoolState::Down); 167 | }); 168 | task.into() 169 | } 170 | 171 | pub async fn mining_setup_connection( 172 | recv: &mut Receiver, 173 | send: &mut Sender, 174 | setup_conection: SetupConnection<'static>, 175 | timer: std::time::Duration, 176 | ) -> Result { 177 | let msg = PoolExtMessages::Common(CommonMessages::SetupConnection(setup_conection)); 178 | let std_frame: StdFrame = match msg.try_into() { 179 | Ok(frame) => frame, 180 | Err(e) => { 181 | error!("Failed to convert PoolExtMessages to StdFrame."); 182 | return Err(Error::RolesSv2Logic(e)); 183 | } 184 | }; 185 | let either_frame: EitherFrame = std_frame.into(); 186 | if send.send(either_frame).await.is_err() { 187 | error!("Failed to send Eitherframe"); 188 | return Err(Error::Unrecoverable); 189 | } 190 | if let Ok(Some(msg)) = tokio::time::timeout(timer, recv.recv()).await { 191 | let mut msg: StdFrame = msg.try_into().map_err(Error::FramingSv2)?; 192 | let header = msg.get_header().ok_or(Error::UnexpectedMessage)?; 193 | let message_type = header.msg_type(); 194 | let payload = msg.payload(); 195 | let msg: CommonMessages<'_> = match (message_type, payload).try_into() { 196 | Ok(message) => message, 197 | Err(e) => { 198 | error!("Unexpected Message: {e}"); 199 | return Err(Error::UpstreamIncoming(e)); 200 | } 201 | }; 202 | match msg { 203 | CommonMessages::SetupConnectionSuccess(s) => Ok(s), 204 | e => { 205 | error!("Unexpected Message: {e:?}"); 206 | Err(Error::UnexpectedMessage) 207 | } 208 | } 209 | } else { 210 | error!("Failed to setup connection: Timeout"); 211 | Err(Error::Timeout) 212 | } 213 | } 214 | 215 | pub fn get_mining_setup_connection_msg(work_selection: bool) -> SetupConnection<'static> { 216 | let endpoint_host = "0.0.0.0".to_string().into_bytes().try_into().expect("Internal 
error: this operation can not fail because the string 0.0.0.0 can always be converted into Inner"); 217 | let vendor = String::new().try_into().expect("Internal error: this operation can not fail because an empty string can always be converted into Inner"); 218 | let hardware_version = String::new().try_into().expect("Internal error: this operation can not fail because an empty string can always be converted into Inner"); 219 | let firmware = String::new().try_into().expect("Internal error: this operation can not fail because an empty string can always be converted into Inner"); 220 | let flags = match work_selection { 221 | false => 0b0000_0000_0000_0000_0000_0000_0000_0100, 222 | true => 0b0000_0000_0000_0000_0000_0000_0000_0110, 223 | }; 224 | let token = Configuration::token().expect("Checked at initialization"); 225 | let device_id = Alphanumeric.sample_string(&mut rand::thread_rng(), 16); 226 | let device_id = format!("{}::POOLED::{}", device_id, token) 227 | .to_string() 228 | .try_into() 229 | .expect("Internal error: this operation can not fail because an device_id can always be converted into Inner"); 230 | SetupConnection { 231 | protocol: Protocol::MiningProtocol, 232 | min_version: 2, 233 | max_version: 2, 234 | flags, 235 | endpoint_host, 236 | endpoint_port: 50, 237 | vendor, 238 | hardware_version, 239 | firmware, 240 | device_id, 241 | } 242 | } 243 | -------------------------------------------------------------------------------- /src/minin_pool_connection/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[derive(Debug)] 9 | #[allow(dead_code)] 10 | enum Task { 11 | Sv2UpRelayUp(AbortOnDrop), 12 | Sv2UpRelayDown(AbortOnDrop), 13 | } 14 | 15 | pub struct TaskManager { 16 | send_task: mpsc::Sender, 17 | abort: Option, 18 | } 19 | 20 | 
impl TaskManager { 21 | pub fn initialize() -> Arc> { 22 | let (sender, mut receiver) = mpsc::channel(10); 23 | let handle = tokio::task::spawn(async move { 24 | let mut tasks = vec![]; 25 | while let Some(task) = receiver.recv().await { 26 | tasks.push(task); 27 | } 28 | warn!("Ingress task manager stopped, keep alive tasks"); 29 | loop { 30 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 31 | } 32 | }); 33 | Arc::new(Mutex::new(Self { 34 | send_task: sender, 35 | abort: Some(handle.into()), 36 | })) 37 | } 38 | 39 | pub fn get_aborter(&mut self) -> Option { 40 | self.abort.take() 41 | } 42 | pub async fn add_sv2_relay_up( 43 | self_: Arc>, 44 | abortable: AbortOnDrop, 45 | ) -> Result<(), ()> { 46 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 47 | send_task 48 | .send(Task::Sv2UpRelayUp(abortable)) 49 | .await 50 | .map_err(|_| ()) 51 | } 52 | pub async fn add_sv2_relay_down( 53 | self_: Arc>, 54 | abortable: AbortOnDrop, 55 | ) -> Result<(), ()> { 56 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 57 | send_task 58 | .send(Task::Sv2UpRelayDown(abortable)) 59 | .await 60 | .map_err(|_| ()) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/proxy_state.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use lazy_static::lazy_static; 4 | use roles_logic_sv2::utils::Mutex; 5 | use serde::{Deserialize, Serialize}; 6 | use tracing::{error, info}; 7 | 8 | lazy_static! 
{ 9 | static ref PROXY_STATE: Arc> = Arc::new(Mutex::new(ProxyState::new())); 10 | } 11 | 12 | /// Main enum representing the overall state of the proxy 13 | #[derive(Debug, Clone, PartialEq)] 14 | pub enum ProxyStates { 15 | Pool(PoolState), 16 | Tp(TpState), 17 | Jd(JdState), 18 | ShareAccounter(ShareAccounterState), 19 | InternalInconsistency(u32), 20 | Downstream(DownstreamState), 21 | Upstream(UpstreamState), 22 | Translator(TranslatorState), 23 | } 24 | 25 | /// Represents the state of the pool 26 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 27 | pub enum PoolState { 28 | Up, 29 | Down, 30 | } 31 | 32 | /// Represents the state of the Tp 33 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 34 | pub enum TpState { 35 | Up, 36 | Down, 37 | } 38 | 39 | /// Represents the state of the Translator 40 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 41 | pub enum TranslatorState { 42 | Up, 43 | Down, 44 | } 45 | 46 | /// Represents the state of the JD 47 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 48 | pub enum JdState { 49 | Up, 50 | Down, 51 | } 52 | 53 | /// Represents the state of the Share Accounter 54 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 55 | pub enum ShareAccounterState { 56 | Up, 57 | Down, 58 | } 59 | 60 | /// Represents the state of the Downstream 61 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 62 | pub enum DownstreamState { 63 | Up, 64 | Down(Vec), // A specific downstream is down 65 | } 66 | 67 | /// Represents the state of the Upstream 68 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 69 | pub enum UpstreamState { 70 | Up, 71 | Down(Vec), // A specific upstream is down 72 | } 73 | 74 | /// Represents different downstreams 75 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 76 | pub enum DownstreamType { 77 | JdClientMiningDownstream, 78 | TranslatorDownstream, 79 | } 80 | 81 | /// Represents different 
upstreams 82 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 83 | pub enum UpstreamType { 84 | JDCMiningUpstream, 85 | TranslatorUpstream, 86 | } 87 | 88 | /// Represents global proxy state 89 | #[derive(Debug, Serialize, Deserialize)] 90 | pub struct ProxyState { 91 | pub pool: PoolState, 92 | pub tp: TpState, 93 | pub jd: JdState, 94 | pub share_accounter: ShareAccounterState, 95 | pub translator: TranslatorState, 96 | pub inconsistency: Option, 97 | pub downstream: DownstreamState, 98 | pub upstream: UpstreamState, 99 | } 100 | 101 | impl ProxyState { 102 | pub fn new() -> Self { 103 | Self { 104 | pool: PoolState::Up, 105 | tp: TpState::Up, 106 | jd: JdState::Up, 107 | share_accounter: ShareAccounterState::Up, 108 | translator: TranslatorState::Up, 109 | inconsistency: None, 110 | downstream: DownstreamState::Up, 111 | upstream: UpstreamState::Up, 112 | } 113 | } 114 | 115 | pub fn update_pool_state(pool_state: PoolState) { 116 | info!("Updating PoolState state to {:?}", pool_state); 117 | if PROXY_STATE 118 | .safe_lock(|state| { 119 | state.pool = pool_state; 120 | // // state.update_proxy_state(); 121 | }) 122 | .is_err() 123 | { 124 | error!("Global Proxy Mutex Corrupted"); 125 | std::process::exit(1); 126 | } 127 | } 128 | 129 | pub fn update_tp_state(tp_state: TpState) { 130 | info!("Updating TpState state to {:?}", tp_state); 131 | if PROXY_STATE 132 | .safe_lock(|state| { 133 | state.tp = tp_state; 134 | }) 135 | .is_err() 136 | { 137 | error!("Global Proxy Mutex Corrupted"); 138 | std::process::exit(1); 139 | } 140 | } 141 | 142 | pub fn update_jd_state(jd_state: JdState) { 143 | info!("Updating JdState state to {:?}", jd_state); 144 | if PROXY_STATE 145 | .safe_lock(|state| { 146 | state.jd = jd_state; 147 | }) 148 | .is_err() 149 | { 150 | error!("Global Proxy Mutex Corrupted"); 151 | std::process::exit(1); 152 | } 153 | } 154 | 155 | pub fn update_translator_state(translator_state: TranslatorState) { 156 | info!("Updating Translator 
state to {:?}", translator_state); 157 | if PROXY_STATE 158 | .safe_lock(|state| { 159 | state.translator = translator_state; 160 | }) 161 | .is_err() 162 | { 163 | error!("Global Proxy Mutex Corrupted"); 164 | std::process::exit(1); 165 | } 166 | } 167 | 168 | pub fn update_share_accounter_state(share_accounter_state: ShareAccounterState) { 169 | info!( 170 | "Updating ShareAccounterState state to {:?}", 171 | share_accounter_state 172 | ); 173 | if PROXY_STATE 174 | .safe_lock(|state| { 175 | state.share_accounter = share_accounter_state; 176 | }) 177 | .is_err() 178 | { 179 | error!("Global Proxy Mutex Corrupted"); 180 | std::process::exit(1); 181 | } 182 | } 183 | 184 | pub fn update_inconsistency(code: Option) { 185 | info!("Updating Internal Inconsistency state to {:?}", code); 186 | if PROXY_STATE 187 | .safe_lock(|state| { 188 | state.inconsistency = code; 189 | }) 190 | .is_err() 191 | { 192 | error!("Global Proxy Mutex Corrupted"); 193 | std::process::exit(1); 194 | } 195 | } 196 | 197 | pub fn update_downstream_state(downstream_type: DownstreamType) { 198 | info!("Updating Downstream state to {:?}", downstream_type); 199 | if PROXY_STATE 200 | .safe_lock(|state| { 201 | state.downstream = DownstreamState::Down(vec![downstream_type]); 202 | }) 203 | .is_err() 204 | { 205 | error!("Global Proxy Mutex Corrupted"); 206 | std::process::exit(1); 207 | } 208 | } 209 | 210 | pub fn update_upstream_state(upstream_type: UpstreamType) { 211 | info!("Updating Upstream state to {:?}", upstream_type); 212 | if PROXY_STATE 213 | .safe_lock(|state| { 214 | state.upstream = UpstreamState::Down(vec![upstream_type]); 215 | }) 216 | .is_err() 217 | { 218 | error!("Global Proxy Mutex Corrupted"); 219 | std::process::exit(1); 220 | } 221 | } 222 | 223 | pub fn update_proxy_state_up() { 224 | if PROXY_STATE 225 | .safe_lock(|state| { 226 | state.pool = PoolState::Up; 227 | state.jd = JdState::Up; 228 | state.translator = TranslatorState::Up; 229 | state.tp = TpState::Up; 230 | 
state.share_accounter = ShareAccounterState::Up; 231 | state.upstream = UpstreamState::Up; 232 | state.downstream = DownstreamState::Up; 233 | state.inconsistency = None; 234 | }) 235 | .is_err() 236 | { 237 | error!("Global Proxy Mutex Corrupted"); 238 | std::process::exit(1); 239 | } 240 | } 241 | 242 | pub fn is_proxy_down() -> (bool, Option) { 243 | let errors = Self::get_errors(); 244 | if errors.is_ok() && errors.as_ref().unwrap().is_empty() { 245 | (false, None) 246 | } else { 247 | let error_descriptions: Vec = 248 | errors.iter().map(|e| format!("{:?}", e)).collect(); 249 | (true, Some(error_descriptions.join(", "))) 250 | } 251 | } 252 | 253 | pub fn get_errors() -> Result, ()> { 254 | let mut errors = Vec::new(); 255 | if PROXY_STATE 256 | .safe_lock(|state| { 257 | if state.pool == PoolState::Down { 258 | errors.push(ProxyStates::Pool(state.pool)); 259 | } 260 | if state.tp == TpState::Down { 261 | errors.push(ProxyStates::Tp(state.tp)); 262 | } 263 | if state.jd == JdState::Down { 264 | errors.push(ProxyStates::Jd(state.jd)); 265 | } 266 | if state.share_accounter == ShareAccounterState::Down { 267 | errors.push(ProxyStates::ShareAccounter(state.share_accounter)); 268 | } 269 | if state.translator == TranslatorState::Down { 270 | errors.push(ProxyStates::Translator(state.translator)); 271 | } 272 | if let Some(inconsistency) = state.inconsistency { 273 | errors.push(ProxyStates::InternalInconsistency(inconsistency)); 274 | } 275 | if matches!(state.downstream, DownstreamState::Down(_)) { 276 | errors.push(ProxyStates::Downstream(state.downstream.clone())); 277 | } 278 | if matches!(state.upstream, UpstreamState::Down(_)) { 279 | errors.push(ProxyStates::Upstream(state.upstream.clone())); 280 | } 281 | }) 282 | .is_err() 283 | { 284 | error!("Global Proxy Mutex Corrupted"); 285 | std::process::exit(1); 286 | } else { 287 | Ok(errors) 288 | } 289 | } 290 | } 291 | -------------------------------------------------------------------------------- 
/src/router/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | net::SocketAddr, 3 | time::{Duration, Instant}, 4 | }; 5 | 6 | use crate::jd_client::job_declarator::{setup_connection::SetupConnectionHandler, JobDeclarator}; 7 | use codec_sv2::{buffer_sv2::Slice, HandshakeRole}; 8 | use demand_share_accounting_ext::parser::PoolExtMessages; 9 | use demand_sv2_connection::noise_connection_tokio::Connection; 10 | use key_utils::Secp256k1PublicKey; 11 | use noise_sv2::Initiator; 12 | use roles_logic_sv2::{common_messages_sv2::SetupConnection, parsers::Mining}; 13 | use tokio::{ 14 | net::TcpStream, 15 | sync::{ 16 | mpsc::{Receiver, Sender}, 17 | watch, 18 | }, 19 | }; 20 | use tracing::{error, info}; 21 | 22 | use crate::{ 23 | minin_pool_connection::{self, get_mining_setup_connection_msg, mining_setup_connection}, 24 | shared::utils::AbortOnDrop, 25 | }; 26 | 27 | /// Router handles connection to Multiple upstreams. 28 | #[derive(Clone)] 29 | pub struct Router { 30 | pool_addresses: Vec, 31 | pub current_pool: Option, 32 | auth_pub_k: Secp256k1PublicKey, 33 | setup_connection_msg: Option>, 34 | timer: Option, 35 | latency_tx: watch::Sender>, 36 | pub latency_rx: watch::Receiver>, 37 | } 38 | 39 | impl Router { 40 | /// Creates a new `Router` instance with the specified upstream addresses. 41 | pub fn new( 42 | pool_addresses: Vec, 43 | auth_pub_k: Secp256k1PublicKey, 44 | // Configuration msg used to setup connection between client and pool 45 | // If not, present `get_mining_setup_connection_msg()` is called to generated default values 46 | setup_connection_msg: Option>, 47 | // Max duration for pool setup after which it times out. 48 | // If None, default time of 5s is used. 
49 | timer: Option, 50 | ) -> Self { 51 | let (latency_tx, latency_rx) = watch::channel(None); 52 | Self { 53 | pool_addresses, 54 | current_pool: None, 55 | auth_pub_k, 56 | setup_connection_msg, 57 | timer, 58 | latency_tx, 59 | latency_rx, 60 | } 61 | } 62 | 63 | /// Internal function to select pool with the least latency. 64 | async fn select_pool(&self) -> Option<(SocketAddr, Duration)> { 65 | let mut best_pool = None; 66 | let mut least_latency = Duration::MAX; 67 | 68 | for &pool_addr in &self.pool_addresses { 69 | if let Ok(latency) = self.get_latency(pool_addr).await { 70 | if latency < least_latency { 71 | least_latency = latency; 72 | best_pool = Some(pool_addr) 73 | } 74 | } 75 | } 76 | 77 | best_pool.map(|pool| (pool, least_latency)) 78 | } 79 | 80 | /// Select the best pool for connection 81 | pub async fn select_pool_connect(&self) -> Option { 82 | info!("Selecting the best upstream "); 83 | if let Some((pool, latency)) = self.select_pool().await { 84 | info!("Latency for upstream {:?} is {:?}", pool, latency); 85 | self.latency_tx.send_replace(Some(latency)); // update latency 86 | Some(pool) 87 | } else { 88 | //info!("No available pool"); 89 | None 90 | } 91 | } 92 | 93 | /// Select the best pool for monitoring 94 | async fn select_pool_monitor(&self, epsilon: Duration) -> Option { 95 | if let Some((best_pool, best_pool_latency)) = self.select_pool().await { 96 | if let Some(current_pool) = self.current_pool { 97 | if best_pool == current_pool { 98 | return None; 99 | } 100 | let current_latency = match self.get_latency(current_pool).await { 101 | Ok(latency) => latency, 102 | Err(e) => { 103 | error!("Failed to get latency: {:?}", e); 104 | return None; 105 | } 106 | }; 107 | // saturating_sub is used to avoid panic on negative duration result 108 | if best_pool_latency < current_latency.saturating_sub(epsilon) { 109 | info!( 110 | "Found faster pool: {:?} with latency {:?}", 111 | best_pool, best_pool_latency 112 | ); 113 | return 
Some(best_pool); 114 | } else { 115 | return None; 116 | } 117 | } else { 118 | return Some(best_pool); 119 | } 120 | } 121 | None 122 | } 123 | 124 | /// Selects the best upstream and connects to. 125 | /// Uses minin_pool_connection::connect_pool 126 | pub async fn connect_pool( 127 | &mut self, 128 | pool_addr: Option, 129 | ) -> Result< 130 | ( 131 | tokio::sync::mpsc::Sender>, 132 | tokio::sync::mpsc::Receiver>, 133 | AbortOnDrop, 134 | ), 135 | minin_pool_connection::errors::Error, 136 | > { 137 | let pool = match pool_addr { 138 | Some(addr) => addr, 139 | None => match self.select_pool_connect().await { 140 | Some(addr) => addr, 141 | // Called when we initialize the proxy, without a valid pool we can not start mine and we 142 | // return Err 143 | None => { 144 | return Err(minin_pool_connection::errors::Error::Unrecoverable); 145 | } 146 | }, 147 | }; 148 | self.current_pool = Some(pool); 149 | 150 | info!("Upstream {:?} selected", pool); 151 | 152 | match minin_pool_connection::connect_pool( 153 | pool, 154 | self.auth_pub_k, 155 | self.setup_connection_msg.clone(), 156 | self.timer, 157 | ) 158 | .await 159 | { 160 | Ok((send_to_pool, recv_from_pool, pool_connection_abortable)) => { 161 | crate::POOL_ADDRESS 162 | .safe_lock(|pool_address| { 163 | *pool_address = Some(pool); 164 | }) 165 | .unwrap_or_else(|_| { 166 | error!("Pool address Mutex corrupt"); 167 | crate::proxy_state::ProxyState::update_inconsistency(Some(1)); 168 | }); 169 | 170 | Ok((send_to_pool, recv_from_pool, pool_connection_abortable)) 171 | } 172 | 173 | Err(e) => Err(e), 174 | } 175 | } 176 | 177 | /// Returns the sum all the latencies for a given upstream 178 | async fn get_latency(&self, pool_address: SocketAddr) -> Result { 179 | let mut pool = PoolLatency::new(pool_address); 180 | let setup_connection_msg = self.setup_connection_msg.as_ref(); 181 | let timer = self.timer.as_ref(); 182 | let auth_pub_key = self.auth_pub_k; 183 | 184 | tokio::time::timeout( 185 | 
Duration::from_secs(15), 186 | PoolLatency::get_mining_setup_latencies( 187 | &mut pool, 188 | setup_connection_msg.cloned(), 189 | timer.cloned(), 190 | auth_pub_key, 191 | ), 192 | ) 193 | .await 194 | .map_err(|_| { 195 | error!( 196 | "Failed to get mining setup latencies for {:?}: Timeout", 197 | pool_address 198 | ); 199 | })??; 200 | 201 | if (PoolLatency::get_mining_setup_latencies( 202 | &mut pool, 203 | setup_connection_msg.cloned(), 204 | timer.cloned(), 205 | auth_pub_key, 206 | ) 207 | .await) 208 | .is_err() 209 | { 210 | error!( 211 | "Failed to get mining setup latencies for: {:?}", 212 | pool_address 213 | ); 214 | return Err(()); 215 | } 216 | if (PoolLatency::get_jd_latencies(&mut pool, auth_pub_key).await).is_err() { 217 | error!("Failed to get jd setup latencies for: {:?}", pool_address); 218 | return Err(()); 219 | } 220 | 221 | let latencies = [ 222 | pool.open_sv2_mining_connection, 223 | pool.setup_a_channel, 224 | pool.receive_first_job, 225 | pool.receive_first_set_new_prev_hash, 226 | pool.open_sv2_jd_connection, 227 | pool.get_a_mining_token, 228 | ]; 229 | // Get sum of all latencies for pool 230 | let sum_of_latencies: Duration = latencies.iter().flatten().sum(); 231 | Ok(sum_of_latencies) 232 | } 233 | 234 | /// Checks for faster upstream switch to it if found 235 | pub async fn monitor_upstream(&mut self, epsilon: Duration) -> Option { 236 | if let Some(best_pool) = self.select_pool_monitor(epsilon).await { 237 | if Some(best_pool) != self.current_pool { 238 | info!("Switching to faster upstreamn {:?}", best_pool); 239 | return Some(best_pool); 240 | } else { 241 | return None; 242 | } 243 | } 244 | None 245 | } 246 | } 247 | 248 | /// Track latencies for various stages of pool connection setup. 
249 | #[derive(Clone, Copy, Debug)] 250 | struct PoolLatency { 251 | pool: SocketAddr, 252 | open_sv2_mining_connection: Option, 253 | setup_a_channel: Option, 254 | receive_first_job: Option, 255 | receive_first_set_new_prev_hash: Option, 256 | open_sv2_jd_connection: Option, 257 | get_a_mining_token: Option, 258 | } 259 | 260 | impl PoolLatency { 261 | // Create new `PoolLatency` given an upstream address 262 | fn new(pool: SocketAddr) -> PoolLatency { 263 | Self { 264 | pool, 265 | open_sv2_mining_connection: None, 266 | setup_a_channel: None, 267 | receive_first_job: None, 268 | receive_first_set_new_prev_hash: None, 269 | open_sv2_jd_connection: None, 270 | get_a_mining_token: None, 271 | } 272 | } 273 | 274 | /// Sets the `PoolLatency`'s `open_sv2_mining_connection`, `setup_channel_timer`, `receive_first_job`, 275 | /// and `receive_first_set_new_prev_hash` 276 | async fn get_mining_setup_latencies( 277 | &mut self, 278 | setup_connection_msg: Option>, 279 | timer: Option, 280 | authority_public_key: Secp256k1PublicKey, 281 | ) -> Result<(), ()> { 282 | // Set open_sv2_mining_connection latency 283 | let open_sv2_mining_connection_timer = Instant::now(); 284 | match TcpStream::connect(self.pool).await { 285 | Ok(stream) => { 286 | self.open_sv2_mining_connection = Some(open_sv2_mining_connection_timer.elapsed()); 287 | 288 | let (mut receiver, mut sender, setup_connection_msg) = 289 | initialize_mining_connections( 290 | setup_connection_msg, 291 | stream, 292 | authority_public_key, 293 | ) 294 | .await?; 295 | 296 | // Set setup_channel latency 297 | let setup_channel_timer = Instant::now(); 298 | let result = mining_setup_connection( 299 | &mut receiver, 300 | &mut sender, 301 | setup_connection_msg, 302 | timer.unwrap_or(Duration::from_secs(2)), 303 | ) 304 | .await; 305 | match result { 306 | Ok(_) => { 307 | self.setup_a_channel = Some(setup_channel_timer.elapsed()); 308 | let (send_to_down, mut recv_from_down) = tokio::sync::mpsc::channel(10); 309 | 
let (send_from_down, recv_to_up) = tokio::sync::mpsc::channel(10); 310 | let channel = open_channel(); 311 | if send_from_down 312 | .send(PoolExtMessages::Mining(channel)) 313 | .await 314 | .is_err() 315 | { 316 | error!("Failed to send channel to pool"); 317 | return Err(()); 318 | } 319 | 320 | let relay_up_task = minin_pool_connection::relay_up(recv_to_up, sender); 321 | let relay_down_task = 322 | minin_pool_connection::relay_down(receiver, send_to_down); 323 | 324 | let timer = Instant::now(); 325 | let mut received_new_job = false; 326 | let mut received_prev_hash = false; 327 | 328 | while let Some(message) = recv_from_down.recv().await { 329 | if let PoolExtMessages::Mining(Mining::NewExtendedMiningJob( 330 | _new_ext_job, 331 | )) = message.clone() 332 | { 333 | // Set receive_first_job latency 334 | self.receive_first_job = Some(timer.elapsed()); 335 | received_new_job = true; 336 | } 337 | if let PoolExtMessages::Mining(Mining::SetNewPrevHash(_new_prev_hash)) = 338 | message.clone() 339 | { 340 | // Set receive_first_set_new_prev_hash latency 341 | self.receive_first_set_new_prev_hash = Some(timer.elapsed()); 342 | received_prev_hash = true; 343 | } 344 | // Both latencies have been set so we break the loop 345 | if received_new_job && received_prev_hash { 346 | break; 347 | } 348 | } 349 | drop(relay_up_task); 350 | drop(relay_down_task); 351 | 352 | Ok(()) 353 | } 354 | Err(e) => { 355 | error!( 356 | "Failed to get mining setup latency for pool {}: {:?}", 357 | self.pool, e 358 | ); 359 | Err(()) 360 | } 361 | } 362 | } 363 | _ => { 364 | error!("Failed to get mining setup latencies for: {:?}", self.pool); 365 | Err(()) 366 | } 367 | } 368 | } 369 | 370 | /// Sets the `PoolLatency`'s `open_sv2_jd_connection` and `get_a_mining_token` 371 | async fn get_jd_latencies( 372 | &mut self, 373 | authority_public_key: Secp256k1PublicKey, 374 | ) -> Result<(), ()> { 375 | let address = self.pool; 376 | 377 | // Set open_sv2_jd_connection latency 378 | let 
open_sv2_jd_connection_timer = Instant::now(); 379 | 380 | match tokio::time::timeout(Duration::from_secs(2), TcpStream::connect(address)).await { 381 | Ok(Ok(stream)) => { 382 | let tp = crate::TP_ADDRESS 383 | .safe_lock(|tp| tp.clone()) 384 | .map_err(|_| error!(" TP_ADDRESS Mutex Corrupted"))?; 385 | if let Some(_tp_addr) = tp { 386 | let initiator = Initiator::from_raw_k(authority_public_key.into_bytes()) 387 | // Safe expect Key is a constant and must be right 388 | .expect("Unable to create initialtor"); 389 | let (mut receiver, mut sender, _, _) = 390 | match Connection::new(stream, HandshakeRole::Initiator(initiator)).await { 391 | Ok(connection) => connection, 392 | Err(e) => { 393 | error!("Failed to create jd connection: {:?}", e); 394 | return Err(()); 395 | } 396 | }; 397 | if let Err(e) = 398 | SetupConnectionHandler::setup(&mut receiver, &mut sender, address).await 399 | { 400 | error!("Failed to setup connection: {:?}", e); 401 | return Err(()); 402 | } 403 | 404 | self.open_sv2_jd_connection = Some(open_sv2_jd_connection_timer.elapsed()); 405 | 406 | let (sender, mut _receiver) = tokio::sync::mpsc::channel(10); 407 | let upstream = 408 | match crate::jd_client::mining_upstream::Upstream::new(0, sender).await { 409 | Ok(upstream) => upstream, 410 | Err(e) => { 411 | error!("Failed to create upstream: {:?}", e); 412 | return Err(()); 413 | } 414 | }; 415 | 416 | let (job_declarator, _aborter) = match JobDeclarator::new( 417 | address, 418 | authority_public_key.into_bytes(), 419 | upstream, 420 | false, 421 | ) 422 | .await 423 | { 424 | Ok(new) => new, 425 | Err(e) => { 426 | error!("Failed to create job declarator: {:?}", e); 427 | return Err(()); 428 | } 429 | }; 430 | 431 | // Set get_a_mining_token latency 432 | let get_a_mining_token_timer = Instant::now(); 433 | let _token = JobDeclarator::get_last_token(&job_declarator).await; 434 | self.get_a_mining_token = Some(get_a_mining_token_timer.elapsed()); 435 | } else { 436 | 
self.open_sv2_jd_connection = Some(Duration::from_millis(0)); 437 | self.get_a_mining_token = Some(Duration::from_millis(0)); 438 | } 439 | Ok(()) 440 | } 441 | _ => Err(()), 442 | } 443 | } 444 | } 445 | 446 | // Helper functions 447 | fn open_channel() -> Mining<'static> { 448 | roles_logic_sv2::parsers::Mining::OpenExtendedMiningChannel( 449 | roles_logic_sv2::mining_sv2::OpenExtendedMiningChannel { 450 | request_id: 0, 451 | max_target: binary_sv2::u256_from_int(u64::MAX), 452 | min_extranonce_size: 8, 453 | user_identity: "ABC" 454 | .to_string() 455 | .try_into() 456 | // This can never fail 457 | .expect("Failed to convert user identity to string"), 458 | nominal_hash_rate: 0.0, 459 | }, 460 | ) 461 | } 462 | 463 | async fn initialize_mining_connections( 464 | setup_connection_msg: Option>, 465 | stream: TcpStream, 466 | authority_public_key: Secp256k1PublicKey, 467 | ) -> Result< 468 | ( 469 | Receiver, Slice>>, 470 | Sender, Slice>>, 471 | SetupConnection<'static>, 472 | ), 473 | (), 474 | > { 475 | let initiator = 476 | // Safe expect Key is a constant and must be right 477 | Initiator::from_raw_k(authority_public_key.into_bytes()).expect("Invalid authority key"); 478 | let (receiver, sender, _, _) = 479 | match Connection::new(stream, HandshakeRole::Initiator(initiator)).await { 480 | Ok(connection) => connection, 481 | Err(e) => { 482 | error!("Failed to create mining connection: {:?}", e); 483 | return Err(()); 484 | } 485 | }; 486 | let setup_connection_msg = 487 | setup_connection_msg.unwrap_or(get_mining_setup_connection_msg(true)); 488 | Ok((receiver, sender, setup_connection_msg)) 489 | } 490 | -------------------------------------------------------------------------------- /src/share_accounter/errors.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | #[derive(Debug)] 4 | pub enum Error { 5 | ShareAccounterTaskManagerMutexCorrupted, 6 | ShareAccounterTaskManagerError, 7 | } 8 | 9 | impl 
fmt::Display for Error { 10 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 11 | use Error::*; 12 | match self { 13 | ShareAccounterTaskManagerMutexCorrupted => { 14 | write!(f, "Share Accounter Task Manager Mutex Corrupted") 15 | } 16 | ShareAccounterTaskManagerError => { 17 | write!(f, "Share Accounter TaskManager Failed to add Task") 18 | } 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/share_accounter/mod.rs: -------------------------------------------------------------------------------- 1 | mod errors; 2 | mod task_manager; 3 | 4 | use errors::Error; 5 | use std::sync::Arc; 6 | use tracing::error; 7 | 8 | use dashmap::DashMap; 9 | use demand_share_accounting_ext::*; 10 | use parser::{PoolExtMessages, ShareAccountingMessages}; 11 | use roles_logic_sv2::{mining_sv2::SubmitSharesSuccess, parsers::Mining}; 12 | use task_manager::TaskManager; 13 | 14 | use crate::{ 15 | proxy_state::{ProxyState, ShareAccounterState}, 16 | shared::utils::AbortOnDrop, 17 | PoolState, 18 | }; 19 | 20 | pub async fn start( 21 | receiver: tokio::sync::mpsc::Receiver>, 22 | sender: tokio::sync::mpsc::Sender>, 23 | up_receiver: tokio::sync::mpsc::Receiver>, 24 | up_sender: tokio::sync::mpsc::Sender>, 25 | ) -> Result { 26 | let task_manager = TaskManager::initialize(); 27 | let shares_sent_up = Arc::new(DashMap::with_capacity(100)); 28 | let abortable = task_manager 29 | .safe_lock(|t| t.get_aborter()) 30 | .map_err(|_| Error::ShareAccounterTaskManagerMutexCorrupted)? 
31 | .ok_or(Error::ShareAccounterTaskManagerError)?; 32 | 33 | let relay_up_task = relay_up(receiver, up_sender, shares_sent_up.clone()); 34 | TaskManager::add_relay_up(task_manager.clone(), relay_up_task) 35 | .await 36 | .map_err(|_| Error::ShareAccounterTaskManagerError)?; 37 | 38 | let relay_down_task = relay_down(up_receiver, sender, shares_sent_up.clone()); 39 | TaskManager::add_relay_down(task_manager.clone(), relay_down_task) 40 | .await 41 | .map_err(|_| Error::ShareAccounterTaskManagerError)?; 42 | Ok(abortable) 43 | } 44 | 45 | struct ShareSentUp { 46 | channel_id: u32, 47 | sequence_number: u32, 48 | } 49 | 50 | fn relay_up( 51 | mut receiver: tokio::sync::mpsc::Receiver>, 52 | up_sender: tokio::sync::mpsc::Sender>, 53 | shares_sent_up: Arc>, 54 | ) -> AbortOnDrop { 55 | let task = tokio::spawn(async move { 56 | while let Some(msg) = receiver.recv().await { 57 | if let Mining::SubmitSharesExtended(m) = &msg { 58 | shares_sent_up.insert( 59 | m.job_id, 60 | ShareSentUp { 61 | channel_id: m.channel_id, 62 | sequence_number: m.sequence_number, 63 | }, 64 | ); 65 | }; 66 | let msg = PoolExtMessages::Mining(msg); 67 | if up_sender.send(msg).await.is_err() { 68 | break; 69 | } 70 | } 71 | }); 72 | task.into() 73 | } 74 | 75 | fn relay_down( 76 | mut up_receiver: tokio::sync::mpsc::Receiver>, 77 | sender: tokio::sync::mpsc::Sender>, 78 | shares_sent_up: Arc>, 79 | ) -> AbortOnDrop { 80 | let task = tokio::spawn(async move { 81 | while let Some(msg) = up_receiver.recv().await { 82 | match msg { 83 | PoolExtMessages::ShareAccountingMessages(msg) => { 84 | if let ShareAccountingMessages::ShareOk(msg) = msg { 85 | let job_id_bytes = msg.ref_job_id.to_le_bytes(); 86 | let job_id = u32::from_le_bytes(job_id_bytes[4..8].try_into().expect("Internal error: job_id_bytes[4..8] can always be convertible into a u32")); 87 | let share_sent_up = match shares_sent_up.remove(&job_id) { 88 | Some(shares) => shares.1, 89 | // job_id doesn't exist 90 | None => { 91 | error!("Pool 
sent invalid share success"); 92 | // Set global pool state to Down 93 | ProxyState::update_pool_state(PoolState::Down); 94 | return; 95 | } 96 | }; 97 | 98 | let success = Mining::SubmitSharesSuccess(SubmitSharesSuccess { 99 | channel_id: share_sent_up.channel_id, 100 | last_sequence_number: share_sent_up.sequence_number, 101 | new_submits_accepted_count: 1, 102 | new_shares_sum: 1, 103 | }); 104 | if let Err(e) = sender.send(success).await { 105 | error!("{e:?}"); 106 | ProxyState::update_share_accounter_state(ShareAccounterState::Down); 107 | break; 108 | } 109 | }; 110 | } 111 | PoolExtMessages::Mining(msg) => { 112 | if let Err(e) = sender.send(msg).await { 113 | error!("{e}"); 114 | ProxyState::update_share_accounter_state(ShareAccounterState::Down); 115 | break; 116 | } 117 | } 118 | _ => { 119 | error!("Pool send unexpected message on mining connection"); 120 | ProxyState::update_pool_state(PoolState::Down); 121 | break; 122 | } 123 | } 124 | } 125 | }); 126 | task.into() 127 | } 128 | -------------------------------------------------------------------------------- /src/share_accounter/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[allow(dead_code)] 9 | enum Task { 10 | RelayUp(AbortOnDrop), 11 | RelayDown(AbortOnDrop), 12 | } 13 | 14 | pub struct TaskManager { 15 | send_task: mpsc::Sender, 16 | abort: Option, 17 | } 18 | 19 | impl TaskManager { 20 | #[allow(unused_variables)] 21 | pub fn initialize() -> Arc> { 22 | let (sender, mut receiver) = mpsc::channel(10); 23 | let handle = tokio::task::spawn(async move { 24 | let mut tasks = vec![]; 25 | while let Some(task) = receiver.recv().await { 26 | tasks.push(task); 27 | } 28 | warn!("Share accounter main task manager stopped, keep alive tasks"); 29 | loop { 30 | 
/// Errors raised by the SV1 ingress layer.
/// Variant meanings inferred from their names and use in `sv1_ingress` —
/// confirm against the ingress code.
// `Debug` is derived for consistency with the crate's other error enums
// (e.g. the share-accounter `Error`), so failures can be logged with `{:?}`.
#[derive(Debug)]
pub enum Sv1IngressError {
    TranslatorDropped,
    DownstreamDropped,
    TaskFailed,
}
4 | 5 | pub mod error; 6 | pub mod utils; 7 | -------------------------------------------------------------------------------- /src/shared/utils.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Display; 2 | 3 | use sv1_api::utils::HexU32Be; 4 | use tokio::task::AbortHandle; 5 | use tokio::task::JoinHandle; 6 | 7 | #[derive(Debug)] 8 | pub struct AbortOnDrop { 9 | abort_handle: AbortHandle, 10 | } 11 | 12 | impl AbortOnDrop { 13 | pub fn new(handle: JoinHandle) -> Self { 14 | let abort_handle = handle.abort_handle(); 15 | Self { abort_handle } 16 | } 17 | 18 | pub fn is_finished(&self) -> bool { 19 | self.abort_handle.is_finished() 20 | } 21 | } 22 | 23 | impl core::ops::Drop for AbortOnDrop { 24 | fn drop(&mut self) { 25 | self.abort_handle.abort() 26 | } 27 | } 28 | 29 | impl From> for AbortOnDrop { 30 | fn from(value: JoinHandle) -> Self { 31 | Self::new(value) 32 | } 33 | } 34 | 35 | /// Select a version rolling mask and min bit count based on the request from the miner. 36 | /// It copy the behavior from SRI translator 37 | pub fn sv1_rolling(configure: &sv1_api::client_to_server::Configure) -> (HexU32Be, HexU32Be) { 38 | // TODO 0x1FFFE000 should be configured 39 | // = 11111111111111110000000000000 40 | // this is a reasonable default as it allows all 16 version bits to be used 41 | // If the tproxy/pool needs to use some version bits this needs to be configurable 42 | // so upstreams can negotiate with downstreams. 
When that happens this should consider 43 | // the min_bit_count in the mining.configure message 44 | let version_rollin_mask = configure 45 | .version_rolling_mask() 46 | .map(|mask| HexU32Be(mask & 0x1FFFE000)) 47 | .unwrap_or(HexU32Be(0)); 48 | let version_rolling_min_bit = configure 49 | .version_rolling_min_bit_count() 50 | .unwrap_or(HexU32Be(0)); 51 | (version_rollin_mask, version_rolling_min_bit) 52 | } 53 | 54 | #[derive(Debug, PartialEq, Eq, Clone, Copy)] 55 | pub struct UserId(pub i64); 56 | impl Display for UserId { 57 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 58 | write!(f, "{}", self.0) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/translator/downstream/accept_connection.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | proxy_state::{DownstreamType, ProxyState}, 3 | translator::{ 4 | error::Error, proxy::Bridge, upstream::diff_management::UpstreamDifficultyConfig, 5 | }, 6 | }; 7 | 8 | use super::{downstream::Downstream, task_manager::TaskManager, DownstreamMessages}; 9 | use roles_logic_sv2::utils::Mutex; 10 | use std::{net::IpAddr, sync::Arc}; 11 | use sv1_api::server_to_client; 12 | use tokio::sync::{ 13 | broadcast, 14 | mpsc::{Receiver, Sender}, 15 | }; 16 | use tokio::task; 17 | use tracing::{error, info}; 18 | 19 | pub async fn start_accept_connection( 20 | task_manager: Arc>, 21 | tx_sv1_submit: Sender, 22 | tx_mining_notify: broadcast::Sender>, 23 | bridge: Arc>, 24 | upstream_difficulty_config: Arc>, 25 | mut downstreams: Receiver<(Sender, Receiver, IpAddr)>, 26 | stats_sender: crate::api::stats::StatsSender, 27 | ) -> Result<(), Error<'static>> { 28 | let handle = { 29 | let task_manager = task_manager.clone(); 30 | task::spawn(async move { 31 | // This is needed. When bridge want to send a notification if no downstream is 32 | // available at least one receiver must be around. 
33 | let _s = tx_mining_notify.subscribe(); 34 | while let Some((send, recv, addr)) = downstreams.recv().await { 35 | info!("Translator opening connection for ip {}", addr); 36 | 37 | // The initial difficulty is derived from the formula: difficulty = hash_rate / (shares_per_second * 2^32) 38 | let initial_hash_rate = *crate::EXPECTED_SV1_HASHPOWER; 39 | let share_per_second = crate::SHARE_PER_MIN / 60.0; 40 | let initial_difficulty = 41 | dbg!(initial_hash_rate / (share_per_second * 2f32.powf(32.0))); 42 | let initial_difficulty = 43 | crate::translator::downstream::diff_management::nearest_power_of_10( 44 | initial_difficulty, 45 | ); 46 | 47 | // Formula: expected_hash_rate = (shares_per_second) * initial_difficulty * 2^32, where shares_per_second = SHARE_PER_MIN / 60 48 | let expected_hash_rate = 49 | (crate::SHARE_PER_MIN / 60.0) * initial_difficulty * 2f32.powf(32.0); 50 | if Bridge::ready(&bridge).await.is_err() { 51 | error!("Bridge not ready"); 52 | break; 53 | }; 54 | let open_sv1_downstream = 55 | match bridge.safe_lock(|s| s.on_new_sv1_connection(expected_hash_rate)) { 56 | Ok(sv1_downstream) => sv1_downstream, 57 | Err(e) => { 58 | error!("{e}"); 59 | break; 60 | } 61 | }; 62 | 63 | match open_sv1_downstream { 64 | Ok(opened) => { 65 | info!( 66 | "Translator opening connection for ip {} with id {}", 67 | addr, opened.channel_id 68 | ); 69 | Downstream::new_downstream( 70 | opened.channel_id, 71 | tx_sv1_submit.clone(), 72 | tx_mining_notify.subscribe(), 73 | opened.extranonce, 74 | opened.last_notify, 75 | opened.extranonce2_len as usize, 76 | addr.to_string(), 77 | upstream_difficulty_config.clone(), 78 | send, 79 | recv, 80 | task_manager.clone(), 81 | initial_difficulty, 82 | stats_sender.clone(), 83 | ) 84 | .await 85 | } 86 | Err(e) => { 87 | error!("{e:?}"); 88 | ProxyState::update_downstream_state(DownstreamType::TranslatorDownstream); 89 | break; 90 | } 91 | } 92 | } 93 | }) 94 | }; 95 | TaskManager::add_accept_connection(task_manager, 
handle.into()) 96 | .await 97 | .map_err(|_| Error::TranslatorTaskManagerFailed) 98 | } 99 | -------------------------------------------------------------------------------- /src/translator/downstream/mod.rs: -------------------------------------------------------------------------------- 1 | use roles_logic_sv2::mining_sv2::Target; 2 | use sv1_api::{client_to_server::Submit, utils::HexU32Be}; 3 | pub mod diff_management; 4 | #[allow(clippy::module_inception)] 5 | pub mod downstream; 6 | pub use downstream::Downstream; 7 | mod accept_connection; 8 | mod notify; 9 | mod receive_from_downstream; 10 | mod send_to_downstream; 11 | mod task_manager; 12 | 13 | /// This constant is used as a check to ensure clients 14 | /// do not send a mining.subscribe and never a mining.authorize 15 | /// since they will take up a tcp connection but never be allowed to 16 | /// receive jobs. Without the timeout the TProxy can be exploited by incoming 17 | /// `mining.subscribe` messages that init connections and take up compute 18 | const SUBSCRIBE_TIMEOUT_SECS: u64 = 10; 19 | 20 | /// enum of messages sent to the Bridge 21 | #[derive(Debug, Clone)] 22 | pub enum DownstreamMessages { 23 | SubmitShares(SubmitShareWithChannelId), 24 | SetDownstreamTarget(SetDownstreamTarget), 25 | } 26 | 27 | /// wrapper around a `mining.submit` with extra channel informationfor the Bridge to 28 | /// process 29 | #[derive(Debug, Clone)] 30 | pub struct SubmitShareWithChannelId { 31 | pub channel_id: u32, 32 | pub share: Submit<'static>, 33 | // TODO why we need allow dead code here??? 
/// Returns the fixed SV1 subscription id handed out to downstream miners.
pub fn new_subscription_id() -> String {
    // Every connection receives the same constant identifier.
    String::from("ae6812eb4cd7735a302a8a9dd95cf71f")
}
35 | })?; 36 | stats_sender.setup_stats(connection_id); 37 | task::spawn(async move { 38 | let timeout_timer = std::time::Instant::now(); 39 | let mut first_sent = false; 40 | loop { 41 | let mask = downstream 42 | .safe_lock(|d| d.version_rolling_mask.clone()) 43 | .unwrap(); 44 | let is_a = match downstream.safe_lock(|d| !d.authorized_names.is_empty()) { 45 | Ok(is_a) => is_a, 46 | Err(e) => { 47 | error!("{e}"); 48 | ProxyState::update_downstream_state(DownstreamType::TranslatorDownstream); 49 | break; 50 | } 51 | }; 52 | if is_a && !first_sent && !recent_notifies.is_empty() { 53 | if let Err(e) = Downstream::init_difficulty_management(&downstream).await { 54 | error!("Failed to initailize difficulty managemant {e}") 55 | }; 56 | 57 | let mut sv1_mining_notify_msg = match recent_notifies.back().cloned() { 58 | Some(sv1_mining_notify_msg) => sv1_mining_notify_msg, 59 | None => { 60 | error!("sv1_mining_notify_msg is None"); 61 | ProxyState::update_downstream_state( 62 | DownstreamType::TranslatorDownstream, 63 | ); 64 | break; 65 | } 66 | }; 67 | apply_mask(mask, &mut sv1_mining_notify_msg); 68 | let message: json_rpc::Message = sv1_mining_notify_msg.into(); 69 | Downstream::send_message_downstream(downstream.clone(), message).await; 70 | if downstream 71 | .clone() 72 | .safe_lock(|s| { 73 | s.first_job_received = true; 74 | }) 75 | .is_err() 76 | { 77 | error!("Translator Downstream Mutex Poisoned"); 78 | ProxyState::update_downstream_state(DownstreamType::TranslatorDownstream); 79 | break; 80 | } 81 | first_sent = true; 82 | } else if is_a && !recent_notifies.is_empty() { 83 | if let Err(e) = 84 | start_update(task_manager, downstream.clone(), connection_id).await 85 | { 86 | warn!("Translator impossible to start update task: {e}"); 87 | break; 88 | }; 89 | 90 | while let Ok(mut sv1_mining_notify_msg) = rx_sv1_notify.recv().await { 91 | if downstream 92 | .safe_lock(|d| { 93 | d.recent_notifies.push_back(sv1_mining_notify_msg.clone()); 94 | debug!( 95 | 
"Downstream {}: Added job_id {} to recent_notifies. Current jobs: {:?}", 96 | connection_id, 97 | sv1_mining_notify_msg.job_id, 98 | d.recent_notifies.iter().map(|n| &n.job_id).collect::>() 99 | ); 100 | if d.recent_notifies.len() > 2 { 101 | if let Some(removed) = d.recent_notifies.pop_front() { 102 | debug!("Downstream {}: Removed oldest job_id {}", connection_id, removed.job_id); 103 | } 104 | }}) 105 | .is_err() 106 | { 107 | error!("Translator Downstream Mutex Poisoned"); 108 | ProxyState::update_downstream_state( 109 | DownstreamType::TranslatorDownstream, 110 | ); 111 | break; 112 | } 113 | 114 | apply_mask(mask.clone(), &mut sv1_mining_notify_msg); 115 | let message: json_rpc::Message = sv1_mining_notify_msg.into(); 116 | Downstream::send_message_downstream(downstream.clone(), message).await; 117 | } 118 | break; 119 | } else { 120 | // timeout connection if miner does not send the authorize message after sending a subscribe 121 | if timeout_timer.elapsed().as_secs() > SUBSCRIBE_TIMEOUT_SECS { 122 | warn!( 123 | "Downstream: miner.subscribe/miner.authorize TIMEOUT for {} {}", 124 | &host, connection_id 125 | ); 126 | break; 127 | } 128 | tokio::time::sleep(std::time::Duration::from_secs(1)).await; 129 | } 130 | } 131 | // TODO here we want to be sure that on drop this is called 132 | let _ = Downstream::remove_downstream_hashrate_from_channel(&downstream); 133 | // TODO here we want to kill the tasks 134 | warn!( 135 | "Downstream: Shutting down sv1 downstream job notifier for {}", 136 | &host 137 | ); 138 | }) 139 | }; 140 | TaskManager::add_notify(task_manager, handle.into()) 141 | .await 142 | .map_err(|_| Error::TranslatorTaskManagerFailed) 143 | } 144 | 145 | async fn start_update( 146 | task_manager: Arc>, 147 | downstream: Arc>, 148 | connection_id: u32, 149 | ) -> Result<(), Error<'static>> { 150 | let handle = task::spawn(async move { 151 | // Prevent difficulty adjustments until after delay elapses 152 | 
tokio::time::sleep(std::time::Duration::from_secs(crate::Configuration::delay())).await; 153 | loop { 154 | let share_count = crate::translator::utils::get_share_count(connection_id); 155 | let sleep_duration = if share_count >= crate::SHARE_PER_MIN * 3.0 156 | || share_count <= crate::SHARE_PER_MIN / 3.0 157 | { 158 | // TODO: this should only apply when after the first share has been received 159 | std::time::Duration::from_millis(crate::Configuration::adjustment_interval()) 160 | } else { 161 | std::time::Duration::from_millis(crate::Configuration::adjustment_interval()) 162 | }; 163 | 164 | tokio::time::sleep(sleep_duration).await; 165 | 166 | let recent_notifies = match downstream.safe_lock(|d| d.recent_notifies.clone()) { 167 | Ok(ln) => ln, 168 | Err(e) => { 169 | error!("{e}"); 170 | return; 171 | } 172 | }; 173 | assert!(!recent_notifies.is_empty()); 174 | // if hashrate has changed, update difficulty management, and send new 175 | // mining.set_difficulty 176 | if let Err(e) = Downstream::try_update_difficulty_settings(&downstream).await { 177 | error!("{e}"); 178 | return; 179 | }; 180 | } 181 | }); 182 | TaskManager::add_update(task_manager, handle.into()) 183 | .await 184 | .map_err(|_| Error::TranslatorTaskManagerFailed) 185 | } 186 | -------------------------------------------------------------------------------- /src/translator/downstream/receive_from_downstream.rs: -------------------------------------------------------------------------------- 1 | use super::{downstream::Downstream, task_manager::TaskManager}; 2 | use crate::{ 3 | proxy_state::{DownstreamType, ProxyState}, 4 | translator::error::Error, 5 | }; 6 | use roles_logic_sv2::utils::Mutex; 7 | use std::sync::Arc; 8 | use sv1_api::{client_to_server::Submit, json_rpc}; 9 | use tokio::sync::mpsc; 10 | use tokio::task; 11 | use tracing::{error, warn}; 12 | 13 | pub async fn start_receive_downstream( 14 | task_manager: Arc>, 15 | downstream: Arc>, 16 | mut recv_from_down: mpsc::Receiver, 17 | 
connection_id: u32, 18 | ) -> Result<(), Error<'static>> { 19 | let handle = task::spawn(async move { 20 | while let Some(incoming) = recv_from_down.recv().await { 21 | let incoming: Result = serde_json::from_str(&incoming); 22 | if let Ok(incoming) = incoming { 23 | // if message is Submit Shares update difficulty management 24 | if let sv1_api::Message::StandardRequest(standard_req) = incoming.clone() { 25 | if let Ok(Submit { .. }) = standard_req.try_into() { 26 | if let Err(e) = Downstream::save_share(downstream.clone()) { 27 | error!("{}", e); 28 | break; 29 | } 30 | } 31 | } 32 | 33 | if let Err(error) = 34 | Downstream::handle_incoming_sv1(downstream.clone(), incoming).await 35 | { 36 | error!("Failed to handle incoming sv1 msg: {:?}", error); 37 | ProxyState::update_downstream_state(DownstreamType::TranslatorDownstream); 38 | }; 39 | } else { 40 | // Message received could not be converted to rpc message 41 | error!( 42 | "{}", 43 | Error::V1Protocol(Box::new(sv1_api::error::Error::InvalidJsonRpcMessageKind)) 44 | ); 45 | return; 46 | } 47 | } 48 | if let Ok(stats_sender) = downstream.safe_lock(|d| d.stats_sender.clone()) { 49 | stats_sender.remove_stats(connection_id); 50 | } 51 | // No message to receive 52 | warn!( 53 | "Downstream: Shutting down sv1 downstream reader {}", 54 | connection_id 55 | ); 56 | }); 57 | TaskManager::add_receive_downstream(task_manager, handle.into()) 58 | .await 59 | .map_err(|_| Error::TranslatorTaskManagerFailed) 60 | } 61 | -------------------------------------------------------------------------------- /src/translator/downstream/send_to_downstream.rs: -------------------------------------------------------------------------------- 1 | use super::task_manager::TaskManager; 2 | use crate::translator::error::Error; 3 | use roles_logic_sv2::utils::Mutex; 4 | use std::sync::Arc; 5 | use sv1_api::json_rpc; 6 | use tokio::sync::mpsc; 7 | use tokio::task; 8 | use tracing::{error, warn}; 9 | 10 | pub async fn 
start_send_to_downstream( 11 | task_manager: Arc>, 12 | mut receiver_outgoing: mpsc::Receiver, 13 | send_to_down: mpsc::Sender, 14 | connection_id: u32, 15 | host: String, 16 | ) -> Result<(), Error<'static>> { 17 | let handle = task::spawn(async move { 18 | while let Some(res) = receiver_outgoing.recv().await { 19 | let to_send = match serde_json::to_string(&res) { 20 | Ok(string) => format!("{}\n", string), 21 | Err(e) => { 22 | error!("Failed to serialize msg {e:?}"); 23 | break; 24 | } 25 | }; 26 | if send_to_down.send(to_send).await.is_err() { 27 | warn!("Downstream {} dropped", host); 28 | break; 29 | } 30 | } 31 | warn!( 32 | "Downstream: Shutting down sv1 downstream writer: {}", 33 | connection_id 34 | ); 35 | }); 36 | TaskManager::add_send_downstream(task_manager, handle.into()) 37 | .await 38 | .map_err(|_| Error::TranslatorTaskManagerFailed) 39 | } 40 | -------------------------------------------------------------------------------- /src/translator/downstream/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[allow(dead_code)] 9 | enum Task { 10 | AcceptConnection(AbortOnDrop), 11 | ReceiveDownstream(AbortOnDrop), 12 | SendDownstream(AbortOnDrop), 13 | Notify(AbortOnDrop), 14 | Update(AbortOnDrop), 15 | } 16 | 17 | pub struct TaskManager { 18 | send_task: mpsc::Sender, 19 | abort: Option, 20 | } 21 | 22 | impl TaskManager { 23 | pub fn initialize() -> Arc> { 24 | let (sender, mut receiver) = mpsc::channel(10); 25 | let handle = tokio::task::spawn(async move { 26 | let mut tasks = vec![]; 27 | while let Some(task) = receiver.recv().await { 28 | tasks.push(task); 29 | } 30 | warn!("Translator downstream task manager stopped, keep alive tasks"); 31 | loop { 32 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 33 | } 34 | }); 35 
| Arc::new(Mutex::new(Self { 36 | send_task: sender, 37 | abort: Some(handle.into()), 38 | })) 39 | } 40 | 41 | pub fn get_aborter(&mut self) -> Option { 42 | self.abort.take() 43 | } 44 | 45 | pub async fn add_receive_downstream( 46 | self_: Arc>, 47 | abortable: AbortOnDrop, 48 | ) -> Result<(), ()> { 49 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 50 | send_task 51 | .send(Task::ReceiveDownstream(abortable)) 52 | .await 53 | .map_err(|_| ()) 54 | } 55 | pub async fn add_update(self_: Arc>, abortable: AbortOnDrop) -> Result<(), ()> { 56 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 57 | send_task 58 | .send(Task::Update(abortable)) 59 | .await 60 | .map_err(|_| ()) 61 | } 62 | pub async fn add_notify(self_: Arc>, abortable: AbortOnDrop) -> Result<(), ()> { 63 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 64 | send_task 65 | .send(Task::Notify(abortable)) 66 | .await 67 | .map_err(|_| ()) 68 | } 69 | pub async fn add_send_downstream( 70 | self_: Arc>, 71 | abortable: AbortOnDrop, 72 | ) -> Result<(), ()> { 73 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 74 | send_task 75 | .send(Task::SendDownstream(abortable)) 76 | .await 77 | .map_err(|_| ()) 78 | } 79 | pub async fn add_accept_connection( 80 | self_: Arc>, 81 | abortable: AbortOnDrop, 82 | ) -> Result<(), ()> { 83 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 84 | send_task 85 | .send(Task::AcceptConnection(abortable)) 86 | .await 87 | .map_err(|_| ()) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/translator/error.rs: -------------------------------------------------------------------------------- 1 | use std::{convert::Infallible, fmt, sync::PoisonError}; 2 | 3 | pub type ProxyResult<'a, T> = core::result::Result>; 4 | 5 | #[derive(Debug)] 6 | pub enum Error<'a> { 7 | /// Errors due to invalid extranonce from upstream 8 | 
InvalidExtranonce(String), 9 | /// Errors from `roles_logic_sv2` crate. 10 | RolesSv2Logic(roles_logic_sv2::errors::Error), 11 | V1Protocol(Box>), 12 | // Locking Errors 13 | PoisonLock, 14 | TranslatorUpstreamMutexPoisoned, 15 | TranslatorDiffConfigMutexPoisoned, 16 | TranslatorTaskManagerMutexPoisoned, 17 | BridgeMutexPoisoned, 18 | BridgeTaskManagerMutexPoisoned, 19 | // Task Manager Errors 20 | TranslatorTaskManagerFailed, 21 | BridgeTaskManagerFailed, 22 | // Unrecoverable Errors 23 | Unrecoverable, 24 | // used to handle SV2 protocol error messages from pool 25 | #[allow(clippy::enum_variant_names)] 26 | TargetError(roles_logic_sv2::errors::Error), 27 | Infallible(Infallible), 28 | ImpossibleToOpenChannnel, 29 | #[allow(clippy::enum_variant_names)] 30 | AsyncChannelError, 31 | } 32 | 33 | impl From for Error<'_> { 34 | fn from(v: Infallible) -> Self { 35 | Self::Infallible(v) 36 | } 37 | } 38 | 39 | impl fmt::Display for Error<'_> { 40 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 41 | match self { 42 | Error::InvalidExtranonce(e) => write!(f, "InvalidExtranonce {}", e), 43 | Error::RolesSv2Logic(e) => write!(f, "RolesSv2Logic {}", e), 44 | Error::V1Protocol(e) => write!(f, "V1Protocol {}", e), 45 | Error::PoisonLock => write!(f, "PoisonLock"), 46 | Error::TargetError(e) => write!(f, "TargetError {}", e), 47 | Error::Infallible(e) => write!(f, "Infallible {}", e), 48 | Error::ImpossibleToOpenChannnel => write!(f, "ImpossibleToOpenChannnel"), 49 | Error::AsyncChannelError => write!(f, "AsyncChannelError"), 50 | Error::TranslatorUpstreamMutexPoisoned => write!(f, "TranslatorUpstreamMutexPoisoned"), 51 | Error::TranslatorDiffConfigMutexPoisoned => { 52 | write!(f, "TranslatorDiffConfigMutexPoisoned") 53 | } 54 | Error::TranslatorTaskManagerMutexPoisoned => { 55 | write!(f, "TranslatorTaskManagerMutexPoisoned") 56 | } 57 | Error::BridgeMutexPoisoned => write!(f, "BridgeMutexPoisoned"), 58 | Error::BridgeTaskManagerMutexPoisoned => write!(f, 
"BridgeTaskManagerMutexPoisoned"), 59 | Error::TranslatorTaskManagerFailed => write!(f, "TranslatorTaskManagerFailed"), 60 | Error::BridgeTaskManagerFailed => write!(f, "BridgeTaskManagerFailed"), 61 | Error::Unrecoverable => write!(f, "Unrecoverable"), 62 | } 63 | } 64 | } 65 | 66 | impl From> for Error<'_> { 67 | fn from(_: PoisonError) -> Self { 68 | Self::PoisonLock 69 | } 70 | } 71 | impl From> for Error<'_> { 72 | fn from(_value: tokio::sync::mpsc::error::SendError) -> Self { 73 | Self::AsyncChannelError 74 | } 75 | } 76 | impl From for Error<'_> { 77 | fn from(value: roles_logic_sv2::Error) -> Self { 78 | Self::RolesSv2Logic(value) 79 | } 80 | } 81 | 82 | impl<'a> From> for Error<'a> { 83 | fn from(value: sv1_api::error::Error<'a>) -> Self { 84 | Self::V1Protocol(Box::new(value)) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/translator/mod.rs: -------------------------------------------------------------------------------- 1 | mod downstream; 2 | 3 | mod error; 4 | mod proxy; 5 | mod upstream; 6 | mod utils; 7 | 8 | use bitcoin::Address; 9 | use error::Error; 10 | 11 | use roles_logic_sv2::{parsers::Mining, utils::Mutex}; 12 | use tracing::error; 13 | 14 | use std::{net::IpAddr, sync::Arc}; 15 | use tokio::sync::mpsc::channel; 16 | 17 | use sv1_api::server_to_client; 18 | use tokio::sync::broadcast; 19 | 20 | use crate::{ 21 | proxy_state::{ProxyState, TranslatorState}, 22 | shared::utils::AbortOnDrop, 23 | }; 24 | use tokio::sync::mpsc::{Receiver as TReceiver, Sender as TSender}; 25 | 26 | use self::upstream::diff_management::UpstreamDifficultyConfig; 27 | mod task_manager; 28 | use task_manager::TaskManager; 29 | 30 | pub async fn start( 31 | downstreams: TReceiver<(TSender, TReceiver, IpAddr)>, 32 | pool_connection: TSender<( 33 | TSender>, 34 | TReceiver>, 35 | Option
, 36 | )>, 37 | stats_sender: crate::api::stats::StatsSender, 38 | ) -> Result> { 39 | let task_manager = TaskManager::initialize(pool_connection.clone()); 40 | let abortable = task_manager 41 | .safe_lock(|t| t.get_aborter()) 42 | .map_err(|_| Error::TranslatorTaskManagerMutexPoisoned)? 43 | .ok_or(Error::TranslatorTaskManagerFailed)?; 44 | 45 | let (send_to_up, up_recv_from_here) = channel(crate::TRANSLATOR_BUFFER_SIZE); 46 | let (up_send_to_here, recv_from_up) = channel(crate::TRANSLATOR_BUFFER_SIZE); 47 | pool_connection 48 | .send((up_send_to_here, up_recv_from_here, None)) 49 | .await 50 | .map_err(|_| { 51 | error!("Internal Error: Failed to send channels to the pool"); 52 | Error::Unrecoverable // Propagate error to that caller. There, we will restart Proxy 53 | })?; 54 | 55 | // `tx_sv1_bridge` sender is used by `Downstream` to send a `DownstreamMessages` message to 56 | // `Bridge` via the `rx_sv1_downstream` receiver 57 | // (Sender, Receiver) 58 | let (tx_sv1_bridge, rx_sv1_bridge) = channel(crate::TRANSLATOR_BUFFER_SIZE); 59 | 60 | // Sender/Receiver to send a SV2 `SubmitSharesExtended` from the `Bridge` to the `Upstream` 61 | // (Sender>, Receiver>) 62 | let (tx_sv2_submit_shares_ext, rx_sv2_submit_shares_ext) = 63 | channel(crate::TRANSLATOR_BUFFER_SIZE); 64 | 65 | // Sender/Receiver to send a SV2 `SetNewPrevHash` message from the `Upstream` to the `Bridge` 66 | // (Sender>, Receiver>) 67 | let (tx_sv2_set_new_prev_hash, rx_sv2_set_new_prev_hash) = 68 | channel(crate::TRANSLATOR_BUFFER_SIZE); 69 | 70 | // Sender/Receiver to send a SV2 `NewExtendedMiningJob` message from the `Upstream` to the 71 | // `Bridge` 72 | // (Sender>, Receiver>) 73 | let (tx_sv2_new_ext_mining_job, rx_sv2_new_ext_mining_job) = 74 | channel(crate::TRANSLATOR_BUFFER_SIZE); 75 | 76 | // Sender/Receiver to send a new extranonce from the `Upstream` to this `main` function to be 77 | // passed to the `Downstream` upon a Downstream role connection 78 | // (Sender, Receiver) 79 | let 
(tx_sv2_extranonce, mut rx_sv2_extranonce) = channel(crate::TRANSLATOR_BUFFER_SIZE); 80 | let target = Arc::new(Mutex::new(vec![0; 32])); 81 | 82 | // Sender/Receiver to send SV1 `mining.notify` message from the `Bridge` to the `Downstream` 83 | let (tx_sv1_notify, _): ( 84 | broadcast::Sender, 85 | broadcast::Receiver, 86 | ) = broadcast::channel(crate::TRANSLATOR_BUFFER_SIZE); 87 | 88 | let channel_nominal_hashrate = 0.0; 89 | 90 | let upstream_diff = UpstreamDifficultyConfig { 91 | channel_diff_update_interval: crate::CHANNEL_DIFF_UPDTATE_INTERVAL, 92 | channel_nominal_hashrate, 93 | }; 94 | let diff_config = Arc::new(Mutex::new(upstream_diff)); 95 | 96 | // Instantiate a new `Upstream` (SV2 Pool) 97 | let upstream = upstream::Upstream::new( 98 | tx_sv2_set_new_prev_hash, 99 | tx_sv2_new_ext_mining_job, 100 | crate::MIN_EXTRANONCE_SIZE - 1, 101 | tx_sv2_extranonce, 102 | target.clone(), 103 | diff_config.clone(), 104 | send_to_up, 105 | ) 106 | .await?; 107 | 108 | let upstream_abortable = 109 | upstream::Upstream::start(upstream, recv_from_up, rx_sv2_submit_shares_ext).await?; 110 | TaskManager::add_upstream(task_manager.clone(), upstream_abortable) 111 | .await 112 | .map_err(|_| Error::TranslatorTaskManagerFailed)?; 113 | 114 | let startup_task = { 115 | let target = target.clone(); 116 | let task_manager = task_manager.clone(); 117 | tokio::task::spawn(async move { 118 | let (extended_extranonce, up_id) = match rx_sv2_extranonce.recv().await { 119 | Some((extended_extranonce, up_id)) => (extended_extranonce, up_id), 120 | None => { 121 | error!("Failed to receive from rx_sv2_extranonce"); 122 | ProxyState::update_translator_state(TranslatorState::Down); 123 | return; 124 | } 125 | }; 126 | 127 | loop { 128 | let target: [u8; 32] = match target.safe_lock(|t| t.clone()) { 129 | Ok(target) => target.try_into().expect("Internal error: this operation cannot fail because Vec can always be converted into [u8; 32]"), 130 | Err(e) => { 131 | error!("{}", 
Error::TargetError(roles_logic_sv2::Error::PoisonLock(e.to_string()))); 132 | break 133 | } 134 | }; 135 | 136 | if target != [0; 32] { 137 | break; 138 | }; 139 | tokio::task::yield_now().await; 140 | } 141 | 142 | // Instantiate a new `Bridge` and begins handling incoming messages 143 | let b = match proxy::Bridge::new( 144 | tx_sv2_submit_shares_ext, 145 | tx_sv1_notify.clone(), 146 | extended_extranonce, 147 | target, 148 | up_id, 149 | ) { 150 | Ok(b) => b, 151 | Err(e) => { 152 | error!("Failed to instantiate new Bridge: {e}"); 153 | return; 154 | } 155 | }; 156 | 157 | let bridge_aborter = match proxy::Bridge::start( 158 | b.clone(), 159 | rx_sv2_set_new_prev_hash, 160 | rx_sv2_new_ext_mining_job, 161 | rx_sv1_bridge, 162 | ) 163 | .await 164 | { 165 | Ok(abortable) => abortable, 166 | Err(e) => { 167 | error!("Failed to start bridge: {e}"); 168 | return; 169 | } 170 | }; 171 | 172 | let downstream_aborter = match downstream::Downstream::accept_connections( 173 | tx_sv1_bridge, 174 | tx_sv1_notify, 175 | b, 176 | diff_config, 177 | downstreams, 178 | stats_sender, 179 | ) 180 | .await 181 | { 182 | Ok(abortable) => abortable, 183 | Err(e) => { 184 | error!("Downstream failed to accept connection: {e}"); 185 | return; 186 | } 187 | }; 188 | 189 | if TaskManager::add_bridge(task_manager.clone(), bridge_aborter) 190 | .await 191 | .is_err() 192 | { 193 | error!("{}", Error::TranslatorTaskManagerFailed); 194 | return; 195 | }; 196 | 197 | if TaskManager::add_downstream_listener(task_manager.clone(), downstream_aborter) 198 | .await 199 | .is_err() 200 | { 201 | error!("{}", Error::TranslatorTaskManagerFailed); 202 | } 203 | }) 204 | }; 205 | TaskManager::add_startup_task(task_manager.clone(), startup_task.into()) 206 | .await 207 | .map_err(|_| Error::TranslatorTaskManagerFailed)?; 208 | 209 | Ok(abortable) 210 | } 211 | -------------------------------------------------------------------------------- /src/translator/proxy/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub mod bridge; 2 | pub mod next_mining_notify; 3 | pub use bridge::Bridge; 4 | mod task_manager; 5 | -------------------------------------------------------------------------------- /src/translator/proxy/next_mining_notify.rs: -------------------------------------------------------------------------------- 1 | use roles_logic_sv2::{ 2 | job_creator::extended_job_to_non_segwit, 3 | mining_sv2::{NewExtendedMiningJob, SetNewPrevHash}, 4 | }; 5 | use sv1_api::{ 6 | server_to_client, 7 | utils::{HexU32Be, MerkleNode, PrevHash}, 8 | }; 9 | use tracing::debug; 10 | 11 | /// Creates a new SV1 `mining.notify` message if both SV2 `SetNewPrevHash` and 12 | /// `NewExtendedMiningJob` messages have been received. If one of these messages is still being 13 | /// waited on, the function returns `None`. 14 | /// If clean_jobs = false, it means a new job is created, with the same PrevHash 15 | pub fn create_notify( 16 | new_prev_hash: SetNewPrevHash<'static>, 17 | new_job: NewExtendedMiningJob<'static>, 18 | clean_jobs: bool, 19 | extranonce_len: usize, 20 | ) -> server_to_client::Notify<'static> { 21 | let new_job = extended_job_to_non_segwit(new_job, extranonce_len) 22 | .expect("failed to convert extended job to non segwit"); 23 | // Make sure that SetNewPrevHash + NewExtendedMiningJob is matching (not future) 24 | let job_id = new_job.job_id.to_string(); 25 | 26 | // U256<'static> -> MerkleLeaf 27 | let prev_hash = PrevHash(new_prev_hash.prev_hash.clone()); 28 | 29 | // B064K<'static'> -> HexBytes 30 | let coin_base1 = new_job.coinbase_tx_prefix.to_vec().into(); 31 | let coin_base2 = new_job.coinbase_tx_suffix.to_vec().into(); 32 | 33 | // Seq0255<'static, U56<'static>> -> Vec> 34 | let merkle_path = new_job.merkle_path.clone().into_static().0; 35 | let merkle_branch: Vec = merkle_path.into_iter().map(MerkleNode).collect(); 36 | 37 | // u32 -> HexBytes 38 | let version = 
HexU32Be(new_job.version); 39 | let bits = HexU32Be(new_prev_hash.nbits); 40 | let time = HexU32Be(match new_job.is_future() { 41 | true => new_prev_hash.min_ntime, 42 | false => new_job.min_ntime.clone().into_inner().expect("Internal error: this operation can not fail because the U32 can always be converted into Inner"), 43 | }); 44 | 45 | let notify_response = server_to_client::Notify { 46 | job_id, 47 | prev_hash, 48 | coin_base1, 49 | coin_base2, 50 | merkle_branch, 51 | version, 52 | bits, 53 | time, 54 | clean_jobs, 55 | }; 56 | debug!("\nNextMiningNotify: {:?}\n", notify_response); 57 | notify_response 58 | } 59 | -------------------------------------------------------------------------------- /src/translator/proxy/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[derive(Debug)] 9 | #[allow(dead_code)] 10 | enum Task { 11 | NewExtendedMiningJob(AbortOnDrop), 12 | DownstreamMessages(AbortOnDrop), 13 | NewPrevHash(AbortOnDrop), 14 | } 15 | 16 | pub struct TaskManager { 17 | send_task: mpsc::Sender, 18 | abort: Option, 19 | } 20 | 21 | impl TaskManager { 22 | pub fn initialize() -> Arc> { 23 | let (sender, mut receiver) = mpsc::channel(10); 24 | let handle = tokio::task::spawn(async move { 25 | let mut tasks = vec![]; 26 | while let Some(task) = receiver.recv().await { 27 | tasks.push(task); 28 | } 29 | warn!("Translator bridge task manager stopped, keep alive tasks"); 30 | loop { 31 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 32 | } 33 | }); 34 | Arc::new(Mutex::new(Self { 35 | send_task: sender, 36 | abort: Some(handle.into()), 37 | })) 38 | } 39 | 40 | pub fn get_aborter(&mut self) -> Option { 41 | self.abort.take() 42 | } 43 | 44 | pub async fn add_handle_new_extended_mining_job( 45 | self_: Arc>, 46 | abortable: 
AbortOnDrop, 47 | ) -> Result<(), ()> { 48 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 49 | send_task 50 | .send(Task::NewExtendedMiningJob(abortable)) 51 | .await 52 | .map_err(|_| ()) 53 | } 54 | pub async fn add_handle_new_prev_hash( 55 | self_: Arc>, 56 | abortable: AbortOnDrop, 57 | ) -> Result<(), ()> { 58 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 59 | send_task 60 | .send(Task::NewPrevHash(abortable)) 61 | .await 62 | .map_err(|_| ()) 63 | } 64 | pub async fn add_handle_downstream_messages( 65 | self_: Arc>, 66 | abortable: AbortOnDrop, 67 | ) -> Result<(), ()> { 68 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 69 | send_task 70 | .send(Task::DownstreamMessages(abortable)) 71 | .await 72 | .map_err(|_| ()) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/translator/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use bitcoin::Address; 5 | use roles_logic_sv2::parsers::Mining; 6 | use roles_logic_sv2::utils::Mutex; 7 | use tokio::sync::mpsc; 8 | use tokio::sync::mpsc::{Receiver, Sender}; 9 | use tracing::warn; 10 | 11 | type Message = Mining<'static>; 12 | 13 | #[allow(dead_code)] 14 | enum Task { 15 | DownstreamListener(AbortOnDrop), 16 | Upstream(AbortOnDrop), 17 | #[allow(clippy::enum_variant_names)] 18 | StartupTask(AbortOnDrop), 19 | Bridge(AbortOnDrop), 20 | } 21 | 22 | pub struct TaskManager { 23 | send_task: mpsc::Sender, 24 | abort: Option, 25 | } 26 | 27 | impl TaskManager { 28 | #[allow(unused_variables)] 29 | pub fn initialize( 30 | // We need this to be alive for all the live of the translator 31 | up_connection: Sender<(Sender, Receiver, Option
)>, 32 | ) -> Arc> { 33 | let (sender, mut receiver) = mpsc::channel(10); 34 | let handle = tokio::task::spawn(async move { 35 | let mut tasks = vec![]; 36 | while let Some(task) = receiver.recv().await { 37 | tasks.push(task); 38 | } 39 | warn!("Translator main task manager stopped, keep alive tasks"); 40 | loop { 41 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 42 | } 43 | #[allow(unreachable_code)] 44 | drop(up_connection) 45 | }); 46 | Arc::new(Mutex::new(Self { 47 | send_task: sender, 48 | abort: Some(handle.into()), 49 | })) 50 | } 51 | 52 | pub fn get_aborter(&mut self) -> Option { 53 | self.abort.take() 54 | } 55 | 56 | pub async fn add_downstream_listener( 57 | self_: Arc>, 58 | abortable: AbortOnDrop, 59 | ) -> Result<(), ()> { 60 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 61 | send_task 62 | .send(Task::DownstreamListener(abortable)) 63 | .await 64 | .map_err(|_| ()) 65 | } 66 | pub async fn add_upstream(self_: Arc>, abortable: AbortOnDrop) -> Result<(), ()> { 67 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 68 | send_task 69 | .send(Task::Upstream(abortable)) 70 | .await 71 | .map_err(|_| ()) 72 | } 73 | pub async fn add_startup_task( 74 | self_: Arc>, 75 | abortable: AbortOnDrop, 76 | ) -> Result<(), ()> { 77 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 78 | send_task 79 | .send(Task::StartupTask(abortable)) 80 | .await 81 | .map_err(|_| ()) 82 | } 83 | pub async fn add_bridge(self_: Arc>, abortable: AbortOnDrop) -> Result<(), ()> { 84 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 85 | send_task 86 | .send(Task::Bridge(abortable)) 87 | .await 88 | .map_err(|_| ()) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/translator/upstream/diff_management.rs: -------------------------------------------------------------------------------- 1 | use crate::translator::error::Error; 2 | 3 | use 
super::Upstream; 4 | 5 | #[derive(Debug, Clone)] 6 | pub struct UpstreamDifficultyConfig { 7 | pub channel_diff_update_interval: u32, 8 | pub channel_nominal_hashrate: f32, 9 | } 10 | 11 | use super::super::error::ProxyResult; 12 | use binary_sv2::u256_from_int; 13 | use roles_logic_sv2::{ 14 | mining_sv2::UpdateChannel, parsers::Mining, utils::Mutex, Error as RolesLogicError, 15 | }; 16 | use std::{sync::Arc, time::Duration}; 17 | use tracing::error; 18 | 19 | impl Upstream { 20 | /// this function checks if the elapsed time since the last update has surpassed the config 21 | pub(super) async fn try_update_hashrate(self_: Arc>) -> ProxyResult<'static, ()> { 22 | let (channel_id_option, diff_mgmt, tx_message) = self_ 23 | .safe_lock(|u| (u.channel_id, u.difficulty_config.clone(), u.sender.clone())) 24 | .map_err(|_e| Error::TranslatorDiffConfigMutexPoisoned)?; 25 | let channel_id = channel_id_option.ok_or(super::super::error::Error::RolesSv2Logic( 26 | RolesLogicError::NotFoundChannelId, 27 | ))?; 28 | let (timeout, new_hashrate) = diff_mgmt 29 | .safe_lock(|d| (d.channel_diff_update_interval, d.channel_nominal_hashrate)) 30 | .map_err(|_| Error::TranslatorDiffConfigMutexPoisoned)?; 31 | // UPDATE CHANNEL 32 | let update_channel = UpdateChannel { 33 | channel_id, 34 | nominal_hash_rate: new_hashrate, 35 | maximum_target: u256_from_int(u64::MAX), 36 | }; 37 | let message = Mining::UpdateChannel(update_channel); 38 | 39 | if tx_message.send(message).await.is_err() { 40 | error!("Failed to send message"); 41 | return Err(Error::AsyncChannelError); 42 | } 43 | tokio::time::sleep(Duration::from_secs(timeout as u64)).await; 44 | Ok(()) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/translator/upstream/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod diff_management; 2 | #[allow(clippy::module_inception)] 3 | pub mod upstream; 4 | pub use upstream::Upstream; 5 | 
mod task_manager; 6 | -------------------------------------------------------------------------------- /src/translator/upstream/task_manager.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::shared::utils::AbortOnDrop; 4 | use roles_logic_sv2::utils::Mutex; 5 | use tokio::sync::mpsc; 6 | use tracing::warn; 7 | 8 | #[derive(Debug)] 9 | #[allow(dead_code)] 10 | enum Task { 11 | DiffManagment(AbortOnDrop), 12 | MainLoop(AbortOnDrop), 13 | HandleSubmit(AbortOnDrop), 14 | } 15 | 16 | pub struct TaskManager { 17 | send_task: mpsc::Sender, 18 | abort: Option, 19 | } 20 | 21 | impl TaskManager { 22 | pub fn initialize() -> Arc> { 23 | let (sender, mut receiver) = mpsc::channel(10); 24 | let handle = tokio::task::spawn(async move { 25 | let mut tasks = vec![]; 26 | while let Some(task) = receiver.recv().await { 27 | tasks.push(task); 28 | } 29 | warn!("Translator upstream task manager stopped, keep alive tasks"); 30 | loop { 31 | tokio::time::sleep(std::time::Duration::from_secs(1000)).await; 32 | } 33 | }); 34 | Arc::new(Mutex::new(Self { 35 | send_task: sender, 36 | abort: Some(handle.into()), 37 | })) 38 | } 39 | 40 | pub fn get_aborter(&mut self) -> Option { 41 | self.abort.take() 42 | } 43 | pub async fn add_diff_managment( 44 | self_: Arc>, 45 | abortable: AbortOnDrop, 46 | ) -> Result<(), ()> { 47 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 48 | send_task 49 | .send(Task::DiffManagment(abortable)) 50 | .await 51 | .map_err(|_| ()) 52 | } 53 | pub async fn add_main_loop(self_: Arc>, abortable: AbortOnDrop) -> Result<(), ()> { 54 | let send_task = self_.safe_lock(|s| s.send_task.clone()).unwrap(); 55 | send_task 56 | .send(Task::MainLoop(abortable)) 57 | .await 58 | .map_err(|_| ()) 59 | } 60 | pub async fn add_handle_submit( 61 | self_: Arc>, 62 | abortable: AbortOnDrop, 63 | ) -> Result<(), ()> { 64 | let send_task = self_.safe_lock(|s| 
s.send_task.clone()).unwrap(); 65 | send_task 66 | .send(Task::HandleSubmit(abortable)) 67 | .await 68 | .map_err(|_| ()) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/translator/utils.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::VecDeque, 3 | sync::{atomic::AtomicBool, Arc}, 4 | }; 5 | 6 | use crate::{ 7 | proxy_state::{DownstreamType, ProxyState}, 8 | translator::error::Error, 9 | }; 10 | use binary_sv2::Sv2DataType; 11 | use bitcoin::{ 12 | block::{Header, Version}, 13 | hashes::{sha256d, Hash as BHash}, 14 | hex::DisplayHex, 15 | BlockHash, CompactTarget, 16 | }; 17 | use lazy_static::lazy_static; 18 | use roles_logic_sv2::utils::Mutex; 19 | use sv1_api::{client_to_server, server_to_client::Notify}; 20 | use tracing::{debug, error, info}; 21 | 22 | use super::downstream::Downstream; 23 | lazy_static! { 24 | pub static ref SHARE_TIMESTAMPS: Arc>> = 25 | Arc::new(Mutex::new(VecDeque::with_capacity(70))); 26 | pub static ref IS_RATE_LIMITED: AtomicBool = AtomicBool::new(false); 27 | static ref SHARE_COUNTS: Arc>> = 28 | Arc::new(Mutex::new(std::collections::HashMap::new())); 29 | } 30 | 31 | /// Checks if a share can be sent upstream based on a rate limit of 70 shares per minute. 32 | /// Returns `true` if the share can be sent, `false` if the limit is exceeded. 
33 | pub async fn check_share_rate_limit(downstream: Arc>) { 34 | let mut interval = tokio::time::interval(std::time::Duration::from_secs(1)); 35 | let mut last_update = tokio::time::Instant::now(); // Track last difficulty update 36 | let mut rate_limit_hit_count = 0; 37 | 38 | loop { 39 | interval.tick().await; 40 | let now = tokio::time::Instant::now(); 41 | let count = SHARE_TIMESTAMPS 42 | .safe_lock(|timestamps| { 43 | while let Some(&front) = timestamps.front() { 44 | if now.duration_since(front).as_secs() >= 60 { 45 | timestamps.pop_front(); 46 | } else { 47 | break; 48 | } 49 | } 50 | timestamps.len() 51 | }) 52 | .unwrap_or_else(|e| { 53 | error!("Failed to lock SHARE_TIMESTAMPS: {:?}", e); 54 | ProxyState::update_downstream_state(DownstreamType::TranslatorDownstream); 55 | 0 56 | }); 57 | 58 | let is_limited = count >= 70; 59 | IS_RATE_LIMITED.store(is_limited, std::sync::atomic::Ordering::SeqCst); 60 | 61 | if is_limited { 62 | rate_limit_hit_count += 1; 63 | } else { 64 | rate_limit_hit_count = 0; 65 | } 66 | 67 | if rate_limit_hit_count >= 5 && now.duration_since(last_update).as_secs() >= 2 { 68 | debug!("Rate limited. 
Updating difficulty"); 69 | if let Err(e) = Downstream::try_update_difficulty_settings(&downstream).await { 70 | error!("Failed to update difficulty: {e}"); 71 | } 72 | last_update = now; 73 | IS_RATE_LIMITED.store(false, std::sync::atomic::Ordering::SeqCst); 74 | rate_limit_hit_count = 0; 75 | } 76 | } 77 | } 78 | 79 | /// Checks if a share can be sent by checking if rate is limited 80 | pub fn allow_submit_share() -> crate::translator::error::ProxyResult<'static, bool> { 81 | // Check if rate-limited 82 | let is_rate_limited = IS_RATE_LIMITED.load(std::sync::atomic::Ordering::SeqCst); 83 | 84 | if is_rate_limited { 85 | return Ok(false); // Rate limit exceeded, don’t send 86 | } 87 | 88 | SHARE_TIMESTAMPS 89 | .safe_lock(|timestamps| { 90 | timestamps.push_back(tokio::time::Instant::now()); 91 | }) 92 | .map_err(|e| { 93 | error!("Failed to lock SHARE_TIMESTAMPS: {:?}", e); 94 | Error::TranslatorDiffConfigMutexPoisoned 95 | })?; 96 | 97 | Ok(true) // Share can be sent 98 | } 99 | 100 | pub fn validate_share( 101 | request: &client_to_server::Submit<'static>, 102 | recent_notifies: &VecDeque>, 103 | difficulty: f32, 104 | extranonce1: Vec, 105 | version_rolling_mask: Option, 106 | ) -> bool { 107 | let recent_notifies = recent_notifies.clone(); 108 | let matching_job = recent_notifies 109 | .iter() 110 | .find(|notify| notify.job_id == request.job_id); 111 | 112 | let job = match matching_job { 113 | Some(job) => job, 114 | None => { 115 | error!( 116 | "Share rejected: Job ID {} not found in recent notify msgs", 117 | request.job_id 118 | ); 119 | return false; 120 | } 121 | }; 122 | // Check job ID match 123 | if request.job_id != job.job_id { 124 | error!("Share rejected: Job ID mismatch"); 125 | return false; 126 | } 127 | 128 | let prev_hash_vec: Vec = job.prev_hash.clone().into(); 129 | let prev_hash = binary_sv2::U256::from_vec_(prev_hash_vec).unwrap(); 130 | let mut merkle_branch = Vec::new(); 131 | for branch in &job.merkle_branch { 132 | 
merkle_branch.push(branch.0.to_vec()); 133 | } 134 | 135 | let mut extranonce = Vec::new(); 136 | extranonce.extend_from_slice(extranonce1.as_ref()); 137 | extranonce.extend_from_slice(request.extra_nonce2.0.as_ref()); 138 | let extranonce: &[u8] = extranonce.as_ref(); 139 | 140 | let job_version = job.version.0; 141 | let request_version = request 142 | .version_bits 143 | .clone() 144 | .map(|vb| vb.0) 145 | .unwrap_or(job_version); 146 | let mask = version_rolling_mask 147 | .unwrap_or(sv1_api::utils::HexU32Be(0x1FFFE000_u32)) 148 | .0; 149 | let version = (job_version & !mask) | (request_version & mask); 150 | 151 | let mut hash = get_hash( 152 | request.nonce.0, 153 | version, 154 | request.time.0, 155 | extranonce, 156 | job, 157 | roles_logic_sv2::utils::u256_to_block_hash(prev_hash), 158 | merkle_branch, 159 | ); 160 | 161 | hash.reverse(); //convert to little-endian 162 | info!("Hash: {:?}", hash.to_vec().as_hex()); 163 | let target = Downstream::difficulty_to_target(difficulty); 164 | info!("Target: {:?}", target.to_vec().as_hex()); 165 | hash <= target 166 | } 167 | 168 | pub fn get_hash( 169 | nonce: u32, 170 | version: u32, 171 | ntime: u32, 172 | extranonce: &[u8], 173 | job: &Notify, 174 | prev_hash: BlockHash, 175 | merkle_path: Vec>, 176 | ) -> [u8; 32] { 177 | // Construct coinbase 178 | let mut coinbase = Vec::new(); 179 | coinbase.extend_from_slice(job.coin_base1.as_ref()); 180 | coinbase.extend_from_slice(extranonce); 181 | coinbase.extend_from_slice(job.coin_base2.as_ref()); 182 | 183 | // Calculate the Merkle root 184 | let coinbase_hash = ::hash(&coinbase); 185 | let mut merkle_root = coinbase_hash.to_byte_array(); 186 | 187 | for path in merkle_path { 188 | let mut combined = Vec::with_capacity(64); 189 | combined.extend_from_slice(&merkle_root); 190 | combined.extend_from_slice(path.as_ref()); 191 | merkle_root = ::hash(&combined).to_byte_array(); 192 | } 193 | 194 | // Construct the block header 195 | let header = Header { 196 | version: 
Version::from_consensus(version.try_into().unwrap()), 197 | prev_blockhash: prev_hash, 198 | merkle_root: bitcoin::TxMerkleNode::from_byte_array(merkle_root), 199 | time: ntime, 200 | bits: CompactTarget::from_consensus(job.bits.0), 201 | nonce, 202 | }; 203 | 204 | // Calculate the block hash 205 | let block_hash: [u8; 32] = header.block_hash().to_raw_hash().to_byte_array(); 206 | block_hash 207 | } 208 | 209 | // Update share count for each miner 210 | pub fn update_share_count(connection_id: u32) { 211 | SHARE_COUNTS 212 | .safe_lock(|share_counts| { 213 | let now = tokio::time::Instant::now(); 214 | if let Some((count, last_update)) = share_counts.get_mut(&connection_id) { 215 | if now.duration_since(*last_update) < std::time::Duration::from_secs(60) { 216 | *count += 1; 217 | } else { 218 | *count = 1; 219 | *last_update = now; 220 | } 221 | } else { 222 | share_counts.insert(connection_id, (1, now)); 223 | } 224 | }) 225 | .unwrap_or_else(|_| { 226 | error!("Failed to lock SHARE_COUNTS"); 227 | ProxyState::update_downstream_state(DownstreamType::TranslatorDownstream) 228 | }); 229 | } 230 | 231 | // Get share count for the last 60 secs 232 | pub fn get_share_count(connection_id: u32) -> f32 { 233 | let now = tokio::time::Instant::now(); 234 | let share_counts = SHARE_COUNTS 235 | .safe_lock(|share_counts| { 236 | if let Some((count, last_update)) = share_counts.get(&connection_id) { 237 | if now.duration_since(*last_update) < tokio::time::Duration::from_secs(60) { 238 | *count as f32 // Shares per minute 239 | } else { 240 | 0.0 // More than 60 seconds since the last share, so return 0. 
241 | } 242 | } else { 243 | 0.0 244 | } 245 | }) 246 | .unwrap_or_else(|_| { 247 | error!("Failed to lock SHARE_COUNTS"); 248 | ProxyState::update_downstream_state(DownstreamType::TranslatorDownstream); 249 | 0.0 250 | }); 251 | share_counts 252 | } 253 | 254 | // /// currently the pool only supports 16 bytes exactly for its channels 255 | // /// to use but that may change 256 | // pub fn proxy_extranonce1_len( 257 | // channel_extranonce2_size: usize, 258 | // downstream_extranonce2_len: usize, 259 | // ) -> usize { 260 | // // full_extranonce_len - pool_extranonce1_len - miner_extranonce2 = tproxy_extranonce1_len 261 | // channel_extranonce2_size - downstream_extranonce2_len 262 | // } 263 | --------------------------------------------------------------------------------