├── bin
│   ├── tracker
│   │   ├── Cargo.toml
│   │   └── src
│   │       ├── cli.rs
│   │       └── main.rs
│   ├── server
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── main.rs
│   └── processor
│       ├── Cargo.toml
│       └── src
│           └── main.rs
├── parachains.json
├── routes
│   ├── mock-out
│   │   └── out
│   │       └── placeholder
│   ├── mock-parachains.json
│   ├── config.toml
│   ├── Cargo.toml
│   ├── src
│   │   ├── registry.rs
│   │   ├── extend_subscription.rs
│   │   ├── lib.rs
│   │   ├── consumption.rs
│   │   └── register.rs
│   └── tests
│       ├── registry.rs
│       ├── register.rs
│       ├── mock.rs
│       ├── extend_subscription.rs
│       └── consumption.rs
├── .gitignore
├── artifacts
│   └── metadata.scale
├── scripts
│   ├── process.sh
│   ├── reset_env.sh
│   ├── watchdog.sh
│   └── init.sh
├── types
│   ├── Cargo.toml
│   └── src
│       └── lib.rs
├── config.toml
├── Cargo.toml
├── .rustfmt.toml
├── shared
│   ├── Cargo.toml
│   └── src
│       ├── config.rs
│       ├── registry.rs
│       ├── lib.rs
│       ├── chaindata.rs
│       ├── consumption.rs
│       └── payment.rs
├── .github
│   └── workflows
│       └── tests.yaml
├── registry.json
├── README.md
├── src
│   └── api.rs
└── chaindata.json

/bin/tracker/Cargo.toml:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/parachains.json:
--------------------------------------------------------------------------------
1 | [
2 | 
3 | ]
--------------------------------------------------------------------------------
/routes/mock-out/out/placeholder:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/routes/mock-parachains.json:
--------------------------------------------------------------------------------
1 | []
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | /out
3 | parachains.json
--------------------------------------------------------------------------------
/artifacts/metadata.scale:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BoredApe8461/CorespaceWeigher/HEAD/artifacts/metadata.scale
--------------------------------------------------------------------------------
/scripts/process.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | sh -c 'RUST_LOG=INFO ./target/release/processor' >> logs/processor.out 2>&1
--------------------------------------------------------------------------------
/scripts/reset_env.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | TRACKER="./target/release/tracker"
4 | WATCHDOG="scripts/watchdog.sh"
5 | 
6 | PIDS=$(pgrep -f "$TRACKER|$WATCHDOG")
7 | 
8 | if [ -z "$PIDS" ]; then
9 |     echo "Process not found."
10 | else 11 | # Kill each process 12 | for PID in $PIDS; do 13 | kill -9 $PID 14 | done 15 | fi 16 | -------------------------------------------------------------------------------- /bin/server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "server" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | rocket = { version = "0.5.0", features=["json"] } 10 | rocket_cors = "0.6.0" 11 | 12 | routes = { path = "../../routes" } 13 | -------------------------------------------------------------------------------- /types/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "types" 3 | version = "0.1.0" 4 | authors.workspace = true 5 | edition.workspace = true 6 | repository.workspace = true 7 | license.workspace = true 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | serde = "1.0.193" 13 | rocket = { version = "0.5.0", features=["json"] } 14 | 15 | -------------------------------------------------------------------------------- /config.toml: -------------------------------------------------------------------------------- 1 | output_directory = "out/" 2 | registry = "registry.json" 3 | chaindata = "chaindata.json" 4 | outputs = 2 5 | 6 | [payment_info] 7 | rpc_url = "wss://rococo-rpc.polkadot.io" 8 | receiver = "5DADsnBXr5DXiEAjdJvruf6c7ZSUR8iXUTATQqJfheGLiEVm" 9 | cost = "1000000000" #0.001 ROC 10 | # 7,890,000 is 3 months in seconds. 11 | subscription_duration = 7890000 12 | # 604800 is 1 week in seconds. 13 | renewal_period=604800 14 | 15 | -------------------------------------------------------------------------------- /bin/tracker/src/cli.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | 3 | /// Arguments for the tracker. 4 | #[derive(Parser, Debug)] 5 | #[command(author, version, about, long_about = None)] 6 | pub struct Args { 7 | /// Specifies the index of the RPC to be used. 8 | /// 9 | /// Multiple RPCs may be provided for each parachain on Kusama and Polkadot. 10 | /// `rpc_index` selects which RPC from the list will be used. 11 | #[arg(short, long)] 12 | pub rpc_index: usize, 13 | } 14 | -------------------------------------------------------------------------------- /scripts/watchdog.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FILE_TO_WATCH="logs/tracker-logs-$1.out" 4 | 5 | WS_CONNECTION_ERROR="Failed to read message: Networking or low-level protocol error: WebSocket connection error: connection closed" 6 | 7 | # Continuously watch the file 8 | tail -f "$FILE_TO_WATCH" | while read LINE 9 | do 10 | echo "$LINE" | grep "$WS_CONNECTION_ERROR" > /dev/null 11 | if [ $? 
= 0 ] 12 | then 13 | ./scripts/init.sh 14 | fi 15 | done 16 | -------------------------------------------------------------------------------- /bin/processor/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "processor" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | log = "0.4" 10 | shared = { path = "../../shared" } 11 | env_logger = "0.10.1" 12 | polkadot-core-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } 13 | types = { path = "../../types" } 14 | -------------------------------------------------------------------------------- /routes/config.toml: -------------------------------------------------------------------------------- 1 | output_directory = "mock-out" 2 | registry = "mock-parachains.json" 3 | chaindata = "../chaindata.json" 4 | free_mode = true 5 | outputs = 1 6 | 7 | [payment_info] 8 | rpc_url = "wss://rococo-rpc.polkadot.io" 9 | receiver = "5DADsnBXr5DXiEAjdJvruf6c7ZSUR8iXUTATQqJfheGLiEVm" 10 | cost = "1000000000" #0.001 ROC 11 | # 2,419,200 is 4 weeks in seconds. 12 | subscription_duration=2419200 13 | 14 | # 604800 is 1 week in seconds. 15 | renewal_period=604800 16 | 17 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace.package] 2 | authors = ["RegionX "] 3 | edition = "2021" 4 | 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [[bin]] 8 | name = "consumption-tracker" 9 | path = "src/main.rs" 10 | 11 | [[bin]] 12 | name = "server" 13 | path = "src/server.rs" 14 | 15 | [dependencies] 16 | csv = "1.3.0" 17 | parity-scale-codec = "3.6.5" 18 | rocket = "0.5.0" 19 | rocket_cors = "0.6.0" 20 | serde = "1.0.193" 21 | serde_json = "1.0.108" 22 | subxt = "0.32.1" 23 | subxt-metadata = "0.32.1" 24 | tokio = { version = "1", features = ["full"] } 25 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Basic 2 | edition = "2021" 3 | hard_tabs = true 4 | max_width = 100 5 | use_small_heuristics = "Max" 6 | # Imports 7 | imports_granularity = "Crate" 8 | reorder_imports = true 9 | # Consistency 10 | newline_style = "Unix" 11 | # Misc 12 | chain_width = 80 13 | spaces_around_ranges = false 14 | binop_separator = "Back" 15 | reorder_impl_items = false 16 | match_arm_leading_pipes = "Preserve" 17 | match_arm_blocks = false 18 | match_block_trailing_comma = true 19 | trailing_comma = "Vertical" 20 | trailing_semicolon = false 21 | use_field_init_shorthand = true 22 | # Format comments 23 | comment_width = 100 24 | wrap_comments = true 25 | -------------------------------------------------------------------------------- /shared/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "shared" 3 | version = "0.1.0" 4 | authors.workspace = true 5 | edition.workspace = true 6 | repository.workspace = true 7 | license.workspace = true 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | csv = "1.3.0" 13 | log = "0.4" 14 | toml = "0.8.8" 15 | serde = "1.0.193" 16 | serde_json = 
"1.0.108" 17 | subxt = "0.32.1" 18 | polkadot-core-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } 19 | parity-scale-codec = "3.6.9" 20 | 21 | types = { path = "../types" } 22 | 23 | [features] 24 | test-utils = [] 25 | -------------------------------------------------------------------------------- /routes/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "routes" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | log = "0.4" 10 | chrono = "0.4.33" 11 | rocket = { version = "0.5.0", features=["json"] } 12 | rocket_cors = "0.6.0" 13 | serde = "1.0.193" 14 | serde_json = "1.0.108" 15 | polkadot-core-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } 16 | 17 | types = { path = "../types" } 18 | shared = { path = "../shared", features = ["test-utils"]} 19 | 20 | [dev-dependencies] 21 | maplit = "1.0.2" 22 | scopeguard = "1.2.0" 23 | -------------------------------------------------------------------------------- /scripts/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TRACKER_LOGS_0="logs/tracker-logs-0.out" 4 | TRACKER_LOGS_1="logs/tracker-logs-1.out" 5 | 6 | TRACKER="./target/release/tracker" 7 | 8 | reset_env() { 9 | PIDS=$(pgrep -f "$TRACKER") 10 | 11 | if [ -z "$PIDS" ]; then 12 | echo "Process not found." 13 | else 14 | # Kill each process 15 | for PID in $PIDS; do 16 | kill -9 $PID 17 | done 18 | fi 19 | } 20 | 21 | reset_env 22 | 23 | # start the tracker again 24 | nohup sh -c 'RUST_LOG=INFO ./target/release/tracker --rpc-index 0' > $TRACKER_LOGS_0 2>&1 & 25 | nohup sh -c 'RUST_LOG=INFO ./target/release/tracker --rpc-index 1' > $TRACKER_LOGS_1 2>&1 & 26 | -------------------------------------------------------------------------------- /routes/src/registry.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use crate::Error; 17 | use rocket::get; 18 | use shared::registry::registered_paras; 19 | 20 | /// Query all the registered parachains. 
21 | #[get("/registry")] 22 | pub fn registry() -> Result { 23 | let registered_paras = registered_paras(); 24 | 25 | serde_json::to_string(®istered_paras).map_err(|_| Error::InvalidData) 26 | } 27 | -------------------------------------------------------------------------------- /.github/workflows/tests.yaml: -------------------------------------------------------------------------------- 1 | name: Rust tests 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | push: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | install: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout the source code 16 | uses: actions/checkout@v3 17 | 18 | - name: Install & display rust toolchain 19 | run: | 20 | rustup show 21 | rustup toolchain install nightly 22 | rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu 23 | rustup component add clippy --toolchain nightly-x86_64-unknown-linux-gnu 24 | rustup show 25 | 26 | - name: Check targets are installed correctly 27 | run: rustup target list --installed 28 | 29 | format: 30 | needs: install 31 | runs-on: ubuntu-latest 32 | steps: 33 | - name: Use cashed cargo 34 | uses: actions/cache@v3 35 | with: 36 | path: ~/.cargo 37 | key: ${{ runner.os }}-rust-${{ hashFiles('rust-toolchain.toml') }} 38 | 39 | - name: Checkout the source code 40 | uses: actions/checkout@v3 41 | 42 | - name: Ensure the rust code is formatted 43 | run: cargo fmt --all --check 44 | -------------------------------------------------------------------------------- /bin/server/src/main.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | /// Web API for interacting with the Consumption Tracker service. 17 | /// 18 | /// This API exposes two main endpoints: 19 | /// - `/consumption`: Used to query consumption data associated with a parachain. 20 | /// - `/register`: Used to register a parachain for consumption tracking. 21 | use rocket_cors::CorsOptions; 22 | use routes::{ 23 | consumption::consumption, extend_subscription::extend_subscription, register::register_para, 24 | registry::registry, 25 | }; 26 | 27 | #[macro_use] 28 | extern crate rocket; 29 | 30 | #[launch] 31 | fn rocket() -> _ { 32 | rocket::build() 33 | .attach(CorsOptions::default().to_cors().unwrap()) 34 | 35 | .mount("/", routes![consumption, register_para, registry, extend_subscription]) 36 | 37 | } 38 | -------------------------------------------------------------------------------- /routes/tests/registry.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 
2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use rocket::{ 17 | http::Status, 18 | local::blocking::{Client, LocalResponse}, 19 | routes, 20 | }; 21 | use routes::registry::registry; 22 | 23 | use shared::chaindata::get_para; 24 | use types::{Parachain, RelayChain::*}; 25 | 26 | mod mock; 27 | use mock::MockEnvironment; 28 | 29 | #[test] 30 | fn getting_registry_works() { 31 | MockEnvironment::new().execute_with(|| { 32 | let rocket = rocket::build().mount("/", routes![registry]); 33 | let client = Client::tracked(rocket).expect("valid rocket instance"); 34 | 35 | let response = client.get("/registry").dispatch(); 36 | assert_eq!(response.status(), Status::Ok); 37 | 38 | 39 | let mut registry = parse_ok_response(response); 40 | registry.sort_by_key(|p| p.para_id); 41 | 42 | assert_eq!( 43 | registry, 44 | vec![get_para(Polkadot, 2000).unwrap(), get_para(Polkadot, 2004).unwrap()] 45 | ); 46 | }); 47 | } 48 | 49 | fn parse_ok_response<'a>(response: LocalResponse<'a>) -> Vec { 50 | let body = response.into_string().unwrap(); 51 | serde_json::from_str(&body).expect("can't parse value") 52 | } 53 | -------------------------------------------------------------------------------- /registry.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "Acala", 4 | "rpcs": [ 5 | "wss://acala-rpc.dwellir.com", 6 | "wss://acala-rpc-0.aca-api.network", 7 | "wss://acala-rpc-1.aca-api.network", 8 | "wss://acala-rpc-3.aca-api.network/ws" 9 | ], 10 | "para_id": 2000, 11 | "relay_chain": "Polkadot", 12 | 13 | "expiry_timestamp": 1709740246 14 | }, 15 | { 16 | "name": "Polkadot", 17 | "rpcs": [ 18 | "wss://rpc-polkadot.luckyfriday.io", 19 | "wss://polkadot-rpc.dwellir.com" 20 | ], 21 | "para_id": 0, 22 | "relay_chain": "Polkadot", 23 | "expiry_timestamp": 1709740246 24 | }, 25 | { 26 | "name": "Moonbeam", 27 | "rpcs": [ 28 | "wss://moonbeam-rpc.dwellir.com", 29 | "wss://wss.api.moonbeam.network", 30 | "wss://1rpc.io/glmr", 31 | "wss://moonbeam.unitedbloc.com" 32 | ], 33 | "para_id": 2004, 34 | "relay_chain": "Polkadot", 35 | "expiry_timestamp": 1709740246 36 | }, 37 | { 38 | "name": "Astar", 39 | "rpcs": [ 40 | "wss://astar.public.curie.radiumblock.co/ws", 41 | "wss://rpc.astar.network", 42 | "wss://astar-rpc.dwellir.com", 43 | "wss://1rpc.io/astr" 44 | ], 45 | "para_id": 2006, 46 | "relay_chain": "Polkadot", 47 | "expiry_timestamp": 1709740246 48 | }, 49 | { 50 | "name": "HydraDX", 51 | "rpcs": [ 52 | "wss://hydradx-rpc.dwellir.com", 53 | "wss://rpc.hydradx.cloud" 54 | ], 55 | "para_id": 2034, 56 | "relay_chain": "Polkadot", 57 | "expiry_timestamp": 1709740246 58 | }, 59 | { 60 | "name": "Zeitgeist", 61 | "rpcs": [ 62 | "wss://zeitgeist-rpc.dwellir.com", 63 | "wss://main.rpc.zeitgeist.pm/ws" 64 | ], 65 | "para_id": 2092, 66 | "relay_chain": "Polkadot", 67 | "expiry_timestamp": 1709740246 68 | } 69 | ] 
-------------------------------------------------------------------------------- /shared/src/config.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use subxt::utils::AccountId32; 17 | use types::Timestamp; 18 | 19 | const CONFIG_FILE: &str = "config.toml"; 20 | 21 | #[derive(serde::Deserialize, Clone)] 22 | pub struct PaymentInfo { 23 | /// The rpc url from the chain where the payment is required to occur. 24 | pub rpc_url: String, 25 | /// The account that the payment should be sent to. 26 | pub receiver: AccountId32, 27 | /// The cost of the payment. 28 | // 29 | // Defined as a `String` since the `toml` crate has issues parsing `u128`. 30 | pub cost: String, 31 | /// This defines the duration that a single subscription payment will cover. 32 | pub subscription_duration: Timestamp, 33 | 34 | /// Defines how much before the expiry can the subscription be renewed. 35 | pub renewal_period: Timestamp, 36 | } 37 | 38 | #[derive(serde::Deserialize)] 39 | pub struct Config { 40 | /// Path to the root output directory. 41 | pub output_directory: String, 42 | /// Path to the registry file. 43 | pub registry: String, 44 | /// Path to the chaindata file. 45 | pub chaindata: String, 46 | /// The payment configuration. 47 | pub payment_info: Option, 48 | /// The Number of distinct output directories. 49 | pub outputs: usize, 50 | } 51 | 52 | pub fn config() -> Config { 53 | let config_str = std::fs::read_to_string(CONFIG_FILE).expect("Failed to read config file"); 54 | toml::from_str(&config_str).expect("Failed to parse config file") 55 | } 56 | 57 | pub fn output_directory(rpc_index: Option) -> String { 58 | let output_dir = config().output_directory.trim_end_matches('/').to_string(); 59 | 60 | if let Some(rpc_index) = rpc_index { 61 | format!("{}/out-{}", output_dir, rpc_index) 62 | } else { 63 | format!("{}/out", output_dir) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /shared/src/registry.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 
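// The registry is a JSON file (its path comes from `config().registry`) holding the list of
// registered `Parachain`s. The helpers below read the whole file, look up a single para, and
// rewrite the file in place whenever the registry changes.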
15 | 16 | use crate::config::config; 17 | use std::{ 18 | fs::{File, OpenOptions}, 19 | io::{Read, Seek, Write}, 20 | }; 21 | use types::{ParaId, Parachain, RelayChain}; 22 | 23 | pub fn registered_paras() -> Vec { 24 | let mut registry = get_registry(); 25 | let mut content = String::new(); 26 | 27 | // If this fails it simply means that the registry is empty. 28 | let _ = registry.read_to_string(&mut content); 29 | let paras: Vec = serde_json::from_str(&content).expect("Failed to serialize"); 30 | 31 | paras 32 | } 33 | 34 | pub fn registered_para(relay_chain: RelayChain, para_id: ParaId) -> Option { 35 | registered_paras() 36 | .iter() 37 | .find(|para| para.relay_chain == relay_chain && para.para_id == para_id) 38 | .cloned() 39 | } 40 | 41 | pub fn update_registry(paras: Vec) -> Result<(), String> { 42 | let mut registry = get_registry(); 43 | let json_data = serde_json::to_string_pretty(¶s).map_err(|_| "Failed to serialize")?; 44 | 45 | registry.set_len(0).map_err(|_| "Failed to truncate file")?; 46 | registry 47 | .seek(std::io::SeekFrom::Start(0)) 48 | .map_err(|_| "Failed to seek to the beginning")?; 49 | 50 | registry 51 | .write_all(json_data.as_bytes()) 52 | .map_err(|_| "Failed to write into file")?; 53 | 54 | Ok(()) 55 | } 56 | 57 | fn get_registry() -> File { 58 | match OpenOptions::new().read(true).write(true).open(config().registry) { 59 | Ok(file) => file, 60 | Err(_) => init_registry(), 61 | } 62 | } 63 | 64 | pub fn init_registry() -> File { 65 | let mut registry = 66 | File::create(config().registry).expect("Failed to create registered para file"); 67 | // An empty vector 68 | registry.write_all(b"[]").expect("Failed to write into registered para file"); 69 | 70 | registry 71 | } 72 | -------------------------------------------------------------------------------- /bin/processor/src/main.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 
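// The processor merges the per-RPC output files produced by the tracker into a single data
// set per parachain: entries are deduplicated by block number, written out in one batch, and
// the per-RPC files are deleted afterwards.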
15 | 16 | use shared::{ 17 | config::config, 18 | consumption::{delete_consumption, get_consumption, write_batch_consumption}, 19 | registry::registered_paras, 20 | }; 21 | use std::collections::BTreeMap; 22 | use types::WeightConsumption; 23 | 24 | const LOG_TARGET: &str = "processor"; 25 | 26 | fn main() { 27 | env_logger::init(); 28 | 29 | let outputs = config().outputs; 30 | let paras = registered_paras(); 31 | 32 | paras.iter().for_each(|para| { 33 | let mut processed = BTreeMap::new(); 34 | 35 | log::info!( 36 | target: LOG_TARGET, 37 | "{}-{} - Processing consumption.", 38 | para.relay_chain, 39 | para.para_id, 40 | ); 41 | 42 | (0..outputs).for_each(|output_index| { 43 | let consumption = if let Ok(data) = get_consumption(para.clone(), Some(output_index)) { 44 | data 45 | } else { 46 | log::error!( 47 | target: LOG_TARGET, 48 | "{}-{} - Failed to get consumption.", 49 | para.relay_chain, 50 | para.para_id, 51 | ); 52 | vec![] 53 | }; 54 | 55 | consumption.into_iter().for_each(|data| { 56 | processed.entry(data.block_number).or_insert(data); 57 | }); 58 | }); 59 | 60 | let processed: Vec = processed.values().cloned().collect(); 61 | 62 | log::info!( 63 | target: LOG_TARGET, 64 | "{}-{} - Writing processed consumption. Total blocks tracked: {}", 65 | para.relay_chain, 66 | para.para_id, 67 | processed.len() 68 | ); 69 | 70 | if let Err(e) = write_batch_consumption(para.clone(), processed) { 71 | log::error!( 72 | target: LOG_TARGET, 73 | "{}-{} - Failed to write batch consumption: {:?}", 74 | para.relay_chain, 75 | para.para_id, 76 | e, 77 | ); 78 | 79 | return; 80 | } 81 | 82 | (0..outputs).for_each(|output_index| delete_consumption(para.clone(), output_index)); 83 | }); 84 | } 85 | -------------------------------------------------------------------------------- /shared/src/lib.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use std::{ 17 | process::Command, 18 | time::{SystemTime, UNIX_EPOCH}, 19 | }; 20 | use types::Timestamp; 21 | 22 | pub mod chaindata; 23 | pub mod config; 24 | pub mod consumption; 25 | pub mod payment; 26 | pub mod registry; 27 | 28 | #[cfg(feature = "test-utils")] 29 | use crate::config::output_directory; 30 | 31 | const LOG_TARGET: &str = "shared"; 32 | 33 | /// Rounds a number to a fixed number of decimals. 34 | pub fn round_to(number: f32, decimals: i32) -> f32 { 35 | let factor = 10f32.powi(decimals); 36 | (number * factor).round() / factor 37 | } 38 | 39 | /// Returns the current time since UNIX EPOCH. 40 | pub fn current_timestamp() -> Timestamp { 41 | // It is fine to use `unwrap_or_default` since the current time will never be before the UNIX 42 | // EPOCH. 
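	// The value is returned as whole seconds.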
43 | 	SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs()
44 | }
45 | 
46 | 
47 | pub fn init_tracker() {
48 | 	let output = Command::new("./scripts/init.sh").output().expect("Failed to execute command");
49 | 
50 | 	if output.status.success() {
51 | 		log::info!("Successfully reinitialized tracker");
52 | 	} else {
53 | 		let stderr = String::from_utf8_lossy(&output.stderr);
54 | 		log::info!("Failed to reinitialize tracker: {:?}", stderr);
55 | 	}
56 | }
57 | 
58 | // There isn't a good reason to use this other than for testing.
59 | #[cfg(feature = "test-utils")]
60 | pub fn reset_mock_environment() {
61 | 	// Reset the registered paras file:
62 | 	let _registry = registry::init_registry();
63 | 
64 | 	let output_path = output_directory(None);
65 | 	// Remove the output files:
66 | 	let _ = std::fs::create_dir(output_path.clone());
67 | 
68 | 	for entry in std::fs::read_dir(output_path).expect("Failed to read output directory") {
69 | 		let entry = entry.expect("Failed to read entry");
70 | 		let path = entry.path();
71 | 		if path.is_file() {
72 | 			std::fs::remove_file(path).expect("Failed to remove consumption data")
73 | 		}
74 | 	}
75 | }
76 | 
--------------------------------------------------------------------------------
/shared/src/chaindata.rs:
--------------------------------------------------------------------------------
1 | // This file is part of RegionX.
2 | //
3 | // RegionX is free software: you can redistribute it and/or modify
4 | // it under the terms of the GNU General Public License as published by
5 | // the Free Software Foundation, either version 3 of the License, or
6 | // (at your option) any later version.
7 | 
8 | // RegionX is distributed in the hope that it will be useful,
9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 | // GNU General Public License for more details.
12 | 
13 | // You should have received a copy of the GNU General Public License
14 | // along with RegionX. If not, see <https://www.gnu.org/licenses/>.
15 | use crate::config::config;
16 | use serde::{Deserialize, Serialize};
17 | use std::{fs::File, io::Read};
18 | use types::{ParaId, Parachain, RelayChain};
19 | 
20 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Hash)]
21 | pub struct Relay {
22 | 	id: RelayChain,
23 | }
24 | 
25 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Hash)]
26 | pub struct Rpc {
27 | 	url: String,
28 | }
29 | 
30 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Hash)]
31 | struct ChainData {
32 | 	pub name: String,
33 | 	pub para_id: ParaId,
34 | 	pub relay: Relay,
35 | 	pub rpcs: Vec<Rpc>,
36 | }
37 | 
38 | #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
39 | pub enum ChainDataError {
40 | 	ParaNotFound,
41 | }
42 | 
43 | impl From<String> for ChainDataError {
44 | 	fn from(v: String) -> Self {
45 | 		match v.as_str() {
46 | 			"ParaNotFound" => Self::ParaNotFound,
47 | 			_ => panic!("UnknownError"),
48 | 		}
49 | 	}
50 | }
51 | 
52 | /// Get the chaindata of a parachain.
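/// The returned `Parachain` has its name and RPC list taken from the chaindata file, while
/// `expiry_timestamp` is left at its default value.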
53 | pub fn get_para(relay: RelayChain, para_id: ParaId) -> Result { 54 | let mut file = File::open(config().chaindata).expect("ChainData not found"); 55 | let mut content = String::new(); 56 | 57 | file.read_to_string(&mut content).expect("Failed to load chaindata"); 58 | let chaindata: Vec = serde_json::from_str(&content).expect("Failed to serialize"); 59 | 60 | let index = chaindata 61 | .iter() 62 | .position(|para| para.para_id == para_id && para.relay == Relay { id: relay.clone() }) 63 | .ok_or(ChainDataError::ParaNotFound)?; 64 | 65 | let para_chaindata = chaindata.get(index).expect("We just found the index; qed"); 66 | 67 | let rpcs: Vec = para_chaindata.rpcs.clone().into_iter().map(|rpc| rpc.url).collect(); 68 | 69 | let para = Parachain { 70 | relay_chain: relay, 71 | para_id, 72 | name: para_chaindata.name.clone(), 73 | rpcs, 74 | expiry_timestamp: Default::default(), 75 | }; 76 | 77 | Ok(para) 78 | } 79 | -------------------------------------------------------------------------------- /routes/tests/register.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use polkadot_core_primitives::BlockNumber; 17 | use rocket::{ 18 | http::{ContentType, Status}, 19 | local::blocking::{Client, LocalResponse}, 20 | routes, 21 | }; 22 | use routes::{register::register_para, Error}; 23 | use shared::registry::{registered_para, registered_paras}; 24 | use types::RelayChain::*; 25 | 26 | mod mock; 27 | use mock::{mock_para, MockEnvironment}; 28 | 29 | #[test] 30 | fn register_works() { 31 | MockEnvironment::default().execute_with(|| { 32 | let rocket = rocket::build().mount("/", routes![register_para]); 33 | let client = Client::tracked(rocket).expect("valid rocket instance"); 34 | 35 | let para = mock_para(Polkadot, 2001); 36 | 37 | let response = client 38 | .post("/register_para") 39 | .header(ContentType::JSON) 40 | .body(serde_json::to_string(®istration_data).unwrap()) 41 | .dispatch(); 42 | 43 | assert_eq!(response.status(), Status::Ok); 44 | 45 | 46 | let registered = registered_para(Polkadot, 2000).unwrap(); 47 | 48 | // Set the `expiry_timestamp` to the proper value. 
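		// The expiry is assigned server-side at registration time, so the test cannot predict
		// it upfront; copy the actual value over before comparing the structs.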
49 | para.expiry_timestamp = registered.expiry_timestamp; 50 | 51 | // Ensure the parachain is properly registered: 52 | assert_eq!(registered_paras(), vec![para.clone()]); 53 | assert_eq!(registered, para); 54 | }); 55 | } 56 | 57 | #[test] 58 | fn cannot_register_same_para_twice() { 59 | MockEnvironment::default().execute_with(|| { 60 | let rocket = rocket::build().mount("/", routes![register_para]); 61 | let client = Client::tracked(rocket).expect("valid rocket instance"); 62 | 63 | let para = mock_para(Polkadot, 2001); 64 | 65 | let register = client 66 | .post("/register_para") 67 | .header(ContentType::JSON) 68 | .body(serde_json::to_string(®istration_data).unwrap()); 69 | 70 | // Cannot register the same para twice: 71 | assert_eq!(register.clone().dispatch().status(), Status::Ok); 72 | assert_eq!(parse_err_response(register.dispatch()), Error::AlreadyRegistered); 73 | }); 74 | } 75 | 76 | fn parse_err_response<'a>(response: LocalResponse<'a>) -> Error { 77 | let body = response.into_string().unwrap(); 78 | body.into() 79 | } 80 | -------------------------------------------------------------------------------- /routes/src/extend_subscription.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use crate::*; 17 | use polkadot_core_primitives::BlockNumber; 18 | use rocket::{post, serde::json::Json}; 19 | use shared::{ 20 | config::config, 21 | current_timestamp, 22 | payment::validate_registration_payment, 23 | registry::{registered_para, registered_paras, update_registry}, 24 | }; 25 | use types::{ParaId, RelayChain}; 26 | 27 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 28 | #[serde(crate = "rocket::serde")] 29 | pub struct ExtendSubscriptionData { 30 | /// The parachain which is getting its subscription extended. 31 | pub para: (RelayChain, ParaId), 32 | /// The block in which the payment occurred for the specific parachain. 33 | pub payment_block_number: BlockNumber, 34 | } 35 | 36 | /// Extend the subscription of a parachain for resource utilization tracking. 37 | #[post("/extend-subscription", data = "")] 38 | pub async fn extend_subscription(data: Json) -> Result<(), Error> { 39 | let (relay_chain, para_id) = data.para.clone(); 40 | 41 | log::info!( 42 | target: LOG_TARGET, 43 | "{}-{} - Attempting to extend subscription for para", 44 | relay_chain, para_id 45 | ); 46 | 47 | let para = registered_para(relay_chain.clone(), para_id).ok_or(Error::NotRegistered)?; 48 | 49 | let subscription_duration = if let Some(payment_info) = config().payment_info { 50 | if para.expiry_timestamp.saturating_sub(payment_info.renewal_period) > current_timestamp() { 51 | // Cannot renew yet. 
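			// Renewal only opens within `renewal_period` of the expiry; before that the current
			// subscription still has enough remaining time, so the request is rejected.
			// (`Error::AlreadyRegistered` is reused to signal that the subscription is still active.)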
52 | return Err(Error::AlreadyRegistered); 53 | } 54 | 55 | validate_registration_payment( 56 | para.clone(), 57 | payment_info.clone(), 58 | data.payment_block_number, 59 | ) 60 | .await 61 | .map_err(Error::PaymentValidationError)?; 62 | 63 | payment_info.subscription_duration 64 | } else { 65 | Default::default() 66 | }; 67 | 68 | let mut paras = registered_paras(); 69 | 70 | if let Some(para) = paras.iter_mut().find(|p| **p == para) { 71 | para.expiry_timestamp += subscription_duration; 72 | } else { 73 | return Err(Error::NotRegistered); 74 | } 75 | 76 | if let Err(err) = update_registry(paras) { 77 | log::error!( 78 | target: LOG_TARGET, 79 | "{}-{} Failed to extend subscription for para: {:?}", 80 | para.relay_chain, 81 | para.para_id, 82 | err 83 | ); 84 | } else { 85 | #[cfg(not(debug_assertions))] 86 | shared::init_tracker(); 87 | } 88 | 89 | Ok(()) 90 | } 91 | -------------------------------------------------------------------------------- /routes/src/lib.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | //! Web API for interacting with the Consumption Tracker service. 17 | //! 18 | //! This API exposes the following endpoints: 19 | //! - `/consumption`: Used to query consumption data associated with a parachain. 20 | //! - `/register`: Used to register a parachain for consumption tracking. 21 | //! - `/registry`: Used for querying all the registered parachains. 22 | //! - `/extend-subscription`: For extending the subscription of a parachain. 23 | 24 | use rocket::{http::Status, response::Responder, Request, Response}; 25 | use serde::{Deserialize, Serialize}; 26 | use shared::{chaindata::ChainDataError, payment::PaymentError}; 27 | 28 | const LOG_TARGET: &str = "server"; 29 | 30 | #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] 31 | pub enum Error { 32 | /// Cannot register an already registered parachain. 33 | AlreadyRegistered, 34 | /// The specified para is not registered. 35 | NotRegistered, 36 | /// Indicates that the consumption data for the parachain was not found. 37 | /// 38 | /// This should be only encountered if the consumption file has not been generated yet, 39 | /// which is only possible if the parachain has been registered within the last few seconds. 40 | ConsumptionDataNotFound, 41 | /// The stored data is invalid. This should never really happen. 42 | InvalidData, 43 | /// The caller tried to register a parachain without payment. 44 | PaymentRequired, 45 | // An error occured when trying to read parachain's chaindata. 46 | ChainDataError(ChainDataError), 47 | /// An error occured when trying to validate the payment. 
48 | 	PaymentValidationError(PaymentError),
49 | }
50 | 
51 | impl<'r> Responder<'r, 'static> for Error {
52 | 	fn respond_to(self, _: &'r Request<'_>) -> Result<Response<'static>, Status> {
53 | 		let body = format!("{:?}", self);
54 | 		Response::build()
55 | 			.status(Status::InternalServerError)
56 | 			.sized_body(body.len(), std::io::Cursor::new(body))
57 | 			.ok()
58 | 	}
59 | }
60 | 
61 | impl From<String> for Error {
62 | 	fn from(v: String) -> Self {
63 | 		match v.as_str() {
64 | 			"AlreadyRegistered" => Self::AlreadyRegistered,
65 | 			"NotRegistered" => Self::NotRegistered,
66 | 			"ConsumptionDataNotFound" => Self::ConsumptionDataNotFound,
67 | 			"InvalidData" => Self::InvalidData,
68 | 			_ => panic!("UnknownError"),
69 | 		}
70 | 	}
71 | }
72 | 
73 | pub mod consumption;
74 | pub mod extend_subscription;
75 | pub mod register;
76 | pub mod registry;
77 | 
--------------------------------------------------------------------------------
/routes/tests/mock.rs:
--------------------------------------------------------------------------------
1 | // This file is part of RegionX.
2 | //
3 | // RegionX is free software: you can redistribute it and/or modify
4 | // it under the terms of the GNU General Public License as published by
5 | // the Free Software Foundation, either version 3 of the License, or
6 | // (at your option) any later version.
7 | 
8 | // RegionX is distributed in the hope that it will be useful,
9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 | // GNU General Public License for more details.
12 | 
13 | // You should have received a copy of the GNU General Public License
14 | // along with RegionX. If not, see <https://www.gnu.org/licenses/>.
15 | 
16 | #[cfg(test)]
17 | use maplit::hashmap;
18 | use scopeguard::guard;
19 | use shared::{
20 | 	chaindata::get_para, consumption::write_consumption, registry::update_registry,
21 | 	reset_mock_environment,
22 | };
23 | use std::collections::HashMap;
24 | use types::{Parachain, RelayChain::*, WeightConsumption};
25 | 
26 | #[derive(Default)]
27 | pub struct MockEnvironment {
28 | 	pub weight_consumptions: HashMap<Parachain, Vec<WeightConsumption>>,
29 | }
30 | 
31 | impl MockEnvironment {
32 | 	pub fn new() -> Self {
33 | 		// Start with an empty environment.
34 | 		reset_mock_environment();
35 | 
36 | 		// Initialize some mock data:
37 | 		let mock = MockEnvironment { weight_consumptions: mock_consumption() };
38 | 
39 | 		for (para, weight_consumptions) in &mock.weight_consumptions {
40 | 			weight_consumptions.iter().for_each(|consumption| {
41 | 				write_consumption(para.clone(), consumption.clone(), None)
42 | 					.expect("Failed to write consumption data");
43 | 			});
44 | 		}
45 | 
46 | 		let _ = update_registry(mock.weight_consumptions.keys().cloned().collect());
47 | 
48 | 		mock
49 | 	}
50 | 
51 | 	pub fn execute_with<R>(&self, execute: impl FnOnce() -> R) -> R {
52 | 		let _guard = guard((), |_| {
53 | 			// Reset the environment once we are complete with the test.
54 | 			reset_mock_environment();
55 | 		});
56 | 
57 | 		execute()
58 | 	}
59 | }
60 | 
61 | pub fn mock_consumption() -> HashMap<Parachain, Vec<WeightConsumption>> {
62 | 	hashmap!
{ 63 | get_para(Polkadot, 2000).unwrap() => vec![ 64 | WeightConsumption { 65 | block_number: 1, 66 | timestamp: 0, 67 | ref_time: (0.5, 0.3, 0.2).into(), 68 | proof_size: (0.5, 0.3, 0.2).into(), 69 | }, 70 | WeightConsumption { 71 | block_number: 2, 72 | timestamp: 6, 73 | ref_time: (0.1, 0.4, 0.2).into(), 74 | proof_size: (0.2, 0.3, 0.3).into(), 75 | }, 76 | WeightConsumption { 77 | block_number: 3, 78 | timestamp: 12, 79 | ref_time: (0.0, 0.2, 0.4).into(), 80 | proof_size: (0.1, 0.0, 0.3).into(), 81 | }, 82 | WeightConsumption { 83 | block_number: 4, 84 | timestamp: 18, 85 | ref_time: (0.1, 0.0, 0.4).into(), 86 | proof_size: (0.2, 0.1, 0.3).into(), 87 | }, 88 | ], 89 | get_para(Polkadot, 2004).unwrap() => vec![ 90 | WeightConsumption { 91 | block_number: 1, 92 | timestamp: 0, 93 | ref_time: (0.8, 0.0, 0.1).into(), 94 | proof_size: (0.6, 0.2, 0.1).into(), 95 | }, 96 | ], 97 | } 98 | } 99 | 100 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Consumption Tracker 2 | 3 | ## Overview 4 | 5 | The program is designed to fetch weight utilization data from a predefined set 6 | of parachains. The obtained weight information is then stored in the `out` 7 | directory as multiple CSV files. 8 | 9 | ## Output Structure 10 | 11 | Each parachain has its own dedicated output file, and these files are updated 12 | every time a new block is finalized and the weight consumption data is 13 | successfully queried. 14 | 15 | ## Data structure 16 | 17 | The data stored is the 2D weight consumption per each dispatch class. 18 | The data is stored in the CSV file within the following sequence: 19 | 20 | | block_number | timestamp | normal_dispatch_ref_time | operational_dispatch_ref_time | mandatory_dispatch_ref_time | normal_proof_size | operational_proof_size | mandatory_proof_size | 21 | |--------------|-----------------------|---------------------------|-------------------------------|-----------------------------|-------------------|-------------------------|-----------------------| 22 | | ... | ... | ... | ... | ... | ... | ... | ... | 23 | 24 | The percentages themselves are stored by representing them as decimal numbers; 25 | for example, 50.5% is stored as 0.505 with a precision of three decimals. 26 | 27 | ## Building & Running 28 | 29 | To compile the Corespace Weigher project run the following command from the root of the repo: 30 | ``` 31 | cargo build --release 32 | ``` 33 | 34 | This will output binaries: `tracker` and `server` 35 | 36 | The `tracker` binary is responsible for tracking the actual consumption data of parachains. This program will read the parachains.json file to obtain the list of parachains for which it will track consumption data by listening to the latest blocks from the specified RPC nodes. 37 | 38 | The `server` binary provides a web interface that can be used for registering a parachain for consumption tracking, as well as for querying all the consumption data. 39 | 40 | ### Watchdog 🐕 41 | 42 | WebSocket connections can be closed due to underlying networking issues. In such cases, the tracking of parachain data would stop. For this reason, a script called 'watchdog' is introduced to ensure the tracker attempts to create a new connection whenever the current one is broken. 
43 | 44 | ```sh 45 | ./scripts/watchdog.sh 46 | ``` 47 | 48 | ## Web API 49 | 50 | #### Registering a parachain 51 | 52 | A basic example of registering a parachain: 53 | 54 | ``` 55 | curl -X POST http://127.0.0.1:8000/register_para -H "Content-Type: application/json" -d '{ 56 | "para": ["Polkadot", 2000] 57 | }' 58 | ``` 59 | 60 | #### Querying consumption data 61 | 62 | A basic example of querying the consumption of a parachain with the paraID 2000 that is part of the Polkadot network: 63 | 64 | ``` 65 | curl http://127.0.0.1:8000/consumption/polkadot/2000 66 | ``` 67 | 68 | ## Local development 69 | 70 | For local development, you can run the entire suite of tests using the command below. It's important to run tests sequentially as some of them depend on shared mock state. This approach ensures that each test runs in isolation without interference from others. 71 | ``` 72 | cargo test -- --test-threads=1 73 | ``` 74 | -------------------------------------------------------------------------------- /shared/src/consumption.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use crate::{config::output_directory, LOG_TARGET}; 17 | use csv::{ReaderBuilder, WriterBuilder}; 18 | use std::fs::{File, OpenOptions}; 19 | use types::{Parachain, WeightConsumption}; 20 | 21 | pub fn get_consumption( 22 | para: Parachain, 23 | rpc_index: Option, 24 | ) -> Result, &'static str> { 25 | let file = 26 | File::open(output_file_path(para, rpc_index)).map_err(|_| "Consumption data not found")?; 27 | let mut rdr = ReaderBuilder::new().has_headers(false).from_reader(file); 28 | 29 | let consumption: Vec = rdr 30 | .deserialize::() 31 | .filter_map(|result| result.ok()) 32 | .collect(); 33 | 34 | Ok(consumption) 35 | } 36 | 37 | pub fn write_consumption( 38 | para: Parachain, 39 | consumption: WeightConsumption, 40 | rpc_index: Option, 41 | ) -> Result<(), std::io::Error> { 42 | log::info!( 43 | target: LOG_TARGET, 44 | "{}-{} - Writing weight consumption for block: #{}", 45 | para.relay_chain, para.para_id, consumption.block_number 46 | ); 47 | 48 | let output_file_path = output_file_path(para, rpc_index); 49 | let file = OpenOptions::new().create(true).append(true).open(output_file_path)?; 50 | 51 | let mut wtr = WriterBuilder::new().from_writer(file); 52 | 53 | // The data is stored in the sequence described at the beginning of the file. 
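	// The order is: block_number, timestamp, then the ref_time and proof_size ratios for the
	// normal, operational and mandatory dispatch classes.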
54 | wtr.write_record(&consumption.to_csv())?; 55 | 56 | wtr.flush() 57 | } 58 | 59 | pub fn write_batch_consumption( 60 | para: Parachain, 61 | consumption: Vec, 62 | ) -> Result<(), std::io::Error> { 63 | log::info!( 64 | target: LOG_TARGET, 65 | "{}-{} - Writing batch weight consumption.", 66 | para.relay_chain, para.para_id 67 | ); 68 | 69 | let output_file_path = output_file_path(para, None); 70 | let file = OpenOptions::new().create(true).append(true).open(output_file_path)?; 71 | 72 | let mut wtr = WriterBuilder::new().from_writer(file); 73 | 74 | // TODO: add a to_csv function 75 | consumption.iter().try_for_each(|entry| { 76 | // The data is stored in the sequence described at the beginning of the file. 77 | wtr.write_record(&entry.to_csv()) 78 | })?; 79 | 80 | wtr.flush() 81 | } 82 | 83 | pub fn delete_consumption(para: Parachain, rpc_index: usize) { 84 | log::info!( 85 | target: LOG_TARGET, 86 | "{}-{} - Deleting weight consumption.", 87 | para.relay_chain, para.para_id 88 | ); 89 | 90 | let output_file_path = output_file_path(para, Some(rpc_index)); 91 | match std::fs::remove_file(output_file_path.clone()) { 92 | Ok(_) => { 93 | log::info!( 94 | target: LOG_TARGET, 95 | "{} Deleted successfully", 96 | output_file_path 97 | ); 98 | }, 99 | Err(e) => { 100 | log::error!( 101 | target: LOG_TARGET, 102 | "{} Failed to delete: {:?}", 103 | output_file_path, e 104 | ); 105 | }, 106 | } 107 | } 108 | 109 | fn output_file_path(para: Parachain, rpc_index: Option) -> String { 110 | format!("{}/{}-{}.csv", output_directory(rpc_index), para.relay_chain, para.para_id) 111 | } 112 | -------------------------------------------------------------------------------- /src/api.rs: -------------------------------------------------------------------------------- 1 | /// Web API for interacting with the Consumption Tracker service. 2 | /// 3 | /// This API exposes two main endpoints: 4 | /// - `/consumption`: Used to query consumption data associated with a parachain. 5 | /// - `/register`: Used to register a parachain for consumption tracking. 6 | use csv::ReaderBuilder; 7 | use rocket::{http::Status, response::Responder, serde::json::Json, Request, Response}; 8 | use rocket_cors::CorsOptions; 9 | use std::{ 10 | fs::{File, OpenOptions}, 11 | io::{Read, Seek, Write}, 12 | }; 13 | 14 | mod shared; 15 | use shared::*; 16 | 17 | mod types; 18 | use types::*; 19 | 20 | #[macro_use] 21 | extern crate rocket; 22 | 23 | #[derive(Debug)] 24 | enum Error { 25 | /// Cannot register an already registered parachain. 26 | AlreadyRegistered, 27 | /// Tried to get the consumption of a parachain that is not registered. 28 | NotRegistered, 29 | /// Indicates that the consumption data for the parachain was not found. 30 | /// 31 | /// This should be only encountered if the consumption file has not been generated yet, 32 | /// which is only possible if the parachain has been registered within the last few seconds. 33 | ConsumptionDataNotFound, 34 | /// The stored data is invalid. This should never really happen. 35 | InvalidData, 36 | /// Failed to find the parachains data. This isn't a user error, but a bug in the code itself. 
37 | ParasDataNotFound, 38 | } 39 | 40 | impl<'r> Responder<'r, 'static> for Error { 41 | fn respond_to(self, _: &'r Request<'_>) -> Result, Status> { 42 | let body = format!("Error: {:?}", self); 43 | Response::build() 44 | .status(Status::InternalServerError) 45 | .sized_body(body.len(), std::io::Cursor::new(body)) 46 | .ok() 47 | } 48 | } 49 | 50 | /// Query the consumption data of a parachain. 51 | /// 52 | /// This will return an error in case there is no data associated with the specific parachain. 53 | #[get("/consumption//")] 54 | fn consumption(relay: &str, para_id: ParaId) -> Result { 55 | let para = parachain(relay.into(), para_id).ok_or(Error::NotRegistered)?; 56 | 57 | let file = File::open(file_path(para)).map_err(|_| Error::ConsumptionDataNotFound)?; 58 | let mut rdr = ReaderBuilder::new().has_headers(false).from_reader(file); 59 | 60 | let weight_consumptions: Vec = rdr 61 | .deserialize::() 62 | .filter_map(|result| result.ok()) 63 | .collect(); 64 | 65 | serde_json::to_string(&weight_consumptions).map_err(|_| Error::InvalidData) 66 | } 67 | 68 | /// Register a parachain for resource utilization tracking. 69 | #[post("/register_para", data = "")] 70 | fn register_para(para: Json) -> Result { 71 | let mut file = OpenOptions::new() 72 | .read(true) 73 | .write(true) 74 | .create(true) 75 | .open(PARACHAINS) 76 | .map_err(|_| Error::ParasDataNotFound)?; 77 | 78 | let mut content = String::new(); 79 | file.read_to_string(&mut content) 80 | .map_err(|_| Error::InvalidData)?; 81 | 82 | let mut paras: Vec = 83 | serde_json::from_str(&content).map_err(|_| Error::InvalidData)?; 84 | 85 | if parachain(para.relay_chain.clone(), para.para_id.clone()).is_some() { 86 | return Err(Error::AlreadyRegistered); 87 | } 88 | 89 | paras.push(para.into_inner()); 90 | let json_data = serde_json::to_string_pretty(¶s).expect("Failed to serialize"); 91 | 92 | file.set_len(0).expect("Failed to truncate file"); 93 | file.seek(std::io::SeekFrom::Start(0)) 94 | .expect("Failed to seek to the beginning"); 95 | 96 | file.write_all(json_data.as_bytes()).unwrap(); 97 | 98 | Ok(format!("")) 99 | } 100 | 101 | #[launch] 102 | fn rocket() -> _ { 103 | rocket::build() 104 | .attach(CorsOptions::default().to_cors().unwrap()) 105 | .mount("/", routes![consumption, register_para]) 106 | } 107 | -------------------------------------------------------------------------------- /routes/src/consumption.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 
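// Implements the `/consumption` endpoint. Results can be limited to a `start`..`end`
// timestamp range, paginated with `page`/`page_size`, and aggregated per block number,
// minute, hour, day, month or year through the `grouping` query parameter.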
15 | 16 | use crate::Error; 17 | use chrono::NaiveDateTime; 18 | use rocket::{ 19 | form, 20 | form::{FromFormField, ValueField}, 21 | get, 22 | }; 23 | use shared::{consumption::get_consumption, registry::registered_para}; 24 | use std::collections::BTreeMap; 25 | 26 | use types::{DispatchClassConsumption, ParaId, Timestamp, WeightConsumption}; 27 | 28 | #[derive(Clone, Debug, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)] 29 | #[serde(crate = "rocket::serde")] 30 | pub enum Grouping { 31 | BlockNumber, 32 | Minute, 33 | Hour, 34 | Day, 35 | Month, 36 | Year, 37 | } 38 | 39 | #[rocket::async_trait] 40 | impl<'r> FromFormField<'r> for Grouping { 41 | fn from_value(field: ValueField<'r>) -> form::Result<'r, Self> { 42 | match field.value { 43 | "minute" => Ok(Grouping::Minute), 44 | "hour" => Ok(Grouping::Hour), 45 | "day" => Ok(Grouping::Day), 46 | "month" => Ok(Grouping::Month), 47 | "year" => Ok(Grouping::Year), 48 | _ => Err(form::Error::validation("invalid Grouping").into()), 49 | } 50 | } 51 | } 52 | 53 | #[derive(Default, Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] 54 | #[serde(crate = "rocket::serde")] 55 | pub struct AggregatedData { 56 | pub group: String, 57 | /// The aggregated ref_time consumption over all the dispatch classes. 58 | pub ref_time: DispatchClassConsumption, 59 | /// The aggregated proof size over all dispatch classes. 60 | pub proof_size: DispatchClassConsumption, 61 | pub count: usize, 62 | } 63 | 64 | /// Query the consumption data of a parachain. 65 | /// 66 | /// This will return an error in case there is no data associated with the specific parachain. 67 | #[get("/consumption//?&&&&")] 68 | pub fn consumption( 69 | relay: &str, 70 | para_id: ParaId, 71 | start: Option, 72 | end: Option, 73 | page: Option, 74 | page_size: Option, 75 | grouping: Option, 76 | ) -> Result { 77 | let para = registered_para(relay.into(), para_id).ok_or(Error::NotRegistered)?; 78 | 79 | let (page, page_size) = (page.unwrap_or_default(), page_size.unwrap_or(u32::MAX)); 80 | let (start, end) = (start.unwrap_or_default(), end.unwrap_or(Timestamp::MAX)); 81 | 82 | // By default query the consumption that was collected from rpc index 0. 83 | let weight_consumptions: Vec = get_consumption(para, None) 84 | .map_err(|_| Error::ConsumptionDataNotFound)? 
85 | .into_iter() 86 | .filter(|consumption| consumption.timestamp >= start && consumption.timestamp <= end) 87 | .skip(page.saturating_mul(page_size) as usize) 88 | .take(page_size as usize) 89 | .collect(); 90 | 91 | let grouping = grouping.unwrap_or(Grouping::BlockNumber); 92 | 93 | let grouped = group_consumption(weight_consumptions, grouping); 94 | 95 | serde_json::to_string(&grouped).map_err(|_| Error::InvalidData) 96 | } 97 | 98 | pub fn group_consumption( 99 | weight_consumptions: Vec, 100 | grouping: Grouping, 101 | ) -> Vec { 102 | let grouped = weight_consumptions.iter().fold(BTreeMap::new(), |mut acc, datum| { 103 | let key = get_aggregation_key(datum.clone(), grouping); 104 | let entry: &mut AggregatedData = acc.entry(key).or_default(); 105 | 106 | entry.ref_time.normal += datum.ref_time.normal; 107 | entry.ref_time.operational += datum.ref_time.operational; 108 | entry.ref_time.mandatory += datum.ref_time.mandatory; 109 | 110 | entry.proof_size.normal += datum.proof_size.normal; 111 | entry.proof_size.operational += datum.proof_size.operational; 112 | entry.proof_size.mandatory += datum.proof_size.mandatory; 113 | 114 | entry.count += 1; 115 | 116 | acc 117 | }); 118 | 119 | grouped 120 | .into_iter() 121 | .map(|(key, entry)| { 122 | let mut entry = entry; 123 | entry.group = key; 124 | entry 125 | }) 126 | .collect() 127 | } 128 | 129 | fn get_aggregation_key(datum: WeightConsumption, grouping: Grouping) -> String { 130 | let datetime = 131 | NaiveDateTime::from_timestamp_opt((datum.timestamp / 1000) as i64, 0).unwrap_or_default(); 132 | 133 | match grouping { 134 | Grouping::BlockNumber => datum.block_number.to_string(), 135 | Grouping::Minute => datetime.format("%Y-%m-%dT%H:%M").to_string(), 136 | Grouping::Hour => datetime.format("%Y-%m-%dT%H:00").to_string(), 137 | Grouping::Day => datetime.format("%Y-%m-%d").to_string(), 138 | Grouping::Month => datetime.format("%Y-%m").to_string(), 139 | Grouping::Year => datetime.format("%Y").to_string(), 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /routes/src/register.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use crate::*; 17 | use polkadot_core_primitives::BlockNumber; 18 | use rocket::{post, serde::json::Json}; 19 | use shared::registry::{registered_para, registered_paras, update_registry}; 20 | use types::Parachain; 21 | 22 | /// Register a parachain for resource utilization tracking. 
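///
/// The handler rejects parachains that are already present in the registry with
/// `Error::AlreadyRegistered`. Otherwise the parachain is appended to the registry,
/// and in release builds the tracker is reinitialized so that consumption data starts
/// being collected for the newly registered parachain.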
23 | #[post("/register_para", data = "")] 24 | pub fn register_para(para: Json) -> Result<(), Error> { 25 | let mut paras = registered_paras(); 26 | 27 | if registered_para(relay_chain.clone(), para_id).is_some() { 28 | return Err(Error::AlreadyRegistered); 29 | } 30 | 31 | paras.push(para.into_inner()); 32 | 33 | if let Err(err) = update_registry(paras) { 34 | log::error!( 35 | target: LOG_TARGET, 36 | "{}-{} - Failed to register para: {:?}", 37 | para.relay_chain, 38 | para.para_id, 39 | err 40 | ); 41 | } else { 42 | #[cfg(not(debug_assertions))] 43 | shared::init_tracker(); 44 | } 45 | 46 | Ok(()) 47 | } 48 | 49 | async fn validate_registration_payment( 50 | para: Parachain, 51 | payment_info: PaymentInfo, 52 | payment_block_number: BlockNumber, 53 | ) -> Result<(), Error> { 54 | // TODO: Could this code be improved so that we don't have to instantiate both clients? 55 | let rpc_client = RpcClient::from_url(&payment_info.rpc_url.clone()) 56 | .await 57 | .map_err(|_| Error::PaymentValidationFailed)?; 58 | 59 | let online_client = OnlineClient::::from_url(payment_info.rpc_url.clone()) 60 | .await 61 | .map_err(|_| Error::PaymentValidationFailed)?; 62 | 63 | // Ensure that the `payment_block_number` is from a finalized block. 64 | let last_finalized = 65 | get_last_finalized_block(rpc_client.clone(), online_client.clone()).await?; 66 | if payment_block_number > last_finalized { 67 | return Err(Error::UnfinalizedPayment) 68 | } 69 | 70 | let block_hash = get_block_hash(rpc_client, payment_block_number).await?; 71 | let block = get_block(online_client, block_hash).await?; 72 | 73 | ensure_contains_payment(para, payment_info, block).await 74 | } 75 | 76 | async fn ensure_contains_payment( 77 | para: Parachain, 78 | payment_info: PaymentInfo, 79 | block: Block>, 80 | ) -> Result<(), Error> { 81 | let payment = opaque_payment_extrinsic(para, payment_info).await?; 82 | 83 | let extrinsics = block.extrinsics().await.map_err(|_| Error::PaymentValidationFailed)?; 84 | let extrinsics: Vec> = extrinsics 85 | .iter() 86 | .filter_map(|ext| { 87 | ext.as_ref().ok().and_then(|e| e.as_root_extrinsic::().ok()) 88 | }) 89 | .map(|ext| ext.encode()) 90 | .collect(); 91 | 92 | if extrinsics.contains(&payment.encode()) { 93 | Ok(()) 94 | } else { 95 | Err(Error::PaymentNotFound) 96 | } 97 | } 98 | 99 | async fn opaque_payment_extrinsic( 100 | para: Parachain, 101 | payment_info: PaymentInfo, 102 | ) -> Result { 103 | if let Ok(cost) = payment_info.cost.parse::() { 104 | let transfer_call = polkadot::Call::Balances(BalancesCall::transfer_keep_alive { 105 | dest: payment_info.receiver.into(), 106 | value: cost, 107 | }); 108 | 109 | let remark = format!("{}:{}", para.relay_chain, para.para_id).as_bytes().to_vec(); 110 | let remark_call = polkadot::Call::System(SystemCall::remark { remark }); 111 | 112 | let batch_call = polkadot::Call::Utility(UtilityCall::batch_all { 113 | calls: vec![transfer_call, remark_call], 114 | }); 115 | 116 | Ok(batch_call) 117 | } else { 118 | log::error!( 119 | target: LOG_TARGET, 120 | "Failed to parse cost", 121 | ); 122 | Err(Error::PaymentValidationFailed) 123 | } 124 | } 125 | 126 | async fn get_last_finalized_block( 127 | rpc_client: RpcClient, 128 | online_client: OnlineClient, 129 | ) -> Result { 130 | let params = rpc_params![]; 131 | let block_hash: H256 = rpc_client 132 | .request("chain_getFinalizedHead", params) 133 | .await 134 | .map_err(|_| Error::PaymentValidationFailed)?; 135 | 136 | let block = get_block(online_client, block_hash).await?; 137 | 138 | 
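// The height of the finalized head serves as the upper bound when checking that the
// user-provided payment block number has already been finalized.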
Ok(block.number()) 139 | } 140 | 141 | async fn get_block( 142 | api: OnlineClient, 143 | block_hash: H256, 144 | ) -> Result>, Error> { 145 | api.blocks().at(block_hash).await.map_err(|_| Error::PaymentValidationFailed) 146 | } 147 | 148 | async fn get_block_hash(rpc_client: RpcClient, block_number: BlockNumber) -> Result { 149 | let params = rpc_params![Some(block_number)]; 150 | let block_hash: H256 = rpc_client 151 | .request("chain_getBlockHash", params) 152 | .await 153 | .map_err(|_| Error::PaymentValidationFailed)?; 154 | 155 | Ok(block_hash) 156 | } 157 | -------------------------------------------------------------------------------- /types/src/lib.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use serde::{Deserialize, Deserializer, Serialize}; 17 | use std::fmt; 18 | 19 | /// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS 20 | /// reboots. 21 | pub type Timestamp = u64; 22 | 23 | /// Type used for identifying parachains. 24 | pub type ParaId = u32; 25 | 26 | #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Hash)] 27 | #[serde(crate = "rocket::serde")] 28 | pub enum RelayChain { 29 | Polkadot, 30 | Kusama, 31 | } 32 | 33 | impl fmt::Display for RelayChain { 34 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 35 | match self { 36 | RelayChain::Polkadot => write!(f, "Polkadot"), 37 | RelayChain::Kusama => write!(f, "Kusama"), 38 | } 39 | } 40 | } 41 | 42 | impl From<&str> for RelayChain { 43 | fn from(s: &str) -> Self { 44 | match s.to_lowercase().as_str() { 45 | "polkadot" => RelayChain::Polkadot, 46 | "kusama" => RelayChain::Kusama, 47 | _ => panic!("Invalid relay chain: {}", s), 48 | } 49 | } 50 | } 51 | 52 | impl<'de> Deserialize<'de> for RelayChain { 53 | fn deserialize(deserializer: D) -> Result 54 | where 55 | D: Deserializer<'de>, 56 | { 57 | let s = String::deserialize(deserializer)?.to_lowercase(); 58 | match s.as_str() { 59 | "polkadot" | "Polkadot" => Ok(RelayChain::Polkadot), 60 | "kusama" | "Kusama" => Ok(RelayChain::Kusama), 61 | _ => Err(serde::de::Error::custom(format!("Invalid relay chain: {}", s))), 62 | } 63 | } 64 | } 65 | 66 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Hash)] 67 | #[serde(crate = "rocket::serde")] 68 | pub struct Parachain { 69 | /// Name of the parachain. 70 | pub name: String, 71 | /// The rpc url endpoint from where we can query the weight consumption. 72 | pub rpcs: Vec, 73 | /// The `ParaId` of the parachain. 74 | pub para_id: ParaId, 75 | /// The relay chain that the parachain is using for block validation. 76 | pub relay_chain: RelayChain, 77 | 78 | /// The timestamp when the subscription expires. 
79 | pub expiry_timestamp: Timestamp, 80 | } 81 | 82 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 83 | pub struct WeightConsumption { 84 | /// The block number for which the weight consumption is related to. 85 | pub block_number: u32, 86 | /// The timestamp of the block. 87 | pub timestamp: Timestamp, 88 | /// The ref_time consumption over all the dispatch classes. 89 | pub ref_time: DispatchClassConsumption, 90 | /// The proof size over all dispatch classes. 91 | pub proof_size: DispatchClassConsumption, 92 | } 93 | 94 | #[derive(Default, Debug, Serialize, PartialEq, Deserialize, Clone)] 95 | pub struct DispatchClassConsumption { 96 | /// The percentage of the weight used by user submitted extrinsics compared to the 97 | /// maximum potential. 98 | pub normal: f32, 99 | /// The percentage of the weight used by user operational dispatches compared to the 100 | /// maximum potential. 101 | pub operational: f32, 102 | /// The percentage of the weight used by the mandatory tasks of a parachain compared 103 | /// to the maximum potential. 104 | pub mandatory: f32, 105 | } 106 | 107 | /// A shorthand for converting a tuple of `f32`s into `DispatchClassConsumption`. 108 | /// 109 | /// The order in which the values need to be provided is: `normal`, `operational`, `mandatory`. 110 | impl From<(f32, f32, f32)> for DispatchClassConsumption { 111 | fn from(value: (f32, f32, f32)) -> Self { 112 | DispatchClassConsumption { normal: value.0, operational: value.1, mandatory: value.2 } 113 | } 114 | } 115 | 116 | impl fmt::Display for WeightConsumption { 117 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 118 | write!(f, "\n\tNormal ref_time consumption: {}", self.ref_time.normal)?; 119 | write!(f, "\n\tOperational ref_time consumption: {}", self.ref_time.operational)?; 120 | write!(f, "\n\tMandatory ref_time consumption: {}", self.ref_time.mandatory)?; 121 | 122 | write!(f, "\n\tNormal proof size: {}", self.proof_size.normal)?; 123 | write!(f, "\n\tOperational proof size: {}", self.proof_size.operational)?; 124 | write!(f, "\n\tMandatory proof size: {}", self.proof_size.mandatory)?; 125 | Ok(()) 126 | } 127 | } 128 | 129 | impl WeightConsumption { 130 | /// Returns consumption data as a vector of strings, where each element 131 | /// represents a column in a CSV format. Each string in the vector corresponds 132 | /// to one column of data. 133 | pub fn to_csv(&self) -> Vec { 134 | vec![ 135 | // Block number: 136 | self.block_number.to_string(), 137 | // Timestamp: 138 | self.timestamp.to_string(), 139 | // Reftime consumption: 140 | self.ref_time.normal.to_string(), 141 | self.ref_time.operational.to_string(), 142 | self.ref_time.mandatory.to_string(), 143 | // Proof size: 144 | self.proof_size.normal.to_string(), 145 | self.proof_size.operational.to_string(), 146 | self.proof_size.mandatory.to_string(), 147 | ] 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /routes/tests/extend_subscription.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 
7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | use polkadot_core_primitives::BlockNumber; 17 | use rocket::{ 18 | http::{ContentType, Status}, 19 | local::blocking::{Client, LocalResponse}, 20 | routes, 21 | }; 22 | use routes::{ 23 | extend_subscription::{extend_subscription, ExtendSubscriptionData}, 24 | Error, 25 | }; 26 | use shared::{ 27 | chaindata::get_para, 28 | payment::PaymentError, 29 | registry::{registered_para, update_registry}, 30 | }; 31 | use types::RelayChain::*; 32 | 33 | mod mock; 34 | use mock::MockEnvironment; 35 | 36 | const PARA_2000_PAYMENT: BlockNumber = 9145403; 37 | 38 | #[test] 39 | fn extend_subscription_works() { 40 | MockEnvironment::new().execute_with(|| { 41 | let rocket = rocket::build().mount("/", routes![extend_subscription]); 42 | let client = Client::tracked(rocket).expect("valid rocket instance"); 43 | 44 | let para = get_para(Polkadot, 2000).unwrap(); 45 | let extend_subscription = ExtendSubscriptionData { 46 | para: (para.relay_chain.clone(), para.para_id), 47 | payment_block_number: PARA_2000_PAYMENT, 48 | }; 49 | 50 | let response = client 51 | .post("/extend-subscription") 52 | .header(ContentType::JSON) 53 | .body(serde_json::to_string(&extend_subscription).unwrap()) 54 | .dispatch(); 55 | 56 | assert_eq!(response.status(), Status::Ok); 57 | 58 | let registered = registered_para(Polkadot, 2000).unwrap(); 59 | // Ensure the `expiry_timestamp` got updated: 60 | assert!(registered.expiry_timestamp != para.expiry_timestamp); 61 | }); 62 | } 63 | 64 | #[test] 65 | fn cannot_extend_subscription_for_unregistered() { 66 | MockEnvironment::new().execute_with(|| { 67 | let rocket = rocket::build().mount("/", routes![extend_subscription]); 68 | let client = Client::tracked(rocket).expect("valid rocket instance"); 69 | 70 | let extend_subscription = ExtendSubscriptionData { 71 | para: (Polkadot, 2006), 72 | payment_block_number: PARA_2000_PAYMENT, 73 | }; 74 | 75 | let response = client 76 | .post("/extend-subscription") 77 | .header(ContentType::JSON) 78 | .body(serde_json::to_string(&extend_subscription).unwrap()) 79 | .dispatch(); 80 | 81 | assert_eq!(parse_err_response(response), Error::NotRegistered); 82 | }); 83 | } 84 | 85 | #[test] 86 | fn cannot_extend_subscription_before_renewal_period() { 87 | MockEnvironment::new().execute_with(|| { 88 | let rocket = rocket::build().mount("/", routes![extend_subscription]); 89 | let client = Client::tracked(rocket).expect("valid rocket instance"); 90 | 91 | let extend_subscription = ExtendSubscriptionData { 92 | para: (Polkadot, 2000), 93 | payment_block_number: PARA_2000_PAYMENT, 94 | }; 95 | 96 | let mut para = get_para(Polkadot, 2000).unwrap(); 97 | para.expiry_timestamp = u64::MAX; 98 | update_registry(vec![para]).unwrap(); 99 | 100 | let response = client 101 | .post("/extend-subscription") 102 | .header(ContentType::JSON) 103 | .body(serde_json::to_string(&extend_subscription).unwrap()) 104 | .dispatch(); 105 | 106 | assert_eq!(parse_err_response(response), Error::AlreadyRegistered); 107 | }); 108 | } 109 | 110 | #[test] 111 | fn providing_non_finalized_payment_block_number_fails() { 112 | MockEnvironment::new().execute_with(|| { 113 | let rocket = 
rocket::build().mount("/", routes![extend_subscription]); 114 | let client = Client::tracked(rocket).expect("valid rocket instance"); 115 | 116 | let para = get_para(Polkadot, 2000).unwrap(); 117 | let extend_subscription = ExtendSubscriptionData { 118 | para: (para.relay_chain.clone(), para.para_id), 119 | payment_block_number: 99999999, 120 | }; 121 | 122 | let response = client 123 | .post("/extend-subscription") 124 | .header(ContentType::JSON) 125 | .body(serde_json::to_string(&extend_subscription).unwrap()) 126 | .dispatch(); 127 | 128 | assert_eq!( 129 | parse_err_response(response), 130 | Error::PaymentValidationError(PaymentError::Unfinalized) 131 | ); 132 | }); 133 | } 134 | 135 | #[test] 136 | fn payment_not_found_works() { 137 | MockEnvironment::new().execute_with(|| { 138 | let rocket = rocket::build().mount("/", routes![extend_subscription]); 139 | let client = Client::tracked(rocket).expect("valid rocket instance"); 140 | 141 | let para = get_para(Polkadot, 2004).unwrap(); 142 | // We are extending the subscription for para 2004, but the payment is for para 2000. 143 | let extend_subscription = ExtendSubscriptionData { 144 | para: (para.relay_chain.clone(), para.para_id), 145 | payment_block_number: PARA_2000_PAYMENT, 146 | }; 147 | 148 | let response = client 149 | .post("/extend-subscription") 150 | .header(ContentType::JSON) 151 | .body(serde_json::to_string(&extend_subscription).unwrap()) 152 | .dispatch(); 153 | 154 | assert_eq!( 155 | parse_err_response(response), 156 | Error::PaymentValidationError(PaymentError::NotFound) 157 | ); 158 | }); 159 | } 160 | 161 | fn parse_err_response<'a>(response: LocalResponse<'a>) -> Error { 162 | let body = response.into_string().unwrap(); 163 | body.into() 164 | } 165 | -------------------------------------------------------------------------------- /shared/src/payment.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | //! File containing all the payment validation related logic. 17 | 18 | use crate::{ 19 | config::PaymentInfo, 20 | payment::polkadot::runtime_types::{ 21 | frame_system::pallet::Call as SystemCall, pallet_balances::pallet::Call as BalancesCall, 22 | pallet_utility::pallet::Call as UtilityCall, 23 | }, 24 | *, 25 | }; 26 | use parity_scale_codec::Encode; 27 | use polkadot_core_primitives::BlockNumber; 28 | use serde::{Deserialize, Serialize}; 29 | use subxt::{ 30 | backend::rpc::{rpc_params, RpcClient}, 31 | blocks::Block, 32 | utils::H256, 33 | OnlineClient, PolkadotConfig, 34 | }; 35 | use types::Parachain; 36 | 37 | #[subxt::subxt(runtime_metadata_path = "../artifacts/metadata.scale")] 38 | mod polkadot {} 39 | 40 | #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] 41 | pub enum PaymentError { 42 | /// Failed to validate they payment. 
43 | ValidationFailed, 44 | /// The payment is not part of a finalized block. 45 | Unfinalized, 46 | /// The payment was not found in the specified block. 47 | NotFound, 48 | } 49 | 50 | impl From for PaymentError { 51 | fn from(v: String) -> Self { 52 | match v.as_str() { 53 | "ValidationFailed" => Self::ValidationFailed, 54 | "Unfinalized" => Self::Unfinalized, 55 | "NotFound" => Self::NotFound, 56 | _ => panic!("UnknownError"), 57 | } 58 | } 59 | } 60 | 61 | pub async fn validate_registration_payment( 62 | para: Parachain, 63 | payment_info: PaymentInfo, 64 | payment_block_number: BlockNumber, 65 | ) -> Result<(), PaymentError> { 66 | // TODO: Could this code be improved so that we don't have to instantiate both clients? 67 | let rpc_client = RpcClient::from_url(&payment_info.rpc_url.clone()) 68 | .await 69 | .map_err(|_| PaymentError::ValidationFailed)?; 70 | 71 | let online_client = OnlineClient::::from_url(payment_info.rpc_url.clone()) 72 | .await 73 | .map_err(|_| PaymentError::ValidationFailed)?; 74 | 75 | // Ensure that the `payment_block_number` is from a finalized block. 76 | let last_finalized = 77 | get_last_finalized_block(rpc_client.clone(), online_client.clone()).await?; 78 | if payment_block_number > last_finalized { 79 | return Err(PaymentError::Unfinalized); 80 | } 81 | 82 | let block_hash = get_block_hash(rpc_client, payment_block_number).await?; 83 | let block = get_block(online_client, block_hash).await?; 84 | 85 | ensure_contains_payment(para, payment_info, block).await 86 | } 87 | 88 | async fn ensure_contains_payment( 89 | para: Parachain, 90 | payment_info: PaymentInfo, 91 | block: Block>, 92 | ) -> Result<(), PaymentError> { 93 | let payment = opaque_payment_extrinsic(para, payment_info).await?; 94 | 95 | let extrinsics = block.extrinsics().await.map_err(|_| PaymentError::ValidationFailed)?; 96 | let extrinsics: Vec> = extrinsics 97 | .iter() 98 | .filter_map(|ext| { 99 | ext.as_ref().ok().and_then(|e| e.as_root_extrinsic::().ok()) 100 | }) 101 | .map(|ext| ext.encode()) 102 | .collect(); 103 | 104 | if extrinsics.contains(&payment.encode()) { 105 | Ok(()) 106 | } else { 107 | Err(PaymentError::NotFound) 108 | } 109 | } 110 | 111 | async fn opaque_payment_extrinsic( 112 | para: Parachain, 113 | payment_info: PaymentInfo, 114 | ) -> Result { 115 | if let Ok(cost) = payment_info.cost.parse::() { 116 | let transfer_call = polkadot::Call::Balances(BalancesCall::transfer_keep_alive { 117 | dest: payment_info.receiver.into(), 118 | value: cost, 119 | }); 120 | 121 | let remark = format!("regionx-weigher::{}:{}", para.relay_chain, para.para_id) 122 | .as_bytes() 123 | .to_vec(); 124 | let remark_call = polkadot::Call::System(SystemCall::remark { remark }); 125 | 126 | let batch_call = polkadot::Call::Utility(UtilityCall::batch_all { 127 | calls: vec![transfer_call, remark_call], 128 | }); 129 | 130 | Ok(batch_call) 131 | } else { 132 | log::error!( 133 | target: LOG_TARGET, 134 | "Failed to parse cost", 135 | ); 136 | Err(PaymentError::ValidationFailed) 137 | } 138 | } 139 | 140 | async fn get_last_finalized_block( 141 | rpc_client: RpcClient, 142 | online_client: OnlineClient, 143 | ) -> Result { 144 | let params = rpc_params![]; 145 | let block_hash: H256 = rpc_client 146 | .request("chain_getFinalizedHead", params) 147 | .await 148 | .map_err(|_| PaymentError::ValidationFailed)?; 149 | 150 | let block = get_block(online_client, block_hash).await?; 151 | 152 | Ok(block.number()) 153 | } 154 | 155 | async fn get_block( 156 | api: OnlineClient, 157 | block_hash: H256, 
158 | ) -> Result>, PaymentError> { 159 | api.blocks().at(block_hash).await.map_err(|_| PaymentError::ValidationFailed) 160 | } 161 | 162 | async fn get_block_hash( 163 | rpc_client: RpcClient, 164 | block_number: BlockNumber, 165 | ) -> Result { 166 | let params = rpc_params![Some(block_number)]; 167 | let block_hash: H256 = rpc_client 168 | .request("chain_getBlockHash", params) 169 | .await 170 | .map_err(|_| PaymentError::ValidationFailed)?; 171 | 172 | Ok(block_hash) 173 | } 174 | -------------------------------------------------------------------------------- /bin/tracker/src/main.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 15 | 16 | //! # Consumption Tracker 17 | //! 18 | //! This is the main source file for the Consumption Tracker binary. 19 | //! 20 | //! ## Overview 21 | //! 22 | //! The program is designed to fetch weight utilization data from a predefined set 23 | //! of parachains. The obtained weight information is then stored in the `out` 24 | //! directory as multiple CSV files. 25 | //! 26 | //! ## Output Structure 27 | //! 28 | //! Each parachain has its own dedicated output file, and these files are updated 29 | //! every time a new block is finalized and the weight consumption data is 30 | //! successfully queried. 31 | //! 32 | //! ## Data structure 33 | //! 34 | //! The data stored is the 2D weight consumption per each dispatch class. 35 | //! The data is stored in the CSV file within the following sequence: 36 | //! 37 | //! | block_number | timestamp | normal_dispatch_ref_time | operational_dispatch_ref_time | mandatory_dispatch_ref_time | normal_proof_size | operational_proof_size | mandatory_proof_size | 38 | //! |--------------|-----------------------|---------------------------|-------------------------------|-----------------------------|-------------------|-------------------------|-----------------------| 39 | //! | ... | ... | ... | ... | ... | ... | ... | ... | 40 | //! 41 | //! The percentages themselves are stored by representing them as decimal numbers; 42 | //! for example, 50.5% is stored as 0.505 with a precision of three decimals. 
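//!
//! As a purely illustrative example (hypothetical values), a single CSV row for a block
//! in which normal dispatches consumed 50.5% of the available `ref_time` could look like:
//!
//! `1000000,1700000000000,0.505,0.001,0.02,0.1,0.0,0.015`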
43 | 44 | const LOG_TARGET: &str = "tracker"; 45 | 46 | const LOG_TARGET: &str = "tracker"; 47 | 48 | use clap::Parser; 49 | use shared::{consumption::write_consumption, registry::registered_paras, round_to}; 50 | use subxt::{blocks::Block, utils::H256, OnlineClient, PolkadotConfig}; 51 | use types::{Parachain, Timestamp, WeightConsumption}; 52 | 53 | mod cli; 54 | 55 | #[subxt::subxt(runtime_metadata_path = "../../artifacts/metadata.scale")] 56 | mod polkadot {} 57 | 58 | #[tokio::main] 59 | async fn main() -> Result<(), Box> { 60 | env_logger::init(); 61 | 62 | let args = cli::Args::parse(); 63 | 64 | // Asynchronously subscribes to follow the latest finalized block of each parachain 65 | // and continuously fetches the weight consumption. 66 | let tasks: Vec<_> = registered_paras() 67 | .into_iter() 68 | .map(|para| { 69 | tokio::spawn(async move { track_weight_consumption(para, args.rpc_index).await }) 70 | }) 71 | .collect(); 72 | 73 | for task in tasks { 74 | task.await.expect("Failed to track consumption"); 75 | } 76 | 77 | Ok(()) 78 | } 79 | 80 | async fn track_weight_consumption(para: Parachain, rpc_index: usize) { 81 | let Some(rpc) = para.rpcs.get(rpc_index) else { 82 | log::error!( 83 | target: LOG_TARGET, 84 | "{}-{} - doesn't have an rpc with index: {}", 85 | para.relay_chain, para.para_id, rpc_index, 86 | ); 87 | return; 88 | }; 89 | 90 | log::info!("{}-{} - Starting to track consumption.", para.relay_chain, para.para_id); 91 | let result = OnlineClient::::from_url(rpc).await; 92 | 93 | if let Ok(api) = result { 94 | if let Err(err) = track_blocks(api, para.clone(), rpc_index).await { 95 | log::error!( 96 | target: LOG_TARGET, 97 | "{}-{} - Failed to track new block: {:?}", 98 | para.relay_chain, 99 | para.para_id, 100 | err 101 | ); 102 | } 103 | } else { 104 | log::error!( 105 | target: LOG_TARGET, 106 | "{}-{} - Failed to create online client: {:?}", 107 | para.relay_chain, 108 | para.para_id, 109 | result 110 | ); 111 | } 112 | } 113 | 114 | async fn track_blocks( 115 | api: OnlineClient, 116 | para: Parachain, 117 | rpc_index: usize, 118 | ) -> Result<(), Box> { 119 | log::info!( 120 | target: LOG_TARGET, 121 | "{}-{} - Subsciribing to finalized blocks", 122 | para.relay_chain, 123 | para.para_id 124 | ); 125 | 126 | let mut blocks_sub = api 127 | .blocks() 128 | .subscribe_finalized() 129 | .await 130 | .map_err(|_| "Failed to subscribe to finalized blocks")?; 131 | 132 | // Wait for new finalized blocks, then fetch and output the weight consumption accordingly. 133 | while let Some(Ok(block)) = blocks_sub.next().await { 134 | note_new_block(api.clone(), para.clone(), rpc_index, block).await?; 135 | } 136 | 137 | Ok(()) 138 | } 139 | 140 | async fn note_new_block( 141 | api: OnlineClient, 142 | para: Parachain, 143 | rpc_index: usize, 144 | block: Block>, 145 | ) -> Result<(), Box> { 146 | let block_number = block.header().number; 147 | 148 | let timestamp = timestamp_at(api.clone(), block.hash()).await?; 149 | let consumption = weight_consumption(api, block_number, timestamp).await?; 150 | 151 | write_consumption(para, consumption, Some(rpc_index))?; 152 | 153 | Ok(()) 154 | } 155 | 156 | async fn weight_consumption( 157 | api: OnlineClient, 158 | block_number: u32, 159 | timestamp: Timestamp, 160 | ) -> Result> { 161 | let weight_query = polkadot::storage().system().block_weight(); 162 | let weight_consumed = api 163 | .storage() 164 | .at_latest() 165 | .await? 166 | .fetch(&weight_query) 167 | .await? 
168 | .ok_or("Failed to query consumption")?; 169 | 170 | let weight_limit_query = polkadot::constants().system().block_weights(); 171 | let weight_limit = api.constants().at(&weight_limit_query)?; 172 | 173 | let proof_limit = weight_limit.max_block.proof_size; 174 | // NOTE: This will be the same for all parachains within the same network until elastic scaling 175 | // is enabled. 176 | let ref_time_limit = weight_limit.max_block.ref_time; 177 | 178 | let normal_ref_time = weight_consumed.normal.ref_time; 179 | let operational_ref_time = weight_consumed.operational.ref_time; 180 | let mandatory_ref_time = weight_consumed.mandatory.ref_time; 181 | 182 | let normal_proof_size = weight_consumed.normal.proof_size; 183 | let operational_proof_size = weight_consumed.operational.proof_size; 184 | let mandatory_proof_size = weight_consumed.mandatory.proof_size; 185 | 186 | let consumption = WeightConsumption { 187 | block_number, 188 | timestamp, 189 | ref_time: ( 190 | round_to(normal_ref_time as f32 / ref_time_limit as f32, 3), 191 | round_to(operational_ref_time as f32 / ref_time_limit as f32, 3), 192 | round_to(mandatory_ref_time as f32 / ref_time_limit as f32, 3), 193 | ) 194 | .into(), 195 | proof_size: ( 196 | round_to(normal_proof_size as f32 / proof_limit as f32, 3), 197 | round_to(operational_proof_size as f32 / proof_limit as f32, 3), 198 | round_to(mandatory_proof_size as f32 / proof_limit as f32, 3), 199 | ) 200 | .into(), 201 | }; 202 | 203 | Ok(consumption) 204 | } 205 | 206 | async fn timestamp_at( 207 | api: OnlineClient, 208 | block_hash: H256, 209 | ) -> Result> { 210 | let timestamp_query = polkadot::storage().timestamp().now(); 211 | 212 | let timestamp = api 213 | .storage() 214 | .at(block_hash) 215 | .fetch(×tamp_query) 216 | .await? 217 | .ok_or("Failed to query consumption")?; 218 | 219 | Ok(timestamp) 220 | } 221 | -------------------------------------------------------------------------------- /routes/tests/consumption.rs: -------------------------------------------------------------------------------- 1 | // This file is part of RegionX. 2 | // 3 | // RegionX is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | 8 | // RegionX is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | 13 | // You should have received a copy of the GNU General Public License 14 | // along with RegionX. If not, see . 
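//! Integration tests for the `/consumption` route, run against the mock environment.
//! They cover querying all consumption data, pagination, timestamp-based filtering,
//! and grouping by minute, hour, day, month and year.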
15 | 16 | use rocket::{ 17 | http::Status, 18 | local::blocking::{Client, LocalResponse}, 19 | routes, 20 | }; 21 | use routes::{ 22 | consumption::{consumption, group_consumption, AggregatedData, Grouping}, 23 | Error, 24 | }; 25 | use shared::{chaindata::get_para, registry::update_registry, reset_mock_environment}; 26 | use std::collections::HashMap; 27 | use types::{RelayChain::*, WeightConsumption}; 28 | 29 | mod mock; 30 | use mock::{mock_consumption, MockEnvironment}; 31 | 32 | #[test] 33 | fn getting_all_consumption_data_works() { 34 | MockEnvironment::new().execute_with(|| { 35 | let rocket = rocket::build().mount("/", routes![consumption]); 36 | let client = Client::tracked(rocket).expect("valid rocket instance"); 37 | 38 | let para = get_para(Polkadot, 2000).unwrap(); 39 | let response = client.get("/consumption/polkadot/2000").dispatch(); 40 | assert_eq!(response.status(), Status::Ok); 41 | 42 | let consumption_data = parse_ok_response(response); 43 | let expected_consumption = group_consumption( 44 | mock_consumption().get(¶).unwrap().clone(), 45 | Grouping::BlockNumber, 46 | ); 47 | assert_eq!(consumption_data, expected_consumption); 48 | }); 49 | } 50 | 51 | #[test] 52 | fn parachain_not_found_handled() { 53 | MockEnvironment::new().execute_with(|| { 54 | let rocket = rocket::build().mount("/", routes![consumption]); 55 | let client = Client::tracked(rocket).expect("valid rocket instance"); 56 | 57 | let response = client.get("/consumption/polkadot/42").dispatch(); 58 | assert_eq!(response.status(), Status::InternalServerError); 59 | 60 | let err = parse_err_response(response); 61 | assert_eq!(err, Error::NotRegistered); 62 | }); 63 | } 64 | 65 | #[test] 66 | fn consumption_data_not_found_handled() { 67 | // We run this test outside the mock environment which means the consumption data state won't 68 | // get inititalized. 69 | 70 | let rocket = rocket::build().mount("/", routes![consumption]); 71 | let client = Client::tracked(rocket).expect("valid rocket instance"); 72 | 73 | // Register a parachain without storing any consumption data. 74 | assert!(update_registry(vec![get_para(Polkadot, 2000).unwrap()]).is_ok()); 75 | 76 | let response = client.get("/consumption/polkadot/2000").dispatch(); 77 | assert_eq!(response.status(), Status::InternalServerError); 78 | 79 | let err = parse_err_response(response); 80 | assert_eq!(err, Error::ConsumptionDataNotFound); 81 | 82 | reset_mock_environment(); 83 | } 84 | 85 | #[test] 86 | fn pagination_works() { 87 | MockEnvironment::new().execute_with(|| { 88 | let rocket = rocket::build().mount("/", routes![consumption]); 89 | let client = Client::tracked(rocket).expect("valid rocket instance"); 90 | 91 | let para = get_para(Polkadot, 2000).unwrap(); 92 | let mock_data = mock_consumption().get(¶).unwrap().clone(); 93 | 94 | // CASE 1: Limit response size by setting page size 95 | let response = client.get("/consumption/polkadot/2000?page_size=1").dispatch(); 96 | assert_eq!(response.status(), Status::Ok); 97 | 98 | let consumption_data = parse_ok_response(response); 99 | let expected_data = 100 | group_consumption(vec![mock_data.first().unwrap().clone()], Grouping::BlockNumber); 101 | // Should only contain the first consumption data. 102 | assert_eq!(consumption_data, expected_data); 103 | 104 | // CASE 2: Specifying the page without page size will still show all the data. 
105 | let response = client.get("/consumption/polkadot/2000?page=0").dispatch(); 106 | assert_eq!(response.status(), Status::Ok); 107 | 108 | let consumption_data = parse_ok_response(response); 109 | let expected_data = group_consumption(mock_data.clone(), Grouping::BlockNumber); 110 | // Should only contain the first consumption data. 111 | assert_eq!(consumption_data, expected_data); 112 | 113 | // CASE 3: Specifying the page and page size works. 114 | let response = client.get("/consumption/polkadot/2000?page=1&page_size=2").dispatch(); 115 | assert_eq!(response.status(), Status::Ok); 116 | 117 | let consumption_data = parse_ok_response(response); 118 | let expected_data = group_consumption( 119 | mock_data.into_iter().skip(2).take(2).collect::>(), 120 | Grouping::BlockNumber, 121 | ); 122 | // Should skip the first page and take the second one. 123 | assert_eq!(consumption_data, expected_data); 124 | 125 | // CASE 4: An out-of-bound page and page size will return an empty vector. 126 | let response = client.get("/consumption/polkadot/2000?page=69&page_size=42").dispatch(); 127 | assert_eq!(response.status(), Status::Ok); 128 | 129 | let consumption_data = parse_ok_response(response); 130 | assert!(consumption_data.is_empty()); 131 | }); 132 | } 133 | 134 | #[test] 135 | fn timestamp_based_filtering_works() { 136 | MockEnvironment::new().execute_with(|| { 137 | let rocket = rocket::build().mount("/", routes![consumption]); 138 | let client = Client::tracked(rocket).expect("valid rocket instance"); 139 | 140 | let para = get_para(Polkadot, 2000).unwrap(); 141 | let mock_data = mock_consumption().get(¶).unwrap().clone(); 142 | 143 | // CASE 1: setting the starting timestamp filters out the data. 144 | let start_timestamp = 6; 145 | let response = client.get("/consumption/polkadot/2000?start=6").dispatch(); 146 | assert_eq!(response.status(), Status::Ok); 147 | 148 | let response_data = parse_ok_response(response); 149 | let expected_data = group_consumption( 150 | mock_data 151 | .clone() 152 | .into_iter() 153 | .filter(|c| c.timestamp >= start_timestamp) 154 | .collect::>(), 155 | Grouping::BlockNumber, 156 | ); 157 | 158 | // Should only contain the consumption where the timestamp is greater than or equal to 6. 159 | assert_eq!(response_data, expected_data); 160 | 161 | // CASE 2: setting the ending timestamp filters out the data. 162 | let end_timestamp = 12; 163 | let response = client.get("/consumption/polkadot/2000?end=12").dispatch(); 164 | assert_eq!(response.status(), Status::Ok); 165 | 166 | let response_data = parse_ok_response(response); 167 | let expected_data = group_consumption( 168 | mock_data 169 | .clone() 170 | .into_iter() 171 | .filter(|c| c.timestamp <= end_timestamp) 172 | .collect::>(), 173 | Grouping::BlockNumber, 174 | ); 175 | 176 | // Should only contain the consumption where the timestamp is less than or equal to 12. 177 | assert_eq!(response_data, expected_data); 178 | 179 | // CASE 3: setting the both start and ending timestamp filters out the data. 
180 | let start_timestamp = 6; 181 | let end_timestamp = 6; 182 | let response = client.get("/consumption/polkadot/2000?start=6&end=6").dispatch(); 183 | assert_eq!(response.status(), Status::Ok); 184 | 185 | let response_data = parse_ok_response(response); 186 | let expected_data = group_consumption( 187 | mock_data 188 | .into_iter() 189 | .filter(|c| c.timestamp >= start_timestamp && c.timestamp <= end_timestamp) 190 | .collect::>(), 191 | Grouping::BlockNumber, 192 | ); 193 | 194 | assert_eq!(response_data, expected_data); 195 | // Should only contain one consumption data since the `start` and `end` are set to the same 196 | // value. 197 | assert!(response_data.len() == 1); 198 | }); 199 | } 200 | 201 | #[test] 202 | fn pagination_and_timestamp_filtering_works() { 203 | MockEnvironment::new().execute_with(|| { 204 | let rocket = rocket::build().mount("/", routes![consumption]); 205 | let client = Client::tracked(rocket).expect("valid rocket instance"); 206 | 207 | let para = get_para(Polkadot, 2000).unwrap(); 208 | let mock_data = mock_consumption().get(¶).unwrap().clone(); 209 | 210 | // Combined Case: Filter by timestamp and paginate 211 | let start_timestamp = 6; 212 | let page_size = 2; 213 | let page_number = 1; 214 | let response = client 215 | .get(format!( 216 | "/consumption/polkadot/2000?start={}&page={}&page_size={}", 217 | start_timestamp, page_number, page_size 218 | )) 219 | .dispatch(); 220 | assert_eq!(response.status(), Status::Ok); 221 | 222 | let response_data = parse_ok_response(response); 223 | let expected_data = group_consumption( 224 | mock_data 225 | .into_iter() 226 | .filter(|c| c.timestamp >= start_timestamp) 227 | .skip(page_size * page_number) 228 | .take(page_size) 229 | .collect::>(), 230 | Grouping::BlockNumber, 231 | ); 232 | 233 | // Check if the data is filtered by timestamp and then paginated 234 | assert_eq!(response_data, expected_data); 235 | }); 236 | } 237 | 238 | #[test] 239 | fn grouping_works() { 240 | MockEnvironment::new().execute_with(|| { 241 | let rocket = rocket::build().mount("/", routes![consumption]); 242 | let client = Client::tracked(rocket).expect("valid rocket instance"); 243 | 244 | // Default is grouping by block number: 245 | let para = get_para(Polkadot, 2000).unwrap(); 246 | let response = client.get("/consumption/polkadot/2000").dispatch(); 247 | assert_eq!(response.status(), Status::Ok); 248 | 249 | let consumption_data = parse_ok_response(response); 250 | let expected_consumption = group_consumption( 251 | mock_consumption().get(¶).unwrap().clone(), 252 | Grouping::BlockNumber, 253 | ); 254 | assert_eq!(consumption_data, expected_consumption); 255 | 256 | // Grouping by minute: 257 | let response = client.get("/consumption/polkadot/2000?grouping=minute").dispatch(); 258 | assert_eq!(response.status(), Status::Ok); 259 | 260 | let consumption_data = parse_ok_response(response); 261 | let expected_consumption = 262 | group_consumption(mock_consumption().get(¶).unwrap().clone(), Grouping::Minute); 263 | assert_eq!(consumption_data, expected_consumption); 264 | 265 | // Grouping by hour: 266 | let response = client.get("/consumption/polkadot/2000?grouping=hour").dispatch(); 267 | assert_eq!(response.status(), Status::Ok); 268 | 269 | let consumption_data = parse_ok_response(response); 270 | let expected_consumption = 271 | group_consumption(mock_consumption().get(¶).unwrap().clone(), Grouping::Hour); 272 | assert_eq!(consumption_data, expected_consumption); 273 | 274 | // Grouping by day: 275 | let response = 
client.get("/consumption/polkadot/2000?grouping=day").dispatch(); 276 | assert_eq!(response.status(), Status::Ok); 277 | 278 | let consumption_data = parse_ok_response(response); 279 | let expected_consumption = 280 | group_consumption(mock_consumption().get(¶).unwrap().clone(), Grouping::Day); 281 | assert_eq!(consumption_data, expected_consumption); 282 | 283 | // Grouping by month: 284 | let response = client.get("/consumption/polkadot/2000?grouping=month").dispatch(); 285 | assert_eq!(response.status(), Status::Ok); 286 | 287 | let consumption_data = parse_ok_response(response); 288 | let expected_consumption = 289 | group_consumption(mock_consumption().get(¶).unwrap().clone(), Grouping::Month); 290 | assert_eq!(consumption_data, expected_consumption); 291 | 292 | // Grouping by year: 293 | let response = client.get("/consumption/polkadot/2000?grouping=year").dispatch(); 294 | assert_eq!(response.status(), Status::Ok); 295 | 296 | let consumption_data = parse_ok_response(response); 297 | let expected_consumption = 298 | group_consumption(mock_consumption().get(¶).unwrap().clone(), Grouping::Year); 299 | assert_eq!(consumption_data, expected_consumption); 300 | }); 301 | } 302 | 303 | 304 | fn parse_ok_response<'a>(response: LocalResponse<'a>) -> Vec { 305 | 306 | let body = response.into_string().unwrap(); 307 | serde_json::from_str(&body).expect("can't parse value") 308 | } 309 | 310 | fn parse_err_response<'a>(response: LocalResponse<'a>) -> Error { 311 | let body = response.into_string().unwrap(); 312 | body.into() 313 | } 314 | -------------------------------------------------------------------------------- /chaindata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "Amplitude", 4 | "para_id": 2124, 5 | "relay": { 6 | "id": "kusama" 7 | }, 8 | "rpcs": [ 9 | { 10 | "url": "wss://amplitude-rpc.dwellir.com" 11 | }, 12 | { 13 | "url": "wss://rpc-amplitude.pendulumchain.tech" 14 | } 15 | ] 16 | }, 17 | { 18 | "name": "Encointer", 19 | "para_id": 1001, 20 | "relay": { 21 | "id": "kusama" 22 | }, 23 | "rpcs": [ 24 | { 25 | "url": "wss://sys.ibp.network/encointer-kusama" 26 | }, 27 | { 28 | "url": "wss://sys.dotters.network/encointer-kusama" 29 | }, 30 | { 31 | "url": "wss://kusama.api.encointer.org" 32 | }, 33 | { 34 | "url": "wss://ksm-rpc.stakeworld.io/encointer" 35 | } 36 | ] 37 | }, 38 | { 39 | "name": "Altair", 40 | "para_id": 2088, 41 | "relay": { 42 | "id": "kusama" 43 | }, 44 | "rpcs": [ 45 | { 46 | "url": "wss://fullnode.altair.centrifuge.io" 47 | }, 48 | { 49 | "url": "wss://altair.api.onfinality.io/public-ws" 50 | } 51 | ] 52 | }, 53 | { 54 | "name": "Basilisk", 55 | "para_id": 2090, 56 | "relay": { 57 | "id": "kusama" 58 | }, 59 | "rpcs": [ 60 | { 61 | "url": "wss://basilisk-rpc.dwellir.com" 62 | }, 63 | { 64 | "url": "wss://rpc.basilisk.cloud" 65 | } 66 | ] 67 | }, 68 | { 69 | "name": "Bit.Country Pioneer", 70 | "para_id": 2096, 71 | "relay": { 72 | "id": "kusama" 73 | }, 74 | "rpcs": [ 75 | { 76 | "url": "wss://pioneer-rpc-3.bit.country/wss" 77 | } 78 | ] 79 | }, 80 | { 81 | "name": "Parallel Heiko", 82 | "para_id": 2085, 83 | "relay": { 84 | "id": "kusama" 85 | }, 86 | "rpcs": [ 87 | { 88 | "url": "wss://heiko-rpc.parallel.fi" 89 | } 90 | ] 91 | }, 92 | { 93 | "name": "Integritee", 94 | "para_id": 2015, 95 | "relay": { 96 | "id": "kusama" 97 | }, 98 | "rpcs": [ 99 | { 100 | "url": "wss://kusama.api.integritee.network" 101 | }, 102 | { 103 | "url": "wss://integritee-kusama.api.onfinality.io/public-ws" 104 | } 105 
| ] 106 | }, 107 | { 108 | "name": "DAO IPCI", 109 | "para_id": 2222, 110 | "relay": { 111 | "id": "kusama" 112 | }, 113 | "rpcs": [ 114 | { 115 | "url": "wss://kusama.rpc.ipci.io" 116 | } 117 | ] 118 | }, 119 | { 120 | "name": "Imbue", 121 | "para_id": 2121, 122 | "relay": { 123 | "id": "kusama" 124 | }, 125 | "rpcs": [ 126 | { 127 | "url": "wss://kusama.imbuenetwork.com" 128 | } 129 | ] 130 | }, 131 | { 132 | "name": "GM", 133 | "para_id": 2123, 134 | "relay": { 135 | "id": "kusama" 136 | }, 137 | "rpcs": [ 138 | { 139 | "url": "wss://ws.gm.bldnodes.org/" 140 | } 141 | ] 142 | }, 143 | { 144 | "name": "Bifrost Kusama", 145 | "para_id": 2001, 146 | "relay": { 147 | "id": "kusama" 148 | }, 149 | "rpcs": [ 150 | { 151 | "url": "wss://bifrost-rpc.dwellir.com" 152 | }, 153 | { 154 | "url": "wss://bifrost-rpc.liebi.com/ws" 155 | }, 156 | { 157 | "url": "wss://us.bifrost-rpc.liebi.com/ws" 158 | } 159 | ] 160 | }, 161 | { 162 | "name": "Krest", 163 | "para_id": 2241, 164 | "relay": { 165 | "id": "kusama" 166 | }, 167 | "rpcs": [ 168 | { 169 | "url": "wss://wss-krest.peaq.network/" 170 | }, 171 | { 172 | "url": "wss://krest.unitedbloc.com/" 173 | } 174 | ] 175 | }, 176 | { 177 | "name": "Kusama Bridge Hub", 178 | "para_id": 1002, 179 | "relay": { 180 | "id": "kusama" 181 | }, 182 | "rpcs": [ 183 | { 184 | "url": "wss://kusama-bridge-hub-rpc.dwellir.com" 185 | }, 186 | { 187 | "url": "wss://kusama-bridge-hub-rpc-tn.dwellir.com" 188 | }, 189 | { 190 | "url": "wss://sys.ibp.network/bridgehub-kusama" 191 | }, 192 | { 193 | "url": "wss://sys.dotters.network/bridgehub-kusama" 194 | }, 195 | { 196 | "url": "wss://kusama-bridge-hub-rpc.polkadot.io" 197 | }, 198 | { 199 | "url": "wss://ksm-rpc.stakeworld.io/bridgehub" 200 | } 201 | ] 202 | }, 203 | { 204 | "name": "Khala", 205 | "para_id": 2004, 206 | "relay": { 207 | "id": "kusama" 208 | }, 209 | "rpcs": [ 210 | { 211 | "url": "wss://khala-rpc.dwellir.com" 212 | }, 213 | { 214 | "url": "wss://khala-api.phala.network/ws" 215 | } 216 | ] 217 | }, 218 | { 219 | "name": "Kusama Asset Hub", 220 | "para_id": 1000, 221 | "relay": { 222 | "id": "kusama" 223 | }, 224 | "rpcs": [ 225 | { 226 | "url": "wss://statemine-rpc.dwellir.com" 227 | }, 228 | { 229 | "url": "wss://statemine-rpc-tn.dwellir.com" 230 | }, 231 | { 232 | "url": "wss://sys.ibp.network/statemine" 233 | }, 234 | { 235 | "url": "wss://sys.dotters.network/statemine" 236 | }, 237 | { 238 | "url": "wss://rpc-asset-hub-kusama.luckyfriday.io" 239 | }, 240 | { 241 | "url": "wss://kusama-asset-hub-rpc.polkadot.io" 242 | }, 243 | { 244 | "url": "wss://statemine.public.curie.radiumblock.co/ws" 245 | }, 246 | { 247 | "url": "wss://ksm-rpc.stakeworld.io/assethub" 248 | } 249 | ] 250 | }, 251 | { 252 | "name": "Genshiro", 253 | "para_id": 2024, 254 | "relay": { 255 | "id": "kusama" 256 | }, 257 | "rpcs": [ 258 | { 259 | "url": "wss://node.ksm.genshiro.io" 260 | }, 261 | { 262 | "url": "wss://node.genshiro.io" 263 | } 264 | ] 265 | }, 266 | { 267 | "name": "Moonriver", 268 | "para_id": 2023, 269 | "relay": { 270 | "id": "kusama" 271 | }, 272 | "rpcs": [ 273 | { 274 | "url": "wss://moonriver-rpc.dwellir.com" 275 | }, 276 | { 277 | "url": "wss://wss.api.moonriver.moonbeam.network" 278 | }, 279 | { 280 | "url": "wss://moonriver.unitedbloc.com" 281 | } 282 | ] 283 | }, 284 | { 285 | "name": "Litmus", 286 | "para_id": 2106, 287 | "relay": { 288 | "id": "kusama" 289 | }, 290 | "rpcs": [ 291 | { 292 | "url": "wss://rpc.litmus-parachain.litentry.io" 293 | } 294 | ] 295 | }, 296 | { 297 | "name": "Robonomics", 298 | 
"para_id": 2048, 299 | "relay": { 300 | "id": "kusama" 301 | }, 302 | "rpcs": [ 303 | { 304 | "url": "wss://kusama.rpc.robonomics.network/" 305 | }, 306 | { 307 | "url": "wss://robonomics.0xsamsara.com" 308 | } 309 | ] 310 | }, 311 | { 312 | "name": "Quartz", 313 | "para_id": 2095, 314 | "relay": { 315 | "id": "kusama" 316 | }, 317 | "rpcs": [ 318 | { 319 | "url": "wss://quartz-rpc.dwellir.com" 320 | }, 321 | { 322 | "url": "wss://ws-quartz.unique.network" 323 | }, 324 | { 325 | "url": "wss://us-ws-quartz.unique.network" 326 | }, 327 | { 328 | "url": "wss://asia-ws-quartz.unique.network" 329 | }, 330 | { 331 | "url": "wss://eu-ws-quartz.unique.network" 332 | } 333 | ] 334 | }, 335 | { 336 | "name": "Sora", 337 | "para_id": 2011, 338 | "relay": { 339 | "id": "kusama" 340 | }, 341 | "rpcs": [ 342 | { 343 | "url": "wss://ws.parachain-collator-1.c1.sora2.soramitsu.co.jp" 344 | } 345 | ] 346 | }, 347 | { 348 | "name": "Shiden", 349 | "para_id": 2007, 350 | "relay": { 351 | "id": "kusama" 352 | }, 353 | "rpcs": [ 354 | { 355 | "url": "wss://shiden-rpc.dwellir.com" 356 | }, 357 | { 358 | "url": "wss://rpc.shiden.astar.network" 359 | } 360 | ] 361 | }, 362 | { 363 | "name": "Crust Shadow", 364 | "para_id": 2012, 365 | "relay": { 366 | "id": "kusama" 367 | }, 368 | "rpcs": [ 369 | { 370 | "url": "wss://rpc-shadow.crust.network/" 371 | } 372 | ] 373 | }, 374 | { 375 | "name": "InvArch Tinkernet", 376 | "para_id": 2125, 377 | "relay": { 378 | "id": "kusama" 379 | }, 380 | "rpcs": [ 381 | { 382 | "url": "wss://tinkernet-rpc.dwellir.com" 383 | } 384 | ] 385 | }, 386 | { 387 | "name": "Turing", 388 | "para_id": 2114, 389 | "relay": { 390 | "id": "kusama" 391 | }, 392 | "rpcs": [ 393 | { 394 | "url": "wss://turing-rpc.dwellir.com" 395 | }, 396 | { 397 | "url": "wss://rpc.turing.oak.tech" 398 | } 399 | ] 400 | }, 401 | { 402 | "name": "Subzero", 403 | "para_id": 2236, 404 | "relay": { 405 | "id": "kusama" 406 | }, 407 | "rpcs": [ 408 | { 409 | "url": "wss://rpc-1.kusama.node.zero.io" 410 | } 411 | ] 412 | }, 413 | { 414 | "name": "Picasso", 415 | "para_id": 2087, 416 | "relay": { 417 | "id": "kusama" 418 | }, 419 | "rpcs": [ 420 | { 421 | "url": "wss://picasso-rpc.dwellir.com" 422 | }, 423 | { 424 | "url": "wss://rpc.composablenodes.tech" 425 | } 426 | ] 427 | }, 428 | { 429 | "name": "Karura", 430 | "para_id": 2000, 431 | "relay": { 432 | "id": "kusama" 433 | }, 434 | "rpcs": [ 435 | { 436 | "url": "wss://karura-rpc-0.aca-api.network" 437 | }, 438 | { 439 | "url": "wss://karura-rpc-1.aca-api.network" 440 | }, 441 | { 442 | "url": "wss://karura-rpc-2.aca-api.network/ws" 443 | }, 444 | { 445 | "url": "wss://karura-rpc-3.aca-api.network/ws" 446 | } 447 | ] 448 | }, 449 | { 450 | "name": "MangataX", 451 | "para_id": 2110, 452 | "relay": { 453 | "id": "kusama" 454 | }, 455 | "rpcs": [ 456 | { 457 | "url": "wss://kusama-archive.mangata.online" 458 | }, 459 | { 460 | "url": "wss://kusama-rpc.mangata.online" 461 | } 462 | ] 463 | }, 464 | { 465 | "name": "Acurast Canary", 466 | "para_id": 2239, 467 | "relay": { 468 | "id": "kusama" 469 | }, 470 | "rpcs": [ 471 | { 472 | "url": "wss://acurast-canarynet-ws.prod.gke.papers.tech" 473 | } 474 | ] 475 | }, 476 | { 477 | "name": "Kabocha", 478 | "para_id": 2113, 479 | "relay": { 480 | "id": "kusama" 481 | }, 482 | "rpcs": [ 483 | { 484 | "url": "wss://kabocha.jelliedowl.net" 485 | } 486 | ] 487 | }, 488 | { 489 | "name": "t1rn", 490 | "para_id": 3334, 491 | "relay": { 492 | "id": "kusama" 493 | }, 494 | "rpcs": [ 495 | { 496 | "url": "wss://rpc.t1rn.io" 497 | } 498 | ] 
499 | }, 500 | { 501 | "name": "Kintsugi", 502 | "para_id": 2092, 503 | "relay": { 504 | "id": "kusama" 505 | }, 506 | "rpcs": [ 507 | { 508 | "url": "wss://kintsugi-rpc.dwellir.com" 509 | }, 510 | { 511 | "url": "wss://api-kusama.interlay.io/parachain" 512 | } 513 | ] 514 | }, 515 | { 516 | "name": "Kreivo - By Virto", 517 | "para_id": 2281, 518 | "relay": { 519 | "id": "kusama" 520 | }, 521 | "rpcs": [ 522 | { 523 | "url": "wss://kreivo.io/" 524 | } 525 | ] 526 | }, 527 | { 528 | "name": "Kpron", 529 | "para_id": 2019, 530 | "relay": { 531 | "id": "kusama" 532 | }, 533 | "rpcs": [ 534 | { 535 | "url": "wss://kusama-kpron-rpc.apron.network/" 536 | } 537 | ] 538 | }, 539 | { 540 | "name": "Sakura", 541 | "para_id": 2016, 542 | "relay": { 543 | "id": "kusama" 544 | }, 545 | "rpcs": [ 546 | { 547 | "url": "wss://api-sakura.clover.finance" 548 | } 549 | ] 550 | }, 551 | { 552 | "name": "Quantum Portal", 553 | "para_id": 2274, 554 | "relay": { 555 | "id": "kusama" 556 | }, 557 | "rpcs": [ 558 | { 559 | "url": "wss://qpn.svcs.ferrumnetwork.io/" 560 | } 561 | ] 562 | }, 563 | { 564 | "name": "Bajun", 565 | "para_id": 2119, 566 | "relay": { 567 | "id": "kusama" 568 | }, 569 | "rpcs": [ 570 | { 571 | "url": "wss://rpc-parachain.bajun.network" 572 | }, 573 | { 574 | "url": "wss://bajun.public.curie.radiumblock.co/ws" 575 | } 576 | ] 577 | }, 578 | { 579 | "name": "Calamari", 580 | "para_id": 2084, 581 | "relay": { 582 | "id": "kusama" 583 | }, 584 | "rpcs": [ 585 | { 586 | "url": "wss://calamari.systems" 587 | } 588 | ] 589 | }, 590 | { 591 | "name": "Darwinia Crab", 592 | "para_id": 2105, 593 | "relay": { 594 | "id": "kusama" 595 | }, 596 | "rpcs": [ 597 | { 598 | "url": "wss://darwiniacrab-rpc.dwellir.com" 599 | }, 600 | { 601 | "url": "wss://crab-rpc.darwinia.network/" 602 | }, 603 | { 604 | "url": "wss://crab-rpc.darwiniacommunitydao.xyz" 605 | } 606 | ] 607 | }, 608 | { 609 | "name": "Polkadot", 610 | "para_id": 0, 611 | "relay": { 612 | "id": "polkadot" 613 | }, 614 | "rpcs": [ 615 | { 616 | "url": "wss://polkadot.api.onfinality.io/public-ws" 617 | }, 618 | { 619 | "url": "wss://rpc-polkadot.luckyfriday.io" 620 | } 621 | ] 622 | }, 623 | { 624 | "name": "Ajuna", 625 | "para_id": 2051, 626 | "relay": { 627 | "id": "polkadot" 628 | }, 629 | "rpcs": [ 630 | { 631 | "url": "wss://rpc-parachain.ajuna.network" 632 | }, 633 | { 634 | "url": "wss://ajuna.public.curie.radiumblock.co/ws" 635 | } 636 | ] 637 | }, 638 | { 639 | "name": "Litentry", 640 | "para_id": 2013, 641 | "relay": { 642 | "id": "polkadot" 643 | }, 644 | "rpcs": [ 645 | { 646 | "url": "wss://litentry-rpc.dwellir.com" 647 | }, 648 | { 649 | "url": "wss://rpc.litentry-parachain.litentry.io" 650 | } 651 | ] 652 | }, 653 | { 654 | "name": "Energy Web X", 655 | "para_id": 3345, 656 | "relay": { 657 | "id": "polkadot" 658 | }, 659 | "rpcs": [ 660 | { 661 | "url": "wss://public-rpc.mainnet.energywebx.com" 662 | } 663 | ] 664 | }, 665 | { 666 | "name": "Acala", 667 | "para_id": 2000, 668 | "relay": { 669 | "id": "polkadot" 670 | }, 671 | "rpcs": [ 672 | { 673 | "url": "wss://acala-rpc.dwellir.com" 674 | }, 675 | { 676 | "url": "wss://acala-rpc-0.aca-api.network" 677 | }, 678 | { 679 | "url": "wss://acala-rpc-1.aca-api.network" 680 | }, 681 | { 682 | "url": "wss://acala-rpc-3.aca-api.network/ws" 683 | } 684 | ] 685 | }, 686 | { 687 | "name": "Crust", 688 | "para_id": 2008, 689 | "relay": { 690 | "id": "polkadot" 691 | }, 692 | "rpcs": [ 693 | { 694 | "url": "wss://crust-parachain.crustapps.net" 695 | } 696 | ] 697 | }, 698 | { 699 | "name": 
"Clover", 700 | "para_id": 2002, 701 | "relay": { 702 | "id": "polkadot" 703 | }, 704 | "rpcs": [ 705 | { 706 | "url": "wss://rpc-para.clover.finance" 707 | } 708 | ] 709 | }, 710 | { 711 | "name": "Darwinia", 712 | "para_id": 2046, 713 | "relay": { 714 | "id": "polkadot" 715 | }, 716 | "rpcs": [ 717 | { 718 | "url": "wss://darwinia-rpc.dwellir.com" 719 | }, 720 | { 721 | "url": "wss://rpc.darwinia.network" 722 | }, 723 | { 724 | "url": "wss://darwinia-rpc.darwiniacommunitydao.xyz" 725 | } 726 | ] 727 | }, 728 | { 729 | "name": "Centrifuge", 730 | "para_id": 2031, 731 | "relay": { 732 | "id": "polkadot" 733 | }, 734 | "rpcs": [ 735 | { 736 | "url": "wss://centrifuge-rpc.dwellir.com" 737 | }, 738 | { 739 | "url": "wss://fullnode.centrifuge.io" 740 | }, 741 | { 742 | "url": "wss://rpc-centrifuge.luckyfriday.io" 743 | } 744 | ] 745 | }, 746 | { 747 | "name": "InvArch", 748 | "para_id": 3340, 749 | "relay": { 750 | "id": "polkadot" 751 | }, 752 | "rpcs": [ 753 | { 754 | "url": "wss://invarch-rpc.dwellir.com" 755 | } 756 | ] 757 | }, 758 | { 759 | "name": "Composable Finance", 760 | "para_id": 2019, 761 | "relay": { 762 | "id": "polkadot" 763 | }, 764 | "rpcs": [ 765 | { 766 | "url": "wss://composable-rpc.dwellir.com" 767 | }, 768 | { 769 | "url": "wss://rpc.composable.finance" 770 | } 771 | ] 772 | }, 773 | { 774 | "name": "Astar", 775 | "para_id": 2006, 776 | "relay": { 777 | "id": "polkadot" 778 | }, 779 | "rpcs": [ 780 | { 781 | "url": "wss://astar-rpc.dwellir.com" 782 | }, 783 | { 784 | "url": "wss://rpc.astar.network" 785 | }, 786 | { 787 | "url": "wss://1rpc.io/astr" 788 | }, 789 | { 790 | "url": "wss://astar.public.curie.radiumblock.co/ws" 791 | } 792 | ] 793 | }, 794 | { 795 | "name": "KILT Spiritnet", 796 | "para_id": 2086, 797 | "relay": { 798 | "id": "polkadot" 799 | }, 800 | "rpcs": [ 801 | { 802 | "url": "wss://kilt-rpc.dwellir.com" 803 | }, 804 | { 805 | "url": "wss://spiritnet.kilt.io/" 806 | } 807 | ] 808 | }, 809 | { 810 | "name": "Bifrost Polkadot", 811 | "para_id": 2030, 812 | "relay": { 813 | "id": "polkadot" 814 | }, 815 | "rpcs": [ 816 | { 817 | "url": "wss://hk.p.bifrost-rpc.liebi.com/ws" 818 | }, 819 | { 820 | "url": "wss://eu.bifrost-polkadot-rpc.liebi.com/ws" 821 | } 822 | ] 823 | }, 824 | { 825 | "name": "HydraDX", 826 | "para_id": 2034, 827 | "relay": { 828 | "id": "polkadot" 829 | }, 830 | "rpcs": [ 831 | { 832 | "url": "wss://hydradx-rpc.dwellir.com" 833 | }, 834 | { 835 | "url": "wss://rpc.hydradx.cloud" 836 | } 837 | ] 838 | }, 839 | { 840 | "name": "Manta", 841 | "para_id": 2104, 842 | "relay": { 843 | "id": "polkadot" 844 | }, 845 | "rpcs": [ 846 | { 847 | "url": "wss://ws.manta.systems" 848 | } 849 | ] 850 | }, 851 | { 852 | "name": "Moonbeam", 853 | "para_id": 2004, 854 | "relay": { 855 | "id": "polkadot" 856 | }, 857 | "rpcs": [ 858 | { 859 | "url": "wss://moonbeam-rpc.dwellir.com" 860 | }, 861 | { 862 | "url": "wss://1rpc.io/glmr" 863 | }, 864 | { 865 | "url": "wss://wss.api.moonbeam.network" 866 | }, 867 | { 868 | "url": "wss://moonbeam.unitedbloc.com" 869 | } 870 | ] 871 | }, 872 | { 873 | "name": "Nodle", 874 | "para_id": 2026, 875 | "relay": { 876 | "id": "polkadot" 877 | }, 878 | "rpcs": [ 879 | { 880 | "url": "wss://eden-rpc.dwellir.com" 881 | }, 882 | { 883 | "url": "wss://nodle-parachain.api.onfinality.io/public-ws" 884 | } 885 | ] 886 | }, 887 | { 888 | "name": "Moonsama", 889 | "para_id": 3334, 890 | "relay": { 891 | "id": "polkadot" 892 | }, 893 | "rpcs": [ 894 | { 895 | "url": "wss://rpc.moonsama.com/ws" 896 | } 897 | ] 898 | }, 899 | { 900 | 
"name": "OriginTrail", 901 | "para_id": 2043, 902 | "relay": { 903 | "id": "polkadot" 904 | }, 905 | "rpcs": [ 906 | { 907 | "url": "wss://origintrail-rpc.dwellir.com" 908 | }, 909 | { 910 | "url": "wss://parachain-rpc.origin-trail.network" 911 | } 912 | ] 913 | }, 914 | { 915 | "name": "Phala", 916 | "para_id": 2035, 917 | "relay": { 918 | "id": "polkadot" 919 | }, 920 | "rpcs": [ 921 | { 922 | "url": "wss://phala-rpc.dwellir.com" 923 | }, 924 | { 925 | "url": "wss://api.phala.network/ws" 926 | } 927 | ] 928 | }, 929 | { 930 | "name": "Polkadex", 931 | "para_id": 2040, 932 | "relay": { 933 | "id": "polkadot" 934 | }, 935 | "rpcs": [ 936 | { 937 | "url": "wss://polkadex-rpc.dwellir.com" 938 | }, 939 | { 940 | "url": "wss://polkadex-parachain.public.curie.radiumblock.co/ws" 941 | } 942 | ] 943 | }, 944 | { 945 | "name": "Polkadot Bridge Hub", 946 | "para_id": 1002, 947 | "relay": { 948 | "id": "polkadot" 949 | }, 950 | "rpcs": [ 951 | { 952 | "url": "wss://polkadot-bridge-hub-rpc.dwellir.com" 953 | }, 954 | { 955 | "url": "wss://polkadot-bridge-hub-rpc-tn.dwellir.com" 956 | }, 957 | { 958 | "url": "wss://sys.ibp.network/bridgehub-polkadot" 959 | }, 960 | { 961 | "url": "wss://sys.dotters.network/bridgehub-polkadot" 962 | }, 963 | { 964 | "url": "wss://rpc-bridge-hub-polkadot.luckyfriday.io" 965 | }, 966 | { 967 | "url": "wss://polkadot-bridge-hub-rpc.polkadot.io" 968 | }, 969 | { 970 | "url": "wss://dot-rpc.stakeworld.io/bridgehub" 971 | } 972 | ] 973 | }, 974 | { 975 | "name": "Parallel", 976 | "para_id": 2012, 977 | "relay": { 978 | "id": "polkadot" 979 | }, 980 | "rpcs": [ 981 | { 982 | "url": "wss://parallel-rpc.dwellir.com" 983 | }, 984 | { 985 | "url": "wss://rpc.parallel.fi" 986 | } 987 | ] 988 | }, 989 | { 990 | "name": "Collectives", 991 | "para_id": 1001, 992 | "relay": { 993 | "id": "polkadot" 994 | }, 995 | "rpcs": [ 996 | { 997 | "url": "wss://polkadot-collectives-rpc.dwellir.com" 998 | }, 999 | { 1000 | "url": "wss://polkadot-collectives-rpc-tn.dwellir.com" 1001 | }, 1002 | { 1003 | "url": "wss://sys.ibp.network/collectives-polkadot" 1004 | }, 1005 | { 1006 | "url": "wss://sys.dotters.network/collectives-polkadot" 1007 | }, 1008 | { 1009 | "url": "wss://rpc-collectives-polkadot.luckyfriday.io" 1010 | }, 1011 | { 1012 | "url": "wss://polkadot-collectives-rpc.polkadot.io" 1013 | }, 1014 | { 1015 | "url": "wss://collectives.public.curie.radiumblock.co/ws" 1016 | }, 1017 | { 1018 | "url": "wss://dot-rpc.stakeworld.io/collectives" 1019 | } 1020 | ] 1021 | }, 1022 | { 1023 | "name": "Pendulum", 1024 | "para_id": 2094, 1025 | "relay": { 1026 | "id": "polkadot" 1027 | }, 1028 | "rpcs": [ 1029 | { 1030 | "url": "wss://pendulum-rpc.dwellir.com" 1031 | }, 1032 | { 1033 | "url": "wss://rpc-pendulum.prd.pendulumchain.tech" 1034 | } 1035 | ] 1036 | }, 1037 | { 1038 | "name": "Polkadot Asset Hub", 1039 | "para_id": 1000, 1040 | "relay": { 1041 | "id": "polkadot" 1042 | }, 1043 | "rpcs": [ 1044 | { 1045 | "url": "wss://statemint-rpc.dwellir.com" 1046 | }, 1047 | { 1048 | "url": "wss://statemint-rpc-tn.dwellir.com" 1049 | }, 1050 | { 1051 | "url": "wss://sys.ibp.network/statemint" 1052 | }, 1053 | { 1054 | "url": "wss://sys.dotters.network/statemint" 1055 | }, 1056 | { 1057 | "url": "wss://rpc-asset-hub-polkadot.luckyfriday.io" 1058 | }, 1059 | { 1060 | "url": "wss://polkadot-asset-hub-rpc.polkadot.io" 1061 | }, 1062 | { 1063 | "url": "wss://statemint.public.curie.radiumblock.co/ws" 1064 | }, 1065 | { 1066 | "url": "wss://dot-rpc.stakeworld.io/assethub" 1067 | } 1068 | ] 1069 | }, 1070 | { 
1071 | "name": "Subsocial", 1072 | "para_id": 2101, 1073 | "relay": { 1074 | "id": "polkadot" 1075 | }, 1076 | "rpcs": [ 1077 | { 1078 | "url": "wss://para.subsocial.network" 1079 | }, 1080 | { 1081 | "url": "wss://subsocial-rpc.dwellir.com" 1082 | } 1083 | ] 1084 | }, 1085 | { 1086 | "name": "Watr", 1087 | "para_id": 2058, 1088 | "relay": { 1089 | "id": "polkadot" 1090 | }, 1091 | "rpcs": [ 1092 | { 1093 | "url": "wss://watr-rpc.watr-api.network" 1094 | } 1095 | ] 1096 | }, 1097 | { 1098 | "name": "Zeitgeist", 1099 | "para_id": 2092, 1100 | "relay": { 1101 | "id": "polkadot" 1102 | }, 1103 | "rpcs": [ 1104 | { 1105 | "url": "wss://zeitgeist-rpc.dwellir.com" 1106 | }, 1107 | { 1108 | "url": "wss://main.rpc.zeitgeist.pm/ws" 1109 | } 1110 | ] 1111 | }, 1112 | { 1113 | "name": "Unique", 1114 | "para_id": 2037, 1115 | "relay": { 1116 | "id": "polkadot" 1117 | }, 1118 | "rpcs": [ 1119 | { 1120 | "url": "wss://unique-rpc.dwellir.com" 1121 | }, 1122 | { 1123 | "url": "wss://ws.unique.network" 1124 | }, 1125 | { 1126 | "url": "wss://us-ws.unique.network" 1127 | }, 1128 | { 1129 | "url": "wss://asia-ws.unique.network" 1130 | }, 1131 | { 1132 | "url": "wss://eu-ws.unique.network" 1133 | } 1134 | ] 1135 | }, 1136 | { 1137 | "name": "Equilibrium", 1138 | "para_id": 2011, 1139 | "relay": { 1140 | "id": "polkadot" 1141 | }, 1142 | "rpcs": [ 1143 | { 1144 | "url": "wss://equilibrium-rpc.dwellir.com" 1145 | } 1146 | ] 1147 | }, 1148 | { 1149 | "name": "Interlay", 1150 | "para_id": 2032, 1151 | "relay": { 1152 | "id": "polkadot" 1153 | }, 1154 | "rpcs": [ 1155 | { 1156 | "url": "wss://interlay-rpc.dwellir.com" 1157 | }, 1158 | { 1159 | "url": "wss://api.interlay.io/parachain" 1160 | }, 1161 | { 1162 | "url": "wss://rpc-interlay.luckyfriday.io/" 1163 | } 1164 | ] 1165 | }, 1166 | { 1167 | "name": "Kapex", 1168 | "para_id": 2007, 1169 | "relay": { 1170 | "id": "polkadot" 1171 | }, 1172 | "rpcs": [ 1173 | { 1174 | "url": "wss://kapex-rpc.dwellir.com" 1175 | } 1176 | ] 1177 | }, 1178 | { 1179 | "name": "Frequency", 1180 | "para_id": 2091, 1181 | "relay": { 1182 | "id": "polkadot" 1183 | }, 1184 | "rpcs": [ 1185 | { 1186 | "url": "wss://frequency-rpc.dwellir.com" 1187 | }, 1188 | { 1189 | "url": "wss://0.rpc.frequency.xyz" 1190 | }, 1191 | { 1192 | "url": "wss://1.rpc.frequency.xyz" 1193 | } 1194 | ] 1195 | }, 1196 | { 1197 | "name": "t3rn", 1198 | "para_id": 3333, 1199 | "relay": { 1200 | "id": "polkadot" 1201 | }, 1202 | "rpcs": [ 1203 | { 1204 | "url": "wss://ws.t3rn.io" 1205 | } 1206 | ] 1207 | }, 1208 | { 1209 | "name": "Geminis", 1210 | "para_id": 2038, 1211 | "relay": { 1212 | "id": "polkadot" 1213 | }, 1214 | "rpcs": [ 1215 | { 1216 | "url": "wss://rpc.geminis.network" 1217 | } 1218 | ] 1219 | }, 1220 | { 1221 | "name": "Oak", 1222 | "para_id": 2090, 1223 | "relay": { 1224 | "id": "polkadot" 1225 | }, 1226 | "rpcs": [ 1227 | { 1228 | "url": "wss://rpc.oak.tech" 1229 | } 1230 | ] 1231 | }, 1232 | { 1233 | "name": "SubDAO", 1234 | "para_id": 2018, 1235 | "relay": { 1236 | "id": "polkadot" 1237 | }, 1238 | "rpcs": [ 1239 | { 1240 | "url": "wss://parachain-rpc.subdao.org" 1241 | } 1242 | ] 1243 | }, 1244 | { 1245 | "name": "OmniBTC", 1246 | "para_id": 2053, 1247 | "relay": { 1248 | "id": "polkadot" 1249 | }, 1250 | "rpcs": [ 1251 | { 1252 | "url": "wss://psc-parachain.coming.chat" 1253 | } 1254 | ] 1255 | }, 1256 | { 1257 | "name": "Bitgreen", 1258 | "para_id": 2048, 1259 | "relay": { 1260 | "id": "polkadot" 1261 | }, 1262 | "rpcs": [ 1263 | { 1264 | "url": "wss://mainnet.bitgreen.org" 1265 | } 1266 | ] 
1267 | }, 1268 | { 1269 | "name": "Hashed", 1270 | "para_id": 2093, 1271 | "relay": { 1272 | "id": "polkadot" 1273 | }, 1274 | "rpcs": [ 1275 | { 1276 | "url": "wss://c1.hashed.live" 1277 | }, 1278 | { 1279 | "url": "wss://c2.hashed.network" 1280 | }, 1281 | { 1282 | "url": "wss://c3.hashed.live" 1283 | } 1284 | ] 1285 | }, 1286 | { 1287 | "name": "Aventus", 1288 | "para_id": 2056, 1289 | "relay": { 1290 | "id": "polkadot" 1291 | }, 1292 | "rpcs": [ 1293 | { 1294 | "url": "wss://public-rpc.mainnet.aventus.io" 1295 | } 1296 | ] 1297 | } 1298 | ] 1299 | --------------------------------------------------------------------------------
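Every entry in this file follows the same shape: a human-readable "name", a numeric "para_id", a "relay" object whose "id" is either "kusama" or "polkadot", and an "rpcs" array of websocket "url" values. As a minimal sketch only, assuming serde and serde_json (the field and type names below are illustrative and need not match the actual definitions in shared/src/chaindata.rs), one entry of this shape could be deserialized like this:

// Hypothetical sketch: deserializing one entry of this JSON file with serde.
// The repository's real types live in shared/src/chaindata.rs and may differ.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Parachain {
    // Human-readable chain name, e.g. "Aventus".
    name: String,
    // Parachain ID on the relay chain (0 appears for the relay chain itself).
    para_id: u32,
    // Relay chain the parachain is registered on.
    relay: Relay,
    // One or more websocket endpoints for the chain.
    rpcs: Vec<Rpc>,
}

#[derive(Debug, Deserialize)]
struct Relay {
    // "kusama" or "polkadot".
    id: String,
}

#[derive(Debug, Deserialize)]
struct Rpc {
    url: String,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // One entry copied from the file above, used as a self-contained example.
    let raw = r#"{
        "name": "Aventus",
        "para_id": 2056,
        "relay": { "id": "polkadot" },
        "rpcs": [ { "url": "wss://public-rpc.mainnet.aventus.io" } ]
    }"#;
    let chain: Parachain = serde_json::from_str(raw)?;
    println!("{} (para {}) on {}", chain.name, chain.para_id, chain.relay.id);
    Ok(())
}

Listing several RPC URLs per chain presumably allows a consumer of this file to fall back to an alternative endpoint when one provider is unreachable; which endpoint is picked is left to the code that reads the file.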