├── .zed └── settings.json ├── .envrc ├── docker ├── secrets │ └── .keep ├── xtask.dockerfile └── node.dockerfile ├── rustfmt.toml ├── pallets ├── torus0 │ ├── src │ │ ├── migrations.rs │ │ ├── ext.rs │ │ ├── fee.rs │ │ ├── burn.rs │ │ ├── benchmarking.rs │ │ └── stake.rs │ ├── rpc │ │ ├── Cargo.toml │ │ └── src │ │ │ └── lib.rs │ ├── api │ │ ├── src │ │ │ └── api.rs │ │ └── Cargo.toml │ ├── Cargo.toml │ └── tests │ │ └── burn.rs ├── emission0 │ ├── src │ │ ├── migrations.rs │ │ ├── ext.rs │ │ ├── benchmarking.rs │ │ └── weight_control.rs │ ├── api │ │ ├── Cargo.toml │ │ └── src │ │ │ └── lib.rs │ ├── Cargo.toml │ └── tests │ │ └── weights.rs ├── governance │ ├── src │ │ ├── migrations.rs │ │ ├── ext.rs │ │ ├── whitelist.rs │ │ ├── config.rs │ │ ├── roles.rs │ │ └── voting.rs │ ├── api │ │ ├── Cargo.toml │ │ └── src │ │ │ └── lib.rs │ ├── tests │ │ ├── delegation.rs │ │ └── config.rs │ └── Cargo.toml ├── permission0 │ ├── src │ │ ├── migrations.rs │ │ └── permission │ │ │ ├── wallet.rs │ │ │ ├── namespace.rs │ │ │ └── curator.rs │ ├── rpc │ │ ├── Cargo.toml │ │ └── src │ │ │ └── lib.rs │ ├── api │ │ └── Cargo.toml │ ├── Cargo.toml │ └── tests │ │ └── curator.rs └── faucet │ ├── src │ └── ext.rs │ └── Cargo.toml ├── .cargo └── config.toml ├── client ├── src │ ├── interfaces.rs │ ├── chain.rs │ ├── utils.rs │ ├── error.rs │ ├── rpc.rs │ ├── lib.rs │ ├── events.rs │ └── client.rs ├── examples │ ├── fetching_storage.rs │ ├── subscribing_event.rs │ ├── subscribing_all_events.rs │ ├── iterating_storage.rs │ ├── calling_extrinsic.rs │ └── calling_rpc.rs ├── build.rs ├── codegen │ ├── Cargo.toml │ └── src │ │ ├── ir.rs │ │ ├── parser │ │ ├── mod.rs │ │ └── calls.rs │ │ ├── codegen │ │ ├── calls.rs │ │ └── mod.rs │ │ └── lib.rs ├── Cargo.toml └── README.md ├── .env.sample ├── data ├── torus-genesis-balances.json.sha256.txt ├── testnet │ ├── aura.pub.json │ ├── gran.pub.json │ └── bootnodes.json └── mainnet │ ├── aura.pub.json │ ├── gran.pub.json │ └── bootnodes.json ├── 
.vscode ├── extensions.json └── settings.json ├── .github ├── CODEOWNERS ├── pull_request_template.md └── workflows │ ├── build-docs.yml │ ├── build-docker-xtask.yml │ ├── build-runtime.yml │ ├── build-docker-node.yml │ └── check.yml ├── .dockerignore ├── .devcontainer └── devcontainer.json ├── .helix └── languages.toml ├── node ├── src │ ├── main.rs │ ├── lib.rs │ ├── chain_spec.rs │ ├── cli.rs │ └── rpc.rs ├── build.rs └── Cargo.toml ├── rust-toolchain.toml ├── .gitignore ├── runtime ├── src │ ├── precompiles │ │ ├── solidity │ │ │ └── balanceTransfer.abi │ │ ├── balance_transfer.rs │ │ └── mod.rs │ └── benchmarks.rs ├── build.rs └── Cargo.toml ├── mcp ├── README.md ├── Cargo.toml └── src │ ├── utils.rs │ ├── weights.rs │ ├── balance.rs │ ├── consensus.rs │ └── emission.rs ├── xtask ├── Cargo.toml └── src │ └── main.rs ├── project-selector └── Cargo.toml ├── test-utils └── Cargo.toml ├── docker-compose.yml ├── scripts ├── diff-previous-tag.sh └── adjust-spec-file.py ├── LICENSE ├── docs ├── nix.md ├── deploy.md └── changelog_prompt.md ├── justfile ├── flake.nix ├── flake.lock └── .maintain └── frame-weight-template.hbs /.zed/settings.json: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | use flake 2 | -------------------------------------------------------------------------------- /docker/secrets/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 100 2 | -------------------------------------------------------------------------------- /pallets/torus0/src/migrations.rs: 
-------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /pallets/emission0/src/migrations.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /pallets/governance/src/migrations.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /pallets/permission0/src/migrations.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [alias] 2 | xtask = "run --package xtask --" 3 | -------------------------------------------------------------------------------- /client/src/interfaces.rs: -------------------------------------------------------------------------------- 1 | include!(concat!(env!("OUT_DIR"), "/interfaces.rs")); 2 | -------------------------------------------------------------------------------- /.env.sample: -------------------------------------------------------------------------------- 1 | GITHUB_TOKEN= 2 | 3 | AWS_ACCESS_KEY_ID= 4 | AWS_SECRET_ACCESS_KEY= 5 | SCCACHE_ENDPOINT= 6 | -------------------------------------------------------------------------------- /data/torus-genesis-balances.json.sha256.txt: -------------------------------------------------------------------------------- 1 | 934a1055effc9d0a7e3f736bbac6668453ed1959c7f075f78803186003a1ad52 torus-genesis-balances.json 2 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"recommendations": [ 3 | "streetsidesoftware.code-spell-checker", 4 | "rust-lang.rust-analyzer", 5 | "ryanluker.vscode-coverage-gutters" 6 | ] 7 | } -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @functor-flow @saiintbrisson @steinerkelvin @devwckd 2 | 3 | /.github/workflows/* @steinerkelvin @daviptrs @saiintbrisson 4 | /docker/**/* @steinerkelvin @daviptrs @saiintbrisson 5 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Env files 2 | .env 3 | 4 | # direnv / Nix 5 | /.direnv/ 6 | 7 | # Docker 8 | /docker/secrets/ 9 | /docker/volumes/ 10 | 11 | # Rust 12 | /target/ 13 | 14 | # Git Hooks 15 | .pre-commit-config.yaml 16 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "image": "mcr.microsoft.com/devcontainers/universal:2", 3 | "features": { 4 | "ghcr.io/devcontainers/features/nix:1": { 5 | "flake": "github:nixos/nixpkgs/nixpkgs-unstable#direnv" 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /data/testnet/aura.pub.json: -------------------------------------------------------------------------------- 1 | [ 2 | "5HgqdQWjhW6DNAwFuR9QyXPT2X8sMcPbfkqhhDMcZTtzrg6G", 3 | "5CZg4dpW13JvjMo61xEgrYDkVW4R9i4HYMpLEpaa9CMfHfmt", 4 | "5Gut8yb1HHw4ro9BEH3o44XgVDgXkJTWRNxEp4a1bvGr9tws", 5 | "5HKQ3mwPR7oppkb957YyTBRMR4iLVjF27mdPx7a9EkBBinFN" 6 | ] 7 | -------------------------------------------------------------------------------- /.helix/languages.toml: -------------------------------------------------------------------------------- 1 | [language-server.rust-analyzer.config] 2 | cargo.extraEnv = { 
SKIP_WASM_BUILD = "true" } 3 | cargo.features = ["runtime-benchmarks"] 4 | check.extraEnv = { SKIP_WASM_BUILD = "true" } 5 | check.overrideCommand = ["cargo", "check", "--message-format=json"] 6 | -------------------------------------------------------------------------------- /node/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::result_large_err)] 2 | //! The Torus node implementation. 3 | 4 | mod chain_spec; 5 | mod cli; 6 | mod command; 7 | mod rpc; 8 | mod service; 9 | 10 | fn main() -> polkadot_sdk::sc_cli::Result<()> { 11 | command::run() 12 | } 13 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.88.0" 3 | components = [ 4 | "clippy", 5 | "rustfmt", 6 | "rust-src", 7 | "rust-analyzer", 8 | "llvm-tools-preview", 9 | ] 10 | targets = ["x86_64-unknown-linux-gnu", "wasm32-unknown-unknown"] 11 | profile = "minimal" 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local files 2 | *.local.* 3 | 4 | # Temporary files 5 | /tmp/ 6 | 7 | # Env files 8 | .env 9 | 10 | # direnv / Nix 11 | /.direnv/ 12 | 13 | # Docker 14 | /docker/secrets/ 15 | /docker/volumes/ 16 | 17 | # Rust 18 | **/target/ 19 | 20 | # Git Hooks 21 | .pre-commit-config.yaml 22 | 23 | rust-project.json 24 | -------------------------------------------------------------------------------- /pallets/faucet/src/ext.rs: -------------------------------------------------------------------------------- 1 | use polkadot_sdk::frame_support::traits::Currency; 2 | 3 | pub(super) type BalanceOf = <::Currency as Currency< 4 | ::AccountId, 5 | >>::Balance; 6 | 7 | pub(super) type AccountIdOf = ::AccountId; 8 | 
-------------------------------------------------------------------------------- /pallets/torus0/src/ext.rs: -------------------------------------------------------------------------------- 1 | use polkadot_sdk::frame_support::traits::Currency; 2 | 3 | pub(super) type BalanceOf = <::Currency as Currency< 4 | ::AccountId, 5 | >>::Balance; 6 | 7 | pub(super) type AccountIdOf = ::AccountId; 8 | -------------------------------------------------------------------------------- /pallets/governance/src/ext.rs: -------------------------------------------------------------------------------- 1 | use polkadot_sdk::frame_support::traits::Currency; 2 | 3 | pub(super) type BalanceOf = <::Currency as Currency< 4 | ::AccountId, 5 | >>::Balance; 6 | 7 | pub(super) type AccountIdOf = ::AccountId; 8 | -------------------------------------------------------------------------------- /data/testnet/gran.pub.json: -------------------------------------------------------------------------------- 1 | [ 2 | [ 3 | "5CwLdPUxBiCUCVZ3w5dvhWztXEZ58ijXG7jD6F3QY8SfZwj1", 4 | 1 5 | ], 6 | [ 7 | "5FuTB4GLFdxX2Ee8Wz69qA6M8GtHzxy6jGVLQoDJXTRKAv4o", 8 | 1 9 | ], 10 | [ 11 | "5CUnFDtwqd2feHwqGuojjQX4z9eBnJqZBdWo1xY8b26rEUyN", 12 | 1 13 | ], 14 | [ 15 | "5D2LyEr5wvkWrVAWrM3ThpipxPDDdN6deaweLjMKitrBH9BT", 16 | 1 17 | ] 18 | ] 19 | -------------------------------------------------------------------------------- /runtime/src/precompiles/solidity/balanceTransfer.abi: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "inputs": [ 4 | { 5 | "internalType": "bytes32", 6 | "name": "data", 7 | "type": "bytes32" 8 | } 9 | ], 10 | "name": "transfer", 11 | "outputs": [], 12 | "stateMutability": "payable", 13 | "type": "function" 14 | } 15 | ] 16 | -------------------------------------------------------------------------------- /pallets/torus0/rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = 
"pallet-torus0-rpc" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [lints] 7 | workspace = true 8 | 9 | [dependencies] 10 | async-trait.workspace = true 11 | jsonrpsee.workspace = true 12 | pallet-torus0-api = { workspace = true, features = ["std"] } 13 | polkadot-sdk = { workspace = true, features = ["sp-api", "sp-runtime", "std"] } 14 | torus-runtime = { workspace = true, features = ["std"] } 15 | -------------------------------------------------------------------------------- /client/examples/fetching_storage.rs: -------------------------------------------------------------------------------- 1 | use torus_client::client::TorusClient; 2 | 3 | #[tokio::main] 4 | pub async fn main() { 5 | let client = TorusClient::for_mainnet().await.unwrap(); 6 | 7 | let pending_emission = client 8 | .emission0() 9 | .storage() 10 | .pending_emission() 11 | .await 12 | .unwrap() 13 | .unwrap_or(0); 14 | 15 | println!("pending emission: {pending_emission}"); 16 | } 17 | -------------------------------------------------------------------------------- /pallets/permission0/rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-permission0-rpc" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [lints] 7 | workspace = true 8 | 9 | [dependencies] 10 | async-trait.workspace = true 11 | jsonrpsee.workspace = true 12 | pallet-permission0-api = { workspace = true, features = ["std"] } 13 | polkadot-sdk = { workspace = true, features = ["sp-api", "sp-runtime", "std"] } 14 | torus-runtime = { workspace = true, features = ["std"] } 15 | -------------------------------------------------------------------------------- /pallets/emission0/src/ext.rs: -------------------------------------------------------------------------------- 1 | use polkadot_sdk::{frame_support::traits::Currency, frame_system}; 2 | 3 | pub type BalanceOf = 4 | <::Currency as Currency<::AccountId>>::Balance; 5 | 6 | pub type AccountIdOf = 
::AccountId; 7 | 8 | pub type NegativeImbalanceOf = <::Currency as Currency< 9 | ::AccountId, 10 | >>::NegativeImbalance; 11 | -------------------------------------------------------------------------------- /runtime/src/benchmarks.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "runtime-benchmarks")] 2 | 3 | polkadot_sdk::frame_benchmarking::define_benchmarks!( 4 | [frame_benchmarking, BaselineBench::] 5 | [frame_system, SystemBench::] 6 | [pallet_balances, Balances] 7 | [pallet_timestamp, Timestamp] 8 | [pallet_sudo, Sudo] 9 | [pallet_emission0, Emission0] 10 | [pallet_governance, Governance] 11 | [pallet_torus0, Torus0] 12 | [pallet_permission0, Permission0] 13 | ); 14 | -------------------------------------------------------------------------------- /client/src/chain.rs: -------------------------------------------------------------------------------- 1 | pub trait Chain: Clone {} 2 | 3 | #[cfg(feature = "mainnet")] 4 | #[derive(Clone)] 5 | pub struct MainNet {} 6 | 7 | #[cfg(feature = "mainnet")] 8 | impl Chain for MainNet {} 9 | 10 | #[cfg(feature = "testnet")] 11 | #[derive(Clone)] 12 | pub struct TestNet {} 13 | 14 | #[cfg(feature = "testnet")] 15 | impl Chain for TestNet {} 16 | 17 | #[cfg(feature = "devnet")] 18 | #[derive(Clone)] 19 | pub struct DevNet {} 20 | 21 | #[cfg(feature = "devnet")] 22 | impl Chain for DevNet {} 23 | -------------------------------------------------------------------------------- /data/testnet/bootnodes.json: -------------------------------------------------------------------------------- 1 | [ 2 | "/dns4/validator-0.nodes.testnet.torus.network/tcp/30333/p2p/12D3KooWMxgfPZQ7AmTnSKbPmBrQuJ9UXwsXP98g578MdJFy5cHm", 3 | "/dns4/validator-1.nodes.testnet.torus.network/tcp/30333/p2p/12D3KooWRsZYrkFuGxTbZbEnH2A2KDT3dKAcx6EUBCTykEpvKvpN", 4 | "/dns4/validator-2.nodes.testnet.torus.network/tcp/30333/p2p/12D3KooWP8PQjwAGPGtm6QRcwNhMuv4PDNHUKp8rD6gZfgdpAByx", 5 | 
"/dns4/validator-3.nodes.testnet.torus.network/tcp/30333/p2p/12D3KooWKCdLiV4gapVCtbkTNxCboMR1Mg1YvjbM8ZNKDpxQRvPB" 6 | ] 7 | -------------------------------------------------------------------------------- /client/examples/subscribing_event.rs: -------------------------------------------------------------------------------- 1 | use futures::StreamExt; 2 | use torus_client::{client::TorusClient, interfaces::mainnet::api::torus0::events::StakeAdded}; 3 | 4 | #[tokio::main] 5 | pub async fn main() { 6 | let client = TorusClient::for_mainnet().await.unwrap(); 7 | 8 | let mut events = client.events().subscribe::().await.unwrap(); 9 | 10 | while let Some(Ok(event)) = events.next().await { 11 | println!("{} added {} stake to {}", event.0, event.2, event.1); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /client/examples/subscribing_all_events.rs: -------------------------------------------------------------------------------- 1 | use futures::StreamExt; 2 | use torus_client::client::TorusClient; 3 | 4 | #[tokio::main] 5 | pub async fn main() { 6 | let client = TorusClient::for_mainnet().await.unwrap(); 7 | 8 | let mut events = client.events().subscribe_unfiltered().await.unwrap(); 9 | 10 | while let Some(Ok(event)) = events.next().await { 11 | println!( 12 | "event received {}::{}", 13 | event.pallet_name(), 14 | event.variant_name() 15 | ); 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /pallets/emission0/api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-emission0-api" 3 | description = "Torus emission pallet API." 
4 | version = "0.1.0" 5 | license = "MIT-0" 6 | authors.workspace = true 7 | edition.workspace = true 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = ["polkadot-sdk/std"] 15 | runtime-benchmarks = ["polkadot-sdk/runtime-benchmarks"] 16 | try-runtime = ["polkadot-sdk/try-runtime"] 17 | 18 | [dependencies] 19 | polkadot-sdk = { workspace = true, features = ["sp-runtime"] } 20 | -------------------------------------------------------------------------------- /pallets/governance/api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-governance-api" 3 | description = "Torus governance pallet api." 4 | version = "0.1.0" 5 | license = "MIT-0" 6 | authors.workspace = true 7 | edition.workspace = true 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = ["polkadot-sdk/std"] 15 | runtime-benchmarks = ["polkadot-sdk/runtime-benchmarks"] 16 | try-runtime = ["polkadot-sdk/try-runtime"] 17 | 18 | [dependencies] 19 | polkadot-sdk = { workspace = true, features = ["sp-runtime"] } 20 | -------------------------------------------------------------------------------- /mcp/README.md: -------------------------------------------------------------------------------- 1 | # torus-mcp 2 | 3 | A simple MCP that interacts with the torus network. 4 | The MCP is currently used internally for tests on top of a premade set of dev accounts. 5 | 6 | ## How to use 7 | 8 | Build it 9 | 10 | ``` 11 | # testnet 12 | just build-testnet-mcp 13 | # devnet (must have a devnet running locally on the port 9944) 14 | just build-devnet-mcp 15 | ``` 16 | 17 | Move the output so it doesn't get lost with the project (Optional) 18 | 19 | Install it and be happy! 
(e.g `claude mcp add torus-mcp /local/to/the/executable`) 20 | ``` 21 | -------------------------------------------------------------------------------- /pallets/torus0/api/src/api.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::multiple_bound_locations)] 2 | 3 | use codec::{Decode, Encode}; 4 | use polkadot_sdk::sp_runtime::DispatchError; 5 | 6 | polkadot_sdk::sp_api::decl_runtime_apis! { 7 | /// RPC related to Torus0. 8 | pub trait Torus0RuntimeApi { 9 | /// Calculates the total creation cost of a namespace: (Fee, Deposit). 10 | fn namespace_path_creation_cost(account_id: AccountId, path: crate::NamespacePathInner) -> Result<(Balance, Balance), DispatchError>; 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Pull Request Checklist 2 | 3 | Before submitting this PR, please make sure: 4 | 5 | - [ ] You have run `cargo clippy` and addressed any warnings 6 | - [ ] You have added appropriate tests (if applicable) 7 | - [ ] You have updated the documentation (if applicable) 8 | - [ ] You have reviewed your own code 9 | - [ ] You have updated changelog (if applicable) 10 | 11 | ## Description 12 | 13 | Please provide a brief description of the changes in this PR. 
14 | 15 | ## Related Issues 16 | 17 | Please link any related issues here 18 | -------------------------------------------------------------------------------- /client/examples/iterating_storage.rs: -------------------------------------------------------------------------------- 1 | use futures::StreamExt; 2 | use torus_client::client::TorusClient; 3 | 4 | #[tokio::main] 5 | pub async fn main() { 6 | let client = TorusClient::for_mainnet().await.unwrap(); 7 | 8 | let total_stake: u128 = client 9 | .torus0() 10 | .storage() 11 | .staking_to_iter() 12 | .await 13 | .unwrap() 14 | .fold(0u128, |value, entry| async move { 15 | let ((_, _), stake) = entry.unwrap(); 16 | value + stake 17 | }) 18 | .await; 19 | 20 | println!("torus total stake: {total_stake}"); 21 | } 22 | -------------------------------------------------------------------------------- /client/src/utils.rs: -------------------------------------------------------------------------------- 1 | /// Utility to convert a hex string to bytes 2 | pub fn hex_to_bytes(hex: &str) -> Result, hex::FromHexError> { 3 | let hex = hex.trim_start_matches("0x"); 4 | hex::decode(hex) 5 | } 6 | 7 | /// Utility to convert bytes to a hex string 8 | pub fn bytes_to_hex(bytes: &[u8]) -> String { 9 | format!("0x{}", hex::encode(bytes)) 10 | } 11 | 12 | const ONE_TORUS: u128 = 10u128.pow(18); 13 | 14 | /// Utility to convert a floating number representing tokens to the chain-ready fixed u128 15 | pub fn to_torus(tokens: f64) -> u128 { 16 | (tokens.abs() * ONE_TORUS as f64) as u128 17 | } 18 | -------------------------------------------------------------------------------- /client/build.rs: -------------------------------------------------------------------------------- 1 | use std::{error::Error, path::PathBuf}; 2 | 3 | use torus_client_codegen::generate_interfaces; 4 | 5 | #[tokio::main] 6 | async fn main() -> Result<(), Box> { 7 | let output_file = PathBuf::from(format!( 8 | "{}/interfaces.rs", 9 | 
std::env::var("OUT_DIR").unwrap() 10 | )); 11 | 12 | let devnet_url = if std::env::var("CARGO_FEATURE_DEVNET").is_ok() { 13 | Some(std::env::var("DEVNET_URL").unwrap_or("ws://127.0.0.1:9944".to_string())) 14 | } else { 15 | None 16 | }; 17 | 18 | generate_interfaces(&output_file, devnet_url).await?; 19 | 20 | Ok(()) 21 | } 22 | -------------------------------------------------------------------------------- /pallets/emission0/api/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | 3 | use polkadot_sdk::frame_support::dispatch::DispatchResult; 4 | 5 | #[derive(Default)] 6 | pub struct ConsensusMemberStats { 7 | pub incentives: u16, 8 | pub dividends: u16, 9 | } 10 | 11 | pub trait Emission0Api { 12 | /// Fetches stats emitted by the consensus for an agent. 13 | /// Returns `None` if the agent has not taken part in the last consensus 14 | /// run. 15 | fn consensus_stats(member: &AccountId) -> Option; 16 | 17 | fn delegate_weight_control(delegator: &AccountId, delegatee: &AccountId) -> DispatchResult; 18 | } 19 | -------------------------------------------------------------------------------- /xtask/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xtask" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | xflags = "0.3.2" 8 | hex = "0.4.3" 9 | hex-literal = "0.4.1" 10 | tokio = { version = "1.0", features = ["full"] } 11 | parity-scale-codec = { version = "3.6.1", features = ["derive"] } 12 | polkadot-sdk = { workspace = true, features = [ 13 | "std", 14 | "sp-keyring", 15 | "sp-core", 16 | "sp-runtime", 17 | "sc-service", 18 | "sc-client-api", 19 | "frame-remote-externalities", 20 | "sp-crypto-hashing", 21 | ] } 22 | tempfile = "3.14.0" 23 | serde_json = { version = "1", features = ["arbitrary_precision"] } 24 | -------------------------------------------------------------------------------- /client/src/error.rs: 
-------------------------------------------------------------------------------- 1 | pub type Result = std::result::Result; 2 | 3 | #[derive(Debug, thiserror::Error)] 4 | pub enum Error { 5 | #[error(transparent)] 6 | Call(#[from] CallError), 7 | #[error(transparent)] 8 | SubxtError(#[from] subxt::error::Error), 9 | #[error(transparent)] 10 | CodecError(#[from] codec::Error), 11 | #[error(transparent)] 12 | Rpc(#[from] subxt::ext::subxt_rpcs::Error), 13 | } 14 | 15 | #[derive(Debug, thiserror::Error)] 16 | pub enum CallError { 17 | #[error("{0}")] 18 | Dropped(String), 19 | #[error("{0}")] 20 | Failed(String), 21 | #[error("{0}")] 22 | Invalid(String), 23 | } 24 | -------------------------------------------------------------------------------- /mcp/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "torus-mcp" 3 | version = "0.1.0" 4 | edition = "2024" 5 | 6 | [features] 7 | default = ["testnet"] 8 | 9 | testnet = ["torus-client/testnet"] 10 | devnet = ["torus-client/devnet"] 11 | 12 | [dependencies] 13 | lazy_static = { workspace = true } 14 | rmcp = { workspace = true, features = ["macros", "transport-io"] } 15 | torus-client = { path = "../client" } 16 | schemars = { workspace = true } 17 | serde = { workspace = true } 18 | serde_json = { workspace = true } 19 | tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } 20 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 21 | tracing = { workspace = true } 22 | -------------------------------------------------------------------------------- /client/examples/calling_extrinsic.rs: -------------------------------------------------------------------------------- 1 | use subxt_signer::ecdsa::dev::alice; 2 | use torus_client::client::TorusClient; 3 | 4 | #[tokio::main] 5 | pub async fn main() { 6 | let signer = alice(); // change it to your signer 7 | let name = "alice agent".as_bytes().to_vec(); 8 | let url = 
"url".as_bytes().to_vec(); 9 | let metadata = "metadata".as_bytes().to_vec(); 10 | 11 | let client = TorusClient::for_mainnet().await.unwrap(); 12 | 13 | if let Err(err) = client 14 | .torus0() 15 | .calls() 16 | .register_agent_wait(name, url, metadata, signer) 17 | .await 18 | { 19 | print!("could not register agent: {err}"); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /client/examples/calling_rpc.rs: -------------------------------------------------------------------------------- 1 | use subxt_signer::ecdsa::dev::alice; 2 | use torus_client::{chain::MainNet, client::TorusClient}; 3 | 4 | #[tokio::main] 5 | pub async fn main() { 6 | let client = TorusClient::for_url::("wss://api-30.nodes.torus.network") 7 | .await 8 | .unwrap(); 9 | 10 | let account = alice().public_key().to_account_id(); 11 | let path = "agent.test.namespace"; 12 | let (fee, deposit) = client 13 | .rpc() 14 | .namespace_path_creation_cost(account, path) 15 | .await 16 | .unwrap(); 17 | 18 | println!( 19 | "namespace path `agent.test.namespace` creation cost: {}", 20 | fee + deposit 21 | ); 22 | } 23 | -------------------------------------------------------------------------------- /project-selector/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "project-selector" 3 | version = "0.1.0" 4 | authors.workspace = true 5 | edition.workspace = true 6 | 7 | [dependencies] 8 | camino = "1.1.9" 9 | cargo_metadata = "0.20.0" 10 | project-model = { git = "https://github.com/rust-lang/rust-analyzer", version = "0.0.0", rev = "95d5091a09f1ae92cfa59c1b8ccee1dd45053883" } 11 | toolchain = { git = "https://github.com/rust-lang/rust-analyzer", version = "0.0.0", rev = "95d5091a09f1ae92cfa59c1b8ccee1dd45053883" } 12 | serde = { version = "1", features = ["std", "derive"] } 13 | serde_json = { version = "1", features = ["std"] } 14 | itertools = "0.14.0" 15 | clap.workspace = true 16 | 
anyhow = "1.0.98" 17 | glob = "0.3.2" 18 | -------------------------------------------------------------------------------- /test-utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-utils" 3 | version = "0.1.0" 4 | authors.workspace = true 5 | edition.workspace = true 6 | 7 | [features] 8 | default = ["std"] 9 | std = [] 10 | 11 | [dependencies] 12 | codec.workspace = true 13 | scale-info.workspace = true 14 | polkadot-sdk = { workspace = true, features = [ 15 | "std", 16 | "runtime", 17 | "pallet-balances", 18 | ] } 19 | 20 | pallet-governance.workspace = true 21 | pallet-torus0.workspace = true 22 | pallet-emission0.workspace = true 23 | pallet-permission0.workspace = true 24 | pallet-faucet.workspace = true 25 | 26 | pallet-torus0-api.workspace = true 27 | pallet-governance-api.workspace = true 28 | pallet-permission0-api.workspace = true 29 | -------------------------------------------------------------------------------- /pallets/torus0/api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-torus0-api" 3 | description = "Torus initial pallet API." 
4 | version = "0.1.0" 5 | license = "MIT-0" 6 | authors.workspace = true 7 | edition.workspace = true 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = ["codec/std", "polkadot-sdk/std", "scale-info/std"] 15 | runtime-benchmarks = ["polkadot-sdk/runtime-benchmarks"] 16 | try-runtime = ["polkadot-sdk/try-runtime"] 17 | 18 | [dependencies] 19 | codec = { workspace = true, features = ["derive"] } 20 | scale-info = { workspace = true, features = ["derive"] } 21 | polkadot-sdk = { workspace = true, features = [ 22 | "sp-api", 23 | "sp-core", 24 | "sp-runtime", 25 | "sp-std", 26 | ] } 27 | -------------------------------------------------------------------------------- /client/codegen/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "torus-client-codegen" 3 | version = "0.1.0" 4 | edition = "2024" 5 | 6 | [dependencies] 7 | syn = { version = "2.0", features = ["full", "parsing"] } 8 | quote = "1.0" 9 | proc-macro2 = "1.0" 10 | prettyplease = "0.2" 11 | thiserror = "1.0" 12 | clap = { version = "4.0", features = ["derive"] } 13 | hex = "0.4.3" 14 | subxt = { version = "0.42.1" } 15 | subxt-codegen = { version = "0.42.1" } 16 | subxt-utils-fetchmetadata = { version = "0.42.1", features = ["url"] } 17 | codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } 18 | stringcase = "0.4.0" 19 | paste = "1.0.15" 20 | tokio = { version = "1.46.1", features = ["macros", "rt-multi-thread"] } 21 | scale-typegen = "0.11.1" 22 | -------------------------------------------------------------------------------- /pallets/governance/api/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | 3 | use polkadot_sdk::{frame_support::dispatch::DispatchResult, sp_runtime::Percent}; 4 | 5 | pub trait GovernanceApi { 6 | fn dao_treasury_address() -> AccountId; 7 | 8 | fn treasury_emission_fee() -> 
Percent; 9 | 10 | fn is_whitelisted(key: &AccountId) -> bool; 11 | 12 | fn ensure_allocator(key: &AccountId) -> DispatchResult; 13 | 14 | fn get_allocators() -> impl Iterator; 15 | 16 | fn set_allocator(key: &AccountId); 17 | 18 | fn can_create_namespace(key: &AccountId) -> bool; 19 | 20 | fn can_register_agent(key: &AccountId) -> bool; 21 | 22 | #[doc(hidden)] 23 | #[cfg(feature = "runtime-benchmarks")] 24 | fn force_set_whitelisted(key: &AccountId); 25 | } 26 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | torus-node: 3 | container_name: torus-node 4 | image: ghcr.io/renlabs-dev/torus-substrate:latest 5 | volumes: 6 | - ./docker/volumes/node-data:/node-data 7 | ports: 8 | - 30333:30333 # P2P 9 | - 9944:9944 # RPC 10 | restart: always 11 | entrypoint: ["torus-node"] 12 | command: 13 | - "--base-path" 14 | - "/node-data" 15 | - "--chain" 16 | - "mainnet" 17 | - "--rpc-external" 18 | - "--rpc-cors=all" 19 | - "--rpc-max-response-size" 20 | - "100" 21 | - "--rpc-max-connections" 22 | - "5000" 23 | - "--port" 24 | - "30333" 25 | - "--rpc-port" 26 | - "9944" 27 | - "--telemetry-url" 28 | - "ws://telemetry.torus.network:8001/submit 0" 29 | - "--sync=warp" 30 | -------------------------------------------------------------------------------- /docker/xtask.dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.76-slim-bullseye as builder 2 | 3 | WORKDIR /usr/src/app 4 | 5 | RUN apt-get update && apt install -y --no-install-recommends \ 6 | ca-certificates \ 7 | curl \ 8 | build-essential \ 9 | protobuf-compiler \ 10 | libclang-dev \ 11 | git \ 12 | pkg-config \ 13 | libssl-dev 14 | 15 | COPY . . 
16 | 17 | RUN cargo build --release --bin xtask 18 | 19 | FROM debian:bullseye-slim 20 | 21 | WORKDIR /app 22 | 23 | RUN apt-get update && apt install -y --no-install-recommends \ 24 | ca-certificates \ 25 | curl \ 26 | build-essential \ 27 | protobuf-compiler \ 28 | libclang-dev \ 29 | git \ 30 | pkg-config \ 31 | libssl-dev 32 | 33 | COPY --from=builder /usr/src/app/target/release/xtask /usr/local/bin 34 | 35 | ENTRYPOINT ["xtask"] 36 | -------------------------------------------------------------------------------- /node/src/lib.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Polkadot Sdk. 2 | 3 | // Copyright (C) Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 
17 | 18 | pub mod chain_spec; 19 | pub(crate) mod cli; 20 | pub mod rpc; 21 | pub mod service; 22 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | // Editor 3 | "editor.formatOnSave": true, 4 | // Rust 5 | "rust-analyzer.cargo.extraEnv": { 6 | "SKIP_WASM_BUILD": "1" 7 | }, 8 | "rust-analyzer.check.extraEnv": { 9 | "SKIP_WASM_BUILD": "1" 10 | }, 11 | // "rust-analyzer.cargo.features": ["runtime-benchmarks", "testnet"], 12 | "rust-analyzer.check.overrideCommand": [ 13 | "cargo", 14 | "check", 15 | "--message-format=json" 16 | ], 17 | "coverage-gutters.coverageFileNames": ["target/cov.xml"], 18 | // Spell checker 19 | "cSpell.words": [ 20 | "alice", 21 | "buildx", 22 | "codegen", 23 | "devcontainers", 24 | "extrinsics", 25 | "irongut", 26 | "jwalton", 27 | "mainnet", 28 | "nocapture", 29 | "presign", 30 | "println", 31 | "subxt", 32 | "Swatinem", 33 | "wbuild" 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /mcp/src/utils.rs: -------------------------------------------------------------------------------- 1 | use rmcp::ErrorData; 2 | use torus_client::{subxt::utils::AccountId32, subxt_signer::sr25519::Keypair}; 3 | 4 | use crate::ACCOUNTS; 5 | 6 | pub fn keypair_from_name(name: impl AsRef) -> Result { 7 | let name = name.as_ref().to_lowercase(); 8 | ACCOUNTS 9 | .get(&name) 10 | .ok_or_else(|| { 11 | ErrorData::invalid_request(format!("{name} is not a valid account name."), None) 12 | }) 13 | .cloned() 14 | } 15 | 16 | pub fn name_or_key(account_id: &AccountId32) -> String { 17 | ACCOUNTS 18 | .iter() 19 | .find_map(|(name, keypair)| { 20 | if &keypair.public_key().to_account_id() == account_id { 21 | Some(name.to_string()) 22 | } else { 23 | None 24 | } 25 | }) 26 | .unwrap_or_else(|| account_id.to_string()) 27 | } 28 | 
-------------------------------------------------------------------------------- /scripts/diff-previous-tag.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Path to runtime lib 5 | RUNTIME_LIB="runtime/src/lib.rs" 6 | 7 | spec_version=$(grep -Eo "spec_version:[[:space:]]+[0-9]+" "$RUNTIME_LIB" | cut -f2 -d' ') 8 | prev=$((spec_version - 1)) 9 | 10 | echo "Current spec_version: $spec_version" 11 | echo "Looking for previous version: $prev" 12 | 13 | tags=("runtime/mainnet-$prev" "runtime/testnet-$prev") 14 | 15 | found="" 16 | for tag in "${tags[@]}"; do 17 | if git rev-parse --verify --quiet "$tag" > /dev/null; then 18 | found=$tag 19 | break 20 | fi 21 | done 22 | 23 | if [[ -n "$found" ]]; then 24 | echo "Found previous tag: $found" 25 | else 26 | echo "Error: no previous runtime tag found for version $prev" >&2 27 | exit 1 28 | fi 29 | 30 | git --no-pager diff -U5 "$found"..HEAD -- ':!*.json' ':!*.lock' ':!**/tests/*.rs' ':!project-selector' ':!**/weights.rs' 31 | -------------------------------------------------------------------------------- /node/build.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 
17 | 18 | use polkadot_sdk::substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; 19 | 20 | fn main() { 21 | generate_cargo_keys(); 22 | rerun_if_git_head_changed(); 23 | } 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright 2024 Renlabs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 6 | software and associated documentation files (the "Software"), to deal in the Software 7 | without restriction, including without limitation the rights to use, copy, modify, 8 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 9 | permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 12 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 13 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 14 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 15 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 16 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
17 | -------------------------------------------------------------------------------- /pallets/permission0/api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-permission0-api" 3 | version = "0.1.0" 4 | description = "API for permission0 pallet" 5 | authors.workspace = true 6 | edition.workspace = true 7 | license = "MIT-0" 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = [ 15 | "codec/std", 16 | "polkadot-sdk/std", 17 | "scale-info/std", 18 | "pallet-torus0-api/std", 19 | ] 20 | runtime-benchmarks = [ 21 | "polkadot-sdk/runtime-benchmarks", 22 | "pallet-torus0-api/runtime-benchmarks", 23 | ] 24 | try-runtime = ["polkadot-sdk/try-runtime"] 25 | 26 | [dependencies] 27 | codec = { workspace = true, features = ["derive"] } 28 | scale-info = { workspace = true, features = ["derive"] } 29 | polkadot-sdk = { workspace = true, features = ["experimental", "runtime"] } 30 | pallet-torus0-api = { workspace = true, default-features = false } 31 | bitflags = { workspace = true, default-features = false } 32 | -------------------------------------------------------------------------------- /runtime/build.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | fn main() { 19 | #[cfg(feature = "std")] 20 | { 21 | substrate_wasm_builder::WasmBuilder::init_with_defaults() 22 | .enable_metadata_hash("TORUS", 18) 23 | .build(); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /pallets/governance/tests/delegation.rs: -------------------------------------------------------------------------------- 1 | use pallet_governance::NotDelegatingVotingPower; 2 | use polkadot_sdk::frame_support::assert_ok; 3 | use test_utils::{Test, get_origin, new_test_ext}; 4 | 5 | #[test] 6 | fn delegates_voting_power_correctly() { 7 | new_test_ext().execute_with(|| { 8 | const MODULE: u32 = 0; 9 | 10 | assert_ok!(pallet_governance::Pallet::::enable_vote_delegation( 11 | get_origin(MODULE) 12 | )); 13 | 14 | assert!(!NotDelegatingVotingPower::::get().contains(&MODULE)); 15 | }); 16 | } 17 | 18 | #[test] 19 | fn disable_voting_power_delegation_correctly() { 20 | new_test_ext().execute_with(|| { 21 | const MODULE: u32 = 0; 22 | 23 | assert_ok!(pallet_governance::Pallet::::enable_vote_delegation( 24 | get_origin(MODULE) 25 | )); 26 | 27 | assert_ok!(pallet_governance::Pallet::::disable_vote_delegation( 28 | get_origin(MODULE) 29 | )); 30 | 31 | assert!(NotDelegatingVotingPower::::get().contains(&MODULE)); 32 | }); 33 | } 34 | -------------------------------------------------------------------------------- /docs/nix.md: -------------------------------------------------------------------------------- 1 | # Build and development with Nix 2 | 3 | The recommended way to install dependencies on the Torus Node project is using 4 | [Nix] and [direnv]. 
5 | 6 | You can install Nix with the [Determinate Nix Installer]: 7 | 8 | ```sh 9 | curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install 10 | ``` 11 | 12 | ## With direnv 13 | 14 | You can then install and use [direnv] to automatically load the environment: 15 | 16 | - Install direnv: 17 | 18 | ```sh 19 | nix profile install nixpkgs#direnv 20 | ``` 21 | 22 | - Add to your `~/.zshrc` and reload your shell if you're using zsh: 23 | 24 | ```sh 25 | eval "$(direnv hook zsh)" 26 | ``` 27 | 28 | - For bash, add to your `~/.bashrc`: 29 | 30 | ```sh 31 | eval "$(direnv hook bash)" 32 | ``` 33 | 34 | - Run `direnv allow` inside the project directory to enable it. 35 | 36 | ### Without direnv 37 | 38 | You can also manually load the environment: 39 | 40 | ```sh 41 | nix develop 42 | ``` 43 | 44 | [Nix]: https://nixos.org/ 45 | [direnv]: https://direnv.net/ 46 | [Determinate Nix Installer]: https://github.com/DeterminateSystems/nix-installer 47 | -------------------------------------------------------------------------------- /pallets/faucet/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-faucet" 3 | description = "Testnet faucet pallet for the Torus network. Allows users to get test tokens by performing proof-of-work." 
4 | version = "0.1.0" 5 | license = "MIT-0" 6 | authors.workspace = true 7 | edition.workspace = true 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = ["codec/std", "polkadot-sdk/std", "scale-info/std"] 15 | runtime-benchmarks = ["polkadot-sdk/runtime-benchmarks"] 16 | try-runtime = ["polkadot-sdk/try-runtime"] 17 | testnet = [] 18 | 19 | [dependencies] 20 | codec = { workspace = true, features = ["derive"] } 21 | scale-info = { workspace = true, features = ["derive"] } 22 | polkadot-sdk = { workspace = true, features = [ 23 | "experimental", 24 | "runtime", 25 | "pallet-balances", 26 | ] } 27 | 28 | pallet-torus0-api.workspace = true 29 | 30 | [dev-dependencies] 31 | pallet-governance.workspace = true 32 | pallet-torus0.workspace = true 33 | pallet-emission0.workspace = true 34 | pallet-permission0.workspace = true 35 | pallet-governance-api.workspace = true 36 | rand = { workspace = true, features = ["thread_rng"] } 37 | -------------------------------------------------------------------------------- /.github/workflows/build-docs.yml: -------------------------------------------------------------------------------- 1 | name: Build docs and publish to github pages 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | publish: 10 | permissions: 11 | contents: read 12 | id-token: write 13 | pages: write 14 | runs-on: ubicloud-standard-2 15 | steps: 16 | - uses: actions/checkout@v4 17 | 18 | - name: Setup Rust cache 19 | uses: Swatinem/rust-cache@v2 20 | 21 | - name: Install Rust toolchain 22 | run: | 23 | rustup set profile minimal 24 | rustup show 25 | 26 | - name: Install Protoc 27 | uses: arduino/setup-protoc@v1 28 | with: 29 | version: 3.20.1 30 | repo-token: ${{ secrets.GITHUB_TOKEN }} 31 | 32 | - name: Generate Docs 33 | run: | 34 | cargo doc --workspace --no-deps 35 | 36 | - name: Setup Pages 37 | uses: actions/configure-pages@v5 38 | 39 | - name: Upload artifact 40 | uses: actions/upload-pages-artifact@v3 
41 | with: 42 | path: 'target/doc/' 43 | 44 | - name: Deploy to GitHub Pages 45 | id: deployment 46 | uses: actions/deploy-pages@v4 47 | -------------------------------------------------------------------------------- /pallets/torus0/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-torus0" 3 | description = "Torus initial pallet." 4 | version = "0.1.0" 5 | license = "MIT-0" 6 | authors.workspace = true 7 | edition.workspace = true 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = [ 15 | "codec/std", 16 | "polkadot-sdk/std", 17 | "scale-info/std", 18 | "pallet-torus0-api/std", 19 | "pallet-emission0-api/std", 20 | "pallet-governance-api/std", 21 | "pallet-permission0-api/std", 22 | ] 23 | runtime-benchmarks = [ 24 | "polkadot-sdk/runtime-benchmarks", 25 | "pallet-torus0-api/runtime-benchmarks", 26 | ] 27 | try-runtime = ["polkadot-sdk/try-runtime", "pallet-torus0-api/try-runtime"] 28 | testnet = [] 29 | 30 | [dependencies] 31 | codec = { workspace = true, features = ["derive"] } 32 | scale-info = { workspace = true, features = ["derive"] } 33 | polkadot-sdk = { workspace = true, features = ["experimental", "runtime"] } 34 | libm.workspace = true 35 | 36 | pallet-torus0-api.workspace = true 37 | pallet-emission0-api.workspace = true 38 | pallet-governance-api.workspace = true 39 | pallet-permission0-api.workspace = true 40 | 41 | 42 | [dev-dependencies] 43 | test-utils.workspace = true 44 | -------------------------------------------------------------------------------- /data/mainnet/aura.pub.json: -------------------------------------------------------------------------------- 1 | [ 2 | "5FAHNw7kdqkJotQ8yXF21iSmxhSzbwzJkVogrZH3MvypHy1D", 3 | "5Gv1CAGk3aRWvZs5LdP9ta9kVm23dfYopaJyXTZMavQNwZbm", 4 | "5G4JMqfNRJqpzuvXkbaxfhyX4MyeWgh7Tm1K88yNwnSRVyDY", 5 | "5HGNHGa1CVgDtYqcarFvtTS5foKyjqbM4NX5FYv1MLKkdKUG", 6 | 
"5HnB7htLFzaMAbqgzE66EVgUmJCACVQ1tvZUdDyAAFD71Evq", 7 | "5D5DFRxoSt27NJFEJpTvpyFMfZCf2jRvosKJ2QWQEQs6twnX", 8 | "5EFmiWRcZVEFYj3k1w6rHjieP6yE9YFFPiHDAX5iXAFLhgXv", 9 | "5CRwJKcr8pQ4CZ8wUrSDCGo2aFKqVXHFpsjHmbTERQNQbCQL", 10 | "5DAgeVS3gsVUMUDASy3d5YPasbTQQU7gDkGijF8S3iKGdWjR", 11 | "5G6hp8fP6E89R51cA4Dy8qgae3VsAYFBN4DGZDS2BMnhgRCc", 12 | "5GZEpLT7CfVhJC2aYmULUSzXnCuzWg1VrZij2dQTAQrokm4g", 13 | "5FADATJkVq4Tt6gcaqD5a9rVF6sx6Pc5C9WzRaZG71M4JszX", 14 | "5FZ7dvAYCi5yLiDnFerKqSK3Mo8VNiheTXphrRhZ5m1z8rr1", 15 | "5FRXa3cRPqsAVeNt3AZa2pywF3TEoh51d3fDCvwfgSzTHj8V", 16 | "5EA27GYEXcsdRN9QqdDxEAB8soH8AkqLdWq34fuxwPwGy4hu", 17 | "5CyuGBohZaiQf5rfMnbUjcVrbjATmjrx6ZEKBPP7aWFjx7mX", 18 | "5DMMrzgJ9WnEJH35xivLQ9nuByTrZ3uXHwPQWg2BBXN914fH", 19 | "5CcbfWfBNC5LwZaseEFrrraWkXc6ZLPBBe8PMowSDFmEsKss", 20 | "5Di2kW4nTNkVjphyHjvB67YVaaCoxd5k6quXuAQeRQxwJ8Ad", 21 | "5HVuVMnEPYMCKGuzPighADMzKgVm2swZTUPB1cLtLPSbpunZ", 22 | "5CcaizfVSsdv5AJDqDrGvu1fb6ndNJpQ8RgXNGuCPYfYsVSG" 23 | ] 24 | -------------------------------------------------------------------------------- /pallets/governance/tests/config.rs: -------------------------------------------------------------------------------- 1 | use polkadot_sdk::{ 2 | frame_support::assert_err, 3 | sp_runtime::{Percent, traits::BadOrigin}, 4 | }; 5 | use test_utils::{RuntimeOrigin, Test, assert_ok, get_origin, new_test_ext}; 6 | 7 | #[test] 8 | fn set_emission_params() { 9 | new_test_ext().execute_with(|| { 10 | assert_ok!(pallet_governance::Pallet::::set_emission_params( 11 | RuntimeOrigin::root(), 12 | Percent::from_percent(15), 13 | Percent::from_percent(15), 14 | )); 15 | 16 | assert_eq!( 17 | pallet_emission0::EmissionRecyclingPercentage::::get(), 18 | Percent::from_percent(15) 19 | ); 20 | 21 | assert_eq!( 22 | pallet_governance::TreasuryEmissionFee::::get(), 23 | Percent::from_percent(15) 24 | ); 25 | }); 26 | } 27 | 28 | #[test] 29 | fn set_emission_params_non_root() { 30 | new_test_ext().execute_with(|| { 31 | assert_err!( 32 | 
pallet_governance::Pallet::::set_emission_params( 33 | get_origin(0), 34 | Percent::from_percent(15), 35 | Percent::from_percent(15), 36 | ), 37 | BadOrigin 38 | ); 39 | }); 40 | } 41 | -------------------------------------------------------------------------------- /client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "torus-client" 3 | version = "0.1.0" 4 | edition = "2021" 5 | authors = ["Renlabs "] 6 | description = "Torus substrate client library using subxt" 7 | license = "MIT" 8 | 9 | [features] 10 | default = ["std", "mainnet"] 11 | std = ["codec/std", "codec/derive", "codec/max-encoded-len", "scale-info/std"] 12 | 13 | mainnet = [] 14 | testnet = [] 15 | devnet = [] 16 | 17 | [dependencies] 18 | anyhow = "1.0.98" 19 | 20 | codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } 21 | scale-info = { version = "2.11.1", default-features = false } 22 | 23 | subxt = { version = "0.41.0" } 24 | subxt-signer = "0.41.0" 25 | 26 | serde_json = "1.0" 27 | 28 | tokio = { version = "1.35", features = ["macros", "time", "rt-multi-thread"] } 29 | 30 | thiserror = "2.0.12" 31 | 32 | hex = "0.4.3" 33 | 34 | rand = "0.9.1" 35 | 36 | keccak-hash = { git = "https://github.com/paritytech/parity-common", rev = "f0dade11ac0b2450e6a54f36035f74b2b8ce4c00" } 37 | 38 | schnorrkel = "0.11.4" 39 | futures = "0.3.31" 40 | 41 | sha2 = "0.10.9" 42 | 43 | paste = "1.0.15" 44 | 45 | [build-dependencies] 46 | tokio = { version = "1.35", features = ["macros", "time", "rt-multi-thread"] } 47 | torus-client-codegen = { path = "codegen" } 48 | -------------------------------------------------------------------------------- /pallets/emission0/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-emission0" 3 | description = "Torus emission pallet." 
4 | version = "0.1.0" 5 | license = "MIT-0" 6 | authors.workspace = true 7 | edition.workspace = true 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = [ 15 | "codec/std", 16 | "polkadot-sdk/std", 17 | "scale-info/std", 18 | "num-traits/std", 19 | "pallet-torus0-api/std", 20 | "pallet-governance-api/std", 21 | "pallet-permission0-api/std", 22 | ] 23 | runtime-benchmarks = [ 24 | "polkadot-sdk/runtime-benchmarks", 25 | "pallet-torus0-api/runtime-benchmarks", 26 | "pallet-governance-api/runtime-benchmarks", 27 | "pallet-permission0-api/runtime-benchmarks", 28 | ] 29 | try-runtime = ["polkadot-sdk/try-runtime"] 30 | 31 | 32 | [dependencies] 33 | codec = { workspace = true, features = ["derive"] } 34 | scale-info = { workspace = true, features = ["derive"] } 35 | polkadot-sdk = { workspace = true, features = ["experimental", "runtime"] } 36 | 37 | num-traits.workspace = true 38 | 39 | pallet-torus0-api.workspace = true 40 | pallet-emission0-api.workspace = true 41 | pallet-governance-api.workspace = true 42 | pallet-permission0-api.workspace = true 43 | 44 | [dev-dependencies] 45 | test-utils.workspace = true 46 | -------------------------------------------------------------------------------- /pallets/permission0/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-permission0" 3 | version = "0.1.0" 4 | description = "Permission contract pallet for Torus network" 5 | authors.workspace = true 6 | edition.workspace = true 7 | license = "MIT-0" 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = [ 15 | "codec/std", 16 | "polkadot-sdk/std", 17 | "scale-info/std", 18 | "pallet-permission0-api/std", 19 | "pallet-torus0-api/std", 20 | "pallet-emission0-api/std", 21 | ] 22 | runtime-benchmarks = [ 23 | "polkadot-sdk/runtime-benchmarks", 24 | "pallet-torus0-api/runtime-benchmarks", 25 | 
"pallet-emission0-api/runtime-benchmarks", 26 | ] 27 | try-runtime = ["polkadot-sdk/try-runtime"] 28 | 29 | [dependencies] 30 | codec = { workspace = true, features = ["derive"] } 31 | scale-info = { workspace = true, features = ["derive"] } 32 | polkadot-sdk = { workspace = true, features = ["experimental", "runtime"] } 33 | bitflags = { workspace = true, default-features = false } 34 | num-traits = { workspace = true } 35 | 36 | # Local dependencies 37 | pallet-permission0-api = { workspace = true, default-features = false } 38 | pallet-torus0-api = { workspace = true, default-features = false } 39 | pallet-emission0-api = { workspace = true, default-features = false } 40 | 41 | [dev-dependencies] 42 | test-utils.workspace = true 43 | -------------------------------------------------------------------------------- /pallets/governance/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-governance" 3 | description = "Torus governance pallet." 
4 | version = "0.1.0" 5 | license = "MIT-0" 6 | authors.workspace = true 7 | edition.workspace = true 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = [ 15 | "codec/std", 16 | "polkadot-sdk/std", 17 | "scale-info/std", 18 | "pallet-torus0/std", 19 | "pallet-emission0/std", 20 | ] 21 | runtime-benchmarks = [ 22 | "polkadot-sdk/runtime-benchmarks", 23 | "pallet-torus0/runtime-benchmarks", 24 | "pallet-emission0/runtime-benchmarks", 25 | 26 | "pallet-torus0-api/runtime-benchmarks", 27 | ] 28 | try-runtime = [ 29 | "polkadot-sdk/try-runtime", 30 | "pallet-torus0/try-runtime", 31 | "pallet-emission0/try-runtime", 32 | ] 33 | 34 | 35 | [dependencies] 36 | codec = { workspace = true, features = ["derive"] } 37 | scale-info = { workspace = true, features = ["derive"] } 38 | polkadot-sdk = { workspace = true, features = [ 39 | "experimental", 40 | "runtime", 41 | "pallet-sudo", 42 | ] } 43 | 44 | pallet-torus0.workspace = true 45 | pallet-emission0.workspace = true 46 | 47 | pallet-governance-api.workspace = true 48 | pallet-torus0-api.workspace = true 49 | pallet-permission0-api.workspace = true 50 | 51 | [dev-dependencies] 52 | test-utils.workspace = true 53 | pallet-permission0.workspace = true 54 | -------------------------------------------------------------------------------- /mcp/src/weights.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use rmcp::{ 4 | ErrorData, 5 | model::{CallToolResult, Content}, 6 | }; 7 | use torus_client::subxt::utils::AccountId32; 8 | 9 | use crate::{Client, utils::keypair_from_name}; 10 | 11 | #[derive(schemars::JsonSchema, serde::Deserialize, serde::Serialize)] 12 | pub struct SetWeightsRequest { 13 | account_name: String, 14 | weights_by_account_name: HashMap, 15 | } 16 | 17 | pub async fn set_weights( 18 | torus_client: &Client, 19 | request: SetWeightsRequest, 20 | ) -> Result { 21 | let keypair = 
keypair_from_name(request.account_name)?; 22 | let weights = request 23 | .weights_by_account_name 24 | .into_iter() 25 | .map(|(name, weight)| match keypair_from_name(name) { 26 | Ok(keypair) => Ok((keypair.public_key().to_account_id(), weight)), 27 | Err(err) => Err(err), 28 | }) 29 | .collect::, _>>()?; 30 | 31 | match torus_client 32 | .emission0() 33 | .calls() 34 | .set_weights_wait(weights, keypair) 35 | .await 36 | { 37 | Ok(_) => Ok(CallToolResult::success(vec![Content::text( 38 | "set weights successfully", 39 | )])), 40 | Err(err) => { 41 | dbg!(&err); 42 | Err(ErrorData::internal_error(err.to_string(), None)) 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /.github/workflows/build-docker-xtask.yml: -------------------------------------------------------------------------------- 1 | name: Build xtask cli tool Docker image 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - dev 8 | - github-ci-test 9 | paths: 10 | - "xtask/**" 11 | - ".github/workflows/build-docker-xtask.yml" 12 | - "docker/xtask.dockerfile" 13 | 14 | jobs: 15 | docker: 16 | permissions: 17 | contents: read 18 | packages: write 19 | runs-on: ubicloud-standard-2 20 | steps: 21 | - uses: actions/checkout@v4 22 | 23 | - name: Docker meta 24 | id: meta 25 | uses: docker/metadata-action@v5 26 | with: 27 | images: ghcr.io/${{ github.repository_owner }}/torus-xtask 28 | tags: | 29 | type=sha,prefix=,enable=true 30 | type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }} 31 | 32 | - name: Set up Docker Buildx 33 | uses: docker/setup-buildx-action@v3 34 | 35 | - name: Login to GitHub Container Registry 36 | uses: docker/login-action@v3 37 | with: 38 | registry: ghcr.io 39 | username: ${{ github.actor }} 40 | password: ${{ secrets.GITHUB_TOKEN }} 41 | 42 | - name: Build and push 43 | uses: docker/build-push-action@v5 44 | with: 45 | context: . 
46 | push: true 47 | file: ./docker/xtask.dockerfile 48 | tags: ${{ steps.meta.outputs.tags }} 49 | cache-from: type=gha 50 | cache-to: type=gha,mode=max 51 | -------------------------------------------------------------------------------- /mcp/src/balance.rs: -------------------------------------------------------------------------------- 1 | use rmcp::{ 2 | ErrorData, 3 | model::{CallToolResult, Content}, 4 | }; 5 | 6 | use crate::{Client, utils::keypair_from_name}; 7 | 8 | #[derive(schemars::JsonSchema, serde::Deserialize, serde::Serialize)] 9 | pub struct BalanceCheckRequest { 10 | account_name: String, 11 | } 12 | 13 | #[derive(schemars::JsonSchema, serde::Deserialize, serde::Serialize)] 14 | pub struct BalanceCheckResponse { 15 | free: u128, 16 | reserved: u128, 17 | frozen: u128, 18 | } 19 | 20 | #[derive(schemars::JsonSchema, serde::Deserialize, serde::Serialize)] 21 | pub struct FaucetRequest { 22 | account_name: String, 23 | } 24 | 25 | pub async fn check_account_balance( 26 | torus_client: &Client, 27 | request: BalanceCheckRequest, 28 | ) -> Result { 29 | let keypair = keypair_from_name(request.account_name)?; 30 | 31 | match torus_client 32 | .system() 33 | .storage() 34 | .account_get(&keypair.public_key().to_account_id()) 35 | .await 36 | { 37 | Ok(data) => Ok(CallToolResult::success(vec![Content::json( 38 | data.map(|data| BalanceCheckResponse { 39 | free: data.data.free, 40 | reserved: data.data.reserved, 41 | frozen: data.data.frozen, 42 | }) 43 | .unwrap_or(BalanceCheckResponse { 44 | free: 0, 45 | reserved: 0, 46 | frozen: 0, 47 | }), 48 | )?])), 49 | Err(err) => Err(ErrorData::internal_error(format!("{err:?}"), None)), 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /pallets/permission0/src/permission/wallet.rs: -------------------------------------------------------------------------------- 1 | use codec::{Decode, Encode, MaxEncodedLen}; 2 | use polkadot_sdk::frame_support::{CloneNoBound, 
DebugNoBound, EqNoBound, PartialEqNoBound}; 3 | use scale_info::TypeInfo; 4 | 5 | use crate::Config; 6 | 7 | #[derive(CloneNoBound, DebugNoBound, Encode, Decode, MaxEncodedLen, TypeInfo)] 8 | #[scale_info(skip_type_params(T))] 9 | pub struct WalletScope { 10 | pub recipient: T::AccountId, 11 | pub r#type: WalletScopeType, 12 | } 13 | 14 | impl WalletScope { 15 | /// Cleanup operations when permission is revoked or expired 16 | pub(crate) fn cleanup( 17 | &self, 18 | _permission_id: polkadot_sdk::sp_core::H256, 19 | _last_execution: &Option>, 20 | _delegator: &T::AccountId, 21 | ) { 22 | // No actions to perform 23 | } 24 | } 25 | 26 | #[derive(CloneNoBound, DebugNoBound, Encode, Decode, MaxEncodedLen, TypeInfo)] 27 | pub enum WalletScopeType { 28 | Stake(WalletStake), 29 | } 30 | 31 | #[derive( 32 | CloneNoBound, DebugNoBound, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEqNoBound, EqNoBound, 33 | )] 34 | pub struct WalletStake { 35 | /// If true, allows the recipient to perform transfer of stake between staked accounts. 36 | pub can_transfer_stake: bool, 37 | /// If true, this permission holds exclusive access to the delegator stake, meaning that 38 | /// the delegator has no right to perform operations over stake (including unstaking) 39 | /// while this permission is active. 40 | pub exclusive_stake_access: bool, 41 | } 42 | -------------------------------------------------------------------------------- /pallets/governance/src/whitelist.rs: -------------------------------------------------------------------------------- 1 | use polkadot_sdk::frame_support::dispatch::DispatchResult; 2 | 3 | use crate::{AccountIdOf, application}; 4 | 5 | /// Adds a key to the DAO whitelist, allowing it to register an agent. 
6 | pub fn add_to_whitelist(key: AccountIdOf) -> DispatchResult { 7 | if is_whitelisted::(&key) { 8 | return Err(crate::Error::::AlreadyWhitelisted.into()); 9 | } 10 | 11 | if application::exists_for_agent_key::(&key, &application::ApplicationAction::Add) { 12 | return Err(crate::Error::::ApplicationKeyAlreadyUsed.into()); 13 | } 14 | 15 | crate::Whitelist::::insert(key.clone(), ()); 16 | crate::Pallet::::deposit_event(crate::Event::::WhitelistAdded(key)); 17 | 18 | Ok(()) 19 | } 20 | 21 | /// Remove a key from the DAO whitelist, disallowing the key to register an 22 | /// agent, or de-registering an existing one. 23 | pub fn remove_from_whitelist(key: AccountIdOf) -> DispatchResult { 24 | if !is_whitelisted::(&key) { 25 | return Err(crate::Error::::NotWhitelisted.into()); 26 | } 27 | 28 | if application::exists_for_agent_key::(&key, &application::ApplicationAction::Remove) { 29 | return Err(crate::Error::::ApplicationKeyAlreadyUsed.into()); 30 | } 31 | 32 | crate::Whitelist::::remove(&key); 33 | crate::Pallet::::deposit_event(crate::Event::::WhitelistRemoved(key)); 34 | 35 | Ok(()) 36 | } 37 | 38 | pub fn is_whitelisted(key: &AccountIdOf) -> bool { 39 | crate::Whitelist::::contains_key(key) 40 | } 41 | -------------------------------------------------------------------------------- /pallets/torus0/src/fee.rs: -------------------------------------------------------------------------------- 1 | use core::marker::PhantomData; 2 | 3 | use codec::{Decode, Encode, MaxEncodedLen}; 4 | use polkadot_sdk::{ 5 | frame_election_provider_support::Get, frame_support::DebugNoBound, sp_runtime::Percent, 6 | }; 7 | use scale_info::TypeInfo; 8 | 9 | #[derive(DebugNoBound, Decode, Encode, MaxEncodedLen, PartialEq, Eq, TypeInfo)] 10 | #[scale_info(skip_type_params(T))] 11 | pub struct ValidatorFeeConstraints { 12 | pub min_staking_fee: Percent, 13 | pub min_weight_control_fee: Percent, 14 | pub _pd: PhantomData, 15 | } 16 | 17 | impl Default for ValidatorFeeConstraints { 18 | fn 
default() -> Self { 19 | Self { 20 | min_staking_fee: Percent::from_percent(T::DefaultMinStakingFee::get()), 21 | min_weight_control_fee: Percent::from_percent(T::DefaultMinWeightControlFee::get()), 22 | _pd: PhantomData, 23 | } 24 | } 25 | } 26 | 27 | #[derive(DebugNoBound, Decode, Encode, MaxEncodedLen, PartialEq, Eq, TypeInfo)] 28 | #[scale_info(skip_type_params(T))] 29 | pub struct ValidatorFee { 30 | pub staking_fee: Percent, 31 | pub weight_control_fee: Percent, 32 | pub _pd: PhantomData, 33 | } 34 | 35 | impl Default for ValidatorFee { 36 | fn default() -> Self { 37 | let fee_constraints = crate::FeeConstraints::::get(); 38 | 39 | Self { 40 | staking_fee: fee_constraints.min_staking_fee, 41 | weight_control_fee: fee_constraints.min_weight_control_fee, 42 | _pd: PhantomData, 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /client/src/rpc.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use subxt::backend::rpc::RpcClient; 4 | use subxt::ext::subxt_rpcs::client::RpcParams; 5 | use subxt::utils::{AccountId32, H256}; 6 | use subxt::{OnlineClient, PolkadotConfig}; 7 | 8 | use crate::client::TorusClient; 9 | 10 | impl TorusClient { 11 | pub fn rpc(&self) -> Rpc { 12 | Rpc { 13 | client: self.client.clone(), 14 | rpc_client: self.rpc_client.clone(), 15 | _pd: PhantomData, 16 | } 17 | } 18 | } 19 | 20 | pub struct Rpc { 21 | pub(crate) client: OnlineClient, 22 | pub(crate) rpc_client: RpcClient, 23 | pub(crate) _pd: PhantomData, 24 | } 25 | 26 | impl Rpc { 27 | pub async fn root_namespace_for_account(&self, account_id: AccountId32) -> crate::Result { 28 | let mut rpc_params = RpcParams::new(); 29 | rpc_params.push(account_id)?; 30 | 31 | let res = self 32 | .rpc_client 33 | .request("permission0_rootStreamIdForAccount", rpc_params) 34 | .await?; 35 | 36 | Ok(res) 37 | } 38 | 39 | pub async fn namespace_path_creation_cost( 40 | 
&self, 41 | account_id: AccountId32, 42 | path: impl AsRef, 43 | ) -> crate::Result<(u128, u128)> { 44 | let mut rpc_params = RpcParams::new(); 45 | rpc_params.push(account_id)?; 46 | 47 | let path = path.as_ref().as_bytes().to_vec(); 48 | rpc_params.push(path)?; 49 | 50 | let res = self 51 | .rpc_client 52 | .request("torus0_namespacePathCreationCost", rpc_params) 53 | .await?; 54 | 55 | Ok(res) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /pallets/permission0/rpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use jsonrpsee::{core::RpcResult, types::ErrorObject}; 4 | use pallet_permission0_api::{Permission0RuntimeApi, StreamId}; 5 | use polkadot_sdk::{ 6 | sp_api::ProvideRuntimeApi, 7 | sp_blockchain::HeaderBackend, 8 | sp_runtime::{ 9 | traits::{IdentifyAccount, Verify}, 10 | MultiSignature, 11 | }, 12 | }; 13 | use torus_runtime::opaque::Block; 14 | 15 | type Signature = MultiSignature; 16 | type AccountId = <::Signer as IdentifyAccount>::AccountId; 17 | 18 | #[jsonrpsee::proc_macros::rpc(client, server)] 19 | pub trait Permission0StreamApi { 20 | #[method(name = "permission0_rootStreamIdForAccount")] 21 | async fn root_stream_id_for_account(&self, account: AccountId) -> RpcResult; 22 | } 23 | 24 | pub struct Permission0Rpc { 25 | client: Arc, 26 | } 27 | 28 | impl Permission0Rpc { 29 | pub fn new(client: Arc) -> Self { 30 | Self { client } 31 | } 32 | } 33 | 34 | #[async_trait::async_trait] 35 | impl Permission0StreamApiServer for Permission0Rpc 36 | where 37 | Client: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, 38 | Client::Api: Permission0RuntimeApi, 39 | { 40 | async fn root_stream_id_for_account(&self, account: AccountId) -> RpcResult { 41 | let runtime = &*self.client.runtime_api(); 42 | let at = self.client.info().best_hash; 43 | 44 | runtime 45 | .root_stream_id_for_account(at, account) 46 | .map_err(|err| 
ErrorObject::owned(1, "Runtime execution failed", Some(err.to_string()))) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /pallets/permission0/src/permission/namespace.rs: -------------------------------------------------------------------------------- 1 | use codec::{Decode, Encode, MaxEncodedLen}; 2 | use pallet_torus0_api::NamespacePath; 3 | use polkadot_sdk::{ 4 | frame_support::{CloneNoBound, DebugNoBound}, 5 | sp_runtime::{BoundedBTreeMap, BoundedBTreeSet}, 6 | }; 7 | use scale_info::TypeInfo; 8 | 9 | use crate::{Config, Permissions}; 10 | 11 | use super::PermissionId; 12 | 13 | /// Scope for namespace permissions 14 | #[derive(Encode, Decode, CloneNoBound, TypeInfo, MaxEncodedLen, DebugNoBound)] 15 | #[scale_info(skip_type_params(T))] 16 | pub struct NamespaceScope { 17 | pub recipient: T::AccountId, 18 | /// Set of namespace paths this permission delegates access to 19 | pub paths: BoundedBTreeMap< 20 | Option, 21 | BoundedBTreeSet, 22 | T::MaxNamespacesPerPermission, 23 | >, 24 | /// Maximum number of instances of this permission 25 | pub max_instances: u32, 26 | /// Children permissions 27 | pub children: BoundedBTreeSet, 28 | } 29 | 30 | impl NamespaceScope { 31 | /// Cleanup operations when permission is revoked or expired 32 | pub(super) fn cleanup( 33 | &self, 34 | permission_id: polkadot_sdk::sp_core::H256, 35 | _last_execution: &Option>, 36 | _delegator: &T::AccountId, 37 | ) { 38 | for pid in self.paths.keys().cloned().flatten() { 39 | Permissions::::mutate_extant(pid, |parent| { 40 | if let Some(children) = parent.children_mut() { 41 | children.remove(&permission_id); 42 | } 43 | }); 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pallets/governance/src/config.rs: -------------------------------------------------------------------------------- 1 | use codec::{Decode, Encode, MaxEncodedLen}; 2 | use polkadot_sdk::{ 3 | 
frame_election_provider_support::Get, frame_support::DebugNoBound, 4 | polkadot_sdk_frame::prelude::BlockNumberFor, sp_runtime::Percent, 5 | }; 6 | use scale_info::TypeInfo; 7 | 8 | use crate::BalanceOf; 9 | 10 | #[derive(Clone, TypeInfo, Decode, Encode, PartialEq, Eq, DebugNoBound, MaxEncodedLen)] 11 | #[scale_info(skip_type_params(T))] 12 | pub struct GovernanceConfiguration { 13 | pub proposal_cost: BalanceOf, 14 | pub proposal_expiration: BlockNumberFor, 15 | pub agent_application_cost: BalanceOf, 16 | pub agent_application_expiration: BlockNumberFor, 17 | pub proposal_reward_treasury_allocation: Percent, 18 | pub max_proposal_reward_treasury_allocation: BalanceOf, 19 | pub proposal_reward_interval: BlockNumberFor, 20 | } 21 | 22 | impl Default for GovernanceConfiguration { 23 | fn default() -> Self { 24 | Self { 25 | proposal_cost: T::DefaultProposalCost::get(), 26 | proposal_expiration: T::DefaultProposalExpiration::get(), //130_000, 27 | agent_application_cost: T::DefaultAgentApplicationCost::get(), /* 100_000_000_000_000_000_000, */ 28 | agent_application_expiration: T::DefaultAgentApplicationExpiration::get(), //2_000, 29 | proposal_reward_treasury_allocation: T::DefaultProposalRewardTreasuryAllocation::get(), /* Percent::from_percent(2), */ 30 | max_proposal_reward_treasury_allocation: 31 | T::DefaultMaxProposalRewardTreasuryAllocation::get(), /* 10_000_000_000_000_000_000_000, */ 32 | proposal_reward_interval: T::DefaultProposalRewardInterval::get(), //75_600, 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /mcp/src/consensus.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use rmcp::{ 4 | ErrorData, 5 | model::{CallToolResult, Content}, 6 | }; 7 | use torus_client::subxt::ext::futures::StreamExt; 8 | 9 | use crate::{Client, utils::name_or_key}; 10 | 11 | #[derive(schemars::JsonSchema, serde::Deserialize, 
serde::Serialize)] 12 | pub struct ConsensusMembersResponse { 13 | consensus_members: Vec, 14 | } 15 | 16 | #[derive(schemars::JsonSchema, serde::Deserialize, serde::Serialize)] 17 | pub struct ConsensusMember { 18 | name: String, 19 | weights: HashMap, 20 | last_incentives: u16, 21 | last_dividends: u16, 22 | } 23 | 24 | pub async fn list_consensus_members(torus_client: &Client) -> Result { 25 | let mut stream = match torus_client 26 | .emission0() 27 | .storage() 28 | .consensus_members_iter() 29 | .await 30 | { 31 | Ok(stream) => stream, 32 | Err(err) => { 33 | dbg!(&err); 34 | return Err(ErrorData::internal_error(err.to_string(), None)); 35 | } 36 | }; 37 | 38 | let mut members = Vec::new(); 39 | 40 | while let Some(Ok((id, member))) = stream.next().await { 41 | members.push(ConsensusMember { 42 | name: name_or_key(&id), 43 | weights: member 44 | .weights 45 | .0 46 | .into_iter() 47 | .map(|(id, weight)| (name_or_key(&id), weight)) 48 | .collect(), 49 | last_incentives: member.last_incentives, 50 | last_dividends: member.last_dividends, 51 | }); 52 | } 53 | 54 | Ok(CallToolResult::success(vec![Content::json( 55 | ConsensusMembersResponse { 56 | consensus_members: members, 57 | }, 58 | )?])) 59 | } 60 | -------------------------------------------------------------------------------- /client/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | //! # Torus Client 4 | //! 5 | //! A client library for interacting with the Torus blockchain network using subxt. 6 | //! 7 | //! ## Overview 8 | //! 9 | //! The Torus client provides type-safe, ergonomic access to all Torus blockchain functionality. Built with auto-generated interfaces from runtime metadata, it ensures compatibility and type safety. 10 | //! 11 | //! ## Quick Start 12 | //! 13 | //! Add to your `Cargo.toml`: 14 | //! 15 | //! ```toml 16 | //! [dependencies] 17 | //! torus-client = "0.1.0" 18 | //! 
tokio = { version = "1.0", features = ["full"] } 19 | //! ``` 20 | //! 21 | //! Basic Usage 22 | //! 23 | //! ```rs 24 | //! #[tokio::main] 25 | //! async fn main() -> Result<(), Box> { 26 | //! // Connect to mainnet 27 | //! let client = TorusClient::for_mainnet().await?; 28 | //! 29 | //! // Or to testnet 30 | //! let client = TorusClient::for_testnet().await?; 31 | //! 32 | //! // Call extrinsics 33 | //! client.torus0().calls().register_agent(...); 34 | //! 35 | //! // Fetch storages 36 | //! client.governance().storage().agent_applications_get(...); 37 | //! 38 | //! // Subscribe to events 39 | //! client.events().subscribe::<...>(); 40 | //! 41 | //! // Call rpcs 42 | //! client.rpc().namespace_path_creation_cost(...); 43 | //! 44 | //! // Access subxt client 45 | //! client.inner_client(); 46 | //! 47 | //! Ok(()) 48 | //! } 49 | //! ``` 50 | //! 51 | //! More examples on the `examples/` folder. 52 | 53 | pub use subxt; 54 | pub use subxt_signer; 55 | 56 | pub mod utils; 57 | 58 | pub mod chain; 59 | pub mod client; 60 | mod error; 61 | pub use error::*; 62 | pub mod events; 63 | pub mod rpc; 64 | 65 | #[allow(clippy::too_many_arguments, dead_code)] 66 | pub mod interfaces; 67 | -------------------------------------------------------------------------------- /client/README.md: -------------------------------------------------------------------------------- 1 | # Torus Client 2 | 3 | [![Crates.io](https://img.shields.io/crates/v/torus-client.svg)](https://crates.io/crates/torus-client) 4 | [![Documentation](https://docs.rs/torus-client/badge.svg)](https://docs.rs/torus-client) 5 | [![Build Status](https://github.com/renlabs-dev/torus-substrate/workflows/CI/badge.svg)](https://github.com/renlabs-dev/torus-substrate/actions) 6 | 7 | A Rust client library for interacting with the Torus blockchain. 8 | 9 | ## Overview 10 | 11 | The Torus client provides type-safe, ergonomic access to all Torus blockchain functionality. 
Built with auto-generated interfaces from runtime metadata, it ensures compatibility and type safety. 12 | 13 | ## Quick Start 14 | 15 | Add to your `Cargo.toml`: 16 | 17 | ```toml 18 | [dependencies] 19 | torus-client = "0.1.0" 20 | tokio = { version = "1.0", features = ["full"] } 21 | ``` 22 | 23 | Basic Usage 24 | 25 | ```rs 26 | #[tokio::main] 27 | async fn main() -> Result<(), Box> { 28 | // Connect to mainnet 29 | let client = TorusClient::for_mainnet().await?; 30 | 31 | // Or to testnet 32 | let client = TorusClient::for_testnet().await?; 33 | 34 | // Call extrinsics 35 | client.torus0().calls().register_agent(...); 36 | 37 | // Fetch storages 38 | client.governance().storage().agent_applications_get(...); 39 | 40 | // Subscribe to events 41 | client.events().subscribe::<...>(); 42 | 43 | // Call rpcs 44 | client.rpc().namespace_path_creation_cost(...); 45 | 46 | // Access subxt client 47 | client.inner_client(); 48 | 49 | Ok(()) 50 | } 51 | ``` 52 | 53 | More examples on the `examples/` folder. 54 | 55 | ## Contributing 56 | 57 | See ../CONTRIBUTING.md for development setup and guidelines. 58 | 59 | ## License 60 | 61 | Licensed under ../LICENSE. 
62 | -------------------------------------------------------------------------------- /data/mainnet/gran.pub.json: -------------------------------------------------------------------------------- 1 | [ 2 | [ 3 | "5GMXEvALNWNDxTyEcoDje7BLWTDULLnMiDcgGNAAqARoorAQ", 4 | 1 5 | ], 6 | [ 7 | "5C6F9jugYXyFrmAiVgwnnVdk61bo2Z1mHdvDPPPBUHJ945rU", 8 | 1 9 | ], 10 | [ 11 | "5Ec72auaTFKnBfSjVJ2AzYJu1difirGrtuoK75J3NqkL7Se5", 12 | 1 13 | ], 14 | [ 15 | "5CfqBtvZQZSU2hofUD3dCxka1kyXEZHmJFSqru9NXZJnfU15", 16 | 1 17 | ], 18 | [ 19 | "5CbP8wXa2LDfaDwuyK4gJG3xn1MoVVRafogTC2uq9KwU8coC", 20 | 1 21 | ], 22 | [ 23 | "5DbmvGs5STwgo4mkQSG46cbnY3WvQkpuJjmvq8sVKEwyjAXB", 24 | 1 25 | ], 26 | [ 27 | "5G8wJKMug3FAF3Xmt79q1FkQ6vbkSyFBHBQCThJVexQF6U2t", 28 | 1 29 | ], 30 | [ 31 | "5DgKEtseBx4ypcjLMTDZKyt1NvXWJuRXZXJVdZdyVA36iciA", 32 | 1 33 | ], 34 | [ 35 | "5DDw6pdWf1r82jaY2m1Do8qw1e2rHTzTsLtz8fRGKHt7ja2L", 36 | 1 37 | ], 38 | [ 39 | "5Fh8Reu8XyvimqPrQ5Mmf5kaR54WhhobRT9EhUjRNJ4ojMkE", 40 | 1 41 | ], 42 | [ 43 | "5FEktJzcgo2wqbMfMYnHHxSvH9LuTcnzszkvE9ubdw3vqEDq", 44 | 1 45 | ], 46 | [ 47 | "5DFf7EeCt6nDEhrvMQEZrByuHU1KGrZ5yo7bLobPiUHSMsvC", 48 | 1 49 | ], 50 | [ 51 | "5GJx6hfASXLaYU9F9kJfh59QDRnMqWh982WuDbNuhcy5TmSD", 52 | 1 53 | ], 54 | [ 55 | "5DLJczfZQo9zeUCCUekr1HxrtHz9uAGGvYh7j5jrQjyDUuK8", 56 | 1 57 | ], 58 | [ 59 | "5FJQKLMBhYze3EhPidCgqMJ8frpPCQKW6sStToReASg3j3dZ", 60 | 1 61 | ], 62 | [ 63 | "5FoirCgSYsa5eHcAAHrSMuJ1WDHZdz4WWdx4VqwY3cAfzHYv", 64 | 1 65 | ], 66 | [ 67 | "5Dngmp9arbJ4Yjarfm5AoSKGVk6SSPrvGhiasLxppEhUdy4K", 68 | 1 69 | ], 70 | [ 71 | "5GpGEtdru4eNKnKzCgNAZnHMmRWudEyEsGmJJeaib5Jr3F3U", 72 | 1 73 | ], 74 | [ 75 | "5F9G2dhk18oSQyJJ34QXuqBdSHiF4ff4qKkbB4F4PvW2oX3d", 76 | 1 77 | ], 78 | [ 79 | "5G9hFMqTb3YcZ8An46fLUrjReNx3p8pdTWatz8XPmPWutmyi", 80 | 1 81 | ], 82 | [ 83 | "5Fen622mpK7xg9n4it3Hav4PBjZnFtg1Uc464rPDWGM8Lzsp", 84 | 1 85 | ] 86 | ] 87 | -------------------------------------------------------------------------------- /client/src/events.rs: 
-------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use futures::{stream, TryStreamExt}; 4 | use subxt::{ 5 | events::{EventDetails, StaticEvent}, 6 | OnlineClient, PolkadotConfig, 7 | }; 8 | 9 | use crate::client::TorusClient; 10 | 11 | impl TorusClient { 12 | pub fn events(&self) -> Events { 13 | Events { 14 | client: self.client.clone(), 15 | _pd: PhantomData, 16 | } 17 | } 18 | } 19 | 20 | pub struct Events { 21 | client: OnlineClient, 22 | _pd: PhantomData, 23 | } 24 | 25 | impl Events { 26 | pub async fn subscribe_unfiltered( 27 | &self, 28 | ) -> crate::Result>>> 29 | { 30 | let stream = self.client.blocks().subscribe_finalized().await?; 31 | 32 | let events_stream = stream 33 | .and_then(|block| async move { 34 | let events = block.events().await?; 35 | let all_events: Result, _> = events.iter().collect(); 36 | all_events 37 | }) 38 | .map_ok(|events| stream::iter(events.into_iter().map(Ok))) 39 | .try_flatten(); 40 | 41 | Ok(Box::pin(events_stream)) 42 | } 43 | 44 | pub async fn subscribe( 45 | &self, 46 | ) -> crate::Result>> { 47 | let stream = self.client.blocks().subscribe_finalized().await?; 48 | 49 | let events_stream = stream 50 | .and_then(|block| async move { 51 | let events = block.events().await?; 52 | let all_events: Result, _> = events.find::().collect(); 53 | all_events 54 | }) 55 | .map_ok(|events| stream::iter(events.into_iter().map(Ok))) 56 | .try_flatten(); 57 | 58 | Ok(Box::pin(events_stream)) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /.github/workflows/build-runtime.yml: -------------------------------------------------------------------------------- 1 | name: Build Torus runtime 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - "github-ci-test" 8 | - "build-runtime*" 9 | - "testnet" 10 | tags: 11 | - "runtime/*" 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | 
cancel-in-progress: true 16 | 17 | jobs: 18 | build: 19 | runs-on: ubicloud-standard-16 20 | 21 | steps: 22 | - uses: actions/checkout@v4 23 | 24 | - name: Setup Rust cache 25 | uses: Swatinem/rust-cache@v2 26 | 27 | - name: Install Rust toolchain 28 | run: | 29 | rustup set profile minimal 30 | rustup show 31 | 32 | - name: Install Protoc 33 | uses: arduino/setup-protoc@v1 34 | with: 35 | version: 3.20.1 36 | repo-token: ${{ secrets.GITHUB_TOKEN }} 37 | 38 | - name: Build runtime 39 | run: | 40 | echo "Building ${{ startsWith(github.ref, 'refs/tags/runtime/testnet') && 'with testnet feature flag' || 'without testnet feature flag' }}" 41 | cargo build --release --timings --package torus-runtime ${{ startsWith(github.ref, 'refs/tags/runtime/testnet') && '--features testnet' || '' }} 42 | 43 | export SHA256SUM=$(sha256sum target/release/wbuild/torus-runtime/torus_runtime.compact.compressed.wasm | cut -d ' ' -f1) 44 | echo Hash of compact and compressed WASM: $SHA256SUM 45 | 46 | mkdir out 47 | mv target/release/wbuild/torus-runtime/torus_runtime.compact.compressed.wasm out/ 48 | touch out/$SHA256SUM 49 | 50 | - uses: actions/upload-artifact@v4 51 | with: 52 | name: torus_runtime.compact.compressed 53 | path: out/ 54 | if-no-files-found: error 55 | overwrite: true 56 | 57 | - uses: actions/upload-artifact@v4 58 | with: 59 | name: torus-runtime-timings 60 | path: target/cargo-timings/cargo-timing.html 61 | overwrite: true 62 | -------------------------------------------------------------------------------- /.github/workflows/build-docker-node.yml: -------------------------------------------------------------------------------- 1 | name: Build Torus node Docker image 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - dev 8 | - testnet 9 | - github-ci-test 10 | tags: 11 | - "*" 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | build-n-push: 19 | permissions: write-all 20 | runs-on: 
ubicloud-standard-16 21 | steps: 22 | - uses: actions/checkout@v4 23 | 24 | - id: commit 25 | uses: prompt/actions-commit-hash@v3 26 | 27 | - name: Set up Docker Buildx 28 | uses: docker/setup-buildx-action@v3 29 | 30 | - name: Docker Login 31 | uses: docker/login-action@v3.0.0 32 | with: 33 | registry: ghcr.io 34 | username: ${{ github.actor }} 35 | password: ${{ secrets.GITHUB_TOKEN }} 36 | 37 | - name: Prepare build 38 | id: prepare 39 | run: | 40 | echo "SANITIZED_REF=$(echo "${GITHUB_REF##*/}" | tr '/' '-')" >> $GITHUB_ENV 41 | echo "${{ secrets.AWS_ACCESS_KEY_ID }}" > "$HOME/aws-key-id.txt" 42 | echo "${{ secrets.AWS_SECRET_ACCESS_KEY }}" > "$HOME/aws-secret-key.txt" 43 | echo "AWS_ACCESS_KEY_ID_PATH=$HOME/aws-key-id.txt" >> $GITHUB_ENV 44 | echo "AWS_SECRET_ACCESS_KEY_PATH=$HOME/aws-secret-key.txt" >> $GITHUB_ENV 45 | 46 | - name: Build and push 47 | uses: docker/build-push-action@v6 48 | with: 49 | context: . 50 | push: true 51 | file: docker/node.dockerfile 52 | secret-files: | 53 | aws-key-id=${{ env.AWS_ACCESS_KEY_ID_PATH }} 54 | aws-secret-key=${{ env.AWS_SECRET_ACCESS_KEY_PATH }} 55 | build-args: | 56 | SCCACHE_BUCKET=torus-substrate-cache 57 | SCCACHE_ENDPOINT=${{ secrets.SCCACHE_ENDPOINT }} 58 | SCCACHE_REGION=auto 59 | tags: | 60 | ghcr.io/renlabs-dev/torus-substrate:${{ steps.commit.outputs.short }} 61 | ghcr.io/renlabs-dev/torus-substrate:${{ env.SANITIZED_REF }} 62 | -------------------------------------------------------------------------------- /docker/node.dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:12-slim AS builder 2 | 3 | ARG SCCACHE_BUCKET 4 | ARG SCCACHE_ENDPOINT 5 | ARG SCCACHE_REGION=auto 6 | 7 | WORKDIR /app 8 | COPY . . 9 | 10 | # Dependencies using during the build stage. 
11 | RUN apt update && apt install -y --no-install-recommends \ 12 | ca-certificates \ 13 | curl \ 14 | build-essential \ 15 | protobuf-compiler \ 16 | libclang-dev \ 17 | git \ 18 | pkg-config \ 19 | libssl-dev 20 | 21 | ENV PATH=/root/.cargo/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 22 | 23 | # Installs rust with a minimal footprint and adds the WASM chain. 24 | RUN curl https://sh.rustup.rs -sSf | \ 25 | sh -s -- -y --profile=minimal --default-toolchain=1.82.0 26 | 27 | RUN --mount=type=secret,id=aws-key-id \ 28 | --mount=type=secret,id=aws-secret-key \ 29 | export AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws-key-id) && \ 30 | export AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws-secret-key) && \ 31 | if [ -n "$AWS_ACCESS_KEY_ID" ]; then \ 32 | curl https://github.com/mozilla/sccache/releases/download/v0.9.0/sccache-v0.9.0-x86_64-unknown-linux-musl.tar.gz \ 33 | -Lo sccache-v0.9.0-x86_64-unknown-linux-musl.tar.gz; \ 34 | tar -xzf sccache-v0.9.0-x86_64-unknown-linux-musl.tar.gz --strip-components=1 \ 35 | sccache-v0.9.0-x86_64-unknown-linux-musl/sccache; \ 36 | if [ $(./sccache --start-server) ]; then \ 37 | echo "Enabling sccache"; \ 38 | export RUSTC_WRAPPER="/app/sccache"; \ 39 | fi; \ 40 | fi; \ 41 | cargo build -p torus-node --release --locked 42 | 43 | RUN --mount=type=secret,id=aws-key-id \ 44 | export AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws-key-id) && \ 45 | if [ -n "$AWS_ACCESS_KEY_ID" ]; then \ 46 | ./sccache --show-stats; \ 47 | fi 48 | 49 | FROM debian:12-slim 50 | 51 | RUN apt update && apt install -y zlib1g && \ 52 | rm -rf /var/cache/apt/archives /var/lib/apt/lists/* 53 | 54 | COPY --from=builder /app/target/release/torus-node /usr/local/bin 55 | 56 | WORKDIR /torus 57 | 58 | ENV RUST_BACKTRACE=1 59 | CMD ["torus-node"] 60 | -------------------------------------------------------------------------------- /pallets/torus0/rpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | use 
std::sync::Arc; 2 | 3 | use jsonrpsee::{core::RpcResult, types::ErrorObject}; 4 | use pallet_torus0_api::{api::Torus0RuntimeApi, NamespacePathInner}; 5 | use polkadot_sdk::{ 6 | sp_api::ProvideRuntimeApi, 7 | sp_blockchain::HeaderBackend, 8 | sp_runtime::{ 9 | traits::{IdentifyAccount, Verify}, 10 | MultiSignature, 11 | }, 12 | }; 13 | use torus_runtime::opaque::Block; 14 | 15 | type Signature = MultiSignature; 16 | type AccountId = <::Signer as IdentifyAccount>::AccountId; 17 | type Balance = u128; 18 | 19 | #[jsonrpsee::proc_macros::rpc(client, server)] 20 | pub trait Torus0Api { 21 | #[method(name = "torus0_namespacePathCreationCost")] 22 | async fn namespace_path_creation_cost( 23 | &self, 24 | account_id: AccountId, 25 | path: NamespacePathInner, 26 | ) -> RpcResult<(Balance, Balance)>; 27 | } 28 | 29 | pub struct Torus0Rpc { 30 | client: Arc, 31 | } 32 | 33 | impl Torus0Rpc { 34 | pub fn new(client: Arc) -> Self { 35 | Self { client } 36 | } 37 | } 38 | 39 | #[async_trait::async_trait] 40 | impl Torus0ApiServer for Torus0Rpc 41 | where 42 | Client: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, 43 | Client::Api: Torus0RuntimeApi, 44 | { 45 | async fn namespace_path_creation_cost( 46 | &self, 47 | account_id: AccountId, 48 | path: NamespacePathInner, 49 | ) -> RpcResult<(Balance, Balance)> { 50 | let runtime = &*self.client.runtime_api(); 51 | let at = self.client.info().best_hash; 52 | 53 | runtime 54 | .namespace_path_creation_cost(at, account_id, path) 55 | .map(|res| { 56 | res.map_err(|err| { 57 | ErrorObject::owned( 58 | 1, 59 | "namespace cost calculation failed", 60 | Some(format!("{err:?}")), 61 | ) 62 | }) 63 | }) 64 | .map_err(|err| { 65 | ErrorObject::owned(1, "Runtime execution failed", Some(err.to_string())) 66 | })? 
67 | } 68 | } 69 | -------------------------------------------------------------------------------- /data/mainnet/bootnodes.json: -------------------------------------------------------------------------------- 1 | [ 2 | "/dns4/validator-0.nodes.torus.network/tcp/30333/p2p/12D3KooWMusxwQjbKYf4oXDciA7maVUWoBHw5im6dzi9vDj5M4QD", 3 | "/dns4/validator-1.nodes.torus.network/tcp/30333/p2p/12D3KooWG2nfpHqsoRH5obvN9AtDYbwEQB9ruhFb7fU318VqRKWo", 4 | "/dns4/validator-2.nodes.torus.network/tcp/30333/p2p/12D3KooWGGcQJ4Jk9J9uUVzUwwqVTa1m5vkWina8mRyNZKHpdfBG", 5 | "/dns4/validator-3.nodes.torus.network/tcp/30333/p2p/12D3KooWKv797oXFbuWxv4zGJ3KFzpv3CJwGngXpZnFD5WcctsB4", 6 | "/dns4/validator-4.nodes.torus.network/tcp/30333/p2p/12D3KooWPjuSYUJjroaWjLjExMmHukKGX6fQQjgu3dNavYFXudxE", 7 | "/dns4/validator-5.nodes.torus.network/tcp/30333/p2p/12D3KooWBuydmvQtS8BCWKBnntA31r2Z3PiAiE3FNDX8NnSubFHQ", 8 | "/dns4/validator-6.nodes.torus.network/tcp/30333/p2p/12D3KooWF5fnR9N325nTg9QoEGxvH3AqeonE38DAMDvPLsLXV4Vd", 9 | "/dns4/validator-7.nodes.torus.network/tcp/30333/p2p/12D3KooWPkRzoDMRq3iGSkCbkfSLQEb6DUizQThsiJssjeeeTtey", 10 | "/dns4/validator-8.nodes.torus.network/tcp/30333/p2p/12D3KooWLfxSBtAvkJUK4KYBSaVyhq6rtg4PVNvGC8xNE9a4Adnj", 11 | "/dns4/validator-9.nodes.torus.network/tcp/30333/p2p/12D3KooWQmFzuMt3LWHo97E857yUWGfgcgzWuDEaQZ4aEvdjf1db", 12 | "/dns4/validator-10.nodes.torus.network/tcp/30333/p2p/12D3KooWJt9jbAEDXwiSUs14fRUDxNbnmvo8iAiP354x6D8vFZRq", 13 | "/dns4/validator-11.nodes.torus.network/tcp/30333/p2p/12D3KooWGrKbSLuxyPstp8xyyJdfi4pSMAYJPxX9Z3xzFCNrTrtW", 14 | "/dns4/validator-12.nodes.torus.network/tcp/30333/p2p/12D3KooWGZhYejc8cDvJBFyU6PbKWigeZLRocs28AmJhtMB3JGDt", 15 | "/dns4/validator-13.nodes.torus.network/tcp/30333/p2p/12D3KooWH9AvU5N8qEPHKwqd1QsnqoEZZLn9saEpuQeJpmbfrKMw", 16 | "/dns4/validator-14.nodes.torus.network/tcp/30333/p2p/12D3KooWMoEV8Rg8sqY2yfAiwSxHyp7cajYZpiDoCbNCp4SeYeBA", 17 | 
"/dns4/validator-15.nodes.torus.network/tcp/30333/p2p/12D3KooWRyfEqB9F8Xg2s2VPnPJ5PxQzZ4qjGvS72sShipZBqtUp", 18 | "/dns4/validator-16.nodes.torus.network/tcp/30333/p2p/12D3KooWDqswAykVFcavDwnk7sepZDuKnr4QCis5SQ8yiCngxBbL", 19 | "/dns4/validator-17.nodes.torus.network/tcp/30333/p2p/12D3KooWHb7J3sUTWJY7nUWmKA7GF4cy2FSjD9Qc6aGuP8WtWKct", 20 | "/dns4/validator-18.nodes.torus.network/tcp/30333/p2p/12D3KooWGoGk99jsq6gBp9BsH5Gfuv5sbDzCxavG5rhQy2QQ2Q1U", 21 | "/dns4/validator-19.nodes.torus.network/tcp/30333/p2p/12D3KooWMVdvhywxVp3k1WrX6FGqfLWTpuSa3HgLFpjC3QsNX1Cn", 22 | "/dns4/validator-20.nodes.torus.network/tcp/30333/p2p/12D3KooWPuQ4BMgeqmoMJ8WKDsFiazWxWAtNsmnAnEAJqzMt9FVq" 23 | ] 24 | -------------------------------------------------------------------------------- /client/codegen/src/ir.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use quote::ToTokens; 4 | use syn::{Ident, Type}; 5 | 6 | #[derive(Debug, Clone, PartialEq)] 7 | pub struct PalletPattern { 8 | pub name: Ident, 9 | pub storages: Vec, 10 | pub calls: Vec, 11 | } 12 | 13 | /// Represents different types of Substrate storage patterns 14 | #[derive(Debug, Clone, PartialEq)] 15 | pub enum StoragePattern { 16 | /// Storage Value - no input parameters, single output value 17 | Value { 18 | name: String, 19 | pallet: String, 20 | return_type: String, 21 | }, 22 | /// Storage Map - single key input, maps to single value 23 | Map { 24 | name: String, 25 | pallet: String, 26 | key_type: String, 27 | return_type: String, 28 | }, 29 | /// Storage Double Map - two key inputs, maps to single value 30 | DoubleMap { 31 | name: String, 32 | pallet: String, 33 | key1_type: String, 34 | key2_type: String, 35 | return_type: String, 36 | }, 37 | /// Storage N Map - multiple key inputs (N > 2), maps to single value 38 | NMap { 39 | name: String, 40 | pallet: String, 41 | key_types: Vec, 42 | return_type: String, 43 | }, 44 | } 45 | 46 | #[derive(Clone)] 47 | 
pub struct CallPattern { 48 | pub name: Ident, 49 | pub params: Vec<(Ident, Type)>, 50 | pub ret: Type, 51 | pub pallet: Ident, 52 | } 53 | 54 | impl Debug for CallPattern { 55 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 56 | f.debug_struct("CallPattern") 57 | .field("name", &self.name) 58 | .field( 59 | "params", 60 | &self 61 | .params 62 | .iter() 63 | .map(|(ident, ty)| { 64 | ( 65 | ident.to_token_stream().to_string(), 66 | ty.to_token_stream().to_string(), 67 | ) 68 | }) 69 | .collect::>(), 70 | ) 71 | .field("ret", &self.ret.to_token_stream().to_string()) 72 | .field("pallet", &self.pallet) 73 | .finish() 74 | } 75 | } 76 | 77 | impl PartialEq for CallPattern { 78 | fn eq(&self, other: &Self) -> bool { 79 | self.name == other.name && self.pallet == other.pallet 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /pallets/governance/src/roles.rs: -------------------------------------------------------------------------------- 1 | use pallet_permission0_api::{CuratorPermissions, Permission0CuratorApi}; 2 | use polkadot_sdk::{ 3 | frame_election_provider_support::Get, 4 | frame_support::dispatch::DispatchResult, 5 | polkadot_sdk_frame::prelude::OriginFor, 6 | sp_runtime::{DispatchError, Percent}, 7 | }; 8 | 9 | use crate::{AccountIdOf, Allocators, Config, Error, Event, ensure}; 10 | 11 | /// Adds a new allocator to the network, checking wether it's already registered. 12 | #[doc(hidden)] 13 | pub fn add_allocator(key: AccountIdOf) -> DispatchResult { 14 | ensure!( 15 | !Allocators::::contains_key(&key), 16 | Error::::AlreadyAllocator 17 | ); 18 | Allocators::::insert(key, ()); 19 | Ok(()) 20 | } 21 | 22 | /// Removes an existing allocator to the network, checking wether it's registered. 
23 | #[doc(hidden)] 24 | pub fn remove_allocator(key: AccountIdOf) -> DispatchResult { 25 | ensure!( 26 | Allocators::::contains_key(&key), 27 | Error::::NotAllocator 28 | ); 29 | Allocators::::remove(&key); 30 | Ok(()) 31 | } 32 | 33 | /// Sets a penalty ratio for the given agent. 34 | pub fn penalize_agent( 35 | origin: OriginFor, 36 | agent_key: AccountIdOf, 37 | percentage: u8, 38 | ) -> DispatchResult { 39 | let curator = ::Permission0::ensure_curator_permission( 40 | origin, 41 | CuratorPermissions::PENALTY_CONTROL, 42 | )?; 43 | 44 | let percentage = Percent::from_parts(percentage); 45 | if percentage > T::MaxPenaltyPercentage::get() { 46 | return Err(Error::::InvalidPenaltyPercentage.into()); 47 | } 48 | 49 | pallet_torus0::Agents::::try_mutate(&agent_key, |agent| { 50 | let Some(agent) = agent else { 51 | return Err(Error::::AgentNotFound.into()); 52 | }; 53 | 54 | agent.weight_penalty_factor = percentage; 55 | 56 | Ok::<(), DispatchError>(()) 57 | })?; 58 | 59 | crate::Pallet::::deposit_event(Event::PenaltyApplied { 60 | curator, 61 | agent: agent_key, 62 | penalty: percentage, 63 | }); 64 | 65 | Ok(()) 66 | } 67 | 68 | /// Returns error if the origin is not listed as an allocator. 69 | pub fn ensure_allocator(key: &AccountIdOf) -> DispatchResult { 70 | if !crate::Allocators::::contains_key(key) { 71 | return Err(Error::::NotAllocator.into()); 72 | } 73 | 74 | Ok(()) 75 | } 76 | -------------------------------------------------------------------------------- /docs/deploy.md: -------------------------------------------------------------------------------- 1 | # Deploy steps 2 | 3 | 1. Checkout to the commit/branch that should be deployed with `git checkout `. 4 | 2. Do some last code checks. 5 | - Make sure all tests pass by running `cargo test` 6 | - Make sure the runtime migrations work by running `just try-runtime-upgrade-{testnet,mainnet}` 7 | 3. Create the release tag following the format `runtime/{test,main}net-{spec_version}`. 
Example: runtime/testnet-12 (the spec version can be found in `runtime/src/lib.rs` under the `runtime_macro!` call). This will trigger a GitHub Action that builds the runtime WASM binary. 8 | - The `spec_version` should ALWAYS be the higher number between the latest releases for mainnet and testnet + 1. Example: Latest testnet is 13 and mainnet is 9, the next release should be 14. 9 | - Make sure to check if the spec_version on the code is correct and change it if it's not. 10 | 4. Download said runtime binary and check its checksum. 11 | - The .zip file can be found on the `Artifacts` section of the Action that built it. Search for your commit message that has `Build Torus runtime` written below it on [this page](https://github.com/renlabs-dev/torus-substrate/actions). The correct file name is `torus_runtime.compact.compressed`. 12 | - Check that the hash given by the command `sha256sum <file>` is the same as the one beside the download button on the action page, on the `Digest` column. 13 | - The WASM blob to be used on the next steps is inside the downloaded zipped file. Extract it somewhere easy to find later. 14 | 5. Connect the Torus Testnet Root account to the [Polkadot Developer Interface](https://polkadot.js.org/apps/#/explorer) using the [PolkadotJS browser extension](https://polkadot.js.org/extension/). 15 | - Make sure the interface is configured to point to the right network by clicking on the left sidebar, scrolling all the way to the bottom; under the development tab it should be Custom with the address `wss://api.testnet.torus.network` (for testnet) or `wss://api.torus.network` (for mainnet). For testnet, it can also be checked through the `rpc=` query param; it should be something like this `?rpc=wss%3A%2F%2Fapi.testnet.torus.network` (the testnet API endpoint, URL-encoded). 16 | 6. Upload the code to the runtime using the [Polkadot Developer Interface](https://polkadot.js.org/apps/#/explorer). 17 | - Click on the `Sudo` button under the `Developer` tab.
18 | - Select the `system` pallet and the `setCode` extrinsic. 19 | - Select the .wasm file extracted on the 4th step. 20 | - Sign and Submit. 21 | 22 | > As of april 9th 2025 the try-runtime cli is broken, install the fork by running `cargo install --git https://github.com/renlabs-dev/try-runtime-cli --locked`. 23 | -------------------------------------------------------------------------------- /runtime/src/precompiles/balance_transfer.rs: -------------------------------------------------------------------------------- 1 | use pallet_evm::{ 2 | ExitError, ExitSucceed, PrecompileFailure, PrecompileHandle, PrecompileOutput, PrecompileResult, 3 | }; 4 | use polkadot_sdk::{ 5 | frame_system::RawOrigin, 6 | pallet_balances, 7 | sp_runtime::traits::{Dispatchable, UniqueSaturatedInto}, 8 | sp_std, 9 | }; 10 | use sp_std::vec; 11 | 12 | use crate::{ 13 | Runtime, RuntimeCall, 14 | precompiles::{bytes_to_account_id, get_method_id, get_slice}, 15 | }; 16 | 17 | pub const BALANCE_TRANSFER_INDEX: u64 = 2048; 18 | 19 | pub struct BalanceTransferPrecompile; 20 | 21 | impl BalanceTransferPrecompile { 22 | pub fn execute(handle: &mut impl PrecompileHandle) -> PrecompileResult { 23 | let input = handle.input(); 24 | 25 | // Check method signature 26 | let method = get_slice(input, 0, 4)?; 27 | if method != get_method_id("transfer(bytes32)") { 28 | return Ok(PrecompileOutput { 29 | exit_status: ExitSucceed::Returned, 30 | output: vec![], 31 | }); 32 | } 33 | 34 | // Get the transfer amount from the transaction 35 | let amount = handle.context().apparent_value; 36 | if amount.is_zero() { 37 | return Ok(PrecompileOutput { 38 | exit_status: ExitSucceed::Returned, 39 | output: vec![], 40 | }); 41 | } 42 | 43 | // Hardcoded precompile substrate address (equivalent to 0x800) 44 | const SOURCE_ADDRESS: [u8; 32] = [ 45 | 0x07, 0xec, 0x71, 0x2a, 0x5d, 0x38, 0x43, 0x4d, 0xdd, 0x03, 0x3f, 0x8f, 0x02, 0x4e, 46 | 0xcd, 0xfc, 0x4b, 0xb5, 0x95, 0x1c, 0x13, 0xc3, 0x08, 0x5c, 0x39, 0x9c, 0x8a, 
0x5f, 47 | 0x62, 0x93, 0x70, 0x5d, 48 | ]; 49 | 50 | // Get destination address from input 51 | let destination_bytes = get_slice(input, 4, 36)?; 52 | 53 | // Convert addresses to AccountId32 54 | let source = bytes_to_account_id(&SOURCE_ADDRESS)?; 55 | let destination = bytes_to_account_id(destination_bytes)?; 56 | 57 | // Create the transfer call 58 | let transfer_call = 59 | RuntimeCall::Balances(pallet_balances::Call::::transfer_allow_death { 60 | dest: destination.into(), 61 | value: amount.unique_saturated_into(), 62 | }); 63 | 64 | transfer_call 65 | .dispatch(RawOrigin::Signed(source).into()) 66 | .map_err(|_| PrecompileFailure::Error { 67 | exit_status: ExitError::OutOfFund, 68 | })?; 69 | 70 | Ok(PrecompileOutput { 71 | exit_status: ExitSucceed::Returned, 72 | output: vec![], 73 | }) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /node/src/chain_spec.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | use polkadot_sdk::{ 19 | sc_service::{ChainType, Properties}, 20 | *, 21 | }; 22 | use serde_json::{Value, json}; 23 | use torus_runtime::WASM_BINARY; 24 | 25 | /// This is a specialization of the general Substrate ChainSpec type. 
26 | pub type ChainSpec = sc_service::GenericChainSpec; 27 | 28 | fn props() -> Properties { 29 | let mut properties = Properties::new(); 30 | properties.insert("tokenDecimals".to_string(), 18.into()); 31 | properties.insert("tokenSymbol".to_string(), "TORUS".into()); 32 | properties 33 | } 34 | 35 | pub fn development_config() -> Result { 36 | Ok(ChainSpec::builder( 37 | WASM_BINARY.expect("Development wasm not available"), 38 | Default::default(), 39 | ) 40 | .with_name("Torus") 41 | .with_id("torus-devnet") 42 | .with_protocol_id("torus") 43 | .with_chain_type(ChainType::Development) 44 | .with_genesis_config_patch(testnet_genesis()) 45 | .with_properties(props()) 46 | .build()) 47 | } 48 | 49 | /// Configure initial storage state for FRAME pallets. 50 | fn testnet_genesis() -> Value { 51 | use polkadot_sdk::{ 52 | polkadot_sdk_frame::traits::Get, 53 | sp_keyring::{Ed25519Keyring, Sr25519Keyring}, 54 | }; 55 | use torus_runtime::{ 56 | BalancesConfig, SudoConfig, 57 | interface::{Balance, MinimumBalance}, 58 | }; 59 | 60 | let endowment = >::get().max(1) * 1000; 61 | let balances = Sr25519Keyring::iter() 62 | .map(|a| (a.to_account_id(), endowment)) 63 | .collect::>(); 64 | 65 | let aura = [Sr25519Keyring::Alice, Sr25519Keyring::Bob]; 66 | let grandpa = [Ed25519Keyring::Alice, Ed25519Keyring::Bob]; 67 | 68 | json!({ 69 | "balances": BalancesConfig { balances }, 70 | "sudo": SudoConfig { key: Some(Sr25519Keyring::Alice.to_account_id()) }, 71 | "aura": { 72 | "authorities": aura.iter().map(|x| (x.public().to_string())).collect::>(), 73 | }, 74 | "grandpa": { 75 | "authorities": grandpa.iter().map(|x| (x.public().to_string(), 1)).collect::>(), 76 | }, 77 | }) 78 | } 79 | -------------------------------------------------------------------------------- /node/src/cli.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) Parity Technologies (UK) Ltd. 
4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | use polkadot_sdk::{sc_cli::RunCmd, *}; 19 | 20 | pub mod eth; 21 | 22 | #[derive(Clone, Copy, Debug, Default)] 23 | pub enum Consensus { 24 | #[default] 25 | Aura, 26 | ManualSeal(u64), 27 | InstantSeal, 28 | } 29 | 30 | impl std::str::FromStr for Consensus { 31 | type Err = String; 32 | 33 | fn from_str(s: &str) -> Result { 34 | Ok(if s == "aura" { 35 | Consensus::Aura 36 | } else if s == "instant-seal" { 37 | Consensus::InstantSeal 38 | } else if let Some(block_time) = s.strip_prefix("manual-seal-") { 39 | Consensus::ManualSeal(block_time.parse().map_err(|_| "invalid block time")?) 40 | } else { 41 | return Err("incorrect consensus identifier".into()); 42 | }) 43 | } 44 | } 45 | 46 | #[derive(Debug, clap::Parser)] 47 | pub struct Cli { 48 | #[command(subcommand)] 49 | pub subcommand: Option, 50 | 51 | #[arg(long, default_value = "aura")] 52 | pub consensus: Consensus, 53 | 54 | #[clap(flatten)] 55 | pub run: RunCmd, 56 | 57 | #[command(flatten)] 58 | pub eth: eth::EthConfiguration, 59 | } 60 | 61 | #[derive(Debug, clap::Subcommand)] 62 | #[allow(clippy::large_enum_variant)] 63 | pub enum Subcommand { 64 | /// Key management cli utilities 65 | #[command(subcommand)] 66 | Key(sc_cli::KeySubcommand), 67 | 68 | /// Build a chain specification. 69 | BuildSpec(sc_cli::BuildSpecCmd), 70 | 71 | /// Validate blocks. 
72 | CheckBlock(sc_cli::CheckBlockCmd), 73 | 74 | /// Export blocks. 75 | ExportBlocks(sc_cli::ExportBlocksCmd), 76 | 77 | /// Export the state of a given block into a chain spec. 78 | ExportState(sc_cli::ExportStateCmd), 79 | 80 | /// Import blocks. 81 | ImportBlocks(sc_cli::ImportBlocksCmd), 82 | 83 | /// Remove the whole chain. 84 | PurgeChain(sc_cli::PurgeChainCmd), 85 | 86 | /// Revert the chain to a previous state. 87 | Revert(sc_cli::RevertCmd), 88 | 89 | /// Db meta columns information. 90 | ChainInfo(sc_cli::ChainInfoCmd), 91 | 92 | /// Sub-commands concerned with benchmarking. 93 | #[command(subcommand)] 94 | Benchmark(frame_benchmarking_cli::BenchmarkCmd), 95 | } 96 | -------------------------------------------------------------------------------- /scripts/adjust-spec-file.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import json 5 | 6 | # Utils 7 | 8 | ALICE = "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" 9 | 10 | def load_json(path: str): 11 | with open(path, 'r') as f: 12 | return json.load(f) 13 | 14 | 15 | # CLI args 16 | 17 | parser = argparse.ArgumentParser( 18 | description='Adjust chain spec file with node configuration') 19 | arg = parser.add_argument 20 | 21 | arg('node_env', help='Node environment (e.g. 
mainnet, testnet)', type=str) 22 | arg('spec_file', help='Path to the input chain spec file', type=str) 23 | 24 | arg("--bootnodes-file", help="Path to the bootnodes file (JSON)", type=str) 25 | arg("--aura-list-file", help="Path to the aura list file (JSON)", type=str) 26 | arg("--gran-list-file", help="Path to the gran list file (JSON)", type=str) 27 | arg("--balances-file", help="Path to the balances file (JSON)", type=str) 28 | 29 | arg("--merge-balances", 30 | help="Merge external balances with the spec file balances", action="store_true") 31 | 32 | arg("--sudo-key", help="Sudo key to use", type=str) 33 | arg("--name", help="Node name", type=str, default="Torus") 34 | 35 | args = parser.parse_args() 36 | 37 | node_env = args.node_env 38 | base_spec_file = args.spec_file 39 | bootnodes_path = args.bootnodes_file 40 | aura_list_path = args.aura_list_file 41 | gran_list_path = args.gran_list_file 42 | 43 | 44 | spec_data = load_json(base_spec_file) 45 | 46 | # == Node config / metadata == 47 | 48 | if node_env == "mainnet": 49 | spec_data['chainType'] = "Live" 50 | spec_data['id'] = "torus" 51 | else: 52 | spec_data['chainType'] = "Development" 53 | spec_data['id'] = f"torus-{node_env}" 54 | 55 | if args.name: 56 | spec_data['name'] = args.name 57 | 58 | if bootnodes_path: 59 | # Inject bootnodes 60 | bootnodes = load_json(bootnodes_path) 61 | spec_data['bootNodes'] = bootnodes 62 | 63 | # == Runtime values patch == 64 | 65 | patch_obj = spec_data['genesis']['runtimeGenesis']['patch'] 66 | 67 | if aura_list_path: 68 | # Inject AURA authority pub key list 69 | aura_list = load_json(aura_list_path) 70 | patch_obj['aura']['authorities'] = aura_list 71 | 72 | if gran_list_path: 73 | # Inject GRANDPA authority pub key list 74 | gran_list = load_json(gran_list_path) 75 | patch_obj['grandpa']['authorities'] = gran_list 76 | 77 | if args.sudo_key: 78 | patch_obj['sudo']['key'] = args.sudo_key 79 | elif node_env != "mainnet": 80 | # Check sudo key for mainnet is not Alice 
81 | assert patch_obj['sudo']['key'] == ALICE 82 | 83 | if args.balances_file: 84 | balances = load_json(args.balances_file) 85 | if args.merge_balances: 86 | if node_env == "mainnet": 87 | raise RuntimeError("Cannot merge balances on mainnet") 88 | patch_obj['balances']['balances'] += balances 89 | else: 90 | patch_obj['balances']['balances'] = balances 91 | 92 | 93 | json_txt = json.dumps(spec_data, indent=2) 94 | print(json_txt) 95 | -------------------------------------------------------------------------------- /pallets/permission0/src/permission/curator.rs: -------------------------------------------------------------------------------- 1 | use bitflags::bitflags; 2 | use codec::{Decode, Encode, MaxEncodedLen}; 3 | use polkadot_sdk::{ 4 | frame_support::{CloneNoBound, DebugNoBound, EqNoBound, PartialEqNoBound}, 5 | polkadot_sdk_frame::prelude::BlockNumberFor, 6 | sp_runtime::{BoundedBTreeMap, BoundedBTreeSet}, 7 | }; 8 | use scale_info::TypeInfo; 9 | 10 | use crate::{Config, Permissions}; 11 | 12 | use super::PermissionId; 13 | 14 | #[derive( 15 | CloneNoBound, 16 | Copy, 17 | DebugNoBound, 18 | Encode, 19 | Decode, 20 | EqNoBound, 21 | PartialEqNoBound, 22 | TypeInfo, 23 | MaxEncodedLen, 24 | )] 25 | pub struct CuratorPermissions(u32); 26 | 27 | bitflags! { 28 | impl CuratorPermissions: u32 { 29 | /// Able to appoint other curators. Though not used at the moment, 30 | /// it will be valuable when we remove the SUDO key/multisig. 
31 | const ROOT = 0b0000_0001; 32 | /// Permission to review and process agent applications 33 | const APPLICATION_REVIEW = 0b0000_0010; 34 | /// Permission to manage the whitelist (add/remove accounts) 35 | const WHITELIST_MANAGE = 0b0000_0100; 36 | /// Permission to apply penalty factors to agents 37 | const PENALTY_CONTROL = 0b0000_1000; 38 | /// Permission to toggle agent freezing 39 | const AGENT_FREEZING_TOGGLING = 0b0001_0000; 40 | /// Permission to toggle namespace freezing 41 | const NAMESPACE_FREEZING_TOGGLING = 0b0010_0000; 42 | } 43 | } 44 | 45 | #[derive(Encode, Decode, CloneNoBound, PartialEq, TypeInfo, MaxEncodedLen, DebugNoBound)] 46 | #[scale_info(skip_type_params(T))] 47 | pub struct CuratorScope { 48 | pub recipient: T::AccountId, 49 | pub flags: BoundedBTreeMap< 50 | Option, 51 | CuratorPermissions, 52 | T::MaxCuratorSubpermissionsPerPermission, 53 | >, 54 | pub cooldown: Option>, 55 | /// Maximum number of instances of this permission 56 | pub max_instances: u32, 57 | /// Children permissions 58 | pub children: BoundedBTreeSet, 59 | } 60 | 61 | impl CuratorScope { 62 | pub fn has_permission(&self, permission: CuratorPermissions) -> bool { 63 | self.flags.iter().any(|(_, p)| p.contains(permission)) 64 | } 65 | } 66 | 67 | impl CuratorScope { 68 | /// Cleanup operations when permission is revoked or expired 69 | pub(crate) fn cleanup( 70 | &self, 71 | permission_id: polkadot_sdk::sp_core::H256, 72 | _last_execution: &Option>, 73 | _delegator: &T::AccountId, 74 | ) { 75 | for pid in self.flags.keys().cloned().flatten() { 76 | Permissions::::mutate_extant(pid, |parent| { 77 | if let Some(children) = parent.children_mut() { 78 | children.remove(&permission_id); 79 | } 80 | }); 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /pallets/torus0/tests/burn.rs: -------------------------------------------------------------------------------- 1 | use test_utils::Test; 2 | 3 | #[test] 4 | fn 
burn_with_reached_interval() { 5 | test_utils::new_test_ext().execute_with(|| { 6 | pallet_torus0::BurnConfig::::mutate(|burn_config| { 7 | burn_config.min_burn = 10; 8 | burn_config.max_burn = 1000; 9 | burn_config.target_registrations_interval = 200; 10 | burn_config.target_registrations_per_interval = 27; 11 | burn_config.adjustment_alpha = 9223372036854775807; 12 | }); 13 | 14 | pallet_torus0::burn::adjust_burn::(2200); 15 | 16 | assert_eq!(pallet_torus0::Burn::::get(), 10); 17 | assert_eq!(pallet_torus0::RegistrationsThisInterval::::get(), 0); 18 | }); 19 | } 20 | 21 | #[test] 22 | fn burn_with_min_burn() { 23 | test_utils::new_test_ext().execute_with(|| { 24 | pallet_torus0::BurnConfig::::mutate(|burn_config| { 25 | burn_config.min_burn = 10; 26 | burn_config.max_burn = 1000; 27 | burn_config.target_registrations_interval = 200; 28 | burn_config.target_registrations_per_interval = 25; 29 | burn_config.adjustment_alpha = 9223372036854775807; 30 | }); 31 | 32 | pallet_torus0::burn::adjust_burn::(2200); 33 | 34 | assert_eq!(pallet_torus0::Burn::::get(), 10); 35 | assert_eq!(pallet_torus0::RegistrationsThisInterval::::get(), 0); 36 | }); 37 | } 38 | 39 | #[test] 40 | fn burn_with_max_burn() { 41 | test_utils::new_test_ext().execute_with(|| { 42 | pallet_torus0::BurnConfig::::mutate(|burn_config| { 43 | burn_config.min_burn = 10; 44 | burn_config.max_burn = 1000; 45 | burn_config.target_registrations_interval = 200; 46 | burn_config.target_registrations_per_interval = 25; 47 | burn_config.adjustment_alpha = 9223372036854775807; 48 | }); 49 | 50 | pallet_torus0::Burn::::set(35); 51 | pallet_torus0::RegistrationsThisInterval::::set(3584); 52 | 53 | pallet_torus0::burn::adjust_burn::(22000); 54 | 55 | assert_eq!(pallet_torus0::Burn::::get(), 1000); 56 | assert_eq!(pallet_torus0::RegistrationsThisInterval::::get(), 0); 57 | }); 58 | } 59 | 60 | #[test] 61 | fn burn_in_bounds() { 62 | test_utils::new_test_ext().execute_with(|| { 63 | 
pallet_torus0::BurnConfig::::mutate(|burn_config| { 64 | burn_config.min_burn = 10; 65 | burn_config.max_burn = 1500; 66 | burn_config.target_registrations_interval = 200; 67 | burn_config.target_registrations_per_interval = 25; 68 | burn_config.adjustment_alpha = 9223372036854775807; 69 | }); 70 | 71 | pallet_torus0::Burn::::set(35); 72 | pallet_torus0::RegistrationsThisInterval::::set(3584); 73 | 74 | pallet_torus0::burn::adjust_burn::(22000); 75 | 76 | assert_eq!(pallet_torus0::Burn::::get(), 1280); 77 | assert_eq!(pallet_torus0::RegistrationsThisInterval::::get(), 0); 78 | }); 79 | } 80 | -------------------------------------------------------------------------------- /client/codegen/src/parser/mod.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | 3 | use syn::{Item, ItemImpl, ItemMod, Type, TypePath}; 4 | 5 | use crate::{ 6 | ir::PalletPattern, 7 | parser::{calls::parse_calls_module, storage::parse_storage_module}, 8 | }; 9 | 10 | mod calls; 11 | mod storage; 12 | 13 | const IGNORED_MODULES: [&str; 8] = [ 14 | "runtime_types", 15 | "constants", 16 | "storage", 17 | "apis", 18 | "events", 19 | "calls", 20 | "runtime_apis", 21 | "root_mod", 22 | ]; 23 | 24 | pub fn parse_api_file(content: &str) -> Result, Box> { 25 | let syntax_tree = syn::parse_file(content)?; 26 | 27 | let api_mod = syntax_tree 28 | .items 29 | .iter() 30 | .find_map(|item| match item { 31 | Item::Mod(item_mod) => { 32 | if item_mod.ident == "api" { 33 | return Some(item_mod); 34 | } 35 | 36 | None 37 | } 38 | _ => None, 39 | }) 40 | .unwrap(); 41 | 42 | let Some((_, items)) = &api_mod.content else { 43 | return Err("api module not found".into()); 44 | }; 45 | 46 | let pallets: Result, Box> = items 47 | .iter() 48 | .filter_map(|item| match item { 49 | Item::Mod(item_mod) => { 50 | let mod_name = item_mod.ident.to_string(); 51 | if IGNORED_MODULES.contains(&mod_name.as_str()) { 52 | return None; 53 | } 54 | 55 | 
Some(parse_pallet_module(item_mod)) 56 | } 57 | _ => None, 58 | }) 59 | .collect(); 60 | 61 | let pallets = pallets?; 62 | 63 | if pallets.is_empty() { 64 | return Err("api module not found".into()); 65 | } 66 | 67 | Ok(pallets) 68 | } 69 | 70 | fn parse_pallet_module(pallet_mod: &ItemMod) -> Result> { 71 | let pallet_name = &pallet_mod.ident; 72 | 73 | let mut storages = Vec::new(); 74 | let mut calls = Vec::new(); 75 | 76 | if let Some((_, items)) = &pallet_mod.content { 77 | // Find the storage module 78 | for item in items { 79 | if let Item::Mod(item_mod) = item { 80 | if item_mod.ident == "storage" { 81 | storages.extend(parse_storage_module(item_mod, &pallet_name.to_string())?); 82 | } 83 | 84 | if item_mod.ident == "calls" { 85 | calls.extend(parse_calls_module(item_mod, pallet_name)?); 86 | } 87 | } 88 | } 89 | } 90 | 91 | Ok(PalletPattern { 92 | name: pallet_name.clone(), 93 | storages, 94 | calls, 95 | }) 96 | } 97 | 98 | fn is_api_impl(impl_item: &ItemImpl, name: &'static str) -> bool { 99 | if let Type::Path(TypePath { path, .. }) = &*impl_item.self_ty { 100 | if let Some(segment) = path.segments.last() { 101 | return segment.ident == name; 102 | } 103 | } 104 | false 105 | } 106 | 107 | fn type_to_string(ty: &Type) -> String { 108 | quote::quote! { #ty }.to_string() 109 | } 110 | -------------------------------------------------------------------------------- /runtime/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "torus-runtime" 3 | description = "Torus blockchain runtime." 
4 | version = "0.1.0" 5 | license = "MIT-0" 6 | authors.workspace = true 7 | edition.workspace = true 8 | 9 | [lints] 10 | workspace = true 11 | 12 | [features] 13 | default = ["std"] 14 | std = [ 15 | "codec/std", 16 | "scale-info/std", 17 | "serde_json/std", 18 | 19 | "substrate-wasm-builder", 20 | 21 | "polkadot-sdk/std", 22 | "pallet-torus0/std", 23 | "pallet-emission0/std", 24 | "pallet-governance/std", 25 | "pallet-permission0/std", 26 | 27 | # Frontier 28 | "fp-evm/std", 29 | "fp-rpc/std", 30 | "fp-self-contained/std", 31 | "pallet-ethereum/std", 32 | "pallet-evm/std", 33 | "pallet-evm-chain-id/std", 34 | "pallet-evm-precompile-modexp/std", 35 | "pallet-evm-precompile-sha3fips/std", 36 | "pallet-evm-precompile-simple/std", 37 | ] 38 | runtime-benchmarks = [ 39 | "polkadot-sdk/runtime-benchmarks", 40 | "pallet-torus0/runtime-benchmarks", 41 | "pallet-emission0/runtime-benchmarks", 42 | "pallet-governance/runtime-benchmarks", 43 | "pallet-permission0/runtime-benchmarks", 44 | 45 | # Frontier 46 | "pallet-ethereum/runtime-benchmarks", 47 | "pallet-evm/runtime-benchmarks", 48 | ] 49 | try-runtime = [ 50 | "polkadot-sdk/try-runtime", 51 | "pallet-torus0/try-runtime", 52 | "pallet-emission0/try-runtime", 53 | "pallet-governance/try-runtime", 54 | "pallet-permission0/try-runtime", 55 | 56 | # Frontier 57 | "fp-self-contained/try-runtime", 58 | ] 59 | testnet = ["pallet-torus0/testnet", "pallet-faucet/testnet"] 60 | 61 | [dependencies] 62 | serde_json = { workspace = true, features = ["alloc"] } 63 | 64 | # Substrate 65 | codec = { workspace = true, features = ["derive"] } 66 | scale-info = { features = ["derive", "serde"], workspace = true } 67 | polkadot-sdk = { workspace = true, features = [ 68 | "runtime", 69 | "pallet-balances", 70 | "pallet-sudo", 71 | "pallet-multisig", 72 | "pallet-aura", 73 | "pallet-grandpa", 74 | "pallet-timestamp", 75 | "pallet-transaction-payment", 76 | "pallet-transaction-payment-rpc-runtime-api", 77 | "frame-metadata-hash-extension", 
78 | ] } 79 | 80 | # Frontier / EVM 81 | fp-evm = { workspace = true, features = ["serde"] } 82 | fp-rpc.workspace = true 83 | fp-self-contained = { workspace = true, features = ["serde"] } 84 | pallet-ethereum.workspace = true 85 | pallet-evm.workspace = true 86 | pallet-evm-chain-id.workspace = true 87 | pallet-evm-precompile-modexp.workspace = true 88 | pallet-evm-precompile-sha3fips.workspace = true 89 | pallet-evm-precompile-simple.workspace = true 90 | 91 | # Local 92 | pallet-torus0.workspace = true 93 | pallet-emission0.workspace = true 94 | pallet-governance.workspace = true 95 | pallet-permission0.workspace = true 96 | 97 | pallet-torus0-api.workspace = true 98 | pallet-governance-api.workspace = true 99 | pallet-permission0-api.workspace = true 100 | 101 | pallet-faucet = { workspace = true, default-features = false } 102 | 103 | [build-dependencies] 104 | substrate-wasm-builder = { optional = true, workspace = true, features = [ 105 | "metadata-hash", 106 | ] } 107 | -------------------------------------------------------------------------------- /client/src/client.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use subxt::{backend::rpc::RpcClient, blocks::Block, utils::H256, OnlineClient, PolkadotConfig}; 4 | 5 | use crate::chain::Chain; 6 | 7 | #[derive(Clone)] 8 | pub struct TorusClient { 9 | pub(crate) rpc_client: RpcClient, 10 | pub(crate) client: OnlineClient, 11 | pub(crate) url: String, 12 | _pd: PhantomData, 13 | } 14 | 15 | impl TorusClient<()> { 16 | const MAINNET_URL: &'static str = "wss://api.torus.network"; 17 | const TESTNET_URL: &'static str = "wss://api.testnet.torus.network"; 18 | const DEVNET_URL: &'static str = "ws://127.0.0.1:9944"; 19 | 20 | #[cfg(feature = "mainnet")] 21 | pub async fn for_mainnet() -> crate::Result> { 22 | let rpc_client = RpcClient::from_insecure_url(Self::MAINNET_URL).await?; 23 | let client = 
OnlineClient::from_rpc_client(rpc_client.clone()).await?; 24 | 25 | Ok(TorusClient { 26 | rpc_client, 27 | client, 28 | url: Self::MAINNET_URL.to_string(), 29 | _pd: PhantomData, 30 | }) 31 | } 32 | 33 | #[cfg(feature = "testnet")] 34 | pub async fn for_testnet() -> crate::Result> { 35 | let rpc_client = RpcClient::from_insecure_url(Self::TESTNET_URL).await?; 36 | let client = OnlineClient::from_rpc_client(rpc_client.clone()).await?; 37 | 38 | Ok(TorusClient { 39 | rpc_client, 40 | client, 41 | url: Self::TESTNET_URL.to_string(), 42 | _pd: PhantomData, 43 | }) 44 | } 45 | 46 | #[cfg(feature = "devnet")] 47 | pub async fn for_devnet() -> crate::Result> { 48 | let rpc_client = RpcClient::from_insecure_url(Self::DEVNET_URL).await?; 49 | let client = OnlineClient::from_rpc_client(rpc_client.clone()).await?; 50 | 51 | Ok(TorusClient { 52 | rpc_client, 53 | client, 54 | url: Self::DEVNET_URL.to_string(), 55 | _pd: PhantomData, 56 | }) 57 | } 58 | 59 | pub async fn for_url(url: impl AsRef) -> crate::Result> { 60 | let rpc_client = RpcClient::from_insecure_url(url.as_ref()).await?; 61 | let client = OnlineClient::from_rpc_client(rpc_client.clone()).await?; 62 | 63 | Ok(TorusClient { 64 | rpc_client, 65 | client, 66 | url: url.as_ref().to_string(), 67 | _pd: PhantomData, 68 | }) 69 | } 70 | } 71 | 72 | impl TorusClient { 73 | /// Get the latest block 74 | pub async fn latest_block( 75 | &self, 76 | ) -> crate::Result>> { 77 | Ok(self.client.blocks().at_latest().await?) 78 | } 79 | 80 | /// Get a block via its hash 81 | pub async fn block_at( 82 | &self, 83 | hash: H256, 84 | ) -> crate::Result>> { 85 | Ok(self.client.blocks().at(hash).await?) 
86 | } 87 | 88 | /// Access to the underlying subxt client 89 | pub fn inner_client(&self) -> &OnlineClient { 90 | &self.client 91 | } 92 | 93 | /// Access to the underlying subxt client 94 | pub fn inner_rpc_client(&self) -> &RpcClient { 95 | &self.rpc_client 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /pallets/emission0/src/benchmarking.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "runtime-benchmarks")] 2 | 3 | use pallet_governance_api::GovernanceApi; 4 | use pallet_torus0_api::Torus0Api; 5 | use polkadot_sdk::{ 6 | frame_benchmarking::{account, v2::*}, 7 | frame_system::RawOrigin, 8 | sp_std::vec, 9 | }; 10 | 11 | use crate::*; 12 | 13 | #[benchmarks] 14 | mod benchmarks { 15 | use super::*; 16 | 17 | #[benchmark] 18 | fn set_weights() { 19 | let module_key: T::AccountId = account("agent", 0, 2); 20 | let module_key2: T::AccountId = account("agent2", 0, 3); 21 | 22 | ::set_allocator(&module_key2); 23 | 24 | ::force_register_agent(&module_key, b"agent".to_vec(), vec![], vec![]) 25 | .expect("failed to register agent"); 26 | ::force_register_agent(&module_key2, b"agent2".to_vec(), vec![], vec![]) 27 | .expect("failed to register agent"); 28 | 29 | ::force_set_whitelisted(&module_key); 30 | ::force_set_whitelisted(&module_key2); 31 | 32 | ::set_allocator(&module_key2); 33 | let _ = 34 | ::deposit_creating(&module_key2, ::min_validator_stake() * 2); 35 | 36 | ::force_set_stake( 37 | &module_key2, 38 | &module_key2, 39 | ::min_validator_stake(), 40 | ) 41 | .unwrap(); 42 | 43 | let weights = vec![(module_key, 10)]; 44 | 45 | #[extrinsic_call] 46 | set_weights(RawOrigin::Signed(module_key2), weights) 47 | } 48 | 49 | #[benchmark] 50 | fn delegate_weight_control() { 51 | let module_key: T::AccountId = account("agent", 0, 2); 52 | let module_key2: T::AccountId = account("agent2", 0, 3); 53 | 54 | ::force_register_agent(&module_key, b"agent".to_vec(), 
vec![], vec![]) 55 | .expect("failed to register agent"); 56 | ::force_register_agent(&module_key2, b"agent2".to_vec(), vec![], vec![]) 57 | .expect("failed to register agent"); 58 | 59 | ::force_set_whitelisted(&module_key); 60 | ::force_set_whitelisted(&module_key2); 61 | 62 | ::set_allocator(&module_key2); 63 | 64 | #[extrinsic_call] 65 | delegate_weight_control(RawOrigin::Signed(module_key), module_key2.clone()) 66 | } 67 | 68 | #[benchmark] 69 | fn regain_weight_control() { 70 | let module_key: T::AccountId = account("agent", 0, 2); 71 | let module_key2: T::AccountId = account("agent2", 0, 3); 72 | 73 | ::force_register_agent(&module_key, b"agent".to_vec(), vec![], vec![]) 74 | .expect("failed to register agent"); 75 | ::force_register_agent(&module_key2, b"agent2".to_vec(), vec![], vec![]) 76 | .expect("failed to register agent"); 77 | 78 | ::force_set_whitelisted(&module_key); 79 | ::force_set_whitelisted(&module_key2); 80 | 81 | ::set_allocator(&module_key); 82 | ::set_allocator(&module_key2); 83 | 84 | Pallet::::delegate_weight_control( 85 | RawOrigin::Signed(module_key.clone()).into(), 86 | module_key2.clone(), 87 | ) 88 | .expect("failed to delegate weight control"); 89 | 90 | #[extrinsic_call] 91 | regain_weight_control(RawOrigin::Signed(module_key)) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /runtime/src/precompiles/mod.rs: -------------------------------------------------------------------------------- 1 | use pallet_evm::{ 2 | ExitError, IsPrecompileResult, Precompile, PrecompileFailure, PrecompileHandle, 3 | PrecompileResult, PrecompileSet, 4 | }; 5 | use pallet_evm_precompile_modexp::Modexp; 6 | use pallet_evm_precompile_sha3fips::Sha3FIPS256; 7 | use pallet_evm_precompile_simple::{ECRecover, ECRecoverPublicKey, Identity, Ripemd160, Sha256}; 8 | use polkadot_sdk::{ 9 | sp_core::{H160, hashing::keccak_256}, 10 | sp_runtime::AccountId32, 11 | }; 12 | mod balance_transfer; 13 | 14 | use 
balance_transfer::*; 15 | 16 | pub struct FrontierPrecompiles; 17 | 18 | impl FrontierPrecompiles { 19 | pub fn used_addresses() -> [H160; 8] { 20 | [ 21 | hash(1), 22 | hash(2), 23 | hash(3), 24 | hash(4), 25 | hash(5), 26 | hash(1024), 27 | hash(1025), 28 | hash(BALANCE_TRANSFER_INDEX), 29 | ] 30 | } 31 | } 32 | impl PrecompileSet for FrontierPrecompiles { 33 | fn execute(&self, handle: &mut impl PrecompileHandle) -> Option { 34 | match handle.code_address() { 35 | // Ethereum precompiles : 36 | a if a == hash(1) => Some(ECRecover::execute(handle)), 37 | a if a == hash(2) => Some(Sha256::execute(handle)), 38 | a if a == hash(3) => Some(Ripemd160::execute(handle)), 39 | a if a == hash(4) => Some(Identity::execute(handle)), 40 | a if a == hash(5) => Some(Modexp::execute(handle)), 41 | // Non-Frontier specific nor Ethereum precompiles : 42 | a if a == hash(1024) => Some(Sha3FIPS256::execute(handle)), 43 | a if a == hash(1025) => Some(ECRecoverPublicKey::execute(handle)), 44 | a if a == hash(BALANCE_TRANSFER_INDEX) => { 45 | Some(BalanceTransferPrecompile::execute(handle)) 46 | } 47 | _ => None, 48 | } 49 | } 50 | 51 | fn is_precompile(&self, address: H160, _gas: u64) -> IsPrecompileResult { 52 | IsPrecompileResult::Answer { 53 | is_precompile: Self::used_addresses().contains(&address), 54 | extra_cost: 0, 55 | } 56 | } 57 | } 58 | 59 | fn hash(a: u64) -> H160 { 60 | H160::from_low_u64_be(a) 61 | } 62 | 63 | /// Returns Ethereum method ID from an str method signature 64 | pub fn get_method_id(method_signature: &str) -> [u8; 4] { 65 | // Calculate the full Keccak-256 hash of the method signature 66 | let hash = keccak_256(method_signature.as_bytes()); 67 | 68 | // Extract the first 4 bytes to get the method ID 69 | [hash[0], hash[1], hash[2], hash[3]] 70 | } 71 | 72 | /// Convert bytes to AccountId32 with PrecompileFailure as Error 73 | /// which consumes all gas 74 | pub fn bytes_to_account_id(account_id_bytes: &[u8]) -> Result { 75 | 
AccountId32::try_from(account_id_bytes).map_err(|_| { 76 | // log::info!("Error parsing account id bytes {:?}", account_id_bytes); 77 | PrecompileFailure::Error { 78 | exit_status: ExitError::InvalidRange, 79 | } 80 | }) 81 | } 82 | 83 | /// Takes a slice from bytes with PrecompileFailure as Error 84 | pub fn get_slice(data: &[u8], from: usize, to: usize) -> Result<&[u8], PrecompileFailure> { 85 | let maybe_slice = data.get(from..to); 86 | if let Some(slice) = maybe_slice { 87 | Ok(slice) 88 | } else { 89 | Err(PrecompileFailure::Error { 90 | exit_status: ExitError::InvalidRange, 91 | }) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /node/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "torus-node" 3 | description = "Torus Substrate Node" 4 | version = "0.2.0" 5 | license = "MIT-0" 6 | authors.workspace = true 7 | edition.workspace = true 8 | 9 | [features] 10 | default = ["std"] 11 | std = ["torus-runtime/std", "polkadot-sdk/std"] 12 | runtime-benchmarks = [ 13 | "torus-runtime/runtime-benchmarks", 14 | "polkadot-sdk/runtime-benchmarks", 15 | ] 16 | testnet = ["torus-runtime/testnet"] 17 | 18 | [dependencies] 19 | 20 | clap = { workspace = true, features = ["derive"] } 21 | futures = { workspace = true, features = ["thread-pool"] } 22 | futures-timer.workspace = true 23 | jsonrpsee = { workspace = true, features = ["server"] } 24 | serde_json = { workspace = true, default-features = true } 25 | 26 | # Substrate 27 | polkadot-sdk = { workspace = true, features = [ 28 | "std", 29 | "frame-benchmarking-cli", 30 | "sc-allocator", 31 | "sc-authority-discovery", 32 | "sc-basic-authorship", 33 | "sc-block-builder", 34 | "sc-chain-spec", 35 | "sc-cli", 36 | "sc-client-api", 37 | "sc-client-db", 38 | "sc-consensus", 39 | "sc-consensus-aura", 40 | "sc-consensus-epochs", 41 | "sc-consensus-grandpa", 42 | "sc-consensus-grandpa-rpc", 43 | 
"sc-consensus-manual-seal", 44 | "sc-consensus-pow", 45 | "sc-consensus-slots", 46 | "sc-executor", 47 | "sc-executor-wasmtime", 48 | "sc-informant", 49 | "sc-keystore", 50 | "sc-network", 51 | "sc-network-common", 52 | "sc-network-gossip", 53 | "sc-network-statement", 54 | "sc-network-sync", 55 | "sc-network-transactions", 56 | "sc-network-types", 57 | "sc-offchain", 58 | "sc-proposer-metrics", 59 | "sc-rpc", 60 | "sc-rpc-api", 61 | "sc-rpc-server", 62 | "sc-rpc-spec-v2", 63 | "sc-service", 64 | "sc-state-db", 65 | "sc-statement-store", 66 | "sc-storage-monitor", 67 | "sc-sync-state-rpc", 68 | "sc-sysinfo", 69 | "sc-telemetry", 70 | "sc-tracing", 71 | "sc-transaction-pool", 72 | "sc-transaction-pool-api", 73 | "sc-utils", 74 | "sp-blockchain", 75 | "sp-consensus", 76 | "sp-core-hashing", 77 | "sp-core-hashing-proc-macro", 78 | "sp-database", 79 | "sp-maybe-compressed-blob", 80 | "sp-panic-handler", 81 | "sp-rpc", 82 | "pallet-transaction-payment-rpc", 83 | "staging-chain-spec-builder", 84 | "staging-node-inspect", 85 | "staging-tracking-allocator", 86 | "subkey", 87 | "substrate-frame-rpc-support", 88 | "substrate-frame-rpc-system", 89 | "substrate-prometheus-endpoint", 90 | "substrate-rpc-client", 91 | ] } 92 | 93 | # Frontier / EVM 94 | fc-api.workspace = true 95 | fc-cli.workspace = true 96 | fc-consensus.workspace = true 97 | fc-db.workspace = true 98 | fc-mapping-sync.workspace = true 99 | fc-rpc = { workspace = true, features = [ 100 | "txpool", 101 | "rpc-binary-search-estimate", 102 | ] } 103 | fc-rpc-core.workspace = true 104 | fc-storage.workspace = true 105 | fp-account.workspace = true 106 | fp-dynamic-fee = { workspace = true, features = ["default"] } 107 | fp-evm = { workspace = true, features = ["default"] } 108 | fp-rpc = { workspace = true, features = ["default"] } 109 | 110 | # Local 111 | torus-runtime.workspace = true 112 | pallet-permission0-rpc.workspace = true 113 | pallet-torus0-rpc.workspace = true 114 | 115 | [build-dependencies] 116 | 
polkadot-sdk = { workspace = true, features = ["substrate-build-script-utils"] } 117 | -------------------------------------------------------------------------------- /pallets/governance/src/voting.rs: -------------------------------------------------------------------------------- 1 | use polkadot_sdk::frame_support::{dispatch::DispatchResult, ensure}; 2 | 3 | use crate::{AccountIdOf, Error, Event, Proposals, proposal::ProposalStatus}; 4 | 5 | /// Casts a vote on behalf of a voter. 6 | pub fn add_vote( 7 | voter: AccountIdOf, 8 | proposal_id: u64, 9 | agree: bool, 10 | ) -> DispatchResult { 11 | let Some(mut proposal) = Proposals::::get(proposal_id) else { 12 | return Err(Error::::ProposalNotFound.into()); 13 | }; 14 | 15 | let crate::proposal::ProposalStatus::Open { 16 | votes_for, 17 | votes_against, 18 | .. 19 | } = &mut proposal.status 20 | else { 21 | return Err(Error::::ProposalClosed.into()); 22 | }; 23 | 24 | ensure!( 25 | !votes_for.contains(&voter) && !votes_against.contains(&voter), 26 | crate::Error::::AlreadyVoted 27 | ); 28 | 29 | let voter_delegated_stake = pallet_torus0::stake::sum_staked_by::(&voter); 30 | let voter_owned_stake = pallet_torus0::stake::sum_staking_to::(&voter); 31 | 32 | ensure!( 33 | voter_delegated_stake > 0 || voter_owned_stake > 0, 34 | crate::Error::::InsufficientStake 35 | ); 36 | 37 | if !crate::NotDelegatingVotingPower::::get().contains(&voter) && voter_delegated_stake == 0 { 38 | return Err(Error::::VoterIsDelegatingVotingPower.into()); 39 | } 40 | 41 | if agree { 42 | votes_for 43 | .try_insert(voter.clone()) 44 | .map_err(|_| Error::::InternalError)?; 45 | } else { 46 | votes_against 47 | .try_insert(voter.clone()) 48 | .map_err(|_| Error::::InternalError)?; 49 | } 50 | 51 | Proposals::::insert(proposal.id, proposal); 52 | crate::Pallet::::deposit_event(Event::::ProposalVoted(proposal_id, voter, agree)); 53 | Ok(()) 54 | } 55 | 56 | /// Removes the casted vote. 
57 | pub fn remove_vote(voter: AccountIdOf, proposal_id: u64) -> DispatchResult { 58 | let Ok(mut proposal) = Proposals::::try_get(proposal_id) else { 59 | return Err(Error::::ProposalNotFound.into()); 60 | }; 61 | 62 | let ProposalStatus::Open { 63 | votes_for, 64 | votes_against, 65 | .. 66 | } = &mut proposal.status 67 | else { 68 | return Err(Error::::ProposalClosed.into()); 69 | }; 70 | 71 | let removed = votes_for.remove(&voter) || votes_against.remove(&voter); 72 | 73 | // Check if the voter has actually voted on the proposal 74 | ensure!(removed, crate::Error::::NotVoted); 75 | 76 | // Update the proposal in storage 77 | Proposals::::insert(proposal.id, proposal); 78 | crate::Pallet::::deposit_event(Event::::ProposalVoteUnregistered(proposal_id, voter)); 79 | Ok(()) 80 | } 81 | 82 | /// Gives voting power delegation to the delegator's staked agents. 83 | pub fn enable_delegation(delegator: AccountIdOf) -> DispatchResult { 84 | crate::NotDelegatingVotingPower::::mutate(|delegators| { 85 | delegators.remove(&delegator); 86 | }); 87 | 88 | Ok(()) 89 | } 90 | 91 | /// Removes voting power delegation to the delegator's staked agents. 
92 | pub fn disable_delegation(delegator: AccountIdOf) -> DispatchResult { 93 | crate::NotDelegatingVotingPower::::mutate(|delegators| { 94 | delegators 95 | .try_insert(delegator.clone()) 96 | .map(|_| ()) 97 | .map_err(|_| Error::::InternalError.into()) 98 | }) 99 | } 100 | -------------------------------------------------------------------------------- /mcp/src/emission.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use crate::interfaces::runtime_types::{ 4 | bounded_collections::bounded_btree_map::BoundedBTreeMap, 5 | pallet_permission0::permission::{ 6 | EnforcementAuthority, PermissionDuration, RevocationTerms, 7 | stream::{DistributionControl, StreamAllocation}, 8 | }, 9 | sp_arithmetic::per_things::Percent, 10 | }; 11 | use rmcp::{ 12 | ErrorData, 13 | model::{CallToolResult, Content}, 14 | }; 15 | use torus_client::subxt::utils::H256; 16 | 17 | use crate::{Client, utils::keypair_from_name}; 18 | 19 | #[derive(schemars::JsonSchema, serde::Deserialize, serde::Serialize)] 20 | pub struct DelegateEmissionRequest { 21 | stream_hex: Option, 22 | agent_name: String, 23 | target_name: String, 24 | amount: u8, 25 | distribution: Distribution, 26 | duration: Duration, 27 | } 28 | 29 | #[derive(schemars::JsonSchema, serde::Deserialize, serde::Serialize)] 30 | pub enum Distribution { 31 | Manual, 32 | Automatic(u128), 33 | AtBlock(u64), 34 | Interval(u64), 35 | } 36 | 37 | impl Distribution { 38 | pub fn as_generated_type(&self) -> DistributionControl { 39 | match self { 40 | Distribution::Manual => DistributionControl::Manual, 41 | Distribution::Automatic(v) => DistributionControl::Automatic(*v), 42 | Distribution::AtBlock(v) => DistributionControl::AtBlock(*v), 43 | Distribution::Interval(v) => DistributionControl::Interval(*v), 44 | } 45 | } 46 | } 47 | 48 | #[derive(schemars::JsonSchema, serde::Deserialize, serde::Serialize)] 49 | pub enum Duration { 50 | UntilBlock(u64), 51 | Indefinite, 52 
}

impl Duration {
    /// Maps this request-level duration onto the generated runtime type.
    pub fn as_generated_type(&self) -> PermissionDuration {
        match self {
            Duration::UntilBlock(v) => PermissionDuration::UntilBlock(*v),
            Duration::Indefinite => PermissionDuration::Indefinite,
        }
    }
}

#[derive(schemars::JsonSchema, serde::Deserialize, serde::Serialize)]
pub enum Revocation {}

/// Delegates a stream permission from `agent_name` to `target_name`.
///
/// When `stream_hex` is provided it is parsed as the stream id; otherwise the
/// delegator's root namespace stream is resolved via RPC. Malformed input and
/// RPC failures are reported to the MCP caller as `ErrorData` instead of
/// panicking the server (the previous `.unwrap()` calls would abort it).
pub async fn delegate_emission(
    torus_client: &Client,
    request: DelegateEmissionRequest,
) -> Result<CallToolResult, ErrorData> {
    let source_keypair = keypair_from_name(&request.agent_name)?;
    let target_keypair = keypair_from_name(&request.target_name)?;

    let stream = if let Some(stream_hex) = request.stream_hex {
        // User-supplied hex: reject malformed input instead of panicking.
        H256::from_str(&stream_hex)
            .map_err(|_| ErrorData::invalid_request("invalid stream hex".to_string(), None))?
    } else {
        torus_client
            .rpc()
            .root_namespace_for_account(source_keypair.public_key().to_account_id())
            .await
            .map_err(|_| {
                ErrorData::invalid_request(
                    "failed to resolve root namespace stream for agent".to_string(),
                    None,
                )
            })?
    };

    match torus_client
        .permission0()
        .calls()
        .delegate_stream_permission_wait(
            BoundedBTreeMap(vec![(
                target_keypair.public_key().to_account_id(),
                u16::MAX,
            )]),
            StreamAllocation::Streams(BoundedBTreeMap(vec![(stream, Percent(request.amount))])),
            request.distribution.as_generated_type(),
            request.duration.as_generated_type(),
            RevocationTerms::RevocableByDelegator,
            EnforcementAuthority::None,
            None,
            None,
            source_keypair,
        )
        .await
    {
        Ok(_) => Ok(CallToolResult::success(vec![Content::text(
            "Successfully delegated",
        )])),
        // Surface the failure to the caller; `dbg!` debug leftover removed.
        Err(err) => Err(ErrorData::invalid_request(err.to_string(), None)),
    }
}

--------------------------------------------------------------------------------
/client/codegen/src/codegen/calls.rs:
--------------------------------------------------------------------------------

use proc_macro2::TokenStream;
use quote::{format_ident, quote};
use stringcase::pascal_case;
use syn::{Ident, Type};
5 | 6 | use crate::{ 7 | InterfaceSource, 8 | ir::{CallPattern, PalletPattern}, 9 | }; 10 | 11 | const UNSIGNED_CALLS: [&str; 1] = ["faucet"]; 12 | 13 | pub(super) fn generate_pallet_calls( 14 | network: &InterfaceSource, 15 | pallet: &PalletPattern, 16 | ) -> Option { 17 | let client_name = format_ident!("{}", pascal_case(&format!("{}_client", pallet.name))); 18 | let struct_name = format_ident!("{}", pascal_case(&format!("{}_calls", pallet.name))); 19 | let network_chain: Type = syn::parse_str(network.to_chain_type()).unwrap(); 20 | 21 | let functions: Vec = pallet 22 | .calls 23 | .iter() 24 | .map(|pattern| generate_pattern_wrappers(network, pattern)) 25 | .collect(); 26 | 27 | if functions.is_empty() { 28 | return None; 29 | } 30 | 31 | Some(quote! { 32 | impl #client_name<#network_chain> { 33 | pub fn calls(&self) -> #struct_name<#network_chain> { 34 | #struct_name { 35 | client: self.client.clone(), 36 | _pd: PhantomData 37 | } 38 | } 39 | } 40 | 41 | impl #struct_name<#network_chain> { 42 | #(#functions)* 43 | } 44 | }) 45 | } 46 | 47 | fn generate_pattern_wrappers(network: &InterfaceSource, pattern: &CallPattern) -> TokenStream { 48 | let fn_name = &pattern.name; 49 | let (param_idents, param_types) = &pattern 50 | .params 51 | .iter() 52 | .cloned() 53 | .collect::<(Vec, Vec)>(); 54 | 55 | let network = format_ident!("{}", network.to_str()); 56 | let pallet = &pattern.pallet; 57 | 58 | let wait_fn_name = format_ident!("{}_wait", fn_name); 59 | 60 | if !UNSIGNED_CALLS.contains(&fn_name.to_string().as_str()) { 61 | quote! { 62 | pub async fn #fn_name(&self, #(#param_idents: crate::interfaces::#network::api::#pallet::calls::#param_types,)* signer: impl subxt::tx::signer::Signer) -> crate::Result<::Hash> { 63 | let call = crate::interfaces::#network::api::tx().#pallet().#fn_name(#(#param_idents),*); 64 | Ok(self.client.tx().sign_and_submit_default(&call, &signer).await?) 
65 | } 66 | 67 | pub async fn #wait_fn_name(&self, #(#param_idents: crate::interfaces::#network::api::#pallet::calls::#param_types,)* signer: impl subxt::tx::signer::Signer) -> crate::Result<()> { 68 | let call = crate::interfaces::#network::api::tx().#pallet().#fn_name(#(#param_idents),*); 69 | 70 | self.client.tx().sign_and_submit_then_watch_default(&call, &signer).await?.wait_for_finalized_success().await?; 71 | 72 | Ok(()) 73 | } 74 | } 75 | } else { 76 | quote! { 77 | pub async fn #fn_name(&self, #(#param_idents: crate::interfaces::#network::api::#pallet::calls::#param_types,)*) -> crate::Result<::Hash> { 78 | let call = crate::interfaces::#network::api::tx().#pallet().#fn_name(#(#param_idents),*); 79 | Ok(self.client.tx().create_unsigned(&call)?.submit().await?) 80 | } 81 | 82 | pub async fn #wait_fn_name(&self, #(#param_idents: crate::interfaces::#network::api::#pallet::calls::#param_types,)*) -> crate::Result<()> { 83 | let call = crate::interfaces::#network::api::tx().#pallet().#fn_name(#(#param_idents),*); 84 | 85 | self.client.tx().create_unsigned(&call)?.submit_and_watch().await?.wait_for_finalized_success().await?; 86 | 87 | Ok(()) 88 | } 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /pallets/emission0/src/weight_control.rs: -------------------------------------------------------------------------------- 1 | use pallet_governance_api::GovernanceApi; 2 | use pallet_torus0_api::Torus0Api; 3 | use polkadot_sdk::{ 4 | frame_support::{dispatch::DispatchResult, ensure}, 5 | frame_system::ensure_signed, 6 | polkadot_sdk_frame::prelude::OriginFor, 7 | sp_core::ConstU32, 8 | sp_runtime::BoundedVec, 9 | sp_std, 10 | }; 11 | 12 | use crate::{ConsensusMember, ConsensusMembers}; 13 | 14 | pub fn set_weights( 15 | origin: OriginFor, 16 | mut weights: sp_std::vec::Vec<(T::AccountId, u16)>, 17 | ) -> DispatchResult { 18 | let acc_id = ensure_signed(origin)?; 19 | ::ensure_allocator(&acc_id)?; 20 | 21 | 
ensure!( 22 | !crate::WeightControlDelegation::::contains_key(&acc_id), 23 | crate::Error::::CannotSetWeightsWhileDelegating, 24 | ); 25 | 26 | ensure!( 27 | ::is_agent_registered(&acc_id) && ::is_whitelisted(&acc_id), 28 | crate::Error::::AgentIsNotRegistered 29 | ); 30 | 31 | let total_stake: u128 = ::staked_by(&acc_id) 32 | .iter() 33 | .map(|(_, stake)| *stake) 34 | .sum(); 35 | ensure!( 36 | total_stake >= ::min_validator_stake(), 37 | crate::Error::::NotEnoughStakeToSetWeights 38 | ); 39 | 40 | for (target, _) in &weights { 41 | ensure!( 42 | &acc_id != target, 43 | crate::Error::::CannotSetWeightsForSelf 44 | ); 45 | 46 | ensure!( 47 | ::is_agent_registered(target) && ::is_whitelisted(target), 48 | crate::Error::::AgentIsNotRegistered 49 | ); 50 | } 51 | 52 | weights.sort_unstable_by(|(a, _), (b, _)| a.cmp(b)); 53 | weights.dedup(); 54 | 55 | let weights: BoundedVec<_, ConstU32<{ u32::MAX }>> = 56 | BoundedVec::try_from(weights).map_err(|_| crate::Error::::WeightSetTooLarge)?; 57 | 58 | ConsensusMembers::::mutate(&acc_id, |member: &mut Option>| { 59 | let member = member.get_or_insert_with(Default::default); 60 | member.update_weights(weights); 61 | }); 62 | 63 | crate::Pallet::::deposit_event(crate::Event::::WeightsSet(acc_id)); 64 | 65 | Ok(()) 66 | } 67 | 68 | pub fn delegate_weight_control( 69 | delegator: T::AccountId, 70 | delegatee: T::AccountId, 71 | ) -> DispatchResult { 72 | ensure!( 73 | delegator != delegatee, 74 | crate::Error::::CannotDelegateWeightControlToSelf, 75 | ); 76 | 77 | ensure!( 78 | ::is_agent_registered(&delegator) && ::is_whitelisted(&delegator), 79 | crate::Error::::AgentIsNotRegistered 80 | ); 81 | 82 | ensure!( 83 | ::is_agent_registered(&delegatee) && ::is_whitelisted(&delegatee), 84 | crate::Error::::AgentIsNotRegistered 85 | ); 86 | 87 | // At the current network stage, it only makes sense to delegate weight control 88 | // to allocators. 
89 | ::ensure_allocator(&delegatee)?; 90 | 91 | crate::WeightControlDelegation::::set(&delegator, Some(delegatee.clone())); 92 | 93 | crate::Pallet::::deposit_event(crate::Event::::DelegatedWeightControl( 94 | delegator, delegatee, 95 | )); 96 | 97 | Ok(()) 98 | } 99 | 100 | pub fn regain_weight_control(origin: OriginFor) -> DispatchResult { 101 | let acc_id = ensure_signed(origin)?; 102 | 103 | if ::ensure_allocator(&acc_id).is_err() { 104 | return Err(crate::Error::::WeightControlNotEnabled.into()); 105 | } 106 | 107 | crate::WeightControlDelegation::::mutate(acc_id, |val| match val.take() { 108 | Some(_) => Ok(()), 109 | None => Err(crate::Error::::AgentIsNotDelegating.into()), 110 | }) 111 | } 112 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | default: fmt check test 2 | 3 | # Build 4 | 5 | build-mainnet: 6 | cargo build --release --timings --package torus-runtime 7 | 8 | build-testnet: 9 | cargo build --release --features testnet --timings --package torus-runtime 10 | 11 | # Development 12 | 13 | check: fmt 14 | cargo clippy --tests 15 | 16 | test: check 17 | SKIP_WASM_BUILD=1 cargo nextest run 18 | 19 | fmt: 20 | cargo fmt 21 | 22 | select crates="" depth="2": 23 | SKIP_WASM_BUILD=1 cargo r -p project-selector -- {{crates}} \ 24 | -b "*polkadot*,*cumulus*,*snowbridge*,*parachain*,*xcm*,pallet*,fp*,*metadata-hash*,bp*,bridge*,fc*,substrate*build*" \ 25 | -a "polkadot-sdk*,pallet*api" \ 26 | -d {{depth}} > rust-project.json 27 | 28 | run-localnode profile="--alice": 29 | cargo xtask run local {{profile}} 30 | 31 | # Specs 32 | 33 | base_spec_path := "node/specs/base.json" 34 | 35 | gen-base-spec: 36 | cargo run -p torus-node --release -- build-spec --chain dev > "{{base_spec_path}}" 37 | 38 | gen-spec-file env: gen-base-spec 39 | mkdir -p tmp/spec 40 | 41 | node_version=$(cargo run -p torus-node --release -- --version) \ 42 | && 
scripts/adjust-spec-file.py "{{env}}" "{{base_spec_path}}" \ 43 | --balances-file data/torus-genesis-balances.json \ 44 | --merge-balances \ 45 | --aura-list-file "data/{{env}}/aura.pub.json" \ 46 | --gran-list-file "data/{{env}}/gran.pub.json" \ 47 | --bootnodes-file "data/{{env}}/bootnodes.json" \ 48 | --name "Torus {{env}} $node_version" \ 49 | > "tmp/spec/{{env}}.json" 50 | 51 | @echo "Spec file generated at: tmp/spec/{{env}}.json" 52 | 53 | # Benchmarks 54 | 55 | run-benchmarks: 56 | cargo build -r --features runtime-benchmarks 57 | ./target/release/torus-node benchmark pallet --pallet pallet_torus0 --chain dev --extrinsic "*" --steps 50 --repeat 20 --output pallets/torus0/src/weights.rs --template=./.maintain/frame-weight-template.hbs 58 | ./target/release/torus-node benchmark pallet --pallet pallet_governance --chain dev --extrinsic "*" --steps 50 --repeat 20 --output pallets/governance/src/weights.rs --template=./.maintain/frame-weight-template.hbs 59 | ./target/release/torus-node benchmark pallet --pallet pallet_emission0 --chain dev --extrinsic "*" --steps 50 --repeat 20 --output pallets/emission0/src/weights.rs --template=./.maintain/frame-weight-template.hbs 60 | ./target/release/torus-node benchmark pallet --pallet pallet_permission0 --chain dev --extrinsic "*" --steps 50 --repeat 20 --output pallets/permission0/src/weights.rs --template=./.maintain/frame-weight-template.hbs 61 | 62 | # Runtime Update Testing 63 | 64 | install-try-runtime: 65 | cargo install --git https://github.com/paritytech/try-runtime-cli --locked 66 | 67 | try-runtime-upgrade-testnet: 68 | SKIP_WASM_BUILD=0 cargo build --release -p torus-runtime --features try-runtime,testnet 69 | RUST_BACKTRACE=1 RUST_LOG=info try-runtime --runtime target/release/wbuild/torus-runtime/torus_runtime.compact.compressed.wasm on-runtime-upgrade --blocktime 8000 live --uri wss://api.testnet.torus.network 70 | 71 | try-runtime-upgrade-mainnet: 72 | SKIP_WASM_BUILD=0 cargo build --release -p 
torus-runtime --features try-runtime 73 | RUST_BACKTRACE=1 RUST_LOG=info try-runtime --runtime target/release/wbuild/torus-runtime/torus_runtime.compact.compressed.wasm on-runtime-upgrade --blocktime 8000 live --uri wss://api.torus.network 74 | 75 | # Github Actions 76 | 77 | run-workflows: 78 | act --secret-file .env \ 79 | -P 'ubuntu-24.04-8core-bakunin=ghcr.io/catthehacker/act-ubuntu:24.04' \ 80 | -P 'ubuntu-24.04-16core-friedrich=ghcr.io/catthehacker/act-ubuntu:24.04' \ 81 | -P 'ubuntu-22.04-32core-karl=ghcr.io/catthehacker/ubuntu:act-22.04' 82 | 83 | # Mcp 84 | 85 | build-devnet-mcp: 86 | cargo clean -p torus-client 87 | cargo clean -p torus-mcp 88 | cargo b -p torus-mcp -r --no-default-features --features devnet 89 | 90 | build-testnet-mcp: 91 | cargo clean -p torus-client 92 | cargo clean -p torus-mcp 93 | cargo b -p torus-mcp -r 94 | 95 | install-claude-mcp: build-devnet-mcp 96 | claude mcp add torus-mcp target-mcp/release/torus-mcp 97 | -------------------------------------------------------------------------------- /pallets/torus0/src/burn.rs: -------------------------------------------------------------------------------- 1 | use codec::{Decode, Encode, MaxEncodedLen}; 2 | use polkadot_sdk::{ 3 | frame_election_provider_support::Get, 4 | frame_support::DebugNoBound, 5 | polkadot_sdk_frame::prelude::BlockNumberFor, 6 | sp_runtime::{FixedU128, traits::Saturating}, 7 | }; 8 | use scale_info::{TypeInfo, prelude::marker::PhantomData}; 9 | 10 | use crate::BalanceOf; 11 | 12 | #[derive(Clone, TypeInfo, Decode, Encode, PartialEq, Eq, DebugNoBound, MaxEncodedLen)] 13 | #[scale_info(skip_type_params(T))] 14 | pub struct BurnConfiguration { 15 | pub min_burn: BalanceOf, 16 | pub max_burn: BalanceOf, 17 | pub adjustment_alpha: u64, 18 | pub target_registrations_interval: BlockNumberFor, 19 | pub target_registrations_per_interval: u16, 20 | pub max_registrations_per_interval: u16, 21 | pub _pd: PhantomData, 22 | } 23 | 24 | impl Default for BurnConfiguration 25 | 
where 26 | T: crate::Config, 27 | { 28 | fn default() -> Self { 29 | Self { 30 | min_burn: T::DefaultMinBurn::get(), 31 | max_burn: T::DefaultMaxBurn::get(), 32 | adjustment_alpha: T::DefaultAdjustmentAlpha::get(), 33 | target_registrations_interval: T::DefaultTargetRegistrationsInterval::get(), 34 | target_registrations_per_interval: T::DefaultTargetRegistrationsPerInterval::get(), 35 | max_registrations_per_interval: T::DefaultMaxRegistrationsPerInterval::get(), 36 | _pd: Default::default(), 37 | } 38 | } 39 | } 40 | 41 | /// Adjusts registration burn for the current block. 42 | /// 43 | /// The next burn is calculated by analyzing the last N 44 | /// (`target_registrations_interval`) blocks and increases if the target 45 | /// registrations per interval was reached. 46 | pub fn adjust_burn(current_block: u64) { 47 | let BurnConfiguration { 48 | min_burn, 49 | max_burn, 50 | adjustment_alpha, 51 | target_registrations_interval, 52 | target_registrations_per_interval, 53 | .. 54 | } = crate::BurnConfig::::get(); 55 | 56 | let target_registrations_interval: u64 = target_registrations_interval 57 | .into() 58 | .try_into() 59 | .expect("block number is 64 bits long"); 60 | let current_burn = crate::Burn::::get(); 61 | let registrations_this_interval = crate::RegistrationsThisInterval::::get(); 62 | 63 | let reached_interval = current_block 64 | .checked_rem(target_registrations_interval) 65 | .is_some_and(|r| r == 0); 66 | 67 | if !reached_interval { 68 | return; 69 | } 70 | 71 | let updated_burn = FixedU128::from_inner(current_burn) 72 | .const_checked_mul(FixedU128::from_u32( 73 | registrations_this_interval.saturating_add(target_registrations_per_interval) as u32, 74 | )) 75 | .unwrap_or_default() 76 | .const_checked_div(FixedU128::from_u32( 77 | target_registrations_per_interval.saturating_mul(2) as u32, 78 | )) 79 | .unwrap_or_default(); 80 | 81 | let alpha = FixedU128::from_inner(adjustment_alpha as u128) 82 | .const_checked_div(FixedU128::from_inner(u64::MAX 
as u128)) 83 | .unwrap_or_else(|| FixedU128::from_inner(0)); 84 | 85 | let next_value = alpha 86 | .const_checked_mul(FixedU128::from_inner(current_burn)) 87 | .unwrap_or_else(|| FixedU128::from_inner(0)) 88 | .saturating_add( 89 | FixedU128::from_u32(1) 90 | .saturating_sub(alpha) 91 | .const_checked_mul(updated_burn) 92 | .unwrap_or_else(|| FixedU128::from_inner(0)), 93 | ); 94 | 95 | let new_burn = if next_value >= FixedU128::from_inner(max_burn) { 96 | max_burn 97 | } else if next_value <= FixedU128::from_inner(min_burn) { 98 | min_burn 99 | } else { 100 | next_value.into_inner() 101 | }; 102 | 103 | crate::Burn::::set(new_burn); 104 | crate::RegistrationsThisInterval::::set(0); 105 | } 106 | -------------------------------------------------------------------------------- /client/codegen/src/lib.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use codec::Decode; 3 | use proc_macro2::TokenStream; 4 | use quote::quote; 5 | use std::path::{Path, PathBuf}; 6 | use subxt_codegen::{CodegenBuilder, Metadata}; 7 | use subxt_utils_fetchmetadata::{MetadataVersion, Url}; 8 | use syn::parse_quote; 9 | 10 | use crate::codegen::generate_wrappers_for_network; 11 | 12 | mod codegen; 13 | mod ir; 14 | mod parser; 15 | 16 | #[derive(Parser)] 17 | #[command(name = "generate-wrappers")] 18 | #[command(about = "Generate storage wrapper functions from subxt interfaces")] 19 | struct Args { 20 | #[arg(short, long)] 21 | output: PathBuf, 22 | 23 | /// Force overwrite existing output file 24 | #[arg(short, long)] 25 | force: bool, 26 | } 27 | 28 | #[derive(clap::ValueEnum, Clone)] 29 | enum InterfaceSource { 30 | Mainnet, 31 | Testnet, 32 | Devnet, 33 | } 34 | 35 | impl InterfaceSource { 36 | fn to_str(&self) -> &'static str { 37 | match self { 38 | InterfaceSource::Mainnet => "mainnet", 39 | InterfaceSource::Testnet => "testnet", 40 | InterfaceSource::Devnet => "devnet", 41 | } 42 | } 43 | 44 | fn to_chain_type(&self) -> 
&'static str { 45 | match self { 46 | InterfaceSource::Mainnet => "crate::chain::MainNet", 47 | InterfaceSource::Testnet => "crate::chain::TestNet", 48 | InterfaceSource::Devnet => "crate::chain::DevNet", 49 | } 50 | } 51 | } 52 | 53 | pub async fn generate_interfaces( 54 | output: &Path, 55 | devnet_url: Option, 56 | ) -> Result<(), Box> { 57 | let mainnet_content_tokens = generate_subxt_code_for_url("wss://api.torus.network").await?; 58 | let testnet_content_tokens = 59 | generate_subxt_code_for_url("wss://api.testnet.torus.network").await?; 60 | let devnet_content_tokens = if let Some(url) = &devnet_url { 61 | generate_subxt_code_for_url(url).await? 62 | } else { 63 | quote! {} 64 | }; 65 | 66 | let mainnet_content = mainnet_content_tokens.to_string(); 67 | let testnet_content = testnet_content_tokens.to_string(); 68 | let devnet_content = devnet_content_tokens.to_string(); 69 | 70 | let mainnet_pallets = parser::parse_api_file(&mainnet_content)?; 71 | let testnet_pallets = parser::parse_api_file(&testnet_content)?; 72 | let devnet_pallets = if devnet_url.is_some() { 73 | parser::parse_api_file(&devnet_content)? 74 | } else { 75 | vec![] 76 | }; 77 | 78 | let wrappers_tokens = 79 | generate_wrappers_for_network(&mainnet_pallets, &testnet_pallets, &devnet_pallets); 80 | 81 | // Parse the TokenStream into a syn::File and format with prettyplease 82 | let wrappers_file: syn::File = parse_quote! 
{ 83 | #[cfg(feature = "mainnet")] 84 | pub mod mainnet { 85 | #mainnet_content_tokens 86 | } 87 | 88 | #[cfg(feature = "testnet")] 89 | pub mod testnet { 90 | #testnet_content_tokens 91 | } 92 | 93 | #[cfg(feature = "devnet")] 94 | pub mod devnet { 95 | #devnet_content_tokens 96 | } 97 | 98 | pub mod wrappers { 99 | #wrappers_tokens 100 | } 101 | }; 102 | 103 | let wrappers_code = prettyplease::unparse(&wrappers_file).to_string(); 104 | 105 | // Create output directory if it doesn't exist 106 | if let Some(parent) = output.parent() { 107 | std::fs::create_dir_all(parent)?; 108 | } 109 | 110 | std::fs::write(output, wrappers_code)?; 111 | 112 | Ok(()) 113 | } 114 | 115 | async fn generate_subxt_code_for_url( 116 | url: impl AsRef, 117 | ) -> Result> { 118 | let metadata_bytes = 119 | subxt_utils_fetchmetadata::from_url(Url::parse(url.as_ref())?, MetadataVersion::default()) 120 | .await?; 121 | 122 | let mut metadata = Metadata::decode(&mut &*metadata_bytes)?; 123 | 124 | scale_typegen::utils::ensure_unique_type_paths(metadata.types_mut())?; 125 | 126 | let codegen = CodegenBuilder::default(); 127 | 128 | let code = codegen.generate(metadata)?; 129 | 130 | Ok(code) 131 | } 132 | -------------------------------------------------------------------------------- /node/src/rpc.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 
8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! A collection of node-specific RPC methods. 19 | //! Substrate provides the `sc-rpc` crate, which defines the core RPC layer 20 | //! used by Substrate nodes. This file extends those RPC definitions with 21 | //! capabilities that are specific to this project's runtime configuration. 22 | 23 | #![warn(missing_docs)] 24 | 25 | use std::sync::Arc; 26 | 27 | use eth::EthDeps; 28 | use futures::channel::mpsc; 29 | use jsonrpsee::RpcModule; 30 | use pallet_permission0_rpc::{Permission0Rpc, Permission0StreamApiServer}; 31 | use pallet_torus0_rpc::{Torus0ApiServer, Torus0Rpc}; 32 | use polkadot_sdk::{ 33 | pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}, 34 | sc_consensus_manual_seal::{ 35 | EngineCommand, 36 | rpc::{ManualSeal, ManualSealApiServer}, 37 | }, 38 | sc_rpc::SubscriptionTaskExecutor, 39 | sc_transaction_pool::ChainApi, 40 | sc_transaction_pool_api::TransactionPool, 41 | sp_inherents::CreateInherentDataProviders, 42 | sp_runtime::traits::Block as BlockT, 43 | substrate_frame_rpc_system::{System, SystemApiServer}, 44 | }; 45 | use torus_runtime::{interface::Hash, opaque::Block}; 46 | 47 | use crate::service::FullClient; 48 | 49 | /// ETH related RPC calls. 50 | pub mod eth; 51 | 52 | /// Full client dependencies. 53 | pub struct FullDeps { 54 | /// The client instance to use. 55 | pub client: Arc, 56 | /// Transaction pool instance. 57 | pub pool: Arc

, 58 | /// Manual seal command sink 59 | pub command_sink: Option>>, 60 | /// Ethereum-compatibility specific dependencies. 61 | pub eth: EthDeps, 62 | } 63 | 64 | /// Instantiate all full RPC extensions. 65 | pub fn create_full( 66 | deps: FullDeps, 67 | subscription_task_executor: SubscriptionTaskExecutor, 68 | pubsub_notification_sinks: Arc< 69 | fc_mapping_sync::EthereumBlockNotificationSinks< 70 | fc_mapping_sync::EthereumBlockNotification, 71 | >, 72 | >, 73 | ) -> Result, Box> 74 | where 75 | P: TransactionPool + 'static, 76 | A: ChainApi + 'static, 77 | CIDP: CreateInherentDataProviders + Send + 'static, 78 | CT: fp_rpc::ConvertTransaction<::Extrinsic> + Send + Sync + 'static, 79 | { 80 | let mut io = RpcModule::new(()); 81 | let FullDeps { 82 | client, 83 | pool, 84 | command_sink, 85 | eth, 86 | } = deps; 87 | 88 | io.merge(System::new(client.clone(), pool).into_rpc())?; 89 | io.merge(TransactionPayment::new(client.clone()).into_rpc())?; 90 | io.merge(Permission0Rpc::new(client.clone()).into_rpc())?; 91 | io.merge(Torus0Rpc::new(client.clone()).into_rpc())?; 92 | 93 | if let Some(command_sink) = command_sink { 94 | io.merge( 95 | // We provide the rpc handler with the sending end of the channel to allow the rpc 96 | // send EngineCommands to the background block authorship task. 
97 | ManualSeal::new(command_sink).into_rpc(), 98 | )?; 99 | } 100 | 101 | // Ethereum compatibility RPCs 102 | let io = eth::create_eth::<_, _, _, _>( 103 | io, 104 | eth, 105 | subscription_task_executor, 106 | pubsub_notification_sinks, 107 | )?; 108 | 109 | Ok(io) 110 | } 111 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "Torus Substrate development environment"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05"; 6 | 7 | flake-utils.url = "github:numtide/flake-utils"; 8 | 9 | rust-overlay.url = "github:oxalica/rust-overlay"; 10 | rust-overlay.inputs.nixpkgs.follows = "nixpkgs"; 11 | 12 | pre-commit-hooks.url = "github:cachix/git-hooks.nix"; 13 | pre-commit-hooks.inputs.nixpkgs.follows = "nixpkgs"; 14 | }; 15 | 16 | outputs = { self, nixpkgs, rust-overlay, pre-commit-hooks, flake-utils, ... }: 17 | flake-utils.lib.eachDefaultSystem (system: 18 | let 19 | overlays = [ (import rust-overlay) ]; 20 | pkgs = import nixpkgs { inherit system overlays; }; 21 | rust = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml; 22 | 23 | generalBuildInputs = with pkgs; [ 24 | pkg-config 25 | openssl.dev 26 | rocksdb 27 | zstd.dev 28 | ]; 29 | buildInputs = 30 | if pkgs.stdenv.isLinux then 31 | generalBuildInputs ++ [ pkgs.jemalloc pkgs.pkgsi686Linux.glibc ] 32 | else 33 | generalBuildInputs 34 | ++ [ pkgs.darwin.apple_sdk.frameworks.SystemConfiguration ]; 35 | nativeBuildInputs = with pkgs; [ 36 | git 37 | llvmPackages_17.stdenv 38 | llvmPackages_17.libcxx 39 | llvmPackages_17.libcxxStdenv 40 | llvmPackages_17.clang-unwrapped 41 | rust 42 | protobuf 43 | sccache 44 | ]; 45 | 46 | shellPkgs = [ 47 | pkgs.bashInteractive 48 | # Run project-specific commands 49 | pkgs.just 50 | # Python 51 | pkgs.python310 52 | # Subxt CLI for metadata handling 53 | pkgs.subxt 54 | pkgs.cargo-nextest 55 | # # Code 
coverage tool 56 | # pkgs.cargo-llvm-cov # marked as broken 57 | ]; 58 | in 59 | { 60 | checks = pkgs.mkShell { 61 | pre-commit-check = pre-commit-hooks.lib.${system}.run { 62 | src = ./.; 63 | hooks = { 64 | rustfmt.enable = true; 65 | 66 | push = { 67 | enable = true; 68 | name = "Tests & Stuff"; 69 | entry = "just test"; 70 | pass_filenames = false; 71 | stages = ["pre-push"]; 72 | }; 73 | }; 74 | }; 75 | }; 76 | 77 | devShells.default = pkgs.mkShell { 78 | buildInputs = buildInputs; 79 | nativeBuildInputs = nativeBuildInputs; 80 | packages = shellPkgs; 81 | 82 | shellHook = '' 83 | ${self.checks.${system}.pre-commit-check.shellHook} 84 | 85 | # Correct paths to build RocksDB and WASM-OPT. 86 | # Don't touch it unless you know what you are doing. 87 | export C_INCLUDE_PATH="${pkgs.llvmPackages_17.clang-unwrapped.lib}/lib/clang/17/include" 88 | export CPLUS_INCLUDE_PATH="${pkgs.llvmPackages_17.libcxx.dev}/include/c++/v1:${pkgs.llvmPackages_17.clang-unwrapped.lib}/lib/clang/17/include" 89 | 90 | # Ensure clang-17 is prioritized in PATH. Oxalica's Rust Overlay 91 | # also ships clang-19 but current polkadot dependencies require 17. 
92 | export PATH="${pkgs.llvmPackages_17.clang-unwrapped}/bin:$PATH" 93 | echo "Using clang version: $(clang --version | head -n1)" 94 | ''; 95 | 96 | env = { 97 | LIBCLANG_PATH = "${pkgs.llvmPackages_17.clang-unwrapped.lib}/lib"; 98 | ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; 99 | ZSTD_SYS_USE_PKG_CONFIG = "true"; 100 | OPENSSL_NO_VENDOR = "1"; 101 | OPENSSL_DIR = "${pkgs.openssl.dev}"; 102 | OPENSSL_LIB_DIR = "${pkgs.openssl.out}/lib"; 103 | OPENSSL_INCLUDE_DIR = "${pkgs.openssl.dev}/include"; 104 | RUSTC_WRAPPER = "${pkgs.sccache}/bin/sccache"; 105 | RUST_BACKTRACE = "1"; 106 | } // nixpkgs.lib.optionalAttrs pkgs.stdenv.isLinux { 107 | JEMALLOC_OVERRIDE = "${pkgs.jemalloc}/lib/libjemalloc.so"; 108 | }; 109 | }; 110 | }); 111 | } 112 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-compat": { 4 | "flake": false, 5 | "locked": { 6 | "lastModified": 1696426674, 7 | "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", 8 | "owner": "edolstra", 9 | "repo": "flake-compat", 10 | "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", 11 | "type": "github" 12 | }, 13 | "original": { 14 | "owner": "edolstra", 15 | "repo": "flake-compat", 16 | "type": "github" 17 | } 18 | }, 19 | "flake-utils": { 20 | "inputs": { 21 | "systems": "systems" 22 | }, 23 | "locked": { 24 | "lastModified": 1731533236, 25 | "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 26 | "owner": "numtide", 27 | "repo": "flake-utils", 28 | "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 29 | "type": "github" 30 | }, 31 | "original": { 32 | "owner": "numtide", 33 | "repo": "flake-utils", 34 | "type": "github" 35 | } 36 | }, 37 | "gitignore": { 38 | "inputs": { 39 | "nixpkgs": [ 40 | "pre-commit-hooks", 41 | "nixpkgs" 42 | ] 43 | }, 44 | "locked": { 45 | "lastModified": 1709087332, 46 | "narHash": 
"sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", 47 | "owner": "hercules-ci", 48 | "repo": "gitignore.nix", 49 | "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", 50 | "type": "github" 51 | }, 52 | "original": { 53 | "owner": "hercules-ci", 54 | "repo": "gitignore.nix", 55 | "type": "github" 56 | } 57 | }, 58 | "nixpkgs": { 59 | "locked": { 60 | "lastModified": 1751741127, 61 | "narHash": "sha256-t75Shs76NgxjZSgvvZZ9qOmz5zuBE8buUaYD28BMTxg=", 62 | "owner": "NixOS", 63 | "repo": "nixpkgs", 64 | "rev": "29e290002bfff26af1db6f64d070698019460302", 65 | "type": "github" 66 | }, 67 | "original": { 68 | "owner": "NixOS", 69 | "ref": "nixos-25.05", 70 | "repo": "nixpkgs", 71 | "type": "github" 72 | } 73 | }, 74 | "pre-commit-hooks": { 75 | "inputs": { 76 | "flake-compat": "flake-compat", 77 | "gitignore": "gitignore", 78 | "nixpkgs": [ 79 | "nixpkgs" 80 | ] 81 | }, 82 | "locked": { 83 | "lastModified": 1750779888, 84 | "narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=", 85 | "owner": "cachix", 86 | "repo": "git-hooks.nix", 87 | "rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d", 88 | "type": "github" 89 | }, 90 | "original": { 91 | "owner": "cachix", 92 | "repo": "git-hooks.nix", 93 | "type": "github" 94 | } 95 | }, 96 | "root": { 97 | "inputs": { 98 | "flake-utils": "flake-utils", 99 | "nixpkgs": "nixpkgs", 100 | "pre-commit-hooks": "pre-commit-hooks", 101 | "rust-overlay": "rust-overlay" 102 | } 103 | }, 104 | "rust-overlay": { 105 | "inputs": { 106 | "nixpkgs": [ 107 | "nixpkgs" 108 | ] 109 | }, 110 | "locked": { 111 | "lastModified": 1751769931, 112 | "narHash": "sha256-QR2Rp/41NkA5YxcpvZEKD1S2QE1Pb9U415aK8M/4tJc=", 113 | "owner": "oxalica", 114 | "repo": "rust-overlay", 115 | "rev": "3ac4f630e375177ea8317e22f5c804156de177e8", 116 | "type": "github" 117 | }, 118 | "original": { 119 | "owner": "oxalica", 120 | "repo": "rust-overlay", 121 | "type": "github" 122 | } 123 | }, 124 | "systems": { 125 | "locked": { 126 | "lastModified": 1681028828, 127 | 
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 128 | "owner": "nix-systems", 129 | "repo": "default", 130 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 131 | "type": "github" 132 | }, 133 | "original": { 134 | "owner": "nix-systems", 135 | "repo": "default", 136 | "type": "github" 137 | } 138 | } 139 | }, 140 | "root": "root", 141 | "version": 7 142 | } 143 | -------------------------------------------------------------------------------- /pallets/torus0/src/benchmarking.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "runtime-benchmarks")] 2 | 3 | use pallet_governance_api::GovernanceApi; 4 | use pallet_torus0_api::Torus0Api; 5 | use polkadot_sdk::{ 6 | frame_benchmarking::{account, v2::*}, 7 | frame_system::RawOrigin, 8 | sp_runtime::Percent, 9 | }; 10 | 11 | use crate::*; 12 | 13 | fn register_test_agent(id: &T::AccountId, name: &str) { 14 | let name = name.as_bytes().to_vec(); 15 | Pallet::::force_register_agent(id, name.clone(), name.clone(), name.clone()) 16 | .expect("failed to register agent"); 17 | } 18 | 19 | #[benchmarks] 20 | mod benchmarks { 21 | 22 | use super::*; 23 | 24 | #[benchmark] 25 | fn add_stake() { 26 | let agent: T::AccountId = account("agent", 0, 1); 27 | let staker: T::AccountId = account("staker", 1, 1); 28 | let amount = MinAllowedStake::::get(); 29 | 30 | register_test_agent::(&agent, "agent"); 31 | 32 | let _ = ::deposit_creating(&staker, amount.saturating_mul(2)); 33 | 34 | #[extrinsic_call] 35 | add_stake(RawOrigin::Signed(staker), agent, amount) 36 | } 37 | 38 | #[benchmark] 39 | fn remove_stake() { 40 | let agent: T::AccountId = account("agent", 0, 1); 41 | let staker: T::AccountId = account("staker", 1, 1); 42 | 43 | register_test_agent::(&agent, "agent"); 44 | 45 | let amount = MinAllowedStake::::get(); 46 | let _ = ::deposit_creating(&staker, amount.saturating_mul(2)); 47 | Pallet::::force_set_stake(&staker, &agent, amount).expect("failed to add 
stake"); 48 | 49 | #[extrinsic_call] 50 | remove_stake(RawOrigin::Signed(staker), agent, amount) 51 | } 52 | 53 | #[benchmark] 54 | fn transfer_stake() { 55 | let agent_a: T::AccountId = account("agent-a", 0, 1); 56 | let agent_b: T::AccountId = account("agent-b", 1, 1); 57 | let staker: T::AccountId = account("staker", 2, 1); 58 | 59 | register_test_agent::(&agent_a, "agent-a"); 60 | register_test_agent::(&agent_b, "agent-b"); 61 | 62 | let amount = MinAllowedStake::::get(); 63 | let _ = ::deposit_creating(&staker, amount.saturating_mul(2)); 64 | Pallet::::force_set_stake(&staker, &agent_a, amount).expect("failed to add stake"); 65 | 66 | #[extrinsic_call] 67 | transfer_stake(RawOrigin::Signed(staker), agent_a, agent_b, amount) 68 | } 69 | 70 | #[benchmark] 71 | fn register_agent() { 72 | let agent: T::AccountId = account("agent", 0, 1); 73 | ::force_set_whitelisted(&agent); 74 | 75 | let burn = crate::Burn::::get(); 76 | let _ = ::deposit_creating(&agent, burn.saturating_mul(2)); 77 | 78 | let name = b"agent".to_vec(); 79 | let url = b"agent".to_vec(); 80 | let metadata = b"agent".to_vec(); 81 | 82 | #[extrinsic_call] 83 | register_agent(RawOrigin::Signed(agent.clone()), name, url, metadata) 84 | } 85 | 86 | #[benchmark] 87 | fn deregister_agent() { 88 | let agent: T::AccountId = account("agent", 0, 1); 89 | register_test_agent::(&agent, "agent"); 90 | 91 | #[extrinsic_call] 92 | deregister_agent(RawOrigin::Signed(agent)) 93 | } 94 | 95 | #[benchmark] 96 | fn update_agent() { 97 | let agent: T::AccountId = account("agent", 0, 1); 98 | register_test_agent::(&agent, "agent"); 99 | 100 | AgentUpdateCooldown::::set(Default::default()); 101 | 102 | let url = b"agent".to_vec(); 103 | let metadata = Some(b"agent".to_vec()); 104 | let staking_fee = Some(Percent::from_percent(10)); 105 | let weight_control_fee = Some(Percent::from_percent(10)); 106 | 107 | #[extrinsic_call] 108 | update_agent( 109 | RawOrigin::Signed(agent), 110 | url, 111 | metadata, 112 | staking_fee, 
113 | weight_control_fee, 114 | ) 115 | } 116 | 117 | #[benchmark] 118 | fn set_agent_update_cooldown() { 119 | let new_cooldown = 100u32.into(); 120 | 121 | #[extrinsic_call] 122 | set_agent_update_cooldown(RawOrigin::Root, new_cooldown) 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /pallets/emission0/tests/weights.rs: -------------------------------------------------------------------------------- 1 | use pallet_emission0::{ 2 | ConsensusMembers, Error, WeightControlDelegation, Weights, 3 | weight_control::{delegate_weight_control, regain_weight_control, set_weights}, 4 | }; 5 | use test_utils::{ 6 | Test, add_stake, get_origin, pallet_governance::Allocators, pallet_torus0::MinValidatorStake, 7 | register_empty_agent, 8 | }; 9 | 10 | #[test] 11 | fn delegates_and_regains_weight_control() { 12 | test_utils::new_test_ext().execute_with(|| { 13 | let delegator = 0; 14 | let delegated = 1; 15 | 16 | assert_eq!( 17 | delegate_weight_control::(delegator, delegator), 18 | Err(Error::::CannotDelegateWeightControlToSelf.into()) 19 | ); 20 | 21 | assert_eq!( 22 | delegate_weight_control::(delegator, delegated), 23 | Err(Error::::AgentIsNotRegistered.into()) 24 | ); 25 | 26 | // TODO: reenable when weight control is enabled again 27 | // assert_eq!( 28 | // regain_weight_control::(get_origin(delegator)), 29 | // Err(Error::::AgentIsNotDelegating.into()) 30 | // ); 31 | 32 | register_empty_agent(delegator); 33 | 34 | assert_eq!( 35 | delegate_weight_control::(delegator, delegated), 36 | Err(Error::::AgentIsNotRegistered.into()) 37 | ); 38 | 39 | register_empty_agent(delegated); 40 | 41 | delegate_weight_control::(delegator, delegated) 42 | .expect_err("cannot delegate to not-allocator"); 43 | 44 | Allocators::::set(delegated, Some(())); 45 | 46 | assert_eq!( 47 | delegate_weight_control::(delegator, delegated), 48 | Ok(()) 49 | ); 50 | 51 | assert!(WeightControlDelegation::::contains_key(delegator)); 52 | 53 | 
assert_eq!( 54 | regain_weight_control::(get_origin(delegator)), 55 | Err(Error::::WeightControlNotEnabled.into()) 56 | ); 57 | 58 | // TODO: reenable when weight control is enabled 59 | // assert_eq!(regain_weight_control::(get_origin(delegator)), Ok(())); 60 | // assert!(!WeightControlDelegation::::contains_key(delegator)); 61 | }); 62 | } 63 | 64 | #[test] 65 | #[allow(unreachable_code)] 66 | fn sets_weights_correctly() { 67 | test_utils::new_test_ext().execute_with(|| { 68 | let validator = 0; 69 | 70 | assert_eq!( 71 | set_weights::(get_origin(validator), vec![(0, 0); 5]), 72 | Err(test_utils::pallet_governance::Error::::NotAllocator.into()), 73 | ); 74 | 75 | Allocators::::insert(0, ()); 76 | 77 | assert_eq!( 78 | set_weights::(get_origin(validator), vec![(0, 0); 5]), 79 | Err(Error::::AgentIsNotRegistered.into()), 80 | ); 81 | 82 | register_empty_agent(validator); 83 | 84 | assert_eq!( 85 | set_weights::(get_origin(validator), vec![(0, 0); 5]), 86 | Err(Error::::NotEnoughStakeToSetWeights.into()), 87 | ); 88 | 89 | add_stake(validator, validator, MinValidatorStake::::get()); 90 | 91 | assert_eq!( 92 | set_weights::(get_origin(validator), vec![(0, 0); 5]), 93 | Err(Error::::CannotSetWeightsForSelf.into()), 94 | ); 95 | 96 | assert_eq!( 97 | set_weights::(get_origin(validator), vec![(1, 0); 5]), 98 | Err(Error::::AgentIsNotRegistered.into()), 99 | ); 100 | 101 | register_empty_agent(1); 102 | register_empty_agent(2); 103 | 104 | Allocators::::set(1, Some(())); 105 | 106 | delegate_weight_control::(validator, 1).expect("failed to delegate weight control"); 107 | 108 | assert_eq!( 109 | set_weights::(get_origin(validator), vec![(1, 0); 5]), 110 | Err(Error::::CannotSetWeightsWhileDelegating.into()), 111 | ); 112 | 113 | regain_weight_control::(get_origin(validator)) 114 | .expect("failed to regain weight control"); 115 | 116 | assert_eq!( 117 | set_weights::(get_origin(validator), vec![(1, 0), (1, 0), (2, 0)]), 118 | Ok(()), 119 | ); 120 | 121 | assert_eq!( 122 
| ConsensusMembers::::get(validator) 123 | .expect("weights were not set") 124 | .weights, 125 | Weights::::truncate_from(vec![(1, 0), (2, 0)]) 126 | ); 127 | }); 128 | } 129 | -------------------------------------------------------------------------------- /pallets/torus0/src/stake.rs: -------------------------------------------------------------------------------- 1 | use polkadot_sdk::{ 2 | frame_support::{dispatch::DispatchResult, ensure, traits::NamedReservableCurrency}, 3 | sp_std::{collections::btree_map::BTreeMap, vec::Vec}, 4 | }; 5 | 6 | use crate::{AccountIdOf, BalanceOf}; 7 | use crate::{StakedBy, StakingTo, TotalStake, agent}; 8 | 9 | pub const STAKE_IDENTIFIER: &[u8; 8] = b"torstake"; 10 | 11 | /// Stakes `amount` tokens from `staker` to `staked` by withdrawing the tokens 12 | /// and adding them to the [`crate::StakingTo`] and [`crate::StakedBy`] maps. 13 | pub fn add_stake( 14 | staker: AccountIdOf, 15 | staked: AccountIdOf, 16 | amount: BalanceOf, 17 | ) -> DispatchResult { 18 | ensure!( 19 | agent::exists::(&staked), 20 | crate::Error::::AgentDoesNotExist 21 | ); 22 | 23 | T::Currency::reserve_named(STAKE_IDENTIFIER, &staker, amount) 24 | .map_err(|_| crate::Error::::NotEnoughBalanceToStake)?; 25 | 26 | StakedBy::::mutate(&staked, &staker, |stake| { 27 | *stake = Some(stake.unwrap_or(0).saturating_add(amount)) 28 | }); 29 | StakingTo::::mutate(&staker, &staked, |stake| { 30 | *stake = Some(stake.unwrap_or(0).saturating_add(amount)) 31 | }); 32 | 33 | TotalStake::::mutate(|total_stake| *total_stake = total_stake.saturating_add(amount)); 34 | 35 | crate::Pallet::::deposit_event(crate::Event::::StakeAdded(staker, staked, amount)); 36 | 37 | Ok(()) 38 | } 39 | 40 | /// Withdraws stake from an agent and gives it back to the staker. 
41 | pub fn remove_stake( 42 | staker: AccountIdOf, 43 | staked: AccountIdOf, 44 | amount: BalanceOf, 45 | ) -> DispatchResult { 46 | ensure!( 47 | agent::exists::(&staked), 48 | crate::Error::::AgentDoesNotExist 49 | ); 50 | 51 | ensure!( 52 | StakingTo::::get(&staker, &staked).unwrap_or(0) >= amount, 53 | crate::Error::::NotEnoughStakeToWithdraw 54 | ); 55 | 56 | remove_stake0::(staker, staked, amount, true); 57 | 58 | Ok(()) 59 | } 60 | 61 | fn remove_stake0( 62 | staker: AccountIdOf, 63 | staked: AccountIdOf, 64 | amount: BalanceOf, 65 | keep: bool, 66 | ) { 67 | let Some(stake) = StakingTo::::get(&staker, &staked) else { 68 | return; 69 | }; 70 | 71 | let amount = stake.min(amount); 72 | let new_stake = stake.saturating_sub(amount); 73 | let new_stake = if keep || new_stake > 0 { 74 | Some(new_stake) 75 | } else { 76 | None 77 | }; 78 | 79 | StakingTo::::set(&staker, &staked, new_stake); 80 | StakedBy::::set(&staked, &staker, new_stake); 81 | TotalStake::::mutate(|total_stake| *total_stake = total_stake.saturating_sub(amount)); 82 | 83 | T::Currency::unreserve_named(STAKE_IDENTIFIER, &staker, amount); 84 | 85 | crate::Pallet::::deposit_event(crate::Event::::StakeRemoved(staker, staked, amount)); 86 | } 87 | 88 | /// Transfers stake from an account to another (see [`remove_stake`], 89 | /// [`add_stake`]). 90 | pub fn transfer_stake( 91 | staker: AccountIdOf, 92 | from: AccountIdOf, 93 | to: AccountIdOf, 94 | amount: BalanceOf, 95 | ) -> DispatchResult { 96 | remove_stake::(staker.clone(), from, amount)?; 97 | add_stake::(staker, to, amount)?; 98 | Ok(()) 99 | } 100 | 101 | /// Usually called when de-registering an agent, removes all stakes on a given 102 | /// key. 
103 | pub(crate) fn clear_key(key: &AccountIdOf) -> DispatchResult { 104 | let stakes: Vec<_> = StakingTo::::iter().collect(); 105 | for (staker, staked, amount) in stakes { 106 | if &staker == key || &staked == key { 107 | remove_stake0::(staker, staked, amount, false); 108 | } 109 | } 110 | 111 | Ok(()) 112 | } 113 | 114 | #[inline] 115 | pub fn sum_staking_to(staker: &AccountIdOf) -> BalanceOf { 116 | StakingTo::::iter_prefix_values(staker).sum() 117 | } 118 | 119 | #[inline] 120 | pub fn get_staking_to_vector( 121 | staker: &AccountIdOf, 122 | ) -> BTreeMap> { 123 | StakingTo::::iter_prefix(staker).collect() 124 | } 125 | 126 | #[inline] 127 | pub fn get_staked_by_vector( 128 | staked: &AccountIdOf, 129 | ) -> Vec<(T::AccountId, BalanceOf)> { 130 | StakedBy::::iter_prefix(staked).collect() 131 | } 132 | 133 | #[inline] 134 | pub fn sum_staked_by(staked: &AccountIdOf) -> BalanceOf { 135 | StakedBy::::iter_prefix_values(staked).sum() 136 | } 137 | -------------------------------------------------------------------------------- /xtask/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{borrow::Cow, net::IpAddr, os::unix::process::CommandExt}; 2 | 3 | use polkadot_sdk::sp_keyring; 4 | 5 | mod flags; 6 | mod generate_spec; 7 | mod run; 8 | mod workbench; 9 | 10 | fn main() { 11 | let cmd = flags::Xtask::from_env_or_exit(); 12 | match cmd.subcommand { 13 | flags::XtaskCmd::Run(run) => run::run(run), 14 | flags::XtaskCmd::GenerateSpec(cmd) => generate_spec::generate_spec(cmd), 15 | flags::XtaskCmd::Coverage(coverage) => { 16 | const PALLETS: &[&str] = &[ 17 | "pallet-emission0", 18 | "pallet-governance", 19 | "pallet-torus0", 20 | "pallet-permission0", 21 | ]; 22 | 23 | let mut cmd = std::process::Command::new("cargo"); 24 | let mut args = vec![ 25 | "llvm-cov", 26 | "--no-clean", 27 | "--exclude-from-report", 28 | "test-utils", 29 | "--ignore-filename-regex", 30 | 
"test-utils|weights.rs|migrations.rs|benchmarks.rs", 31 | ]; 32 | 33 | for pallet in PALLETS { 34 | args.extend_from_slice(&["-p", pallet]); 35 | } 36 | 37 | if coverage.html { 38 | let dev_args = ["--html"]; 39 | args.extend_from_slice(&dev_args); 40 | } else { 41 | let ci_args = ["--cobertura", "--output-path", "target/cov.xml"]; 42 | args.extend_from_slice(&ci_args); 43 | } 44 | 45 | cmd.args(args); 46 | let _ = cmd.exec(); 47 | } 48 | flags::XtaskCmd::Workbench(workbench) => workbench::run(workbench), 49 | } 50 | } 51 | 52 | #[derive(Clone)] 53 | pub(crate) struct Node<'a> { 54 | pub(crate) name: Option>, 55 | pub(crate) id: Option>, 56 | pub(crate) key: Option>, 57 | pub(crate) tcp_port: u16, 58 | pub(crate) rpc_port: u16, 59 | pub(crate) validator: bool, 60 | } 61 | 62 | impl Node<'_> { 63 | fn bootnode_uri(&self, addr: IpAddr) -> String { 64 | format!( 65 | "/{}/{addr}/tcp/{}/p2p/{}", 66 | match addr { 67 | IpAddr::V4(_) => "ip4", 68 | IpAddr::V6(_) => "ip6", 69 | }, 70 | self.tcp_port, 71 | self.id.as_ref().unwrap() 72 | ) 73 | } 74 | } 75 | 76 | impl Default for Node<'_> { 77 | fn default() -> Self { 78 | Self { 79 | name: Default::default(), 80 | id: Default::default(), 81 | key: Default::default(), 82 | tcp_port: 30333, 83 | rpc_port: 9944, 84 | validator: false, 85 | } 86 | } 87 | } 88 | 89 | #[allow(dead_code)] 90 | #[derive(Clone)] 91 | struct Account<'a> { 92 | pub(crate) suri: Cow<'a, str>, 93 | pub(crate) aura_address: sp_keyring::Sr25519Keyring, 94 | pub(crate) grandpa_address: sp_keyring::Ed25519Keyring, 95 | } 96 | 97 | impl Default for Account<'_> { 98 | fn default() -> Self { 99 | Self { 100 | suri: "".into(), 101 | aura_address: sp_keyring::Sr25519Keyring::One, 102 | grandpa_address: sp_keyring::Ed25519Keyring::One, 103 | } 104 | } 105 | } 106 | 107 | static ALICE_ACCOUNT: Account<'static> = Account { 108 | suri: Cow::Borrowed( 109 | "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Alice", 110 | ), 111 | aura_address: 
sp_keyring::Sr25519Keyring::Alice, 112 | grandpa_address: sp_keyring::Ed25519Keyring::Alice, 113 | }; 114 | 115 | static BOB_ACCOUNT: Account<'static> = Account { 116 | suri: Cow::Borrowed( 117 | "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Bob", 118 | ), 119 | aura_address: sp_keyring::Sr25519Keyring::Bob, 120 | grandpa_address: sp_keyring::Ed25519Keyring::Bob, 121 | }; 122 | 123 | static ALICE_NODE: Node<'static> = Node { 124 | name: Some(Cow::Borrowed("Alice")), 125 | id: Some(Cow::Borrowed( 126 | "12D3KooWBorpca6RKiebVjeFJA5o9iVWnZpg98yQbYqRC6f8CnLw", 127 | )), 128 | key: Some(Cow::Borrowed( 129 | "2756181a3b9bca683a35b51a0a5d75ee536738680bcb9066c68be1db305a1ac5", 130 | )), 131 | tcp_port: 30341, 132 | rpc_port: 9951, 133 | validator: true, 134 | }; 135 | 136 | static BOB_NODE: Node<'static> = Node { 137 | name: Some(Cow::Borrowed("Bob")), 138 | id: Some(Cow::Borrowed( 139 | "12D3KooWQh3CeSp2rpUVvPb6jqvmHVCUieoZmKbkUhZ8rPR77vmA", 140 | )), 141 | key: Some(Cow::Borrowed( 142 | "e83fa0787cb280d95c666ead866a2a4bc1ee1e36faa1ed06623595eb3f474681", 143 | )), 144 | tcp_port: 30342, 145 | rpc_port: 9952, 146 | validator: true, 147 | }; 148 | -------------------------------------------------------------------------------- /client/codegen/src/parser/calls.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | 3 | use syn::{ 4 | GenericArgument, Ident, ImplItem, Item, ItemImpl, ItemMod, PathArguments, ReturnType, Type, 5 | TypeArray, TypePath, TypeReference, TypeSlice, TypeTuple, 6 | }; 7 | 8 | use crate::{ir::CallPattern, parser::is_api_impl}; 9 | 10 | pub(super) fn parse_calls_module( 11 | calls_mod: &ItemMod, 12 | pallet_name: &Ident, 13 | ) -> Result, Box> { 14 | let mut calls = Vec::new(); 15 | 16 | if let Some((_, items)) = &calls_mod.content { 17 | for item in items { 18 | if let Item::Impl(impl_item) = item { 19 | if is_api_impl(impl_item, "TransactionApi") { 20 | 
extract_calls(impl_item, &mut calls, pallet_name)?; 21 | } 22 | } 23 | } 24 | } 25 | 26 | Ok(calls) 27 | } 28 | 29 | fn extract_calls( 30 | impl_item: &ItemImpl, 31 | calls: &mut Vec, 32 | pallet: &Ident, 33 | ) -> Result<(), Box> { 34 | for item in &impl_item.items { 35 | if let ImplItem::Fn(method) = item { 36 | let params = method 37 | .sig 38 | .inputs 39 | .iter() 40 | .filter_map(|arg| match arg { 41 | syn::FnArg::Receiver(_) => None, 42 | syn::FnArg::Typed(pat_type) => match &*pat_type.pat { 43 | syn::Pat::Ident(pat_ident) => { 44 | Some((pat_ident.ident.clone(), *pat_type.ty.clone())) 45 | } 46 | _ => None, 47 | }, 48 | }) 49 | .collect::>(); 50 | 51 | let return_type = match &method.sig.output { 52 | ReturnType::Default => syn::Type::Tuple(TypeTuple { 53 | elems: Default::default(), 54 | paren_token: Default::default(), 55 | }), 56 | ReturnType::Type(_, ty) => { 57 | extract_all_generics(ty).first().cloned().cloned().unwrap() 58 | } 59 | }; 60 | 61 | calls.push(CallPattern { 62 | name: method.sig.ident.clone(), 63 | params, 64 | ret: return_type, 65 | pallet: pallet.clone(), 66 | }); 67 | } 68 | } 69 | 70 | Ok(()) 71 | } 72 | 73 | fn extract_all_generics(ty: &Type) -> Vec<&Type> { 74 | let mut generics = Vec::new(); 75 | 76 | match ty { 77 | Type::Path(TypePath { path, .. }) => { 78 | for segment in &path.segments { 79 | if let PathArguments::AngleBracketed(args) = &segment.arguments { 80 | for arg in &args.args { 81 | if let GenericArgument::Type(inner_ty) = arg { 82 | generics.push(inner_ty); 83 | // Recursively extract generics from nested types 84 | generics.extend(extract_all_generics(inner_ty)); 85 | } 86 | } 87 | } 88 | } 89 | } 90 | Type::Reference(TypeReference { elem, .. }) => { 91 | generics.extend(extract_all_generics(elem)); 92 | } 93 | Type::Slice(TypeSlice { elem, .. }) => { 94 | generics.extend(extract_all_generics(elem)); 95 | } 96 | Type::Array(TypeArray { elem, .. 
}) => { 97 | generics.extend(extract_all_generics(elem)); 98 | } 99 | Type::Tuple(tuple) => { 100 | for elem in &tuple.elems { 101 | generics.extend(extract_all_generics(elem)); 102 | } 103 | } 104 | _ => {} 105 | } 106 | 107 | generics 108 | } 109 | 110 | // fn extract_type_info(items: &[Item]) -> Result, ParseError> { 111 | // let mut type_info = HashMap::new(); 112 | 113 | // // Find the types module 114 | // for item in items { 115 | // if let Item::Mod(types_mod) = item { 116 | // if types_mod.ident == "types" { 117 | // if let Some((_, type_items)) = &types_mod.content { 118 | // for type_item in type_items { 119 | // if let Item::Mod(storage_type_mod) = type_item { 120 | // let storage_name = storage_type_mod.ident.to_string(); 121 | // let info = parse_calls_type_mod(storage_type_mod)?; 122 | // type_info.insert(storage_name, info); 123 | // } 124 | // } 125 | // } 126 | // } 127 | // } 128 | // } 129 | 130 | // Ok(type_info) 131 | // } 132 | -------------------------------------------------------------------------------- /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | name: Linting and Testing 2 | 3 | on: 4 | push: 5 | pull_request: 6 | branches: [main] 7 | 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.ref }} 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | check: 14 | permissions: 15 | pull-requests: write 16 | id-token: write 17 | pages: write 18 | checks: write 19 | contents: write 20 | 21 | runs-on: ubicloud-standard-8 22 | 23 | steps: 24 | - uses: actions/checkout@v4 25 | 26 | - name: Setup Rust cache 27 | uses: Swatinem/rust-cache@v2 28 | 29 | - name: Install Rust toolchain 30 | run: | 31 | rustup set profile minimal 32 | rustup show 33 | 34 | - name: Ensure Rust formatting 35 | uses: actions-rs/cargo@v1 36 | with: 37 | command: fmt 38 | args: -- --check 39 | 40 | - name: Install Protoc 41 | uses: arduino/setup-protoc@v1 42 | with: 43 | version: 3.20.1 44 | 
repo-token: ${{ secrets.GITHUB_TOKEN }} 45 | 46 | - name: Check Clippy errors 47 | uses: actions-rs/clippy-check@v1 48 | env: 49 | SKIP_WASM_BUILD: 1 50 | with: 51 | args: --color=always --tests -- -D warnings 52 | token: ${{ secrets.GITHUB_TOKEN }} 53 | 54 | # - name: Run tests 55 | # env: 56 | # RUST_BACKTRACE: 1 57 | # SKIP_WASM_BUILD: 1 58 | # run: cargo test 59 | 60 | - uses: jwalton/gh-find-current-pr@v1 61 | id: findPr 62 | 63 | - name: Extract branch name 64 | shell: bash 65 | run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT 66 | id: extractBranch 67 | 68 | - name: Install cargo-llvm-cov 69 | if: success() && steps.findPr.outputs.number && steps.extractBranch.outputs.branch 70 | uses: taiki-e/install-action@cargo-llvm-cov 71 | 72 | - name: Install cargo-xtask 73 | if: success() && steps.findPr.outputs.number && steps.extractBranch.outputs.branch 74 | run: cargo install cargo-xtask 75 | 76 | - name: Generate lcov code coverage 77 | if: success() && steps.findPr.outputs.number && steps.extractBranch.outputs.branch 78 | run: cargo xtask coverage 79 | env: 80 | RUST_BACKTRACE: 1 81 | SKIP_WASM_BUILD: 1 82 | 83 | - name: Generate coverage summary report 84 | if: success() && steps.findPr.outputs.number && steps.extractBranch.outputs.branch 85 | uses: irongut/CodeCoverageSummary@v1.3.0 86 | with: 87 | filename: target/cov.xml 88 | badge: true 89 | format: markdown 90 | hide_branch_rate: false 91 | hide_complexity: true 92 | indicators: true 93 | output: both 94 | 95 | - name: Generate html code coverage 96 | if: success() && steps.findPr.outputs.number && steps.extractBranch.outputs.branch 97 | run: cargo xtask coverage --html 98 | env: 99 | RUST_BACKTRACE: 1 100 | SKIP_WASM_BUILD: 1 101 | 102 | - name: Upload html report to S3 Bucket 103 | if: success() && steps.findPr.outputs.number && steps.extractBranch.outputs.branch 104 | id: htmlUpload 105 | continue-on-error: true 106 | run: | 107 | aws --endpoint-url $ENDPOINT s3 sync 
./target/llvm-cov/html s3://$BUCKET_NAME/$BRANCH --acl public-read 108 | echo "link=https://$BUCKET_NAME.$REGION.$ENDPOINT_DOMAIN/$BRANCH/index.html" >> $GITHUB_OUTPUT 109 | env: 110 | BRANCH: ${{ steps.extractBranch.outputs.branch }} 111 | ENDPOINT: ${{ vars.COV_ENDPOINT_URL }} 112 | BUCKET_NAME: ${{ vars.COV_BUCKET_NAME }} 113 | AWS_ACCESS_KEY_ID: ${{ secrets.COV_AWS_ACCESS_KEY_ID }} 114 | AWS_SECRET_ACCESS_KEY: ${{ secrets.COV_AWS_SECRET_ACCESS_KEY }} 115 | AWS_DEFAULT_REGION: ${{ vars.COV_DEFAULT_REGION }} 116 | REGION: nyc3 117 | ENDPOINT_DOMAIN: "digitaloceanspaces.com" 118 | 119 | - name: Add coverage PR report comment 120 | if: success() && steps.findPr.outputs.number 121 | uses: marocchino/sticky-pull-request-comment@v2 122 | with: 123 | header: report 124 | number: ${{ steps.findPr.outputs.pr }} 125 | recreate: true 126 | path: code-coverage-results.md 127 | 128 | - name: Add coverage PR html comment 129 | if: success() && steps.findPr.outputs.number 130 | uses: marocchino/sticky-pull-request-comment@v2 131 | with: 132 | header: html 133 | number: ${{ steps.findPr.outputs.pr }} 134 | recreate: true 135 | message: | 136 | [Detailed coverage report](${{ steps.htmlUpload.outputs.link }}) 137 | -------------------------------------------------------------------------------- /docs/changelog_prompt.md: -------------------------------------------------------------------------------- 1 | # Substrate Runtime Interface Changelog Prompt 2 | 3 | First, start by reading the output for: 4 | ```bash 5 | ./scripts/diff-previous-tag.sh > temp/diff.diff 6 | ``` 7 | 8 | This automatically finds the correct previous runtime tag and generates the diff. Read `temp/diff.diff` repeatedly until you reach the end of the changes. Given the paths and line numbers, read the files on those offsets for _all_ Rust-related channges, create todos for each of the topics of change. Begin writing the change to `docs/changes/spec-.md`. 
First Generate a structured changelog of **all interface changes**: 9 | 10 | 1. **Extrinsics** (dispatchable calls) 11 | 2. **Events** 12 | 3. **Storage items** (on‑chain values affecting metadata and client reads) 13 | 4. **Structs & Enums** (types deriving `Encode` or `Decode`) 14 | 5. **Behavior changes** (new mechanisms or runtime‑level behavior) 15 | 16 | For each change in categories 1–4, output a markdown list entry with: 17 | 18 | - A **git diff hunk** snippet showing the exact additions and deletions. 19 | - A **brief paragraph** combining conceptual change and motivation. 20 | 21 | Follow these rules: 22 | 23 | **A. Pallet discovery** 24 | 25 | - Look under `runtime/pallets/` (or project root `pallets/`) and in `runtime/src/lib.rs` where pallets are `construct_runtime!`‑ed. 26 | 27 | **B. Extrinsics** 28 | 29 | - Diff each pallet’s `pub enum Call` or FRAME v2 `#[pallet::call]` items (additions/removals/arg‑type changes). This is VERY important. Pay attention to all extrinsics and functions annotated with `#[pallet::call_index(...)]`. 30 | 31 | **C. Events** 32 | 33 | - Diff `pub enum Event` or FRAME v2 `#[pallet::event]` variants. 34 | 35 | **D. Storage** 36 | 37 | - For FRAME v2: diff each `#[pallet::storage]` block. 38 | - For FRAME v1: diff inside `decl_storage! {}` macros. 39 | 40 | **E. Structs & Enums** 41 | 42 | - Identify types with `#[derive(..., Encode, Decode, ...)]`. 43 | - Only record new/deleted/renamed fields or type changes. Fields unchanged may be elided using `...`. 44 | 45 | **F. Errors** 46 | 47 | - **Exclude** new errors; do not list `#[pallet::error]` changes. 48 | 49 | **G. Format specifics** 50 | 51 | - Use **git diff style** for all snippets (`+`/`-`). 52 | - Combine conceptual change and motivation in one paragraph following the snippet. 53 | - Omit separate "Conceptual change" and "Motivation" headings. 54 | 55 | **H. Behavior Changes** 56 | 57 | - At the end of the output file, append a section titled `## Behavior Changes`. 
58 | - To extract behavior changes: 59 | 1. For each changed extrinsic, locate its implementation function in the pallet's `#[pallet::call]` section (commonly in `src/lib.rs` or `src/call.rs`). 60 | 2. Diff the function body to show key logic additions/removals affecting runtime behavior. 61 | 3. Summarize how the code works, noting important branches, side effects, and state mutations. 62 | - For each behavior change or new mechanism, add: 63 | 64 | ```markdown 65 | ### 66 | 67 | **What changed**: [Technical description of the code change] 68 | 69 | **Why it matters**: [Impact on users, validators, miners, or network economics] 70 | 71 | **Migration needed**: [Any action required from network participants] 72 | 73 | *Tests*: Reference specific test functions that validate this behavior. 74 | *Cross-pallet impact*: Note any effects on other pallets via API calls. 75 | ``` 76 | 77 | **I. Analysis Setup** 78 | 79 | - **Test-driven behavior analysis**: Look for test changes in `tests/` directories and `_test.rs` files to understand new behaviors, modified logic, and real-world usage patterns. Check integration tests in `pallets/*/tests/` for cross-pallet dependencies and API trait usage. 80 | - **Migration context**: Check `pallets/*/src/migrations.rs` files for storage schema changes, data transformations, and version increments indicating breaking changes. 81 | - **Torus-specific patterns**: Focus on domain module changes (agent.rs, stake.rs, etc.), generic function patterns `pub fn name()`, API trait implementations, and new `saturating_*` arithmetic operations. 82 | 83 | --- 84 | 85 | ### Example Entry (escaped codeblock) 86 | 87 | ```markdown 88 | - \```diff 89 | + pub enum Call { 90 | + reserve_collateral { who: T::AccountId, amount: BalanceOf }, 91 | \``` 92 | Introduces `reserve_collateral` to lock collateral separately from other operations, allowing finer‑grained control in multi‑step workflows. See [](#BehaviorName). 
93 | 94 | - \```diff 95 | + #[pallet::event] 96 | + pub enum Event { 97 | + CollateralReserved(T::AccountId, BalanceOf), 98 | \``` 99 | Emits an event when collateral is reserved, enabling front‑ends and indexers to track reservations in real time. See [](#BehaviorName). 100 | ``` 101 | 102 | Only add See if this change is related to a behavior change. 103 | -------------------------------------------------------------------------------- /client/codegen/src/codegen/mod.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | 3 | use proc_macro2::TokenStream; 4 | use quote::{format_ident, quote}; 5 | use stringcase::pascal_case; 6 | 7 | use crate::{ 8 | InterfaceSource, 9 | codegen::{calls::generate_pallet_calls, storage::generate_pallet_storage}, 10 | ir::PalletPattern, 11 | }; 12 | 13 | mod calls; 14 | mod storage; 15 | 16 | pub fn generate_wrappers_for_network( 17 | mainnet_pallets: &[PalletPattern], 18 | testnet_pallets: &[PalletPattern], 19 | devnet_pallets: &[PalletPattern], 20 | ) -> TokenStream { 21 | let mut chained_pallet_names = mainnet_pallets 22 | .iter() 23 | .chain(testnet_pallets) 24 | .map(|p| p.name.to_string()) 25 | .collect::>() 26 | .into_iter() 27 | .collect::>(); 28 | chained_pallet_names.sort(); 29 | 30 | let clients = generate_client_structs(&chained_pallet_names); 31 | 32 | let mainnet_pallets: Vec = mainnet_pallets 33 | .iter() 34 | .map(|pattern| generate_pallet_mod(&InterfaceSource::Mainnet, pattern)) 35 | .collect(); 36 | 37 | let testnet_pallets: Vec = testnet_pallets 38 | .iter() 39 | .map(|pattern| generate_pallet_mod(&InterfaceSource::Testnet, pattern)) 40 | .collect(); 41 | 42 | let devnet_pallets: Vec = devnet_pallets 43 | .iter() 44 | .map(|pattern| generate_pallet_mod(&InterfaceSource::Devnet, pattern)) 45 | .collect(); 46 | 47 | quote! { 48 | //! Generated storage wrapper functions 49 | //! 50 | //! 
This module provides ergonomic access to Substrate storage items. 51 | //! Functions are automatically generated from the subxt API metadata. 52 | 53 | #![allow(dead_code)] 54 | 55 | // use std::collections::HashMap; 56 | use subxt::{OnlineClient, PolkadotConfig, utils::H256}; 57 | use codec::Decode; 58 | use futures::{Stream, StreamExt, TryStreamExt}; 59 | use std::marker::PhantomData; 60 | // #api_import 61 | 62 | #clients 63 | 64 | #[cfg(feature = "mainnet")] 65 | pub mod mainnet { 66 | use super::*; 67 | use crate::interfaces::mainnet::api::runtime_types; 68 | 69 | #(#mainnet_pallets)* 70 | } 71 | 72 | #[cfg(feature = "testnet")] 73 | pub mod testnet { 74 | use super::*; 75 | use crate::interfaces::testnet::api::runtime_types; 76 | 77 | #(#testnet_pallets)* 78 | } 79 | 80 | #[cfg(feature = "devnet")] 81 | pub mod devnet { 82 | use super::*; 83 | use crate::interfaces::devnet::api::runtime_types; 84 | 85 | #(#devnet_pallets)* 86 | } 87 | } 88 | } 89 | 90 | fn generate_client_structs(pallet_names: &[String]) -> TokenStream { 91 | let (struct_idents, storage_struct_idents, calls_struct_idents, method_idents) = pallet_names 92 | .iter() 93 | .map(|name| { 94 | ( 95 | format_ident!("{}", pascal_case(&format!("{name}_client"))), 96 | format_ident!("{}", pascal_case(&format!("{name}_storage"))), 97 | format_ident!("{}", pascal_case(&format!("{name}_calls"))), 98 | format_ident!("{}", name), 99 | ) 100 | }) 101 | .collect::<(Vec<_>, Vec<_>, Vec<_>, Vec<_>)>(); 102 | 103 | quote! 
{ 104 | #( 105 | #[derive(Clone)] 106 | pub struct #struct_idents { 107 | pub(crate) client: OnlineClient, 108 | pub(crate) _pd: PhantomData 109 | } 110 | 111 | impl crate::client::TorusClient { 112 | pub fn #method_idents(&self) -> #struct_idents { 113 | #struct_idents { 114 | client: self.client.clone(), 115 | _pd: PhantomData 116 | } 117 | } 118 | } 119 | 120 | #[derive(Clone)] 121 | pub struct #storage_struct_idents { 122 | pub(crate) client: OnlineClient, 123 | pub(crate) block: Option, 124 | pub(crate) _pd: PhantomData, 125 | } 126 | 127 | #[derive(Clone)] 128 | pub struct #calls_struct_idents { 129 | pub(crate) client: OnlineClient, 130 | pub(crate) _pd: PhantomData, 131 | } 132 | )* 133 | } 134 | } 135 | 136 | fn generate_pallet_mod(network: &InterfaceSource, pallet: &PalletPattern) -> TokenStream { 137 | let pallet_name = format_ident!("{}", pallet.name); 138 | 139 | let storage = generate_pallet_storage(network, pallet); 140 | let calls = generate_pallet_calls(network, pallet); 141 | 142 | if storage.is_none() && calls.is_none() { 143 | return quote! {}; 144 | } 145 | 146 | quote! 
{ 147 | pub mod #pallet_name { 148 | use super::*; 149 | 150 | #storage 151 | 152 | #calls 153 | 154 | } 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /pallets/permission0/tests/curator.rs: -------------------------------------------------------------------------------- 1 | use pallet_permission0::{Config, CuratorPermissions, Error, Pallet, PermissionId}; 2 | use pallet_permission0_api::{CuratorPermissions as ApiCuratorPermissions, Permission0CuratorApi}; 3 | use polkadot_sdk::{ 4 | frame_support::{assert_err, dispatch::DispatchResult}, 5 | frame_system::RawOrigin, 6 | polkadot_sdk_frame::prelude::OriginFor, 7 | sp_runtime::BoundedBTreeMap, 8 | }; 9 | use test_utils::*; 10 | 11 | fn ensure_curator(origin: OriginFor, flags: CuratorPermissions) -> DispatchResult { 12 | Pallet::::ensure_curator_permission( 13 | origin, 14 | ApiCuratorPermissions::from_bits_retain(flags.bits()), 15 | )?; 16 | Ok(()) 17 | } 18 | 19 | fn root_permissions( 20 | flags: CuratorPermissions, 21 | ) -> BoundedBTreeMap< 22 | Option, 23 | u32, 24 | ::MaxCuratorSubpermissionsPerPermission, 25 | > { 26 | let mut map = BoundedBTreeMap::new(); 27 | map.try_insert(None, flags.bits()).unwrap(); 28 | map 29 | } 30 | 31 | #[test] 32 | fn delegate_curator_permission_correctly() { 33 | new_test_ext().execute_with(|| { 34 | assert_err!( 35 | Permission0::delegate_curator_permission( 36 | RawOrigin::Signed(0).into(), 37 | 1, 38 | root_permissions(CuratorPermissions::all()), 39 | None, 40 | pallet_permission0::PermissionDuration::Indefinite, 41 | pallet_permission0::RevocationTerms::Irrevocable, 42 | 1 43 | ), 44 | Error::::NotPermissionRecipient 45 | ); 46 | 47 | let existing_curator = 1; 48 | delegate_curator_permission(existing_curator, CuratorPermissions::all(), None); 49 | 50 | assert_err!( 51 | Permission0::delegate_curator_permission( 52 | RawOrigin::Root.into(), 53 | existing_curator, 54 | root_permissions(CuratorPermissions::all()), 55 | None, 
56 | pallet_permission0::PermissionDuration::Indefinite, 57 | pallet_permission0::RevocationTerms::Irrevocable, 58 | 1 59 | ), 60 | Error::::DuplicatePermissionInBlock 61 | ); 62 | 63 | step_block(1); 64 | 65 | assert_ok!(Permission0::delegate_curator_permission( 66 | RawOrigin::Root.into(), 67 | existing_curator, 68 | root_permissions(CuratorPermissions::all()), 69 | None, 70 | pallet_permission0::PermissionDuration::Indefinite, 71 | pallet_permission0::RevocationTerms::Irrevocable, 72 | 1 73 | )); 74 | 75 | let key = 0; 76 | 77 | assert_err!( 78 | Permission0::delegate_curator_permission( 79 | RawOrigin::Root.into(), 80 | key, 81 | root_permissions(CuratorPermissions::from_bits_retain( 82 | pallet_permission0::CuratorPermissions::ROOT.bits() 83 | )), 84 | None, 85 | pallet_permission0::PermissionDuration::Indefinite, 86 | pallet_permission0::RevocationTerms::Irrevocable, 87 | 1 88 | ), 89 | Error::::InvalidCuratorPermissions 90 | ); 91 | }); 92 | } 93 | 94 | #[test] 95 | fn ensure_curator_handles_curator_permissions() { 96 | new_test_ext().execute_with(|| { 97 | assert_ok!(ensure_curator( 98 | RawOrigin::Root.into(), 99 | CuratorPermissions::all() 100 | )); 101 | 102 | let key = 0; 103 | assert_err!( 104 | ensure_curator( 105 | RawOrigin::Signed(key).into(), 106 | CuratorPermissions::WHITELIST_MANAGE 107 | ), 108 | Error::::PermissionNotFound 109 | ); 110 | 111 | delegate_curator_permission(key, CuratorPermissions::WHITELIST_MANAGE, None); 112 | assert_ok!(ensure_curator( 113 | RawOrigin::Signed(key).into(), 114 | CuratorPermissions::WHITELIST_MANAGE 115 | )); 116 | 117 | for flags in [ 118 | CuratorPermissions::APPLICATION_REVIEW, 119 | CuratorPermissions::PENALTY_CONTROL, 120 | ] { 121 | assert_err!( 122 | ensure_curator(RawOrigin::Signed(key).into(), flags), 123 | Error::::PermissionNotFound 124 | ); 125 | } 126 | }); 127 | } 128 | 129 | #[test] 130 | fn ensure_curator_handles_cooldown_correctly() { 131 | new_test_ext().execute_with(|| { 132 | let key = 0; 133 
| delegate_curator_permission(key, CuratorPermissions::WHITELIST_MANAGE, Some(10)); 134 | let permission_id = Pallet::::get_curator_permission(&key).unwrap(); 135 | 136 | assert_ok!(ensure_curator( 137 | RawOrigin::Signed(key).into(), 138 | CuratorPermissions::WHITELIST_MANAGE 139 | )); 140 | 141 | Pallet::::execute_permission(RawOrigin::Root.into(), permission_id).unwrap(); 142 | 143 | assert_err!( 144 | ensure_curator( 145 | RawOrigin::Signed(key).into(), 146 | CuratorPermissions::WHITELIST_MANAGE 147 | ), 148 | Error::::PermissionInCooldown 149 | ); 150 | 151 | step_block(10); 152 | 153 | assert_ok!(ensure_curator( 154 | RawOrigin::Signed(key).into(), 155 | CuratorPermissions::WHITELIST_MANAGE 156 | )); 157 | }); 158 | } 159 | -------------------------------------------------------------------------------- /.maintain/frame-weight-template.hbs: -------------------------------------------------------------------------------- 1 | {{header}} 2 | //! Autogenerated weights for `{{pallet}}` 3 | //! 4 | //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} 5 | //! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: `{{cmd.repeat}}`, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}` 6 | //! WORST CASE MAP SIZE: `{{cmd.worst_case_map_values}}` 7 | //! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` 8 | //! WASM-EXECUTION: `{{cmd.wasm_execution}}`, CHAIN: `{{cmd.chain}}`, DB CACHE: `{{cmd.db_cache}}` 9 | 10 | // Executed Command: 11 | {{#each args as |arg|}} 12 | // {{arg}} 13 | {{/each}} 14 | 15 | #![cfg_attr(rustfmt, rustfmt_skip)] 16 | #![allow(unused_parens)] 17 | #![allow(unused_imports)] 18 | #![allow(missing_docs)] 19 | 20 | use polkadot_sdk::{ 21 | frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}, 22 | *, 23 | }; 24 | use core::marker::PhantomData; 25 | 26 | /// Weight functions needed for `{{pallet}}`. 
27 | pub trait WeightInfo { 28 | {{#each benchmarks as |benchmark|}} 29 | fn {{benchmark.name~}} 30 | ( 31 | {{~#each benchmark.components as |c| ~}} 32 | {{c.name}}: u32, {{/each~}} 33 | ) -> Weight; 34 | {{/each}} 35 | } 36 | 37 | /// Weights for `{{pallet}}` using the Substrate node and recommended hardware. 38 | pub struct SubstrateWeight(PhantomData); 39 | {{#if (eq pallet "frame_system")}} 40 | impl WeightInfo for SubstrateWeight { 41 | {{else}} 42 | impl WeightInfo for SubstrateWeight { 43 | {{/if}} 44 | {{#each benchmarks as |benchmark|}} 45 | {{#each benchmark.comments as |comment|}} 46 | /// {{comment}} 47 | {{/each}} 48 | {{#each benchmark.component_ranges as |range|}} 49 | /// The range of component `{{range.name}}` is `[{{range.min}}, {{range.max}}]`. 50 | {{/each}} 51 | fn {{benchmark.name~}} 52 | ( 53 | {{~#each benchmark.components as |c| ~}} 54 | {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} 55 | ) -> Weight { 56 | // Proof Size summary in bytes: 57 | // Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` 58 | // Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` 59 | // Minimum execution time: {{underscore benchmark.min_execution_time}}_000 picoseconds. 
60 | Weight::from_parts({{underscore benchmark.base_weight}}, {{benchmark.base_calculated_proof_size}}) 61 | {{#each benchmark.component_weight as |cw|}} 62 | // Standard Error: {{underscore cw.error}} 63 | .saturating_add(Weight::from_parts({{underscore cw.slope}}, 0).saturating_mul({{cw.name}}.into())) 64 | {{/each}} 65 | {{#if (ne benchmark.base_reads "0")}} 66 | .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}}_u64)) 67 | {{/if}} 68 | {{#each benchmark.component_reads as |cr|}} 69 | .saturating_add(T::DbWeight::get().reads(({{cr.slope}}_u64).saturating_mul({{cr.name}}.into()))) 70 | {{/each}} 71 | {{#if (ne benchmark.base_writes "0")}} 72 | .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}}_u64)) 73 | {{/if}} 74 | {{#each benchmark.component_writes as |cw|}} 75 | .saturating_add(T::DbWeight::get().writes(({{cw.slope}}_u64).saturating_mul({{cw.name}}.into()))) 76 | {{/each}} 77 | {{#each benchmark.component_calculated_proof_size as |cp|}} 78 | .saturating_add(Weight::from_parts(0, {{cp.slope}}).saturating_mul({{cp.name}}.into())) 79 | {{/each}} 80 | } 81 | {{/each}} 82 | } 83 | 84 | // For backwards compatibility and tests. 85 | impl WeightInfo for () { 86 | {{#each benchmarks as |benchmark|}} 87 | {{#each benchmark.comments as |comment|}} 88 | /// {{comment}} 89 | {{/each}} 90 | {{#each benchmark.component_ranges as |range|}} 91 | /// The range of component `{{range.name}}` is `[{{range.min}}, {{range.max}}]`. 
92 | {{/each}} 93 | fn {{benchmark.name~}} 94 | ( 95 | {{~#each benchmark.components as |c| ~}} 96 | {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} 97 | ) -> Weight { 98 | // Proof Size summary in bytes: 99 | // Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` 100 | // Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` 101 | // Minimum execution time: {{underscore benchmark.min_execution_time}}_000 picoseconds. 102 | Weight::from_parts({{underscore benchmark.base_weight}}, {{benchmark.base_calculated_proof_size}}) 103 | {{#each benchmark.component_weight as |cw|}} 104 | // Standard Error: {{underscore cw.error}} 105 | .saturating_add(Weight::from_parts({{underscore cw.slope}}, 0).saturating_mul({{cw.name}}.into())) 106 | {{/each}} 107 | {{#if (ne benchmark.base_reads "0")}} 108 | .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}}_u64)) 109 | {{/if}} 110 | {{#each benchmark.component_reads as |cr|}} 111 | .saturating_add(RocksDbWeight::get().reads(({{cr.slope}}_u64).saturating_mul({{cr.name}}.into()))) 112 | {{/each}} 113 | {{#if (ne benchmark.base_writes "0")}} 114 | .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}}_u64)) 115 | {{/if}} 116 | {{#each benchmark.component_writes as |cw|}} 117 | .saturating_add(RocksDbWeight::get().writes(({{cw.slope}}_u64).saturating_mul({{cw.name}}.into()))) 118 | {{/each}} 119 | {{#each benchmark.component_calculated_proof_size as |cp|}} 120 | .saturating_add(Weight::from_parts(0, {{cp.slope}}).saturating_mul({{cp.name}}.into())) 121 | {{/each}} 122 | } 123 | {{/each}} 124 | } 125 | --------------------------------------------------------------------------------