├── .devcontainer └── devcontainer.json ├── .dockerignore ├── .editorconfig ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.yaml │ └── feature_request.yaml ├── pull_request_template.md └── workflows │ ├── benchmark-weights.yml │ ├── check-devnet.yml │ ├── check-finney.yml │ ├── check-rust.yml │ ├── check-testnet.yml │ ├── docker.yml │ ├── e2e-bittensor-tests.yml │ ├── label-triggers.yml │ └── update-chainspec.yml ├── .gitignore ├── .maintain └── frame-weight-template.hbs ├── .rustfmt.toml ├── .vscode └── tasks.json ├── CODEOWNERS ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── build.rs ├── docker-compose.yml ├── docs ├── consensus.md ├── delegate-info.json ├── img │ ├── bonds_penalty_0.svg │ ├── bonds_penalty_100.svg │ ├── bonds_penalty_50.svg │ ├── consensus_plots.png │ ├── emission-60.svg │ ├── emission-70.svg │ ├── kappa_40.svg │ ├── kappa_50.svg │ ├── kappa_60.svg │ ├── retention-lines.svg │ ├── validator_emission_0.svg │ ├── validator_emission_25.svg │ ├── validator_emission_50.svg │ ├── weights_stddev_0.svg │ ├── weights_stddev_20.svg │ └── weights_stddev_40.svg ├── running-subtensor-locally.md └── rust-setup.md ├── hyperparameters.md ├── justfile ├── nakamoto_gen.json ├── node ├── Cargo.toml ├── build.rs ├── src │ ├── benchmarking.rs │ ├── chain_spec │ │ ├── finney.rs │ │ ├── localnet.rs │ │ ├── mod.rs │ │ └── testnet.rs │ ├── cli.rs │ ├── command.rs │ ├── lib.rs │ ├── main.rs │ ├── rpc.rs │ └── service.rs └── tests │ └── chain_spec.rs ├── pallets ├── admin-utils │ ├── Cargo.toml │ ├── scripts │ │ └── benchmark.sh │ ├── src │ │ ├── benchmarking.rs │ │ ├── lib.rs │ │ └── weights.rs │ └── tests │ │ ├── mock.rs │ │ └── tests.rs ├── collective │ ├── Cargo.toml │ ├── README.md │ └── src │ │ ├── benchmarking.rs │ │ ├── lib.rs │ │ ├── tests.rs │ │ └── weights.rs ├── commitments │ ├── Cargo.toml │ ├── scripts │ │ └── benchmark.sh │ └── src │ │ ├── benchmarking.rs │ │ ├── lib.rs │ │ ├── mock.rs │ │ ├── tests.rs │ │ ├── 
types.rs │ │ └── weights.rs ├── registry │ ├── Cargo.toml │ ├── scripts │ │ └── benchmark.sh │ └── src │ │ ├── benchmarking.rs │ │ ├── lib.rs │ │ ├── mock.rs │ │ ├── tests.rs │ │ ├── types.rs │ │ └── weights.rs └── subtensor │ ├── Cargo.toml │ ├── rpc │ ├── Cargo.toml │ └── src │ │ └── lib.rs │ ├── runtime-api │ ├── Cargo.toml │ └── src │ │ └── lib.rs │ ├── src │ ├── benchmarks.rs │ ├── coinbase │ │ ├── block_step.rs │ │ ├── mod.rs │ │ ├── root.rs │ │ └── run_coinbase.rs │ ├── epoch │ │ ├── math.rs │ │ ├── mod.rs │ │ └── run_epoch.rs │ ├── lib.rs │ ├── macros │ │ ├── config.rs │ │ ├── dispatches.rs │ │ ├── errors.rs │ │ ├── events.rs │ │ ├── genesis.rs │ │ ├── hooks.rs │ │ └── mod.rs │ ├── migrations │ │ ├── migrate_chain_identity.rs │ │ ├── migrate_create_root_network.rs │ │ ├── migrate_delete_subnet_21.rs │ │ ├── migrate_delete_subnet_3.rs │ │ ├── migrate_fix_pending_emission.rs │ │ ├── migrate_fix_total_coldkey_stake.rs │ │ ├── migrate_init_total_issuance.rs │ │ ├── migrate_populate_owned_hotkeys.rs │ │ ├── migrate_populate_staking_hotkeys.rs │ │ ├── migrate_to_v1_separate_emission.rs │ │ ├── migrate_to_v2_fixed_total_stake.rs │ │ ├── migrate_total_issuance.rs │ │ ├── migrate_transfer_ownership_to_foundation.rs │ │ └── mod.rs │ ├── rpc_info │ │ ├── delegate_info.rs │ │ ├── mod.rs │ │ ├── neuron_info.rs │ │ ├── stake_info.rs │ │ └── subnet_info.rs │ ├── staking │ │ ├── add_stake.rs │ │ ├── become_delegate.rs │ │ ├── decrease_take.rs │ │ ├── helpers.rs │ │ ├── increase_take.rs │ │ ├── mod.rs │ │ ├── remove_stake.rs │ │ └── set_children.rs │ ├── subnets │ │ ├── mod.rs │ │ ├── registration.rs │ │ ├── serving.rs │ │ ├── uids.rs │ │ └── weights.rs │ ├── swap │ │ ├── mod.rs │ │ ├── swap_coldkey.rs │ │ └── swap_hotkey.rs │ └── utils │ │ ├── identity.rs │ │ ├── misc.rs │ │ ├── mod.rs │ │ ├── rate_limiting.rs │ │ └── try_state.rs │ └── tests │ ├── batch_tx.rs │ ├── children.rs │ ├── coinbase.rs │ ├── difficulty.rs │ ├── epoch.rs │ ├── math.rs │ ├── migration.rs │ ├── 
mock.rs │ ├── networks.rs │ ├── neuron_info.rs │ ├── registration.rs │ ├── root.rs │ ├── senate.rs │ ├── serving.rs │ ├── staking.rs │ ├── swap_coldkey.rs │ ├── swap_hotkey.rs │ ├── uids.rs │ └── weights.rs ├── plain_spec_finney.json ├── plain_spec_testfinney.json ├── raw_spec_finney.json ├── raw_spec_testfinney.json ├── recipe.json ├── runtime ├── Cargo.toml ├── build.rs ├── src │ ├── check_nonce.rs │ ├── lib.rs │ ├── migrations │ │ └── mod.rs │ └── spec_version.rs └── tests │ ├── metadata.rs │ └── pallet_proxy.rs ├── rust-toolchain.toml ├── rustfmt.toml ├── scripts ├── benchmark.sh ├── benchmark_all.sh ├── build.sh ├── build_all_chainspecs.sh ├── code-coverage.sh ├── init.sh ├── install_rust.sh ├── localnet.sh ├── publish.sh ├── run │ └── subtensor.sh └── test_specific.sh ├── shell.nix ├── snapshot.json ├── src └── lib.rs ├── support ├── linting │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ ├── lint.rs │ │ └── require_freeze_struct.rs ├── macros │ ├── Cargo.toml │ ├── src │ │ ├── lib.rs │ │ └── visitor.rs │ └── tests │ │ └── tests.rs └── tools │ ├── Cargo.toml │ └── src │ └── bump_version.rs └── zepter.yaml /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Substrate Node template", 3 | "context": "..", 4 | "settings": { 5 | "terminal.integrated.shell.linux": "/bin/bash", 6 | "lldb.executable": "/usr/bin/lldb" 7 | }, 8 | "extensions": [ 9 | "rust-lang.rust", 10 | "bungcip.better-toml", 11 | "vadimcn.vscode-lldb" 12 | ], 13 | "forwardPorts": [ 14 | 3000, 15 | 9944 16 | ], 17 | "onCreateCommand": ["cargo build", "cargo check"], 18 | "postStartCommand": "./target/debug/node-template --dev --ws-external", 19 | "menuActions": [ 20 | {"id": "polkadotjs", 21 | "label": "Open PolkadotJS Apps", 22 | "type": "external-preview", 23 | "args": ["https://polkadot.js.org/apps/?rpc=wss%3A%2F%2F/$HOST/wss"]} 24 | ] 25 | } 26 | 
-------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .devcontainer 2 | .github 3 | .vscode 4 | target/ 5 | .dockerignore 6 | Dockerfile 7 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_style=space 5 | indent_size=2 6 | tab_width=2 7 | end_of_line=lf 8 | charset=utf-8 9 | trim_trailing_whitespace=true 10 | insert_final_newline = true 11 | 12 | [*.{rs,toml}] 13 | indent_style=tab 14 | indent_size=tab 15 | tab_width=4 16 | max_line_length=100 17 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yaml: -------------------------------------------------------------------------------- 1 | name: Bug report 2 | description: Create a report to help us improve 3 | labels: [bug] 4 | assignees: [] 5 | 6 | body: 7 | - type: textarea 8 | id: bug-description 9 | attributes: 10 | label: Describe the bug 11 | description: A clear and concise description of what the bug is. 12 | validations: 13 | required: true 14 | 15 | - type: textarea 16 | id: reproduce 17 | attributes: 18 | label: To Reproduce 19 | description: Steps to reproduce the behavior. 20 | placeholder: | 21 | 1. Go to '...' 22 | 2. Run command '...' 23 | 3. Scroll down to '....' 24 | 4. See error 25 | validations: 26 | required: true 27 | 28 | - type: textarea 29 | id: expected-behavior 30 | attributes: 31 | label: Expected behavior 32 | description: A clear and concise description of what you expected to happen. 33 | validations: 34 | required: true 35 | 36 | - type: textarea 37 | id: screenshots 38 | attributes: 39 | label: Screenshots 40 | description: If applicable, add screenshots to help explain your problem. 
41 | validations: 42 | required: false 43 | 44 | - type: input 45 | id: environment 46 | attributes: 47 | label: Environment 48 | description: Please specify your OS and Distro, and Bittensor Version. 49 | placeholder: "OS and Distro: [e.g. Linux Ubuntu, Linux Fedora, etc.], Bittensor Version [e.g. 22]" 50 | validations: 51 | required: true 52 | 53 | - type: textarea 54 | id: additional-context 55 | attributes: 56 | label: Additional context 57 | description: Add any other context about the problem here. 58 | validations: 59 | required: false 60 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yaml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: Suggest an idea for this project 3 | labels: [feature] 4 | assignees: [] 5 | 6 | body: 7 | - type: textarea 8 | id: problem-description 9 | attributes: 10 | label: Is your feature request related to a problem? Please describe. 11 | description: A clear and concise description of what the problem is. 12 | placeholder: "Ex. I'm always frustrated when [...]" 13 | validations: 14 | required: true 15 | 16 | - type: textarea 17 | id: solution 18 | attributes: 19 | label: Describe the solution you'd like 20 | description: A clear and concise description of what you want to happen. 21 | validations: 22 | required: true 23 | 24 | - type: textarea 25 | id: alternatives 26 | attributes: 27 | label: Describe alternatives you've considered 28 | description: A clear and concise description of any alternative solutions or features you've considered. 29 | validations: 30 | required: false 31 | 32 | - type: textarea 33 | id: additional-context 34 | attributes: 35 | label: Additional context 36 | description: Add any other context or screenshots about the feature request here. 
37 | validations: 38 | required: false 39 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 5 | 6 | 7 | ## Related Issue(s) 8 | 9 | - Closes #[issue number] 10 | 11 | ## Type of Change 12 | 15 | 16 | - [ ] Bug fix (non-breaking change which fixes an issue) 17 | - [ ] New feature (non-breaking change which adds functionality) 18 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 19 | - [ ] Documentation update 20 | - [ ] Other (please describe): 21 | 22 | ## Breaking Change 23 | 24 | If this PR introduces a breaking change, please provide a detailed description of the impact and the migration path for existing applications. 25 | 26 | ## Checklist 27 | 28 | 31 | 32 | - [ ] I have performed a self-review of my own code 33 | - [ ] I have commented my code, particularly in hard-to-understand areas 34 | - [ ] I have run `cargo fmt` and `cargo clippy` to ensure my code is formatted and linted correctly 35 | - [ ] I have made corresponding changes to the documentation 36 | - [ ] My changes generate no new warnings 37 | - [ ] I have added tests that prove my fix is effective or that my feature works 38 | - [ ] New and existing unit tests pass locally with my changes 39 | - [ ] Any dependent changes have been merged and published in downstream modules 40 | 41 | ## Screenshots (if applicable) 42 | 43 | Please include any relevant screenshots or GIFs that demonstrate the changes made. 44 | 45 | ## Additional Notes 46 | 47 | Please provide any additional information or context that may be helpful for reviewers. 
-------------------------------------------------------------------------------- /.github/workflows/benchmark-weights.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blixor/subtensor/aa521f46fab9ba4702c752792ba4f5eb38df39fd/.github/workflows/benchmark-weights.yml -------------------------------------------------------------------------------- /.github/workflows/check-devnet.yml: -------------------------------------------------------------------------------- 1 | name: Devnet Deploy Check 2 | 3 | on: 4 | pull_request: 5 | branches: [devnet, devnet-ready] 6 | 7 | env: 8 | CARGO_TERM_COLOR: always 9 | 10 | jobs: 11 | check-spec-version: 12 | name: Check spec_version bump 13 | runs-on: SubtensorCI 14 | if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-spec-version-bump') }} 15 | steps: 16 | - name: Dependencies 17 | run: | 18 | sudo apt-get update && 19 | sudo apt-get install -y curl clang curl libssl-dev llvm \ 20 | libudev-dev protobuf-compiler 21 | 22 | - name: Set up Rust Toolchain 23 | run: curl https://sh.rustup.rs -sSf | sh -s -- -y 24 | 25 | - name: Install substrate-spec-version 26 | run: cargo install substrate-spec-version 27 | 28 | - name: Check-out repository under $GITHUB_WORKSPACE 29 | uses: actions/checkout@v4 30 | 31 | - name: Check that spec_version has been bumped 32 | run: | 33 | spec_version=$(PATH=$PATH:$HOME/.cargo/.bin substrate-spec-version wss://dev.chain.opentensor.ai:443 | tr -d '\n') 34 | echo "network spec_version: $spec_version" 35 | : ${spec_version:?bad spec version} 36 | local_spec_version=$(cargo run -p node-subtensor-runtime --bin spec_version | tr -d '\n') 37 | echo "local spec_version: $local_spec_version" 38 | echo "network spec_version: $spec_version" 39 | if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo "$local_spec_version ≯ $spec_version ❌"; exit 1; fi 40 | echo "$local_spec_version > $spec_version ✅" 41 | 42 | 
check-devnet-migrations: 43 | name: check devnet migrations 44 | runs-on: ubuntu-22.04 45 | steps: 46 | - name: Checkout sources 47 | uses: actions/checkout@v3 48 | 49 | - name: Run Try Runtime Checks 50 | uses: "paritytech/try-runtime-gha@v0.1.0" 51 | with: 52 | runtime-package: "node-subtensor-runtime" 53 | node-uri: "wss://dev.chain.opentensor.ai:443" 54 | checks: "pre-and-post" 55 | extra-args: "--disable-spec-version-check --no-weight-warnings" 56 | -------------------------------------------------------------------------------- /.github/workflows/check-finney.yml: -------------------------------------------------------------------------------- 1 | name: Finney Deploy Check 2 | 3 | on: 4 | pull_request: 5 | branches: [finney, main] 6 | 7 | env: 8 | CARGO_TERM_COLOR: always 9 | 10 | jobs: 11 | check-spec-version: 12 | name: Check spec_version bump 13 | runs-on: SubtensorCI 14 | if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-spec-version-bump') }} 15 | steps: 16 | - name: Dependencies 17 | run: | 18 | sudo apt-get update && 19 | sudo apt-get install -y curl clang curl libssl-dev llvm \ 20 | libudev-dev protobuf-compiler 21 | 22 | - name: Set up Rust Toolchain 23 | run: curl https://sh.rustup.rs -sSf | sh -s -- -y 24 | 25 | - name: Install substrate-spec-version 26 | run: cargo install substrate-spec-version 27 | 28 | - name: Check-out repository under $GITHUB_WORKSPACE 29 | uses: actions/checkout@v4 30 | 31 | - name: Check that spec_version has been bumped 32 | run: | 33 | spec_version=$(PATH=$PATH:$HOME/.cargo/.bin substrate-spec-version ${{ vars.NUCLEUS_ARCHIVE_NODE }} | tr -d '\n') 34 | echo "network spec_version: $spec_version" 35 | : ${spec_version:?bad spec version} 36 | local_spec_version=$(cargo run -p node-subtensor-runtime --bin spec_version | tr -d '\n') 37 | echo "local spec_version: $local_spec_version" 38 | echo "network spec_version: $spec_version" 39 | if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo 
"$local_spec_version ≯ $spec_version ❌"; exit 1; fi 40 | echo "$local_spec_version > $spec_version ✅" 41 | 42 | check-finney-migrations: 43 | name: check finney migrations 44 | runs-on: SubtensorCI 45 | steps: 46 | - name: Checkout sources 47 | uses: actions/checkout@v4 48 | - name: Run Try Runtime Checks 49 | uses: "paritytech/try-runtime-gha@v0.1.0" 50 | with: 51 | runtime-package: "node-subtensor-runtime" 52 | node-uri: ${{ vars.NUCLEUS_ARCHIVE_NODE }} 53 | checks: "pre-and-post" 54 | extra-args: "--disable-spec-version-check --no-weight-warnings" 55 | -------------------------------------------------------------------------------- /.github/workflows/check-testnet.yml: -------------------------------------------------------------------------------- 1 | name: Testnet Deploy Check 2 | 3 | on: 4 | pull_request: 5 | branches: [testnet, testnet-ready] 6 | 7 | env: 8 | CARGO_TERM_COLOR: always 9 | 10 | jobs: 11 | check-spec-version: 12 | name: Check spec_version bump 13 | runs-on: SubtensorCI 14 | if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-spec-version-bump') }} 15 | steps: 16 | - name: Dependencies 17 | run: | 18 | sudo apt-get update && 19 | sudo apt-get install -y curl clang curl libssl-dev llvm \ 20 | libudev-dev protobuf-compiler 21 | 22 | - name: Set up Rust Toolchain 23 | run: curl https://sh.rustup.rs -sSf | sh -s -- -y 24 | 25 | - name: Install substrate-spec-version 26 | run: cargo install substrate-spec-version 27 | 28 | - name: Check-out repository under $GITHUB_WORKSPACE 29 | uses: actions/checkout@v4 30 | 31 | - name: Check that spec_version has been bumped 32 | run: | 33 | spec_version=$(PATH=$PATH:$HOME/.cargo/.bin substrate-spec-version wss://test.finney.opentensor.ai:443 | tr -d '\n') 34 | echo "network spec_version: $spec_version" 35 | : ${spec_version:?bad spec version} 36 | local_spec_version=$(cargo run -p node-subtensor-runtime --bin spec_version | tr -d '\n') 37 | echo "local spec_version: $local_spec_version" 38 | echo 
"network spec_version: $spec_version" 39 | if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo "$local_spec_version ≯ $spec_version ❌"; exit 1; fi 40 | echo "$local_spec_version > $spec_version ✅" 41 | 42 | check-testnet-migrations: 43 | name: check testnet migrations 44 | runs-on: ubuntu-22.04 45 | steps: 46 | - name: Checkout sources 47 | uses: actions/checkout@v3 48 | 49 | - name: Run Try Runtime Checks 50 | uses: "paritytech/try-runtime-gha@v0.1.0" 51 | with: 52 | runtime-package: "node-subtensor-runtime" 53 | node-uri: "wss://test.chain.opentensor.ai:443" 54 | checks: "pre-and-post" 55 | extra-args: "--disable-spec-version-check --no-weight-warnings" 56 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Publish Docker Image 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | permissions: 8 | contents: read 9 | packages: write 10 | actions: read 11 | security-events: write 12 | 13 | jobs: 14 | publish: 15 | runs-on: SubtensorCI 16 | 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v4 20 | 21 | - name: Set up QEMU 22 | uses: docker/setup-qemu-action@v2 23 | 24 | - name: Set up Docker Buildx 25 | uses: docker/setup-buildx-action@v2 26 | 27 | - name: Login to GHCR 28 | uses: docker/login-action@v2 29 | with: 30 | registry: ghcr.io 31 | username: ${{ github.actor }} 32 | password: ${{ secrets.GITHUB_TOKEN }} 33 | 34 | - name: Extract metadata (tags, labels) for Docker 35 | id: meta 36 | uses: docker/metadata-action@v4 37 | with: 38 | images: ghcr.io/${{ github.repository }} 39 | 40 | - name: Build and push Docker image 41 | uses: docker/build-push-action@v4 42 | with: 43 | context: . 
44 | push: true 45 | tags: | 46 | ${{ steps.meta.outputs.tags }} 47 | ghcr.io/${{ github.repository }}:latest 48 | labels: ${{ steps.meta.outputs.labels }} 49 | -------------------------------------------------------------------------------- /.github/workflows/e2e-bittensor-tests.yml: -------------------------------------------------------------------------------- 1 | name: E2E Bittensor Tests 2 | 3 | concurrency: 4 | group: e2e-bittensor-${{ github.ref }} 5 | cancel-in-progress: true 6 | 7 | on: 8 | push: 9 | branches: [main, devnet-ready, devnet, testnet, finney] 10 | 11 | pull_request: 12 | 13 | ## Allow running workflow manually from the Actions tab 14 | workflow_dispatch: 15 | inputs: 16 | verbose: 17 | description: "Output more information when triggered manually" 18 | required: false 19 | default: "" 20 | 21 | env: 22 | CARGO_TERM_COLOR: always 23 | VERBOSE: ${{ github.events.input.verbose }} 24 | 25 | jobs: 26 | run: 27 | runs-on: SubtensorCI 28 | strategy: 29 | matrix: 30 | rust-branch: 31 | - nightly-2024-03-05 32 | rust-target: 33 | - x86_64-unknown-linux-gnu 34 | # - x86_64-apple-darwin 35 | os: 36 | - ubuntu-latest 37 | # - macos-latest 38 | include: 39 | - os: ubuntu-latest 40 | # - os: macos-latest 41 | env: 42 | RELEASE_NAME: development 43 | RUSTV: ${{ matrix.rust-branch }} 44 | RUST_BACKTRACE: full 45 | RUST_BIN_DIR: target/${{ matrix.rust-target }} 46 | TARGET: ${{ matrix.rust-target }} 47 | steps: 48 | - name: Check-out repository under $GITHUB_WORKSPACE 49 | uses: actions/checkout@v2 50 | 51 | - name: Install dependencies 52 | run: | 53 | sudo apt-get update && 54 | sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler 55 | 56 | - name: Install Rust ${{ matrix.rust-branch }} 57 | uses: actions-rs/toolchain@v1.0.6 58 | with: 59 | toolchain: ${{ matrix.rust-branch }} 60 | components: rustfmt 61 | profile: minimal 62 | 63 | - name: Clone bittensor repo 64 | run: git clone https://github.com/opentensor/bittensor.git 65 | 
66 | - name: Setup bittensor repo 67 | working-directory: ${{ github.workspace }}/bittensor 68 | run: | 69 | git checkout staging 70 | python3 -m pip install -e . 71 | python3 -m pip install torch 72 | python3 -m pip install pytest 73 | 74 | - name: Run tests 75 | working-directory: ${{ github.workspace }}/bittensor 76 | run: | 77 | pwd 78 | ls 79 | LOCALNET_SH_PATH="${{ github.workspace }}/scripts/localnet.sh" pytest tests/e2e_tests/ -s 80 | -------------------------------------------------------------------------------- /.github/workflows/label-triggers.yml: -------------------------------------------------------------------------------- 1 | name: Label Triggers 2 | on: 3 | pull_request: 4 | types: 5 | - labeled 6 | 7 | permissions: 8 | issues: write 9 | pull-requests: write 10 | 11 | jobs: 12 | comment_on_breaking_change: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Check if 'breaking change' label is added 16 | if: github.event.label.name == 'breaking-change' 17 | uses: actions/github-script@v6 18 | with: 19 | script: | 20 | github.rest.issues.createComment({ 21 | issue_number: context.issue.number, 22 | owner: context.repo.owner, 23 | repo: context.repo.repo, 24 | body: '@opentensor/cerebrum / @opentensor/gyrus / @opentensor/cortex breaking change detected! Please prepare accordingly!' 
25 | }) 26 | -------------------------------------------------------------------------------- /.github/workflows/update-chainspec.yml: -------------------------------------------------------------------------------- 1 | name: Update Chainspecs 2 | 3 | concurrency: 4 | group: update-chainspec-${{ github.ref }} 5 | cancel-in-progress: true 6 | 7 | on: 8 | push: 9 | branches: [main, testnet, staging, staging-ready] 10 | 11 | workflow_dispatch: 12 | inputs: 13 | verbose: 14 | description: "Output more information when triggered manually" 15 | required: false 16 | default: "" 17 | 18 | env: 19 | CARGO_TERM_COLOR: always 20 | VERBOSE: ${{ github.events.input.verbose }} 21 | 22 | jobs: 23 | update-chainspecs: 24 | runs-on: SubtensorCI 25 | permissions: 26 | contents: write 27 | 28 | strategy: 29 | matrix: 30 | rust-branch: 31 | - nightly-2024-03-05 32 | rust-target: 33 | - x86_64-unknown-linux-gnu 34 | os: 35 | - ubuntu-latest 36 | include: 37 | - os: ubuntu-latest 38 | env: 39 | RELEASE_NAME: development 40 | RUSTV: ${{ matrix.rust-branch }} 41 | RUST_BACKTRACE: full 42 | RUST_BIN_DIR: target/${{ matrix.rust-target }} 43 | TARGET: ${{ matrix.rust-target }} 44 | steps: 45 | - name: Check-out repository under $GITHUB_WORKSPACE 46 | uses: actions/checkout@v2 47 | 48 | - name: Install dependencies 49 | run: | 50 | sudo apt-get update && 51 | sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler 52 | 53 | - name: Install Rust ${{ matrix.rust-branch }} 54 | uses: actions-rs/toolchain@v1.0.6 55 | with: 56 | toolchain: ${{ matrix.rust-branch }} 57 | components: rustfmt, clippy 58 | profile: minimal 59 | 60 | - name: Utilize Shared Rust Cache 61 | uses: Swatinem/rust-cache@v2.2.1 62 | with: 63 | key: ${{ matrix.os }}-${{ env.RUST_BIN_DIR }} 64 | 65 | - name: Build chainspecs 66 | run: ./scripts/build_all_chainspecs.sh 67 | 68 | - uses: stefanzweifel/git-auto-commit-action@v5 69 | name: Commit any updated chainspecs 70 | with: 71 | commit_message: Update 
chainspecs 72 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | **/target/ 4 | # These are backup files generated by rustfmt 5 | **/*.rs.bk 6 | 7 | **/*.lock 8 | 9 | *.ipynb 10 | 11 | # Generated by code coverage 12 | *.profraw 13 | *.profdata 14 | 15 | .DS_Store 16 | 17 | # The cache for docker container dependency 18 | .cargo 19 | 20 | # The cache for chain data in container 21 | .local 22 | 23 | # direnv files 24 | .envrc 25 | .direnv 26 | 27 | # Vim (BTW™) 28 | *.swp 29 | *.swo 30 | tmp/ 31 | 32 | # Resulting chainspec files from localnet 33 | specs/*.json 34 | 35 | # Git merge/rebase artifacts 36 | *.orig 37 | 38 | # VSCode configuration 39 | .vscode 40 | 41 | # IntelliJ IDEA configuration 42 | .idea 43 | 44 | # Runtime upgrade snapshot 45 | bt.snap 46 | 47 | # localnet spec 48 | scripts/specs/local.json -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | ## 2 | # Notes; 3 | # - commented lines _should™_ be stable defaults 4 | # - uncommented lines are modified 5 | 6 | ## 7 | # rustup run nightly -- rustfmt node/src/main.rs 8 | 9 | # max_width = 180 10 | # hard_tabs = false 11 | # tab_spaces = 4 12 | # newline_style = "Auto" 13 | # indent_style = "Block" 14 | # use_small_heuristics = "Default" 15 | # fn_call_width = 60 16 | # attr_fn_like_width = 70 17 | # struct_lit_width = 18 18 | # struct_variant_width = 35 19 | # array_width = 60 20 | # chain_width = 60 21 | # single_line_if_else_max_width = 50 22 | # wrap_comments = false 23 | # format_code_in_doc_comments = false 24 | # doc_comment_code_block_width = 100 25 | # comment_width = 80 26 | # normalize_comments = false 27 | # normalize_doc_attributes = false 28 | # format_strings = 
false 29 | # format_macro_matchers = false 30 | # format_macro_bodies = true 31 | # hex_literal_case = "Preserve" 32 | # empty_item_single_line = true 33 | # struct_lit_single_line = true 34 | # fn_single_line = false 35 | # where_single_line = false 36 | # imports_indent = "Block" 37 | # imports_layout = "Mixed" 38 | # imports_granularity = "Preserve" 39 | # group_imports = "Preserve" 40 | # reorder_imports = true 41 | # reorder_modules = true 42 | # reorder_impl_items = false 43 | # type_punctuation_density = "Wide" 44 | # space_before_colon = false 45 | # space_after_colon = true 46 | # spaces_around_ranges = false 47 | # binop_separator = "Front" 48 | # remove_nested_parens = true 49 | # combine_control_expr = true 50 | # short_array_element_width_threshold = 10 51 | # overflow_delimited_expr = false 52 | # struct_field_align_threshold = 0 53 | # enum_discrim_align_threshold = 0 54 | # match_arm_blocks = true 55 | # match_arm_leading_pipes = "Never" 56 | # force_multiline_blocks = false 57 | # fn_args_layout = "Tall" 58 | # brace_style = "SameLineWhere" 59 | # control_brace_style = "AlwaysSameLine" 60 | # trailing_semicolon = true 61 | # trailing_comma = "Vertical" 62 | # match_block_trailing_comma = false 63 | # blank_lines_upper_bound = 1 64 | # blank_lines_lower_bound = 0 65 | # edition = "2015" 66 | # version = "One" 67 | # inline_attribute_width = 0 68 | # format_generated_files = true 69 | # merge_derives = true 70 | # use_try_shorthand = false 71 | # use_field_init_shorthand = false 72 | # force_explicit_abi = true 73 | # condense_wildcard_suffixes = false 74 | # color = "Auto" 75 | # required_version = "1.5.1" 76 | # unstable_features = false 77 | # disable_all_formatting = false 78 | # skip_children = true 79 | # hide_parse_errors = false 80 | # error_on_line_overflow = false 81 | # error_on_unformatted = false 82 | # ignore = [] 83 | # emit_mode = "Files" 84 | # make_backup = false 85 | 
-------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | // See https://go.microsoft.com/fwlink/?LinkId=733558 3 | // for the documentation about the tasks.json format 4 | "version": "2.0.0", 5 | "tasks": [ 6 | { 7 | "label": "Run ", 8 | "type": "shell", 9 | "command": "cargo", 10 | "args": ["run", "--release", "--", "--dev"], 11 | "group": { 12 | "kind": "build", 13 | "isDefault": true 14 | }, 15 | "presentation": { 16 | "reveal": "always", 17 | "panel": "new" 18 | }, 19 | "problemMatcher": [ 20 | { 21 | "owner": "rust", 22 | "fileLocation": ["relative", "${workspaceRoot}"], 23 | "pattern": { 24 | "regexp": "^(.*):(\\d+):(\\d+):\\s+(\\d+):(\\d+)\\s+(warning|error):\\s+(.*)$", 25 | "file": 1, 26 | "line": 2, 27 | "column": 3, 28 | "endLine": 4, 29 | "endColumn": 5, 30 | "severity": 6, 31 | "message": 7 32 | } 33 | } 34 | ] 35 | } 36 | ] 37 | } -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @unconst 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_IMAGE=ubuntu:20.04 2 | 3 | FROM $BASE_IMAGE AS builder 4 | SHELL ["/bin/bash", "-c"] 5 | 6 | # Set noninteractive mode for apt-get 7 | ARG DEBIAN_FRONTEND=noninteractive 8 | 9 | LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ 10 | ai.opentensor.image.vendor="Opentensor Foundation" \ 11 | ai.opentensor.image.title="opentensor/subtensor" \ 12 | ai.opentensor.image.description="Opentensor Subtensor Blockchain" \ 13 | ai.opentensor.image.documentation="https://docs.bittensor.com" 14 | 15 | # Set up Rust environment 16 | ENV RUST_BACKTRACE=1 17 | RUN apt-get update && \ 18 | apt-get install -y curl 
build-essential protobuf-compiler clang git && \ 19 | rm -rf /var/lib/apt/lists/* 20 | 21 | RUN set -o pipefail && curl https://sh.rustup.rs -sSf | sh -s -- -y 22 | ENV PATH="/root/.cargo/bin:${PATH}" 23 | RUN rustup update stable 24 | RUN rustup target add wasm32-unknown-unknown --toolchain stable 25 | 26 | # Copy entire repository 27 | COPY . /build 28 | WORKDIR /build 29 | 30 | # Build the project 31 | RUN cargo build -p node-subtensor --profile production --features="runtime-benchmarks metadata-hash" --locked 32 | 33 | # Verify the binary was produced 34 | RUN test -e /build/target/production/node-subtensor 35 | 36 | EXPOSE 30333 9933 9944 37 | 38 | FROM $BASE_IMAGE AS subtensor 39 | 40 | # Copy all chainspec files 41 | COPY --from=builder /build/*.json / 42 | 43 | # Copy final binary 44 | COPY --from=builder /build/target/production/node-subtensor /usr/local/bin 45 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | 24 | For more information, please refer to 25 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | use std::{ 3 | env, fs, 4 | path::{Path, PathBuf}, 5 | str::FromStr, 6 | sync::mpsc::channel, 7 | }; 8 | use walkdir::WalkDir; 9 | 10 | use subtensor_linting::*; 11 | 12 | fn main() { 13 | // need to list all rust directories here 14 | println!("cargo:rerun-if-changed=pallets"); 15 | println!("cargo:rerun-if-changed=node"); 16 | println!("cargo:rerun-if-changed=runtime"); 17 | println!("cargo:rerun-if-changed=lints"); 18 | println!("cargo:rerun-if-changed=build.rs"); 19 | println!("cargo:rerun-if-changed=src"); 20 | println!("cargo:rerun-if-changed=support"); 21 | // Get the root directory of the workspace 22 | let workspace_root = env::var("CARGO_MANIFEST_DIR").unwrap(); 23 | let workspace_root = Path::new(&workspace_root); 24 | 25 | // Collect all Rust source files in the workspace 26 | let rust_files = collect_rust_files(workspace_root); 27 | 28 | // Channel used to communicate errors back to the main thread from the parallel processing 29 | // as we process each Rust file 30 | let (tx, rx) = channel(); 31 | 32 | // Parse each rust file with syn and run the linting suite on it in parallel 33 | rust_files.par_iter().for_each_with(tx.clone(), |tx, file| { 34 | let Ok(content) = fs::read_to_string(file) else { 35 | return; 36 | }; 37 | let 
Ok(parsed_tokens) = proc_macro2::TokenStream::from_str(&content) else { 38 | return; 39 | }; 40 | let Ok(parsed_file) = syn::parse2::(parsed_tokens) else { 41 | return; 42 | }; 43 | 44 | let track_lint = |result: Result| { 45 | let Err(errors) = result else { 46 | return; 47 | }; 48 | let relative_path = file.strip_prefix(workspace_root).unwrap_or(file.as_path()); 49 | for error in errors { 50 | let loc = error.span().start(); 51 | let file_path = relative_path.display(); 52 | // note that spans can't go across thread boundaries without losing their location 53 | // info so we we serialize here and send a String 54 | tx.send(format!( 55 | "cargo:warning={}:{}:{}: {}", 56 | file_path, loc.line, loc.column, error, 57 | )) 58 | .unwrap(); 59 | } 60 | }; 61 | 62 | track_lint(RequireFreezeStruct::lint(&parsed_file)); 63 | }); 64 | 65 | // Collect and print all errors after the parallel processing is done 66 | drop(tx); // Close the sending end of the channel 67 | 68 | for error in rx { 69 | println!("{error}"); 70 | } 71 | } 72 | 73 | /// Recursively collects all Rust files in the given directory 74 | fn collect_rust_files(dir: &Path) -> Vec { 75 | let mut rust_files = Vec::new(); 76 | 77 | for entry in WalkDir::new(dir) { 78 | let entry = entry.unwrap(); 79 | let path = entry.path(); 80 | 81 | // Skip any path that contains "target" directory 82 | if path 83 | .components() 84 | .any(|component| component.as_os_str() == "target") 85 | || path.ends_with("build.rs") 86 | { 87 | continue; 88 | } 89 | 90 | if path.is_file() && path.extension().and_then(|ext| ext.to_str()) == Some("rs") { 91 | rust_files.push(path.to_path_buf()); 92 | } 93 | } 94 | 95 | rust_files 96 | } 97 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | volumes: 4 | mainnet-lite-volume: 5 | mainnet-archive-volume: 6 | testnet-lite-volume: 7 | 
testnet-archive-volume: 8 | 9 | services: 10 | common: &common 11 | image: ghcr.io/opentensor/subtensor:latest 12 | build: 13 | context: . 14 | dockerfile: Dockerfile 15 | target: subtensor 16 | cpu_count: 4 17 | mem_limit: 40000000000 18 | memswap_limit: 80000000000 19 | ports: 20 | - "9944:9944" 21 | - "30333:30333" 22 | - "9933:9933" 23 | expose: 24 | - "9944" 25 | - "30333" 26 | - "9933" 27 | environment: 28 | - CARGO_HOME=/var/www/node-subtensor/.cargo 29 | 30 | mainnet-lite: 31 | <<: *common 32 | container_name: subtensor-mainnet-lite 33 | volumes: 34 | - mainnet-lite-volume:/tmp/blockchain 35 | command: 36 | - /bin/bash 37 | - -c 38 | - | 39 | node-subtensor \ 40 | --base-path /tmp/blockchain \ 41 | --chain raw_spec_finney.json \ 42 | --rpc-external --rpc-cors all \ 43 | --no-mdns \ 44 | --in-peers 500 --out-peers 500 \ 45 | --bootnodes /dns/bootnode.finney.chain.opentensor.ai/tcp/30333/ws/p2p/12D3KooWRwbMb85RWnT8DSXSYMWQtuDwh4LJzndoRrTDotTR5gDC \ 46 | --sync warp 47 | 48 | mainnet-archive: 49 | <<: *common 50 | container_name: subtensor-mainnet-archive 51 | volumes: 52 | - mainnet-archive-volume:/tmp/blockchain 53 | command: 54 | - /bin/bash 55 | - -c 56 | - | 57 | node-subtensor \ 58 | --base-path /tmp/blockchain \ 59 | --chain raw_spec_finney.json \ 60 | --rpc-external --rpc-cors all \ 61 | --no-mdns \ 62 | --in-peers 500 --out-peers 500 \ 63 | --bootnodes /dns/bootnode.finney.chain.opentensor.ai/tcp/30333/ws/p2p/12D3KooWRwbMb85RWnT8DSXSYMWQtuDwh4LJzndoRrTDotTR5gDC \ 64 | --pruning=archive 65 | 66 | testnet-lite: 67 | <<: *common 68 | container_name: subtensor-testnet-lite 69 | volumes: 70 | - testnet-lite-volume:/tmp/blockchain 71 | command: 72 | - /bin/bash 73 | - -c 74 | - | 75 | node-subtensor \ 76 | --base-path /tmp/blockchain \ 77 | --chain raw_spec_testfinney.json \ 78 | --rpc-external --rpc-cors all \ 79 | --no-mdns \ 80 | --in-peers 500 --out-peers 500 \ 81 | --bootnodes 
/dns/bootnode.test.finney.opentensor.ai/tcp/30333/p2p/12D3KooWPM4mLcKJGtyVtkggqdG84zWrd7Rij6PGQDoijh1X86Vr \ 82 | --sync warp \ 83 | --reserved-nodes /dns/bootnode.test.finney.opentensor.ai/tcp/30333/p2p/12D3KooWPM4mLcKJGtyVtkggqdG84zWrd7Rij6PGQDoijh1X86Vr \ 84 | --reserved-only 85 | 86 | testnet-archive: 87 | <<: *common 88 | container_name: subtensor-testnet-archive 89 | volumes: 90 | - testnet-archive-volume:/tmp/blockchain 91 | command: 92 | - /bin/bash 93 | - -c 94 | - | 95 | node-subtensor \ 96 | --base-path /tmp/blockchain \ 97 | --chain raw_spec_testfinney.json \ 98 | --rpc-external --rpc-cors all \ 99 | --no-mdns \ 100 | --in-peers 500 --out-peers 500 \ 101 | --bootnodes /dns/bootnode.test.finney.opentensor.ai/tcp/30333/p2p/12D3KooWPM4mLcKJGtyVtkggqdG84zWrd7Rij6PGQDoijh1X86Vr \ 102 | --pruning=archive \ 103 | --reserved-nodes /dns/bootnode.test.finney.opentensor.ai/tcp/30333/p2p/12D3KooWPM4mLcKJGtyVtkggqdG84zWrd7Rij6PGQDoijh1X86Vr \ 104 | --reserved-only 105 | -------------------------------------------------------------------------------- /docs/img/consensus_plots.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blixor/subtensor/aa521f46fab9ba4702c752792ba4f5eb38df39fd/docs/img/consensus_plots.png -------------------------------------------------------------------------------- /docs/running-subtensor-locally.md: -------------------------------------------------------------------------------- 1 | # Running subtensor node locally 2 | 3 | See the [**Subtensor Nodes** section in Bittensor Developer Documentation](https://docs.bittensor.com/subtensor-nodes). 4 | -------------------------------------------------------------------------------- /hyperparameters.md: -------------------------------------------------------------------------------- 1 | ### Global settings 2 | ```rust 3 | DefaultTake: u16 = 11_796; // 18% honest number.
4 | TxRateLimit: u64 = 1; // [1 @ 64,888] 5 | ``` 6 | 7 | ### netuid 1 (text_prompting) 8 | ```rust 9 | Rho: u16 = 10; 10 | Kappa: u16 = 32_767; // 0.5 = 65535/2 11 | MaxAllowedUids: u16 = 1024; 12 | Issuance: u64 = 0; 13 | MinAllowedWeights: u16 = 8; 14 | EmissionValue: u64 = 142_223_000; 15 | MaxWeightsLimit: u16 = 455; // 455/2^16 = 0.0069 16 | ValidatorBatchSize: u16 = 1; 17 | ValidatorSequenceLen: u16 = 2048; // 2048 18 | ValidatorEpochLen: u16 = 100; 19 | ValidatorEpochsPerReset: u16 = 60; 20 | ValidatorExcludeQuantile: u16 = 0; // 0% of u16 21 | ValidatorPruneLen: u64 = 1; 22 | ValidatorLogitsDivergence: u16 = 1310; // 2% of u16 23 | ScalingLawPower: u16 = 50; // 0.5 24 | SynergyScalingLawPower: u16 = 50; // 0.5 25 | MaxAllowedValidators: u16 = 128; 26 | Tempo: u16 = 99; 27 | Difficulty: u64 = 10_000_000; 28 | AdjustmentInterval: u16 = 225; 29 | TargetRegistrationsPerInterval: u16 = 2; 30 | ImmunityPeriod: u16 = 7200; 31 | ActivityCutoff: u16 = 5000; 32 | MaxRegistrationsPerBlock: u16 = 1; 33 | PruningScore : u16 = u16::MAX; 34 | BondsMovingAverage: u64 = 900_000; 35 | WeightsVersionKey: u64 = 1020; 36 | MinDifficulty: u64 = 10_000_000; 37 | MaxDifficulty: u64 = u64::MAX / 4; 38 | ServingRateLimit: u64 = 10; 39 | Burn: u64 = 1_000_000_000; // 1 tao 40 | MinBurn: u64 = 1_000_000_000; // 1 tao 41 | MaxBurn: u64 = 100_000_000_000; // 100 tao 42 | WeightsSetRateLimit: u64 = 100; 43 | ``` 44 | 45 | ### netuid 3 (causallmnext) 46 | ```rust 47 | Rho: u16 = 10; 48 | Kappa: u16 = 32_767; // 0.5 = 65535/2 49 | MaxAllowedUids: u16 = 4096; 50 | Issuance: u64 = 0; 51 | MinAllowedWeights: u16 = 50; 52 | EmissionValue: u64 = 857_777_000; 53 | MaxWeightsLimit: u16 = 655; // 655/2^16 = 0.01 [655 @ 7,160] 54 | ValidatorBatchSize: u16 = 32; // 32 55 | ValidatorSequenceLen: u16 = 256; // 256 56 | ValidatorEpochLen: u16 = 250; // [250 @ 7,161] 57 | ValidatorEpochsPerReset: u16 = 60; 58 | ValidatorExcludeQuantile: u16 = 3277; // 5% of u16 [3277 @ 65,065] 59 | ValidatorPruneLen: u64 =
1; 60 | ValidatorLogitsDivergence: u16 = 1310; // 2% of u16 61 | ScalingLawPower: u16 = 50; // 0.5 62 | SynergyScalingLawPower: u16 = 50; // 0.5 63 | MaxAllowedValidators: u16 = 128; 64 | Tempo: u16 = 99; 65 | Difficulty: u64 = 671_088_640_000_000; // Same as nakamoto at block = 3606775 [671T @ 26,310] 66 | AdjustmentInterval: u16 = 100; 67 | TargetRegistrationsPerInterval: u16 = 2; 68 | ImmunityPeriod: u16 = 4096; 69 | ActivityCutoff: u16 = 5000; // [5000 @ 7,163] 70 | MaxRegistrationsPerBlock: u16 = 1; 71 | PruningScore : u16 = u16::MAX; 72 | BondsMovingAverage: u64 = 900_000; 73 | WeightsVersionKey: u64 = 400; 74 | MinDifficulty: u64 = 10_000_000; 75 | MaxDifficulty: u64 = u64::MAX / 4; 76 | ServingRateLimit: u64 = 250; // [250 @ 7,166] 77 | Burn: u64 = 100_000_000_000; // 100 tao [100 tao @ 26,310] 78 | MinBurn: u64 = 1_000_000_000; // 1 tao [1 tao @ 26,310] 79 | MaxBurn: u64 = 21_000_000_000_000_000; // 21M tao [21M tao @ 26,310] 80 | WeightsSetRateLimit: u64 = 250; // [250 @ 7,168] 81 | ``` 82 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env just --justfile 2 | 3 | export RUST_BACKTRACE := "full" 4 | export SKIP_WASM_BUILD := "1" 5 | export RUST_BIN_DIR := "target/x86_64-unknown-linux-gnu" 6 | export TARGET := "x86_64-unknown-linux-gnu" 7 | export RUSTV := "stable" 8 | export RELEASE_NAME := "development" 9 | 10 | fmt: 11 | @echo "Running cargo fmt..." 12 | cargo +{{RUSTV}} fmt --all 13 | 14 | check: 15 | @echo "Running cargo check..." 16 | cargo +{{RUSTV}} check --workspace 17 | 18 | test: 19 | @echo "Running cargo test..." 20 | cargo +{{RUSTV}} test --workspace 21 | 22 | benchmarks: 23 | @echo "Running cargo test with benchmarks..." 24 | cargo +{{RUSTV}} test --workspace --features=runtime-benchmarks 25 | 26 | clippy: 27 | @echo "Running cargo clippy..." 
28 | cargo +{{RUSTV}} clippy --workspace --all-targets -- \ 29 | -D clippy::todo \ 30 | -D clippy::unimplemented 31 | 32 | clippy-fix: 33 | @echo "Running cargo clippy with automatic fixes on potentially dirty code..." 34 | cargo +{{RUSTV}} clippy --fix --allow-dirty --allow-staged --workspace --all-targets -- \ 35 | -A clippy::todo \ 36 | -A clippy::unimplemented \ 37 | -A clippy::indexing_slicing 38 | 39 | fix: 40 | @echo "Running cargo fix..." 41 | cargo +{{RUSTV}} fix --workspace 42 | git diff --exit-code || (echo "There are local changes after running 'cargo fix --workspace' ❌" && exit 1) 43 | 44 | lint: 45 | @echo "Running cargo fmt..." 46 | just fmt 47 | @echo "Running cargo clippy with automatic fixes on potentially dirty code..." 48 | just clippy-fix 49 | @echo "Running cargo clippy..." 50 | just clippy 51 | 52 | production: 53 | @echo "Running cargo build with metadata-hash generation..." 54 | cargo +{{RUSTV}} build --profile production --features="metadata-hash" 55 | -------------------------------------------------------------------------------- /node/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "node-subtensor" 3 | version = "4.0.0-dev" 4 | description = "A fresh FRAME-based Substrate node, ready for hacking." 
5 | authors = ["Substrate DevHub "] 6 | homepage = "https://substrate.io/" 7 | edition = "2021" 8 | license = "Unlicense" 9 | publish = false 10 | repository = "https://github.com/opentensor/subtensor" 11 | build = "build.rs" 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [package.metadata.docs.rs] 17 | targets = ["x86_64-unknown-linux-gnu"] 18 | 19 | [[bin]] 20 | name = "node-subtensor" 21 | 22 | [dependencies] 23 | clap = { workspace = true, features = ["derive"] } 24 | futures = { workspace = true, features = ["thread-pool"] } 25 | serde = { workspace = true, features = ["derive"] } 26 | 27 | # Storage import 28 | memmap2 = { workspace = true } 29 | serde_json = { workspace = true } 30 | 31 | sc-cli = { workspace = true } 32 | sp-core = { workspace = true } 33 | sc-executor = { workspace = true } 34 | sc-service = { workspace = true } 35 | sc-telemetry = { workspace = true } 36 | sc-keystore = { workspace = true } 37 | sc-transaction-pool = { workspace = true } 38 | sc-transaction-pool-api = { workspace = true } 39 | sc-offchain = { workspace = true } 40 | sc-network = { workspace = true } 41 | sc-consensus-aura = { workspace = true } 42 | sp-consensus-aura = { workspace = true } 43 | sp-consensus = { workspace = true } 44 | sc-consensus = { workspace = true } 45 | sc-consensus-grandpa = { workspace = true } 46 | sc-consensus-grandpa-rpc = { workspace = true } 47 | sp-consensus-grandpa = { workspace = true } 48 | sc-chain-spec-derive = { workspace = true } 49 | sc-chain-spec = { workspace = true } 50 | sc-consensus-slots = { workspace = true } 51 | sc-client-api = { workspace = true } 52 | sp-runtime = { workspace = true } 53 | sp-io = { workspace = true } 54 | sp-timestamp = { workspace = true } 55 | sp-inherents = { workspace = true } 56 | sp-keyring = { workspace = true } 57 | frame-metadata-hash-extension = { workspace = true } 58 | frame-system = { workspace = true } 59 | pallet-transaction-payment = { workspace = true } 60 | pallet-commitments = { path 
= "../pallets/commitments" } 61 | 62 | # These dependencies are used for the subtensor's RPCs 63 | jsonrpsee = { workspace = true, features = ["server"] } 64 | sc-rpc = { workspace = true } 65 | sp-api = { workspace = true } 66 | sc-rpc-api = { workspace = true } 67 | sp-blockchain = { workspace = true } 68 | sp-block-builder = { workspace = true } 69 | sc-basic-authorship = { workspace = true } 70 | substrate-frame-rpc-system = { workspace = true } 71 | pallet-transaction-payment-rpc = { workspace = true } 72 | 73 | # These dependencies are used for runtime benchmarking 74 | frame-benchmarking = { workspace = true } 75 | frame-benchmarking-cli = { workspace = true } 76 | 77 | # Local Dependencies 78 | node-subtensor-runtime = { path = "../runtime" } 79 | subtensor-custom-rpc = { path = "../pallets/subtensor/rpc" } 80 | subtensor-custom-rpc-runtime-api = { path = "../pallets/subtensor/runtime-api" } 81 | 82 | [build-dependencies] 83 | substrate-build-script-utils = { workspace = true } 84 | 85 | [features] 86 | default = [] 87 | # Dependencies that are only required if runtime benchmarking should be build. 88 | runtime-benchmarks = [ 89 | "node-subtensor-runtime/runtime-benchmarks", 90 | "frame-benchmarking/runtime-benchmarks", 91 | "frame-benchmarking-cli/runtime-benchmarks", 92 | "frame-system/runtime-benchmarks", 93 | "sc-service/runtime-benchmarks", 94 | "sp-runtime/runtime-benchmarks", 95 | "pallet-commitments/runtime-benchmarks", 96 | ] 97 | pow-faucet = [] 98 | 99 | # Enable features that allow the runtime to be tried and debugged. Name might be subject to change 100 | # in the near future. 
101 | try-runtime = [ 102 | "node-subtensor-runtime/try-runtime", 103 | "frame-system/try-runtime", 104 | "pallet-transaction-payment/try-runtime", 105 | "sp-runtime/try-runtime", 106 | "pallet-commitments/try-runtime", 107 | ] 108 | 109 | metadata-hash = ["node-subtensor-runtime/metadata-hash"] 110 | -------------------------------------------------------------------------------- /node/build.rs: -------------------------------------------------------------------------------- 1 | use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; 2 | 3 | fn main() { 4 | generate_cargo_keys(); 5 | 6 | rerun_if_git_head_changed(); 7 | } 8 | -------------------------------------------------------------------------------- /node/src/chain_spec/localnet.rs: -------------------------------------------------------------------------------- 1 | // Allowed since it's actually better to panic during chain setup when there is an error 2 | #![allow(clippy::unwrap_used)] 3 | 4 | use super::*; 5 | 6 | pub fn localnet_config() -> Result { 7 | let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; 8 | 9 | // Give front-ends necessary data to present to users 10 | let mut properties = sc_service::Properties::new(); 11 | properties.insert("tokenSymbol".into(), "TAO".into()); 12 | properties.insert("tokenDecimals".into(), 9.into()); 13 | properties.insert("ss58Format".into(), 42.into()); 14 | 15 | Ok(ChainSpec::builder( 16 | wasm_binary, 17 | Extensions { 18 | bad_blocks: Some(HashSet::from_iter(vec![ 19 | // Example bad block 20 | H256::from_str( 21 | "0xc174d485de4bc3813ac249fe078af605c74ff91d07b0a396cf75fa04f81fa312", 22 | ) 23 | .unwrap(), 24 | ])), 25 | ..Default::default() 26 | }, 27 | ) 28 | .with_name("Bittensor") 29 | .with_protocol_id("bittensor") 30 | .with_id("bittensor") 31 | .with_chain_type(ChainType::Development) 32 | .with_genesis_config_patch(localnet_genesis( 33 | // Initial PoA authorities (Validators) 34 | // 
aura | grandpa 35 | vec![ 36 | // Keys for debug 37 | authority_keys_from_seed("Alice"), 38 | authority_keys_from_seed("Bob"), 39 | ], 40 | // Pre-funded accounts 41 | true, 42 | )) 43 | .with_properties(properties) 44 | .build()) 45 | } 46 | 47 | fn localnet_genesis( 48 | initial_authorities: Vec<(AuraId, GrandpaId)>, 49 | _enable_println: bool, 50 | ) -> serde_json::Value { 51 | let mut balances = vec![ 52 | ( 53 | get_account_id_from_seed::("Alice"), 54 | 1000000000000000u128, 55 | ), 56 | ( 57 | get_account_id_from_seed::("Bob"), 58 | 1000000000000000u128, 59 | ), 60 | ( 61 | get_account_id_from_seed::("Charlie"), 62 | 1000000000000000u128, 63 | ), 64 | ( 65 | get_account_id_from_seed::("Dave"), 66 | 2000000000000u128, 67 | ), 68 | ( 69 | get_account_id_from_seed::("Eve"), 70 | 2000000000000u128, 71 | ), 72 | ( 73 | get_account_id_from_seed::("Ferdie"), 74 | 2000000000000u128, 75 | ), 76 | ]; 77 | 78 | // Check if the environment variable is set 79 | if let Ok(bt_wallet) = env::var("BT_DEFAULT_TOKEN_WALLET") { 80 | if let Ok(decoded_wallet) = Ss58Codec::from_ss58check(&bt_wallet) { 81 | balances.push((decoded_wallet, 1_000_000_000_000_000u128)); 82 | } else { 83 | eprintln!("Invalid format for BT_DEFAULT_TOKEN_WALLET."); 84 | } 85 | } 86 | 87 | let trimvirate_members: Vec = bounded_vec![ 88 | get_account_id_from_seed::("Alice"), 89 | get_account_id_from_seed::("Bob"), 90 | get_account_id_from_seed::("Charlie"), 91 | ]; 92 | 93 | let senate_members: Vec = bounded_vec![ 94 | get_account_id_from_seed::("Dave"), 95 | get_account_id_from_seed::("Eve"), 96 | get_account_id_from_seed::("Ferdie"), 97 | ]; 98 | 99 | serde_json::json!({ 100 | "balances": { "balances": balances }, 101 | "aura": { 102 | "authorities": initial_authorities.iter().map(|x| (x.0.clone())).collect::>() 103 | }, 104 | "grandpa": { 105 | "authorities": initial_authorities 106 | .iter() 107 | .map(|x| (x.1.clone(), 1)) 108 | .collect::>() 109 | }, 110 | "sudo": { 111 | "key": 
Some(get_account_id_from_seed::("Alice")) 112 | }, 113 | "triumvirateMembers": { 114 | "members": trimvirate_members 115 | }, 116 | "senateMembers": { 117 | "members": senate_members, 118 | }, 119 | }) 120 | } 121 | -------------------------------------------------------------------------------- /node/src/chain_spec/mod.rs: -------------------------------------------------------------------------------- 1 | // Allowed since it's actually better to panic during chain setup when there is an error 2 | #![allow(clippy::unwrap_used)] 3 | 4 | pub mod finney; 5 | pub mod localnet; 6 | pub mod testnet; 7 | 8 | use node_subtensor_runtime::{AccountId, Block, RuntimeGenesisConfig, Signature, WASM_BINARY}; 9 | use sc_chain_spec_derive::ChainSpecExtension; 10 | use sc_service::ChainType; 11 | use sp_consensus_aura::sr25519::AuthorityId as AuraId; 12 | use sp_consensus_grandpa::AuthorityId as GrandpaId; 13 | use sp_core::crypto::Ss58Codec; 14 | use sp_core::{bounded_vec, sr25519, Pair, Public, H256}; 15 | use sp_runtime::traits::{IdentifyAccount, Verify}; 16 | use sp_runtime::AccountId32; 17 | use std::collections::HashSet; 18 | use std::env; 19 | use std::str::FromStr; 20 | 21 | // The URL for the telemetry server. 22 | // const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; 23 | 24 | /// Node `ChainSpec` extensions. 25 | #[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)] 26 | #[serde(rename_all = "camelCase")] 27 | pub struct Extensions { 28 | /// Block numbers with known hashes. 29 | pub fork_blocks: sc_client_api::ForkBlocks, 30 | /// Known bad block hashes. 31 | pub bad_blocks: sc_client_api::BadBlocks, 32 | } 33 | 34 | /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. 35 | pub type ChainSpec = sc_service::GenericChainSpec; 36 | 37 | /// Generate a crypto pair from seed. 
38 | pub fn get_from_seed(seed: &str) -> ::Public { 39 | TPublic::Pair::from_string(&format!("//{}", seed), None) 40 | .expect("static values are valid; qed") 41 | .public() 42 | } 43 | 44 | type AccountPublic = ::Signer; 45 | 46 | /// Generate an account ID from seed. 47 | pub fn get_account_id_from_seed(seed: &str) -> AccountId 48 | where 49 | AccountPublic: From<::Public>, 50 | { 51 | AccountPublic::from(get_from_seed::(seed)).into_account() 52 | } 53 | 54 | /// Generate an Aura authority key. 55 | pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { 56 | (get_from_seed::(s), get_from_seed::(s)) 57 | } 58 | 59 | pub fn authority_keys_from_ss58(s_aura: &str, s_grandpa: &str) -> (AuraId, GrandpaId) { 60 | ( 61 | get_aura_from_ss58_addr(s_aura), 62 | get_grandpa_from_ss58_addr(s_grandpa), 63 | ) 64 | } 65 | 66 | pub fn get_aura_from_ss58_addr(s: &str) -> AuraId { 67 | Ss58Codec::from_ss58check(s).unwrap() 68 | } 69 | 70 | pub fn get_grandpa_from_ss58_addr(s: &str) -> GrandpaId { 71 | Ss58Codec::from_ss58check(s).unwrap() 72 | } 73 | 74 | // Includes for nakamoto genesis 75 | use serde::{Deserialize, Serialize}; 76 | use serde_json as json; 77 | use std::{fs::File, path::PathBuf}; 78 | 79 | // Configure storage from nakamoto data 80 | #[derive(Deserialize, Debug)] 81 | struct ColdkeyHotkeys { 82 | stakes: std::collections::HashMap>, 83 | balances: std::collections::HashMap, 84 | } 85 | -------------------------------------------------------------------------------- /node/src/cli.rs: -------------------------------------------------------------------------------- 1 | use sc_cli::RunCmd; 2 | 3 | #[derive(Debug, clap::Parser)] 4 | pub struct Cli { 5 | #[command(subcommand)] 6 | pub subcommand: Option, 7 | 8 | #[clap(flatten)] 9 | pub run: RunCmd, 10 | } 11 | 12 | #[allow(clippy::large_enum_variant)] 13 | #[derive(Debug, clap::Subcommand)] 14 | pub enum Subcommand { 15 | // Key management cli utilities 16 | #[command(subcommand)] 17 | 
Key(sc_cli::KeySubcommand), 18 | 19 | // Build a chain specification. 20 | BuildSpec(sc_cli::BuildSpecCmd), 21 | 22 | // Validate blocks. 23 | CheckBlock(sc_cli::CheckBlockCmd), 24 | 25 | // Export blocks. 26 | ExportBlocks(sc_cli::ExportBlocksCmd), 27 | 28 | // Export the state of a given block into a chain spec. 29 | ExportState(sc_cli::ExportStateCmd), 30 | 31 | // Import blocks. 32 | ImportBlocks(sc_cli::ImportBlocksCmd), 33 | 34 | // Remove the whole chain. 35 | PurgeChain(sc_cli::PurgeChainCmd), 36 | 37 | // Revert the chain to a previous state. 38 | Revert(sc_cli::RevertCmd), 39 | 40 | // Sub-commands concerned with benchmarking. 41 | #[cfg(feature = "runtime-benchmarks")] 42 | #[command(subcommand)] 43 | Benchmark(frame_benchmarking_cli::BenchmarkCmd), 44 | 45 | // Db meta columns information. 46 | ChainInfo(sc_cli::ChainInfoCmd), 47 | } 48 | -------------------------------------------------------------------------------- /node/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod chain_spec; 2 | pub mod rpc; 3 | pub mod service; 4 | -------------------------------------------------------------------------------- /node/src/main.rs: -------------------------------------------------------------------------------- 1 | //! Substrate Node Subtensor CLI library. 2 | #![warn(missing_docs)] 3 | 4 | mod chain_spec; 5 | #[macro_use] 6 | mod service; 7 | #[cfg(feature = "runtime-benchmarks")] 8 | mod benchmarking; 9 | mod cli; 10 | mod command; 11 | mod rpc; 12 | 13 | fn main() -> sc_cli::Result<()> { 14 | command::run() 15 | } 16 | -------------------------------------------------------------------------------- /node/src/rpc.rs: -------------------------------------------------------------------------------- 1 | //! A collection of node-specific RPC methods. 2 | //! Substrate provides the `sc-rpc` crate, which defines the core RPC layer 3 | //! used by Substrate nodes. 
This file extends those RPC definitions with 4 | //! capabilities that are specific to this project's runtime configuration. 5 | 6 | #![warn(missing_docs)] 7 | 8 | use std::sync::Arc; 9 | 10 | use jsonrpsee::RpcModule; 11 | use node_subtensor_runtime::{opaque::Block, AccountId, Balance, BlockNumber, Hash, Index}; 12 | use sc_consensus_grandpa::FinalityProofProvider; 13 | use sc_transaction_pool_api::TransactionPool; 14 | use sp_api::ProvideRuntimeApi; 15 | use sp_block_builder::BlockBuilder; 16 | use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; 17 | 18 | pub use sc_rpc_api::DenyUnsafe; 19 | 20 | /// Dependencies for GRANDPA 21 | pub struct GrandpaDeps { 22 | /// Voting round info. 23 | pub shared_voter_state: sc_consensus_grandpa::SharedVoterState, 24 | /// Authority set info. 25 | pub shared_authority_set: sc_consensus_grandpa::SharedAuthoritySet, 26 | /// Receives notifications about justification events from Grandpa. 27 | pub justification_stream: sc_consensus_grandpa::GrandpaJustificationStream, 28 | /// Executor to drive the subscription manager in the Grandpa RPC handler. 29 | pub subscription_executor: sc_rpc::SubscriptionTaskExecutor, 30 | /// Finality proof provider. 31 | pub finality_provider: Arc>, 32 | } 33 | 34 | /// Full client dependencies. 35 | pub struct FullDeps { 36 | /// The client instance to use. 37 | pub client: Arc, 38 | /// Transaction pool instance. 39 | pub pool: Arc

, 40 | /// Whether to deny unsafe calls 41 | pub deny_unsafe: DenyUnsafe, 42 | /// Grandpa block import setup. 43 | pub grandpa: GrandpaDeps, 44 | /// Backend used by the node. 45 | pub _backend: Arc, 46 | } 47 | 48 | /// Instantiate all full RPC extensions. 49 | pub fn create_full( 50 | deps: FullDeps, 51 | ) -> Result, Box> 52 | where 53 | C: ProvideRuntimeApi, 54 | C: HeaderBackend + HeaderMetadata + 'static, 55 | C: Send + Sync + 'static, 56 | C::Api: substrate_frame_rpc_system::AccountNonceApi, 57 | C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, 58 | C::Api: BlockBuilder, 59 | C::Api: subtensor_custom_rpc_runtime_api::DelegateInfoRuntimeApi, 60 | C::Api: subtensor_custom_rpc_runtime_api::NeuronInfoRuntimeApi, 61 | C::Api: subtensor_custom_rpc_runtime_api::SubnetInfoRuntimeApi, 62 | C::Api: subtensor_custom_rpc_runtime_api::SubnetRegistrationRuntimeApi, 63 | B: sc_client_api::Backend + Send + Sync + 'static, 64 | P: TransactionPool + 'static, 65 | { 66 | use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; 67 | use sc_consensus_grandpa_rpc::{Grandpa, GrandpaApiServer}; 68 | use substrate_frame_rpc_system::{System, SystemApiServer}; 69 | use subtensor_custom_rpc::{SubtensorCustom, SubtensorCustomApiServer}; 70 | 71 | let mut module = RpcModule::new(()); 72 | let FullDeps { 73 | client, 74 | pool, 75 | deny_unsafe, 76 | grandpa, 77 | _backend: _, 78 | } = deps; 79 | 80 | // Custom RPC methods for Paratensor 81 | module.merge(SubtensorCustom::new(client.clone()).into_rpc())?; 82 | 83 | module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; 84 | module.merge(TransactionPayment::new(client).into_rpc())?; 85 | 86 | let GrandpaDeps { 87 | shared_voter_state, 88 | shared_authority_set, 89 | justification_stream, 90 | subscription_executor, 91 | finality_provider, 92 | } = grandpa; 93 | 94 | module.merge( 95 | Grandpa::new( 96 | subscription_executor, 97 | shared_authority_set.clone(), 
98 | shared_voter_state, 99 | justification_stream, 100 | finality_provider, 101 | ) 102 | .into_rpc(), 103 | )?; 104 | 105 | // Extend this RPC with a custom API by using the following syntax. 106 | // `YourRpcStruct` should have a reference to a client, which is needed 107 | // to call into the runtime. 108 | // `module.merge(YourRpcTrait::into_rpc(YourRpcStruct::new(ReferenceToClient, ...)))?;` 109 | 110 | Ok(module) 111 | } 112 | -------------------------------------------------------------------------------- /node/tests/chain_spec.rs: -------------------------------------------------------------------------------- 1 | use sp_core::sr25519; 2 | // use sp_consensus_aura::sr25519::AuthorityId as AuraId; 3 | // use sp_consensus_grandpa::AuthorityId as GrandpaId; 4 | 5 | use node_subtensor::chain_spec::*; 6 | 7 | #[test] 8 | fn test_get_from_seed() { 9 | let seed = "WoOt"; 10 | let pare = get_from_seed::(seed); 11 | let expected = "5Gj3QEiZaFJPFK1yN4Lkj6FLM4V7GEBCewVBVniuvZ75S2Fd"; 12 | assert_eq!(pare.to_string(), expected); 13 | } 14 | 15 | #[test] 16 | #[should_panic(expected = "static values are valid; qed: InvalidFormat")] 17 | fn test_get_from_seed_panics() { 18 | let bad_seed = ""; 19 | get_from_seed::(bad_seed); 20 | } 21 | 22 | #[test] 23 | fn test_get_account_id_from_seed() { 24 | let seed = "WoOt"; 25 | let account_id = get_account_id_from_seed::(seed); 26 | let expected = "5Gj3QEiZaFJPFK1yN4Lkj6FLM4V7GEBCewVBVniuvZ75S2Fd"; 27 | assert_eq!(account_id.to_string(), expected); 28 | } 29 | 30 | #[test] 31 | #[should_panic(expected = "static values are valid; qed: InvalidFormat")] 32 | fn test_get_account_id_from_seed_panics() { 33 | let bad_seed = ""; 34 | get_account_id_from_seed::(bad_seed); 35 | } 36 | 37 | #[test] 38 | fn test_authority_keys_from_seed() { 39 | let seed = "WoOt"; 40 | let (aura_id, grandpa_id) = authority_keys_from_seed(seed); 41 | 42 | let expected_aura_id = "5Gj3QEiZaFJPFK1yN4Lkj6FLM4V7GEBCewVBVniuvZ75S2Fd"; 43 | let expected_grandpa_id 
= "5H7623Nvxq655p9xrLQPip1mwssFRMfL5fvT5LUSa4nWwLSm"; 44 | 45 | assert_eq!(aura_id.to_string(), expected_aura_id); 46 | assert_eq!(grandpa_id.to_string(), expected_grandpa_id); 47 | } 48 | 49 | #[test] 50 | #[should_panic(expected = "static values are valid; qed: InvalidFormat")] 51 | fn test_authority_keys_from_seed_panics() { 52 | let bad_seed = ""; 53 | authority_keys_from_seed(bad_seed); 54 | } 55 | -------------------------------------------------------------------------------- /pallets/admin-utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-admin-utils" 3 | version = "4.0.0-dev" 4 | description = "FRAME pallet for extending admin utilities." 5 | authors = ["Bittensor Nucleus Team"] 6 | homepage = "https://bittensor.com" 7 | edition = "2021" 8 | license = "Unlicense" 9 | publish = false 10 | repository = "https://github.com/opentensor/subtensor" 11 | 12 | [lints] 13 | workspace = true 14 | 15 | [package.metadata.docs.rs] 16 | targets = ["x86_64-unknown-linux-gnu"] 17 | 18 | [dependencies] 19 | subtensor-macros.workspace = true 20 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ 21 | "derive", 22 | ] } 23 | scale-info = { workspace = true, features = ["derive"] } 24 | frame-benchmarking = { workspace = true, optional = true } 25 | frame-support = { workspace = true } 26 | frame-system = { workspace = true } 27 | sp-runtime = { workspace = true } 28 | log = { workspace = true } 29 | pallet-subtensor = { version = "4.0.0-dev", default-features = false, path = "../subtensor" } 30 | sp-weights = { workspace = true } 31 | substrate-fixed = { workspace = true } 32 | 33 | 34 | [dev-dependencies] 35 | sp-core = { workspace = true } 36 | sp-io = { workspace = true } 37 | sp-tracing = { workspace = true } 38 | sp-consensus-aura = { workspace = true } 39 | pallet-balances = { workspace = true, features = ["std"] } 40 | pallet-scheduler = { 
workspace = true } 41 | sp-std = { workspace = true } 42 | 43 | [features] 44 | default = ["std"] 45 | std = [ 46 | "codec/std", 47 | "frame-benchmarking?/std", 48 | "frame-support/std", 49 | "frame-system/std", 50 | "scale-info/std", 51 | "pallet-subtensor/std", 52 | "sp-consensus-aura/std", 53 | "pallet-balances/std", 54 | "pallet-scheduler/std", 55 | "sp-runtime/std", 56 | "sp-tracing/std", 57 | "sp-weights/std", 58 | "log/std", 59 | "sp-core/std", 60 | "sp-io/std", 61 | "sp-std/std", 62 | "substrate-fixed/std", 63 | ] 64 | runtime-benchmarks = [ 65 | "frame-benchmarking/runtime-benchmarks", 66 | "frame-support/runtime-benchmarks", 67 | "frame-system/runtime-benchmarks", 68 | "pallet-balances/runtime-benchmarks", 69 | "sp-runtime/runtime-benchmarks", 70 | "pallet-subtensor/runtime-benchmarks", 71 | "pallet-scheduler/runtime-benchmarks", 72 | ] 73 | try-runtime = [ 74 | "frame-support/try-runtime", 75 | "frame-system/try-runtime", 76 | "pallet-balances/try-runtime", 77 | "pallet-scheduler/try-runtime", 78 | "sp-runtime/try-runtime", 79 | "pallet-subtensor/try-runtime" 80 | ] 81 | -------------------------------------------------------------------------------- /pallets/admin-utils/scripts/benchmark.sh: -------------------------------------------------------------------------------- 1 | cargo build --profile production --features runtime-benchmarks 2 | ./target/production/node-subtensor benchmark pallet \ 3 | --chain=local \ 4 | --pallet=pallet_admin_utils \ 5 | --extrinsic="*" \ 6 | --steps 50 \ 7 | --repeat 20 \ 8 | --output=pallets/admin-utils/src/weights.rs \ 9 | --template=./.maintain/frame-weight-template.hbs 10 | -------------------------------------------------------------------------------- /pallets/collective/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-collective" 3 | version = "4.0.0-dev" 4 | authors = ["Parity Technologies , Opentensor Technologies"] 5 | edition = "2021" 6 
| license = "Apache-2.0" 7 | homepage = "https://bittensor.com" 8 | repository = "https://github.com/opentensor/subtensor" 9 | description = "Collective system: Members of a set of account IDs can make their collective feelings known through dispatched calls from one of two specialized origins." 10 | readme = "README.md" 11 | 12 | [lints] 13 | workspace = true 14 | 15 | [package.metadata.docs.rs] 16 | targets = ["x86_64-unknown-linux-gnu"] 17 | 18 | [dependencies] 19 | subtensor-macros.workspace = true 20 | codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = [ 21 | "derive", 22 | ] } 23 | log = { workspace = true } 24 | scale-info = { workspace = true, features = ["derive"] } 25 | frame-benchmarking = { workspace = true, optional = true } 26 | frame-support = { workspace = true } 27 | frame-system = { workspace = true } 28 | sp-core = { workspace = true } 29 | sp-io = { workspace = true } 30 | sp-runtime = { workspace = true } 31 | sp-std = { workspace = true } 32 | 33 | [features] 34 | default = ["std"] 35 | std = [ 36 | "codec/std", 37 | "frame-benchmarking?/std", 38 | "frame-support/std", 39 | "frame-system/std", 40 | "log/std", 41 | "scale-info/std", 42 | "sp-core/std", 43 | "sp-io/std", 44 | "sp-runtime/std", 45 | "sp-std/std", 46 | ] 47 | runtime-benchmarks = [ 48 | "frame-benchmarking/runtime-benchmarks", 49 | "frame-support/runtime-benchmarks", 50 | "frame-system/runtime-benchmarks", 51 | "sp-runtime/runtime-benchmarks", 52 | ] 53 | try-runtime = [ 54 | "frame-support/try-runtime", 55 | "frame-system/try-runtime", 56 | "sp-runtime/try-runtime" 57 | ] 58 | -------------------------------------------------------------------------------- /pallets/collective/README.md: -------------------------------------------------------------------------------- 1 | Collective system: Members of a set of account IDs can make their collective feelings known 2 | through dispatched calls from one of two specialized origins. 
3 | 4 | The membership can be provided in one of two ways: either directly, using the Root-dispatchable 5 | function `set_members`, or indirectly, through implementing the `ChangeMembers` trait. 6 | The pallet assumes that the amount of members stays at or below `MaxMembers` for its weight 7 | calculations, but enforces this neither in `set_members` nor in `change_members_sorted`. 8 | 9 | A "prime" member may be set to help determine the default vote behavior based on chain 10 | config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any 11 | abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then 12 | abstentions will first follow the majority of the collective voting, and then the prime 13 | member. 14 | 15 | Voting happens through motions comprising a proposal (i.e. a dispatchable) plus a 16 | number of approvals required for it to pass and be called. Motions are open for members to 17 | vote on for a minimum period given by `MotionDuration`. As soon as the required number of 18 | approvals is given, the motion is closed and executed. If the number of approvals is not reached 19 | during the voting period, then `close` may be called by any account in order to force the end 20 | of the motion explicitly. If a prime member is defined, then their vote is used instead of any 21 | abstentions and the proposal is executed if there are enough approvals counting the new votes. 22 | 23 | If there are not, or if no prime member is set, then the motion is dropped without being executed. 24 | 25 | License: Apache-2.0 26 | -------------------------------------------------------------------------------- /pallets/commitments/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-commitments" 3 | version = "4.0.0-dev" 4 | description = "Add the ability to commit generic hashed data for network participants."
5 | authors = ["Bittensor Nucleus Team"] 6 | homepage = "https://bittensor.com" 7 | edition = "2021" 8 | license = "Unlicense" 9 | publish = false 10 | repository = "https://github.com/opentensor/subtensor" 11 | 12 | [lints] 13 | workspace = true 14 | 15 | [package.metadata.docs.rs] 16 | targets = ["x86_64-unknown-linux-gnu"] 17 | 18 | [dependencies] 19 | subtensor-macros.workspace = true 20 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ 21 | "derive", 22 | "max-encoded-len", 23 | ] } 24 | scale-info = { workspace = true, features = ["derive"] } 25 | frame-benchmarking = { workspace = true, optional = true } 26 | frame-support = { workspace = true } 27 | frame-system = { workspace = true } 28 | sp-runtime = { workspace = true } 29 | sp-std = { workspace = true } 30 | enumflags2 = { workspace = true } 31 | 32 | [dev-dependencies] 33 | sp-core = { workspace = true } 34 | sp-io = { workspace = true } 35 | pallet-balances = { workspace = true } 36 | 37 | [features] 38 | default = ["std"] 39 | std = [ 40 | "codec/std", 41 | "frame-benchmarking?/std", 42 | "frame-support/std", 43 | "frame-system/std", 44 | "scale-info/std", 45 | "sp-std/std", 46 | "sp-runtime/std", 47 | "enumflags2/std", 48 | "pallet-balances/std", 49 | "sp-core/std", 50 | "sp-io/std" 51 | ] 52 | runtime-benchmarks = [ 53 | "frame-benchmarking/runtime-benchmarks", 54 | "frame-support/runtime-benchmarks", 55 | "frame-system/runtime-benchmarks", 56 | "sp-runtime/runtime-benchmarks", 57 | "pallet-balances/runtime-benchmarks" 58 | ] 59 | try-runtime = [ 60 | "frame-support/try-runtime", 61 | "frame-system/try-runtime", 62 | "pallet-balances/try-runtime", 63 | "sp-runtime/try-runtime" 64 | ] 65 | -------------------------------------------------------------------------------- /pallets/commitments/scripts/benchmark.sh: -------------------------------------------------------------------------------- 1 | cargo build --profile production --features 
runtime-benchmarks 2 | ./target/production/node-subtensor benchmark pallet \ 3 | --chain=local \ 4 | --pallet=pallet_commitments \ 5 | --extrinsic="*" \ 6 | --output=pallets/commitments/src/weights.rs \ 7 | --template=./.maintain/frame-weight-template.hbs 8 | -------------------------------------------------------------------------------- /pallets/commitments/src/benchmarking.rs: -------------------------------------------------------------------------------- 1 | //! Benchmarking setup 2 | #![cfg(feature = "runtime-benchmarks")] 3 | #![allow(clippy::arithmetic_side_effects)] 4 | use super::*; 5 | 6 | #[allow(unused)] 7 | use crate::Pallet as Commitments; 8 | use frame_benchmarking::v2::*; 9 | use frame_system::RawOrigin; 10 | use sp_std::vec; 11 | 12 | use sp_runtime::traits::Bounded; 13 | 14 | fn assert_last_event(generic_event: ::RuntimeEvent) { 15 | frame_system::Pallet::::assert_last_event(generic_event.into()); 16 | } 17 | 18 | // This creates an `IdentityInfo` object with `num_fields` extra fields. 19 | // All data is pre-populated with some arbitrary bytes. 
20 | fn create_identity_info(_num_fields: u32) -> CommitmentInfo { 21 | let _data = Data::Raw( 22 | vec![0; 32] 23 | .try_into() 24 | .expect("vec length is less than 64; qed"), 25 | ); 26 | 27 | CommitmentInfo { 28 | fields: Default::default(), 29 | } 30 | } 31 | 32 | #[benchmarks] 33 | mod benchmarks { 34 | use super::*; 35 | 36 | #[benchmark] 37 | fn set_commitment() { 38 | // The target user 39 | let netuid = 1; 40 | let caller: T::AccountId = whitelisted_caller(); 41 | let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); 42 | 43 | #[extrinsic_call] 44 | _( 45 | RawOrigin::Signed(caller.clone()), 46 | netuid, 47 | Box::new(create_identity_info::(0)), 48 | ); 49 | 50 | assert_last_event::( 51 | Event::::Commitment { 52 | netuid, 53 | who: caller, 54 | } 55 | .into(), 56 | ); 57 | } 58 | 59 | //impl_benchmark_test_suite!(Commitments, crate::tests::new_test_ext(), crate::tests::Test); 60 | } 61 | -------------------------------------------------------------------------------- /pallets/commitments/src/mock.rs: -------------------------------------------------------------------------------- 1 | use crate as pallet_commitments; 2 | use frame_support::traits::{ConstU16, ConstU64}; 3 | use sp_core::H256; 4 | use sp_runtime::{ 5 | traits::{BlakeTwo256, IdentityLookup}, 6 | BuildStorage, 7 | }; 8 | 9 | type Block = frame_system::mocking::MockBlock; 10 | 11 | // Configure a mock runtime to test the pallet. 
12 | frame_support::construct_runtime!( 13 | pub enum Test 14 | { 15 | System: frame_system, 16 | Commitments: pallet_commitments, 17 | } 18 | ); 19 | 20 | impl frame_system::Config for Test { 21 | type BaseCallFilter = frame_support::traits::Everything; 22 | type BlockWeights = (); 23 | type BlockLength = (); 24 | type DbWeight = (); 25 | type RuntimeOrigin = RuntimeOrigin; 26 | type RuntimeCall = RuntimeCall; 27 | type Nonce = u64; 28 | type Hash = H256; 29 | type Hashing = BlakeTwo256; 30 | type AccountId = u64; 31 | type Lookup = IdentityLookup; 32 | type Block = Block; 33 | type RuntimeEvent = RuntimeEvent; 34 | type BlockHashCount = ConstU64<250>; 35 | type Version = (); 36 | type PalletInfo = PalletInfo; 37 | type AccountData = (); 38 | type OnNewAccount = (); 39 | type OnKilledAccount = (); 40 | type SystemWeightInfo = (); 41 | type SS58Prefix = ConstU16<42>; 42 | type OnSetCode = (); 43 | type MaxConsumers = frame_support::traits::ConstU32<16>; 44 | } 45 | 46 | impl pallet_commitments::Config for Test { 47 | type RuntimeEvent = RuntimeEvent; 48 | type WeightInfo = (); 49 | type MaxAdditionalFields = frame_support::traits::ConstU32<16>; 50 | type CanRegisterIdentity = (); 51 | } 52 | 53 | // Build genesis storage according to the mock runtime. 
54 | pub fn new_test_ext() -> sp_io::TestExternalities { 55 | frame_system::GenesisConfig::::default() 56 | .build_storage() 57 | .unwrap() 58 | .into() 59 | } 60 | -------------------------------------------------------------------------------- /pallets/commitments/src/tests.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_camel_case_types)] 2 | 3 | use super::*; 4 | use crate as pallet_commitments; 5 | use frame_support::derive_impl; 6 | use frame_support::traits::ConstU64; 7 | use sp_core::H256; 8 | use sp_runtime::{ 9 | testing::Header, 10 | traits::{BlakeTwo256, ConstU16, IdentityLookup}, 11 | }; 12 | 13 | pub type Block = sp_runtime::generic::Block; 14 | pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; 15 | 16 | frame_support::construct_runtime!( 17 | pub enum Test 18 | { 19 | System: frame_system, 20 | Balances: pallet_balances, 21 | Commitments: pallet_commitments 22 | } 23 | ); 24 | 25 | #[allow(dead_code)] 26 | pub type AccountId = u64; 27 | 28 | // The address format for describing accounts. 29 | #[allow(dead_code)] 30 | pub type Address = AccountId; 31 | 32 | // Balance of an account. 33 | #[allow(dead_code)] 34 | pub type Balance = u64; 35 | 36 | // An index to a block. 
37 | #[allow(dead_code)] 38 | pub type BlockNumber = u64; 39 | 40 | #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] 41 | impl pallet_balances::Config for Test { 42 | type MaxLocks = (); 43 | type MaxReserves = (); 44 | type ReserveIdentifier = [u8; 8]; 45 | type Balance = u64; 46 | type RuntimeEvent = RuntimeEvent; 47 | type DustRemoval = (); 48 | type ExistentialDeposit = ConstU64<1>; 49 | type AccountStore = System; 50 | type WeightInfo = (); 51 | type FreezeIdentifier = (); 52 | type MaxFreezes = (); 53 | } 54 | 55 | #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] 56 | impl frame_system::Config for Test { 57 | type BaseCallFilter = frame_support::traits::Everything; 58 | type BlockWeights = (); 59 | type BlockLength = (); 60 | type DbWeight = (); 61 | type RuntimeOrigin = RuntimeOrigin; 62 | type RuntimeCall = RuntimeCall; 63 | type Hash = H256; 64 | type Hashing = BlakeTwo256; 65 | type AccountId = u64; 66 | type Lookup = IdentityLookup; 67 | type RuntimeEvent = RuntimeEvent; 68 | type BlockHashCount = ConstU64<250>; 69 | type Version = (); 70 | type PalletInfo = PalletInfo; 71 | type AccountData = pallet_balances::AccountData; 72 | type OnNewAccount = (); 73 | type OnKilledAccount = (); 74 | type SystemWeightInfo = (); 75 | type SS58Prefix = ConstU16<42>; 76 | type OnSetCode = (); 77 | type MaxConsumers = frame_support::traits::ConstU32<16>; 78 | type Block = Block; 79 | type Nonce = u64; 80 | } 81 | 82 | impl pallet_commitments::Config for Test { 83 | type RuntimeEvent = RuntimeEvent; 84 | type Currency = Balances; 85 | type WeightInfo = (); 86 | type MaxFields = frame_support::traits::ConstU32<16>; 87 | type CanCommit = (); 88 | type FieldDeposit = frame_support::traits::ConstU64<0>; 89 | type InitialDeposit = frame_support::traits::ConstU64<0>; 90 | type RateLimit = frame_support::traits::ConstU64<0>; 91 | } 92 | 93 | // // Build genesis storage according to the mock runtime. 
94 | // pub fn new_test_ext() -> sp_io::TestExternalities { 95 | // let t = frame_system::GenesisConfig::::default() 96 | // .build_storage() 97 | // .unwrap(); 98 | // let mut ext = sp_io::TestExternalities::new(t); 99 | // ext.execute_with(|| System::set_block_number(1)); 100 | // ext 101 | // } 102 | -------------------------------------------------------------------------------- /pallets/commitments/src/weights.rs: -------------------------------------------------------------------------------- 1 | 2 | //! Autogenerated weights for `pallet_commitments` 3 | //! 4 | //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev 5 | //! DATE: 2023-12-08, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` 6 | //! WORST CASE MAP SIZE: `1000000` 7 | //! HOSTNAME: `rustys-mbp.lan`, CPU: `` 8 | //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("local")`, DB CACHE: `1024` 9 | 10 | // Executed Command: 11 | // ./target/release/node-subtensor 12 | // benchmark 13 | // pallet 14 | // --chain=local 15 | // --execution=wasm 16 | // --wasm-execution=compiled 17 | // --pallet=pallet_commitments 18 | // --extrinsic=* 19 | // --output=pallets/commitments/src/weights.rs 20 | // --template=./.maintain/frame-weight-template.hbs 21 | 22 | #![cfg_attr(rustfmt, rustfmt_skip)] 23 | #![allow(unused_parens)] 24 | #![allow(unused_imports)] 25 | #![allow(missing_docs)] 26 | 27 | use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; 28 | use core::marker::PhantomData; 29 | 30 | /// Weight functions needed for `pallet_commitments`. 31 | pub trait WeightInfo { 32 | fn set_commitment() -> Weight; 33 | } 34 | 35 | /// Weights for `pallet_commitments` using the Substrate node and recommended hardware. 
36 | pub struct SubstrateWeight<T>(PhantomData<T>); 37 | impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { 38 | /// Storage: Commitments LastCommitment (r:1 w:1) 39 | /// Proof Skipped: Commitments LastCommitment (max_values: None, max_size: None, mode: Measured) 40 | /// Storage: Commitments CommitmentOf (r:1 w:1) 41 | /// Proof Skipped: Commitments CommitmentOf (max_values: None, max_size: None, mode: Measured) 42 | fn set_commitment() -> Weight { 43 | // Proof Size summary in bytes: 44 | // Measured: `697` 45 | // Estimated: `6344` 46 | // Minimum execution time: 28_000_000 picoseconds. 47 | Weight::from_parts(28_000_000, 6344) 48 | .saturating_add(T::DbWeight::get().reads(2_u64)) 49 | .saturating_add(T::DbWeight::get().writes(2_u64)) 50 | } 51 | } 52 | 53 | // For backwards compatibility and tests. 54 | impl WeightInfo for () { 55 | /// Storage: Commitments LastCommitment (r:1 w:1) 56 | /// Proof Skipped: Commitments LastCommitment (max_values: None, max_size: None, mode: Measured) 57 | /// Storage: Commitments CommitmentOf (r:1 w:1) 58 | /// Proof Skipped: Commitments CommitmentOf (max_values: None, max_size: None, mode: Measured) 59 | fn set_commitment() -> Weight { 60 | // Proof Size summary in bytes: 61 | // Measured: `697` 62 | // Estimated: `6344` 63 | // Minimum execution time: 28_000_000 picoseconds. 64 | Weight::from_parts(28_000_000, 6344) 65 | .saturating_add(RocksDbWeight::get().reads(2_u64)) 66 | .saturating_add(RocksDbWeight::get().writes(2_u64)) 67 | } 68 | } -------------------------------------------------------------------------------- /pallets/registry/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-registry" 3 | version = "4.0.0-dev" 4 | description = "Simplified identity system for network participants."
5 | authors = ["Bittensor Nucleus Team"] 6 | homepage = "https://bittensor.com" 7 | edition = "2021" 8 | license = "Unlicense" 9 | publish = false 10 | repository = "https://github.com/opentensor/subtensor" 11 | 12 | [lints] 13 | workspace = true 14 | 15 | [package.metadata.docs.rs] 16 | targets = ["x86_64-unknown-linux-gnu"] 17 | 18 | [dependencies] 19 | subtensor-macros.workspace = true 20 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ 21 | "derive", 22 | "max-encoded-len", 23 | ] } 24 | scale-info = { workspace = true, features = ["derive"] } 25 | frame-benchmarking = { workspace = true, optional = true } 26 | frame-support = { workspace = true } 27 | frame-system = { workspace = true } 28 | sp-runtime = { workspace = true } 29 | sp-std = { workspace = true } 30 | enumflags2 = { workspace = true } 31 | 32 | [dev-dependencies] 33 | sp-core = { workspace = true } 34 | sp-io = { workspace = true } 35 | 36 | [features] 37 | default = ["std"] 38 | std = [ 39 | "codec/std", 40 | "frame-benchmarking?/std", 41 | "frame-support/std", 42 | "frame-system/std", 43 | "scale-info/std", 44 | "sp-std/std", 45 | "sp-runtime/std", 46 | "enumflags2/std", 47 | "sp-core/std", 48 | "sp-io/std" 49 | ] 50 | runtime-benchmarks = [ 51 | "frame-benchmarking/runtime-benchmarks", 52 | "frame-support/runtime-benchmarks", 53 | "frame-system/runtime-benchmarks", 54 | "sp-runtime/runtime-benchmarks", 55 | ] 56 | try-runtime = [ 57 | "frame-support/try-runtime", 58 | "frame-system/try-runtime", 59 | "sp-runtime/try-runtime" 60 | ] 61 | -------------------------------------------------------------------------------- /pallets/registry/scripts/benchmark.sh: -------------------------------------------------------------------------------- 1 | cargo build --profile production --features runtime-benchmarks 2 | ./target/production/node-subtensor benchmark pallet \ 3 | --chain=local \ 4 | --pallet=pallet_registry \ 5 | --extrinsic="*" \ 6 |
--output=pallets/registry/src/weights.rs \ 7 | --template=./.maintain/frame-weight-template.hbs 8 | -------------------------------------------------------------------------------- /pallets/registry/src/benchmarking.rs: -------------------------------------------------------------------------------- 1 | //! Benchmarking setup 2 | #![cfg(feature = "runtime-benchmarks")] 3 | #![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] 4 | use super::*; 5 | 6 | #[allow(unused)] 7 | use crate::Pallet as Registry; 8 | use frame_benchmarking::v1::account; 9 | use frame_benchmarking::v2::*; 10 | use frame_support::traits::tokens::fungible::Mutate; 11 | use frame_system::RawOrigin; 12 | use sp_std::vec; 13 | 14 | use sp_runtime::traits::Bounded; 15 | 16 | fn assert_last_event(generic_event: ::RuntimeEvent) { 17 | frame_system::Pallet::::assert_last_event(generic_event.into()); 18 | } 19 | 20 | // This creates an `IdentityInfo` object with `num_fields` extra fields. 21 | // All data is pre-populated with some arbitrary bytes. 
22 | fn create_identity_info(_num_fields: u32) -> IdentityInfo { 23 | let data = Data::Raw( 24 | vec![0; 32] 25 | .try_into() 26 | .expect("size does not exceed 64; qed"), 27 | ); 28 | 29 | IdentityInfo { 30 | additional: Default::default(), 31 | display: data.clone(), 32 | legal: data.clone(), 33 | web: data.clone(), 34 | riot: data.clone(), 35 | email: data.clone(), 36 | pgp_fingerprint: Some([0; 20]), 37 | image: data.clone(), 38 | twitter: data, 39 | } 40 | } 41 | 42 | #[benchmarks] 43 | mod benchmarks { 44 | use super::*; 45 | 46 | #[benchmark] 47 | fn set_identity() { 48 | // The target user 49 | let caller: T::AccountId = whitelisted_caller(); 50 | let _ = T::Currency::set_balance(&caller, BalanceOf::::max_value()); 51 | 52 | #[extrinsic_call] 53 | _( 54 | RawOrigin::Signed(caller.clone()), 55 | account::("account", 0, 0u32), 56 | Box::new(create_identity_info::(0)), 57 | ); 58 | 59 | assert_last_event::(Event::::IdentitySet { who: caller }.into()); 60 | } 61 | 62 | #[benchmark] 63 | fn clear_identity() { 64 | // The target user 65 | let caller: T::AccountId = whitelisted_caller(); 66 | let _ = T::Currency::set_balance(&caller, BalanceOf::::max_value()); 67 | 68 | let vali_account = account::("account", 0, 0u32); 69 | 70 | Registry::::set_identity( 71 | RawOrigin::Signed(caller.clone()).into(), 72 | vali_account.clone(), 73 | Box::new(create_identity_info::(0)), 74 | ) 75 | .unwrap(); 76 | 77 | #[extrinsic_call] 78 | _(RawOrigin::Signed(caller.clone()), vali_account); 79 | 80 | assert_last_event::(Event::::IdentityDissolved { who: caller }.into()); 81 | } 82 | 83 | //impl_benchmark_test_suite!(Registry, crate::mock::new_test_ext(), crate::mock::Test); 84 | } 85 | -------------------------------------------------------------------------------- /pallets/registry/src/mock.rs: -------------------------------------------------------------------------------- 1 | use crate as pallet_template; 2 | use frame_support::traits::{ConstU16, ConstU64}; 3 | use 
sp_core::H256; 4 | use sp_runtime::{ 5 | traits::{BlakeTwo256, IdentityLookup}, 6 | BuildStorage, 7 | }; 8 | 9 | type Block = frame_system::mocking::MockBlock; 10 | 11 | // Configure a mock runtime to test the pallet. 12 | frame_support::construct_runtime!( 13 | pub enum Test 14 | { 15 | System: frame_system, 16 | TemplateModule: pallet_template, 17 | } 18 | ); 19 | 20 | impl frame_system::Config for Test { 21 | type BaseCallFilter = frame_support::traits::Everything; 22 | type BlockWeights = (); 23 | type BlockLength = (); 24 | type DbWeight = (); 25 | type RuntimeOrigin = RuntimeOrigin; 26 | type RuntimeCall = RuntimeCall; 27 | type Nonce = u64; 28 | type Hash = H256; 29 | type Hashing = BlakeTwo256; 30 | type AccountId = u64; 31 | type Lookup = IdentityLookup; 32 | type Block = Block; 33 | type RuntimeEvent = RuntimeEvent; 34 | type BlockHashCount = ConstU64<250>; 35 | type Version = (); 36 | type PalletInfo = PalletInfo; 37 | type AccountData = (); 38 | type OnNewAccount = (); 39 | type OnKilledAccount = (); 40 | type SystemWeightInfo = (); 41 | type SS58Prefix = ConstU16<42>; 42 | type OnSetCode = (); 43 | type MaxConsumers = frame_support::traits::ConstU32<16>; 44 | } 45 | 46 | impl pallet_registry::Config for Test { 47 | type RuntimeEvent = RuntimeEvent; 48 | type WeightInfo = (); 49 | type MaxAdditionalFields = frame_support::traits::ConstU32<16>; 50 | type CanRegisterIdentity = (); 51 | } 52 | 53 | // Build genesis storage according to the mock runtime. 
54 | pub fn new_test_ext() -> sp_io::TestExternalities { 55 | frame_system::GenesisConfig::::default() 56 | .build_storage() 57 | .unwrap() 58 | .into() 59 | } 60 | -------------------------------------------------------------------------------- /pallets/registry/src/tests.rs: -------------------------------------------------------------------------------- 1 | // Testing 2 | -------------------------------------------------------------------------------- /pallets/registry/src/weights.rs: -------------------------------------------------------------------------------- 1 | 2 | //! Autogenerated weights for `pallet_registry` 3 | //! 4 | //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev 5 | //! DATE: 2023-10-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` 6 | //! WORST CASE MAP SIZE: `1000000` 7 | //! HOSTNAME: `rustys-MacBook-Pro.local`, CPU: `` 8 | //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("local")`, DB CACHE: `1024` 9 | 10 | // Executed Command: 11 | // ./target/release/node-subtensor 12 | // benchmark 13 | // pallet 14 | // --chain=local 15 | // --execution=wasm 16 | // --wasm-execution=compiled 17 | // --pallet=pallet_registry 18 | // --extrinsic=* 19 | // --output=pallets/registry/src/weights.rs 20 | // --template=./.maintain/frame-weight-template.hbs 21 | 22 | #![cfg_attr(rustfmt, rustfmt_skip)] 23 | #![allow(unused_parens)] 24 | #![allow(unused_imports)] 25 | #![allow(missing_docs)] 26 | 27 | use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; 28 | use core::marker::PhantomData; 29 | 30 | /// Weight functions needed for `pallet_registry`. 31 | pub trait WeightInfo { 32 | fn set_identity() -> Weight; 33 | fn clear_identity() -> Weight; 34 | } 35 | 36 | /// Weights for `pallet_registry` using the Substrate node and recommended hardware. 
37 | pub struct SubstrateWeight<T>(PhantomData<T>); 38 | impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { 39 | /// Storage: Registry IdentityOf (r:1 w:1) 40 | /// Proof Skipped: Registry IdentityOf (max_values: None, max_size: None, mode: Measured) 41 | fn set_identity() -> Weight { 42 | // Proof Size summary in bytes: 43 | // Measured: `1024` 44 | // Estimated: `3499` 45 | // Minimum execution time: 41_000_000 picoseconds. 46 | Weight::from_parts(41_000_000, 3499) 47 | .saturating_add(T::DbWeight::get().reads(1_u64)) 48 | .saturating_add(T::DbWeight::get().writes(1_u64)) 49 | } 50 | /// Storage: Registry IdentityOf (r:1 w:1) 51 | /// Proof Skipped: Registry IdentityOf (max_values: None, max_size: None, mode: Measured) 52 | fn clear_identity() -> Weight { 53 | // Proof Size summary in bytes: 54 | // Measured: `1385` 55 | // Estimated: `3860` 56 | // Minimum execution time: 36_000_000 picoseconds. 57 | Weight::from_parts(36_000_000, 3860) 58 | .saturating_add(T::DbWeight::get().reads(1_u64)) 59 | .saturating_add(T::DbWeight::get().writes(1_u64)) 60 | } 61 | } 62 | 63 | // For backwards compatibility and tests. 64 | impl WeightInfo for () { 65 | /// Storage: Registry IdentityOf (r:1 w:1) 66 | /// Proof Skipped: Registry IdentityOf (max_values: None, max_size: None, mode: Measured) 67 | fn set_identity() -> Weight { 68 | // Proof Size summary in bytes: 69 | // Measured: `1024` 70 | // Estimated: `3499` 71 | // Minimum execution time: 41_000_000 picoseconds. 72 | Weight::from_parts(41_000_000, 3499) 73 | .saturating_add(RocksDbWeight::get().reads(1_u64)) 74 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 75 | } 76 | /// Storage: Registry IdentityOf (r:1 w:1) 77 | /// Proof Skipped: Registry IdentityOf (max_values: None, max_size: None, mode: Measured) 78 | fn clear_identity() -> Weight { 79 | // Proof Size summary in bytes: 80 | // Measured: `1385` 81 | // Estimated: `3860` 82 | // Minimum execution time: 36_000_000 picoseconds. 
83 | Weight::from_parts(36_000_000, 3860) 84 | .saturating_add(RocksDbWeight::get().reads(1_u64)) 85 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 86 | } 87 | } -------------------------------------------------------------------------------- /pallets/subtensor/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-subtensor" 3 | version = "4.0.0-dev" 4 | description = "FRAME pallet for runtime logic of Subtensor Blockchain." 5 | authors = ["Bittensor Nucleus Team"] 6 | homepage = "https://bittensor.com" 7 | edition = "2021" 8 | license = "Unlicense" 9 | publish = false 10 | repository = "https://github.com/opentensor/subtensor" 11 | 12 | [lints] 13 | workspace = true 14 | 15 | [package.metadata.docs.rs] 16 | targets = ["x86_64-unknown-linux-gnu"] 17 | 18 | [dependencies] 19 | subtensor-macros.workspace = true 20 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ 21 | "derive", 22 | ] } 23 | sp-core = { workspace = true } 24 | pallet-balances = { workspace = true } 25 | scale-info = { workspace = true, features = ["derive"] } 26 | frame-benchmarking = { workspace = true, optional = true } 27 | frame-support = { workspace = true } 28 | frame-system = { workspace = true } 29 | sp-io = { workspace = true } 30 | serde = { workspace = true, features = ["derive"] } 31 | serde_json = { workspace = true } 32 | serde-tuple-vec-map = { workspace = true } 33 | serde_bytes = { workspace = true, features = ["alloc"] } 34 | serde_with = { workspace = true, features = ["macros"] } 35 | sp-runtime = { workspace = true } 36 | sp-std = { workspace = true } 37 | log = { workspace = true } 38 | substrate-fixed = { workspace = true } 39 | pallet-transaction-payment = { workspace = true } 40 | pallet-utility = { workspace = true } 41 | ndarray = { workspace = true } 42 | hex = { workspace = true } 43 | 44 | # Used for sudo decentralization 45 | 
pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../collective" } 46 | pallet-membership = { workspace = true } 47 | hex-literal = { workspace = true } 48 | num-traits = { version = "0.2.19", default-features = false, features = ["libm"] } 49 | 50 | [dev-dependencies] 51 | pallet-balances = { workspace = true, features = ["std"] } 52 | pallet-scheduler = { workspace = true } 53 | sp-version = { workspace = true } 54 | # Substrate 55 | sp-tracing = { workspace = true } 56 | parity-util-mem = { workspace = true, features = ["primitive-types"] } 57 | rand = { workspace = true } 58 | sp-core = { workspace = true } 59 | sp-std = { workspace = true } 60 | pallet-preimage = { workspace = true } 61 | 62 | [features] 63 | default = ["std"] 64 | std = [ 65 | "codec/std", 66 | "frame-benchmarking/std", 67 | "frame-support/std", 68 | "frame-system/std", 69 | "scale-info/std", 70 | "pallet-collective/std", 71 | "pallet-membership/std", 72 | "substrate-fixed/std", 73 | "pallet-balances/std", 74 | "pallet-preimage/std", 75 | "pallet-scheduler/std", 76 | "pallet-transaction-payment/std", 77 | "pallet-utility/std", 78 | "sp-core/std", 79 | "sp-io/std", 80 | "sp-runtime/std", 81 | "sp-std/std", 82 | "sp-tracing/std", 83 | "sp-version/std", 84 | "hex/std", 85 | "log/std", 86 | "ndarray/std", 87 | "serde/std", 88 | "serde_bytes/std", 89 | "serde_with/std", 90 | "substrate-fixed/std", 91 | "num-traits/std", 92 | "serde_json/std" 93 | ] 94 | runtime-benchmarks = [ 95 | "frame-benchmarking/runtime-benchmarks", 96 | "frame-support/runtime-benchmarks", 97 | "frame-system/runtime-benchmarks", 98 | "pallet-balances/runtime-benchmarks", 99 | "pallet-membership/runtime-benchmarks", 100 | "pallet-utility/runtime-benchmarks", 101 | "sp-runtime/runtime-benchmarks", 102 | "pallet-collective/runtime-benchmarks", 103 | "pallet-preimage/runtime-benchmarks", 104 | "pallet-scheduler/runtime-benchmarks", 105 | ] 106 | try-runtime = [ 107 | "frame-support/try-runtime", 108 | 
"frame-system/try-runtime", 109 | "pallet-balances/try-runtime", 110 | "pallet-membership/try-runtime", 111 | "pallet-preimage/try-runtime", 112 | "pallet-scheduler/try-runtime", 113 | "pallet-transaction-payment/try-runtime", 114 | "pallet-utility/try-runtime", 115 | "sp-runtime/try-runtime", 116 | "pallet-collective/try-runtime" 117 | ] 118 | pow-faucet = [] 119 | -------------------------------------------------------------------------------- /pallets/subtensor/rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "subtensor-custom-rpc" 3 | version = "0.0.2" 4 | edition = "2021" 5 | authors = ['Cameron Fairchild '] 6 | repository = 'https://github.com/opentensor/subtensor' 7 | description = "A pallet that adds custom RPC calls to subtensor" 8 | license = "MIT" 9 | publish = false 10 | 11 | [lints] 12 | workspace = true 13 | 14 | [dependencies] 15 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ 16 | "derive", 17 | ] } 18 | jsonrpsee = { workspace = true, features = ["client-core", "server", "macros"] } 19 | serde = { workspace = true, features = ["derive"] } 20 | 21 | # Substrate packages 22 | sp-api = { workspace = true } 23 | sp-blockchain = { workspace = true } 24 | sp-rpc = { workspace = true } 25 | sp-runtime = { workspace = true } 26 | 27 | # local packages 28 | 29 | subtensor-custom-rpc-runtime-api = { path = "../runtime-api", default-features = false } 30 | pallet-subtensor = { path = "../../subtensor", default-features = false } 31 | 32 | [features] 33 | default = ["std"] 34 | std = [ 35 | "sp-api/std", 36 | "sp-runtime/std", 37 | "subtensor-custom-rpc-runtime-api/std", 38 | "pallet-subtensor/std", 39 | "codec/std", 40 | "serde/std" 41 | ] 42 | pow-faucet = [] 43 | -------------------------------------------------------------------------------- /pallets/subtensor/runtime-api/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "subtensor-custom-rpc-runtime-api" 3 | version = "0.0.2" 4 | edition = "2021" 5 | authors = ['Cameron Fairchild '] 6 | repository = 'https://github.com/opentensor/subtensor' 7 | description = "A pallet that adds a custom runtime API to Subtensor" 8 | license = "MIT" 9 | publish = false 10 | 11 | [lints] 12 | workspace = true 13 | 14 | [dependencies] 15 | sp-api = { workspace = true } 16 | frame-support = { workspace = true } 17 | serde = { workspace = true, features = ["derive"] } 18 | 19 | # local 20 | pallet-subtensor = { version = "4.0.0-dev", path = "../../subtensor", default-features = false } 21 | 22 | [features] 23 | default = ["std"] 24 | std = [ 25 | "sp-api/std", 26 | "frame-support/std", 27 | "pallet-subtensor/std", 28 | "serde/std" 29 | ] 30 | pow-faucet = [] 31 | -------------------------------------------------------------------------------- /pallets/subtensor/runtime-api/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "std"), no_std)] 2 | extern crate alloc; 3 | use alloc::vec::Vec; 4 | 5 | // Here we declare the runtime API. It is implemented it the `impl` block in 6 | // src/neuron_info.rs, src/subnet_info.rs, and src/delegate_info.rs 7 | sp_api::decl_runtime_apis! 
{ 8 | pub trait DelegateInfoRuntimeApi { 9 | fn get_delegates() -> Vec; 10 | fn get_delegate( delegate_account_vec: Vec ) -> Vec; 11 | fn get_delegated( delegatee_account_vec: Vec ) -> Vec; 12 | } 13 | 14 | pub trait NeuronInfoRuntimeApi { 15 | fn get_neurons(netuid: u16) -> Vec; 16 | fn get_neuron(netuid: u16, uid: u16) -> Vec; 17 | fn get_neurons_lite(netuid: u16) -> Vec; 18 | fn get_neuron_lite(netuid: u16, uid: u16) -> Vec; 19 | } 20 | 21 | pub trait SubnetInfoRuntimeApi { 22 | fn get_subnet_info(netuid: u16) -> Vec; 23 | fn get_subnets_info() -> Vec; 24 | fn get_subnet_info_v2(netuid: u16) -> Vec; 25 | fn get_subnets_info_v2() -> Vec; 26 | fn get_subnet_hyperparams(netuid: u16) -> Vec; 27 | } 28 | 29 | pub trait StakeInfoRuntimeApi { 30 | fn get_stake_info_for_coldkey( coldkey_account_vec: Vec ) -> Vec; 31 | fn get_stake_info_for_coldkeys( coldkey_account_vecs: Vec> ) -> Vec; 32 | } 33 | 34 | pub trait SubnetRegistrationRuntimeApi { 35 | fn get_network_registration_cost() -> u64; 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /pallets/subtensor/src/coinbase/mod.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | pub mod block_step; 3 | pub mod root; 4 | pub mod run_coinbase; 5 | -------------------------------------------------------------------------------- /pallets/subtensor/src/epoch/mod.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | pub mod math; 3 | pub mod run_epoch; 4 | -------------------------------------------------------------------------------- /pallets/subtensor/src/macros/hooks.rs: -------------------------------------------------------------------------------- 1 | use frame_support::pallet_macros::pallet_section; 2 | 3 | /// A [`pallet_section`] that defines the events for a pallet. 4 | /// This can later be imported into the pallet using [`import_section`]. 
5 | #[pallet_section] 6 | mod hooks { 7 | // ================ 8 | // ==== Hooks ===== 9 | // ================ 10 | #[pallet::hooks] 11 | impl Hooks> for Pallet { 12 | // ---- Called on the initialization of this pallet. (the order of on_finalize calls is determined in the runtime) 13 | // 14 | // # Args: 15 | // * 'n': (BlockNumberFor): 16 | // - The number of the block we are initializing. 17 | fn on_initialize(_block_number: BlockNumberFor) -> Weight { 18 | let block_step_result = Self::block_step(); 19 | match block_step_result { 20 | Ok(_) => { 21 | // --- If the block step was successful, return the weight. 22 | log::debug!("Successfully ran block step."); 23 | Weight::from_parts(110_634_229_000_u64, 0) 24 | .saturating_add(T::DbWeight::get().reads(8304_u64)) 25 | .saturating_add(T::DbWeight::get().writes(110_u64)) 26 | } 27 | Err(e) => { 28 | // --- If the block step was unsuccessful, return the weight anyway. 29 | log::error!("Error while stepping block: {:?}", e); 30 | Weight::from_parts(110_634_229_000_u64, 0) 31 | .saturating_add(T::DbWeight::get().reads(8304_u64)) 32 | .saturating_add(T::DbWeight::get().writes(110_u64)) 33 | } 34 | } 35 | } 36 | 37 | fn on_runtime_upgrade() -> frame_support::weights::Weight { 38 | // --- Migrate storage 39 | let mut weight = frame_support::weights::Weight::from_parts(0, 0); 40 | 41 | // Hex encoded foundation coldkey 42 | let hex = hex_literal::hex![ 43 | "feabaafee293d3b76dae304e2f9d885f77d2b17adab9e17e921b321eccd61c77" 44 | ]; 45 | weight = weight 46 | // Initializes storage version (to 1) 47 | .saturating_add(migrations::migrate_to_v1_separate_emission::migrate_to_v1_separate_emission::()) 48 | // Storage version v1 -> v2 49 | .saturating_add(migrations::migrate_to_v2_fixed_total_stake::migrate_to_v2_fixed_total_stake::()) 50 | // Doesn't check storage version. 
TODO: Remove after upgrade 51 | .saturating_add(migrations::migrate_create_root_network::migrate_create_root_network::()) 52 | // Storage version v2 -> v3 53 | .saturating_add(migrations::migrate_transfer_ownership_to_foundation::migrate_transfer_ownership_to_foundation::( 54 | hex, 55 | )) 56 | // Storage version v3 -> v4 57 | .saturating_add(migrations::migrate_delete_subnet_21::migrate_delete_subnet_21::()) 58 | // Storage version v4 -> v5 59 | .saturating_add(migrations::migrate_delete_subnet_3::migrate_delete_subnet_3::()) 60 | // Doesn't check storage version. TODO: Remove after upgrade 61 | // Storage version v5 -> v6 62 | .saturating_add(migrations::migrate_total_issuance::migrate_total_issuance::(false)) 63 | // Populate OwnedHotkeys map for coldkey swap. Doesn't update storage vesion. 64 | // Storage version v6 -> v7 65 | .saturating_add(migrations::migrate_populate_owned_hotkeys::migrate_populate_owned::()) 66 | // Populate StakingHotkeys map for coldkey swap. Doesn't update storage vesion. 67 | // Storage version v7 -> v8 68 | .saturating_add(migrations::migrate_populate_staking_hotkeys::migrate_populate_staking_hotkeys::()) 69 | // Fix total coldkey stake. 
70 | // Storage version v8 -> v9 71 | .saturating_add(migrations::migrate_fix_total_coldkey_stake::migrate_fix_total_coldkey_stake::()) 72 | // Migrate Delegate Ids on chain 73 | .saturating_add(migrations::migrate_chain_identity::migrate_set_hotkey_identities::()); 74 | weight 75 | } 76 | 77 | #[cfg(feature = "try-runtime")] 78 | fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { 79 | Self::check_accounting_invariants()?; 80 | Ok(()) 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /pallets/subtensor/src/macros/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod config; 2 | pub mod dispatches; 3 | pub mod errors; 4 | pub mod events; 5 | pub mod genesis; 6 | pub mod hooks; 7 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_create_root_network.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::{ 3 | pallet_prelude::{Identity, OptionQuery}, 4 | storage_alias, 5 | traits::{DefensiveResult, Get}, 6 | weights::Weight, 7 | }; 8 | use sp_std::vec::Vec; 9 | 10 | // TODO (camfairchild): TEST MIGRATION 11 | 12 | /// Module containing deprecated storage format for LoadedEmission 13 | pub mod deprecated_loaded_emission_format { 14 | use super::*; 15 | 16 | #[storage_alias] 17 | pub(super) type LoadedEmission = 18 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 19 | } 20 | 21 | /// Migrates the storage to create the root network 22 | /// 23 | /// This function performs the following steps: 24 | /// 1. Checks if the root network already exists 25 | /// 2. If not, creates the root network with default settings 26 | /// 3. 
Removes all existing senate members 27 | /// 28 | /// # Arguments 29 | /// 30 | /// * `T` - The Config trait of the pallet 31 | /// 32 | /// # Returns 33 | /// 34 | /// * `Weight` - The computational weight of this operation 35 | /// 36 | /// # Example 37 | /// 38 | /// ```ignore 39 | /// let weight = migrate_create_root_network::(); 40 | /// ``` 41 | pub fn migrate_create_root_network() -> Weight { 42 | // Define the root network UID 43 | let root_netuid: u16 = 0; 44 | 45 | // Initialize weight counter 46 | let mut weight = T::DbWeight::get().reads(1); 47 | 48 | // Check if root network already exists 49 | if NetworksAdded::::get(root_netuid) { 50 | // Return early if root network already exists 51 | return weight; 52 | } 53 | 54 | // Set the root network as added 55 | NetworksAdded::::insert(root_netuid, true); 56 | 57 | // Increment the total number of networks 58 | TotalNetworks::::mutate(|n| *n = n.saturating_add(1)); 59 | 60 | // Set the maximum number of UIDs to the number of senate members 61 | MaxAllowedUids::::insert(root_netuid, 64); 62 | 63 | // Set the maximum number of validators to all members 64 | MaxAllowedValidators::::insert(root_netuid, 64); 65 | 66 | // Set the minimum allowed weights to zero (no weight restrictions) 67 | MinAllowedWeights::::insert(root_netuid, 0); 68 | 69 | // Set the maximum weight limit to u16::MAX (no weight restrictions) 70 | MaxWeightsLimit::::insert(root_netuid, u16::MAX); 71 | 72 | // Set default root tempo 73 | Tempo::::insert(root_netuid, 100); 74 | 75 | // Set the root network as open for registration 76 | NetworkRegistrationAllowed::::insert(root_netuid, true); 77 | 78 | // Set target registrations for validators as 1 per block 79 | TargetRegistrationsPerInterval::::insert(root_netuid, 1); 80 | 81 | // TODO: Consider if WeightsSetRateLimit should be set 82 | // WeightsSetRateLimit::::insert(root_netuid, 7200); 83 | 84 | // Accrue weight for database writes 85 | 
weight.saturating_accrue(T::DbWeight::get().writes(8)); 86 | 87 | // Remove all existing senate members 88 | for hotkey_i in T::SenateMembers::members().iter() { 89 | // Remove votes associated with the member 90 | T::TriumvirateInterface::remove_votes(hotkey_i).defensive_ok(); 91 | // Remove the member from the senate 92 | T::SenateMembers::remove_member(hotkey_i).defensive_ok(); 93 | 94 | // Accrue weight for database operations 95 | weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); 96 | } 97 | 98 | log::info!("Migrated create root network"); 99 | weight 100 | } 101 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::{ 3 | pallet_prelude::*, 4 | storage_alias, 5 | traits::{Get, GetStorageVersion, StorageVersion}, 6 | weights::Weight, 7 | }; 8 | use log::info; 9 | use sp_std::vec::Vec; 10 | 11 | /// Constant for logging purposes 12 | const LOG_TARGET: &str = "migrate_delete_subnet_21"; 13 | 14 | /// Module containing deprecated storage format 15 | pub mod deprecated_loaded_emission_format { 16 | use super::*; 17 | 18 | #[storage_alias] 19 | pub(super) type LoadedEmission = 20 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 21 | } 22 | 23 | /// Migrates the storage to delete subnet 21 24 | /// 25 | /// This function performs the following steps: 26 | /// 1. Checks if the migration is necessary 27 | /// 2. Removes all storage related to subnet 21 28 | /// 3. 
Updates the storage version 29 | /// 30 | /// # Arguments 31 | /// 32 | /// * `T` - The Config trait of the pallet 33 | /// 34 | /// # Returns 35 | /// 36 | /// * `Weight` - The computational weight of this operation 37 | /// 38 | /// # Example 39 | /// 40 | /// ```ignore 41 | /// let weight = migrate_delete_subnet_21::(); 42 | /// ``` 43 | pub fn migrate_delete_subnet_21() -> Weight { 44 | let new_storage_version = 4; 45 | 46 | // Setup migration weight 47 | let mut weight = T::DbWeight::get().reads(1); 48 | 49 | // Grab current version 50 | let onchain_version = Pallet::::on_chain_storage_version(); 51 | 52 | // Only runs if we haven't already updated version past above new_storage_version and subnet 21 exists. 53 | if onchain_version < new_storage_version && Pallet::::if_subnet_exist(21) { 54 | info!(target: LOG_TARGET, ">>> Removing subnet 21 {:?}", onchain_version); 55 | 56 | let netuid = 21; 57 | 58 | // We do this all manually as we don't want to call code related to giving subnet owner back their locked token cost. 
59 | // Remove network count 60 | SubnetworkN::::remove(netuid); 61 | 62 | // Remove network modality storage 63 | NetworkModality::::remove(netuid); 64 | 65 | // Remove netuid from added networks 66 | NetworksAdded::::remove(netuid); 67 | 68 | // Decrement the network counter 69 | TotalNetworks::::mutate(|n| *n = n.saturating_sub(1)); 70 | 71 | // Remove network registration time 72 | NetworkRegisteredAt::::remove(netuid); 73 | 74 | weight.saturating_accrue(T::DbWeight::get().writes(5)); 75 | 76 | // Remove incentive mechanism memory 77 | let _ = Uids::::clear_prefix(netuid, u32::MAX, None); 78 | let _ = Keys::::clear_prefix(netuid, u32::MAX, None); 79 | let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); 80 | let _ = Weights::::clear_prefix(netuid, u32::MAX, None); 81 | 82 | weight.saturating_accrue(T::DbWeight::get().writes(4)); 83 | 84 | // Remove various network-related parameters 85 | Rank::::remove(netuid); 86 | Trust::::remove(netuid); 87 | Active::::remove(netuid); 88 | Emission::::remove(netuid); 89 | Incentive::::remove(netuid); 90 | Consensus::::remove(netuid); 91 | Dividends::::remove(netuid); 92 | PruningScores::::remove(netuid); 93 | LastUpdate::::remove(netuid); 94 | ValidatorPermit::::remove(netuid); 95 | ValidatorTrust::::remove(netuid); 96 | 97 | weight.saturating_accrue(T::DbWeight::get().writes(11)); 98 | 99 | // Erase network parameters 100 | Tempo::::remove(netuid); 101 | Kappa::::remove(netuid); 102 | Difficulty::::remove(netuid); 103 | MaxAllowedUids::::remove(netuid); 104 | ImmunityPeriod::::remove(netuid); 105 | ActivityCutoff::::remove(netuid); 106 | EmissionValues::::remove(netuid); 107 | MaxWeightsLimit::::remove(netuid); 108 | MinAllowedWeights::::remove(netuid); 109 | RegistrationsThisInterval::::remove(netuid); 110 | POWRegistrationsThisInterval::::remove(netuid); 111 | BurnRegistrationsThisInterval::::remove(netuid); 112 | 113 | weight.saturating_accrue(T::DbWeight::get().writes(12)); 114 | 115 | // Update storage version 116 | 
StorageVersion::new(new_storage_version).put::>(); 117 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 118 | 119 | weight 120 | } else { 121 | info!(target: LOG_TARGET, "Migration to v4 already done or subnet 21 doesn't exist!"); 122 | Weight::zero() 123 | } 124 | } 125 | 126 | // TODO: Add unit tests for this migration 127 | // TODO: Consider adding error handling for storage operations 128 | // TODO: Verify that all relevant storage items for subnet 21 are removed 129 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::{ 3 | pallet_prelude::*, 4 | storage_alias, 5 | traits::{Get, GetStorageVersion, StorageVersion}, 6 | weights::Weight, 7 | }; 8 | use log::info; 9 | use sp_std::vec::Vec; 10 | 11 | /// Constant for logging purposes 12 | const LOG_TARGET: &str = "migrate_delete_subnet_3"; 13 | 14 | /// Module containing deprecated storage format 15 | pub mod deprecated_loaded_emission_format { 16 | use super::*; 17 | 18 | #[storage_alias] 19 | pub(super) type LoadedEmission = 20 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 21 | } 22 | 23 | /// Migrates the storage to delete subnet 3 24 | /// 25 | /// This function performs the following steps: 26 | /// 1. Checks if the migration is necessary 27 | /// 2. Removes all storage related to subnet 3 28 | /// 3. 
Updates the storage version 29 | /// 30 | /// # Arguments 31 | /// 32 | /// * `T` - The Config trait of the pallet 33 | /// 34 | /// # Returns 35 | /// 36 | /// * `Weight` - The computational weight of this operation 37 | /// 38 | /// # Example 39 | /// 40 | /// ```ignore 41 | /// let weight = migrate_delete_subnet_3::(); 42 | /// ``` 43 | pub fn migrate_delete_subnet_3() -> Weight { 44 | let new_storage_version = 5; 45 | 46 | // Initialize weight counter 47 | let mut weight = T::DbWeight::get().reads(1); 48 | 49 | // Get current on-chain storage version 50 | let onchain_version = Pallet::::on_chain_storage_version(); 51 | 52 | // Only proceed if current version is less than the new version and subnet 3 exists 53 | if onchain_version < new_storage_version && Pallet::::if_subnet_exist(3) { 54 | info!( 55 | target: LOG_TARGET, 56 | "Removing subnet 3. Current version: {:?}", 57 | onchain_version 58 | ); 59 | 60 | let netuid = 3; 61 | 62 | // Remove network count 63 | SubnetworkN::::remove(netuid); 64 | 65 | // Remove network modality storage 66 | NetworkModality::::remove(netuid); 67 | 68 | // Remove netuid from added networks 69 | NetworksAdded::::remove(netuid); 70 | 71 | // Decrement the network counter 72 | TotalNetworks::::mutate(|n| *n = n.saturating_sub(1)); 73 | 74 | // Remove network registration time 75 | NetworkRegisteredAt::::remove(netuid); 76 | 77 | weight.saturating_accrue(T::DbWeight::get().writes(5)); 78 | 79 | // Remove incentive mechanism memory 80 | let _ = Uids::::clear_prefix(netuid, u32::MAX, None); 81 | let _ = Keys::::clear_prefix(netuid, u32::MAX, None); 82 | let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); 83 | let _ = Weights::::clear_prefix(netuid, u32::MAX, None); 84 | 85 | weight.saturating_accrue(T::DbWeight::get().writes(4)); 86 | 87 | // Remove various network-related parameters 88 | Rank::::remove(netuid); 89 | Trust::::remove(netuid); 90 | Active::::remove(netuid); 91 | Emission::::remove(netuid); 92 | 
Incentive::::remove(netuid); 93 | Consensus::::remove(netuid); 94 | Dividends::::remove(netuid); 95 | PruningScores::::remove(netuid); 96 | LastUpdate::::remove(netuid); 97 | ValidatorPermit::::remove(netuid); 98 | ValidatorTrust::::remove(netuid); 99 | 100 | weight.saturating_accrue(T::DbWeight::get().writes(11)); 101 | 102 | // Erase network parameters 103 | Tempo::::remove(netuid); 104 | Kappa::::remove(netuid); 105 | Difficulty::::remove(netuid); 106 | MaxAllowedUids::::remove(netuid); 107 | ImmunityPeriod::::remove(netuid); 108 | ActivityCutoff::::remove(netuid); 109 | EmissionValues::::remove(netuid); 110 | MaxWeightsLimit::::remove(netuid); 111 | MinAllowedWeights::::remove(netuid); 112 | RegistrationsThisInterval::::remove(netuid); 113 | POWRegistrationsThisInterval::::remove(netuid); 114 | BurnRegistrationsThisInterval::::remove(netuid); 115 | 116 | weight.saturating_accrue(T::DbWeight::get().writes(12)); 117 | 118 | // Update storage version 119 | StorageVersion::new(new_storage_version).put::>(); 120 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 121 | 122 | weight 123 | } else { 124 | info!(target: LOG_TARGET, "Migration to v5 already completed or subnet 3 doesn't exist"); 125 | Weight::zero() 126 | } 127 | } 128 | 129 | // TODO: Add unit tests for this migration 130 | // TODO: Consider adding error handling for storage operations 131 | // TODO: Verify that all relevant storage items for subnet 3 are removed 132 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_fix_total_coldkey_stake.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use alloc::string::String; 3 | use frame_support::{ 4 | pallet_prelude::{Identity, OptionQuery}, 5 | storage_alias, 6 | traits::{Get, StorageVersion}, 7 | weights::Weight, 8 | }; 9 | use sp_std::vec::Vec; 10 | 11 | // TODO (camfairchild): TEST MIGRATION 12 | pub mod 
deprecated_loaded_emission_format { 13 | use super::*; 14 | 15 | #[storage_alias] 16 | pub(super) type LoadedEmission = 17 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 18 | } 19 | 20 | /// Migrates and fixes the total coldkey stake. 21 | /// 22 | /// This function iterates through all staking hotkeys, calculates the total stake for each coldkey, 23 | /// and updates the `TotalColdkeyStake` storage accordingly. The migration is only performed if the 24 | /// on-chain storage version is 6. 25 | /// 26 | /// # Returns 27 | /// The weight of the migration process. 28 | pub fn do_migrate_fix_total_coldkey_stake() -> Weight { 29 | // Initialize the weight with one read operation. 30 | let mut weight = T::DbWeight::get().reads(1); 31 | 32 | // Iterate through all staking hotkeys. 33 | for (coldkey, hotkey_vec) in StakingHotkeys::::iter() { 34 | // Init the zero value. 35 | let mut coldkey_stake_sum: u64 = 0; 36 | weight = weight.saturating_add(T::DbWeight::get().reads(1)); 37 | 38 | // Calculate the total stake for the current coldkey. 39 | for hotkey in hotkey_vec { 40 | // Cant fail on retrieval. 41 | coldkey_stake_sum = 42 | coldkey_stake_sum.saturating_add(Stake::::get(hotkey, coldkey.clone())); 43 | weight = weight.saturating_add(T::DbWeight::get().reads(1)); 44 | } 45 | // Update the `TotalColdkeyStake` storage with the calculated stake sum. 46 | // Cant fail on insert. 47 | TotalColdkeyStake::::insert(coldkey.clone(), coldkey_stake_sum); 48 | weight = weight.saturating_add(T::DbWeight::get().writes(1)); 49 | } 50 | weight 51 | } 52 | // Public migrate function to be called by Lib.rs on upgrade. 53 | pub fn migrate_fix_total_coldkey_stake() -> Weight { 54 | let migration_name = b"fix_total_coldkey_stake_v7".to_vec(); 55 | 56 | // Initialize the weight with one read operation. 
57 | let mut weight = T::DbWeight::get().reads(1); 58 | 59 | // Check if the migration has already run 60 | if HasMigrationRun::::get(&migration_name) { 61 | log::info!( 62 | "Migration '{:?}' has already run. Skipping.", 63 | migration_name 64 | ); 65 | return Weight::zero(); 66 | } 67 | 68 | log::info!( 69 | "Running migration '{}'", 70 | String::from_utf8_lossy(&migration_name) 71 | ); 72 | 73 | // Run the migration 74 | weight = weight.saturating_add(do_migrate_fix_total_coldkey_stake::()); 75 | 76 | // Mark the migration as completed 77 | HasMigrationRun::::insert(&migration_name, true); 78 | weight = weight.saturating_add(T::DbWeight::get().writes(1)); 79 | 80 | // Set the storage version to 7 81 | StorageVersion::new(7).put::>(); 82 | weight = weight.saturating_add(T::DbWeight::get().writes(1)); 83 | 84 | log::info!( 85 | "Migration '{:?}' completed. Storage version set to 7.", 86 | String::from_utf8_lossy(&migration_name) 87 | ); 88 | 89 | // Return the migration weight. 90 | weight 91 | } 92 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_init_total_issuance.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::pallet_prelude::OptionQuery; 3 | use frame_support::{pallet_prelude::Identity, storage_alias}; 4 | use sp_std::vec::Vec; 5 | 6 | // TODO: Implement comprehensive tests for this migration 7 | 8 | /// Module containing deprecated storage format for LoadedEmission 9 | pub mod deprecated_loaded_emission_format { 10 | use super::*; 11 | 12 | #[storage_alias] 13 | pub(super) type LoadedEmission = 14 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 15 | } 16 | 17 | pub mod initialise_total_issuance { 18 | use frame_support::pallet_prelude::Weight; 19 | use frame_support::traits::{fungible, OnRuntimeUpgrade}; 20 | use sp_core::Get; 21 | 22 | use crate::*; 23 | 24 | pub struct 
Migration(PhantomData); 25 | 26 | impl OnRuntimeUpgrade for Migration { 27 | /// Performs the migration to initialize and update the total issuance. 28 | /// 29 | /// This function does the following: 30 | /// 1. Calculates the total locked tokens across all subnets 31 | /// 2. Retrieves the total account balances and total stake 32 | /// 3. Computes and updates the new total issuance 33 | /// 34 | /// Returns the weight of the migration operation. 35 | fn on_runtime_upgrade() -> Weight { 36 | // Calculate the total locked tokens across all subnets 37 | let subnets_len = crate::SubnetLocked::::iter().count() as u64; 38 | let total_subnet_locked: u64 = 39 | crate::SubnetLocked::::iter().fold(0, |acc, (_, v)| acc.saturating_add(v)); 40 | 41 | // Retrieve the total balance of all accounts 42 | let total_account_balances = <::Currency as fungible::Inspect< 43 | ::AccountId, 44 | >>::total_issuance(); 45 | 46 | // Get the total stake from the system 47 | let total_stake = crate::TotalStake::::get(); 48 | 49 | // Retrieve the previous total issuance for logging purposes 50 | let prev_total_issuance = crate::TotalIssuance::::get(); 51 | 52 | // Calculate the new total issuance 53 | let new_total_issuance = total_account_balances 54 | .saturating_add(total_stake) 55 | .saturating_add(total_subnet_locked); 56 | 57 | // Update the total issuance in storage 58 | crate::TotalIssuance::::put(new_total_issuance); 59 | 60 | // Log the change in total issuance 61 | log::info!( 62 | "Subtensor Pallet Total Issuance Updated: previous: {:?}, new: {:?}", 63 | prev_total_issuance, 64 | new_total_issuance 65 | ); 66 | 67 | // Return the weight of the operation 68 | // We performed subnets_len + 5 reads and 1 write 69 | ::DbWeight::get() 70 | .reads_writes(subnets_len.saturating_add(5), 1) 71 | } 72 | 73 | /// Performs post-upgrade checks to ensure the migration was successful. 74 | /// 75 | /// This function is only compiled when the "try-runtime" feature is enabled. 
76 | #[cfg(feature = "try-runtime")] 77 | fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { 78 | // Verify that all accounting invariants are satisfied after the migration 79 | crate::Pallet::::check_accounting_invariants()?; 80 | Ok(()) 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_populate_owned_hotkeys.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::{ 3 | pallet_prelude::{Identity, OptionQuery}, 4 | storage_alias, 5 | traits::Get, 6 | weights::Weight, 7 | }; 8 | use log::info; 9 | use sp_std::vec::Vec; 10 | 11 | const LOG_TARGET_1: &str = "migrate_populate_owned"; 12 | 13 | /// Module containing deprecated storage format for LoadedEmission 14 | pub mod deprecated_loaded_emission_format { 15 | use super::*; 16 | 17 | #[storage_alias] 18 | pub(super) type LoadedEmission = 19 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 20 | } 21 | 22 | /// Migrate the OwnedHotkeys map to the new storage format 23 | pub fn migrate_populate_owned() -> Weight { 24 | // Setup migration weight 25 | let mut weight = T::DbWeight::get().reads(1); 26 | let migration_name = "Populate OwnedHotkeys map"; 27 | 28 | // Check if this migration is needed (if OwnedHotkeys map is empty) 29 | let migrate = OwnedHotkeys::::iter().next().is_none(); 30 | 31 | // Only runs if the migration is needed 32 | if migrate { 33 | info!(target: LOG_TARGET_1, ">>> Starting Migration: {}", migration_name); 34 | 35 | let mut longest_hotkey_vector: usize = 0; 36 | let mut longest_coldkey: Option = None; 37 | let mut keys_touched: u64 = 0; 38 | let mut storage_reads: u64 = 0; 39 | let mut storage_writes: u64 = 0; 40 | 41 | // Iterate through all Owner entries 42 | Owner::::iter().for_each(|(hotkey, coldkey)| { 43 | storage_reads = storage_reads.saturating_add(1); // Read from Owner storage 44 
| let mut hotkeys = OwnedHotkeys::::get(&coldkey); 45 | storage_reads = storage_reads.saturating_add(1); // Read from OwnedHotkeys storage 46 | 47 | // Add the hotkey if it's not already in the vector 48 | if !hotkeys.contains(&hotkey) { 49 | hotkeys.push(hotkey); 50 | keys_touched = keys_touched.saturating_add(1); 51 | 52 | // Update longest hotkey vector info 53 | if longest_hotkey_vector < hotkeys.len() { 54 | longest_hotkey_vector = hotkeys.len(); 55 | longest_coldkey = Some(coldkey.clone()); 56 | } 57 | 58 | // Update the OwnedHotkeys storage 59 | OwnedHotkeys::::insert(&coldkey, hotkeys); 60 | storage_writes = storage_writes.saturating_add(1); // Write to OwnedHotkeys storage 61 | } 62 | 63 | // Accrue weight for reads and writes 64 | weight = weight.saturating_add(T::DbWeight::get().reads_writes(2, 1)); 65 | }); 66 | 67 | // Log migration results 68 | info!( 69 | target: LOG_TARGET_1, 70 | "Migration {} finished. Keys touched: {}, Longest hotkey vector: {}, Storage reads: {}, Storage writes: {}", 71 | migration_name, keys_touched, longest_hotkey_vector, storage_reads, storage_writes 72 | ); 73 | if let Some(c) = longest_coldkey { 74 | info!(target: LOG_TARGET_1, "Longest hotkey vector is controlled by: {:?}", c); 75 | } 76 | 77 | weight 78 | } else { 79 | info!(target: LOG_TARGET_1, "Migration {} already done!", migration_name); 80 | Weight::zero() 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_populate_staking_hotkeys.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::{ 3 | pallet_prelude::{Identity, OptionQuery}, 4 | storage_alias, 5 | traits::Get, 6 | weights::Weight, 7 | }; 8 | use log::info; 9 | use sp_std::vec::Vec; 10 | const LOG_TARGET_1: &str = "migrate_populate_owned"; 11 | 12 | /// Module containing deprecated storage format for LoadedEmission 13 | pub mod 
deprecated_loaded_emission_format { 14 | use super::*; 15 | 16 | #[storage_alias] 17 | pub(super) type LoadedEmission = 18 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 19 | } 20 | 21 | /// Populate the StakingHotkeys map from Stake map 22 | pub fn migrate_populate_staking_hotkeys() -> Weight { 23 | // Setup migration weight 24 | let mut weight = T::DbWeight::get().reads(1); 25 | let migration_name = "Populate StakingHotkeys map"; 26 | 27 | // Check if this migration is needed (if StakingHotkeys map is empty) 28 | let migrate = StakingHotkeys::::iter().next().is_none(); 29 | 30 | // Only runs if the migration is needed 31 | if migrate { 32 | info!(target: LOG_TARGET_1, ">>> Starting Migration: {}", migration_name); 33 | 34 | let mut longest_hotkey_vector: usize = 0; 35 | let mut longest_coldkey: Option = None; 36 | let mut keys_touched: u64 = 0; 37 | let mut storage_reads: u64 = 0; 38 | let mut storage_writes: u64 = 0; 39 | 40 | // Iterate through all Owner entries 41 | Stake::::iter().for_each(|(hotkey, coldkey, stake)| { 42 | storage_reads = storage_reads.saturating_add(1); // Read from Owner storage 43 | if stake > 0 { 44 | let mut hotkeys = StakingHotkeys::::get(&coldkey); 45 | storage_reads = storage_reads.saturating_add(1); // Read from StakingHotkeys storage 46 | 47 | // Add the hotkey if it's not already in the vector 48 | if !hotkeys.contains(&hotkey) { 49 | hotkeys.push(hotkey); 50 | keys_touched = keys_touched.saturating_add(1); 51 | 52 | // Update longest hotkey vector info 53 | if longest_hotkey_vector < hotkeys.len() { 54 | longest_hotkey_vector = hotkeys.len(); 55 | longest_coldkey = Some(coldkey.clone()); 56 | } 57 | 58 | // Update the StakingHotkeys storage 59 | StakingHotkeys::::insert(&coldkey, hotkeys); 60 | storage_writes = storage_writes.saturating_add(1); // Write to StakingHotkeys storage 61 | } 62 | 63 | // Accrue weight for reads and writes 64 | weight = weight.saturating_add(T::DbWeight::get().reads_writes(2, 1)); 65 
| } 66 | }); 67 | 68 | // Log migration results 69 | info!( 70 | target: LOG_TARGET_1, 71 | "Migration {} finished. Keys touched: {}, Longest hotkey vector: {}, Storage reads: {}, Storage writes: {}", 72 | migration_name, keys_touched, longest_hotkey_vector, storage_reads, storage_writes 73 | ); 74 | if let Some(c) = longest_coldkey { 75 | info!(target: LOG_TARGET_1, "Longest hotkey vector is controlled by: {:?}", c); 76 | } 77 | 78 | weight 79 | } else { 80 | info!(target: LOG_TARGET_1, "Migration {} already done!", migration_name); 81 | Weight::zero() 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_to_v1_separate_emission.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::{ 3 | pallet_prelude::*, 4 | storage_alias, 5 | traits::{Get, GetStorageVersion, StorageVersion}, 6 | weights::Weight, 7 | }; 8 | use log::{info, warn}; 9 | use sp_std::vec::Vec; 10 | 11 | /// Constant for logging purposes 12 | const LOG_TARGET: &str = "loadedemissionmigration"; 13 | const LOG_TARGET_1: &str = "fixtotalstakestorage"; 14 | 15 | /// Module containing deprecated storage format 16 | pub mod deprecated_loaded_emission_format { 17 | use super::*; 18 | 19 | type AccountIdOf = ::AccountId; 20 | 21 | #[storage_alias] 22 | pub(super) type LoadedEmission = 23 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 24 | } 25 | 26 | /// Migrates the LoadedEmission storage to a new format 27 | /// 28 | /// # Arguments 29 | /// 30 | /// * `T` - The runtime configuration trait 31 | /// 32 | /// # Returns 33 | /// 34 | /// * `Weight` - The computational weight of this operation 35 | /// 36 | /// # Example 37 | /// 38 | /// ```ignore 39 | /// let weight = migrate_to_v1_separate_emission::(); 40 | /// ``` 41 | pub fn migrate_to_v1_separate_emission() -> Weight { 42 | use deprecated_loaded_emission_format as old; 43 | 44 
| // Initialize weight counter 45 | let mut weight = T::DbWeight::get().reads_writes(1, 0); 46 | 47 | // Get current on-chain storage version 48 | let onchain_version = Pallet::::on_chain_storage_version(); 49 | 50 | // Only proceed if current version is less than 1 51 | if onchain_version < 1 { 52 | info!( 53 | target: LOG_TARGET, 54 | ">>> Updating the LoadedEmission to a new format {:?}", onchain_version 55 | ); 56 | 57 | // Collect all network IDs (netuids) from old LoadedEmission storage 58 | let curr_loaded_emission: Vec = old::LoadedEmission::::iter_keys().collect(); 59 | 60 | // Remove any undecodable entries 61 | for netuid in curr_loaded_emission { 62 | weight.saturating_accrue(T::DbWeight::get().reads(1)); 63 | if old::LoadedEmission::::try_get(netuid).is_err() { 64 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 65 | old::LoadedEmission::::remove(netuid); 66 | warn!( 67 | "Was unable to decode old loaded_emission for netuid {}", 68 | netuid 69 | ); 70 | } 71 | } 72 | 73 | // Translate old storage values to new format 74 | LoadedEmission::::translate::, u64)>, _>( 75 | |netuid: u16, 76 | netuid_emissions: Vec<(AccountIdOf, u64)>| 77 | -> Option, u64, u64)>> { 78 | info!(target: LOG_TARGET, " Do migration of netuid: {:?}...", netuid); 79 | 80 | // Convert old format (server, validator_emission) to new format (server, server_emission, validator_emission) 81 | // Assume all loaded emission is validator emissions 82 | let new_netuid_emissions = netuid_emissions 83 | .into_iter() 84 | .map(|(server, validator_emission)| (server, 0_u64, validator_emission)) 85 | .collect(); 86 | 87 | // Update weight for read and write operations 88 | weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); 89 | 90 | Some(new_netuid_emissions) 91 | }, 92 | ); 93 | 94 | // Update storage version to 1 95 | StorageVersion::new(1).put::>(); 96 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 97 | 98 | weight 99 | } else { 100 | info!(target: 
LOG_TARGET_1, "Migration to v1 already completed!"); 101 | Weight::zero() 102 | } 103 | } 104 | 105 | // TODO: Add unit tests for this migration 106 | // TODO: Consider adding error handling for edge cases 107 | // TODO: Verify that all possible states of the old format are handled correctly 108 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_to_v2_fixed_total_stake.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::{ 3 | pallet_prelude::*, 4 | storage_alias, 5 | traits::{Get, GetStorageVersion, StorageVersion}, 6 | weights::Weight, 7 | }; 8 | use log::info; 9 | use sp_std::vec::Vec; 10 | 11 | /// Constant for logging purposes 12 | const LOG_TARGET: &str = "fix_total_stake_storage"; 13 | 14 | /// Module containing deprecated storage format 15 | pub mod deprecated_loaded_emission_format { 16 | use super::*; 17 | 18 | #[storage_alias] 19 | pub(super) type LoadedEmission = 20 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 21 | } 22 | 23 | /// Migrates the storage to fix TotalStake and TotalColdkeyStake 24 | /// 25 | /// This function performs the following steps: 26 | /// 1. Resets TotalStake to 0 27 | /// 2. Resets all TotalColdkeyStake entries to 0 28 | /// 3. 
Recalculates TotalStake and TotalColdkeyStake based on the Stake map 29 | /// 30 | /// # Arguments 31 | /// 32 | /// * `T` - The Config trait of the pallet 33 | /// 34 | /// # Returns 35 | /// 36 | /// * `Weight` - The computational weight of this operation 37 | /// 38 | /// # Example 39 | /// 40 | /// ```ignore 41 | /// let weight = migrate_to_v2_fixed_total_stake::(); 42 | /// ``` 43 | pub fn migrate_to_v2_fixed_total_stake() -> Weight { 44 | let new_storage_version = 2; 45 | 46 | // Initialize weight counter 47 | let mut weight = T::DbWeight::get().reads(1); 48 | 49 | // Get current on-chain storage version 50 | let onchain_version = Pallet::::on_chain_storage_version(); 51 | 52 | // Only proceed if current version is less than the new version 53 | if onchain_version < new_storage_version { 54 | info!( 55 | target: LOG_TARGET, 56 | "Fixing the TotalStake and TotalColdkeyStake storage. Current version: {:?}", 57 | onchain_version 58 | ); 59 | 60 | // Reset TotalStake to 0 61 | TotalStake::::put(0); 62 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 63 | 64 | // Reset all TotalColdkeyStake entries to 0 65 | let total_coldkey_stake_keys = TotalColdkeyStake::::iter_keys().collect::>(); 66 | for coldkey in total_coldkey_stake_keys { 67 | weight.saturating_accrue(T::DbWeight::get().reads(1)); 68 | TotalColdkeyStake::::insert(coldkey, 0); 69 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 70 | } 71 | 72 | // Recalculate TotalStake and TotalColdkeyStake based on the Stake map 73 | for (_, coldkey, stake) in Stake::::iter() { 74 | weight.saturating_accrue(T::DbWeight::get().reads(1)); 75 | 76 | // Update TotalColdkeyStake 77 | let mut total_coldkey_stake = TotalColdkeyStake::::get(coldkey.clone()); 78 | weight.saturating_accrue(T::DbWeight::get().reads(1)); 79 | total_coldkey_stake = total_coldkey_stake.saturating_add(stake); 80 | TotalColdkeyStake::::insert(coldkey, total_coldkey_stake); 81 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 82 
| 83 | // Update TotalStake 84 | let mut total_stake = TotalStake::::get(); 85 | weight.saturating_accrue(T::DbWeight::get().reads(1)); 86 | total_stake = total_stake.saturating_add(stake); 87 | TotalStake::::put(total_stake); 88 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 89 | } 90 | 91 | // Update storage version to prevent re-running this migration 92 | StorageVersion::new(new_storage_version).put::>(); 93 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 94 | 95 | weight 96 | } else { 97 | info!(target: LOG_TARGET, "Migration to v2 already completed"); 98 | Weight::zero() 99 | } 100 | } 101 | 102 | // TODO: Add unit tests for this migration function 103 | // TODO: Consider adding error handling for potential arithmetic overflow 104 | // TODO: Optimize the iteration over Stake map if possible to reduce database reads 105 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_total_issuance.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::pallet_prelude::OptionQuery; 3 | use frame_support::{ 4 | pallet_prelude::Identity, 5 | storage_alias, 6 | traits::{fungible::Inspect, Get, GetStorageVersion, StorageVersion}, 7 | weights::Weight, 8 | }; 9 | use sp_std::vec::Vec; 10 | 11 | // TODO: Implement comprehensive tests for this migration 12 | 13 | /// Module containing deprecated storage format for LoadedEmission 14 | pub mod deprecated_loaded_emission_format { 15 | use super::*; 16 | 17 | #[storage_alias] 18 | pub(super) type LoadedEmission = 19 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 20 | } 21 | 22 | /// Performs migration to update the total issuance based on the sum of stakes and total balances. 23 | /// 24 | /// This migration is applicable only if the current storage version is 5, after which it updates the storage version to 6. 
25 | /// 26 | /// # Arguments 27 | /// 28 | /// * `test` - A boolean flag to force migration execution for testing purposes. 29 | /// 30 | /// # Returns 31 | /// 32 | /// * `Weight` - The computational weight of this operation. 33 | /// 34 | /// # Example 35 | /// 36 | /// ```ignore 37 | /// let weight = migrate_total_issuance::(false); 38 | /// ``` 39 | pub fn migrate_total_issuance(test: bool) -> Weight { 40 | // Initialize migration weight with the cost of reading the storage version 41 | let mut weight = T::DbWeight::get().reads(1); 42 | 43 | // Execute migration if the current storage version is 5 or if in test mode 44 | if Pallet::::on_chain_storage_version() == StorageVersion::new(5) || test { 45 | // Calculate the sum of all stake values 46 | let stake_sum: u64 = 47 | Stake::::iter().fold(0, |acc, (_, _, stake)| acc.saturating_add(stake)); 48 | // Add weight for reading all stake entries 49 | weight = weight.saturating_add(T::DbWeight::get().reads(Stake::::iter().count() as u64)); 50 | 51 | // Calculate the sum of all locked subnet values 52 | let locked_sum: u64 = 53 | SubnetLocked::::iter().fold(0, |acc, (_, locked)| acc.saturating_add(locked)); 54 | // Add weight for reading all subnet locked entries 55 | weight = weight 56 | .saturating_add(T::DbWeight::get().reads(SubnetLocked::::iter().count() as u64)); 57 | 58 | // Retrieve the total balance sum 59 | let total_balance = T::Currency::total_issuance(); 60 | // Add weight for reading total issuance 61 | weight = weight.saturating_add(T::DbWeight::get().reads(1)); 62 | 63 | // Attempt to convert total balance to u64 64 | match TryInto::::try_into(total_balance) { 65 | Ok(total_balance_sum) => { 66 | // Compute the total issuance value 67 | let total_issuance_value: u64 = stake_sum 68 | .saturating_add(total_balance_sum) 69 | .saturating_add(locked_sum); 70 | 71 | // Update the total issuance in storage 72 | TotalIssuance::::put(total_issuance_value); 73 | 74 | // Update the storage version to 6 75 | 
StorageVersion::new(6).put::>(); 76 | 77 | // Add weight for writing total issuance and storage version 78 | weight = weight.saturating_add(T::DbWeight::get().writes(2)); 79 | } 80 | Err(_) => { 81 | // TODO: Implement proper error handling for conversion failure 82 | log::error!("Failed to convert total balance to u64, migration aborted"); 83 | } 84 | } 85 | } 86 | 87 | // Return the computed weight of the migration process 88 | weight 89 | } 90 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/migrate_transfer_ownership_to_foundation.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use frame_support::{ 3 | pallet_prelude::{Identity, OptionQuery}, 4 | storage_alias, 5 | traits::{GetStorageVersion, StorageVersion}, 6 | weights::Weight, 7 | }; 8 | use log::info; 9 | use sp_core::Get; 10 | use sp_std::vec::Vec; 11 | 12 | /// Constant for logging purposes 13 | const LOG_TARGET: &str = "migrate_transfer_ownership"; 14 | 15 | /// Module containing deprecated storage format 16 | pub mod deprecated_loaded_emission_format { 17 | use super::*; 18 | 19 | #[storage_alias] 20 | pub(super) type LoadedEmission = 21 | StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; 22 | } 23 | 24 | /// Migrates subnet ownership to the foundation and updates related storage 25 | /// 26 | /// # Arguments 27 | /// 28 | /// * `coldkey` - 32-byte array representing the foundation's coldkey 29 | /// 30 | /// # Returns 31 | /// 32 | /// * `Weight` - The computational weight of this operation 33 | /// 34 | /// # Example 35 | /// 36 | /// ```ignore 37 | /// let foundation_coldkey = [0u8; 32]; // Replace with actual foundation coldkey 38 | /// let weight = migrate_transfer_ownership_to_foundation::(foundation_coldkey); 39 | /// ``` 40 | pub fn migrate_transfer_ownership_to_foundation(coldkey: [u8; 32]) -> Weight { 41 | let new_storage_version = 3; 42 | 43 | // 
Initialize weight counter 44 | let mut weight = T::DbWeight::get().reads(1); 45 | 46 | // Get current on-chain storage version 47 | let onchain_version = Pallet::::on_chain_storage_version(); 48 | 49 | // Only proceed if current version is less than the new version 50 | if onchain_version < new_storage_version { 51 | info!( 52 | target: LOG_TARGET, 53 | "Migrating subnet 1 and 11 to foundation control. Current version: {:?}", 54 | onchain_version 55 | ); 56 | 57 | // Decode the foundation's coldkey into an AccountId 58 | // TODO: Consider error handling for decoding failure 59 | let coldkey_account: T::AccountId = T::AccountId::decode(&mut &coldkey[..]) 60 | .expect("coldkey should be a valid 32-byte array"); 61 | info!(target: LOG_TARGET, "Foundation coldkey: {:?}", coldkey_account); 62 | 63 | // Get the current block number 64 | let current_block = Pallet::::get_current_block_as_u64(); 65 | weight.saturating_accrue(T::DbWeight::get().reads(1)); 66 | 67 | // Transfer ownership of subnets 1 and 11 to the foundation 68 | SubnetOwner::::insert(1, coldkey_account.clone()); 69 | SubnetOwner::::insert(11, coldkey_account); 70 | 71 | // Set the registration time for subnet 1 to extend immunity period 72 | NetworkRegisteredAt::::insert(1, current_block.saturating_add(13 * 7200)); 73 | // Set the registration time for subnet 11 to the current block 74 | NetworkRegisteredAt::::insert(11, current_block); 75 | 76 | weight.saturating_accrue(T::DbWeight::get().writes(4)); 77 | 78 | // Update the storage version to prevent re-running this migration 79 | StorageVersion::new(new_storage_version).put::>(); 80 | weight.saturating_accrue(T::DbWeight::get().writes(1)); 81 | 82 | weight 83 | } else { 84 | info!(target: LOG_TARGET, "Migration to v3 already completed"); 85 | Weight::zero() 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /pallets/subtensor/src/migrations/mod.rs: 
--------------------------------------------------------------------------------
use super::*;
pub mod migrate_chain_identity;
pub mod migrate_create_root_network;
pub mod migrate_delete_subnet_21;
pub mod migrate_delete_subnet_3;
pub mod migrate_fix_pending_emission;
pub mod migrate_fix_total_coldkey_stake;
pub mod migrate_init_total_issuance;
pub mod migrate_populate_owned_hotkeys;
pub mod migrate_populate_staking_hotkeys;
pub mod migrate_to_v1_separate_emission;
pub mod migrate_to_v2_fixed_total_stake;
pub mod migrate_total_issuance;
pub mod migrate_transfer_ownership_to_foundation;
--------------------------------------------------------------------------------
/pallets/subtensor/src/rpc_info/mod.rs:
--------------------------------------------------------------------------------
use super::*;
pub mod delegate_info;
pub mod neuron_info;
pub mod stake_info;
pub mod subnet_info;
--------------------------------------------------------------------------------
/pallets/subtensor/src/rpc_info/stake_info.rs:
--------------------------------------------------------------------------------
use super::*;
use frame_support::pallet_prelude::{Decode, Encode};
extern crate alloc;
use codec::Compact;
use sp_core::hexdisplay::AsBytesRef;

/// RPC-facing record of one (hotkey, coldkey) stake entry.
#[freeze_struct("86d64c14d71d44b9")]
#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)]
pub struct StakeInfo<T: Config> {
    // Hotkey the stake is placed on.
    hotkey: T::AccountId,
    // Coldkey that owns the stake.
    coldkey: T::AccountId,
    // Stake amount in RAO, SCALE-compact encoded.
    stake: Compact<u64>,
}

impl<T: Config> Pallet<T> {
    /// Collects, for each requested coldkey, every Stake entry belonging to it.
    ///
    /// NOTE(review): this does a full scan of the Stake map once per coldkey
    /// (O(coldkeys * stake-entries)); fine for RPC but not for on-chain use.
    fn _get_stake_info_for_coldkeys(
        coldkeys: Vec<T::AccountId>,
    ) -> Vec<(T::AccountId, Vec<StakeInfo<T>>)> {
        if coldkeys.is_empty() {
            return Vec::new(); // No coldkeys to check
        }

        let mut stake_info: Vec<(T::AccountId, Vec<StakeInfo<T>>)> = Vec::new();
        for coldkey_ in coldkeys {
            let mut stake_info_for_coldkey: Vec<StakeInfo<T>> = Vec::new();

            for (hotkey, coldkey, stake) in <Stake<T>>::iter() {
                if coldkey == coldkey_ {
                    stake_info_for_coldkey.push(StakeInfo {
                        hotkey,
                        coldkey,
                        stake: stake.into(),
                    });
                }
            }

            stake_info.push((coldkey_, stake_info_for_coldkey));
        }

        stake_info
    }

    /// Decodes raw 32-byte account vectors into coldkeys and returns the stake
    /// entries for each. Malformed or undecodable keys are silently skipped.
    pub fn get_stake_info_for_coldkeys(
        coldkey_account_vecs: Vec<Vec<u8>>,
    ) -> Vec<(T::AccountId, Vec<StakeInfo<T>>)> {
        let mut coldkeys: Vec<T::AccountId> = Vec::new();
        for coldkey_account_vec in coldkey_account_vecs {
            if coldkey_account_vec.len() != 32 {
                continue; // Invalid coldkey
            }
            let Ok(coldkey) = T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()) else {
                continue;
            };
            coldkeys.push(coldkey);
        }

        if coldkeys.is_empty() {
            return Vec::new(); // No valid coldkeys to look up
        }

        Self::_get_stake_info_for_coldkeys(coldkeys)
    }

    /// Single-coldkey convenience wrapper: returns the stake entries for one
    /// raw 32-byte account vector, or an empty vector if it is invalid.
    pub fn get_stake_info_for_coldkey(coldkey_account_vec: Vec<u8>) -> Vec<StakeInfo<T>> {
        if coldkey_account_vec.len() != 32 {
            return Vec::new(); // Invalid coldkey
        }

        let Ok(coldkey) = T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()) else {
            return Vec::new();
        };
        let stake_info = Self::_get_stake_info_for_coldkeys(vec![coldkey]);

        if stake_info.is_empty() {
            Vec::new() // Invalid coldkey
        } else {
            let Some(first) = stake_info.first() else {
                return Vec::new();
            };

            first.1.clone()
        }
    }
}
--------------------------------------------------------------------------------
/pallets/subtensor/src/staking/add_stake.rs:
--------------------------------------------------------------------------------
use super::*;

impl<T: Config> Pallet<T> {
    /// ---- The implementation for the extrinsic add_stake: Adds stake to a hotkey account.
    ///
    /// # Args:
    /// * 'origin': (RuntimeOrigin):
    ///     - The signature of the caller's coldkey.
    ///
    /// * 'hotkey' (T::AccountId):
    ///     - The associated hotkey account.
12 | /// 13 | /// * 'stake_to_be_added' (u64): 14 | /// - The amount of stake to be added to the hotkey staking account. 15 | /// 16 | /// # Event: 17 | /// * StakeAdded; 18 | /// - On the successfully adding stake to a global account. 19 | /// 20 | /// # Raises: 21 | /// * 'NotEnoughBalanceToStake': 22 | /// - Not enough balance on the coldkey to add onto the global account. 23 | /// 24 | /// * 'NonAssociatedColdKey': 25 | /// - The calling coldkey is not associated with this hotkey. 26 | /// 27 | /// * 'BalanceWithdrawalError': 28 | /// - Errors stemming from transaction pallet. 29 | /// 30 | /// * 'TxRateLimitExceeded': 31 | /// - Thrown if key has hit transaction rate limit 32 | /// 33 | pub fn do_add_stake( 34 | origin: T::RuntimeOrigin, 35 | hotkey: T::AccountId, 36 | stake_to_be_added: u64, 37 | ) -> dispatch::DispatchResult { 38 | // We check that the transaction is signed by the caller and retrieve the T::AccountId coldkey information. 39 | let coldkey = ensure_signed(origin)?; 40 | log::debug!( 41 | "do_add_stake( origin:{:?} hotkey:{:?}, stake_to_be_added:{:?} )", 42 | coldkey, 43 | hotkey, 44 | stake_to_be_added 45 | ); 46 | 47 | // Ensure the callers coldkey has enough stake to perform the transaction. 48 | ensure!( 49 | Self::can_remove_balance_from_coldkey_account(&coldkey, stake_to_be_added), 50 | Error::::NotEnoughBalanceToStake 51 | ); 52 | 53 | // Ensure that the hotkey account exists this is only possible through registration. 54 | ensure!( 55 | Self::hotkey_account_exists(&hotkey), 56 | Error::::HotKeyAccountNotExists 57 | ); 58 | 59 | // Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey. 
60 | ensure!( 61 | Self::hotkey_is_delegate(&hotkey) || Self::coldkey_owns_hotkey(&coldkey, &hotkey), 62 | Error::::HotKeyNotDelegateAndSignerNotOwnHotKey 63 | ); 64 | 65 | // Ensure we don't exceed stake rate limit 66 | let stakes_this_interval = 67 | Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey); 68 | ensure!( 69 | stakes_this_interval < Self::get_target_stakes_per_interval(), 70 | Error::::StakeRateLimitExceeded 71 | ); 72 | 73 | // Track this addition in the stake delta. 74 | StakeDeltaSinceLastEmissionDrain::::mutate(&hotkey, &coldkey, |stake_delta| { 75 | *stake_delta = stake_delta.saturating_add_unsigned(stake_to_be_added as u128); 76 | }); 77 | 78 | // If coldkey is not owner of the hotkey, it's a nomination stake. 79 | if !Self::coldkey_owns_hotkey(&coldkey, &hotkey) { 80 | let total_stake_after_add = 81 | Stake::::get(&hotkey, &coldkey).saturating_add(stake_to_be_added); 82 | 83 | ensure!( 84 | total_stake_after_add >= NominatorMinRequiredStake::::get(), 85 | Error::::NomStakeBelowMinimumThreshold 86 | ); 87 | } 88 | 89 | // Ensure the remove operation from the coldkey is a success. 90 | let actual_amount_to_stake = 91 | Self::remove_balance_from_coldkey_account(&coldkey, stake_to_be_added)?; 92 | 93 | // If we reach here, add the balance to the hotkey. 94 | Self::increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, actual_amount_to_stake); 95 | 96 | // Set last block for rate limiting 97 | let block: u64 = Self::get_current_block_as_u64(); 98 | Self::set_last_tx_block(&coldkey, block); 99 | 100 | // Emit the staking event. 101 | Self::set_stakes_this_interval_for_coldkey_hotkey( 102 | &coldkey, 103 | &hotkey, 104 | stakes_this_interval.saturating_add(1), 105 | block, 106 | ); 107 | log::debug!( 108 | "StakeAdded( hotkey:{:?}, stake_to_be_added:{:?} )", 109 | hotkey, 110 | actual_amount_to_stake 111 | ); 112 | Self::deposit_event(Event::StakeAdded(hotkey, actual_amount_to_stake)); 113 | 114 | // Ok and return. 
115 | Ok(()) 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /pallets/subtensor/src/staking/become_delegate.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | impl Pallet { 4 | /// ---- The implementation for the extrinsic become_delegate: signals that this hotkey allows delegated stake. 5 | /// 6 | /// # Args: 7 | /// * 'origin': (RuntimeOrigin): 8 | /// - The signature of the caller's coldkey. 9 | /// 10 | /// * 'hotkey' (T::AccountId): 11 | /// - The hotkey we are delegating (must be owned by the coldkey.) 12 | /// 13 | /// * 'take' (u16): 14 | /// - The stake proportion that this hotkey takes from delegations. 15 | /// 16 | /// # Event: 17 | /// * DelegateAdded; 18 | /// - On successfully setting a hotkey as a delegate. 19 | /// 20 | /// # Raises: 21 | /// * 'NotRegistered': 22 | /// - The hotkey we are delegating is not registered on the network. 23 | /// 24 | /// * 'NonAssociatedColdKey': 25 | /// - The hotkey we are delegating is not owned by the calling coldket. 26 | /// 27 | /// * 'TxRateLimitExceeded': 28 | /// - Thrown if key has hit transaction rate limit 29 | /// 30 | pub fn do_become_delegate( 31 | origin: T::RuntimeOrigin, 32 | hotkey: T::AccountId, 33 | take: u16, 34 | ) -> dispatch::DispatchResult { 35 | // --- 1. We check the coldkey signuture. 36 | let coldkey = ensure_signed(origin)?; 37 | log::debug!( 38 | "do_become_delegate( origin:{:?} hotkey:{:?}, take:{:?} )", 39 | coldkey, 40 | hotkey, 41 | take 42 | ); 43 | 44 | // --- 2. Ensure we are delegating an known key. 45 | // --- 3. Ensure that the coldkey is the owner. 46 | Self::do_take_checks(&coldkey, &hotkey)?; 47 | 48 | // --- 4. Ensure we are not already a delegate (dont allow changing delegate take.) 49 | ensure!( 50 | !Self::hotkey_is_delegate(&hotkey), 51 | Error::::HotKeyAlreadyDelegate 52 | ); 53 | 54 | // --- 5. 
Ensure we don't exceed tx rate limit 55 | let block: u64 = Self::get_current_block_as_u64(); 56 | ensure!( 57 | !Self::exceeds_tx_rate_limit(Self::get_last_tx_block(&coldkey), block), 58 | Error::::DelegateTxRateLimitExceeded 59 | ); 60 | 61 | // --- 5.1 Ensure take is within the min ..= InitialDefaultDelegateTake (18%) range 62 | let min_take = MinDelegateTake::::get(); 63 | let max_take = MaxDelegateTake::::get(); 64 | ensure!(take >= min_take, Error::::DelegateTakeTooLow); 65 | ensure!(take <= max_take, Error::::DelegateTakeTooHigh); 66 | 67 | // --- 6. Delegate the key. 68 | Self::delegate_hotkey(&hotkey, take); 69 | 70 | // Set last block for rate limiting 71 | Self::set_last_tx_block(&coldkey, block); 72 | Self::set_last_tx_block_delegate_take(&coldkey, block); 73 | 74 | // --- 7. Emit the staking event. 75 | log::debug!( 76 | "DelegateAdded( coldkey:{:?}, hotkey:{:?}, take:{:?} )", 77 | coldkey, 78 | hotkey, 79 | take 80 | ); 81 | Self::deposit_event(Event::DelegateAdded(coldkey, hotkey, take)); 82 | 83 | // --- 8. Ok and return. 84 | Ok(()) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /pallets/subtensor/src/staking/decrease_take.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | impl Pallet { 4 | /// ---- The implementation for the extrinsic decrease_take 5 | /// 6 | /// # Args: 7 | /// * 'origin': (::RuntimeOrigin): 8 | /// - The signature of the caller's coldkey. 9 | /// 10 | /// * 'hotkey' (T::AccountId): 11 | /// - The hotkey we are delegating (must be owned by the coldkey.) 12 | /// 13 | /// * 'take' (u16): 14 | /// - The stake proportion that this hotkey takes from delegations for subnet ID. 15 | /// 16 | /// # Event: 17 | /// * TakeDecreased; 18 | /// - On successfully setting a decreased take for this hotkey. 19 | /// 20 | /// # Raises: 21 | /// * 'NotRegistered': 22 | /// - The hotkey we are delegating is not registered on the network. 
23 | /// 24 | /// * 'NonAssociatedColdKey': 25 | /// - The hotkey we are delegating is not owned by the calling coldket. 26 | /// 27 | /// * 'DelegateTakeTooLow': 28 | /// - The delegate is setting a take which is not lower than the previous. 29 | /// 30 | pub fn do_decrease_take( 31 | origin: T::RuntimeOrigin, 32 | hotkey: T::AccountId, 33 | take: u16, 34 | ) -> dispatch::DispatchResult { 35 | // --- 1. We check the coldkey signature. 36 | let coldkey = ensure_signed(origin)?; 37 | log::debug!( 38 | "do_decrease_take( origin:{:?} hotkey:{:?}, take:{:?} )", 39 | coldkey, 40 | hotkey, 41 | take 42 | ); 43 | 44 | // --- 2. Ensure we are delegating a known key. 45 | // Ensure that the coldkey is the owner. 46 | Self::do_take_checks(&coldkey, &hotkey)?; 47 | 48 | // --- 3. Ensure we are always strictly decreasing, never increasing take 49 | if let Ok(current_take) = Delegates::::try_get(&hotkey) { 50 | ensure!(take < current_take, Error::::DelegateTakeTooLow); 51 | } 52 | 53 | // --- 3.1 Ensure take is within the min ..= InitialDefaultDelegateTake (18%) range 54 | let min_take = MinDelegateTake::::get(); 55 | ensure!(take >= min_take, Error::::DelegateTakeTooLow); 56 | 57 | // --- 4. Set the new take value. 58 | Delegates::::insert(hotkey.clone(), take); 59 | 60 | // --- 5. Emit the take value. 61 | log::debug!( 62 | "TakeDecreased( coldkey:{:?}, hotkey:{:?}, take:{:?} )", 63 | coldkey, 64 | hotkey, 65 | take 66 | ); 67 | Self::deposit_event(Event::TakeDecreased(coldkey, hotkey, take)); 68 | 69 | // --- 6. Ok and return. 
70 | Ok(()) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /pallets/subtensor/src/staking/increase_take.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | impl Pallet { 4 | /// ---- The implementation for the extrinsic increase_take 5 | /// 6 | /// # Args: 7 | /// * 'origin': (::RuntimeOrigin): 8 | /// - The signature of the caller's coldkey. 9 | /// 10 | /// * 'hotkey' (T::AccountId): 11 | /// - The hotkey we are delegating (must be owned by the coldkey.) 12 | /// 13 | /// * 'take' (u16): 14 | /// - The stake proportion that this hotkey takes from delegations for subnet ID. 15 | /// 16 | /// # Event: 17 | /// * TakeIncreased; 18 | /// - On successfully setting a increased take for this hotkey. 19 | /// 20 | /// # Raises: 21 | /// * 'NotRegistered': 22 | /// - The hotkey we are delegating is not registered on the network. 23 | /// 24 | /// * 'NonAssociatedColdKey': 25 | /// - The hotkey we are delegating is not owned by the calling coldket. 26 | /// 27 | /// * 'TxRateLimitExceeded': 28 | /// - Thrown if key has hit transaction rate limit 29 | /// 30 | /// * 'DelegateTakeTooLow': 31 | /// - The delegate is setting a take which is not greater than the previous. 32 | /// 33 | pub fn do_increase_take( 34 | origin: T::RuntimeOrigin, 35 | hotkey: T::AccountId, 36 | take: u16, 37 | ) -> dispatch::DispatchResult { 38 | // --- 1. We check the coldkey signature. 39 | let coldkey = ensure_signed(origin)?; 40 | log::debug!( 41 | "do_increase_take( origin:{:?} hotkey:{:?}, take:{:?} )", 42 | coldkey, 43 | hotkey, 44 | take 45 | ); 46 | 47 | // --- 2. Ensure we are delegating a known key. 48 | // Ensure that the coldkey is the owner. 49 | Self::do_take_checks(&coldkey, &hotkey)?; 50 | 51 | // --- 3. 
Ensure we are strictly increasing take 52 | if let Ok(current_take) = Delegates::::try_get(&hotkey) { 53 | ensure!(take > current_take, Error::::DelegateTakeTooLow); 54 | } 55 | 56 | // --- 4. Ensure take is within the min ..= InitialDefaultDelegateTake (18%) range 57 | let max_take = MaxDelegateTake::::get(); 58 | ensure!(take <= max_take, Error::::DelegateTakeTooHigh); 59 | 60 | // --- 5. Enforce the rate limit (independently of do_add_stake rate limits) 61 | let block: u64 = Self::get_current_block_as_u64(); 62 | ensure!( 63 | !Self::exceeds_tx_delegate_take_rate_limit( 64 | Self::get_last_tx_block_delegate_take(&coldkey), 65 | block 66 | ), 67 | Error::::DelegateTxRateLimitExceeded 68 | ); 69 | 70 | // Set last block for rate limiting 71 | Self::set_last_tx_block_delegate_take(&coldkey, block); 72 | 73 | // --- 6. Set the new take value. 74 | Delegates::::insert(hotkey.clone(), take); 75 | 76 | // --- 7. Emit the take value. 77 | log::debug!( 78 | "TakeIncreased( coldkey:{:?}, hotkey:{:?}, take:{:?} )", 79 | coldkey, 80 | hotkey, 81 | take 82 | ); 83 | Self::deposit_event(Event::TakeIncreased(coldkey, hotkey, take)); 84 | 85 | // --- 8. Ok and return. 86 | Ok(()) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /pallets/subtensor/src/staking/mod.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | pub mod add_stake; 3 | pub mod become_delegate; 4 | pub mod decrease_take; 5 | pub mod helpers; 6 | pub mod increase_take; 7 | pub mod remove_stake; 8 | pub mod set_children; 9 | -------------------------------------------------------------------------------- /pallets/subtensor/src/staking/remove_stake.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | impl Pallet { 4 | /// ---- The implementation for the extrinsic remove_stake: Removes stake from a hotkey account and adds it onto a coldkey. 
5 | /// 6 | /// # Args: 7 | /// * 'origin': (RuntimeOrigin): 8 | /// - The signature of the caller's coldkey. 9 | /// 10 | /// * 'hotkey' (T::AccountId): 11 | /// - The associated hotkey account. 12 | /// 13 | /// * 'stake_to_be_removed' (u64): 14 | /// - The amount of stake to be removed from the hotkey staking account. 15 | /// 16 | /// # Event: 17 | /// * StakeRemoved; 18 | /// - On successfully removing stake from the hotkey account. 19 | /// 20 | /// # Raises: 21 | /// * 'NotRegistered': 22 | /// - Thrown if the account we are attempting to unstake from is non existent. 23 | /// 24 | /// * 'NonAssociatedColdKey': 25 | /// - Thrown if the coldkey does not own the hotkey we are unstaking from. 26 | /// 27 | /// * 'NotEnoughStakeToWithdraw': 28 | /// - Thrown if there is not enough stake on the hotkey to withdraw this amount. 29 | /// 30 | /// * 'TxRateLimitExceeded': 31 | /// - Thrown if key has hit transaction rate limit 32 | /// 33 | pub fn do_remove_stake( 34 | origin: T::RuntimeOrigin, 35 | hotkey: T::AccountId, 36 | stake_to_be_removed: u64, 37 | ) -> dispatch::DispatchResult { 38 | // We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information. 39 | let coldkey = ensure_signed(origin)?; 40 | log::debug!( 41 | "do_remove_stake( origin:{:?} hotkey:{:?}, stake_to_be_removed:{:?} )", 42 | coldkey, 43 | hotkey, 44 | stake_to_be_removed 45 | ); 46 | 47 | // Ensure that the hotkey account exists this is only possible through registration. 48 | ensure!( 49 | Self::hotkey_account_exists(&hotkey), 50 | Error::::HotKeyAccountNotExists 51 | ); 52 | 53 | // Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey. 54 | ensure!( 55 | Self::hotkey_is_delegate(&hotkey) || Self::coldkey_owns_hotkey(&coldkey, &hotkey), 56 | Error::::HotKeyNotDelegateAndSignerNotOwnHotKey 57 | ); 58 | 59 | // Ensure that the stake amount to be removed is above zero. 
60 | ensure!(stake_to_be_removed > 0, Error::::StakeToWithdrawIsZero); 61 | 62 | // Ensure that the hotkey has enough stake to withdraw. 63 | ensure!( 64 | Self::has_enough_stake(&coldkey, &hotkey, stake_to_be_removed), 65 | Error::::NotEnoughStakeToWithdraw 66 | ); 67 | 68 | // Ensure we don't exceed stake rate limit 69 | let unstakes_this_interval = 70 | Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey); 71 | ensure!( 72 | unstakes_this_interval < Self::get_target_stakes_per_interval(), 73 | Error::::UnstakeRateLimitExceeded 74 | ); 75 | 76 | // We remove the balance from the hotkey. 77 | Self::decrease_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_to_be_removed); 78 | 79 | // Track this removal in the stake delta. 80 | StakeDeltaSinceLastEmissionDrain::::mutate(&hotkey, &coldkey, |stake_delta| { 81 | *stake_delta = stake_delta.saturating_sub_unsigned(stake_to_be_removed as u128); 82 | }); 83 | 84 | // We add the balance to the coldkey. If the above fails we will not credit this coldkey. 85 | Self::add_balance_to_coldkey_account(&coldkey, stake_to_be_removed); 86 | 87 | // If the stake is below the minimum, we clear the nomination from storage. 88 | // This only applies to nominator stakes. 89 | // If the coldkey does not own the hotkey, it's a nominator stake. 90 | let new_stake = Self::get_stake_for_coldkey_and_hotkey(&coldkey, &hotkey); 91 | Self::clear_small_nomination_if_required(&hotkey, &coldkey, new_stake); 92 | 93 | // Set last block for rate limiting 94 | let block: u64 = Self::get_current_block_as_u64(); 95 | Self::set_last_tx_block(&coldkey, block); 96 | 97 | // Emit the unstaking event. 
98 | Self::set_stakes_this_interval_for_coldkey_hotkey( 99 | &coldkey, 100 | &hotkey, 101 | unstakes_this_interval.saturating_add(1), 102 | block, 103 | ); 104 | log::debug!( 105 | "StakeRemoved( hotkey:{:?}, stake_to_be_removed:{:?} )", 106 | hotkey, 107 | stake_to_be_removed 108 | ); 109 | Self::deposit_event(Event::StakeRemoved(hotkey, stake_to_be_removed)); 110 | 111 | // Done and ok. 112 | Ok(()) 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /pallets/subtensor/src/subnets/mod.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | pub mod registration; 3 | pub mod serving; 4 | pub mod uids; 5 | pub mod weights; 6 | -------------------------------------------------------------------------------- /pallets/subtensor/src/swap/mod.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | pub mod swap_coldkey; 3 | pub mod swap_hotkey; 4 | -------------------------------------------------------------------------------- /pallets/subtensor/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | pub mod identity; 3 | pub mod misc; 4 | pub mod rate_limiting; 5 | pub mod try_state; 6 | -------------------------------------------------------------------------------- /pallets/subtensor/src/utils/rate_limiting.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use sp_core::Get; 3 | 4 | /// Enum representing different types of transactions 5 | #[derive(Copy, Clone)] 6 | pub enum TransactionType { 7 | SetChildren, 8 | SetChildkeyTake, 9 | Unknown, 10 | } 11 | 12 | /// Implement conversion from TransactionType to u16 13 | impl From for u16 { 14 | fn from(tx_type: TransactionType) -> Self { 15 | match tx_type { 16 | TransactionType::SetChildren => 0, 17 | TransactionType::SetChildkeyTake => 1, 18 | 
TransactionType::Unknown => 2, 19 | } 20 | } 21 | } 22 | 23 | /// Implement conversion from u16 to TransactionType 24 | impl From for TransactionType { 25 | fn from(value: u16) -> Self { 26 | match value { 27 | 0 => TransactionType::SetChildren, 28 | 1 => TransactionType::SetChildkeyTake, 29 | _ => TransactionType::Unknown, 30 | } 31 | } 32 | } 33 | impl Pallet { 34 | // ======================== 35 | // ==== Rate Limiting ===== 36 | // ======================== 37 | /// Get the rate limit for a specific transaction type 38 | pub fn get_rate_limit(tx_type: &TransactionType) -> u64 { 39 | match tx_type { 40 | TransactionType::SetChildren => (DefaultTempo::::get().saturating_mul(2)).into(), // Cannot set children twice within the default tempo period. 41 | TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), 42 | TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) 43 | } 44 | } 45 | 46 | /// Check if a transaction should be rate limited on a specific subnet 47 | pub fn passes_rate_limit_on_subnet( 48 | tx_type: &TransactionType, 49 | hotkey: &T::AccountId, 50 | netuid: u16, 51 | ) -> bool { 52 | let block: u64 = Self::get_current_block_as_u64(); 53 | let limit: u64 = Self::get_rate_limit(tx_type); 54 | let last_block: u64 = Self::get_last_transaction_block(hotkey, netuid, tx_type); 55 | 56 | // Allow the first transaction (when last_block is 0) or if the rate limit has passed 57 | last_block == 0 || block.saturating_sub(last_block) >= limit 58 | } 59 | 60 | /// Check if a transaction should be rate limited globally 61 | pub fn passes_rate_limit_globally(tx_type: &TransactionType, hotkey: &T::AccountId) -> bool { 62 | let netuid: u16 = u16::MAX; 63 | let block: u64 = Self::get_current_block_as_u64(); 64 | let limit: u64 = Self::get_rate_limit(tx_type); 65 | let last_block: u64 = Self::get_last_transaction_block(hotkey, netuid, tx_type); 66 | block.saturating_sub(last_block) >= limit 67 | } 68 | 69 | /// Get the block 
number of the last transaction for a specific hotkey, network, and transaction type 70 | pub fn get_last_transaction_block( 71 | hotkey: &T::AccountId, 72 | netuid: u16, 73 | tx_type: &TransactionType, 74 | ) -> u64 { 75 | let tx_as_u16: u16 = (*tx_type).into(); 76 | TransactionKeyLastBlock::::get((hotkey, netuid, tx_as_u16)) 77 | } 78 | 79 | /// Set the block number of the last transaction for a specific hotkey, network, and transaction type 80 | pub fn set_last_transaction_block( 81 | hotkey: &T::AccountId, 82 | netuid: u16, 83 | tx_type: &TransactionType, 84 | block: u64, 85 | ) { 86 | let tx_as_u16: u16 = (*tx_type).into(); 87 | TransactionKeyLastBlock::::insert((hotkey, netuid, tx_as_u16), block); 88 | } 89 | 90 | pub fn set_last_tx_block(key: &T::AccountId, block: u64) { 91 | LastTxBlock::::insert(key, block) 92 | } 93 | pub fn get_last_tx_block(key: &T::AccountId) -> u64 { 94 | LastTxBlock::::get(key) 95 | } 96 | pub fn set_last_tx_block_delegate_take(key: &T::AccountId, block: u64) { 97 | LastTxBlockDelegateTake::::insert(key, block) 98 | } 99 | pub fn get_last_tx_block_delegate_take(key: &T::AccountId) -> u64 { 100 | LastTxBlockDelegateTake::::get(key) 101 | } 102 | 103 | pub fn set_last_tx_block_childkey_take(key: &T::AccountId, block: u64) { 104 | LastTxBlockChildKeyTake::::insert(key, block) 105 | } 106 | pub fn get_last_tx_block_childkey_take(key: &T::AccountId) -> u64 { 107 | LastTxBlockChildKeyTake::::get(key) 108 | } 109 | pub fn exceeds_tx_rate_limit(prev_tx_block: u64, current_block: u64) -> bool { 110 | let rate_limit: u64 = Self::get_tx_rate_limit(); 111 | if rate_limit == 0 || prev_tx_block == 0 { 112 | return false; 113 | } 114 | 115 | current_block.saturating_sub(prev_tx_block) <= rate_limit 116 | } 117 | pub fn exceeds_tx_delegate_take_rate_limit(prev_tx_block: u64, current_block: u64) -> bool { 118 | let rate_limit: u64 = Self::get_tx_delegate_take_rate_limit(); 119 | if rate_limit == 0 || prev_tx_block == 0 { 120 | return false; 121 | } 
122 | 123 | current_block.saturating_sub(prev_tx_block) <= rate_limit 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /pallets/subtensor/src/utils/try_state.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | impl Pallet { 4 | /// Checks if the accounting invariants for [`TotalStake`], [`TotalSubnetLocked`], and [`TotalIssuance`] are correct. 5 | /// 6 | /// This function verifies that: 7 | /// 1. The sum of all stakes matches the [`TotalStake`]. 8 | /// 2. The [`TotalSubnetLocked`] is correctly calculated. 9 | /// 3. The [`TotalIssuance`] equals the sum of currency issuance, total stake, and total subnet locked. 10 | /// 11 | /// # Returns 12 | /// 13 | /// Returns `Ok(())` if all invariants are correct, otherwise returns an error. 14 | #[cfg(feature = "try-runtime")] 15 | pub fn check_accounting_invariants() -> Result<(), sp_runtime::TryRuntimeError> { 16 | use frame_support::traits::fungible::Inspect; 17 | 18 | // Calculate the total staked amount 19 | let mut total_staked: u64 = 0; 20 | for (_hotkey, _coldkey, stake) in Stake::::iter() { 21 | total_staked = total_staked.saturating_add(stake); 22 | } 23 | 24 | // Verify that the calculated total stake matches the stored TotalStake 25 | ensure!( 26 | total_staked == TotalStake::::get(), 27 | "TotalStake does not match total staked", 28 | ); 29 | 30 | // Get the total subnet locked amount 31 | let total_subnet_locked: u64 = Self::get_total_subnet_locked(); 32 | 33 | // Get the total currency issuance 34 | let currency_issuance: u64 = T::Currency::total_issuance(); 35 | 36 | // Calculate the expected total issuance 37 | let expected_total_issuance: u64 = currency_issuance 38 | .saturating_add(total_staked) 39 | .saturating_add(total_subnet_locked); 40 | 41 | // Verify that the calculated total issuance matches the stored TotalIssuance 42 | ensure!( 43 | TotalIssuance::::get() == expected_total_issuance, 
44 | "TotalIssuance accounting discrepancy", 45 | ); 46 | 47 | Ok(()) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /pallets/subtensor/tests/batch_tx.rs: -------------------------------------------------------------------------------- 1 | use frame_support::{assert_ok, traits::Currency}; 2 | use frame_system::Config; 3 | use sp_core::U256; 4 | mod mock; 5 | use mock::*; 6 | 7 | #[test] 8 | fn test_batch_txs() { 9 | let alice = U256::from(0); 10 | let bob = U256::from(1); 11 | let charlie = U256::from(2); 12 | let initial_balances = vec![ 13 | (alice, 8_000_000_000), 14 | (bob, 1_000_000_000), 15 | (charlie, 1_000_000_000), 16 | ]; 17 | test_ext_with_balances(initial_balances).execute_with(|| { 18 | assert_ok!(Utility::batch( 19 | <::RuntimeOrigin>::signed(alice), 20 | vec![ 21 | RuntimeCall::Balances(BalanceCall::transfer_allow_death { 22 | dest: bob, 23 | value: 1_000_000_000 24 | }), 25 | RuntimeCall::Balances(BalanceCall::transfer_allow_death { 26 | dest: charlie, 27 | value: 1_000_000_000 28 | }) 29 | ] 30 | )); 31 | assert_eq!(Balances::total_balance(&alice), 6_000_000_000); 32 | assert_eq!(Balances::total_balance(&bob), 2_000_000_000); 33 | assert_eq!(Balances::total_balance(&charlie), 2_000_000_000); 34 | }); 35 | } 36 | -------------------------------------------------------------------------------- /pallets/subtensor/tests/neuron_info.rs: -------------------------------------------------------------------------------- 1 | mod mock; 2 | use mock::*; 3 | 4 | use sp_core::U256; 5 | 6 | #[test] 7 | fn test_get_neuron_none() { 8 | new_test_ext(1).execute_with(|| { 9 | let netuid: u16 = 1; 10 | let uid: u16 = 42; 11 | 12 | let neuron = SubtensorModule::get_neuron(netuid, uid); 13 | assert_eq!(neuron, None); 14 | }); 15 | } 16 | 17 | #[test] 18 | fn test_get_neuron_some() { 19 | new_test_ext(1).execute_with(|| { 20 | let netuid: u16 = 1; 21 | 22 | let tempo: u16 = 2; 23 | let modality: u16 = 2; 24 | 25 | let 
uid: u16 = 0; 26 | let hotkey0 = U256::from(0); 27 | let coldkey0 = U256::from(0); 28 | 29 | add_network(netuid, tempo, modality); 30 | register_ok_neuron(netuid, hotkey0, coldkey0, 39420842); 31 | 32 | let neuron = SubtensorModule::get_neuron(netuid, uid); 33 | assert_ne!(neuron, None); 34 | }); 35 | } 36 | 37 | /* @TODO: Add more neurons to list */ 38 | #[test] 39 | fn test_get_neurons_list() { 40 | new_test_ext(1).execute_with(|| { 41 | let netuid: u16 = 1; 42 | 43 | let tempo: u16 = 2; 44 | let modality: u16 = 2; 45 | 46 | add_network(netuid, tempo, modality); 47 | 48 | let _uid: u16 = 42; 49 | 50 | let neuron_count = 1; 51 | for index in 0..neuron_count { 52 | let hotkey = U256::from(index); 53 | let coldkey = U256::from(index); 54 | let nonce: u64 = 39420842 + index; 55 | register_ok_neuron(netuid, hotkey, coldkey, nonce); 56 | } 57 | 58 | let neurons = SubtensorModule::get_neurons(netuid); 59 | assert_eq!(neurons.len(), neuron_count as usize); 60 | }); 61 | } 62 | 63 | #[test] 64 | fn test_get_neurons_empty() { 65 | new_test_ext(1).execute_with(|| { 66 | let netuid: u16 = 1; 67 | 68 | let neuron_count = 0; 69 | let neurons = SubtensorModule::get_neurons(netuid); 70 | assert_eq!(neurons.len(), neuron_count as usize); 71 | }); 72 | } 73 | -------------------------------------------------------------------------------- /runtime/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | #[cfg(all(feature = "std", not(feature = "metadata-hash")))] 3 | { 4 | substrate_wasm_builder::WasmBuilder::new() 5 | .with_current_project() 6 | .export_heap_base() 7 | .import_memory() 8 | .build(); 9 | } 10 | #[cfg(all(feature = "std", feature = "metadata-hash"))] 11 | { 12 | substrate_wasm_builder::WasmBuilder::new() 13 | .with_current_project() 14 | .export_heap_base() 15 | .import_memory() 16 | .enable_metadata_hash("TAO", 9) 17 | .build(); 18 | } 19 | } 20 | 
-------------------------------------------------------------------------------- /runtime/src/check_nonce.rs: -------------------------------------------------------------------------------- 1 | use codec::{Decode, Encode}; 2 | use frame_support::dispatch::{DispatchInfo, Pays}; 3 | use frame_system::Config; 4 | use scale_info::TypeInfo; 5 | use sp_runtime::{ 6 | traits::{DispatchInfoOf, Dispatchable, One, SignedExtension, Zero}, 7 | transaction_validity::{ 8 | InvalidTransaction, TransactionLongevity, TransactionValidity, TransactionValidityError, 9 | ValidTransaction, 10 | }, 11 | Saturating, 12 | }; 13 | use sp_std::vec; 14 | use subtensor_macros::freeze_struct; 15 | 16 | /// Nonce check and increment to give replay protection for transactions. 17 | /// 18 | /// # Transaction Validity 19 | /// 20 | /// This extension affects `requires` and `provides` tags of validity, but DOES NOT 21 | /// set the `priority` field. Make sure that AT LEAST one of the signed extension sets 22 | /// some kind of priority upon validating transactions. 23 | #[freeze_struct("610b76f62cdb521e")] 24 | #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] 25 | #[scale_info(skip_type_params(T))] 26 | pub struct CheckNonce(#[codec(compact)] pub T::Nonce); 27 | 28 | impl CheckNonce { 29 | /// utility constructor. Used only in client/factory code. 
30 | pub fn from(nonce: T::Nonce) -> Self { 31 | Self(nonce) 32 | } 33 | } 34 | 35 | impl sp_std::fmt::Debug for CheckNonce { 36 | #[cfg(feature = "std")] 37 | fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { 38 | write!(f, "CheckNonce({})", self.0) 39 | } 40 | 41 | #[cfg(not(feature = "std"))] 42 | fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { 43 | Ok(()) 44 | } 45 | } 46 | 47 | impl SignedExtension for CheckNonce 48 | where 49 | T::RuntimeCall: Dispatchable, 50 | { 51 | type AccountId = T::AccountId; 52 | type Call = T::RuntimeCall; 53 | type AdditionalSigned = (); 54 | type Pre = (); 55 | const IDENTIFIER: &'static str = "CheckNonce"; 56 | 57 | fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { 58 | Ok(()) 59 | } 60 | 61 | fn pre_dispatch( 62 | self, 63 | who: &Self::AccountId, 64 | _call: &Self::Call, 65 | info: &DispatchInfoOf, 66 | _len: usize, 67 | ) -> Result<(), TransactionValidityError> { 68 | let mut account = frame_system::Account::::get(who); 69 | match info.pays_fee { 70 | Pays::Yes => { 71 | if account.providers.is_zero() && account.sufficients.is_zero() { 72 | // Nonce storage not paid for 73 | return Err(InvalidTransaction::Payment.into()); 74 | } 75 | } 76 | // not check providers and sufficients for Pays::No extrinsic 77 | Pays::No => {} 78 | } 79 | 80 | if self.0 != account.nonce { 81 | return Err(if self.0 < account.nonce { 82 | InvalidTransaction::Stale 83 | } else { 84 | InvalidTransaction::Future 85 | } 86 | .into()); 87 | } 88 | account.nonce.saturating_inc(); 89 | frame_system::Account::::insert(who, account); 90 | Ok(()) 91 | } 92 | 93 | fn validate( 94 | &self, 95 | who: &Self::AccountId, 96 | _call: &Self::Call, 97 | info: &DispatchInfoOf, 98 | _len: usize, 99 | ) -> TransactionValidity { 100 | let account = frame_system::Account::::get(who); 101 | match info.pays_fee { 102 | Pays::Yes => { 103 | if account.providers.is_zero() && 
account.sufficients.is_zero() { 104 | // Nonce storage not paid for 105 | return Err(InvalidTransaction::Payment.into()); 106 | } 107 | } 108 | // not check providers and sufficients for Pays::No extrinsic 109 | Pays::No => {} 110 | } 111 | if self.0 < account.nonce { 112 | return InvalidTransaction::Stale.into(); 113 | } 114 | 115 | let provides = vec![Encode::encode(&(who, self.0))]; 116 | let requires = if account.nonce < self.0 { 117 | vec![Encode::encode(&(who, self.0.saturating_sub(One::one())))] 118 | } else { 119 | vec![] 120 | }; 121 | 122 | Ok(ValidTransaction { 123 | priority: 0, 124 | requires, 125 | provides, 126 | longevity: TransactionLongevity::MAX, 127 | propagate: true, 128 | }) 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /runtime/src/migrations/mod.rs: -------------------------------------------------------------------------------- 1 | //! Export migrations from here. 2 | -------------------------------------------------------------------------------- /runtime/src/spec_version.rs: -------------------------------------------------------------------------------- 1 | use node_subtensor_runtime::VERSION; 2 | 3 | fn main() { 4 | println!("{}", VERSION.spec_version); 5 | } 6 | -------------------------------------------------------------------------------- /runtime/tests/metadata.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::indexing_slicing)] 2 | 3 | use frame_metadata::RuntimeMetadata; 4 | use node_subtensor_runtime::Runtime; 5 | use scale_info::TypeDef; 6 | 7 | fn is_pallet_error(segments: &[String]) -> bool { 8 | let pallet_list: Vec<&str> = vec![ 9 | "pallet_admin_utils", 10 | "pallet_collective", 11 | "pallet_commitments", 12 | "pallet_registry", 13 | "pallet_subtensor", 14 | ]; 15 | 16 | if segments.len() != 3 { 17 | false 18 | } else { 19 | pallet_list.contains(&segments[0].as_str()) 20 | && segments[1] == "pallet" 21 | && 
segments[2] == "Error" 22 | } 23 | } 24 | 25 | // test make sure all errors are documented 26 | #[test] 27 | fn test_metadata() { 28 | let metadata = Runtime::metadata().1; 29 | // current metadata version should be 14 30 | assert!(matches!(metadata, RuntimeMetadata::V14(_))); 31 | 32 | if let RuntimeMetadata::V14(value) = metadata { 33 | let types = value.types.types; 34 | for ty in types.iter() { 35 | let segments = &ty.ty.path.segments; 36 | if is_pallet_error(segments) { 37 | // error call and event should be enum type 38 | assert!(matches!(ty.ty.type_def, TypeDef::Variant(_))); 39 | if let TypeDef::Variant(variants) = &ty.ty.type_def { 40 | // check docs not empty 41 | for variant in variants.variants.iter() { 42 | // print name make it easier to find out failed item 43 | println!("{}", variant.name); 44 | assert!(!variant.docs.is_empty()); 45 | assert!(!variant.docs[0].is_empty()); 46 | } 47 | } 48 | } 49 | } 50 | }; 51 | } 52 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "stable" 3 | components = [ 4 | "cargo", 5 | "clippy", 6 | "rust-analyzer", 7 | "rust-src", 8 | "rust-std", 9 | "rustc", 10 | "rustc-dev", 11 | "rustfmt", 12 | ] 13 | targets = ["wasm32-unknown-unknown"] 14 | profile = "minimal" 15 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Basic 2 | hard_tabs = true 3 | max_width = 100 4 | use_small_heuristics = "Max" 5 | # Imports 6 | imports_granularity = "Crate" 7 | reorder_imports = true 8 | # Consistency 9 | newline_style = "Unix" 10 | # Format comments 11 | comment_width = 100 12 | wrap_comments = true 13 | # Misc 14 | chain_width = 80 15 | spaces_around_ranges = false 16 | binop_separator = "Back" 17 | reorder_impl_items = false 18 | 
match_arm_leading_pipes = "Preserve" 19 | match_arm_blocks = false 20 | match_block_trailing_comma = true 21 | trailing_comma = "Vertical" 22 | trailing_semicolon = false 23 | use_field_init_shorthand = true 24 | -------------------------------------------------------------------------------- /scripts/benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DEFAULT_BIN_PATH='./target/production/node-subtensor' 4 | BIN_PATH=$DEFAULT_BIN_PATH 5 | TMP_SPEC='temp.json' 6 | OUTPUT_FILE='benchmarking.txt' 7 | 8 | # Getting arguments from user 9 | while [[ $# -gt 0 ]]; do 10 | case $1 in 11 | -p | --bin-path) 12 | BIN_PATH="$2" 13 | shift 14 | shift 15 | ;; 16 | -* | --*) 17 | echo "Unknown option $1" 18 | exit 1 19 | ;; 20 | *) 21 | POSITIONAL_ARGS+=("$1") 22 | shift 23 | ;; 24 | esac 25 | done 26 | 27 | # Ensure binary exists before node-subtensor executions 28 | if [ ! -f $BIN_PATH ]; then 29 | if [[ "$DEFAULT_BIN_PATH" == "$BIN_PATH" ]]; then 30 | cargo build --profile production --features runtime-benchmarks 31 | else 32 | echo "Binary '$BIN_PATH' does not exist. You can use -p or --bin-path to specify a different location." 
33 | exit 1 34 | fi 35 | fi 36 | 37 | # Build Temporary Spec 38 | $BIN_PATH build-spec --disable-default-bootnode --raw --chain local >$TMP_SPEC 39 | 40 | # Run benchmark 41 | $BIN_PATH benchmark pallet \ 42 | --chain=$TMP_SPEC \ 43 | --pallet pallet-subtensor --extrinsic 'schedule_coldkey_swap' \ 44 | --output $OUTPUT_FILE 45 | 46 | rm $TMP_SPEC 47 | -------------------------------------------------------------------------------- /scripts/benchmark_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | 4 | # List of pallets you want to benchmark 5 | pallets=("pallet_subtensor" "pallet_collective" "pallet_commitments" "pallet_registry" "pallet_admin_utils") 6 | 7 | # Chain spec and output directory 8 | chain_spec="finney" # or your specific chain spec 9 | 10 | for pallet in "${pallets[@]}" 11 | do 12 | echo "Benchmarking $pallet..." 13 | cargo run --profile=production --features=runtime-benchmarks,try-runtime --bin node-subtensor -- benchmark pallet \ 14 | --chain $chain_spec \ 15 | --wasm-execution=compiled \ 16 | --pallet $pallet \ 17 | --extrinsic '*' \ 18 | --steps 50 \ 19 | --repeat 5 \ 20 | --output "pallets/$pallet/src/weights.rs" \ 21 | --template ./.maintain/frame-weight-template.hbs # Adjust this path to your template file 22 | done 23 | 24 | echo "All pallets have been benchmarked and weights updated." 25 | -------------------------------------------------------------------------------- /scripts/build.sh: -------------------------------------------------------------------------------- 1 | cargo build --profile production --features "metadata-hash" 2 | 3 | -------------------------------------------------------------------------------- /scripts/build_all_chainspecs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "*** Building node..." 6 | cargo build 7 | 8 | echo "*** Building new chainspecs..." 
9 | 10 | finney_genesis_temp=$(mktemp) 11 | testfinney_genesis_temp=$(mktemp) 12 | raw_spec_finney_temp=$(mktemp) 13 | raw_spec_testfinney_temp=$(mktemp) 14 | 15 | # Save old genesis state before doing anything 16 | jq -r ".genesis" raw_spec_finney.json >"$finney_genesis_temp" 17 | jq -r ".genesis" raw_spec_testfinney.json >"$testfinney_genesis_temp" 18 | 19 | # Build new chainspecs 20 | ./target/debug/node-subtensor build-spec --raw --chain finney >"$raw_spec_finney_temp" 21 | ./target/debug/node-subtensor build-spec --chain finney >plain_spec_finney.json 22 | 23 | ./target/debug/node-subtensor build-spec --raw --chain test_finney >"$raw_spec_testfinney_temp" 24 | ./target/debug/node-subtensor build-spec --chain test_finney >plain_spec_testfinney.json 25 | 26 | echo "*** Updating genesis..." 27 | 28 | # The genesis is not allowed to change. Since the wasm genesis will change depending on the system 29 | # architecture used, we need to extract the genesis from the old chain specs and insert them into 30 | # the new chain specs to ensure there are no genesis mismatch issues. 31 | 32 | # Update genesis in new chainspecs using the extracted genesis data from the temporary files 33 | jq --slurpfile genesis "$finney_genesis_temp" '.genesis = $genesis[0]' "$raw_spec_finney_temp" >raw_spec_finney.json 34 | jq --slurpfile genesis "$testfinney_genesis_temp" '.genesis = $genesis[0]' "$raw_spec_testfinney_temp" >raw_spec_testfinney.json 35 | 36 | # Cleanup 37 | rm -f "$finney_genesis_temp" "$testfinney_genesis_temp" "$raw_spec_finney_temp" "$raw_spec_testfinney_temp" 38 | 39 | echo "*** Done!" 
40 | -------------------------------------------------------------------------------- /scripts/code-coverage.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -TeE 4 | 5 | ## Find true directory this script resides in 6 | __SOURCE__="${BASH_SOURCE[0]}" 7 | while [[ -h "${__SOURCE__}" ]]; do 8 | __SOURCE__="$(find "${__SOURCE__}" -type l -ls | sed -n 's@^.* -> \(.*\)@\1@p')" 9 | done 10 | __DIR__="$(cd -P "$(dirname "${__SOURCE__}")" && pwd)" 11 | __G_DIR__="$(dirname "${__DIR__}")" 12 | 13 | ## Sub-directory name under: ../target/ 14 | _target_dir_name='tarpaulin' 15 | 16 | _tarpaulin_options=( 17 | --skip-clean 18 | --no-fail-fast 19 | --ignore-tests 20 | --exclude-files "${__G_DIR__}/target/*" 21 | ) 22 | 23 | if (( VERBOSE )); then 24 | _tarpaulin_options+=( --verbose ) 25 | fi 26 | 27 | ## 28 | # Do not fool around with contents of: ../target/debug 29 | # - https://lib.rs/crates/cargo-tarpaulin#readme-recompilation 30 | _tarpaulin_options+=( 31 | --target-dir "${__G_DIR__}/target/${_target_dir_name}" 32 | ) 33 | 34 | ## 35 | # Allow additional CLI parameters too 36 | _extra_arguments=("${@}") 37 | if ((${#_extra_arguments[@]})); then 38 | _tarpaulin_options+=( "${_extra_arguments[@]}" ) 39 | fi 40 | 41 | SKIP_WASM_BUILD=1 cargo +nightly tarpaulin "${_tarpaulin_options[@]}" | 42 | grep -vE '^\|\|\s+(target/debug)' 43 | 44 | -------------------------------------------------------------------------------- /scripts/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script is meant to be run on Unix/Linux based systems 3 | set -e 4 | 5 | echo "*** Initializing WASM build environment" 6 | 7 | if ! 
(( ${#CI_PROJECT_NAME} )) ; then 8 | rustup update nightly 9 | rustup update stable 10 | fi 11 | 12 | rustup target add wasm32-unknown-unknown --toolchain nightly 13 | -------------------------------------------------------------------------------- /scripts/install_rust.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "*** Checking if Rust is already installed" 4 | 5 | if which rustup >/dev/null 2>&1; then 6 | echo "Rust is already installed. Exiting." 7 | exit 0 8 | fi 9 | 10 | echo "*** Installing Rust" 11 | 12 | if [[ "$(uname)" == "Darwin" ]]; then 13 | # macOS 14 | if ! which brew >/dev/null 2>&1; then 15 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" 16 | fi 17 | 18 | brew update 19 | brew install openssl cmake llvm 20 | elif [[ "$(uname)" == "Linux" ]]; then 21 | if [[ -f "/etc/arch-release" ]]; then 22 | # Arch Linux 23 | sudo pacman -Syu --noconfirm 24 | sudo pacman -S --noconfirm cmake pkgconf openssl git gcc clang 25 | else 26 | # Ubuntu (and other Debian-based distributions) 27 | sudo apt-get update 28 | sudo apt-get install -y cmake pkg-config libssl-dev git gcc build-essential clang libclang-dev 29 | fi 30 | else 31 | echo "Unsupported operating system. Exiting." 
32 | exit 1 33 | fi 34 | 35 | curl https://sh.rustup.rs -sSf | sh -s -- -y 36 | source "$HOME/.cargo/env" 37 | rustup default stable 38 | 39 | rustup update nightly 40 | rustup target add wasm32-unknown-unknown --toolchain nightly 41 | 42 | echo "*** Rust installation complete" 43 | -------------------------------------------------------------------------------- /scripts/localnet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if `--no-purge` passed as a parameter 4 | NO_PURGE=0 5 | for arg in "$@"; do 6 | if [ "$arg" = "--no-purge" ]; then 7 | NO_PURGE=1 8 | break 9 | fi 10 | done 11 | 12 | # Determine the directory this script resides in. This allows invoking it from any location. 13 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" 14 | 15 | # The base directory of the subtensor project 16 | BASE_DIR="$SCRIPT_DIR/.." 17 | 18 | # get parameters 19 | # Get the value of fast_blocks from the first argument 20 | fast_blocks=${1:-"True"} 21 | 22 | # Check the value of fast_blocks 23 | if [ "$fast_blocks" == "False" ]; then 24 | # Block of code to execute if fast_blocks is False 25 | echo "fast_blocks is Off" 26 | : "${CHAIN:=local}" 27 | : "${BUILD_BINARY:=1}" 28 | : "${FEATURES:="pow-faucet runtime-benchmarks"}" 29 | else 30 | # Block of code to execute if fast_blocks is not False 31 | echo "fast_blocks is On" 32 | : "${CHAIN:=local}" 33 | : "${BUILD_BINARY:=1}" 34 | : "${FEATURES:="pow-faucet runtime-benchmarks fast-blocks"}" 35 | fi 36 | 37 | SPEC_PATH="${SCRIPT_DIR}/specs/" 38 | FULL_PATH="$SPEC_PATH$CHAIN.json" 39 | 40 | # Kill any existing nodes which may have not exited correctly after a previous 41 | # run. 42 | pkill -9 'node-subtensor' 43 | 44 | if [ ! -d "$SPEC_PATH" ]; then 45 | echo "*** Creating directory ${SPEC_PATH}..." 46 | mkdir -p "$SPEC_PATH" 47 | fi 48 | 49 | if [[ $BUILD_BINARY == "1" ]]; then 50 | echo "*** Building substrate binary..."
51 | cargo build --workspace --profile=release --features "$FEATURES" --manifest-path "$BASE_DIR/Cargo.toml" 52 | echo "*** Binary compiled" 53 | fi 54 | 55 | echo "*** Building chainspec..." 56 | "$BASE_DIR/target/release/node-subtensor" build-spec --disable-default-bootnode --raw --chain $CHAIN >$FULL_PATH 57 | echo "*** Chainspec built and output to file" 58 | 59 | if [ $NO_PURGE -eq 1 ]; then 60 | echo "*** Purging previous state skipped..." 61 | else 62 | echo "*** Purging previous state..." 63 | "$BASE_DIR/target/release/node-subtensor" purge-chain -y --base-path /tmp/bob --chain="$FULL_PATH" >/dev/null 2>&1 64 | "$BASE_DIR/target/release/node-subtensor" purge-chain -y --base-path /tmp/alice --chain="$FULL_PATH" >/dev/null 2>&1 65 | echo "*** Previous chainstate purged" 66 | fi 67 | 68 | echo "*** Starting localnet nodes..." 69 | alice_start=( 70 | "$BASE_DIR/target/release/node-subtensor" 71 | --base-path /tmp/alice 72 | --chain="$FULL_PATH" 73 | --alice 74 | --port 30334 75 | --rpc-port 9946 76 | --validator 77 | --rpc-cors=all 78 | --allow-private-ipv4 79 | --discover-local 80 | ) 81 | 82 | bob_start=( 83 | "$BASE_DIR"/target/release/node-subtensor 84 | --base-path /tmp/bob 85 | --chain="$FULL_PATH" 86 | --bob 87 | --port 30335 88 | --rpc-port 9945 89 | --validator 90 | --allow-private-ipv4 91 | --discover-local 92 | ) 93 | 94 | trap 'pkill -P $$' EXIT SIGINT SIGTERM 95 | 96 | ( 97 | ("${alice_start[@]}" 2>&1) & 98 | ("${bob_start[@]}" 2>&1) 99 | wait 100 | ) 101 | -------------------------------------------------------------------------------- /scripts/publish.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | cd support/macros 4 | cargo publish 5 | cd ../.. 6 | cd pallets/commitments 7 | cargo publish 8 | cd .. 9 | cd collective 10 | cargo publish 11 | cd .. 12 | cd registry 13 | cargo publish 14 | cd .. 
15 | cd subtensor 16 | cargo publish 17 | cd runtime-api 18 | cargo publish 19 | cd ../.. 20 | cd admin-utils 21 | cargo publish 22 | cd ../.. 23 | cd runtime 24 | cargo publish 25 | cd .. 26 | cd node 27 | cargo publish 28 | echo "published successfully." 29 | -------------------------------------------------------------------------------- /scripts/run/subtensor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # 4 | # Helper functions 5 | # 6 | 7 | function run_command() { 8 | F_NETWORK=$1 9 | F_NODE_TYPE=$2 10 | F_BIN_PATH=$3 11 | 12 | # Different command options by network and node type 13 | MAINNET_BOOTNODE='--bootnodes /dns/bootnode.finney.chain.opentensor.ai/tcp/30333/ws/p2p/12D3KooWRwbMb85RWnT8DSXSYMWQtuDwh4LJzndoRrTDotTR5gDC' 14 | TESTNET_BOOTNODE='--bootnodes /dns/bootnode.test.finney.opentensor.ai/tcp/30333/ws/p2p/12D3KooWPM4mLcKJGtyVtkggqdG84zWrd7Rij6PGQDoijh1X86Vr' 15 | NODE_TYPE_ARCHIVE='--pruning=archive' 16 | NODE_TYPE_LITE='--sync warp' 17 | 18 | # Options by the type of node we offer 19 | MAINNET_ARCHIVE_OPTIONS="$MAINNET_BOOTNODE $NODE_TYPE_ARCHIVE" 20 | MAINNET_LITE_OPTIONS="$MAINNET_BOOTNODE $NODE_TYPE_LITE" 21 | TESTNET_ARCHIVE_OPTIONS="$TESTNET_BOOTNODE $NODE_TYPE_ARCHIVE" 22 | TESTNET_LITE_OPTIONS="$TESTNET_BOOTNODE $NODE_TYPE_LITE" 23 | 24 | # Checking options to use 25 | if [[ "$F_NETWORK" == "mainnet" ]] && [[ "$F_NODE_TYPE" == "archive" ]]; then 26 | SPECIFIC_OPTIONS=$MAINNET_ARCHIVE_OPTIONS 27 | elif [[ "$F_NETWORK" == "mainnet" ]] && [[ "$F_NODE_TYPE" == "lite" ]]; then 28 | SPECIFIC_OPTIONS=$MAINNET_LITE_OPTIONS 29 | elif [[ "$F_NETWORK" == "testnet" ]] && [[ "$F_NODE_TYPE" == "archive" ]]; then 30 | SPECIFIC_OPTIONS=$TESTNET_ARCHIVE_OPTIONS 31 | elif [[ "$F_NETWORK" == "testnet" ]] && [[ "$F_NODE_TYPE" == "lite" ]]; then 32 | SPECIFIC_OPTIONS=$TESTNET_LITE_OPTIONS 33 | fi 34 | 35 | if [ ! -f $F_BIN_PATH ]; then 36 | echo "Binary '$F_BIN_PATH' does not exist. 
You can use -p or --bin-path to specify a different location." 37 | echo "Please ensure you have compiled the binary first." 38 | exit 1 39 | fi 40 | 41 | # Command to run subtensor 42 | $F_BIN_PATH \ 43 | --base-path /tmp/blockchain \ 44 | --chain ./raw_spec_finney.json \ 45 | --rpc-external --rpc-cors all \ 46 | --no-mdns \ 47 | --rpc-max-connections 10000 --in-peers 500 --out-peers 500 \ 48 | $SPECIFIC_OPTIONS 49 | } 50 | 51 | # Default values 52 | EXEC_TYPE="docker" 53 | NETWORK="mainnet" 54 | NODE_TYPE="lite" 55 | BUILD="" 56 | BIN_PATH="./target/production/node-subtensor" 57 | 58 | # Getting arguments from user 59 | while [[ $# -gt 0 ]]; do 60 | case $1 in 61 | -h | --help) 62 | help 63 | exit 0 64 | ;; 65 | -e | --execution) 66 | EXEC_TYPE="$2" 67 | shift # past argument 68 | shift # past value 69 | ;; 70 | -b | --build) 71 | BUILD="--build" 72 | shift # past argument 73 | ;; 74 | -n | --network) 75 | NETWORK="$2" 76 | shift 77 | shift 78 | ;; 79 | -t | --node-type) 80 | NODE_TYPE="$2" 81 | shift 82 | shift 83 | ;; 84 | -p | --bin-path) 85 | BIN_PATH="$2" 86 | shift 87 | shift 88 | ;; 89 | -* | --*) 90 | echo "Unknown option $1" 91 | exit 1 92 | ;; 93 | *) 94 | POSITIONAL_ARGS+=("$1") 95 | shift 96 | ;; 97 | esac 98 | done 99 | 100 | # Verifying arguments values 101 | if ! [[ "$EXEC_TYPE" =~ ^(docker|binary)$ ]]; then 102 | echo "Exec type not expected: $EXEC_TYPE" 103 | exit 1 104 | fi 105 | 106 | if ! [[ "$NETWORK" =~ ^(mainnet|testnet)$ ]]; then 107 | echo "Network not expected: $NETWORK" 108 | exit 1 109 | fi 110 | 111 | if ! 
[[ "$NODE_TYPE" =~ ^(lite|archive)$ ]]; then 112 | echo "Node type not expected: $NODE_TYPE" 113 | exit 1 114 | fi 115 | 116 | # Running subtensor 117 | case $EXEC_TYPE in 118 | docker) 119 | docker compose down --remove-orphans 120 | echo "Running docker compose up $BUILD --detach $NETWORK-$NODE_TYPE" 121 | docker compose up $BUILD --detach $NETWORK-$NODE_TYPE 122 | ;; 123 | binary) 124 | run_command $NETWORK $NODE_TYPE $BIN_PATH 125 | ;; 126 | esac 127 | -------------------------------------------------------------------------------- /scripts/test_specific.sh: -------------------------------------------------------------------------------- 1 | pallet="${3:-pallet-subtensor}" 2 | features="${4:-pow-faucet}" 3 | 4 | SKIP_WASM_BUILD=1 RUST_LOG=DEBUG cargo test --release --features=$features -p $pallet --test $1 -- $2 --nocapture --exact -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let 2 | mozillaOverlay = 3 | import (builtins.fetchGit { 4 | url = "https://github.com/mozilla/nixpkgs-mozilla.git"; 5 | rev = "57c8084c7ef41366993909c20491e359bbb90f54"; 6 | }); 7 | pinned = builtins.fetchGit { 8 | # Descriptive name to make the store path easier to identify 9 | url = "https://github.com/nixos/nixpkgs/"; 10 | # Commit hash for nixos-unstable as of 2020-04-26 11 | # `git ls-remote https://github.com/nixos/nixpkgs nixos-unstable` 12 | ref = "refs/heads/nixos-unstable"; 13 | rev = "1fe6ed37fd9beb92afe90671c0c2a662a03463dd"; 14 | }; 15 | nixpkgs = import pinned { overlays = [ mozillaOverlay ]; }; 16 | toolchain = with nixpkgs; (rustChannelOf { date = "2021-09-14"; channel = "nightly"; }); 17 | rust-wasm = toolchain.rust.override { 18 | targets = [ "wasm32-unknown-unknown" ]; 19 | }; 20 | in 21 | with nixpkgs; pkgs.mkShell { 22 | buildInputs = [ 23 | clang 24 | pkg-config 25 | rust-wasm 26 | ] ++ stdenv.lib.optionals stdenv.isDarwin [ 27 | 
darwin.apple_sdk.frameworks.Security 28 | ]; 29 | 30 | LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; 31 | PROTOC = "${protobuf}/bin/protoc"; 32 | RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library/"; 33 | ROCKSDB_LIB_DIR = "${rocksdb}/lib"; 34 | 35 | } 36 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /support/linting/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "subtensor-linting" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | syn.workspace = true 8 | quote.workspace = true 9 | proc-macro2.workspace = true 10 | 11 | [lints] 12 | workspace = true 13 | -------------------------------------------------------------------------------- /support/linting/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod lint; 2 | pub use lint::*; 3 | 4 | mod require_freeze_struct; 5 | 6 | pub use require_freeze_struct::RequireFreezeStruct; 7 | -------------------------------------------------------------------------------- /support/linting/src/lint.rs: -------------------------------------------------------------------------------- 1 | use syn::File; 2 | 3 | pub type Result = core::result::Result<(), Vec>; 4 | 5 | /// A trait that defines custom lints that can be run within our workspace. 6 | /// 7 | /// Each lint is run in parallel on all Rust source files in the workspace. Within a lint you 8 | /// can issue an error the same way you would in a proc macro, and otherwise return `Ok(())` if 9 | /// there are no errors. 10 | pub trait Lint: Send + Sync { 11 | /// Lints the given Rust source file, returning a compile error if any issues are found. 
12 | fn lint(source: &File) -> Result; 13 | } 14 | -------------------------------------------------------------------------------- /support/linting/src/require_freeze_struct.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use syn::{ 3 | parse_quote, punctuated::Punctuated, visit::Visit, Attribute, File, ItemStruct, Meta, MetaList, 4 | Path, Token, 5 | }; 6 | 7 | pub struct RequireFreezeStruct; 8 | 9 | impl Lint for RequireFreezeStruct { 10 | fn lint(source: &File) -> Result { 11 | let mut visitor = EncodeDecodeVisitor::default(); 12 | 13 | visitor.visit_file(source); 14 | 15 | if !visitor.errors.is_empty() { 16 | return Err(visitor.errors); 17 | } 18 | 19 | Ok(()) 20 | } 21 | } 22 | 23 | #[derive(Default)] 24 | struct EncodeDecodeVisitor { 25 | errors: Vec, 26 | } 27 | 28 | impl<'ast> Visit<'ast> for EncodeDecodeVisitor { 29 | fn visit_item_struct(&mut self, node: &'ast ItemStruct) { 30 | let has_encode_decode = node.attrs.iter().any(is_derive_encode_or_decode); 31 | let has_freeze_struct = node.attrs.iter().any(is_freeze_struct); 32 | 33 | if has_encode_decode && !has_freeze_struct { 34 | self.errors.push(syn::Error::new( 35 | node.ident.span(), 36 | "Struct with Encode/Decode derive must also have #[freeze_struct(..)] attribute.", 37 | )); 38 | } 39 | 40 | syn::visit::visit_item_struct(self, node); 41 | } 42 | } 43 | 44 | fn is_freeze_struct(attr: &Attribute) -> bool { 45 | if let Meta::List(meta_list) = &attr.meta { 46 | let Some(seg) = meta_list.path.segments.last() else { 47 | return false; 48 | }; 49 | if seg.ident == "freeze_struct" && !meta_list.tokens.is_empty() { 50 | return true; 51 | } 52 | } 53 | false 54 | } 55 | 56 | fn is_derive_encode_or_decode(attr: &Attribute) -> bool { 57 | if let Meta::List(MetaList { path, tokens, .. 
}) = &attr.meta { 58 | if path.is_ident("derive") { 59 | let nested: Punctuated = parse_quote!(#tokens); 60 | return nested.iter().any(|nested| { 61 | nested.segments.iter().any(|seg| seg.ident == "Encode") 62 | || nested.segments.iter().any(|seg| seg.ident == "Decode") 63 | }); 64 | } 65 | } 66 | false 67 | } 68 | 69 | #[cfg(test)] 70 | mod tests { 71 | use super::*; 72 | 73 | fn lint_struct(input: &str) -> Result { 74 | let item_struct: ItemStruct = syn::parse_str(input).expect("should only use on a struct"); 75 | let mut visitor = EncodeDecodeVisitor::default(); 76 | visitor.visit_item_struct(&item_struct); 77 | if !visitor.errors.is_empty() { 78 | return Err(visitor.errors); 79 | } 80 | Ok(()) 81 | } 82 | 83 | #[test] 84 | fn test_no_attributes() { 85 | let input = r#" 86 | pub struct Test { 87 | field: u32, 88 | } 89 | "#; 90 | assert!(lint_struct(input).is_ok()); 91 | } 92 | 93 | #[test] 94 | fn test_freeze_struct_only() { 95 | let input = r#" 96 | #[freeze_struct("12345")] 97 | pub struct Test { 98 | field: u32, 99 | } 100 | "#; 101 | assert!(lint_struct(input).is_ok()); 102 | } 103 | 104 | #[test] 105 | fn test_encode_only() { 106 | let input = r#" 107 | #[derive(Encode)] 108 | pub struct Test { 109 | field: u32, 110 | } 111 | "#; 112 | assert!(lint_struct(input).is_err()); 113 | } 114 | 115 | #[test] 116 | fn test_decode_only() { 117 | let input = r#" 118 | #[derive(Decode)] 119 | pub struct Test { 120 | field: u32, 121 | } 122 | "#; 123 | assert!(lint_struct(input).is_err()); 124 | } 125 | 126 | #[test] 127 | fn test_encode_and_freeze_struct() { 128 | let input = r#" 129 | #[freeze_struct("12345")] 130 | #[derive(Encode)] 131 | pub struct Test { 132 | field: u32, 133 | } 134 | "#; 135 | assert!(lint_struct(input).is_ok()); 136 | } 137 | 138 | #[test] 139 | fn test_decode_and_freeze_struct() { 140 | let input = r#" 141 | #[freeze_struct("12345")] 142 | #[derive(Decode)] 143 | pub struct Test { 144 | field: u32, 145 | } 146 | "#; 147 | 
assert!(lint_struct(input).is_ok()); 148 | } 149 | 150 | #[test] 151 | fn test_encode_decode_without_freeze_struct() { 152 | let input = r#" 153 | #[derive(Encode, Decode)] 154 | pub struct Test { 155 | field: u32, 156 | } 157 | "#; 158 | assert!(lint_struct(input).is_err()); 159 | } 160 | 161 | #[test] 162 | fn test_encode_decode_with_freeze_struct() { 163 | let input = r#" 164 | #[freeze_struct("12345")] 165 | #[derive(Encode, Decode)] 166 | pub struct Test { 167 | field: u32, 168 | } 169 | "#; 170 | assert!(lint_struct(input).is_ok()); 171 | } 172 | 173 | #[test] 174 | fn test_temporary_freeze_struct() { 175 | let input = r#" 176 | #[freeze_struct] 177 | #[derive(Encode, Decode)] 178 | pub struct Test { 179 | field: u32, 180 | } 181 | "#; 182 | assert!(lint_struct(input).is_err()); 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /support/macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "subtensor-macros" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | description = "support macros for Subtensor" 8 | repository = "https://github.com/opentensor/subtensor" 9 | homepage = "https://bittensor.com/" 10 | 11 | [lib] 12 | proc-macro = true 13 | 14 | [dependencies] 15 | syn.workspace = true 16 | proc-macro2.workspace = true 17 | quote.workspace = true 18 | ahash = "0.8" 19 | 20 | [lints] 21 | workspace = true 22 | -------------------------------------------------------------------------------- /support/macros/src/lib.rs: -------------------------------------------------------------------------------- 1 | use proc_macro::TokenStream; 2 | use proc_macro2::TokenStream as TokenStream2; 3 | use quote::ToTokens; 4 | use syn::{parse2, visit_mut::visit_item_struct_mut, Error, ItemStruct, LitStr, Result}; 5 | 6 | mod visitor; 7 | use visitor::*; 8 | 9 | /// Freezes the layout of a struct to the current hash of its fields, 
ensuring that future 10 | /// changes require updating the hash. 11 | /// 12 | /// ``` 13 | /// use subtensor_macros::freeze_struct; 14 | /// 15 | /// #[freeze_struct("13f75e4ea46b4e80")] 16 | /// #[derive(Copy, Clone, PartialEq, Eq)] 17 | /// pub struct MyStruct { 18 | /// pub a: u32, 19 | /// pub b: u64, 20 | /// } 21 | /// ``` 22 | #[proc_macro_attribute] 23 | pub fn freeze_struct(attr: TokenStream, tokens: TokenStream) -> TokenStream { 24 | match freeze_struct_impl(attr, tokens) { 25 | Ok(item_struct) => item_struct.to_token_stream().into(), 26 | Err(err) => err.to_compile_error().into(), 27 | } 28 | } 29 | 30 | fn freeze_struct_impl( 31 | attr: impl Into, 32 | tokens: impl Into, 33 | ) -> Result { 34 | let attr = attr.into(); 35 | let tokens = tokens.into(); 36 | 37 | let item = parse2::(tokens)?; 38 | let mut item_clone = item.clone(); 39 | 40 | let mut visitor = CleanDocComments::new(); 41 | visit_item_struct_mut(&mut visitor, &mut item_clone); 42 | 43 | let calculated_hash = generate_hash(&item_clone); 44 | let calculated_hash_hex = format!("{:x}", calculated_hash); 45 | 46 | if attr.is_empty() { 47 | return Err(Error::new_spanned(item, 48 | format!("You must provide a hashcode in the `freeze_struct` attribute to freeze this struct.\n\n\ 49 | expected hashcode: `#[freeze_struct(\"{calculated_hash_hex}\")]`"), 50 | )); 51 | } 52 | 53 | let parsed_attr = parse2::(attr)?; 54 | let provided_hash_hex = parsed_attr.value().to_lowercase(); 55 | 56 | if provided_hash_hex != calculated_hash_hex { 57 | return Err(Error::new_spanned(item, 58 | format!( 59 | "You have made a non-trivial change to this struct and the provided hashcode no longer matches:\n{} != {}\n\n\ 60 | If this was intentional, please update the hashcode in the `freeze_struct` attribute to:\n\ 61 | {}\n\nNote that if you are changing a storage struct in any way, including simply re-ordering fields, \ 62 | you will need a migration to prevent data corruption.", 63 | provided_hash_hex, 
calculated_hash_hex, calculated_hash_hex 64 | ), 65 | )); 66 | } 67 | Ok(item) 68 | } 69 | -------------------------------------------------------------------------------- /support/macros/src/visitor.rs: -------------------------------------------------------------------------------- 1 | use ahash::RandomState; 2 | use syn::{parse_quote, visit_mut::VisitMut}; 3 | 4 | pub struct CleanDocComments; 5 | 6 | impl CleanDocComments { 7 | pub fn new() -> Self { 8 | Self 9 | } 10 | } 11 | 12 | impl VisitMut for CleanDocComments { 13 | fn visit_attribute_mut(&mut self, attr: &mut syn::Attribute) { 14 | if attr.path().is_ident("doc") { 15 | *attr = parse_quote!(#[doc = ""]); 16 | } 17 | if attr.path().is_ident("freeze_struct") { 18 | *attr = parse_quote!(#[freeze_struct]); 19 | } 20 | syn::visit_mut::visit_attribute_mut(self, attr); 21 | } 22 | } 23 | 24 | pub fn generate_hash + Clone>(item: &T) -> u64 { 25 | let item = item.clone(); 26 | 27 | // Define a fixed seed 28 | const SEED1: u64 = 0x12345678; 29 | const SEED2: u64 = 0x87654321; 30 | 31 | // use a fixed seed for predictable hashes 32 | let fixed_state = RandomState::with_seeds(SEED1, SEED2, SEED1, SEED2); 33 | 34 | // hash item 35 | let item = Into::::into(item); 36 | fixed_state.hash_one(&item) 37 | } 38 | 39 | #[cfg(test)] 40 | mod tests { 41 | use super::*; 42 | use syn::Item; 43 | 44 | #[test] 45 | fn test_clean_doc_comments() { 46 | // Example code with doc comments 47 | let item: Item = parse_quote! { 48 | /// This is a doc comment 49 | #[cfg(feature = "example")] 50 | fn example() { 51 | println!("Hello, world!"); 52 | } 53 | }; 54 | 55 | let hash_before = generate_hash(&item); 56 | 57 | let mut item_clone = item.clone(); 58 | let mut cleaner = CleanDocComments; 59 | cleaner.visit_item_mut(&mut item_clone); 60 | 61 | // Calculate the hash of the cleaned item 62 | let hash_after = generate_hash(&item_clone); 63 | 64 | assert_ne!(hash_before, hash_after); 65 | 66 | let item2: Item = parse_quote! 
{ 67 | #[doc = ""] 68 | #[cfg(feature = "example")] 69 | fn example() { 70 | println!("Hello, world!"); 71 | } 72 | }; 73 | 74 | assert_eq!(hash_after, generate_hash(&item2)); 75 | } 76 | 77 | #[test] 78 | fn test_clean_doc_comments_struct() { 79 | // Example code with doc comments in a struct 80 | let item: Item = parse_quote! { 81 | /// Another doc comment 82 | struct MyStruct { 83 | #[cfg(feature = "field")] 84 | field1: i32, 85 | /// Field doc comment 86 | field2: String, 87 | } 88 | }; 89 | 90 | let hash_before = generate_hash(&item); 91 | 92 | let mut item_clone = item.clone(); 93 | let mut cleaner = CleanDocComments; 94 | cleaner.visit_item_mut(&mut item_clone); 95 | 96 | // Calculate the hash of the cleaned item 97 | let hash_after = generate_hash(&item_clone); 98 | 99 | assert_ne!(hash_before, hash_after); 100 | 101 | let item2: Item = parse_quote! { 102 | #[doc = ""] 103 | struct MyStruct { 104 | #[cfg(feature = "field")] 105 | field1: i32, 106 | #[doc = ""] 107 | field2: String, 108 | } 109 | }; 110 | 111 | assert_eq!(hash_after, generate_hash(&item2)); 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /support/macros/tests/tests.rs: -------------------------------------------------------------------------------- 1 | use subtensor_macros::freeze_struct; 2 | 3 | #[freeze_struct("ecdcaac0f6da589a")] 4 | pub struct MyStruct { 5 | pub ip: u32, 6 | pub port: u32, 7 | } 8 | -------------------------------------------------------------------------------- /support/tools/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "subtensor-tools" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | description = "support tools for Subtensor" 8 | repository = "https://github.com/opentensor/subtensor" 9 | homepage = "https://bittensor.com" 10 | 11 | [[bin]] 12 | name = "bump-version" 13 | path = "src/bump_version.rs" 14 | 15 | 
[dependencies] 16 | anyhow = "1.0" 17 | clap = { version = "4.5", features = ["derive"] } 18 | semver = "1.0" 19 | toml_edit = "0.22" 20 | -------------------------------------------------------------------------------- /support/tools/src/bump_version.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use semver::Version; 3 | use std::{ 4 | fs, 5 | io::{Read, Seek, Write}, 6 | str::FromStr, 7 | }; 8 | use toml_edit::{DocumentMut, Item, Value}; 9 | 10 | const TOML_PATHS: [&str; 9] = [ 11 | "support/macros", 12 | "pallets/commitments", 13 | "pallets/collective", 14 | "pallets/registry", 15 | "pallets/subtensor", 16 | "pallets/subtensor/runtime-api", 17 | "pallets/admin-utils", 18 | "runtime", 19 | "node", 20 | ]; 21 | 22 | #[derive(Parser)] 23 | struct CliArgs { 24 | #[arg(required = true)] 25 | version: Version, 26 | } 27 | 28 | fn main() -> anyhow::Result<()> { 29 | let args = CliArgs::parse(); 30 | let version = args.version; 31 | 32 | for path in TOML_PATHS { 33 | let cargo_toml_path = format!("{path}/Cargo.toml"); 34 | let mut toml_file = fs::File::options() 35 | .read(true) 36 | .write(true) 37 | .open(&cargo_toml_path)?; 38 | let mut toml_str = String::new(); 39 | toml_file.read_to_string(&mut toml_str)?; 40 | let mut modified_toml_doc = DocumentMut::from_str(&toml_str)?; 41 | 42 | modified_toml_doc["package"]["version"] = Item::Value(Value::from(version.to_string())); 43 | toml_file.set_len(0)?; 44 | toml_file.rewind()?; 45 | toml_file.write_all(modified_toml_doc.to_string().as_bytes())?; 46 | } 47 | 48 | Ok(()) 49 | } 50 | -------------------------------------------------------------------------------- /zepter.yaml: -------------------------------------------------------------------------------- 1 | version: 2 | format: 1 3 | # Minimum version of the binary that is expected to work. This is just for printing a nice error 4 | # message when someone tries to use an older version. 
5 | binary: 0.13.2 6 | 7 | # The examples in this file assume crate `A` to have a dependency on crate `B`. 8 | workflows: 9 | check: 10 | - [ 11 | "lint", 12 | # Check that `A` activates the features of `B`. 13 | "propagate-feature", 14 | # These are the features to check: 15 | "--features=try-runtime,runtime-benchmarks,std", 16 | # Do not try to add a new section into `[features]` of `A` only because `B` expose that feature. There are edge-cases where this is still needed, but we can add them manually. 17 | "--left-side-feature-missing=ignore", 18 | # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. 19 | "--left-side-outside-workspace=ignore", 20 | # Some features imply that they activate a specific dependency as non-optional. Otherwise the default behaviour with a `?` is used. 21 | "--feature-enables-dep=try-runtime:frame-try-runtime,runtime-benchmarks:frame-benchmarking", # Auxillary flags: 22 | "--offline", 23 | "--locked", 24 | "--show-path", 25 | "--quiet", 26 | ] 27 | # Same as `check`, but with the `--fix` flag. 28 | default: 29 | - [$check.0, "--fix"] 30 | 31 | # Will be displayed when any workflow fails: 32 | help: 33 | text: | 34 | Polkadot-SDK uses the Zepter CLI to detect abnormalities in the feature configuration. 35 | It looks like one more more checks failed; please check the console output. You can try to automatically address them by running `zepter`. 36 | Otherwise please ask directly in the Merge Request, GitHub Discussions or on Matrix Chat, thank you. 37 | links: 38 | - "https://github.com/paritytech/polkadot-sdk/issues/1831" 39 | - "https://github.com/ggwpez/zepter" 40 | --------------------------------------------------------------------------------