├── .github └── workflows │ ├── build.yml │ └── docs.yml ├── .gitignore ├── .rustfmt.toml ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── nimbus-consensus ├── Cargo.toml └── src │ ├── import_queue.rs │ ├── lib.rs │ └── manual_seal.rs ├── nimbus-primitives ├── Cargo.toml └── src │ ├── digests.rs │ ├── inherents.rs │ └── lib.rs ├── pallets ├── aura-style-filter │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── author-inherent │ ├── Cargo.toml │ └── src │ │ ├── benchmarks.rs │ │ ├── exec.rs │ │ ├── lib.rs │ │ ├── mock.rs │ │ ├── tests.rs │ │ └── weights.rs └── author-slot-filter │ ├── Cargo.toml │ └── src │ ├── benchmarks.rs │ ├── lib.rs │ ├── migration.rs │ ├── mock.rs │ ├── num.rs │ ├── tests.rs │ └── weights.rs ├── parachain-template ├── LICENSE ├── README.md ├── node │ ├── Cargo.toml │ ├── build.rs │ └── src │ │ ├── chain_spec.rs │ │ ├── cli.rs │ │ ├── command.rs │ │ ├── main.rs │ │ ├── rpc.rs │ │ └── service.rs ├── pallets │ └── template │ │ ├── Cargo.toml │ │ ├── README.md │ │ └── src │ │ ├── benchmarking.rs │ │ ├── lib.rs │ │ ├── mock.rs │ │ └── tests.rs ├── polkadot-launch │ └── config.json └── runtime │ ├── Cargo.toml │ ├── build.rs │ └── src │ ├── lib.rs │ └── pallet_account_set.rs ├── rust-toolchain ├── scripts ├── check-cargo-toml-files-format.sh └── toml-sort.sh └── toml-sort.toml /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | # This CI Tries to be both simple and effective. 
It is inspired by: 2 | # https://github.com/marketplace/actions/rust-cache 3 | # https://github.com/actions-rs/toolchain/issues/126#issuecomment-782989659 4 | # https://github.com/actions-rs/example/blob/master/.github/workflows/quickstart.yml 5 | 6 | name: Rust Checks 7 | 8 | on: 9 | pull_request: 10 | push: 11 | branches: 12 | - main 13 | 14 | jobs: 15 | check-cargo-toml-format: 16 | name: "Check Cargo.toml files format" 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v2 21 | # With rustup's nice new toml format, we just need to run rustup show to install the toolchain 22 | # https://github.com/actions-rs/toolchain/issues/126#issuecomment-782989659 23 | - name: Setup Rust toolchain 24 | run: rustup show 25 | - name: Check Cargo.toml files format with toml_sort 26 | run: chmod u+x ./scripts/check-cargo-toml-files-format.sh && ./scripts/check-cargo-toml-files-format.sh 27 | 28 | check-rust-fmt: 29 | name: Check rustfmt 30 | runs-on: ubuntu-latest 31 | steps: 32 | - name: Checkout 33 | uses: actions/checkout@v2 34 | 35 | - name: Setup rust toolchain 36 | run: rustup show 37 | 38 | - name: Run cargo fmt check 39 | uses: actions-rs/cargo@v1 40 | with: 41 | command: fmt 42 | args: -- --check 43 | 44 | cargo-check: 45 | name: Cargo check 46 | runs-on: ubuntu-latest 47 | steps: 48 | - name: Install tooling 49 | run: | 50 | sudo apt-get install -y protobuf-compiler 51 | protoc --version 52 | - name: Checkout sources 53 | uses: actions/checkout@v2 54 | 55 | - name: Setup rust toolchain 56 | run: rustup show 57 | 58 | - name: Rust Cache 59 | uses: Swatinem/rust-cache@v1 60 | 61 | - name: Run cargo check 62 | uses: actions-rs/cargo@v1 63 | with: 64 | command: check 65 | 66 | - name: Run cargo test 67 | uses: actions-rs/cargo@v1 68 | with: 69 | command: test 70 | 71 | check-copyright: 72 | runs-on: ubuntu-latest 73 | steps: 74 | - name: Checkout 75 | uses: actions/checkout@v2 76 | 77 | - name: Find un-copyrighted files 78 | run: | 79 
| find . -name '*.rs' -not -path "*/parachain-template/*" -exec grep -H -E -o -c Copyright {} \; | grep ':0' || true 80 | FILECOUNT=$(find . -name '*.rs' -not -path "*/parachain-template/*" -exec grep -H -E -o -c 'Copyright' {} \; | grep -c ':0' || true) 81 | if [[ $FILECOUNT -eq 0 ]]; then 82 | true 83 | else 84 | false 85 | fi 86 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Publish Rust Docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | deploy-docs: 10 | name: Deploy docs 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout repository 15 | uses: actions/checkout@v1 16 | 17 | # With rustup's nice new toml format, we just need to run rustup show to install the toolchain 18 | # https://github.com/actions-rs/toolchain/issues/126#issuecomment-782989659 19 | - name: Setup Rust toolchain 20 | run: rustup show 21 | 22 | #TODO consider using the rust Cache action like tin the rust CI 23 | - uses: actions/cache@v2 24 | with: 25 | path: | 26 | ~/.cargo/registry 27 | ~/.cargo/git 28 | target 29 | key: ${{ runner.os }}-cargo-doc-${{ hashFiles('**/Cargo.lock') }} 30 | 31 | - name: Build rustdocs 32 | uses: actions-rs/cargo@v1 33 | env: 34 | CARGO_INCREMENTAL: "0" 35 | with: 36 | command: doc 37 | args: --all --no-deps 38 | 39 | # Make an index.html file so we start at the nimbus consensus worker 40 | # Copied from https://github.com/substrate-developer-hub/rustdocs/blob/master/index.html 41 | - name: Make index.html 42 | run: echo "" > ./target/doc/index.html 43 | 44 | - name: Deploy documentation 45 | uses: peaceiris/actions-gh-pages@v3 46 | with: 47 | github_token: ${{ secrets.GITHUB_TOKEN }} 48 | publish_branch: gh-pages 49 | publish_dir: ./target/doc 50 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | **/target -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | # These formatting rules to try conform the Substrate style guidelines: 2 | # > https://wiki.parity.io/Substrate-Style-Guide 3 | 4 | reorder_imports = true 5 | hard_tabs = true 6 | max_width = 100 7 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "nimbus-consensus", 4 | "nimbus-primitives", 5 | "pallets/aura-style-filter", 6 | "pallets/author-inherent", 7 | "pallets/author-slot-filter", 8 | "parachain-template/node", 9 | "parachain-template/runtime", 10 | ] 11 | 12 | [profile.release] 13 | panic = "unwind" 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## ⚠️ Nimbus has been migrated to [Moonkit](https://github.com/Moonsong-Labs/moonkit) 2 | 3 | # Cumulo -- Nimbus ⛈️ 4 | 5 | Nimbus is a framework for building parachain consensus systems on [cumulus](https://github.com/paritytech/cumulus)-based parachains. 6 | 7 | Given the regular six-second pulse-like nature of the relay chain, it is natural to think about slot- 8 | based consensus algorithms for parachains. The parachain network is responsible for liveness and 9 | decentralization and the relay chain is responsible for finality. There is a rich design space for such 10 | algorithms, yet some tasks are common to all (or most) of them. 
These common tasks include: 11 | 12 | * Signing and signature checking blocks 13 | * Injecting authorship information into the parachain 14 | * Block authorship and import accounting 15 | * Filtering a large (potentially unbounded) set of potential authors to a smaller (but still potentially unbounded) set. 16 | * Detecting when it is your turn to author an skipping other slots 17 | 18 | Nimbus aims to provide standard implementations for the logistical parts of such consensus engines, 19 | along with helpful traits for implementing the parts that researchers and developers want to customize. 20 | 21 | ## Try the Demo 22 | 23 | While Nimbus is primarily a development framework meant to be included in other projects, it is useful 24 | to see a basic network in action. An example network is included in the `parachain-template` example collator. You 25 | can build it with `cargo build --release` and launch it like any other cumulus parachain. 26 | Make sure to specify `--chain nimbus`. 27 | 28 | Rather than reiterate how to start a relay-para network here, I'll simply recommend you use the 29 | excellent [Polkadot Launch](https://github.com/paritytech/polkadot-launch) tool. This repo was tested with version 1.4.1. 30 | A [lauch config file](./parachain-template/polkadot-launch/config.json) is provided. 31 | 32 | ```bash 33 | # Install polkadot launch (I used v1.4.1) 34 | npm i -g polkadot-launch 35 | 36 | # Build polkadot (I used 82aa404c; check Cargo.lock to be sure) 37 | cd polkadot 38 | cargo build --release 39 | cd .. 40 | 41 | # Build Polkadot-parachains example collator 42 | cd cumulus 43 | git checkout nimbus 44 | cargo build --release 45 | 46 | # Launch the multi-chain 47 | polkdot-launch ./parachain-template/polkadot-launch/config.json 48 | ``` 49 | 50 | To learn more about launching relay-para networks, check out the [cumulus workshop](https://substrate.dev/cumulus-workshop). 
51 | 52 | ## Design Overview 53 | 54 | If you want to start using Nimbus in your project, it is worth reading this. 55 | 56 | At its core nimbus is a consensus engine that considers blocks valid if and only if they inject the author's public identity into the runtime, _and_ seal the block with a signature 57 | by the author's private key. 58 | 59 | Compared to most consensus engines, this is _very_ permissive -- anyone who can create a signature can author valid blocks. In order to build more useful and familiar consensus engine on this foundation, nimbus provides a framework for creating filters to further restrict the set of eligible authors. These filters live inside the runtime. 60 | 61 | Being general in the consensus layer and deferring most checks to the runtime is the key 62 | to nimbus's re-usability as a framework. And is the reason that *writing a consensus engine is as easy as writing a pallet* when you use nimbus. 63 | 64 | ### Author Inherent 65 | 66 | The Author inherent pallet allows block authors to insert their identity into 67 | the runtime. This feature alone is useful in many blockchains and can be used for things like block rewards. 68 | 69 | The author inherent provides a validation hook called `CanAuthor`. This check will be called during the inherent execution and is the main entry point to nimbus's author filters. 70 | If you don't want to restrict authorship at all, you can just use `()`. 71 | 72 | As a concrete example, in a simple Proof of Stake system this check will determine 73 | whether the author is staked. In a more realistic PoS system the `CanAuthor` check might 74 | first make sure the author is staked, and then make sure they are eligible in _this slot_ according to round robin rules. 75 | 76 | Finally, the pallet copies the authorship information into a consensus digest that will stick around 77 | in the block header. 
This digest can be used by UIs to display the author, and also by the consensus 78 | engine to verify the block authorship. 79 | 80 | **PreRuntimeDigest** 81 | I believe the design should be changed slightly to use a preruntime digest rather than an inherent for a few reasons: 82 | 83 | * The data wouldn't be duplicated between an inherent and a digest. 84 | * Nimbus client-side worker would support non-frame runtimes. 85 | * That's how sc-consensus-aura does it. 86 | 87 | ### Author Filters 88 | 89 | A primary job of a consensus engine is deciding who can author each block. Some may have a static set, others 90 | may rotate the set each era, others may elect an always-changing subset of all potential authors. There 91 | is much space for creativity, research, and design, and Nimbus strives to provide a flexible interface 92 | for this creative work. You can express all the interesting parts of your 93 | consensus engine simply by creating filters that implement the `CanAuthor` trait. The rest of Nimbus will #JustWork for you. 94 | 95 | This repository comes with a few example filters already, and additional examples are welcome. The examples are: 96 | * PseudoRandom FixedSized Subset - This filter takes a finite set (eg a staked set) and filters it down to a pseudo-random 97 | subset at each height. The eligible count is configurable in the pallet. This is a good learning example. 98 | * Aura - The authority round consensus engine is popular in the Substrate ecosystem because it was one 99 | of the first (and simplest!) engines implemented in Substrate. Aura can be expressed in the Nimbus 100 | filter framework and is included as an example filter. If you are considering using aura, that crate 101 | has good documentation on how it differs from `sc-consensus-aura`. 102 | * (Planned) FixedSizedSubset - The author submits a VRF output that has to be below a threshold to be able to author. 103 | * (Planed) Filter Combinator - A filter that wraps two other filters. 
It uses one in even slots and the other in odd slots. 104 | 105 | ### Author Filter Runtime API 106 | 107 | Nimbus makes the design choice to include the author checking logic in the runtime. This is in contrast to the existing implementations of Aura and Babe where the authorship checks are offchain. 108 | 109 | While moving the check in-runtime, provides a lot of flexibility, and simplifies interfacing with relay-chain validators, it makes it impossible 110 | for authoring nodes to predict whether they will be eligible without calling into the runtime. 111 | To achieve this, we provide a runtime API that makes the minimal calculation necessary to determine 112 | whether a specified author will be eligible at the specified slot. 113 | 114 | ### Nimbus Consensus Worker 115 | 116 | Nimbus consensus is the primary client-side consensus worker. It implements the `ParachainConsensus` 117 | trait introduced to cumulus in https://github.com/paritytech/cumulus/pull/329. It is not likely that 118 | you will need to change this code directly to implement your engine as it is entirely abstracted over 119 | the filters you use. The consensus engine performs these tasks: 120 | 121 | * Slot prediction - it calls the runtime API mentioned previously to determine whether ti is eligible. If not, it returns early. 122 | * Authorship - It calls into a standard Substrate proposer to construct a block (probably including the author inherent). 123 | * Self import - it imports the block that the proposer created (called the pre-block) into the node's local database. 124 | * Sealing - It adds a seal digest to the block - This is what is used by other nodes to verify the authorship information. 125 | 126 | ### Verifier and Import Queue 127 | 128 | For a parachain node to import a sealed block authored by one of its peers, it needs to first check that the signature is valid by the author that was injected into the runtime. This is the job of the verifier. 
It 129 | will remove the nimbus seal and check it against the nimbus consensus digest from the runtime. If that process fails, 130 | the block is immediately thrown away before the expensive execution even begins. If it succeeds, then 131 | the pre-block (the part that's left after the seal is stripped) is passed into the 132 | [import pipeline](https://substrate.dev/docs/en/knowledgebase/advanced/block-import) for processing 133 | and execution. Finally, the locally produced result is compared to the result received across the network. 134 | 135 | ### Custom Block Executor 136 | 137 | We've already discussed how parachain nodes (both the one that authors a block, and also its peers) 138 | import blocks. In a standalone blockchain, that's the end of the story. But for a parachain, we also 139 | need our relay chain validators to re-execute and validate the parachain block. Validators do this in 140 | a unique way, and entirely in wasm. Providing the `validate_block` function that the validators use 141 | is the job of the `register_validate_block!` macro from Cumulus. 142 | 143 | Typically a cumulus runtime invokes that macro like this: 144 | ```rust 145 | cumulus_pallet_parachain_system::register_validate_block!(Runtime, Executive); 146 | ``` 147 | 148 | You can see that the validators use the exact same executive that the parachain nodes do. Now that 149 | we have sealed blocks, that must change. The validators need to strip and verify the seal, and re-execute 150 | the pre-block just like the parachain nodes did. And without access to an offchain verifier, they must 151 | do this all in the runtime. For that purpose, we provide and alternate executive which wraps the normal 152 | FRAME executive. The wrapper strips and checks the seal, just like the verifier did, and then passes the pre-block to the inner FRAME executive for re-execution. 
153 | 154 | ## Write Your Own Consensus Logic 155 | 156 | If you have an idea for a new slot-based parachain consensus algorithm, Nimbus is a quick way to get 157 | it working! The fastest way to start hacking is to fork this repo and customize the template node. 158 | 159 | If you'd rather dive in than read one more sentence, then **start hacking in the `author-slot-filter` 160 | pallet.** 161 | 162 | In most cases, you can use all the off-the-shelf components and simply write your filters. It is also 163 | possible to compose existing filters to build more complex logic from smaller pieces. 164 | 165 | ## Authoring and Import Diagrams 166 | 167 | One node authors the block, then it is processed in three different ways. 168 | 169 | | | Author | Parachain Peer | Relay Validator | 170 | | ------------------- | ------ | -------------- | --------- | 171 | | Predict Eligibility | ✅ | ❌ | ❌ | 172 | | Author Block | ✅ | ❌ | ❌ | 173 | | Runs Verifier | ❌ | ✅ | ❌ | 174 | | Import Pipeline | ✅ | ✅ | ❌ | 175 | | Custom Pre exec | ❌ | ❌ | ✅ | 176 | | Normal FRAME exec | ✅ | ✅ | ✅ | 177 | 178 | ## Roadmap 179 | 180 | The Nimbus framework is intended to be loosely coupled with Cumulus. 181 | 182 | ### Next tasks 183 | * Proper trait for interacting with digests 184 | * More example filters 185 | * Share code between verifier and wrapper executive 186 | * Client-side worker for standalone (non para) blockchain 187 | * Aurand as an example of composing filters 188 | * Second filter trait for exhaustive sets (As opposed to current propositional approach) 189 | 190 | ## Contributions Welcome 191 | 192 | Try it out, open issues, submit PRs, review code. Whether you like to tinker with a running node, or 193 | analyze security from an academic perspective, your contributions are welcome. 194 | 195 | I am happy to support users who want to use nimbus, or want feedback on their consensus engines. 
196 | -------------------------------------------------------------------------------- /nimbus-consensus/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nimbus-consensus" 3 | description = "Client-side worker for the Nimbus family of consensus algorithms" 4 | edition = "2021" 5 | version = "0.9.0" 6 | [dependencies] 7 | # Substrate deps 8 | sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 9 | sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 10 | sc-consensus-manual-seal = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 11 | sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 12 | sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 13 | sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 14 | sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 15 | sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 16 | sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 17 | sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 18 | sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 19 | sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 20 | substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 21 | 22 | # Cumulus dependencies 23 | cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 24 | cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = 
"polkadot-v0.9.43" } 25 | cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 26 | 27 | # Nimbus Dependencies 28 | nimbus-primitives = { path = "../nimbus-primitives" } 29 | 30 | # Other deps 31 | async-trait = "0.1" 32 | codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } 33 | futures = { version = "0.3.24", features = [ "compat" ] } 34 | log = "0.4.17" 35 | parking_lot = "0.12" 36 | tracing = "0.1.22" 37 | -------------------------------------------------------------------------------- /nimbus-consensus/src/import_queue.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 
16 | 17 | use std::{marker::PhantomData, sync::Arc}; 18 | 19 | use log::debug; 20 | use nimbus_primitives::{digests::CompatibleDigestItem, NimbusId, NimbusPair, NIMBUS_ENGINE_ID}; 21 | use sc_consensus::{ 22 | import_queue::{BasicQueue, Verifier as VerifierT}, 23 | BlockImport, BlockImportParams, 24 | }; 25 | use sp_api::ProvideRuntimeApi; 26 | use sp_application_crypto::{ByteArray, Pair as _}; 27 | use sp_block_builder::BlockBuilder as BlockBuilderApi; 28 | use sp_blockchain::Result as ClientResult; 29 | use sp_consensus::error::Error as ConsensusError; 30 | use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; 31 | use sp_runtime::{ 32 | traits::{Block as BlockT, Header as HeaderT}, 33 | DigestItem, 34 | }; 35 | 36 | /// The Nimbus verifier strips the seal digest, and checks that it is a valid signature by 37 | /// the same key that was injected into the runtime and noted in the Seal digest. 38 | /// From Nimbu's perspective any block that faithfully reports its authorship to the runtime 39 | /// is valid. The intention is that the runtime itself may then put further restrictions on 40 | /// the identity of the author. 41 | struct Verifier { 42 | client: Arc, 43 | create_inherent_data_providers: CIDP, 44 | _marker: PhantomData, 45 | } 46 | 47 | #[async_trait::async_trait] 48 | impl VerifierT for Verifier 49 | where 50 | Block: BlockT, 51 | Client: ProvideRuntimeApi + Send + Sync, 52 | >::Api: BlockBuilderApi, 53 | CIDP: CreateInherentDataProviders, 54 | { 55 | async fn verify( 56 | &mut self, 57 | mut block_params: BlockImportParams, 58 | ) -> Result, String> { 59 | // Skip checks that include execution, if being told so or when importing only state. 60 | // 61 | // This is done for example when gap syncing and it is expected that the block after the gap 62 | // was checked/chosen properly, e.g. by warp syncing to this block using a finality proof. 63 | // Or when we are importing state only and can not verify the seal. 
64 | if block_params.with_state() || block_params.state_action.skip_execution_checks() { 65 | // When we are importing only the state of a block, it will be the best block. 66 | block_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( 67 | block_params.with_state(), 68 | )); 69 | 70 | return Ok(block_params); 71 | } 72 | 73 | debug!( 74 | target: crate::LOG_TARGET, 75 | "🪲 Header hash before popping digest {:?}", 76 | block_params.header.hash() 77 | ); 78 | // Grab the seal digest. Assume it is last (since it is a seal after-all). 79 | let seal = block_params 80 | .header 81 | .digest_mut() 82 | .pop() 83 | .ok_or("Block should have at least one digest on it")?; 84 | 85 | let signature = seal 86 | .as_nimbus_seal() 87 | .ok_or_else(|| String::from("HeaderUnsealed"))?; 88 | 89 | debug!( 90 | target: crate::LOG_TARGET, 91 | "🪲 Header hash after popping digest {:?}", 92 | block_params.header.hash() 93 | ); 94 | 95 | debug!( 96 | target: crate::LOG_TARGET, 97 | "🪲 Signature according to verifier is {:?}", signature 98 | ); 99 | 100 | // Grab the author information from either the preruntime digest or the consensus digest 101 | //TODO use the trait 102 | let claimed_author = block_params 103 | .header 104 | .digest() 105 | .logs 106 | .iter() 107 | .find_map(|digest| match *digest { 108 | DigestItem::Consensus(id, ref author_id) if id == NIMBUS_ENGINE_ID => { 109 | Some(author_id.clone()) 110 | } 111 | DigestItem::PreRuntime(id, ref author_id) if id == NIMBUS_ENGINE_ID => { 112 | Some(author_id.clone()) 113 | } 114 | _ => None, 115 | }) 116 | .ok_or("Expected one consensus or pre-runtime digest that contains author id bytes")?; 117 | 118 | debug!( 119 | target: crate::LOG_TARGET, 120 | "🪲 Claimed Author according to verifier is {:?}", claimed_author 121 | ); 122 | 123 | // Verify the signature 124 | let valid_signature = NimbusPair::verify( 125 | &signature, 126 | block_params.header.hash(), 127 | &NimbusId::from_slice(&claimed_author) 128 | 
.map_err(|_| "Invalid Nimbus ID (wrong length)")?, 129 | ); 130 | 131 | debug!( 132 | target: crate::LOG_TARGET, 133 | "🪲 Valid signature? {:?}", valid_signature 134 | ); 135 | 136 | if !valid_signature { 137 | return Err("Block signature invalid".into()); 138 | } 139 | 140 | // This part copied from RelayChainConsensus. I guess this is the inherent checking. 141 | if let Some(inner_body) = block_params.body.take() { 142 | let inherent_data_providers = self 143 | .create_inherent_data_providers 144 | .create_inherent_data_providers(*block_params.header.parent_hash(), ()) 145 | .await 146 | .map_err(|e| e.to_string())?; 147 | 148 | let inherent_data = inherent_data_providers 149 | .create_inherent_data() 150 | .await 151 | .map_err(|e| format!("{:?}", e))?; 152 | 153 | let block = Block::new(block_params.header.clone(), inner_body); 154 | 155 | let inherent_res = self 156 | .client 157 | .runtime_api() 158 | .check_inherents( 159 | *block_params.header.parent_hash(), 160 | block.clone(), 161 | inherent_data, 162 | ) 163 | .map_err(|e| format!("{:?}", e))?; 164 | 165 | if !inherent_res.ok() { 166 | for (i, e) in inherent_res.into_errors() { 167 | match inherent_data_providers.try_handle_error(&i, &e).await { 168 | Some(r) => r.map_err(|e| format!("{:?}", e))?, 169 | None => Err(format!( 170 | "Unhandled inherent error from `{}`.", 171 | String::from_utf8_lossy(&i) 172 | ))?, 173 | } 174 | } 175 | } 176 | 177 | let (_, inner_body) = block.deconstruct(); 178 | block_params.body = Some(inner_body); 179 | } 180 | 181 | block_params.post_digests.push(seal); 182 | 183 | // The standard is to use the longest chain rule. This is overridden by the `NimbusBlockImport` in the parachain context. 184 | block_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); 185 | 186 | debug!( 187 | target: crate::LOG_TARGET, 188 | "🪲 Just finished verifier. 
posthash from params is {:?}", 189 | &block_params.post_hash() 190 | ); 191 | 192 | Ok(block_params) 193 | } 194 | } 195 | 196 | /// Start an import queue for a Cumulus collator that does not uses any special authoring logic. 197 | pub fn import_queue( 198 | client: Arc, 199 | block_import: I, 200 | create_inherent_data_providers: CIDP, 201 | spawner: &impl sp_core::traits::SpawnEssentialNamed, 202 | registry: Option<&substrate_prometheus_endpoint::Registry>, 203 | parachain: bool, 204 | ) -> ClientResult> 205 | where 206 | I: BlockImport + Send + Sync + 'static, 207 | I::Transaction: Send, 208 | Client: ProvideRuntimeApi + Send + Sync + 'static, 209 | >::Api: BlockBuilderApi, 210 | CIDP: CreateInherentDataProviders + 'static, 211 | { 212 | let verifier = Verifier { 213 | client, 214 | create_inherent_data_providers, 215 | _marker: PhantomData, 216 | }; 217 | 218 | Ok(BasicQueue::new( 219 | verifier, 220 | Box::new(NimbusBlockImport::new(block_import, parachain)), 221 | None, 222 | spawner, 223 | registry, 224 | )) 225 | } 226 | 227 | /// Nimbus specific block import. 228 | /// 229 | /// Nimbus supports both parachain and non-parachain contexts. In the parachain 230 | /// context, new blocks should not be imported as best. Cumulus's ParachainBlockImport 231 | /// handles this correctly, but does not work in non-parachain contexts. 232 | /// This block import has a field indicating whether we should apply parachain rules or not. 233 | /// 234 | /// There may be additional nimbus-specific logic here in the future, but for now it is 235 | /// only the conditional parachain logic 236 | pub struct NimbusBlockImport { 237 | inner: I, 238 | parachain_context: bool, 239 | } 240 | 241 | impl NimbusBlockImport { 242 | /// Create a new instance. 
243 | pub fn new(inner: I, parachain_context: bool) -> Self { 244 | Self { 245 | inner, 246 | parachain_context, 247 | } 248 | } 249 | } 250 | 251 | #[async_trait::async_trait] 252 | impl BlockImport for NimbusBlockImport 253 | where 254 | Block: BlockT, 255 | I: BlockImport + Send, 256 | { 257 | type Error = I::Error; 258 | type Transaction = I::Transaction; 259 | 260 | async fn check_block( 261 | &mut self, 262 | block: sc_consensus::BlockCheckParams, 263 | ) -> Result { 264 | self.inner.check_block(block).await 265 | } 266 | 267 | async fn import_block( 268 | &mut self, 269 | mut block_import_params: sc_consensus::BlockImportParams, 270 | ) -> Result { 271 | // If we are in the parachain context, best block is determined by the relay chain 272 | // except during initial sync 273 | if self.parachain_context { 274 | block_import_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( 275 | block_import_params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, 276 | )); 277 | } 278 | 279 | // Now continue on to the rest of the import pipeline. 280 | self.inner.import_block(block_import_params).await 281 | } 282 | } 283 | -------------------------------------------------------------------------------- /nimbus-consensus/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 
13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | //! The nimbus consensus client-side worker 18 | //! 19 | //! It queries the in-runtime filter to determine whether any keys 20 | //! stored in its keystore are eligible to author at this slot. If it has an eligible 21 | //! key it authors. 22 | 23 | use cumulus_client_consensus_common::{ 24 | ParachainBlockImport, ParachainCandidate, ParachainConsensus, 25 | }; 26 | use cumulus_primitives_core::{relay_chain::Hash as PHash, ParaId, PersistedValidationData}; 27 | pub use import_queue::import_queue; 28 | use log::{debug, info, warn}; 29 | use nimbus_primitives::{ 30 | CompatibleDigestItem, DigestsProvider, NimbusApi, NimbusId, NIMBUS_KEY_ID, 31 | }; 32 | use parking_lot::Mutex; 33 | use sc_client_api::backend::Backend; 34 | use sc_consensus::{BlockImport, BlockImportParams}; 35 | use sp_api::ProvideRuntimeApi; 36 | use sp_application_crypto::ByteArray; 37 | use sp_consensus::{ 38 | BlockOrigin, EnableProofRecording, Environment, ProofRecording, Proposal, Proposer, 39 | }; 40 | use sp_core::{crypto::CryptoTypeId, sr25519}; 41 | use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; 42 | use sp_keystore::{Keystore, KeystorePtr}; 43 | use sp_runtime::{ 44 | traits::{Block as BlockT, Header as HeaderT}, 45 | DigestItem, 46 | }; 47 | use std::convert::TryInto; 48 | use std::{marker::PhantomData, sync::Arc, time::Duration}; 49 | use tracing::error; 50 | mod import_queue; 51 | mod manual_seal; 52 | pub use manual_seal::NimbusManualSealConsensusDataProvider; 53 | 54 | const LOG_TARGET: &str = "filtering-consensus"; 55 | 56 | /// The implementation of the relay-chain provided consensus for parachains. 
57 | pub struct NimbusConsensus { 58 | para_id: ParaId, 59 | proposer_factory: Arc>, 60 | create_inherent_data_providers: Arc, 61 | block_import: Arc>>, 62 | parachain_client: Arc, 63 | keystore: KeystorePtr, 64 | skip_prediction: bool, 65 | additional_digests_provider: Arc, 66 | _phantom: PhantomData, 67 | } 68 | 69 | impl Clone 70 | for NimbusConsensus 71 | { 72 | fn clone(&self) -> Self { 73 | Self { 74 | para_id: self.para_id, 75 | proposer_factory: self.proposer_factory.clone(), 76 | create_inherent_data_providers: self.create_inherent_data_providers.clone(), 77 | block_import: self.block_import.clone(), 78 | parachain_client: self.parachain_client.clone(), 79 | keystore: self.keystore.clone(), 80 | skip_prediction: self.skip_prediction, 81 | additional_digests_provider: self.additional_digests_provider.clone(), 82 | _phantom: PhantomData, 83 | } 84 | } 85 | } 86 | 87 | impl NimbusConsensus 88 | where 89 | B: BlockT, 90 | PF: 'static, 91 | BI: 'static, 92 | BE: Backend + 'static, 93 | ParaClient: ProvideRuntimeApi + 'static, 94 | CIDP: CreateInherentDataProviders + 'static, 95 | DP: DigestsProvider::Hash> + 'static, 96 | { 97 | /// Create a new instance of nimbus consensus. 
98 | pub fn build( 99 | BuildNimbusConsensusParams { 100 | para_id, 101 | proposer_factory, 102 | create_inherent_data_providers, 103 | block_import, 104 | backend, 105 | parachain_client, 106 | keystore, 107 | skip_prediction, 108 | additional_digests_provider, 109 | }: BuildNimbusConsensusParams, 110 | ) -> Box> 111 | where 112 | Self: ParachainConsensus, 113 | { 114 | Box::new(Self { 115 | para_id, 116 | proposer_factory: Arc::new(Mutex::new(proposer_factory)), 117 | create_inherent_data_providers: Arc::new(create_inherent_data_providers), 118 | block_import: Arc::new(futures::lock::Mutex::new(ParachainBlockImport::new( 119 | block_import, 120 | backend, 121 | ))), 122 | parachain_client, 123 | keystore, 124 | skip_prediction, 125 | additional_digests_provider: Arc::new(additional_digests_provider), 126 | _phantom: PhantomData, 127 | }) 128 | } 129 | 130 | //TODO Could this be a provided implementation now that we have this async inherent stuff? 131 | /// Create the data. 132 | async fn inherent_data( 133 | &self, 134 | parent: B::Hash, 135 | validation_data: &PersistedValidationData, 136 | relay_parent: PHash, 137 | author_id: NimbusId, 138 | ) -> Option { 139 | let inherent_data_providers = self 140 | .create_inherent_data_providers 141 | .create_inherent_data_providers( 142 | parent, 143 | (relay_parent, validation_data.clone(), author_id), 144 | ) 145 | .await 146 | .map_err(|e| { 147 | tracing::error!( 148 | target: LOG_TARGET, 149 | error = ?e, 150 | "Failed to create inherent data providers.", 151 | ) 152 | }) 153 | .ok()?; 154 | 155 | inherent_data_providers 156 | .create_inherent_data() 157 | .await 158 | .map_err(|e| { 159 | tracing::error!( 160 | target: LOG_TARGET, 161 | error = ?e, 162 | "Failed to create inherent data.", 163 | ) 164 | }) 165 | .ok() 166 | } 167 | } 168 | 169 | /// Grabs any available nimbus key from the keystore. 
170 | /// This may be useful in situations where you expect exactly one key 171 | /// and intend to perform an operation with it regardless of whether it is 172 | /// expected to be eligible. Concretely, this is used in the consensus worker 173 | /// to implement the `skip_prediction` feature. 174 | pub(crate) fn first_available_key(keystore: &dyn Keystore) -> Option> { 175 | // Get all the available keys 176 | match Keystore::keys(keystore, NIMBUS_KEY_ID) { 177 | Ok(available_keys) => { 178 | if available_keys.is_empty() { 179 | warn!( 180 | target: LOG_TARGET, 181 | "🔏 No Nimbus keys available. We will not be able to author." 182 | ); 183 | None 184 | } else { 185 | Some(available_keys[0].clone()) 186 | } 187 | } 188 | _ => None, 189 | } 190 | } 191 | 192 | /// Grab the first eligible nimbus key from the keystore 193 | /// If multiple keys are eligible this function still only returns one 194 | /// and makes no guarantees which one as that depends on the keystore's iterator behavior. 195 | /// This is the standard way of determining which key to author with. 196 | pub(crate) fn first_eligible_key( 197 | client: Arc, 198 | keystore: &dyn Keystore, 199 | parent: &B::Header, 200 | slot_number: u32, 201 | ) -> Option> 202 | where 203 | C: ProvideRuntimeApi, 204 | C::Api: NimbusApi, 205 | { 206 | // Get all the available keys 207 | let available_keys = Keystore::keys(keystore, NIMBUS_KEY_ID).ok()?; 208 | 209 | // Print a more helpful message than "not eligible" when there are no keys at all. 210 | if available_keys.is_empty() { 211 | warn!( 212 | target: LOG_TARGET, 213 | "🔏 No Nimbus keys available. We will not be able to author." 214 | ); 215 | return None; 216 | } 217 | 218 | // Iterate keys until we find an eligible one, or run out of candidates. 219 | // If we are skipping prediction, then we author with the first key we find. 220 | // prediction skipping only really makes sense when there is a single key in the keystore. 
221 | let maybe_key = available_keys.into_iter().find(|type_public_pair| { 222 | // Have to convert to a typed NimbusId to pass to the runtime API. Maybe this is a clue 223 | // That I should be passing Vec across the wasm boundary? 224 | if let Ok(nimbus_id) = NimbusId::from_slice(&type_public_pair) { 225 | NimbusApi::can_author( 226 | &*client.runtime_api(), 227 | parent.hash(), 228 | nimbus_id, 229 | slot_number, 230 | parent, 231 | ) 232 | .unwrap_or_default() 233 | } else { 234 | false 235 | } 236 | }); 237 | 238 | // If there are no eligible keys, print the log, and exit early. 239 | if maybe_key.is_none() { 240 | info!( 241 | target: LOG_TARGET, 242 | "🔮 Skipping candidate production because we are not eligible for slot {}", slot_number 243 | ); 244 | } 245 | 246 | maybe_key 247 | } 248 | 249 | pub(crate) fn seal_header( 250 | header: &B::Header, 251 | keystore: &dyn Keystore, 252 | public_pair: &Vec, 253 | crypto_id: &CryptoTypeId, 254 | ) -> DigestItem 255 | where 256 | B: BlockT, 257 | { 258 | let pre_hash = header.hash(); 259 | 260 | let raw_sig = Keystore::sign_with( 261 | &*keystore, 262 | NIMBUS_KEY_ID, 263 | *crypto_id, 264 | public_pair, 265 | pre_hash.as_ref(), 266 | ) 267 | .expect("Keystore should be able to sign") 268 | .expect("We already checked that the key was present"); 269 | 270 | debug!(target: LOG_TARGET, "The signature is \n{:?}", raw_sig); 271 | 272 | let signature = raw_sig 273 | .clone() 274 | .try_into() 275 | .expect("signature bytes produced by keystore should be right length"); 276 | 277 | ::nimbus_seal(signature) 278 | } 279 | 280 | #[async_trait::async_trait] 281 | impl ParachainConsensus 282 | for NimbusConsensus 283 | where 284 | B: BlockT, 285 | BI: BlockImport + Send + Sync + 'static, 286 | BE: Backend + Send + Sync + 'static, 287 | PF: Environment + Send + Sync + 'static, 288 | PF::Proposer: Proposer< 289 | B, 290 | Transaction = BI::Transaction, 291 | ProofRecording = EnableProofRecording, 292 | Proof = ::Proof, 293 | >, 
294 | ParaClient: ProvideRuntimeApi + Send + Sync + 'static, 295 | ParaClient::Api: NimbusApi, 296 | CIDP: CreateInherentDataProviders + 'static, 297 | DP: DigestsProvider::Hash> + 'static + Send + Sync, 298 | { 299 | async fn produce_candidate( 300 | &mut self, 301 | parent: &B::Header, 302 | relay_parent: PHash, 303 | validation_data: &PersistedValidationData, 304 | ) -> Option> { 305 | // Determine if runtime change 306 | let runtime_upgraded = if *parent.number() > sp_runtime::traits::Zero::zero() { 307 | use sp_api::Core as _; 308 | let previous_runtime_version: sp_api::RuntimeVersion = self 309 | .parachain_client 310 | .runtime_api() 311 | .version(parent.hash()) 312 | .ok()?; 313 | let runtime_version: sp_api::RuntimeVersion = self 314 | .parachain_client 315 | .runtime_api() 316 | .version(parent.hash()) 317 | .ok()?; 318 | 319 | previous_runtime_version != runtime_version 320 | } else { 321 | false 322 | }; 323 | 324 | let maybe_key = if self.skip_prediction || runtime_upgraded { 325 | first_available_key(&*self.keystore) 326 | } else { 327 | first_eligible_key::( 328 | self.parachain_client.clone(), 329 | &*self.keystore, 330 | parent, 331 | validation_data.relay_parent_number, 332 | ) 333 | }; 334 | 335 | // If there are no eligible keys, print the log, and exit early. 
336 | let type_public_pair = match maybe_key { 337 | Some(p) => p, 338 | None => { 339 | return None; 340 | } 341 | }; 342 | 343 | let proposer_future = self.proposer_factory.lock().init(&parent); 344 | 345 | let proposer = proposer_future 346 | .await 347 | .map_err(|e| error!(target: LOG_TARGET, error = ?e, "Could not create proposer.")) 348 | .ok()?; 349 | 350 | let nimbus_id = NimbusId::from_slice(&type_public_pair) 351 | .map_err( 352 | |e| error!(target: LOG_TARGET, error = ?e, "Invalid Nimbus ID (wrong length)."), 353 | ) 354 | .ok()?; 355 | 356 | let inherent_data = self 357 | .inherent_data( 358 | parent.hash(), 359 | &validation_data, 360 | relay_parent, 361 | nimbus_id.clone(), 362 | ) 363 | .await?; 364 | 365 | let mut logs = vec![CompatibleDigestItem::nimbus_pre_digest(nimbus_id.clone())]; 366 | logs.extend( 367 | self.additional_digests_provider 368 | .provide_digests(nimbus_id, parent.hash()), 369 | ); 370 | let inherent_digests = sp_runtime::generic::Digest { logs }; 371 | 372 | let Proposal { 373 | block, 374 | storage_changes, 375 | proof, 376 | } = proposer 377 | .propose( 378 | inherent_data, 379 | inherent_digests, 380 | //TODO: Fix this. 381 | Duration::from_millis(500), 382 | // Set the block limit to 50% of the maximum PoV size. 383 | // 384 | // TODO: If we got benchmarking that includes that encapsulates the proof size, 385 | // we should be able to use the maximum pov size. 
386 | Some((validation_data.max_pov_size / 2) as usize), 387 | ) 388 | .await 389 | .map_err(|e| error!(target: LOG_TARGET, error = ?e, "Proposing failed.")) 390 | .ok()?; 391 | 392 | let (header, extrinsics) = block.clone().deconstruct(); 393 | 394 | let sig_digest = seal_header::( 395 | &header, 396 | &*self.keystore, 397 | &type_public_pair, 398 | &sr25519::CRYPTO_ID, 399 | ); 400 | 401 | let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header.clone()); 402 | block_import_params.post_digests.push(sig_digest.clone()); 403 | block_import_params.body = Some(extrinsics.clone()); 404 | block_import_params.state_action = sc_consensus::StateAction::ApplyChanges( 405 | sc_consensus::StorageChanges::Changes(storage_changes), 406 | ); 407 | 408 | // Print the same log line as slots (aura and babe) 409 | info!( 410 | "🔖 Sealed block for proposal at {}. Hash now {:?}, previously {:?}.", 411 | *header.number(), 412 | block_import_params.post_hash(), 413 | header.hash(), 414 | ); 415 | 416 | if let Err(err) = self 417 | .block_import 418 | .lock() 419 | .await 420 | .import_block(block_import_params) 421 | .await 422 | { 423 | error!( 424 | target: LOG_TARGET, 425 | at = ?parent.hash(), 426 | error = ?err, 427 | "Error importing built block.", 428 | ); 429 | 430 | return None; 431 | } 432 | 433 | // Compute info about the block after the digest is added 434 | let mut post_header = header.clone(); 435 | post_header.digest_mut().logs.push(sig_digest.clone()); 436 | let post_block = B::new(post_header, extrinsics); 437 | 438 | // Returning the block WITH the seal for distribution around the network. 439 | Some(ParachainCandidate { 440 | block: post_block, 441 | proof, 442 | }) 443 | } 444 | } 445 | 446 | /// Paramaters of [`build_relay_chain_consensus`]. 447 | /// 448 | /// I briefly tried the async keystore approach, but decided to go sync so I can copy 449 | /// code from Aura. Maybe after it is working, Jeremy can help me go async. 
450 | pub struct BuildNimbusConsensusParams { 451 | pub para_id: ParaId, 452 | pub proposer_factory: PF, 453 | pub create_inherent_data_providers: CIDP, 454 | pub block_import: BI, 455 | pub backend: Arc, 456 | pub parachain_client: Arc, 457 | pub keystore: KeystorePtr, 458 | pub skip_prediction: bool, 459 | pub additional_digests_provider: DP, 460 | } 461 | -------------------------------------------------------------------------------- /nimbus-consensus/src/manual_seal.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 
16 | 17 | use cumulus_primitives_parachain_inherent::{ 18 | ParachainInherentData, INHERENT_IDENTIFIER as PARACHAIN_INHERENT_IDENTIFIER, 19 | }; 20 | use nimbus_primitives::{ 21 | CompatibleDigestItem, DigestsProvider, NimbusApi, NimbusId, NIMBUS_ENGINE_ID, 22 | }; 23 | use sc_consensus::BlockImportParams; 24 | use sc_consensus_manual_seal::{ConsensusDataProvider, Error}; 25 | use sp_api::{BlockT, HeaderT, ProvideRuntimeApi, TransactionFor}; 26 | use sp_application_crypto::ByteArray; 27 | use sp_core::sr25519; 28 | use sp_inherents::InherentData; 29 | use sp_keystore::KeystorePtr; 30 | use sp_runtime::{Digest, DigestItem}; 31 | use std::{marker::PhantomData, sync::Arc}; 32 | 33 | /// Provides nimbus-compatible pre-runtime digests for use with manual seal consensus 34 | pub struct NimbusManualSealConsensusDataProvider { 35 | /// Shared reference to keystore 36 | pub keystore: KeystorePtr, 37 | 38 | /// Shared reference to the client 39 | pub client: Arc, 40 | // Could have a skip_prediction field here if it becomes desireable 41 | /// Additional digests provider 42 | pub additional_digests_provider: DP, 43 | 44 | pub _phantom: PhantomData

, 45 | } 46 | 47 | impl ConsensusDataProvider for NimbusManualSealConsensusDataProvider 48 | where 49 | B: BlockT, 50 | C: ProvideRuntimeApi + Send + Sync, 51 | C::Api: NimbusApi, 52 | DP: DigestsProvider::Hash> + Send + Sync, 53 | P: Send + Sync, 54 | { 55 | type Transaction = TransactionFor; 56 | type Proof = P; 57 | 58 | fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { 59 | // Retrieve the relay chain block number to use as the slot number from the parachain inherent 60 | let slot_number = inherents 61 | .get_data::(&PARACHAIN_INHERENT_IDENTIFIER) 62 | .expect("Parachain inherent should decode correctly") 63 | .expect("Parachain inherent should be present because we are mocking it") 64 | .validation_data 65 | .relay_parent_number; 66 | 67 | // Fetch first eligible key from keystore 68 | let maybe_key = crate::first_eligible_key::( 69 | self.client.clone(), 70 | &*self.keystore, 71 | parent, 72 | // For now we author all blocks in slot zero, which is consistent with how we are 73 | // mocking the relay chain height which the runtime uses for slot beacon. 74 | // This should improve. 
See https://github.com/PureStake/nimbus/issues/3 75 | slot_number, 76 | ); 77 | 78 | // If we aren't eligible, return an appropriate error 79 | match maybe_key { 80 | Some(key) => { 81 | let nimbus_id = NimbusId::from_slice(&key).map_err(|_| { 82 | Error::StringError(String::from("invalid nimbus id (wrong length)")) 83 | })?; 84 | let mut logs = vec![CompatibleDigestItem::nimbus_pre_digest(nimbus_id.clone())]; 85 | logs.extend( 86 | self.additional_digests_provider 87 | .provide_digests(nimbus_id, parent.hash()), 88 | ); 89 | Ok(Digest { logs }) 90 | } 91 | None => Err(Error::StringError(String::from( 92 | "no nimbus keys available to manual seal", 93 | ))), 94 | } 95 | } 96 | 97 | // This is where we actually sign with the nimbus key and attach the seal 98 | fn append_block_import( 99 | &self, 100 | _parent: &B::Header, 101 | params: &mut BlockImportParams, 102 | _inherents: &InherentData, 103 | _proof: Self::Proof, 104 | ) -> Result<(), Error> { 105 | // We have to reconstruct the type-public pair which is only communicated through the pre-runtime digest 106 | let claimed_author = params 107 | .header 108 | .digest() 109 | .logs 110 | .iter() 111 | .find_map(|digest| { 112 | match *digest { 113 | // We do not support the older author inherent in manual seal 114 | DigestItem::PreRuntime(id, ref author_id) if id == NIMBUS_ENGINE_ID => { 115 | Some(author_id.clone()) 116 | } 117 | _ => None, 118 | } 119 | }) 120 | .expect("Expected one pre-runtime digest that contains author id bytes"); 121 | 122 | let nimbus_public = NimbusId::from_slice(&claimed_author) 123 | .map_err(|_| Error::StringError(String::from("invalid nimbus id (wrong length)")))?; 124 | 125 | let sig_digest = crate::seal_header::( 126 | ¶ms.header, 127 | &*self.keystore, 128 | &nimbus_public.to_raw_vec(), 129 | &sr25519::CRYPTO_ID, 130 | ); 131 | 132 | params.post_digests.push(sig_digest); 133 | 134 | Ok(()) 135 | } 136 | } 137 | 
-------------------------------------------------------------------------------- /nimbus-primitives/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nimbus-primitives" 3 | authors = [ "PureStake" ] 4 | description = "Primitive types and traits used in the Nimbus consensus framework" 5 | edition = "2021" 6 | version = "0.9.0" 7 | 8 | [dependencies] 9 | async-trait = { version = "0.1", optional = true } 10 | parity-scale-codec = { version = "3.0.0", default-features = false, features = [ "derive" ] } 11 | 12 | frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 13 | frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 14 | scale-info = { version = "2.0.0", default-features = false, features = [ "derive" ] } 15 | sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 16 | sp-application-crypto = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 17 | sp-inherents = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 18 | sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 19 | sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 20 | 21 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", optional = true, default-features = false, branch = "polkadot-v0.9.43" } 22 | 23 | [features] 24 | default = [ "std" ] 25 | std = [ 26 | "async-trait", 27 | "frame-support/std", 28 | "frame-system/std", 29 | "parity-scale-codec/std", 30 | "scale-info/std", 31 | "sp-api/std", 32 | "sp-application-crypto/std", 33 | "sp-inherents/std", 34 | "sp-runtime/std", 35 | 
"sp-std/std", 36 | ] 37 | 38 | runtime-benchmarks = [ "frame-benchmarking", "sp-runtime/runtime-benchmarks" ] 39 | 40 | try-runtime = [ "frame-support/try-runtime" ] 41 | -------------------------------------------------------------------------------- /nimbus-primitives/src/digests.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | //! A convenient interface over the digests used in nimbus. 18 | //! 19 | //! Currently Nimbus has two digests; 20 | //! 1. A consensus digest that contains the block author identity 21 | //! This information is copied from the author inehrent. 22 | //! This may be replaced with a pre-runtime digest in the future. 23 | //! 2. A seal digest that contains a signature over the rest of the 24 | //! block including the first digest. 25 | 26 | use crate::{NimbusId, NimbusSignature, NIMBUS_ENGINE_ID}; 27 | use parity_scale_codec::Encode; 28 | use sp_runtime::generic::DigestItem; 29 | 30 | /// A digest item which is usable with aura consensus. 
31 | pub trait CompatibleDigestItem: Sized { 32 | /// Construct a pre-runtime digest from the given AuthorId 33 | fn nimbus_pre_digest(author: NimbusId) -> Self; 34 | 35 | /// If this item is a nimbus pre-runtime digest, return the author 36 | fn as_nimbus_pre_digest(&self) -> Option; 37 | 38 | /// Construct a seal digest item from the given signature 39 | fn nimbus_seal(signature: NimbusSignature) -> Self; 40 | 41 | /// If this item is a nimbus seal, return the signature. 42 | fn as_nimbus_seal(&self) -> Option; 43 | 44 | /// This will be deprecated in the future 45 | /// Construct a consensus digest from the given AuthorId 46 | fn nimbus_consensus_digest(author: NimbusId) -> Self; 47 | 48 | /// This will be deprecated in the future 49 | /// If this item is a nimbus consensus digest, return the author 50 | fn as_nimbus_consensus_digest(&self) -> Option; 51 | } 52 | 53 | impl CompatibleDigestItem for DigestItem { 54 | fn nimbus_pre_digest(author: NimbusId) -> Self { 55 | DigestItem::PreRuntime(NIMBUS_ENGINE_ID, author.encode()) 56 | } 57 | 58 | fn as_nimbus_pre_digest(&self) -> Option { 59 | self.pre_runtime_try_to(&NIMBUS_ENGINE_ID) 60 | } 61 | 62 | fn nimbus_seal(signature: NimbusSignature) -> Self { 63 | DigestItem::Seal(NIMBUS_ENGINE_ID, signature.encode()) 64 | } 65 | 66 | fn as_nimbus_seal(&self) -> Option { 67 | self.seal_try_to(&NIMBUS_ENGINE_ID) 68 | } 69 | 70 | // Remove this once deprecated 71 | fn nimbus_consensus_digest(author: NimbusId) -> Self { 72 | DigestItem::Consensus(NIMBUS_ENGINE_ID, author.encode()) 73 | } 74 | 75 | // Remove this once deprecated. I don't think it is used anyway. 76 | // Notice that it calls the pre_runtime helper function. 
77 | fn as_nimbus_consensus_digest(&self) -> Option { 78 | self.pre_runtime_try_to(&NIMBUS_ENGINE_ID) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /nimbus-primitives/src/inherents.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | use sp_inherents::{InherentData, InherentIdentifier}; 18 | 19 | /// The InherentIdentifier for nimbus's author inherent 20 | pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"author__"; 21 | 22 | /// A bare minimum inherent data provider that provides no real data. 23 | /// The inherent is simply used as a way to kick off some computation 24 | /// until https://github.com/paritytech/substrate/pull/10128 lands. 
25 | pub struct InherentDataProvider; 26 | 27 | #[cfg(feature = "std")] 28 | #[async_trait::async_trait] 29 | impl sp_inherents::InherentDataProvider for InherentDataProvider { 30 | async fn provide_inherent_data( 31 | &self, 32 | inherent_data: &mut InherentData, 33 | ) -> Result<(), sp_inherents::Error> { 34 | inherent_data.put_data(INHERENT_IDENTIFIER, &()) 35 | } 36 | 37 | async fn try_handle_error( 38 | &self, 39 | identifier: &InherentIdentifier, 40 | _error: &[u8], 41 | ) -> Option> { 42 | // Don't process errors from other inherents 43 | if *identifier != INHERENT_IDENTIFIER { 44 | return None; 45 | } 46 | 47 | // All errors with the author inherent are fatal 48 | Some(Err(sp_inherents::Error::Application(Box::from( 49 | String::from("Error processing dummy nimbus inherent"), 50 | )))) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /nimbus-primitives/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | //! Nimbus Consensus Primitives 18 | //! 19 | //! Primitive types and traits for working with the Nimbus consensus framework. 20 | //! 
This code can be built to no_std for use in the runtime 21 | 22 | #![cfg_attr(not(feature = "std"), no_std)] 23 | 24 | use sp_application_crypto::KeyTypeId; 25 | use sp_runtime::generic::DigestItem; 26 | use sp_runtime::traits::BlockNumberProvider; 27 | use sp_runtime::ConsensusEngineId; 28 | #[cfg(feature = "runtime-benchmarks")] 29 | use sp_std::vec::{self, Vec}; 30 | 31 | pub mod digests; 32 | mod inherents; 33 | 34 | pub use digests::CompatibleDigestItem; 35 | 36 | pub use inherents::{InherentDataProvider, INHERENT_IDENTIFIER}; 37 | 38 | pub trait DigestsProvider { 39 | type Digests: IntoIterator; 40 | fn provide_digests(&self, id: Id, parent: BlockHash) -> Self::Digests; 41 | } 42 | 43 | impl DigestsProvider for () { 44 | type Digests = [DigestItem; 0]; 45 | fn provide_digests(&self, _id: Id, _parent: BlockHash) -> Self::Digests { 46 | [] 47 | } 48 | } 49 | 50 | impl DigestsProvider for F 51 | where 52 | F: Fn(Id, BlockHash) -> D, 53 | D: IntoIterator, 54 | { 55 | type Digests = D; 56 | 57 | fn provide_digests(&self, id: Id, parent: BlockHash) -> Self::Digests { 58 | (*self)(id, parent) 59 | } 60 | } 61 | 62 | /// The given account ID is the author of the current block. 63 | pub trait EventHandler { 64 | //TODO should we be tking ownership here? 65 | fn note_author(author: Author); 66 | } 67 | 68 | impl EventHandler for () { 69 | fn note_author(_author: T) {} 70 | } 71 | 72 | /// A mechanism for determining the current slot. 73 | /// For now we use u32 as the slot type everywhere. Let's see how long we can get away with that. 74 | pub trait SlotBeacon { 75 | fn slot() -> u32; 76 | #[cfg(feature = "runtime-benchmarks")] 77 | fn set_slot(_slot: u32) {} 78 | } 79 | 80 | /// Anything that can provide a block height can be used as a slot beacon. This could be 81 | /// used in at least two realistic ways. 82 | /// 1. Use your own chain's height as the slot number 83 | /// 2. If you're a parachain, use the relay chain's height as the slot number. 
84 | impl> SlotBeacon for T { 85 | fn slot() -> u32 { 86 | Self::current_block_number() 87 | } 88 | #[cfg(feature = "runtime-benchmarks")] 89 | fn set_slot(slot: u32) { 90 | Self::set_block_number(slot); 91 | } 92 | } 93 | 94 | /// PLANNED: A SlotBeacon that starts a new slot based on the timestamp. Behaviorally, this is 95 | /// similar to what aura, babe and company do. Implementation-wise it is different because it 96 | /// depends on the timestamp pallet for its notion of time. 97 | pub struct IntervalBeacon; 98 | 99 | impl SlotBeacon for IntervalBeacon { 100 | fn slot() -> u32 { 101 | todo!() 102 | } 103 | } 104 | 105 | /// Trait to determine whether this author is eligible to author in this slot. 106 | /// This is the primary trait your nimbus filter needs to implement. 107 | /// 108 | /// This is the proposition-logic variant. 109 | /// That is to say the caller specifies an author an author and the implementation 110 | /// replies whether that author is eligible. This is useful in many cases and is 111 | /// particularly useful when the active set is unbounded. 112 | /// There may be another variant where the caller only supplies a slot and the 113 | /// implementation replies with a complete set of eligible authors. 114 | pub trait CanAuthor { 115 | #[cfg(feature = "try-runtime")] 116 | // With `try-runtime` the local author should always be able to author a block. 117 | fn can_author(author: &AuthorId, slot: &u32) -> bool { 118 | true 119 | } 120 | #[cfg(not(feature = "try-runtime"))] 121 | fn can_author(author: &AuthorId, slot: &u32) -> bool; 122 | #[cfg(feature = "runtime-benchmarks")] 123 | fn get_authors(_slot: &u32) -> Vec { 124 | vec![] 125 | } 126 | #[cfg(feature = "runtime-benchmarks")] 127 | fn set_eligible_author(_slot: &u32) {} 128 | } 129 | /// Default implementation where anyone can author. 
130 | /// 131 | /// This is identical to Cumulus's RelayChainConsensus 132 | impl CanAuthor for () { 133 | fn can_author(_: &T, _: &u32) -> bool { 134 | true 135 | } 136 | } 137 | 138 | /// A Trait to lookup runtime AccountIds from AuthorIds (probably NimbusIds) 139 | /// The trait is generic over the AccountId, because different runtimes use 140 | /// different notions of AccountId. It is also generic over the AuthorId to 141 | /// support the usecase where the author inherent is used for beneficiary info 142 | /// and contains an AccountId directly. 143 | pub trait AccountLookup { 144 | fn lookup_account(author: &NimbusId) -> Option; 145 | } 146 | 147 | // A dummy impl used in simple tests 148 | impl AccountLookup for () { 149 | fn lookup_account(_: &NimbusId) -> Option { 150 | None 151 | } 152 | } 153 | 154 | /// The ConsensusEngineId for nimbus consensus 155 | /// this same identifier will be used regardless of the filters installed 156 | pub const NIMBUS_ENGINE_ID: ConsensusEngineId = *b"nmbs"; 157 | 158 | /// The KeyTypeId used in the Nimbus consensus framework regardless of what filters are in place. 159 | /// If this gets well adopted, we could move this definition to sp_core to avoid conflicts. 160 | pub const NIMBUS_KEY_ID: KeyTypeId = KeyTypeId(*b"nmbs"); 161 | 162 | // The strongly-typed crypto wrappers to be used by Nimbus in the keystore 163 | mod nimbus_crypto { 164 | use sp_application_crypto::{app_crypto, sr25519}; 165 | app_crypto!(sr25519, crate::NIMBUS_KEY_ID); 166 | } 167 | 168 | /// A nimbus author identifier (A public key). 169 | pub type NimbusId = nimbus_crypto::Public; 170 | 171 | /// A nimbus signature. 172 | pub type NimbusSignature = nimbus_crypto::Signature; 173 | 174 | sp_application_crypto::with_pair! { 175 | /// A nimbus keypair 176 | pub type NimbusPair = nimbus_crypto::Pair; 177 | } 178 | 179 | sp_api::decl_runtime_apis! 
{ 180 | /// The runtime api used to predict whether a Nimbus author will be eligible in the given slot 181 | pub trait NimbusApi { 182 | fn can_author(author: NimbusId, relay_parent: u32, parent_header: &Block::Header) -> bool; 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /pallets/aura-style-filter/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-aura-style-filter" 3 | authors = [ "PureStake" ] 4 | description = "The Aura (authority round) consensus engine implemented in the Nimbus framework" 5 | edition = "2021" 6 | version = "0.9.0" 7 | 8 | [dependencies] 9 | frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 10 | frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 11 | nimbus-primitives = { path = "../../nimbus-primitives", default-features = false } 12 | parity-scale-codec = { version = "3.0.0", default-features = false, features = [ "derive" ] } 13 | scale-info = { version = "2.0.0", default-features = false, features = [ "derive" ] } 14 | serde = { version = "1.0.101", optional = true, features = [ "derive" ] } 15 | sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 16 | sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 17 | sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 18 | 19 | [features] 20 | default = [ "std" ] 21 | std = [ 22 | "frame-support/std", 23 | "frame-system/std", 24 | "nimbus-primitives/std", 25 | "parity-scale-codec/std", 26 | "scale-info/std", 27 | "serde", 28 | "sp-core/std", 29 | "sp-runtime/std", 30 | "sp-std/std", 31 | ] 32 | 33 | try-runtime = [ 
"frame-support/try-runtime", "nimbus-primitives/try-runtime" ] 34 | -------------------------------------------------------------------------------- /pallets/aura-style-filter/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | //! A Nimbus filter for the AuRa consensus algorithm. This filter does not use any entropy, it 18 | //! simply rotates authors in order. A single author is eligible at each slot. 19 | //! 20 | //! In the Substrate ecosystem, this algorithm is typically known as AuRa (authority round). 21 | //! There is a well known implementation in the main Substrate repository and published at 22 | //! https://crates.io/crates/sc-consensus-aura. There are two primary differences between 23 | //! the approaches: 24 | //! 25 | //! 1. This filter leverages all the heavy lifting of the Nimbus framework and consequently is 26 | //! capable of expressing Aura in < 100 lines of code. 27 | //! 28 | //! Whereas sc-consensus-aura includes the entire consensus stack including block signing, digest 29 | //! formats, and slot prediction. This is a lot of overhead for a sipmle round robin 30 | //! consensus that basically boils down to this function 31 | //! 
https://github.com/paritytech/substrate/blob/0f849efc/client/consensus/aura/src/lib.rs#L91-L106 32 | //! 33 | //! 2. The Nimbus framework places the author checking logic in the runtime which makes it relatively 34 | //! easy for relay chain validators to confirm the author is valid. 35 | //! 36 | //! Whereas sc-consensus-aura places the author checking offchain. The offchain approach is fine 37 | //! for standalone layer 1 blockchains, but not well suited for verification on the relay chain 38 | //! where validators only run a wasm blob. 39 | 40 | #![cfg_attr(not(feature = "std"), no_std)] 41 | 42 | use frame_support::pallet; 43 | pub use pallet::*; 44 | 45 | #[pallet] 46 | pub mod pallet { 47 | 48 | use frame_support::pallet_prelude::*; 49 | use sp_std::vec::Vec; 50 | 51 | //TODO Now that the CanAuthor trait takes a slot number, I don't think this even needs to be a pallet. 52 | // I think it could be just a simple type. 53 | /// The Author Filter pallet 54 | #[pallet::pallet] 55 | pub struct Pallet(PhantomData); 56 | 57 | /// Configuration trait of this pallet. 58 | #[pallet::config] 59 | pub trait Config: frame_system::Config { 60 | /// A source for the complete set of potential authors. 61 | /// The starting point of the filtering. 62 | type PotentialAuthors: Get>; 63 | } 64 | 65 | // This code will be called by the author-inherent pallet to check whether the reported author 66 | // of this block is eligible at this slot. We calculate that result on demand and do not 67 | // record it in storage. 68 | impl nimbus_primitives::CanAuthor for Pallet { 69 | #[cfg(not(feature = "try-runtime"))] 70 | fn can_author(account: &T::AccountId, slot: &u32) -> bool { 71 | let active: Vec = T::PotentialAuthors::get(); 72 | 73 | // This is the core Aura logic right here.
74 | let active_author = &active[*slot as usize % active.len()]; 75 | 76 | account == active_author 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /pallets/author-inherent/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-author-inherent" 3 | authors = [ "PureStake" ] 4 | description = "This pallet is the core of the in-runtime portion of Nimbus." 5 | edition = "2021" 6 | license = "GPL-3.0-only" 7 | version = "0.9.0" 8 | 9 | [dependencies] 10 | frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 11 | frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 12 | log = { version = "0.4.17", default-features = false } 13 | nimbus-primitives = { path = "../../nimbus-primitives", default-features = false } 14 | parity-scale-codec = { version = "3.0.0", default-features = false, features = [ "derive" ] } 15 | scale-info = { version = "2.0.0", default-features = false, features = [ "derive" ] } 16 | sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 17 | sp-application-crypto = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 18 | sp-inherents = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 19 | sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 20 | sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 21 | 22 | # Benchmarks 23 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", optional = true, default-features = false, branch = "polkadot-v0.9.43" } 24 | 25 | 
[dev-dependencies] 26 | frame-support-test = { git = "https://github.com/paritytech/substrate", version = "3.0.0", branch = "polkadot-v0.9.43" } 27 | sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 28 | sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 29 | 30 | [features] 31 | default = [ "std" ] 32 | std = [ 33 | "frame-benchmarking/std", 34 | "frame-support/std", 35 | "frame-system/std", 36 | "log/std", 37 | "nimbus-primitives/std", 38 | "parity-scale-codec/std", 39 | "scale-info/std", 40 | "sp-api/std", 41 | "sp-application-crypto/std", 42 | "sp-inherents/std", 43 | "sp-runtime/std", 44 | "sp-std/std", 45 | ] 46 | 47 | runtime-benchmarks = [ 48 | "frame-benchmarking", 49 | "nimbus-primitives/runtime-benchmarks", 50 | ] 51 | -------------------------------------------------------------------------------- /pallets/author-inherent/src/benchmarks.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Moonbeam. 3 | 4 | // Moonbeam is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Moonbeam is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Moonbeam. If not, see . 
16 | 17 | #![cfg(feature = "runtime-benchmarks")] 18 | 19 | use crate::{Call, Config, Pallet}; 20 | use frame_benchmarking::benchmarks; 21 | use frame_system::RawOrigin; 22 | use nimbus_primitives::CanAuthor; 23 | use nimbus_primitives::SlotBeacon; 24 | benchmarks! { 25 | kick_off_authorship_validation { 26 | // The slot inserted needs to be higher than that already in storage 27 | T::SlotBeacon::set_slot(100); 28 | Pallet::::set_eligible_author(&T::SlotBeacon::slot()); 29 | }: _(RawOrigin::None) 30 | } 31 | -------------------------------------------------------------------------------- /pallets/author-inherent/src/exec.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | //! Block executive to be used by relay chain validators when validating parachain blocks built 18 | //! with the nimbus consensus family.
19 | 20 | use frame_support::traits::ExecuteBlock; 21 | use sp_api::{BlockT, HeaderT}; 22 | // For some reason I can't get these logs to actually print 23 | use log::debug; 24 | use nimbus_primitives::{digests::CompatibleDigestItem, NimbusId, NIMBUS_ENGINE_ID}; 25 | use sp_application_crypto::ByteArray; 26 | use sp_runtime::{generic::DigestItem, RuntimeAppPublic}; 27 | 28 | /// Block executive to be used by relay chain validators when validating parachain blocks built 29 | /// with the nimbus consensus family. 30 | /// 31 | /// This will strip the seal digest, and confirm that it contains a valid signature 32 | /// By the block author reported in the author inherent. 33 | /// 34 | /// Essentially this contains the logic of the verifier plus the inner executive. 35 | /// TODO Design improvement: 36 | /// Can we share code with the verifier? 37 | /// Can this struct take a verifier as an associated type? 38 | /// Or maybe this will just get simpler in general when https://github.com/paritytech/polkadot/issues/2888 lands 39 | pub struct BlockExecutor(sp_std::marker::PhantomData<(T, I)>); 40 | 41 | impl ExecuteBlock for BlockExecutor 42 | where 43 | Block: BlockT, 44 | I: ExecuteBlock, 45 | { 46 | fn execute_block(block: Block) { 47 | let (mut header, extrinsics) = block.deconstruct(); 48 | 49 | debug!(target: "executive", "In hacked Executive. Initial digests are {:?}", header.digest()); 50 | 51 | // Set the seal aside for checking. 52 | let seal = header 53 | .digest_mut() 54 | .pop() 55 | .expect("Seal digest is present and is last item"); 56 | 57 | debug!(target: "executive", "In hacked Executive.
digests after stripping {:?}", header.digest()); 58 | debug!(target: "executive", "The seal we got {:?}", seal); 59 | 60 | let signature = seal 61 | .as_nimbus_seal() 62 | .unwrap_or_else(|| panic!("HeaderUnsealed")); 63 | 64 | debug!(target: "executive", "🪲 Header hash after popping digest {:?}", header.hash()); 65 | 66 | debug!(target: "executive", "🪲 Signature according to executive is {:?}", signature); 67 | 68 | // Grab the author information from the preruntime digest 69 | //TODO use the trait 70 | let claimed_author = header 71 | .digest() 72 | .logs 73 | .iter() 74 | .find_map(|digest| match *digest { 75 | DigestItem::PreRuntime(id, ref author_id) if id == NIMBUS_ENGINE_ID => { 76 | Some(author_id.clone()) 77 | } 78 | _ => None, 79 | }) 80 | .expect("Expected pre-runtime digest that contains author id bytes"); 81 | 82 | debug!(target: "executive", "🪲 Claimed Author according to executive is {:?}", claimed_author); 83 | 84 | // Verify the signature 85 | let valid_signature = NimbusId::from_slice(&claimed_author) 86 | .expect("Expected claimed author to be a valid NimbusId.") 87 | .verify(&header.hash(), &signature); 88 | 89 | debug!(target: "executive", "🪲 Valid signature? {:?}", valid_signature); 90 | 91 | if !valid_signature { 92 | panic!("Block signature invalid"); 93 | } 94 | 95 | // Now that we've verified the signature, hand execution off to the inner executor 96 | // which is probably the normal frame executive. 97 | I::execute_block(Block::new(header, extrinsics)); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /pallets/author-inherent/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 
3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | //! Pallet that allows block authors to include their identity in a block via an inherent. 18 | //! Currently the author does not _prove_ their identity, just states it. So it should not be used, 19 | //! for things like equivocation slashing that require authenticated authorship information. 20 | 21 | #![cfg_attr(not(feature = "std"), no_std)] 22 | 23 | use frame_support::traits::{FindAuthor, Get}; 24 | use nimbus_primitives::{ 25 | AccountLookup, CanAuthor, NimbusId, SlotBeacon, INHERENT_IDENTIFIER, NIMBUS_ENGINE_ID, 26 | }; 27 | use parity_scale_codec::{Decode, Encode, FullCodec}; 28 | use sp_inherents::{InherentIdentifier, IsFatalError}; 29 | use sp_runtime::{ConsensusEngineId, RuntimeString}; 30 | 31 | mod exec; 32 | pub use exec::BlockExecutor; 33 | 34 | pub use pallet::*; 35 | 36 | #[cfg(any(test, feature = "runtime-benchmarks"))] 37 | mod benchmarks; 38 | 39 | pub mod weights; 40 | 41 | #[cfg(test)] 42 | mod mock; 43 | #[cfg(test)] 44 | mod tests; 45 | 46 | #[frame_support::pallet] 47 | pub mod pallet { 48 | use super::*; 49 | use crate::weights::WeightInfo; 50 | use frame_support::pallet_prelude::*; 51 | use frame_system::pallet_prelude::*; 52 | 53 | /// The Author Inherent pallet. The core of the nimbus consensus framework's runtime presence. 
54 | #[pallet::pallet] 55 | pub struct Pallet(PhantomData); 56 | 57 | #[pallet::config] 58 | pub trait Config: frame_system::Config { 59 | /// Type used to refer to a block author. 60 | type AuthorId: sp_std::fmt::Debug + PartialEq + Clone + FullCodec + TypeInfo + MaxEncodedLen; 61 | 62 | /// A type to convert between NimbusId and AuthorId. This is useful when you want to associate 63 | /// Block authoring behavior with an AuthorId for rewards or slashing. If you do not need to 64 | /// hold an AuthorId responsible for authoring use `()` which acts as an identity mapping. 65 | type AccountLookup: AccountLookup; 66 | 67 | /// The final word on whether the reported author can author at this height. 68 | /// This will be used when executing the inherent. This check is often stricter than the 69 | /// Preliminary check, because it can use more data. 70 | /// If the pallet that implements this trait depends on an inherent, that inherent **must** 71 | /// be included before this one. 72 | type CanAuthor: CanAuthor; 73 | 74 | /// Some way of determining the current slot for purposes of verifying the author's eligibility 75 | type SlotBeacon: SlotBeacon; 76 | 77 | type WeightInfo: WeightInfo; 78 | } 79 | 80 | impl sp_runtime::BoundToRuntimeAppPublic for Pallet { 81 | type Public = NimbusId; 82 | } 83 | 84 | #[pallet::error] 85 | pub enum Error { 86 | /// Author already set in block. 87 | AuthorAlreadySet, 88 | /// No AccountId was found to be associated with this author 89 | NoAccountId, 90 | /// The author in the inherent is not an eligible author. 91 | CannotBeAuthor, 92 | } 93 | 94 | /// Author of current block. 95 | #[pallet::storage] 96 | pub type Author = StorageValue<_, T::AuthorId, OptionQuery>; 97 | 98 | /// The highest slot that has been seen in the history of this chain. 99 | /// This is a strictly-increasing value. 
100 | #[pallet::storage] 101 | pub type HighestSlotSeen = StorageValue<_, u32, ValueQuery>; 102 | 103 | #[pallet::hooks] 104 | impl Hooks> for Pallet { 105 | fn on_initialize(_: T::BlockNumber) -> Weight { 106 | // Now extract the author from the digest 107 | let digest = >::digest(); 108 | let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); 109 | if let Some(author) = Self::find_author(pre_runtime_digests) { 110 | // Store the author so we can confirm eligibility after the inherents have executed 111 | >::put(&author); 112 | } 113 | 114 | T::DbWeight::get().writes(1) 115 | } 116 | } 117 | 118 | #[pallet::call] 119 | impl Pallet { 120 | /// This inherent is a workaround to run code after the "real" inherents have executed, 121 | /// but before transactions are executed. 122 | // This should go into on_post_inherents when it is ready https://github.com/paritytech/substrate/pull/10128 123 | // TODO better weight. For now we just set a somewhat conservative fudge factor 124 | #[pallet::call_index(0)] 125 | #[pallet::weight((T::WeightInfo::kick_off_authorship_validation(), DispatchClass::Mandatory))] 126 | pub fn kick_off_authorship_validation(origin: OriginFor) -> DispatchResultWithPostInfo { 127 | ensure_none(origin)?; 128 | 129 | // First check that the slot number is valid (greater than the previous highest) 130 | let slot = T::SlotBeacon::slot(); 131 | assert!( 132 | slot > HighestSlotSeen::::get(), 133 | "Block invalid; Supplied slot number is not high enough" 134 | ); 135 | 136 | // Now check that the author is valid in this slot 137 | assert!( 138 | T::CanAuthor::can_author(&Self::get(), &slot), 139 | "Block invalid, supplied author is not eligible." 
140 | ); 141 | 142 | // Once that is validated, update the stored slot number 143 | HighestSlotSeen::::put(slot); 144 | 145 | Ok(Pays::No.into()) 146 | } 147 | } 148 | 149 | #[pallet::inherent] 150 | impl ProvideInherent for Pallet { 151 | type Call = Call; 152 | type Error = InherentError; 153 | const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; 154 | 155 | fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { 156 | // Return Ok(Some(_)) unconditionally because this inherent is required in every block 157 | // If it is not found, throw an AuthorInherentRequired error. 158 | Ok(Some(InherentError::Other( 159 | sp_runtime::RuntimeString::Borrowed( 160 | "Inherent required to manually initiate author validation", 161 | ), 162 | ))) 163 | } 164 | 165 | // Regardless of whether the client is still supplying the author id, 166 | // we will create the new empty-payload inherent extrinsic. 167 | fn create_inherent(_data: &InherentData) -> Option { 168 | Some(Call::kick_off_authorship_validation {}) 169 | } 170 | 171 | fn is_inherent(call: &Self::Call) -> bool { 172 | matches!(call, Call::kick_off_authorship_validation { .. 
}) 173 | } 174 | } 175 | 176 | impl FindAuthor for Pallet { 177 | fn find_author<'a, I>(digests: I) -> Option 178 | where 179 | I: 'a + IntoIterator, 180 | { 181 | for (id, mut data) in digests.into_iter() { 182 | if id == NIMBUS_ENGINE_ID { 183 | let author_id = NimbusId::decode(&mut data) 184 | .expect("NimbusId encoded in preruntime digest must be valid"); 185 | 186 | let author_account = T::AccountLookup::lookup_account(&author_id) 187 | .expect("No Account Mapped to this NimbusId"); 188 | 189 | return Some(author_account); 190 | } 191 | } 192 | 193 | None 194 | } 195 | } 196 | 197 | impl Get for Pallet { 198 | fn get() -> T::AuthorId { 199 | Author::::get().expect("Block author not inserted into Author Inherent Pallet") 200 | } 201 | } 202 | 203 | /// To learn whether a given NimbusId can author, as opposed to an account id, you 204 | /// can ask this pallet directly. It will do the mapping for you. 205 | impl CanAuthor for Pallet { 206 | fn can_author(author: &NimbusId, slot: &u32) -> bool { 207 | let account = match T::AccountLookup::lookup_account(author) { 208 | Some(account) => account, 209 | // Authors whose account lookups fail will not be eligible 210 | None => { 211 | return false; 212 | } 213 | }; 214 | 215 | T::CanAuthor::can_author(&account, slot) 216 | } 217 | #[cfg(feature = "runtime-benchmarks")] 218 | fn set_eligible_author(slot: &u32) { 219 | let eligible_authors = T::CanAuthor::get_authors(slot); 220 | if let Some(author) = eligible_authors.first() { 221 | Author::::put(author) 222 | } 223 | } 224 | } 225 | } 226 | 227 | #[derive(Encode)] 228 | #[cfg_attr(feature = "std", derive(Debug, Decode))] 229 | pub enum InherentError { 230 | Other(RuntimeString), 231 | } 232 | 233 | impl IsFatalError for InherentError { 234 | fn is_fatal_error(&self) -> bool { 235 | match *self { 236 | InherentError::Other(_) => true, 237 | } 238 | } 239 | } 240 | 241 | impl InherentError { 242 | /// Try to create an instance out of the given identifier and data.
243 | #[cfg(feature = "std")] 244 | pub fn try_from(id: &InherentIdentifier, data: &[u8]) -> Option { 245 | if id == &INHERENT_IDENTIFIER { 246 | ::decode(&mut &data[..]).ok() 247 | } else { 248 | None 249 | } 250 | } 251 | } 252 | -------------------------------------------------------------------------------- /pallets/author-inherent/src/mock.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | use crate::{self as pallet_testing, AccountLookup, NimbusId}; 18 | use frame_support::parameter_types; 19 | use frame_support::traits::ConstU32; 20 | use frame_support::weights::RuntimeDbWeight; 21 | use frame_system; 22 | use sp_core::H256; 23 | use sp_runtime::{ 24 | testing::Header, 25 | traits::{BlakeTwo256, IdentityLookup}, 26 | }; 27 | 28 | type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; 29 | type Block = frame_system::mocking::MockBlock; 30 | 31 | frame_support::construct_runtime!( 32 | pub enum Test where 33 | Block = Block, 34 | NodeBlock = Block, 35 | UncheckedExtrinsic = UncheckedExtrinsic, 36 | { 37 | System: frame_system::{Pallet, Call, Config, Storage, Event}, 38 | AuthorInherent: pallet_testing::{Pallet, Call, Storage}, 39 | } 40 | ); 41 | 42 | parameter_types! 
{ 43 | pub const BlockHashCount: u64 = 250; 44 | pub Authors: Vec = vec![1, 2, 3, 4, 5]; 45 | pub const TestDbWeight: RuntimeDbWeight = RuntimeDbWeight { 46 | read: 1, 47 | write: 10, 48 | }; 49 | } 50 | 51 | impl frame_system::Config for Test { 52 | type BaseCallFilter = frame_support::traits::Everything; 53 | type BlockWeights = (); 54 | type BlockLength = (); 55 | type DbWeight = TestDbWeight; 56 | type RuntimeOrigin = RuntimeOrigin; 57 | type RuntimeCall = RuntimeCall; 58 | type Index = u64; 59 | type BlockNumber = u64; 60 | type Hash = H256; 61 | type Hashing = BlakeTwo256; 62 | type AccountId = u64; 63 | type Lookup = IdentityLookup; 64 | type Header = Header; 65 | type RuntimeEvent = RuntimeEvent; 66 | type BlockHashCount = BlockHashCount; 67 | type Version = (); 68 | type PalletInfo = PalletInfo; 69 | type AccountData = (); 70 | type OnNewAccount = (); 71 | type OnKilledAccount = (); 72 | type SystemWeightInfo = (); 73 | type SS58Prefix = (); 74 | type OnSetCode = (); 75 | type MaxConsumers = ConstU32<16>; 76 | } 77 | 78 | pub struct DummyBeacon {} 79 | impl nimbus_primitives::SlotBeacon for DummyBeacon { 80 | fn slot() -> u32 { 81 | 0 82 | } 83 | } 84 | 85 | pub const ALICE: u64 = 1; 86 | pub const ALICE_NIMBUS: [u8; 32] = [1; 32]; 87 | pub struct MockAccountLookup; 88 | impl AccountLookup for MockAccountLookup { 89 | fn lookup_account(nimbus_id: &NimbusId) -> Option { 90 | let nimbus_id_bytes: &[u8] = nimbus_id.as_ref(); 91 | 92 | if nimbus_id_bytes == &ALICE_NIMBUS { 93 | Some(ALICE) 94 | } else { 95 | None 96 | } 97 | } 98 | } 99 | 100 | impl pallet_testing::Config for Test { 101 | type AuthorId = u64; 102 | type AccountLookup = MockAccountLookup; 103 | type CanAuthor = (); 104 | type SlotBeacon = DummyBeacon; 105 | type WeightInfo = (); 106 | } 107 | 108 | /// Build genesis storage according to the mock runtime. 
109 | pub fn new_test_ext() -> sp_io::TestExternalities { 110 | frame_system::GenesisConfig::default() 111 | .build_storage::() 112 | .unwrap() 113 | .into() 114 | } 115 | -------------------------------------------------------------------------------- /pallets/author-inherent/src/tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 
16 | 17 | use crate::mock::*; 18 | use crate::pallet::Author; 19 | use frame_support::traits::{OnFinalize, OnInitialize}; 20 | use nimbus_primitives::{NimbusId, NIMBUS_ENGINE_ID}; 21 | use parity_scale_codec::Encode; 22 | use sp_core::{ByteArray, H256}; 23 | use sp_runtime::{Digest, DigestItem}; 24 | 25 | #[test] 26 | fn kick_off_authorship_validation_is_mandatory() { 27 | use frame_support::dispatch::{DispatchClass, GetDispatchInfo}; 28 | 29 | let info = crate::Call::::kick_off_authorship_validation {}.get_dispatch_info(); 30 | assert_eq!(info.class, DispatchClass::Mandatory); 31 | } 32 | 33 | #[test] 34 | fn test_author_is_available_after_on_initialize() { 35 | new_test_ext().execute_with(|| { 36 | let block_number = 1; 37 | System::initialize( 38 | &block_number, 39 | &H256::default(), 40 | &Digest { 41 | logs: vec![DigestItem::PreRuntime( 42 | NIMBUS_ENGINE_ID, 43 | NimbusId::from_slice(&ALICE_NIMBUS).unwrap().encode(), 44 | )], 45 | }, 46 | ); 47 | 48 | AuthorInherent::on_initialize(block_number); 49 | assert_eq!(Some(ALICE), >::get()); 50 | }); 51 | } 52 | 53 | #[test] 54 | fn test_author_is_still_available_after_on_finalize() { 55 | new_test_ext().execute_with(|| { 56 | let block_number = 1; 57 | System::initialize( 58 | &block_number, 59 | &H256::default(), 60 | &Digest { 61 | logs: vec![DigestItem::PreRuntime( 62 | NIMBUS_ENGINE_ID, 63 | NimbusId::from_slice(&ALICE_NIMBUS).unwrap().encode(), 64 | )], 65 | }, 66 | ); 67 | 68 | AuthorInherent::on_initialize(block_number); 69 | AuthorInherent::on_finalize(block_number); 70 | assert_eq!(Some(ALICE), >::get()); 71 | }); 72 | } 73 | -------------------------------------------------------------------------------- /pallets/author-inherent/src/weights.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Moonbeam. 
3 | 4 | // Moonbeam is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Moonbeam is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Moonbeam. If not, see . 16 | 17 | 18 | //! Autogenerated weights for pallet_author_inherent 19 | //! 20 | //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev 21 | //! DATE: 2023-05-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` 22 | //! WORST CASE MAP SIZE: `1000000` 23 | //! HOSTNAME: `benchmarker`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` 24 | //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 25 | 26 | // Executed Command: 27 | // ./target/release/moonbeam 28 | // benchmark 29 | // pallet 30 | // --execution=wasm 31 | // --wasm-execution=compiled 32 | // --pallet 33 | // * 34 | // --extrinsic 35 | // * 36 | // --steps 37 | // 50 38 | // --repeat 39 | // 20 40 | // --template=./benchmarking/frame-weight-template.hbs 41 | // --json-file 42 | // raw.json 43 | // --output 44 | // weights/ 45 | 46 | #![cfg_attr(rustfmt, rustfmt_skip)] 47 | #![allow(unused_parens)] 48 | #![allow(unused_imports)] 49 | 50 | use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; 51 | use sp_std::marker::PhantomData; 52 | 53 | /// Weight functions needed for pallet_author_inherent. 
54 | pub trait WeightInfo { 55 | fn kick_off_authorship_validation() -> Weight; 56 | } 57 | 58 | /// Weights for pallet_author_inherent using the Substrate node and recommended hardware. 59 | pub struct SubstrateWeight(PhantomData); 60 | impl WeightInfo for SubstrateWeight { 61 | /// Storage: ParachainSystem ValidationData (r:1 w:0) 62 | /// Proof Skipped: ParachainSystem ValidationData (max_values: Some(1), max_size: None, mode: Measured) 63 | /// Storage: AuthorInherent HighestSlotSeen (r:1 w:1) 64 | /// Proof: AuthorInherent HighestSlotSeen (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) 65 | /// Storage: AuthorInherent Author (r:1 w:0) 66 | /// Proof: AuthorInherent Author (max_values: Some(1), max_size: Some(20), added: 515, mode: MaxEncodedLen) 67 | /// Storage: ParachainStaking SelectedCandidates (r:1 w:0) 68 | /// Proof Skipped: ParachainStaking SelectedCandidates (max_values: Some(1), max_size: None, mode: Measured) 69 | /// Storage: AuthorFilter EligibleCount (r:1 w:0) 70 | /// Proof Skipped: AuthorFilter EligibleCount (max_values: Some(1), max_size: None, mode: Measured) 71 | /// Storage: Randomness PreviousLocalVrfOutput (r:1 w:0) 72 | /// Proof Skipped: Randomness PreviousLocalVrfOutput (max_values: Some(1), max_size: None, mode: Measured) 73 | fn kick_off_authorship_validation() -> Weight { 74 | // Proof Size summary in bytes: 75 | // Measured: `371` 76 | // Estimated: `10418` 77 | // Minimum execution time: 25_775_000 picoseconds. 
78 | Weight::from_parts(26_398_000, 10418) 79 | .saturating_add(T::DbWeight::get().reads(6_u64)) 80 | .saturating_add(T::DbWeight::get().writes(1_u64)) 81 | } 82 | } 83 | 84 | // For backwards compatibility and tests 85 | impl WeightInfo for () { 86 | /// Storage: ParachainSystem ValidationData (r:1 w:0) 87 | /// Proof Skipped: ParachainSystem ValidationData (max_values: Some(1), max_size: None, mode: Measured) 88 | /// Storage: AuthorInherent HighestSlotSeen (r:1 w:1) 89 | /// Proof: AuthorInherent HighestSlotSeen (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) 90 | /// Storage: AuthorInherent Author (r:1 w:0) 91 | /// Proof: AuthorInherent Author (max_values: Some(1), max_size: Some(20), added: 515, mode: MaxEncodedLen) 92 | /// Storage: ParachainStaking SelectedCandidates (r:1 w:0) 93 | /// Proof Skipped: ParachainStaking SelectedCandidates (max_values: Some(1), max_size: None, mode: Measured) 94 | /// Storage: AuthorFilter EligibleCount (r:1 w:0) 95 | /// Proof Skipped: AuthorFilter EligibleCount (max_values: Some(1), max_size: None, mode: Measured) 96 | /// Storage: Randomness PreviousLocalVrfOutput (r:1 w:0) 97 | /// Proof Skipped: Randomness PreviousLocalVrfOutput (max_values: Some(1), max_size: None, mode: Measured) 98 | fn kick_off_authorship_validation() -> Weight { 99 | // Proof Size summary in bytes: 100 | // Measured: `371` 101 | // Estimated: `10418` 102 | // Minimum execution time: 25_775_000 picoseconds. 
103 | Weight::from_parts(26_398_000, 10418) 104 | .saturating_add(RocksDbWeight::get().reads(6_u64)) 105 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 106 | } 107 | } -------------------------------------------------------------------------------- /pallets/author-slot-filter/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-author-slot-filter" 3 | authors = [ "PureStake" ] 4 | description = "Selects a pseudorandom Subset of eligible (probably staked) authors at each slot" 5 | edition = "2021" 6 | version = "0.9.0" 7 | 8 | [dependencies] 9 | frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 10 | frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 11 | log = { version = "0.4.17", default-features = false } 12 | nimbus-primitives = { path = "../../nimbus-primitives", default-features = false } 13 | parity-scale-codec = { version = "3.0.0", default-features = false, features = [ "derive" ] } 14 | scale-info = { version = "2.0.0", default-features = false, features = [ "derive" ] } 15 | serde = { version = "1.0.101", default-features = false, features = [ "derive" ] } 16 | sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 17 | sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 18 | sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 19 | 20 | # Benchmarks 21 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", optional = true, default-features = false, branch = "polkadot-v0.9.43" } 22 | 23 | [dev-dependencies] 24 | frame-support-test = { git = "https://github.com/paritytech/substrate", version = "3.0.0", branch = 
"polkadot-v0.9.43" } 25 | sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 26 | 27 | [features] 28 | default = [ "std" ] 29 | std = [ 30 | "frame-benchmarking/std", 31 | "frame-support/std", 32 | "frame-system/std", 33 | "log/std", 34 | "nimbus-primitives/std", 35 | "parity-scale-codec/std", 36 | "scale-info/std", 37 | "serde/std", 38 | "sp-core/std", 39 | "sp-runtime/std", 40 | "sp-std/std", 41 | ] 42 | 43 | runtime-benchmarks = [ "frame-benchmarking", "nimbus-primitives/runtime-benchmarks" ] 44 | 45 | try-runtime = [ "frame-support/try-runtime", "nimbus-primitives/try-runtime" ] 46 | -------------------------------------------------------------------------------- /pallets/author-slot-filter/src/benchmarks.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Moonbeam. 3 | 4 | // Moonbeam is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Moonbeam is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Moonbeam. If not, see . 16 | 17 | #![cfg(feature = "runtime-benchmarks")] 18 | 19 | use crate::num::NonZeroU32; 20 | use crate::{Call, Config, Pallet}; 21 | use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; 22 | use frame_system::RawOrigin; 23 | 24 | benchmarks! 
{ 25 | set_eligible { 26 | let count = NonZeroU32::new_unchecked(34); 27 | }: _(RawOrigin::Root, count.clone()) 28 | verify { 29 | assert_eq!(Pallet::::eligible_count(), count); 30 | } 31 | } 32 | 33 | #[cfg(test)] 34 | mod tests { 35 | use crate::tests::Test; 36 | use sp_io::TestExternalities; 37 | 38 | pub fn new_test_ext() -> TestExternalities { 39 | let t = frame_system::GenesisConfig::default() 40 | .build_storage::() 41 | .unwrap(); 42 | TestExternalities::new(t) 43 | } 44 | } 45 | 46 | impl_benchmark_test_suite!( 47 | Pallet, 48 | crate::benchmarks::tests::new_test_ext(), 49 | crate::tests::Test 50 | ); 51 | -------------------------------------------------------------------------------- /pallets/author-slot-filter/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | //! Small pallet responsible determining which accounts are eligible to author at the current 18 | //! slot. 19 | //! 20 | //! Using a randomness beacon supplied by the `Randomness` trait, this pallet takes the set of 21 | //! currently active accounts from an upstream source, and filters them down to a pseudorandom subset. 22 | //! The current technique gives no preference to any particular author. In the future, we could 23 | //! 
disfavor authors who are authoring a disproportionate amount of the time in an attempt to 24 | //! "even the playing field". 25 | 26 | #![cfg_attr(not(feature = "std"), no_std)] 27 | 28 | use frame_support::pallet; 29 | 30 | pub use pallet::*; 31 | 32 | #[cfg(any(test, feature = "runtime-benchmarks"))] 33 | mod benchmarks; 34 | 35 | pub mod migration; 36 | pub mod num; 37 | pub mod weights; 38 | 39 | #[cfg(test)] 40 | mod mock; 41 | #[cfg(test)] 42 | mod tests; 43 | 44 | #[allow(deprecated)] 45 | #[pallet] 46 | pub mod pallet { 47 | 48 | use crate::num::NonZeroU32; 49 | use crate::weights::WeightInfo; 50 | use frame_support::{pallet_prelude::*, traits::Randomness}; 51 | use frame_system::pallet_prelude::*; 52 | use log::debug; 53 | use nimbus_primitives::CanAuthor; 54 | use sp_core::H256; 55 | use sp_runtime::Percent; 56 | use sp_std::vec::Vec; 57 | 58 | /// The Author Filter pallet 59 | #[pallet::pallet] 60 | #[pallet::without_storage_info] 61 | pub struct Pallet(PhantomData); 62 | 63 | /// Configuration trait of this pallet. 64 | #[pallet::config] 65 | pub trait Config: frame_system::Config { 66 | /// The overarching event type 67 | type RuntimeEvent: From + IsType<::RuntimeEvent>; 68 | /// Deterministic on-chain pseudo-randomness used to do the filtering 69 | type RandomnessSource: Randomness; 70 | //TODO introduce a new trait for exhaustive sets and use it here. 71 | // Oh actually, we can use the same trait. First we call the inner one 72 | // to determine whether this particular author is eligible there. then we 73 | // use the author as part of the subject when querying eligibility. I like this better. 74 | /// A source for the complete set of potential authors. 75 | /// The starting point of the filtering. 76 | type PotentialAuthors: Get>; 77 | type WeightInfo: WeightInfo; 78 | } 79 | 80 | /// Compute a pseudo-random subset of the input accounts by using Pallet's 81 | /// source of randomness, `Config::RandomnessSource`. 
82 | /// Returns (Eligible, Ineligible), each is a set of accounts 83 | pub fn compute_pseudo_random_subset( 84 | mut active: Vec, 85 | seed: &u32, 86 | ) -> (Vec, Vec) { 87 | let mut num_eligible = EligibleCount::::get().get() as usize; 88 | if num_eligible > active.len() { 89 | num_eligible = active.len(); 90 | } 91 | 92 | let mut eligible = Vec::with_capacity(num_eligible); 93 | 94 | for i in 0..num_eligible { 95 | // A context identifier for grabbing the randomness. Consists of three parts 96 | // 1. Constant string *b"filter" - to identify this pallet 97 | // 2. First 2 bytes of index.to_le_bytes when selecting the ith eligible author 98 | // 3. First 4 bytes of seed.to_be_bytes 99 | let mut first_two_bytes_of_index = &i.to_le_bytes()[..2]; 100 | let mut first_four_bytes_of_seed = &seed.to_be_bytes()[..4]; 101 | let mut constant_string: [u8; 6] = [b'f', b'i', b'l', b't', b'e', b'r']; 102 | let mut subject: [u8; 12] = [0u8; 12]; 103 | subject[..6].copy_from_slice(&mut constant_string); 104 | subject[6..8].copy_from_slice(&mut first_two_bytes_of_index); 105 | subject[8..].copy_from_slice(&mut first_four_bytes_of_seed); 106 | let (randomness, _) = T::RandomnessSource::random(&subject); 107 | debug!(target: "author-filter", "🎲Randomness sample {}: {:?}", i, &randomness); 108 | 109 | // Cast to u32 first so we get consistent results on 32- and 64-bit platforms. 110 | let bytes: [u8; 4] = randomness.to_fixed_bytes()[0..4] 111 | .try_into() 112 | .expect("H256 has at least 4 bytes; qed"); 113 | let randomness = u32::from_le_bytes(bytes) as usize; 114 | 115 | // Move the selected author from the original vector into the eligible vector 116 | // TODO we could short-circuit this check by returning early when the claimed 117 | // author is selected. For now I'll leave it like this because: 118 | // 1. it is easier to understand what our core filtering logic is 119 | // 2. 
we currently show the entire filtered set in the debug event 120 | eligible.push(active.remove(randomness % active.len())); 121 | } 122 | (eligible, active) 123 | } 124 | 125 | // This code will be called by the author-inherent pallet to check whether the reported author 126 | // of this block is eligible in this slot. We calculate that result on demand and do not 127 | // record it in storage (although we do emit a debugging event for now). 128 | impl CanAuthor for Pallet { 129 | #[cfg(not(feature = "try-runtime"))] 130 | fn can_author(author: &T::AccountId, slot: &u32) -> bool { 131 | // Compute pseudo-random subset of potential authors 132 | let (eligible, ineligible) = 133 | compute_pseudo_random_subset::(T::PotentialAuthors::get(), slot); 134 | 135 | // Print some logs for debugging purposes. 136 | debug!(target: "author-filter", "Eligible Authors: {:?}", eligible); 137 | debug!(target: "author-filter", "Ineligible Authors: {:?}", &ineligible); 138 | debug!(target: "author-filter", 139 | "Current author, {:?}, is eligible: {}", 140 | author, 141 | eligible.contains(author) 142 | ); 143 | 144 | eligible.contains(author) 145 | } 146 | #[cfg(feature = "runtime-benchmarks")] 147 | fn get_authors(slot: &u32) -> Vec { 148 | // Compute pseudo-random subset of potential authors 149 | let (eligible, _) = compute_pseudo_random_subset::(T::PotentialAuthors::get(), slot); 150 | eligible 151 | } 152 | } 153 | 154 | #[pallet::call] 155 | impl Pallet { 156 | /// Update the eligible count. Intended to be called by governance. 
157 | #[pallet::call_index(0)] 158 | #[pallet::weight(T::WeightInfo::set_eligible())] 159 | pub fn set_eligible( 160 | origin: OriginFor, 161 | new: EligibilityValue, 162 | ) -> DispatchResultWithPostInfo { 163 | ensure_root(origin)?; 164 | EligibleCount::::put(&new); 165 | >::deposit_event(Event::EligibleUpdated(new)); 166 | 167 | Ok(Default::default()) 168 | } 169 | } 170 | 171 | /// The type of eligibility to use 172 | pub type EligibilityValue = NonZeroU32; 173 | 174 | impl EligibilityValue { 175 | /// Default total number of eligible authors, must NOT be 0. 176 | pub fn default() -> Self { 177 | NonZeroU32::new_unchecked(50) 178 | } 179 | } 180 | 181 | #[pallet::storage] 182 | #[pallet::getter(fn eligible_ratio)] 183 | #[deprecated(note = "use `pallet::EligibleCount` instead")] 184 | pub type EligibleRatio = StorageValue<_, Percent, ValueQuery, Half>; 185 | 186 | // Default value for the `EligibleRatio` is one half. 187 | #[pallet::type_value] 188 | pub fn Half() -> Percent { 189 | Percent::from_percent(50) 190 | } 191 | 192 | /// The number of active authors that will be eligible at each height. 193 | #[pallet::storage] 194 | #[pallet::getter(fn eligible_count)] 195 | pub type EligibleCount = 196 | StorageValue<_, EligibilityValue, ValueQuery, DefaultEligibilityValue>; 197 | 198 | // Default value for the `EligibleCount`. 
199 | #[pallet::type_value] 200 | pub fn DefaultEligibilityValue() -> EligibilityValue { 201 | EligibilityValue::default() 202 | } 203 | 204 | #[pallet::genesis_config] 205 | pub struct GenesisConfig { 206 | pub eligible_count: EligibilityValue, 207 | } 208 | 209 | #[cfg(feature = "std")] 210 | impl Default for GenesisConfig { 211 | fn default() -> Self { 212 | Self { 213 | eligible_count: EligibilityValue::default(), 214 | } 215 | } 216 | } 217 | 218 | #[pallet::genesis_build] 219 | impl GenesisBuild for GenesisConfig { 220 | fn build(&self) { 221 | EligibleCount::::put(self.eligible_count.clone()); 222 | } 223 | } 224 | 225 | #[pallet::event] 226 | #[pallet::generate_deposit(fn deposit_event)] 227 | pub enum Event { 228 | /// The amount of eligible authors for the filter to select has been changed. 229 | EligibleUpdated(EligibilityValue), 230 | } 231 | } 232 | -------------------------------------------------------------------------------- /pallets/author-slot-filter/src/migration.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 
16 | 17 | use core::marker::PhantomData; 18 | use frame_support::traits::Get; 19 | use frame_support::traits::OnRuntimeUpgrade; 20 | use frame_support::weights::Weight; 21 | use sp_runtime::Percent; 22 | 23 | use super::num::NonZeroU32; 24 | use super::pallet::Config; 25 | use super::pallet::EligibilityValue; 26 | use super::pallet::EligibleCount; 27 | use super::pallet::Pallet; 28 | 29 | #[cfg(feature = "try-runtime")] 30 | use { 31 | parity_scale_codec::{Decode, Encode}, 32 | sp_std::vec::Vec, 33 | }; 34 | 35 | pub struct EligibleRatioToEligiblityCount(PhantomData); 36 | 37 | impl OnRuntimeUpgrade for EligibleRatioToEligiblityCount 38 | where 39 | T: Config, 40 | { 41 | fn on_runtime_upgrade() -> Weight { 42 | log::info!(target: "EligibleRatioToEligiblityCount", "starting migration"); 43 | 44 | let old_value = >::eligible_ratio(); 45 | let total_authors = ::PotentialAuthors::get().len(); 46 | let new_value = percent_of_num(old_value, total_authors as u32); 47 | let new_value = NonZeroU32::new(new_value).unwrap_or_else(EligibilityValue::default); 48 | >::put(new_value); 49 | 50 | T::DbWeight::get().reads_writes(1, 1) 51 | } 52 | 53 | #[cfg(feature = "try-runtime")] 54 | fn pre_upgrade() -> Result, &'static str> { 55 | let old_value = >::eligible_ratio(); 56 | 57 | let total_authors = ::PotentialAuthors::get().len(); 58 | let new_value = percent_of_num(old_value, total_authors as u32); 59 | let expected_value = NonZeroU32::new(new_value).unwrap_or_else(EligibilityValue::default); 60 | 61 | Ok(expected_value.encode()) 62 | } 63 | 64 | #[cfg(feature = "try-runtime")] 65 | fn post_upgrade(state: Vec) -> Result<(), &'static str> { 66 | let expected: NonZeroU32 = 67 | Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); 68 | 69 | let actual = >::eligible_count(); 70 | 71 | assert_eq!(expected, actual); 72 | 73 | Ok(()) 74 | } 75 | } 76 | 77 | fn percent_of_num(percent: Percent, num: u32) -> u32 { 78 | percent.mul_ceil(num as u32) 79 | } 80 
| 81 | #[cfg(test)] 82 | mod tests { 83 | use super::percent_of_num; 84 | use super::*; 85 | 86 | #[test] 87 | fn test_percent_of_num_ceils_value() { 88 | let fifty_percent = Percent::from_float(0.5); 89 | 90 | let actual = percent_of_num(fifty_percent, 5); 91 | assert_eq!(3, actual); 92 | 93 | let actual = percent_of_num(fifty_percent, 20); 94 | assert_eq!(10, actual); 95 | } 96 | 97 | #[test] 98 | fn test_percent_of_num_hundred_percent_uses_full_value() { 99 | let one_hundred_percent = Percent::from_float(1.0); 100 | 101 | let actual = percent_of_num(one_hundred_percent, 5); 102 | assert_eq!(5, actual); 103 | 104 | let actual = percent_of_num(one_hundred_percent, 20); 105 | assert_eq!(20, actual); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /pallets/author-slot-filter/src/mock.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 
16 | 17 | use crate as pallet_testing; 18 | use frame_support::parameter_types; 19 | use frame_support::sp_io; 20 | use frame_support::traits::ConstU32; 21 | use frame_support::weights::RuntimeDbWeight; 22 | use frame_support_test::TestRandomness; 23 | use sp_core::H256; 24 | use sp_runtime::{ 25 | testing::Header, 26 | traits::{BlakeTwo256, IdentityLookup}, 27 | }; 28 | 29 | type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; 30 | type Block = frame_system::mocking::MockBlock; 31 | 32 | frame_support::construct_runtime!( 33 | pub enum Test where 34 | Block = Block, 35 | NodeBlock = Block, 36 | UncheckedExtrinsic = UncheckedExtrinsic, 37 | { 38 | System: frame_system::{Pallet, Call, Config, Storage, Event}, 39 | AuthorSlotFilter: pallet_testing::{Pallet, Call, Storage, Event}, 40 | } 41 | ); 42 | 43 | parameter_types! { 44 | pub const BlockHashCount: u64 = 250; 45 | pub Authors: Vec = vec![1, 2, 3, 4, 5]; 46 | pub const TestDbWeight: RuntimeDbWeight = RuntimeDbWeight { 47 | read: 1, 48 | write: 10, 49 | }; 50 | } 51 | 52 | impl frame_system::Config for Test { 53 | type BaseCallFilter = frame_support::traits::Everything; 54 | type BlockWeights = (); 55 | type BlockLength = (); 56 | type DbWeight = TestDbWeight; 57 | type RuntimeOrigin = RuntimeOrigin; 58 | type RuntimeCall = RuntimeCall; 59 | type Index = u64; 60 | type BlockNumber = u64; 61 | type Hash = H256; 62 | type Hashing = BlakeTwo256; 63 | type AccountId = u64; 64 | type Lookup = IdentityLookup; 65 | type Header = Header; 66 | type RuntimeEvent = RuntimeEvent; 67 | type BlockHashCount = BlockHashCount; 68 | type Version = (); 69 | type PalletInfo = PalletInfo; 70 | type AccountData = (); 71 | type OnNewAccount = (); 72 | type OnKilledAccount = (); 73 | type SystemWeightInfo = (); 74 | type SS58Prefix = (); 75 | type OnSetCode = (); 76 | type MaxConsumers = ConstU32<16>; 77 | } 78 | 79 | impl pallet_testing::Config for Test { 80 | type RuntimeEvent = RuntimeEvent; 81 | type 
RandomnessSource = TestRandomness; 82 | type PotentialAuthors = Authors; 83 | type WeightInfo = (); 84 | } 85 | 86 | /// Build genesis storage according to the mock runtime. 87 | pub fn new_test_ext() -> sp_io::TestExternalities { 88 | frame_system::GenesisConfig::default() 89 | .build_storage::() 90 | .unwrap() 91 | .into() 92 | } 93 | -------------------------------------------------------------------------------- /pallets/author-slot-filter/src/num.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | //! Implements a [NonZeroU32] type that interplays nicely with the 18 | //! subtrate storage and the SCALE codec. 
19 | 20 | use parity_scale_codec::{Decode, Encode, Error, Input}; 21 | use scale_info::TypeInfo; 22 | use serde::de::Error as DeserializeError; 23 | use serde::{Deserialize, Deserializer, Serialize, Serializer}; 24 | 25 | #[derive(Clone, Debug, TypeInfo, Encode, PartialEq, Eq, PartialOrd, Ord, Hash)] 26 | pub struct NonZeroU32(u32); 27 | 28 | impl core::ops::Deref for NonZeroU32 { 29 | type Target = u32; 30 | fn deref(&self) -> &Self::Target { 31 | &self.0 32 | } 33 | } 34 | 35 | impl parity_scale_codec::EncodeLike for NonZeroU32 {} 36 | 37 | impl NonZeroU32 { 38 | /// Creates a new `Some(NonZeroU32)` instance if value is not 0, `None` otherwise. 39 | #[inline] 40 | pub const fn new(n: u32) -> Option { 41 | if n != 0 { 42 | Some(Self(n)) 43 | } else { 44 | None 45 | } 46 | } 47 | 48 | /// new_unchecked creates a `NonZeroU32` where the user MUST guarantee 49 | /// that the value is nonzero. 50 | #[inline] 51 | pub const fn new_unchecked(n: u32) -> Self { 52 | Self(n) 53 | } 54 | 55 | /// Returns the underlying number 56 | pub fn get(&self) -> u32 { 57 | self.0 58 | } 59 | } 60 | 61 | #[cfg(feature = "std")] 62 | impl Serialize for NonZeroU32 { 63 | fn serialize(&self, serializer: S) -> Result 64 | where 65 | S: Serializer, 66 | { 67 | self.clone().get().serialize(serializer) 68 | } 69 | } 70 | 71 | #[cfg(feature = "std")] 72 | impl<'de> Deserialize<'de> for NonZeroU32 { 73 | fn deserialize(deserializer: D) -> Result 74 | where 75 | D: Deserializer<'de>, 76 | { 77 | let value = Deserialize::deserialize(deserializer)?; 78 | match NonZeroU32::new(value) { 79 | Some(nonzero) => Ok(nonzero), 80 | None => Err(DeserializeError::custom("expected a non-zero value")), 81 | } 82 | } 83 | } 84 | 85 | impl Decode for NonZeroU32 { 86 | fn decode(input: &mut I) -> Result { 87 | Self::new(Decode::decode(input)?)
88 | .ok_or_else(|| Error::from("cannot create non-zero number from 0")) 89 | } 90 | } 91 | 92 | #[cfg(test)] 93 | mod tests { 94 | use super::*; 95 | use parity_scale_codec::Encode; 96 | 97 | #[test] 98 | fn test_new_returns_none_if_zero() { 99 | assert_eq!(None, NonZeroU32::new(0)); 100 | } 101 | 102 | #[test] 103 | fn test_new_returns_some_if_nonzero() { 104 | let n = 10; 105 | let expected = Some(NonZeroU32::new_unchecked(n)); 106 | 107 | let actual = NonZeroU32::new(n); 108 | assert_eq!(expected, actual); 109 | assert_eq!(n, actual.unwrap().get()); 110 | } 111 | 112 | #[test] 113 | fn test_decode_errors_if_zero_value() { 114 | let buf: Vec = 0u32.encode(); 115 | let result = NonZeroU32::decode(&mut &buf[..]); 116 | assert!(result.is_err(), "expected error, got {:?}", result); 117 | } 118 | 119 | #[test] 120 | fn test_decode_succeeds_if_nonzero_value() { 121 | let buf: Vec = 1u32.encode(); 122 | 123 | let result = NonZeroU32::decode(&mut &buf[..]); 124 | assert!(result.is_ok(), "unexpected error, got {:?}", result); 125 | assert_eq!(Ok(NonZeroU32::new_unchecked(1)), result); 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /pallets/author-slot-filter/src/tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Nimbus. 3 | 4 | // Nimbus is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Nimbus is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 
13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Nimbus. If not, see . 16 | 17 | use super::*; 18 | use crate::mock::*; 19 | use crate::num::NonZeroU32; 20 | 21 | use frame_support::assert_ok; 22 | use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; 23 | use sp_runtime::Percent; 24 | 25 | #[test] 26 | fn test_set_eligibility_works() { 27 | new_test_ext().execute_with(|| { 28 | let value = num::NonZeroU32::new_unchecked(34); 29 | 30 | assert_ok!(AuthorSlotFilter::set_eligible( 31 | RuntimeOrigin::root(), 32 | value.clone() 33 | )); 34 | assert_eq!(AuthorSlotFilter::eligible_count(), value) 35 | }); 36 | } 37 | 38 | #[allow(deprecated)] 39 | #[test] 40 | fn test_migration_works_for_converting_existing_eligible_ratio_to_eligible_count() { 41 | new_test_ext().execute_with(|| { 42 | let input_eligible_ratio = Percent::from_percent(50); 43 | let total_author_count = mock::Authors::get().len(); 44 | let eligible_author_count = input_eligible_ratio.mul_ceil(total_author_count) as u32; 45 | let expected_eligible_count = NonZeroU32::new_unchecked(eligible_author_count); 46 | let expected_weight = 47 | Weight::from_parts(TestDbWeight::get().write + TestDbWeight::get().read, 0); 48 | 49 | >::put(input_eligible_ratio); 50 | 51 | let actual_weight = migration::EligibleRatioToEligiblityCount::::on_runtime_upgrade(); 52 | assert_eq!(expected_weight, actual_weight); 53 | 54 | let actual_eligible_ratio_after = AuthorSlotFilter::eligible_ratio(); 55 | let actual_eligible_count = AuthorSlotFilter::eligible_count(); 56 | assert_eq!(expected_eligible_count, actual_eligible_count); 57 | assert_eq!(input_eligible_ratio, actual_eligible_ratio_after); 58 | }); 59 | } 60 | 61 | #[allow(deprecated)] 62 | #[test] 63 | fn test_migration_works_for_converting_existing_zero_eligible_ratio_to_default_eligible_count() { 64 | new_test_ext().execute_with(|| { 65 | let input_eligible_ratio = Percent::from_percent(0); 66 | let 
expected_eligible_count = EligibilityValue::default(); 67 | let expected_weight = 68 | Weight::from_parts(TestDbWeight::get().write + TestDbWeight::get().read, 0); 69 | 70 | >::put(input_eligible_ratio); 71 | 72 | let actual_weight = migration::EligibleRatioToEligiblityCount::::on_runtime_upgrade(); 73 | assert_eq!(expected_weight, actual_weight); 74 | 75 | let actual_eligible_ratio_after = AuthorSlotFilter::eligible_ratio(); 76 | let actual_eligible_count = AuthorSlotFilter::eligible_count(); 77 | assert_eq!(expected_eligible_count, actual_eligible_count); 78 | assert_eq!(input_eligible_ratio, actual_eligible_ratio_after); 79 | }); 80 | } 81 | 82 | #[allow(deprecated)] 83 | #[test] 84 | fn test_migration_inserts_default_value_for_missing_eligible_ratio() { 85 | new_test_ext().execute_with(|| { 86 | let default_eligible_ratio = Percent::from_percent(50); 87 | let expected_default_eligible_count = 88 | NonZeroU32::new_unchecked(default_eligible_ratio.mul_ceil(Authors::get().len() as u32)); 89 | let expected_weight = 90 | Weight::from_parts(TestDbWeight::get().write + TestDbWeight::get().read, 0); 91 | 92 | let actual_weight = migration::EligibleRatioToEligiblityCount::::on_runtime_upgrade(); 93 | assert_eq!(expected_weight, actual_weight); 94 | 95 | let actual_eligible_count = AuthorSlotFilter::eligible_count(); 96 | assert_eq!(expected_default_eligible_count, actual_eligible_count); 97 | }); 98 | } 99 | -------------------------------------------------------------------------------- /pallets/author-slot-filter/src/weights.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 PureStake Inc. 2 | // This file is part of Moonbeam. 3 | 4 | // Moonbeam is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 
8 | 9 | // Moonbeam is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Moonbeam. If not, see . 16 | 17 | 18 | //! Autogenerated weights for pallet_author_slot_filter 19 | //! 20 | //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev 21 | //! DATE: 2023-05-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` 22 | //! WORST CASE MAP SIZE: `1000000` 23 | //! HOSTNAME: `benchmarker`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` 24 | //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 25 | 26 | // Executed Command: 27 | // ./target/release/moonbeam 28 | // benchmark 29 | // pallet 30 | // --execution=wasm 31 | // --wasm-execution=compiled 32 | // --pallet 33 | // * 34 | // --extrinsic 35 | // * 36 | // --steps 37 | // 50 38 | // --repeat 39 | // 20 40 | // --template=./benchmarking/frame-weight-template.hbs 41 | // --json-file 42 | // raw.json 43 | // --output 44 | // weights/ 45 | 46 | #![cfg_attr(rustfmt, rustfmt_skip)] 47 | #![allow(unused_parens)] 48 | #![allow(unused_imports)] 49 | 50 | use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; 51 | use sp_std::marker::PhantomData; 52 | 53 | /// Weight functions needed for pallet_author_slot_filter. 54 | pub trait WeightInfo { 55 | fn set_eligible() -> Weight; 56 | } 57 | 58 | /// Weights for pallet_author_slot_filter using the Substrate node and recommended hardware. 
59 | pub struct SubstrateWeight(PhantomData); 60 | impl WeightInfo for SubstrateWeight { 61 | /// Storage: AuthorFilter EligibleCount (r:0 w:1) 62 | /// Proof Skipped: AuthorFilter EligibleCount (max_values: Some(1), max_size: None, mode: Measured) 63 | fn set_eligible() -> Weight { 64 | // Proof Size summary in bytes: 65 | // Measured: `0` 66 | // Estimated: `0` 67 | // Minimum execution time: 12_558_000 picoseconds. 68 | Weight::from_parts(12_823_000, 0) 69 | .saturating_add(T::DbWeight::get().writes(1_u64)) 70 | } 71 | } 72 | 73 | // For backwards compatibility and tests 74 | impl WeightInfo for () { 75 | /// Storage: AuthorFilter EligibleCount (r:0 w:1) 76 | /// Proof Skipped: AuthorFilter EligibleCount (max_values: Some(1), max_size: None, mode: Measured) 77 | fn set_eligible() -> Weight { 78 | // Proof Size summary in bytes: 79 | // Measured: `0` 80 | // Estimated: `0` 81 | // Minimum execution time: 12_558_000 picoseconds. 82 | Weight::from_parts(12_823_000, 0) 83 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 84 | } 85 | } -------------------------------------------------------------------------------- /parachain-template/LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. 
We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | 24 | For more information, please refer to 25 | -------------------------------------------------------------------------------- /parachain-template/README.md: -------------------------------------------------------------------------------- 1 | # Substrate Cumulus Parachain Template 2 | 3 | A new [Cumulus](https://github.com/paritytech/cumulus/)-based Substrate node, ready for hacking ☁️.. 4 | 5 | This project is a fork of the [Substrate Node Template](https://github.com/substrate-developer-hub/substrate-node-template) 6 | modified to include dependencies required for registering this node as a **parathread** or 7 | **parachain** to the Rococo **relay chain**. 8 | Rococo is [Polkadot's parachain testnet](https://polkadot.network/blog/introducing-rococo-polkadots-parachain-testnet/) 👑. 9 | 10 | 👉 Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains), and 11 | parathreads [here](https://wiki.polkadot.network/docs/learn-parathreads). 12 | 13 | To learn about how to actually use the template to hack together your own parachain check out the 14 | `README` from the [`substrate-parachain-template` repository](https://github.com/substrate-developer-hub/substrate-parachain-template/). 
15 | -------------------------------------------------------------------------------- /parachain-template/node/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "parachain-template-node" 3 | authors = [ "Anonymous" ] 4 | build = "build.rs" 5 | description = "A Substrate node that demonstrates using the Nimbus consensus framework with instant seal and as a parachain." 6 | edition = "2021" 7 | homepage = "https://substrate.dev" 8 | license = "Unlicense" 9 | repository = "https://github.com/paritytech/cumulus/" 10 | version = "0.9.0" 11 | 12 | [package.metadata.docs.rs] 13 | targets = [ "x86_64-unknown-linux-gnu" ] 14 | 15 | [[bin]] 16 | name = "parachain-collator" 17 | path = "src/main.rs" 18 | 19 | [dependencies] 20 | clap = { version = "4.0.9", features = [ "derive" ] } 21 | codec = { package = "parity-scale-codec", version = "3.0.0" } 22 | derive_more = "0.99.2" 23 | flume = "0.10.9" 24 | hex-literal = "0.3.1" 25 | log = "0.4.17" 26 | serde = { version = "1.0.119", features = [ "derive" ] } 27 | 28 | # RPC related Dependencies 29 | jsonrpsee = { version = "0.16.2", features = [ "macros", "server" ] } 30 | 31 | # Local Dependencies 32 | nimbus-consensus = { path = "../../nimbus-consensus" } 33 | nimbus-primitives = { path = "../../nimbus-primitives" } 34 | parachain-template-runtime = { path = "../runtime" } 35 | 36 | # Substrate Dependencies 37 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 38 | frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 39 | 40 | pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 41 | 42 | substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 43 | substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-v0.9.43" } 44 | 45 | ## Substrate Client Dependencies 46 | sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 47 | sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 48 | sc-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 49 | sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 50 | sc-client-db = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 51 | sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 52 | sc-consensus-manual-seal = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 53 | sc-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 54 | sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 55 | sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 56 | sc-network-sync = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 57 | sc-network-common = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 58 | sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 59 | sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 60 | sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 61 | sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 62 | sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 63 | sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 64 | sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-v0.9.43" } 65 | 66 | ## Substrate Primitive Dependencies 67 | sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 68 | sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 69 | sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 70 | #sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.26" } 71 | sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 72 | sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 73 | sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 74 | sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 75 | sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 76 | sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 77 | sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 78 | sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 79 | 80 | # Cumulus dependencies 81 | cumulus-client-cli = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 82 | cumulus-client-collator = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 83 | cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 84 | cumulus-client-network = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 85 | cumulus-client-service = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 86 | cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 87 | 
cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 88 | cumulus-relay-chain-inprocess-interface = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 89 | cumulus-relay-chain-interface = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 90 | cumulus-relay-chain-rpc-interface = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 91 | cumulus-relay-chain-minimal-node = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 92 | 93 | # Polkadot dependencies 94 | polkadot-cli = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43" } 95 | polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43" } 96 | polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43" } 97 | polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43" } 98 | polkadot-test-service = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43" } 99 | 100 | [build-dependencies] 101 | substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 102 | 103 | [features] 104 | runtime-benchmarks = [ "parachain-template-runtime/runtime-benchmarks", "polkadot-service/runtime-benchmarks", "polkadot-test-service/runtime-benchmarks" ] 105 | try-runtime = [ "parachain-template-runtime/try-runtime" ] 106 | -------------------------------------------------------------------------------- /parachain-template/node/build.rs: -------------------------------------------------------------------------------- 1 | use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; 2 | 3 | fn main() { 4 | generate_cargo_keys(); 5 | 6 | rerun_if_git_head_changed(); 7 | } 8 | 
-------------------------------------------------------------------------------- /parachain-template/node/src/chain_spec.rs: -------------------------------------------------------------------------------- 1 | use cumulus_primitives_core::ParaId; 2 | use parachain_template_runtime::{AccountId, NimbusId, Signature}; 3 | use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; 4 | use sc_service::ChainType; 5 | use serde::{Deserialize, Serialize}; 6 | use sp_core::{sr25519, Pair, Public}; 7 | use sp_runtime::traits::{IdentifyAccount, Verify}; 8 | 9 | /// Specialized `ChainSpec` for the normal parachain runtime. 10 | pub type ChainSpec = 11 | sc_service::GenericChainSpec; 12 | 13 | /// Helper function to generate a crypto pair from seed 14 | pub fn get_pair_from_seed(seed: &str) -> ::Public { 15 | TPublic::Pair::from_string(&format!("//{}", seed), None) 16 | .expect("static values are valid; qed") 17 | .public() 18 | } 19 | 20 | /// The extensions for the [`ChainSpec`]. 21 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] 22 | #[serde(deny_unknown_fields)] 23 | pub struct Extensions { 24 | /// The relay chain of the Parachain. 25 | pub relay_chain: String, 26 | /// The id of the Parachain. 27 | pub para_id: u32, 28 | } 29 | 30 | impl Extensions { 31 | /// Try to get the extension from the given `ChainSpec`. 32 | pub fn try_get(chain_spec: &dyn sc_service::ChainSpec) -> Option<&Self> { 33 | sc_chain_spec::get_extension(chain_spec.extensions()) 34 | } 35 | } 36 | 37 | type AccountPublic = ::Signer; 38 | 39 | /// Generate collator keys from seed. 40 | /// 41 | /// This function's return type must always match the session keys of the chain in tuple format. 
42 | pub fn get_collator_keys_from_seed(seed: &str) -> NimbusId { 43 | get_pair_from_seed::(seed) 44 | } 45 | 46 | /// Helper function to generate an account ID from seed 47 | pub fn get_account_id_from_seed(seed: &str) -> AccountId 48 | where 49 | AccountPublic: From<::Public>, 50 | { 51 | AccountPublic::from(get_pair_from_seed::(seed)).into_account() 52 | } 53 | 54 | pub fn development_config() -> ChainSpec { 55 | // Give your base currency a unit name and decimal places 56 | let mut properties = sc_chain_spec::Properties::new(); 57 | properties.insert("tokenSymbol".into(), "ROC".into()); 58 | properties.insert("tokenDecimals".into(), 12.into()); 59 | properties.insert("ss58Format".into(), 42.into()); 60 | 61 | ChainSpec::from_genesis( 62 | // Name 63 | "Development", 64 | // ID 65 | "dev", 66 | ChainType::Development, 67 | move || { 68 | testnet_genesis( 69 | // initial collators. 70 | vec![( 71 | get_account_id_from_seed::("Alice"), 72 | get_collator_keys_from_seed("Alice"), 73 | )], 74 | vec![ 75 | get_account_id_from_seed::("Alice"), 76 | get_account_id_from_seed::("Bob"), 77 | get_account_id_from_seed::("Charlie"), 78 | get_account_id_from_seed::("Dave"), 79 | get_account_id_from_seed::("Eve"), 80 | get_account_id_from_seed::("Ferdie"), 81 | get_account_id_from_seed::("Alice//stash"), 82 | get_account_id_from_seed::("Bob//stash"), 83 | get_account_id_from_seed::("Charlie//stash"), 84 | get_account_id_from_seed::("Dave//stash"), 85 | get_account_id_from_seed::("Eve//stash"), 86 | get_account_id_from_seed::("Ferdie//stash"), 87 | ], 88 | 1000.into(), 89 | ) 90 | }, 91 | vec![], 92 | None, 93 | None, 94 | None, 95 | None, 96 | Extensions { 97 | relay_chain: "rococo-local".into(), // You MUST set this to the correct network! 
98 | para_id: 1000, 99 | }, 100 | ) 101 | } 102 | 103 | pub fn local_testnet_config() -> ChainSpec { 104 | // Give your base currency a unit name and decimal places 105 | let mut properties = sc_chain_spec::Properties::new(); 106 | properties.insert("tokenSymbol".into(), "ROC".into()); 107 | properties.insert("tokenDecimals".into(), 12.into()); 108 | properties.insert("ss58Format".into(), 42.into()); 109 | 110 | ChainSpec::from_genesis( 111 | // Name 112 | "Local Testnet", 113 | // ID 114 | "local_testnet", 115 | ChainType::Local, 116 | move || { 117 | testnet_genesis( 118 | // initial collators. 119 | vec![ 120 | ( 121 | get_account_id_from_seed::("Alice"), 122 | get_collator_keys_from_seed("Alice"), 123 | ), 124 | ( 125 | get_account_id_from_seed::("Bob"), 126 | get_collator_keys_from_seed("Bob"), 127 | ), 128 | ], 129 | vec![ 130 | get_account_id_from_seed::("Alice"), 131 | get_account_id_from_seed::("Bob"), 132 | get_account_id_from_seed::("Charlie"), 133 | get_account_id_from_seed::("Dave"), 134 | get_account_id_from_seed::("Eve"), 135 | get_account_id_from_seed::("Ferdie"), 136 | get_account_id_from_seed::("Alice//stash"), 137 | get_account_id_from_seed::("Bob//stash"), 138 | get_account_id_from_seed::("Charlie//stash"), 139 | get_account_id_from_seed::("Dave//stash"), 140 | get_account_id_from_seed::("Eve//stash"), 141 | get_account_id_from_seed::("Ferdie//stash"), 142 | ], 143 | 1000.into(), 144 | ) 145 | }, 146 | // Bootnodes 147 | vec![], 148 | // Telemetry 149 | None, 150 | // Protocol ID 151 | Some("template-local"), 152 | // Fork ID 153 | None, 154 | // Properties 155 | Some(properties), 156 | // Extensions 157 | Extensions { 158 | relay_chain: "rococo-local".into(), // You MUST set this to the correct network! 
159 | para_id: 1000, 160 | }, 161 | ) 162 | } 163 | 164 | fn testnet_genesis( 165 | authorities: Vec<(AccountId, NimbusId)>, 166 | endowed_accounts: Vec, 167 | id: ParaId, 168 | ) -> parachain_template_runtime::GenesisConfig { 169 | parachain_template_runtime::GenesisConfig { 170 | system: parachain_template_runtime::SystemConfig { 171 | code: parachain_template_runtime::WASM_BINARY 172 | .expect("WASM binary was not build, please build it!") 173 | .to_vec(), 174 | }, 175 | balances: parachain_template_runtime::BalancesConfig { 176 | balances: endowed_accounts 177 | .iter() 178 | .cloned() 179 | .map(|k| (k, 1 << 60)) 180 | .collect(), 181 | }, 182 | parachain_info: parachain_template_runtime::ParachainInfoConfig { parachain_id: id }, 183 | author_filter: parachain_template_runtime::AuthorFilterConfig { 184 | eligible_count: parachain_template_runtime::EligibilityValue::default(), 185 | }, 186 | potential_author_set: parachain_template_runtime::PotentialAuthorSetConfig { 187 | mapping: authorities, 188 | }, 189 | parachain_system: Default::default(), 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /parachain-template/node/src/cli.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | /// Sub-commands supported by the collator. 4 | #[derive(Debug, clap::Subcommand)] 5 | pub enum Subcommand { 6 | /// Build a chain specification. 7 | BuildSpec(sc_cli::BuildSpecCmd), 8 | 9 | /// Validate blocks. 10 | CheckBlock(sc_cli::CheckBlockCmd), 11 | 12 | /// Export blocks. 13 | ExportBlocks(sc_cli::ExportBlocksCmd), 14 | 15 | /// Export the state of a given block into a chain spec. 16 | ExportState(sc_cli::ExportStateCmd), 17 | 18 | /// Import blocks. 19 | ImportBlocks(sc_cli::ImportBlocksCmd), 20 | 21 | /// Revert the chain to a previous state. 22 | Revert(sc_cli::RevertCmd), 23 | 24 | /// Remove the whole chain. 
25 | PurgeChain(cumulus_client_cli::PurgeChainCmd), 26 | 27 | /// Export the genesis state of the parachain. 28 | ExportGenesisState(cumulus_client_cli::ExportGenesisStateCommand), 29 | 30 | /// Export the genesis wasm of the parachain. 31 | ExportGenesisWasm(cumulus_client_cli::ExportGenesisWasmCommand), 32 | 33 | /// Run Instant Seal 34 | RunInstantSeal(sc_cli::RunCmd), 35 | 36 | /// Sub-commands concerned with benchmarking. 37 | /// The pallet benchmarking moved to the `pallet` sub-command. 38 | #[command(subcommand)] 39 | Benchmark(frame_benchmarking_cli::BenchmarkCmd), 40 | } 41 | 42 | #[derive(Debug, clap::Parser)] 43 | #[clap(propagate_version = true)] 44 | #[clap(args_conflicts_with_subcommands = true)] 45 | #[clap(subcommand_negates_reqs = true)] 46 | pub struct Cli { 47 | #[command(subcommand)] 48 | pub subcommand: Option, 49 | 50 | #[command(flatten)] 51 | pub run: cumulus_client_cli::RunCmd, 52 | 53 | /// Relaychain arguments 54 | #[arg(raw = true, value_parser)] 55 | pub relay_chain_args: Vec, 56 | } 57 | 58 | #[derive(Debug)] 59 | pub struct RelayChainCli { 60 | /// The actual relay chain cli object. 61 | pub base: polkadot_cli::RunCmd, 62 | 63 | /// Optional chain id that should be passed to the relay chain. 64 | pub chain_id: Option, 65 | 66 | /// The base path that should be used by the relay chain. 67 | pub base_path: PathBuf, 68 | } 69 | 70 | impl RelayChainCli { 71 | /// Parse the relay chain CLI parameters using the para chain `Configuration`. 
72 | pub fn new<'a>( 73 | para_config: &sc_service::Configuration, 74 | relay_chain_args: impl Iterator, 75 | ) -> Self { 76 | let extension = crate::chain_spec::Extensions::try_get(&*para_config.chain_spec); 77 | let chain_id = extension.map(|e| e.relay_chain.clone()); 78 | let base_path = para_config.base_path.path().join("polkadot"); 79 | Self { 80 | base_path, 81 | chain_id, 82 | base: clap::Parser::parse_from(relay_chain_args), 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /parachain-template/node/src/command.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | 3 | use codec::Encode; 4 | use cumulus_client_cli::generate_genesis_block; 5 | use cumulus_primitives_core::ParaId; 6 | use frame_benchmarking_cli::BenchmarkCmd; 7 | use log::info; 8 | use parachain_template_runtime::{Block, RuntimeApi}; 9 | use sc_cli::{ 10 | ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, 11 | NetworkParams, Result, RuntimeVersion, SharedParams, SubstrateCli, 12 | }; 13 | use sc_service::config::{BasePath, PrometheusConfig}; 14 | use sp_core::hexdisplay::HexDisplay; 15 | use sp_runtime::traits::{AccountIdConversion, Block as BlockT}; 16 | 17 | use crate::{ 18 | chain_spec, 19 | cli::{Cli, RelayChainCli, Subcommand}, 20 | service::{new_partial, TemplateRuntimeExecutor}, 21 | }; 22 | 23 | fn load_spec(id: &str) -> std::result::Result, String> { 24 | Ok(match id { 25 | "dev" => Box::new(chain_spec::development_config()), 26 | "template-rococo" => Box::new(chain_spec::local_testnet_config()), 27 | "" | "local" => Box::new(chain_spec::local_testnet_config()), 28 | path => Box::new(chain_spec::ChainSpec::from_json_file( 29 | std::path::PathBuf::from(path), 30 | )?), 31 | }) 32 | } 33 | 34 | impl SubstrateCli for Cli { 35 | fn impl_name() -> String { 36 | "Parachain Collator Template".into() 37 | } 38 | 39 | fn impl_version() -> 
String { 40 | env!("SUBSTRATE_CLI_IMPL_VERSION").into() 41 | } 42 | 43 | fn description() -> String { 44 | format!( 45 | "Parachain Collator Template\n\nThe command-line arguments provided first will be \ 46 | passed to the parachain node, while the arguments provided after -- will be passed \ 47 | to the relay chain node.\n\n\ 48 | {} -- ", 49 | Self::executable_name() 50 | ) 51 | } 52 | 53 | fn author() -> String { 54 | env!("CARGO_PKG_AUTHORS").into() 55 | } 56 | 57 | fn support_url() -> String { 58 | "https://github.com/paritytech/cumulus/issues/new".into() 59 | } 60 | 61 | fn copyright_start_year() -> i32 { 62 | 2020 63 | } 64 | 65 | fn load_spec(&self, id: &str) -> std::result::Result, String> { 66 | load_spec(id) 67 | } 68 | 69 | fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { 70 | ¶chain_template_runtime::VERSION 71 | } 72 | } 73 | 74 | impl SubstrateCli for RelayChainCli { 75 | fn impl_name() -> String { 76 | "Parachain Collator Template".into() 77 | } 78 | 79 | fn impl_version() -> String { 80 | env!("SUBSTRATE_CLI_IMPL_VERSION").into() 81 | } 82 | 83 | fn description() -> String { 84 | format!( 85 | "Parachain Collator Template\n\nThe command-line arguments provided first will be \ 86 | passed to the parachain node, while the arguments provided after -- will be passed \ 87 | to the relay chain node.\n\n\ 88 | {} -- ", 89 | Self::executable_name() 90 | ) 91 | } 92 | 93 | fn author() -> String { 94 | env!("CARGO_PKG_AUTHORS").into() 95 | } 96 | 97 | fn support_url() -> String { 98 | "https://github.com/paritytech/cumulus/issues/new".into() 99 | } 100 | 101 | fn copyright_start_year() -> i32 { 102 | 2020 103 | } 104 | 105 | fn load_spec(&self, id: &str) -> std::result::Result, String> { 106 | polkadot_cli::Cli::from_iter([RelayChainCli::executable_name()].iter()).load_spec(id) 107 | } 108 | 109 | fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion { 110 | polkadot_cli::Cli::native_runtime_version(chain_spec) 111 | } 112 | 
} 113 | 114 | macro_rules! construct_async_run { 115 | (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ 116 | let runner = $cli.create_runner($cmd)?; 117 | runner.async_run(|$config| { 118 | let $components = new_partial::< 119 | RuntimeApi, 120 | TemplateRuntimeExecutor, 121 | >( 122 | // We default to the non-parachain import queue and select chain. 123 | &$config, false, 124 | )?; 125 | let task_manager = $components.task_manager; 126 | { $( $code )* }.map(|v| (v, task_manager)) 127 | }) 128 | }} 129 | } 130 | 131 | /// Parse command line arguments into service configuration. 132 | pub fn run() -> Result<()> { 133 | let cli = Cli::from_args(); 134 | 135 | match &cli.subcommand { 136 | Some(Subcommand::BuildSpec(cmd)) => { 137 | let runner = cli.create_runner(cmd)?; 138 | runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) 139 | } 140 | Some(Subcommand::CheckBlock(cmd)) => { 141 | construct_async_run!(|components, cli, cmd, config| { 142 | Ok(cmd.run(components.client, components.import_queue)) 143 | }) 144 | } 145 | Some(Subcommand::ExportBlocks(cmd)) => { 146 | construct_async_run!(|components, cli, cmd, config| { 147 | Ok(cmd.run(components.client, config.database)) 148 | }) 149 | } 150 | Some(Subcommand::ExportState(cmd)) => { 151 | construct_async_run!(|components, cli, cmd, config| { 152 | Ok(cmd.run(components.client, config.chain_spec)) 153 | }) 154 | } 155 | Some(Subcommand::ImportBlocks(cmd)) => { 156 | construct_async_run!(|components, cli, cmd, config| { 157 | Ok(cmd.run(components.client, components.import_queue)) 158 | }) 159 | } 160 | Some(Subcommand::Revert(cmd)) => { 161 | construct_async_run!(|components, cli, cmd, config| { 162 | Ok(cmd.run(components.client, components.backend, None)) 163 | }) 164 | } 165 | Some(Subcommand::PurgeChain(cmd)) => { 166 | let runner = cli.create_runner(cmd)?; 167 | 168 | runner.sync_run(|config| { 169 | let polkadot_cli = RelayChainCli::new( 170 | &config, 171 | 
[RelayChainCli::executable_name()] 172 | .iter() 173 | .chain(cli.relay_chain_args.iter()), 174 | ); 175 | 176 | let polkadot_config = SubstrateCli::create_configuration( 177 | &polkadot_cli, 178 | &polkadot_cli, 179 | config.tokio_handle.clone(), 180 | ) 181 | .map_err(|err| format!("Relay chain argument error: {}", err))?; 182 | 183 | cmd.run(config, polkadot_config) 184 | }) 185 | } 186 | Some(Subcommand::ExportGenesisState(cmd)) => { 187 | let runner = cli.create_runner(cmd)?; 188 | runner.sync_run(|_config| { 189 | let spec = cli.load_spec(&cmd.shared_params.chain.clone().unwrap_or_default())?; 190 | let state_version = Cli::native_runtime_version(&spec).state_version(); 191 | cmd.run::(&*spec, state_version) 192 | }) 193 | } 194 | Some(Subcommand::ExportGenesisWasm(cmd)) => { 195 | let runner = cli.create_runner(cmd)?; 196 | runner.sync_run(|_config| { 197 | let spec = cli.load_spec(&cmd.shared_params.chain.clone().unwrap_or_default())?; 198 | cmd.run(&*spec) 199 | }) 200 | } 201 | Some(Subcommand::Benchmark(cmd)) => { 202 | let runner = cli.create_runner(cmd)?; 203 | // Switch on the concrete benchmark sub-command- 204 | match cmd { 205 | BenchmarkCmd::Pallet(cmd) => { 206 | if cfg!(feature = "runtime-benchmarks") { 207 | runner.sync_run(|config| cmd.run::(config)) 208 | } else { 209 | Err("Benchmarking wasn't enabled when building the node. \ 210 | You can enable it with `--features runtime-benchmarks`." 211 | .into()) 212 | } 213 | } 214 | BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { 215 | let partials = 216 | new_partial::(&config, false)?; 217 | cmd.run(partials.client) 218 | }), 219 | #[cfg(not(feature = "runtime-benchmarks"))] 220 | BenchmarkCmd::Storage(_) => { 221 | return Err(sc_cli::Error::Input( 222 | "Compile with --features=runtime-benchmarks \ 223 | to enable storage benchmarks." 
224 | .into(), 225 | ) 226 | .into()) 227 | } 228 | #[cfg(feature = "runtime-benchmarks")] 229 | BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { 230 | let partials = 231 | new_partial::(&config, false)?; 232 | let db = partials.backend.expose_db(); 233 | let storage = partials.backend.expose_storage(); 234 | 235 | cmd.run(config, partials.client.clone(), db, storage) 236 | }), 237 | BenchmarkCmd::Overhead(_) => Err("Unsupported benchmarking command".into()), 238 | BenchmarkCmd::Extrinsic(_) => Err("Unsupported benchmarking command".into()), 239 | BenchmarkCmd::Machine(cmd) => runner.sync_run(|config| { 240 | cmd.run( 241 | &config, 242 | frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE.clone(), 243 | ) 244 | }), 245 | } 246 | } 247 | Some(Subcommand::RunInstantSeal(run_cmd)) => { 248 | let runner = cli.create_runner(run_cmd)?; 249 | runner.run_node_until_exit(|config| async move { 250 | crate::service::start_instant_seal_node(config).map_err(sc_cli::Error::Service) 251 | }) 252 | } 253 | None => { 254 | let runner = cli.create_runner(&cli.run.normalize())?; 255 | let collator_options = cli.run.collator_options(); 256 | 257 | runner.run_node_until_exit(|config| async move { 258 | let para_id = chain_spec::Extensions::try_get(&*config.chain_spec) 259 | .map(|e| e.para_id) 260 | .ok_or_else(|| "Could not find parachain ID in chain-spec.")?; 261 | 262 | let polkadot_cli = RelayChainCli::new( 263 | &config, 264 | [RelayChainCli::executable_name()] 265 | .iter() 266 | .chain(cli.relay_chain_args.iter()), 267 | ); 268 | 269 | let id = ParaId::from(para_id); 270 | 271 | let parachain_account = 272 | AccountIdConversion::::into_account_truncating( 273 | &id, 274 | ); 275 | 276 | let state_version = 277 | RelayChainCli::native_runtime_version(&config.chain_spec).state_version(); 278 | let block: Block = generate_genesis_block(&*config.chain_spec, state_version) 279 | .map_err(|e| format!("{:?}", e))?; 280 | let genesis_state = format!("0x{:?}", 
HexDisplay::from(&block.header().encode())); 281 | 282 | let tokio_handle = config.tokio_handle.clone(); 283 | let polkadot_config = 284 | SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, tokio_handle) 285 | .map_err(|err| format!("Relay chain argument error: {}", err))?; 286 | 287 | info!("Parachain id: {:?}", id); 288 | info!("Parachain Account: {}", parachain_account); 289 | info!("Parachain genesis state: {}", genesis_state); 290 | info!( 291 | "Is collating: {}", 292 | if config.role.is_authority() { 293 | "yes" 294 | } else { 295 | "no" 296 | } 297 | ); 298 | 299 | crate::service::start_parachain_node(config, polkadot_config, collator_options, id) 300 | .await 301 | .map(|r| r.0) 302 | .map_err(Into::into) 303 | }) 304 | } 305 | } 306 | } 307 | 308 | impl DefaultConfigurationValues for RelayChainCli { 309 | fn p2p_listen_port() -> u16 { 310 | 30334 311 | } 312 | 313 | fn rpc_listen_port() -> u16 { 314 | 9945 315 | } 316 | 317 | fn prometheus_listen_port() -> u16 { 318 | 9616 319 | } 320 | } 321 | 322 | impl CliConfiguration for RelayChainCli { 323 | fn shared_params(&self) -> &SharedParams { 324 | self.base.base.shared_params() 325 | } 326 | 327 | fn import_params(&self) -> Option<&ImportParams> { 328 | self.base.base.import_params() 329 | } 330 | 331 | fn network_params(&self) -> Option<&NetworkParams> { 332 | self.base.base.network_params() 333 | } 334 | 335 | fn keystore_params(&self) -> Option<&KeystoreParams> { 336 | self.base.base.keystore_params() 337 | } 338 | 339 | fn base_path(&self) -> Result> { 340 | Ok(self 341 | .shared_params() 342 | .base_path()? 
343 | .or_else(|| Some(self.base_path.clone().into()))) 344 | } 345 | 346 | fn rpc_addr(&self, default_listen_port: u16) -> Result> { 347 | self.base.base.rpc_addr(default_listen_port) 348 | } 349 | 350 | fn prometheus_config( 351 | &self, 352 | default_listen_port: u16, 353 | chain_spec: &Box, 354 | ) -> Result> { 355 | self.base 356 | .base 357 | .prometheus_config(default_listen_port, chain_spec) 358 | } 359 | 360 | fn init( 361 | &self, 362 | _support_url: &String, 363 | _impl_version: &String, 364 | _logger_hook: F, 365 | _config: &sc_service::Configuration, 366 | ) -> Result<()> 367 | where 368 | F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration), 369 | { 370 | unreachable!("PolkadotCli is never initialized; qed"); 371 | } 372 | 373 | fn chain_id(&self, is_dev: bool) -> Result { 374 | let chain_id = self.base.base.chain_id(is_dev)?; 375 | 376 | Ok(if chain_id.is_empty() { 377 | self.chain_id.clone().unwrap_or_default() 378 | } else { 379 | chain_id 380 | }) 381 | } 382 | 383 | fn role(&self, is_dev: bool) -> Result { 384 | self.base.base.role(is_dev) 385 | } 386 | 387 | fn transaction_pool(&self, is_dev: bool) -> Result { 388 | self.base.base.transaction_pool(is_dev) 389 | } 390 | 391 | fn rpc_methods(&self) -> Result { 392 | self.base.base.rpc_methods() 393 | } 394 | 395 | fn rpc_max_connections(&self) -> Result { 396 | self.base.base.rpc_max_connections() 397 | } 398 | 399 | fn rpc_cors(&self, is_dev: bool) -> Result>> { 400 | self.base.base.rpc_cors(is_dev) 401 | } 402 | 403 | fn default_heap_pages(&self) -> Result> { 404 | self.base.base.default_heap_pages() 405 | } 406 | 407 | fn force_authoring(&self) -> Result { 408 | self.base.base.force_authoring() 409 | } 410 | 411 | fn disable_grandpa(&self) -> Result { 412 | self.base.base.disable_grandpa() 413 | } 414 | 415 | fn max_runtime_instances(&self) -> Result> { 416 | self.base.base.max_runtime_instances() 417 | } 418 | 419 | fn announce_block(&self) -> Result { 420 | 
self.base.base.announce_block() 421 | } 422 | 423 | fn telemetry_endpoints( 424 | &self, 425 | chain_spec: &Box, 426 | ) -> Result> { 427 | self.base.base.telemetry_endpoints(chain_spec) 428 | } 429 | } 430 | -------------------------------------------------------------------------------- /parachain-template/node/src/main.rs: -------------------------------------------------------------------------------- 1 | //! Substrate Node CLI library. 2 | 3 | #![warn(missing_docs)] 4 | 5 | mod chain_spec; 6 | #[macro_use] 7 | mod service; 8 | mod cli; 9 | mod command; 10 | mod rpc; 11 | 12 | fn main() -> sc_cli::Result<()> { 13 | command::run() 14 | } 15 | -------------------------------------------------------------------------------- /parachain-template/node/src/rpc.rs: -------------------------------------------------------------------------------- 1 | //! A collection of node-specific RPC methods. 2 | //! Substrate provides the `sc-rpc` crate, which defines the core RPC layer 3 | //! used by Substrate nodes. This file extends those RPC definitions with 4 | //! capabilities that are specific to this project's runtime configuration. 5 | 6 | #![warn(missing_docs)] 7 | 8 | use std::sync::Arc; 9 | 10 | use parachain_template_runtime::{opaque::Block, AccountId, Balance, Index as Nonce}; 11 | 12 | use sc_client_api::AuxStore; 13 | pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}; 14 | use sc_transaction_pool_api::TransactionPool; 15 | use sp_api::ProvideRuntimeApi; 16 | use sp_block_builder::BlockBuilder; 17 | use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; 18 | 19 | /// A type representing all RPC extensions. 20 | pub type RpcExtension = jsonrpsee::RpcModule<()>; 21 | 22 | /// Full client dependencies 23 | pub struct FullDeps { 24 | /// The client instance to use. 25 | pub client: Arc, 26 | /// Transaction pool instance. 27 | pub pool: Arc

, 28 | /// Whether to deny unsafe calls 29 | pub deny_unsafe: DenyUnsafe, 30 | } 31 | 32 | /// Instantiate all RPC extensions. 33 | pub fn create_full( 34 | deps: FullDeps, 35 | ) -> Result> 36 | where 37 | C: ProvideRuntimeApi 38 | + HeaderBackend 39 | + AuxStore 40 | + HeaderMetadata 41 | + Send 42 | + Sync 43 | + 'static, 44 | C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, 45 | C::Api: substrate_frame_rpc_system::AccountNonceApi, 46 | C::Api: BlockBuilder, 47 | P: TransactionPool + Sync + Send + 'static, 48 | { 49 | use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; 50 | use substrate_frame_rpc_system::{System, SystemApiServer}; 51 | 52 | let mut module = RpcExtension::new(()); 53 | let FullDeps { 54 | client, 55 | pool, 56 | deny_unsafe, 57 | } = deps; 58 | 59 | module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; 60 | module.merge(TransactionPayment::new(client).into_rpc())?; 61 | 62 | Ok(module) 63 | } 64 | -------------------------------------------------------------------------------- /parachain-template/node/src/service.rs: -------------------------------------------------------------------------------- 1 | //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
2 | 3 | // std 4 | use std::{sync::Arc, time::Duration}; 5 | 6 | // Local Runtime Types 7 | use parachain_template_runtime::{opaque::Block, AccountId, Balance, Index as Nonce, RuntimeApi}; 8 | 9 | use nimbus_consensus::{ 10 | BuildNimbusConsensusParams, NimbusConsensus, NimbusManualSealConsensusDataProvider, 11 | }; 12 | 13 | // Cumulus Imports 14 | use cumulus_client_cli::CollatorOptions; 15 | use cumulus_client_consensus_common::ParachainConsensus; 16 | use cumulus_client_network::BlockAnnounceValidator; 17 | use cumulus_client_service::{ 18 | prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, 19 | }; 20 | use cumulus_primitives_core::ParaId; 21 | use cumulus_primitives_parachain_inherent::{ 22 | MockValidationDataInherentDataProvider, MockXcmConfig, 23 | }; 24 | use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; 25 | use cumulus_relay_chain_interface::{RelayChainInterface, RelayChainResult}; 26 | use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node; 27 | 28 | use polkadot_service::CollatorPair; 29 | 30 | // Substrate Imports 31 | use sc_consensus::ImportQueue; 32 | use sc_consensus_manual_seal::{run_instant_seal, InstantSealParams}; 33 | use sc_executor::{ 34 | HeapAllocStrategy, NativeElseWasmExecutor, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, 35 | }; 36 | use sc_network::{config::FullNetworkConfiguration, NetworkBlock}; 37 | use sc_network_sync::SyncingService; 38 | use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; 39 | use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; 40 | use sp_api::ConstructRuntimeApi; 41 | use sp_keystore::KeystorePtr; 42 | use sp_runtime::traits::BlakeTwo256; 43 | use substrate_prometheus_endpoint::Registry; 44 | 45 | /// Native executor instance. 
46 | pub struct TemplateRuntimeExecutor; 47 | 48 | impl sc_executor::NativeExecutionDispatch for TemplateRuntimeExecutor { 49 | type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; 50 | 51 | fn dispatch(method: &str, data: &[u8]) -> Option> { 52 | parachain_template_runtime::api::dispatch(method, data) 53 | } 54 | 55 | fn native_version() -> sc_executor::NativeVersion { 56 | parachain_template_runtime::native_version() 57 | } 58 | } 59 | 60 | /// Starts a `ServiceBuilder` for a full service. 61 | /// 62 | /// Use this macro if you don't actually need the full service, but just the builder in order to 63 | /// be able to perform chain operations. 64 | #[allow(clippy::type_complexity)] 65 | pub fn new_partial( 66 | config: &Configuration, 67 | parachain: bool, 68 | ) -> Result< 69 | PartialComponents< 70 | TFullClient>, 71 | TFullBackend, 72 | sc_consensus::LongestChain, Block>, 73 | sc_consensus::DefaultImportQueue< 74 | Block, 75 | TFullClient>, 76 | >, 77 | sc_transaction_pool::FullPool< 78 | Block, 79 | TFullClient>, 80 | >, 81 | (Option, Option), 82 | >, 83 | sc_service::Error, 84 | > 85 | where 86 | RuntimeApi: ConstructRuntimeApi>> 87 | + Send 88 | + Sync 89 | + 'static, 90 | RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue 91 | + sp_api::Metadata 92 | + sp_session::SessionKeys 93 | + sp_api::ApiExt< 94 | Block, 95 | StateBackend = sc_client_api::StateBackendFor, Block>, 96 | > + sp_offchain::OffchainWorkerApi 97 | + sp_block_builder::BlockBuilder, 98 | sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, 99 | Executor: sc_executor::NativeExecutionDispatch + 'static, 100 | { 101 | let telemetry = config 102 | .telemetry_endpoints 103 | .clone() 104 | .filter(|x| !x.is_empty()) 105 | .map(|endpoints| -> Result<_, sc_telemetry::Error> { 106 | let worker = TelemetryWorker::new(16)?; 107 | let telemetry = worker.handle().new_telemetry(endpoints); 108 | Ok((worker, telemetry)) 109 | }) 110 | 
.transpose()?; 111 | 112 | let heap_pages = config 113 | .default_heap_pages 114 | .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { 115 | extra_pages: h as _, 116 | }); 117 | 118 | let wasm = WasmExecutor::builder() 119 | .with_execution_method(config.wasm_method) 120 | .with_onchain_heap_alloc_strategy(heap_pages) 121 | .with_offchain_heap_alloc_strategy(heap_pages) 122 | .with_max_runtime_instances(config.max_runtime_instances) 123 | .with_runtime_cache_size(config.runtime_cache_size) 124 | .build(); 125 | 126 | let executor = sc_executor::NativeElseWasmExecutor::::new_with_wasm_executor(wasm); 127 | 128 | let (client, backend, keystore_container, task_manager) = 129 | sc_service::new_full_parts::( 130 | config, 131 | telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), 132 | executor, 133 | )?; 134 | let client = Arc::new(client); 135 | 136 | let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); 137 | 138 | let telemetry = telemetry.map(|(worker, telemetry)| { 139 | task_manager 140 | .spawn_handle() 141 | .spawn("telemetry", None, worker.run()); 142 | telemetry 143 | }); 144 | 145 | // Although this will not be used by the parachain collator, it will be used by the instant seal 146 | // And sovereign nodes, so we create it anyway. 
147 | let select_chain = sc_consensus::LongestChain::new(backend.clone()); 148 | 149 | let transaction_pool = sc_transaction_pool::BasicPool::new_full( 150 | config.transaction_pool.clone(), 151 | config.role.is_authority().into(), 152 | config.prometheus_registry(), 153 | task_manager.spawn_essential_handle(), 154 | client.clone(), 155 | ); 156 | 157 | let import_queue = nimbus_consensus::import_queue( 158 | client.clone(), 159 | client.clone(), 160 | move |_, _| async move { 161 | let time = sp_timestamp::InherentDataProvider::from_system_time(); 162 | 163 | Ok((time,)) 164 | }, 165 | &task_manager.spawn_essential_handle(), 166 | config.prometheus_registry().clone(), 167 | parachain, 168 | )?; 169 | 170 | let params = PartialComponents { 171 | backend, 172 | client, 173 | import_queue, 174 | keystore_container, 175 | task_manager, 176 | transaction_pool, 177 | select_chain, 178 | other: (telemetry, telemetry_worker_handle), 179 | }; 180 | 181 | Ok(params) 182 | } 183 | 184 | async fn build_relay_chain_interface( 185 | polkadot_config: Configuration, 186 | parachain_config: &Configuration, 187 | telemetry_worker_handle: Option, 188 | task_manager: &mut TaskManager, 189 | collator_options: CollatorOptions, 190 | ) -> RelayChainResult<( 191 | Arc<(dyn RelayChainInterface + 'static)>, 192 | Option, 193 | )> { 194 | if !collator_options.relay_chain_rpc_urls.is_empty() { 195 | build_minimal_relay_chain_node( 196 | polkadot_config, 197 | task_manager, 198 | collator_options.relay_chain_rpc_urls, 199 | ) 200 | .await 201 | } else { 202 | build_inprocess_relay_chain( 203 | polkadot_config, 204 | parachain_config, 205 | telemetry_worker_handle, 206 | task_manager, 207 | None, 208 | ) 209 | } 210 | } 211 | 212 | /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. 213 | /// 214 | /// This is the actual implementation that is abstract over the executor and the runtime api. 
215 | #[sc_tracing::logging::prefix_logs_with("Parachain")] 216 | async fn start_node_impl( 217 | parachain_config: Configuration, 218 | polkadot_config: Configuration, 219 | collator_options: CollatorOptions, 220 | id: ParaId, 221 | _rpc_ext_builder: RB, 222 | build_consensus: BIC, 223 | ) -> sc_service::error::Result<( 224 | TaskManager, 225 | Arc>>, 226 | )> 227 | where 228 | RuntimeApi: ConstructRuntimeApi>> 229 | + Send 230 | + Sync 231 | + 'static, 232 | RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue 233 | + sp_api::Metadata 234 | + sp_session::SessionKeys 235 | + sp_api::ApiExt< 236 | Block, 237 | StateBackend = sc_client_api::StateBackendFor, Block>, 238 | > + sp_offchain::OffchainWorkerApi 239 | + sp_block_builder::BlockBuilder 240 | + cumulus_primitives_core::CollectCollationInfo 241 | + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi 242 | + substrate_frame_rpc_system::AccountNonceApi, 243 | sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, 244 | Executor: sc_executor::NativeExecutionDispatch + 'static, 245 | RB: Fn( 246 | Arc>, 247 | ) -> Result 248 | + Send 249 | + 'static, 250 | BIC: FnOnce( 251 | Arc>>, 252 | Arc>, 253 | Option<&Registry>, 254 | Option, 255 | &TaskManager, 256 | Arc, 257 | Arc< 258 | sc_transaction_pool::FullPool< 259 | Block, 260 | TFullClient>, 261 | >, 262 | >, 263 | Arc>, 264 | KeystorePtr, 265 | bool, 266 | ) -> Result>, sc_service::Error>, 267 | { 268 | let parachain_config = prepare_node_config(parachain_config); 269 | 270 | let params = new_partial::(¶chain_config, true)?; 271 | let (mut telemetry, telemetry_worker_handle) = params.other; 272 | 273 | let client = params.client.clone(); 274 | let backend = params.backend.clone(); 275 | let mut task_manager = params.task_manager; 276 | 277 | let (relay_chain_interface, collator_key) = build_relay_chain_interface( 278 | polkadot_config, 279 | ¶chain_config, 280 | telemetry_worker_handle, 281 | &mut task_manager, 282 | 
collator_options.clone(), 283 | ) 284 | .await 285 | .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; 286 | 287 | let block_announce_validator = BlockAnnounceValidator::new(relay_chain_interface.clone(), id); 288 | 289 | let force_authoring = parachain_config.force_authoring; 290 | let validator = parachain_config.role.is_authority(); 291 | let prometheus_registry = parachain_config.prometheus_registry().cloned(); 292 | let transaction_pool = params.transaction_pool.clone(); 293 | let import_queue_service = params.import_queue.service(); 294 | 295 | let net_config = FullNetworkConfiguration::new(¶chain_config.network); 296 | 297 | let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = 298 | sc_service::build_network(sc_service::BuildNetworkParams { 299 | config: ¶chain_config, 300 | client: client.clone(), 301 | transaction_pool: transaction_pool.clone(), 302 | spawn_handle: task_manager.spawn_handle(), 303 | import_queue: params.import_queue, 304 | block_announce_validator_builder: Some(Box::new(|_| { 305 | Box::new(block_announce_validator) 306 | })), 307 | warp_sync_params: None, 308 | net_config, 309 | })?; 310 | 311 | let rpc_extensions_builder = { 312 | let client = client.clone(); 313 | let transaction_pool = transaction_pool.clone(); 314 | 315 | Box::new(move |deny_unsafe, _| { 316 | let deps = crate::rpc::FullDeps { 317 | client: client.clone(), 318 | pool: transaction_pool.clone(), 319 | deny_unsafe, 320 | }; 321 | 322 | crate::rpc::create_full(deps).map_err(Into::into) 323 | }) 324 | }; 325 | 326 | sc_service::spawn_tasks(sc_service::SpawnTasksParams { 327 | rpc_builder: rpc_extensions_builder, 328 | client: client.clone(), 329 | transaction_pool: transaction_pool.clone(), 330 | task_manager: &mut task_manager, 331 | config: parachain_config, 332 | keystore: params.keystore_container.keystore(), 333 | backend: backend.clone(), 334 | network: network.clone(), 335 | sync_service: sync_service.clone(), 336 | 
system_rpc_tx, 337 | tx_handler_controller, 338 | telemetry: telemetry.as_mut(), 339 | })?; 340 | 341 | let announce_block = { 342 | let sync_service = sync_service.clone(); 343 | Arc::new(move |hash, data| sync_service.announce_block(hash, data)) 344 | }; 345 | 346 | let relay_chain_slot_duration = Duration::from_secs(6); 347 | let overseer_handle = relay_chain_interface 348 | .overseer_handle() 349 | .map_err(|e| sc_service::Error::Application(Box::new(e)))?; 350 | 351 | if validator { 352 | let parachain_consensus = build_consensus( 353 | client.clone(), 354 | backend.clone(), 355 | prometheus_registry.as_ref(), 356 | telemetry.as_ref().map(|t| t.handle()), 357 | &task_manager, 358 | relay_chain_interface.clone(), 359 | transaction_pool, 360 | sync_service.clone(), 361 | params.keystore_container.keystore(), 362 | force_authoring, 363 | )?; 364 | 365 | let spawner = task_manager.spawn_handle(); 366 | 367 | let params = StartCollatorParams { 368 | para_id: id, 369 | block_status: client.clone(), 370 | announce_block, 371 | client: client.clone(), 372 | task_manager: &mut task_manager, 373 | relay_chain_interface, 374 | spawner, 375 | parachain_consensus, 376 | import_queue: import_queue_service, 377 | recovery_handle: Box::new(overseer_handle), 378 | collator_key: collator_key.expect("Command line arguments do not allow this. qed"), 379 | relay_chain_slot_duration, 380 | sync_service, 381 | }; 382 | 383 | start_collator(params).await?; 384 | } else { 385 | let params = StartFullNodeParams { 386 | client: client.clone(), 387 | announce_block, 388 | task_manager: &mut task_manager, 389 | para_id: id, 390 | relay_chain_interface, 391 | relay_chain_slot_duration, 392 | import_queue: import_queue_service, 393 | recovery_handle: Box::new(overseer_handle), 394 | sync_service, 395 | }; 396 | 397 | start_full_node(params)?; 398 | } 399 | 400 | start_network.start_network(); 401 | 402 | Ok((task_manager, client)) 403 | } 404 | 405 | /// Start a parachain node. 
406 | pub async fn start_parachain_node( 407 | parachain_config: Configuration, 408 | polkadot_config: Configuration, 409 | collator_options: CollatorOptions, 410 | id: ParaId, 411 | ) -> sc_service::error::Result<( 412 | TaskManager, 413 | Arc>>, 414 | )> { 415 | start_node_impl::( 416 | parachain_config, 417 | polkadot_config, 418 | collator_options, 419 | id, 420 | |_| Ok(crate::rpc::RpcExtension::new(())), 421 | |client, 422 | backend, 423 | prometheus_registry, 424 | telemetry, 425 | task_manager, 426 | relay_chain_interface, 427 | transaction_pool, 428 | _sync_oracle, 429 | keystore, 430 | force_authoring| { 431 | let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( 432 | task_manager.spawn_handle(), 433 | client.clone(), 434 | transaction_pool, 435 | prometheus_registry, 436 | telemetry.clone(), 437 | ); 438 | 439 | Ok(NimbusConsensus::build(BuildNimbusConsensusParams { 440 | para_id: id, 441 | proposer_factory, 442 | block_import: client.clone(), 443 | backend, 444 | parachain_client: client.clone(), 445 | keystore, 446 | skip_prediction: force_authoring, 447 | create_inherent_data_providers: move |_, 448 | ( 449 | relay_parent, 450 | validation_data, 451 | _author_id, 452 | )| { 453 | let relay_chain_interface = relay_chain_interface.clone(); 454 | async move { 455 | let parachain_inherent = 456 | cumulus_primitives_parachain_inherent::ParachainInherentData::create_at( 457 | relay_parent, 458 | &relay_chain_interface, 459 | &validation_data, 460 | id, 461 | ).await; 462 | 463 | let time = sp_timestamp::InherentDataProvider::from_system_time(); 464 | 465 | let parachain_inherent = parachain_inherent.ok_or_else(|| { 466 | Box::::from( 467 | "Failed to create parachain inherent", 468 | ) 469 | })?; 470 | 471 | let nimbus_inherent = nimbus_primitives::InherentDataProvider; 472 | 473 | Ok((time, parachain_inherent, nimbus_inherent)) 474 | } 475 | }, 476 | additional_digests_provider: (), 477 | })) 478 | }, 479 | ) 480 | .await 481 
| } 482 | 483 | /// Builds a new service for a full client. 484 | pub fn start_instant_seal_node(config: Configuration) -> Result { 485 | let sc_service::PartialComponents { 486 | client, 487 | backend, 488 | mut task_manager, 489 | import_queue, 490 | keystore_container, 491 | select_chain, 492 | transaction_pool, 493 | other: (mut telemetry, _), 494 | } = new_partial::(&config, false)?; 495 | 496 | let net_config = FullNetworkConfiguration::new(&config.network); 497 | 498 | let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = 499 | sc_service::build_network(sc_service::BuildNetworkParams { 500 | config: &config, 501 | client: client.clone(), 502 | transaction_pool: transaction_pool.clone(), 503 | spawn_handle: task_manager.spawn_handle(), 504 | import_queue, 505 | block_announce_validator_builder: None, 506 | warp_sync_params: None, 507 | net_config, 508 | })?; 509 | 510 | if config.offchain_worker.enabled { 511 | sc_service::build_offchain_workers( 512 | &config, 513 | task_manager.spawn_handle(), 514 | client.clone(), 515 | network.clone(), 516 | ); 517 | } 518 | 519 | let is_authority = config.role.is_authority(); 520 | let prometheus_registry = config.prometheus_registry().cloned(); 521 | 522 | let rpc_extensions_builder = { 523 | let client = client.clone(); 524 | let transaction_pool = transaction_pool.clone(); 525 | 526 | Box::new(move |deny_unsafe, _| { 527 | let deps = crate::rpc::FullDeps { 528 | client: client.clone(), 529 | pool: transaction_pool.clone(), 530 | deny_unsafe, 531 | }; 532 | 533 | crate::rpc::create_full(deps).map_err(Into::into) 534 | }) 535 | }; 536 | 537 | sc_service::spawn_tasks(sc_service::SpawnTasksParams { 538 | network, 539 | client: client.clone(), 540 | keystore: keystore_container.keystore(), 541 | task_manager: &mut task_manager, 542 | transaction_pool: transaction_pool.clone(), 543 | rpc_builder: rpc_extensions_builder, 544 | backend, 545 | system_rpc_tx, 546 | sync_service: 
sync_service.clone(), 547 | config, 548 | tx_handler_controller, 549 | telemetry: telemetry.as_mut(), 550 | })?; 551 | 552 | if is_authority { 553 | let proposer = sc_basic_authorship::ProposerFactory::new( 554 | task_manager.spawn_handle(), 555 | client.clone(), 556 | transaction_pool.clone(), 557 | prometheus_registry.as_ref(), 558 | telemetry.as_ref().map(|t| t.handle()), 559 | ); 560 | 561 | let client_set_aside_for_cidp = client.clone(); 562 | 563 | // Create channels for mocked XCM messages. 564 | let (_downward_xcm_sender, downward_xcm_receiver) = flume::bounded::>(100); 565 | let (_hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec)>(100); 566 | 567 | let authorship_future = run_instant_seal(InstantSealParams { 568 | block_import: client.clone(), 569 | env: proposer, 570 | client: client.clone(), 571 | pool: transaction_pool.clone(), 572 | select_chain, 573 | consensus_data_provider: Some(Box::new(NimbusManualSealConsensusDataProvider { 574 | keystore: keystore_container.keystore(), 575 | client: client.clone(), 576 | additional_digests_provider: (), 577 | _phantom: Default::default(), 578 | })), 579 | create_inherent_data_providers: move |block, _extra_args| { 580 | let downward_xcm_receiver = downward_xcm_receiver.clone(); 581 | let hrmp_xcm_receiver = hrmp_xcm_receiver.clone(); 582 | 583 | let client_for_xcm = client_set_aside_for_cidp.clone(); 584 | 585 | async move { 586 | let time = sp_timestamp::InherentDataProvider::from_system_time(); 587 | 588 | // The nimbus runtime is shared among all nodes including the parachain node. 589 | // Because this is not a parachain context, we need to mock the parachain inherent data provider. 
590 | //TODO might need to go back and get the block number like how I do in Moonbeam 591 | let mocked_parachain = MockValidationDataInherentDataProvider { 592 | current_para_block: 0, 593 | relay_offset: 0, 594 | relay_blocks_per_para_block: 0, 595 | para_blocks_per_relay_epoch: 0, 596 | relay_randomness_config: (), 597 | xcm_config: MockXcmConfig::new( 598 | &*client_for_xcm, 599 | block, 600 | Default::default(), 601 | Default::default(), 602 | ), 603 | raw_downward_messages: downward_xcm_receiver.drain().collect(), 604 | raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), 605 | }; 606 | 607 | Ok((time, mocked_parachain)) 608 | } 609 | }, 610 | }); 611 | 612 | task_manager.spawn_essential_handle().spawn_blocking( 613 | "instant-seal", 614 | None, 615 | authorship_future, 616 | ); 617 | }; 618 | 619 | network_starter.start_network(); 620 | Ok(task_manager) 621 | } 622 | -------------------------------------------------------------------------------- /parachain-template/pallets/template/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-template" 3 | authors = [ "Anonymous" ] 4 | description = "FRAME pallet template for defining custom runtime logic." 
5 | edition = "2021" 6 | homepage = "https://substrate.dev" 7 | license = "Unlicense" 8 | repository = "https://github.com/paritytech/substrate/" 9 | version = "0.1.0" 10 | 11 | [package.metadata.docs.rs] 12 | targets = [ "x86_64-unknown-linux-gnu" ] 13 | 14 | [dependencies] 15 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } 16 | scale-info = { version = "2.0.0", default-features = false, features = [ "derive" ] } 17 | 18 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", optional = true, default-features = false, branch = "polkadot-v0.9.43" } 19 | frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 20 | frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 21 | 22 | [dev-dependencies] 23 | serde = { version = "1.0.119" } 24 | sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 25 | sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 26 | sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 27 | 28 | [features] 29 | default = [ "std" ] 30 | std = [ 31 | "codec/std", 32 | "frame-benchmarking/std", 33 | "frame-support/std", 34 | "frame-system/std", 35 | "scale-info/std", 36 | ] 37 | runtime-benchmarks = [ "frame-benchmarking" ] 38 | -------------------------------------------------------------------------------- /parachain-template/pallets/template/README.md: -------------------------------------------------------------------------------- 1 | License: Unlicense 2 | -------------------------------------------------------------------------------- /parachain-template/pallets/template/src/benchmarking.rs: 
-------------------------------------------------------------------------------- 1 | //! Benchmarking setup for pallet-template 2 | 3 | use super::*; 4 | 5 | #[allow(unused)] 6 | use crate::Pallet; 7 | use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; 8 | use frame_system::RawOrigin; 9 | 10 | benchmarks! { 11 | do_something { 12 | let s in 0 .. 100; 13 | let caller: T::AccountId = whitelisted_caller(); 14 | }: _(RawOrigin::Signed(caller), s) 15 | verify { 16 | assert_eq!(Something::::get(), Some(s)); 17 | } 18 | } 19 | 20 | impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test,); 21 | -------------------------------------------------------------------------------- /parachain-template/pallets/template/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "std"), no_std)] 2 | 3 | /// Edit this file to define custom logic or remove it if it is not needed. 4 | /// Learn more about FRAME and the core library of Substrate FRAME pallets: 5 | /// 6 | pub use pallet::*; 7 | 8 | #[cfg(test)] 9 | mod mock; 10 | 11 | #[cfg(test)] 12 | mod tests; 13 | 14 | #[cfg(feature = "runtime-benchmarks")] 15 | mod benchmarking; 16 | 17 | #[frame_support::pallet] 18 | pub mod pallet { 19 | use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*}; 20 | use frame_system::pallet_prelude::*; 21 | 22 | /// Configure the pallet by specifying the parameters and types on which it depends. 23 | #[pallet::config] 24 | pub trait Config: frame_system::Config { 25 | /// Because this pallet emits events, it depends on the runtime's definition of an event. 26 | type RuntimeEvent: From> + IsType<::RuntimeEvent>; 27 | } 28 | 29 | #[pallet::pallet] 30 | pub struct Pallet(_); 31 | 32 | // The pallet's runtime storage items. 
33 | // https://substrate.dev/docs/en/knowledgebase/runtime/storage 34 | #[pallet::storage] 35 | #[pallet::getter(fn something)] 36 | // Learn more about declaring storage items: 37 | // https://substrate.dev/docs/en/knowledgebase/runtime/storage#declaring-storage-items 38 | pub type Something = StorageValue<_, u32>; 39 | 40 | // Pallets use events to inform users when important changes are made. 41 | // https://substrate.dev/docs/en/knowledgebase/runtime/events 42 | #[pallet::event] 43 | #[pallet::generate_deposit(pub(super) fn deposit_event)] 44 | pub enum Event { 45 | /// Event documentation should end with an array that provides descriptive names for event 46 | /// parameters. [something, who] 47 | SomethingStored(u32, T::AccountId), 48 | } 49 | 50 | // Errors inform users that something went wrong. 51 | #[pallet::error] 52 | pub enum Error { 53 | /// Error names should be descriptive. 54 | NoneValue, 55 | /// Errors should have helpful documentation associated with them. 56 | StorageOverflow, 57 | } 58 | 59 | #[pallet::hooks] 60 | impl Hooks> for Pallet {} 61 | 62 | // Dispatchable functions allows users to interact with the pallet and invoke state changes. 63 | // These functions materialize as "extrinsics", which are often compared to transactions. 64 | // Dispatchable functions must be annotated with a weight and must return a DispatchResult. 65 | #[pallet::call] 66 | impl Pallet { 67 | /// An example dispatchable that takes a singles value as a parameter, writes the value to 68 | /// storage and emits an event. This function must be dispatched by a signed extrinsic. 69 | #[pallet::call_index(0)] 70 | #[pallet::weight(Weight::from_parts(10_000, 1024).saturating_add(T::DbWeight::get().writes(1)))] 71 | pub fn do_something(origin: OriginFor, something: u32) -> DispatchResultWithPostInfo { 72 | // Check that the extrinsic was signed and get the signer. 73 | // This function will return an error if the extrinsic is not signed. 
74 | // https://substrate.dev/docs/en/knowledgebase/runtime/origin 75 | let who = ensure_signed(origin)?; 76 | 77 | // Update storage. 78 | >::put(something); 79 | 80 | // Emit an event. 81 | Self::deposit_event(Event::SomethingStored(something, who)); 82 | // Return a successful DispatchResultWithPostInfo 83 | Ok(().into()) 84 | } 85 | 86 | /// An example dispatchable that may throw a custom error. 87 | #[pallet::call_index(1)] 88 | #[pallet::weight(Weight::from_parts(10_000, 1024).saturating_add(T::DbWeight::get().reads_writes(1,1)))] 89 | pub fn cause_error(origin: OriginFor) -> DispatchResultWithPostInfo { 90 | let _who = ensure_signed(origin)?; 91 | 92 | // Read a value from storage. 93 | match >::get() { 94 | // Return an error if the value has not been set. 95 | None => Err(Error::::NoneValue)?, 96 | Some(old) => { 97 | // Increment the value read from storage; will error in the event of overflow. 98 | let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; 99 | // Update the value in storage with the incremented result. 100 | >::put(new); 101 | Ok(().into()) 102 | } 103 | } 104 | } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /parachain-template/pallets/template/src/mock.rs: -------------------------------------------------------------------------------- 1 | use crate as pallet_template; 2 | use frame_support::{parameter_types, traits::Everything}; 3 | use frame_system as system; 4 | use sp_core::H256; 5 | use sp_runtime::{ 6 | testing::Header, 7 | traits::{BlakeTwo256, IdentityLookup}, 8 | }; 9 | 10 | type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; 11 | type Block = frame_system::mocking::MockBlock; 12 | 13 | // Configure a mock runtime to test the pallet. 
14 | frame_support::construct_runtime!( 15 | pub enum Test where 16 | Block = Block, 17 | NodeBlock = Block, 18 | UncheckedExtrinsic = UncheckedExtrinsic, 19 | { 20 | System: frame_system::{Pallet, Call, Config, Storage, Event}, 21 | TemplateModule: pallet_template::{Pallet, Call, Storage, Event}, 22 | } 23 | ); 24 | 25 | parameter_types! { 26 | pub const BlockHashCount: u64 = 250; 27 | pub const SS58Prefix: u8 = 42; 28 | } 29 | 30 | impl system::Config for Test { 31 | type BaseCallFilter = Everything; 32 | type BlockWeights = (); 33 | type BlockLength = (); 34 | type DbWeight = (); 35 | type RuntimeOrigin = RuntimeOrigin; 36 | type RuntimeCall = RuntimeCall; 37 | type Index = u64; 38 | type BlockNumber = u64; 39 | type Hash = H256; 40 | type Hashing = BlakeTwo256; 41 | type AccountId = u64; 42 | type Lookup = IdentityLookup; 43 | type Header = Header; 44 | type RuntimeEvent = RuntimeEvent; 45 | type BlockHashCount = BlockHashCount; 46 | type Version = (); 47 | type PalletInfo = PalletInfo; 48 | type AccountData = (); 49 | type OnNewAccount = (); 50 | type OnKilledAccount = (); 51 | type SystemWeightInfo = (); 52 | type SS58Prefix = SS58Prefix; 53 | type OnSetCode = (); 54 | type MaxConsumers = frame_support::traits::ConstU32<16>; 55 | } 56 | 57 | impl pallet_template::Config for Test { 58 | type RuntimeEvent = RuntimeEvent; 59 | } 60 | 61 | // Build genesis storage according to the mock runtime. 
62 | pub fn new_test_ext() -> sp_io::TestExternalities { 63 | system::GenesisConfig::default() 64 | .build_storage::() 65 | .unwrap() 66 | .into() 67 | } 68 | -------------------------------------------------------------------------------- /parachain-template/pallets/template/src/tests.rs: -------------------------------------------------------------------------------- 1 | use crate::{mock::*, Error}; 2 | use frame_support::{assert_noop, assert_ok}; 3 | 4 | #[test] 5 | fn it_works_for_default_value() { 6 | new_test_ext().execute_with(|| { 7 | // Dispatch a signed extrinsic. 8 | assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42)); 9 | // Read pallet storage and assert an expected result. 10 | assert_eq!(TemplateModule::something(), Some(42)); 11 | }); 12 | } 13 | 14 | #[test] 15 | fn correct_error_for_none_value() { 16 | new_test_ext().execute_with(|| { 17 | // Ensure the expected error is thrown when no value is present. 18 | assert_noop!( 19 | TemplateModule::cause_error(RuntimeOrigin::signed(1)), 20 | Error::::NoneValue 21 | ); 22 | }); 23 | } 24 | -------------------------------------------------------------------------------- /parachain-template/polkadot-launch/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "relaychain": { 3 | "bin": "../../polkadot/target/release/polkadot", 4 | "chain": "rococo-local", 5 | "nodes": [ 6 | { 7 | "name": "alice", 8 | "wsPort": 9944, 9 | "port": 30444 10 | }, 11 | { 12 | "name": "bob", 13 | "wsPort": 9955, 14 | "port": 30555 15 | } 16 | ] 17 | }, 18 | "parachains": [ 19 | { 20 | "bin": "../target/release/parachain-collator", 21 | "id": "200", 22 | "balance": "1000000000000000000000", 23 | "nodes": [ 24 | { 25 | "wsPort": 9988, 26 | "name": "alice", 27 | "port": 31200, 28 | "flags": [ 29 | "--force-authoring", 30 | "--", 31 | "--execution=wasm" 32 | ] 33 | } 34 | ] 35 | } 36 | ], 37 | "types": { 38 | } 39 | } 40 | 
-------------------------------------------------------------------------------- /parachain-template/runtime/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "parachain-template-runtime" 3 | authors = [ "Anonymous" ] 4 | description = "A FRAME-based Substrate Runtime, that demonstrates the Nimbus consensus framework." 5 | edition = "2021" 6 | homepage = "https://substrate.dev" 7 | license = "Unlicense" 8 | repository = "https://github.com/paritytech/cumulus/" 9 | version = "0.9.0" 10 | 11 | [package.metadata.docs.rs] 12 | targets = [ "x86_64-unknown-linux-gnu" ] 13 | 14 | [dependencies] 15 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } 16 | hex-literal = "0.3.1" 17 | log = { version = "0.4.17", default-features = false } 18 | scale-info = { version = "2.0.0", default-features = false, features = [ "derive" ] } 19 | serde = { version = "1.0.119", optional = true, features = [ "derive" ] } 20 | smallvec = "1.6.1" 21 | 22 | # Local Dependencies 23 | pallet-template = { path = "../pallets/template", default-features = false } 24 | 25 | # Substrate Dependencies 26 | ## Substrate Primitive Dependencies 27 | sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 28 | sp-block-builder = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 29 | sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 30 | sp-inherents = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 31 | sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 32 | sp-offchain = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 
33 | sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 34 | sp-session = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 35 | sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 36 | sp-transaction-pool = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 37 | sp-version = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 38 | 39 | ## Substrate FRAME Dependencies 40 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", optional = true, default-features = false, branch = "polkadot-v0.9.43" } 41 | frame-executive = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 42 | frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 43 | frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 44 | frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", optional = true, default-features = false, branch = "polkadot-v0.9.43" } 45 | frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 46 | 47 | ## Substrate Pallet Dependencies 48 | pallet-balances = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 49 | pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 50 | pallet-session = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 51 | pallet-sudo = { git = 
"https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 52 | pallet-timestamp = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 53 | pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 54 | pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" } 55 | 56 | # Cumulus dependencies 57 | cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43" } 58 | cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43" } 59 | cumulus-pallet-session-benchmarking = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43", version = "3.0.0" } 60 | cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43" } 61 | cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43" } 62 | cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43" } 63 | cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43" } 64 | cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43" } 65 | pallet-collator-selection = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43" } 66 | parachain-info = { git = "https://github.com/paritytech/cumulus", default-features = false, branch = "polkadot-v0.9.43" } 67 | 68 | # Nimbus Dependencies 69 | 
nimbus-primitives = { path = "../../nimbus-primitives", default-features = false } 70 | pallet-author-inherent = { path = "../../pallets/author-inherent", default-features = false } 71 | pallet-author-slot-filter = { path = "../../pallets/author-slot-filter", default-features = false } 72 | 73 | 74 | # Polkadot Dependencies 75 | pallet-xcm = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.43" } 76 | polkadot-parachain = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.43" } 77 | polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.43" } 78 | xcm = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.43" } 79 | xcm-builder = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.43" } 80 | xcm-executor = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "release-v0.9.43" } 81 | 82 | [build-dependencies] 83 | substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } 84 | 85 | [features] 86 | default = [ 87 | "std", 88 | ] 89 | std = [ 90 | "codec/std", 91 | "cumulus-pallet-dmp-queue/std", 92 | "cumulus-pallet-parachain-system/std", 93 | "cumulus-pallet-xcm/std", 94 | "cumulus-pallet-xcmp-queue/std", 95 | "cumulus-primitives-core/std", 96 | "cumulus-primitives-timestamp/std", 97 | "cumulus-primitives-utility/std", 98 | "frame-executive/std", 99 | "frame-support/std", 100 | "frame-system/std", 101 | "log/std", 102 | "nimbus-primitives/std", 103 | "pallet-author-inherent/std", 104 | "pallet-author-slot-filter/std", 105 | "pallet-balances/std", 106 | "pallet-collator-selection/std", 107 | "pallet-insecure-randomness-collective-flip/std", 108 | "pallet-session/std", 109 | "pallet-sudo/std", 110 | "pallet-template/std", 111 | 
"pallet-timestamp/std", 112 | "pallet-transaction-payment-rpc-runtime-api/std", 113 | "pallet-transaction-payment/std", 114 | "parachain-info/std", 115 | "polkadot-parachain/std", 116 | "polkadot-runtime-common/std", 117 | "scale-info/std", 118 | "serde", 119 | "sp-api/std", 120 | "sp-block-builder/std", 121 | "sp-core/std", 122 | "sp-inherents/std", 123 | "sp-io/std", 124 | "sp-offchain/std", 125 | "sp-runtime/std", 126 | "sp-session/std", 127 | "sp-std/std", 128 | "sp-transaction-pool/std", 129 | "sp-version/std", 130 | "xcm-builder/std", 131 | "xcm-executor/std", 132 | "xcm/std", 133 | ] 134 | 135 | runtime-benchmarks = [ 136 | "frame-benchmarking", 137 | "frame-support/runtime-benchmarks", 138 | "frame-system-benchmarking", 139 | "frame-system/runtime-benchmarks", 140 | "pallet-balances/runtime-benchmarks", 141 | "pallet-collator-selection/runtime-benchmarks", 142 | "pallet-template/runtime-benchmarks", 143 | "pallet-timestamp/runtime-benchmarks", 144 | "pallet-xcm/runtime-benchmarks", 145 | "sp-runtime/runtime-benchmarks", 146 | "xcm-builder/runtime-benchmarks", 147 | "cumulus-pallet-session-benchmarking/runtime-benchmarks", 148 | ] 149 | 150 | try-runtime = [ 151 | "frame-support/try-runtime", 152 | "pallet-author-slot-filter/try-runtime", 153 | ] 154 | -------------------------------------------------------------------------------- /parachain-template/runtime/build.rs: -------------------------------------------------------------------------------- 1 | use substrate_wasm_builder::WasmBuilder; 2 | 3 | fn main() { 4 | WasmBuilder::new() 5 | .with_current_project() 6 | .export_heap_base() 7 | .import_memory() 8 | .build() 9 | } 10 | -------------------------------------------------------------------------------- /parachain-template/runtime/src/pallet_account_set.rs: -------------------------------------------------------------------------------- 1 | //! Small pallet responsible for storing a set of accounts, and their associated session keys. 2 | //! 
This is a minimal stand-in for what would, in practice, be a staking pallet.
48 | type Mapping = StorageMap<_, Twox64Concat, NimbusId, T::AccountId, OptionQuery>; 49 | 50 | #[pallet::genesis_config] 51 | /// Genesis config for author mapping pallet 52 | pub struct GenesisConfig { 53 | /// The associations that should exist at chain genesis 54 | pub mapping: Vec<(T::AccountId, NimbusId)>, 55 | } 56 | 57 | #[cfg(feature = "std")] 58 | impl Default for GenesisConfig { 59 | fn default() -> Self { 60 | Self { mapping: vec![] } 61 | } 62 | } 63 | 64 | #[pallet::genesis_build] 65 | impl GenesisBuild for GenesisConfig { 66 | fn build(&self) { 67 | if self.mapping.is_empty() { 68 | warn!(target: "account-set", "No mappings at genesis. Your chain will have no valid authors."); 69 | } 70 | for (account_id, author_id) in &self.mapping { 71 | Mapping::::insert(author_id, account_id); 72 | StoredAccounts::::append(account_id); 73 | } 74 | } 75 | } 76 | 77 | /// This pallet is compatible with nimbus's author filtering system. Any account stored in this pallet 78 | /// is a valid author. Notice that this implementation does not have an inner filter, so it 79 | /// can only be the beginning of the nimbus filter pipeline. 
80 | impl CanAuthor for Pallet { 81 | fn can_author(author: &T::AccountId, _slot: &u32) -> bool { 82 | StoredAccounts::::get().contains(author) 83 | } 84 | } 85 | 86 | impl AccountLookup for Pallet { 87 | fn lookup_account(author: &NimbusId) -> Option { 88 | Mapping::::get(&author) 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.69.0" 3 | components = [ "rustfmt", "clippy" ] 4 | targets = [ "wasm32-unknown-unknown" ] 5 | profile = "minimal" 6 | -------------------------------------------------------------------------------- /scripts/check-cargo-toml-files-format.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | find . -name "Cargo.toml" -not -path "*/target/*" -exec toml-sort {} \; 4 | 5 | CMD="git diff --name-only" 6 | 7 | stdbuf -oL $CMD | { 8 | while IFS= read -r line; do 9 | echo ║ $line 10 | if [[ "$line" == *"Cargo.toml" ]]; then 11 | echo "Check fails: $line" 12 | echo "Please run './scripts/toml-sort.sh' to format Cargo.toml files properly." 13 | exit 1 14 | fi 15 | done 16 | } 17 | -------------------------------------------------------------------------------- /scripts/toml-sort.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # From the workspace directory, run : 4 | # ./scripts/toml-sort.sh 5 | # to format all Cargo.toml files, and 6 | # ./scripts/toml-sort.sh --check 7 | # to only check the formatting. 8 | 9 | if ! type "toml-sort" > /dev/null; then 10 | echo "Please install toml-sort with command 'cargo install --git https://github.com/PureStake/toml_sort'" 11 | else 12 | find . 
-name "Cargo.toml" -not -path "*/target/*" -exec toml-sort {} $@ \; 13 | fi 14 | -------------------------------------------------------------------------------- /toml-sort.toml: -------------------------------------------------------------------------------- 1 | keys = [ 2 | "workspace", 3 | "name", 4 | "package", 5 | "bin", 6 | "lib", 7 | "test", 8 | "dependencies", 9 | "dev-dependencies", 10 | "build-dependencies", 11 | "features", 12 | "default", 13 | "std", 14 | ] 15 | 16 | inline_keys = [ 17 | "package", 18 | "path", 19 | "git", 20 | "branch", 21 | "rev", 22 | "version", 23 | "optional", 24 | "default-features", 25 | "features", 26 | ] 27 | 28 | sort_string_arrays = true --------------------------------------------------------------------------------