├── .github
└── workflows
│ ├── build.yml
│ └── docs.yml
├── .gitignore
├── .rustfmt.toml
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── README.md
├── nimbus-consensus
├── Cargo.toml
└── src
│ ├── import_queue.rs
│ ├── lib.rs
│ └── manual_seal.rs
├── nimbus-primitives
├── Cargo.toml
└── src
│ ├── digests.rs
│ ├── inherents.rs
│ └── lib.rs
├── pallets
├── aura-style-filter
│ ├── Cargo.toml
│ └── src
│ │ └── lib.rs
├── author-inherent
│ ├── Cargo.toml
│ └── src
│ │ ├── benchmarks.rs
│ │ ├── exec.rs
│ │ ├── lib.rs
│ │ ├── mock.rs
│ │ ├── tests.rs
│ │ └── weights.rs
└── author-slot-filter
│ ├── Cargo.toml
│ └── src
│ ├── benchmarks.rs
│ ├── lib.rs
│ ├── migration.rs
│ ├── mock.rs
│ ├── num.rs
│ ├── tests.rs
│ └── weights.rs
├── parachain-template
├── LICENSE
├── README.md
├── node
│ ├── Cargo.toml
│ ├── build.rs
│ └── src
│ │ ├── chain_spec.rs
│ │ ├── cli.rs
│ │ ├── command.rs
│ │ ├── main.rs
│ │ ├── rpc.rs
│ │ └── service.rs
├── pallets
│ └── template
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ └── src
│ │ ├── benchmarking.rs
│ │ ├── lib.rs
│ │ ├── mock.rs
│ │ └── tests.rs
├── polkadot-launch
│ └── config.json
└── runtime
│ ├── Cargo.toml
│ ├── build.rs
│ └── src
│ ├── lib.rs
│ └── pallet_account_set.rs
├── rust-toolchain
├── scripts
├── check-cargo-toml-files-format.sh
└── toml-sort.sh
└── toml-sort.toml
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | # This CI Tries to be both simple and effective. It is inspired by:
2 | # https://github.com/marketplace/actions/rust-cache
3 | # https://github.com/actions-rs/toolchain/issues/126#issuecomment-782989659
4 | # https://github.com/actions-rs/example/blob/master/.github/workflows/quickstart.yml
5 |
6 | name: Rust Checks
7 |
8 | on:
9 | pull_request:
10 | push:
11 | branches:
12 | - main
13 |
14 | jobs:
15 | check-cargo-toml-format:
16 | name: "Check Cargo.toml files format"
17 | runs-on: ubuntu-latest
18 | steps:
19 | - name: Checkout
20 | uses: actions/checkout@v2
21 | # With rustup's nice new toml format, we just need to run rustup show to install the toolchain
22 | # https://github.com/actions-rs/toolchain/issues/126#issuecomment-782989659
23 | - name: Setup Rust toolchain
24 | run: rustup show
25 | - name: Check Cargo.toml files format with toml_sort
26 | run: chmod u+x ./scripts/check-cargo-toml-files-format.sh && ./scripts/check-cargo-toml-files-format.sh
27 |
28 | check-rust-fmt:
29 | name: Check rustfmt
30 | runs-on: ubuntu-latest
31 | steps:
32 | - name: Checkout
33 | uses: actions/checkout@v2
34 |
35 | - name: Setup rust toolchain
36 | run: rustup show
37 |
38 | - name: Run cargo fmt check
39 | uses: actions-rs/cargo@v1
40 | with:
41 | command: fmt
42 | args: -- --check
43 |
44 | cargo-check:
45 | name: Cargo check
46 | runs-on: ubuntu-latest
47 | steps:
48 | - name: Install tooling
49 | run: |
50 | sudo apt-get install -y protobuf-compiler
51 | protoc --version
52 | - name: Checkout sources
53 | uses: actions/checkout@v2
54 |
55 | - name: Setup rust toolchain
56 | run: rustup show
57 |
58 | - name: Rust Cache
59 | uses: Swatinem/rust-cache@v1
60 |
61 | - name: Run cargo check
62 | uses: actions-rs/cargo@v1
63 | with:
64 | command: check
65 |
66 | - name: Run cargo test
67 | uses: actions-rs/cargo@v1
68 | with:
69 | command: test
70 |
71 | check-copyright:
72 | runs-on: ubuntu-latest
73 | steps:
74 | - name: Checkout
75 | uses: actions/checkout@v2
76 |
77 | - name: Find un-copyrighted files
78 | run: |
79 | find . -name '*.rs' -not -path "*/parachain-template/*" -exec grep -H -E -o -c Copyright {} \; | grep ':0' || true
80 | FILECOUNT=$(find . -name '*.rs' -not -path "*/parachain-template/*" -exec grep -H -E -o -c 'Copyright' {} \; | grep -c ':0' || true)
81 | if [[ $FILECOUNT -eq 0 ]]; then
82 | true
83 | else
84 | false
85 | fi
86 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: Publish Rust Docs
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | deploy-docs:
10 | name: Deploy docs
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - name: Checkout repository
15 | uses: actions/checkout@v1
16 |
17 | # With rustup's nice new toml format, we just need to run rustup show to install the toolchain
18 | # https://github.com/actions-rs/toolchain/issues/126#issuecomment-782989659
19 | - name: Setup Rust toolchain
20 | run: rustup show
21 |
22 | #TODO consider using the rust Cache action like in the rust CI
23 | - uses: actions/cache@v2
24 | with:
25 | path: |
26 | ~/.cargo/registry
27 | ~/.cargo/git
28 | target
29 | key: ${{ runner.os }}-cargo-doc-${{ hashFiles('**/Cargo.lock') }}
30 |
31 | - name: Build rustdocs
32 | uses: actions-rs/cargo@v1
33 | env:
34 | CARGO_INCREMENTAL: "0"
35 | with:
36 | command: doc
37 | args: --all --no-deps
38 |
39 | # Make an index.html file so we start at the nimbus consensus worker
40 | # Copied from https://github.com/substrate-developer-hub/rustdocs/blob/master/index.html
41 | - name: Make index.html
42 | run: echo "" > ./target/doc/index.html
43 |
44 | - name: Deploy documentation
45 | uses: peaceiris/actions-gh-pages@v3
46 | with:
47 | github_token: ${{ secrets.GITHUB_TOKEN }}
48 | publish_branch: gh-pages
49 | publish_dir: ./target/doc
50 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/target
--------------------------------------------------------------------------------
/.rustfmt.toml:
--------------------------------------------------------------------------------
1 | # These formatting rules try to conform to the Substrate style guidelines:
2 | # > https://wiki.parity.io/Substrate-Style-Guide
3 |
4 | reorder_imports = true
5 | hard_tabs = true
6 | max_width = 100
7 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = [
3 | "nimbus-consensus",
4 | "nimbus-primitives",
5 | "pallets/aura-style-filter",
6 | "pallets/author-inherent",
7 | "pallets/author-slot-filter",
8 | "parachain-template/node",
9 | "parachain-template/runtime",
10 | ]
11 |
12 | [profile.release]
13 | panic = "unwind"
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## ⚠️ Nimbus has been migrated to [Moonkit](https://github.com/Moonsong-Labs/moonkit)
2 |
3 | # Cumulo -- Nimbus ⛈️
4 |
5 | Nimbus is a framework for building parachain consensus systems on [cumulus](https://github.com/paritytech/cumulus)-based parachains.
6 |
7 | Given the regular six-second pulse-like nature of the relay chain, it is natural to think about slot-
8 | based consensus algorithms for parachains. The parachain network is responsible for liveness and
9 | decentralization and the relay chain is responsible for finality. There is a rich design space for such
10 | algorithms, yet some tasks are common to all (or most) of them. These common tasks include:
11 |
12 | * Signing and signature checking blocks
13 | * Injecting authorship information into the parachain
14 | * Block authorship and import accounting
15 | * Filtering a large (potentially unbounded) set of potential authors to a smaller (but still potentially unbounded) set.
16 | * Detecting when it is your turn to author and skipping other slots
17 |
18 | Nimbus aims to provide standard implementations for the logistical parts of such consensus engines,
19 | along with helpful traits for implementing the parts that researchers and developers want to customize.
20 |
21 | ## Try the Demo
22 |
23 | While Nimbus is primarily a development framework meant to be included in other projects, it is useful
24 | to see a basic network in action. An example network is included in the `parachain-template` example collator. You
25 | can build it with `cargo build --release` and launch it like any other cumulus parachain.
26 | Make sure to specify `--chain nimbus`.
27 |
28 | Rather than reiterate how to start a relay-para network here, I'll simply recommend you use the
29 | excellent [Polkadot Launch](https://github.com/paritytech/polkadot-launch) tool. This repo was tested with version 1.4.1.
30 | A [launch config file](./parachain-template/polkadot-launch/config.json) is provided.
31 |
32 | ```bash
33 | # Install polkadot launch (I used v1.4.1)
34 | npm i -g polkadot-launch
35 |
36 | # Build polkadot (I used 82aa404c; check Cargo.lock to be sure)
37 | cd polkadot
38 | cargo build --release
39 | cd ..
40 |
41 | # Build Polkadot-parachains example collator
42 | cd cumulus
43 | git checkout nimbus
44 | cargo build --release
45 |
46 | # Launch the multi-chain
47 | polkadot-launch ./parachain-template/polkadot-launch/config.json
48 | ```
49 |
50 | To learn more about launching relay-para networks, check out the [cumulus workshop](https://substrate.dev/cumulus-workshop).
51 |
52 | ## Design Overview
53 |
54 | If you want to start using Nimbus in your project, it is worth reading this.
55 |
56 | At its core nimbus is a consensus engine that considers blocks valid if and only if they inject the author's public identity into the runtime, _and_ seal the block with a signature
57 | by the author's private key.
58 |
59 | Compared to most consensus engines, this is _very_ permissive -- anyone who can create a signature can author valid blocks. In order to build more useful and familiar consensus engine on this foundation, nimbus provides a framework for creating filters to further restrict the set of eligible authors. These filters live inside the runtime.
60 |
61 | Being general in the consensus layer and deferring most checks to the runtime is the key
62 | to nimbus's re-usability as a framework. And is the reason that *writing a consensus engine is as easy as writing a pallet* when you use nimbus.
63 |
64 | ### Author Inherent
65 |
66 | The Author inherent pallet allows block authors to insert their identity into
67 | the runtime. This feature alone is useful in many blockchains and can be used for things like block rewards.
68 |
69 | The author inherent provides a validation hook called `CanAuthor`. This check will be called during the inherent execution and is the main entry point to nimbus's author filters.
70 | If you don't want to restrict authorship at all, you can just use `()`.
71 |
72 | As a concrete example, in a simple Proof of Stake system this check will determine
73 | whether the author is staked. In a more realistic PoS system the `CanAuthor` check might
74 | first make sure the author is staked, and then make sure they are eligible in _this slot_ according to round robin rules.
75 |
76 | Finally, the pallet copies the authorship information into a consensus digest that will stick around
77 | in the block header. This digest can be used by UIs to display the author, and also by the consensus
78 | engine to verify the block authorship.
79 |
80 | **PreRuntimeDigest**
81 | I believe the design should be changed slightly to use a preruntime digest rather than an inherent for a few reasons:
82 |
83 | * The data wouldn't be duplicated between an inherent and a digest.
84 | * Nimbus client-side worker would support non-frame runtimes.
85 | * That's how sc-consensus-aura does it.
86 |
87 | ### Author Filters
88 |
89 | A primary job of a consensus engine is deciding who can author each block. Some may have a static set, others
90 | may rotate the set each era, others may elect an always-changing subset of all potential authors. There
91 | is much space for creativity, research, and design, and Nimbus strives to provide a flexible interface
92 | for this creative work. You can express all the interesting parts of your
93 | consensus engine simply by creating filters that implement the `CanAuthor` trait. The rest of Nimbus will #JustWork for you.
94 |
95 | This repository comes with a few example filters already, and additional examples are welcome. The examples are:
96 | * PseudoRandom FixedSized Subset - This filter takes a finite set (eg a staked set) and filters it down to a pseudo-random
97 | subset at each height. The eligible count is configurable in the pallet. This is a good learning example.
98 | * Aura - The authority round consensus engine is popular in the Substrate ecosystem because it was one
99 | of the first (and simplest!) engines implemented in Substrate. Aura can be expressed in the Nimbus
100 | filter framework and is included as an example filter. If you are considering using aura, that crate
101 | has good documentation on how it differs from `sc-consensus-aura`.
102 | * (Planned) FixedSizedSubset - The author submits a VRF output that has to be below a threshold to be able to author.
103 | * (Planned) Filter Combinator - A filter that wraps two other filters. It uses one in even slots and the other in odd slots.
104 |
105 | ### Author Filter Runtime API
106 |
107 | Nimbus makes the design choice to include the author checking logic in the runtime. This is in contrast to the existing implementations of Aura and Babe where the authorship checks are offchain.
108 |
109 | While moving the check in-runtime provides a lot of flexibility and simplifies interfacing with relay-chain validators, it makes it impossible
110 | for authoring nodes to predict whether they will be eligible without calling into the runtime.
111 | To achieve this, we provide a runtime API that makes the minimal calculation necessary to determine
112 | whether a specified author will be eligible at the specified slot.
113 |
114 | ### Nimbus Consensus Worker
115 |
116 | Nimbus consensus is the primary client-side consensus worker. It implements the `ParachainConsensus`
117 | trait introduced to cumulus in https://github.com/paritytech/cumulus/pull/329. It is not likely that
118 | you will need to change this code directly to implement your engine as it is entirely abstracted over
119 | the filters you use. The consensus engine performs these tasks:
120 |
121 | * Slot prediction - it calls the runtime API mentioned previously to determine whether it is eligible. If not, it returns early.
122 | * Authorship - It calls into a standard Substrate proposer to construct a block (probably including the author inherent).
123 | * Self import - it imports the block that the proposer created (called the pre-block) into the node's local database.
124 | * Sealing - It adds a seal digest to the block - This is what is used by other nodes to verify the authorship information.
125 |
126 | ### Verifier and Import Queue
127 |
128 | For a parachain node to import a sealed block authored by one of its peers, it needs to first check that the seal is a valid signature by the author that was injected into the runtime. This is the job of the verifier. It
129 | will remove the nimbus seal and check it against the nimbus consensus digest from the runtime. If that process fails,
130 | the block is immediately thrown away before the expensive execution even begins. If it succeeds, then
131 | the pre-block (the part that's left after the seal is stripped) is passed into the
132 | [import pipeline](https://substrate.dev/docs/en/knowledgebase/advanced/block-import) for processing
133 | and execution. Finally, the locally produced result is compared to the result received across the network.
134 |
135 | ### Custom Block Executor
136 |
137 | We've already discussed how parachain nodes (both the one that authors a block, and also its peers)
138 | import blocks. In a standalone blockchain, that's the end of the story. But for a parachain, we also
139 | need our relay chain validators to re-execute and validate the parachain block. Validators do this in
140 | a unique way, and entirely in wasm. Providing the `validate_block` function that the validators use
141 | is the job of the `register_validate_block!` macro from Cumulus.
142 |
143 | Typically a cumulus runtime invokes that macro like this:
144 | ```rust
145 | cumulus_pallet_parachain_system::register_validate_block!(Runtime, Executive);
146 | ```
147 |
148 | You can see that the validators use the exact same executive that the parachain nodes do. Now that
149 | we have sealed blocks, that must change. The validators need to strip and verify the seal, and re-execute
150 | the pre-block just like the parachain nodes did. And without access to an offchain verifier, they must
151 | do this all in the runtime. For that purpose, we provide an alternate executive which wraps the normal
152 | FRAME executive. The wrapper strips and checks the seal, just like the verifier did, and then passes the pre-block to the inner FRAME executive for re-execution.
153 |
154 | ## Write Your Own Consensus Logic
155 |
156 | If you have an idea for a new slot-based parachain consensus algorithm, Nimbus is a quick way to get
157 | it working! The fastest way to start hacking is to fork this repo and customize the template node.
158 |
159 | If you'd rather dive in than read one more sentence, then **start hacking in the `author-slot-filter`
160 | pallet.**
161 |
162 | In most cases, you can use all the off-the-shelf components and simply write your filters. It is also
163 | possible to compose existing filters to build more complex logic from smaller pieces.
164 |
165 | ## Authoring and Import Diagrams
166 |
167 | One node authors the block, then it is processed in three different ways.
168 |
169 | | | Author | Parachain Peer | Relay Validator |
170 | | ------------------- | ------ | -------------- | --------- |
171 | | Predict Eligibility | ✅ | ❌ | ❌ |
172 | | Author Block | ✅ | ❌ | ❌ |
173 | | Runs Verifier | ❌ | ✅ | ❌ |
174 | | Import Pipeline | ✅ | ✅ | ❌ |
175 | | Custom Pre exec | ❌ | ❌ | ✅ |
176 | | Normal FRAME exec | ✅ | ✅ | ✅ |
177 |
178 | ## Roadmap
179 |
180 | The Nimbus framework is intended to be loosely coupled with Cumulus.
181 |
182 | ### Next tasks
183 | * Proper trait for interacting with digests
184 | * More example filters
185 | * Share code between verifier and wrapper executive
186 | * Client-side worker for standalone (non para) blockchain
187 | * Aurand as an example of composing filters
188 | * Second filter trait for exhaustive sets (As opposed to current propositional approach)
189 |
190 | ## Contributions Welcome
191 |
192 | Try it out, open issues, submit PRs, review code. Whether you like to tinker with a running node, or
193 | analyze security from an academic perspective, your contributions are welcome.
194 |
195 | I am happy to support users who want to use nimbus, or want feedback on their consensus engines.
196 |
--------------------------------------------------------------------------------
/nimbus-consensus/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "nimbus-consensus"
3 | description = "Client-side worker for the Nimbus family of consensus algorithms"
4 | edition = "2021"
5 | version = "0.9.0"
6 | [dependencies]
7 | # Substrate deps
8 | sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
9 | sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
10 | sc-consensus-manual-seal = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
11 | sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
12 | sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
13 | sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
14 | sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
15 | sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
16 | sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
17 | sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
18 | sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
19 | sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
20 | substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }
21 |
22 | # Cumulus dependencies
23 | cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" }
24 | cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" }
25 | cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" }
26 |
27 | # Nimbus Dependencies
28 | nimbus-primitives = { path = "../nimbus-primitives" }
29 |
30 | # Other deps
31 | async-trait = "0.1"
32 | codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] }
33 | futures = { version = "0.3.24", features = [ "compat" ] }
34 | log = "0.4.17"
35 | parking_lot = "0.12"
36 | tracing = "0.1.22"
37 |
--------------------------------------------------------------------------------
/nimbus-consensus/src/import_queue.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Nimbus.
3 |
4 | // Nimbus is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Nimbus is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Nimbus. If not, see .
16 |
17 | use std::{marker::PhantomData, sync::Arc};
18 |
19 | use log::debug;
20 | use nimbus_primitives::{digests::CompatibleDigestItem, NimbusId, NimbusPair, NIMBUS_ENGINE_ID};
21 | use sc_consensus::{
22 | import_queue::{BasicQueue, Verifier as VerifierT},
23 | BlockImport, BlockImportParams,
24 | };
25 | use sp_api::ProvideRuntimeApi;
26 | use sp_application_crypto::{ByteArray, Pair as _};
27 | use sp_block_builder::BlockBuilder as BlockBuilderApi;
28 | use sp_blockchain::Result as ClientResult;
29 | use sp_consensus::error::Error as ConsensusError;
30 | use sp_inherents::{CreateInherentDataProviders, InherentDataProvider};
31 | use sp_runtime::{
32 | traits::{Block as BlockT, Header as HeaderT},
33 | DigestItem,
34 | };
35 |
36 | /// The Nimbus verifier strips the seal digest, and checks that it is a valid signature by
37 | /// the same key that was injected into the runtime and noted in the Seal digest.
38 | /// From Nimbus's perspective any block that faithfully reports its authorship to the runtime
39 | /// is valid. The intention is that the runtime itself may then put further restrictions on
40 | /// the identity of the author.
41 | struct Verifier {
42 | client: Arc,
43 | create_inherent_data_providers: CIDP,
44 | _marker: PhantomData,
45 | }
46 |
47 | #[async_trait::async_trait]
48 | impl VerifierT for Verifier
49 | where
50 | Block: BlockT,
51 | Client: ProvideRuntimeApi + Send + Sync,
52 | >::Api: BlockBuilderApi,
53 | CIDP: CreateInherentDataProviders,
54 | {
55 | async fn verify(
56 | &mut self,
57 | mut block_params: BlockImportParams,
58 | ) -> Result, String> {
59 | // Skip checks that include execution, if being told so or when importing only state.
60 | //
61 | // This is done for example when gap syncing and it is expected that the block after the gap
62 | // was checked/chosen properly, e.g. by warp syncing to this block using a finality proof.
63 | // Or when we are importing state only and can not verify the seal.
64 | if block_params.with_state() || block_params.state_action.skip_execution_checks() {
65 | // When we are importing only the state of a block, it will be the best block.
66 | block_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom(
67 | block_params.with_state(),
68 | ));
69 |
70 | return Ok(block_params);
71 | }
72 |
73 | debug!(
74 | target: crate::LOG_TARGET,
75 | "🪲 Header hash before popping digest {:?}",
76 | block_params.header.hash()
77 | );
78 | // Grab the seal digest. Assume it is last (since it is a seal after-all).
79 | let seal = block_params
80 | .header
81 | .digest_mut()
82 | .pop()
83 | .ok_or("Block should have at least one digest on it")?;
84 |
85 | let signature = seal
86 | .as_nimbus_seal()
87 | .ok_or_else(|| String::from("HeaderUnsealed"))?;
88 |
89 | debug!(
90 | target: crate::LOG_TARGET,
91 | "🪲 Header hash after popping digest {:?}",
92 | block_params.header.hash()
93 | );
94 |
95 | debug!(
96 | target: crate::LOG_TARGET,
97 | "🪲 Signature according to verifier is {:?}", signature
98 | );
99 |
100 | // Grab the author information from either the preruntime digest or the consensus digest
101 | //TODO use the trait
102 | let claimed_author = block_params
103 | .header
104 | .digest()
105 | .logs
106 | .iter()
107 | .find_map(|digest| match *digest {
108 | DigestItem::Consensus(id, ref author_id) if id == NIMBUS_ENGINE_ID => {
109 | Some(author_id.clone())
110 | }
111 | DigestItem::PreRuntime(id, ref author_id) if id == NIMBUS_ENGINE_ID => {
112 | Some(author_id.clone())
113 | }
114 | _ => None,
115 | })
116 | .ok_or("Expected one consensus or pre-runtime digest that contains author id bytes")?;
117 |
118 | debug!(
119 | target: crate::LOG_TARGET,
120 | "🪲 Claimed Author according to verifier is {:?}", claimed_author
121 | );
122 |
123 | // Verify the signature
124 | let valid_signature = NimbusPair::verify(
125 | &signature,
126 | block_params.header.hash(),
127 | &NimbusId::from_slice(&claimed_author)
128 | .map_err(|_| "Invalid Nimbus ID (wrong length)")?,
129 | );
130 |
131 | debug!(
132 | target: crate::LOG_TARGET,
133 | "🪲 Valid signature? {:?}", valid_signature
134 | );
135 |
136 | if !valid_signature {
137 | return Err("Block signature invalid".into());
138 | }
139 |
140 | // This part copied from RelayChainConsensus. I guess this is the inherent checking.
141 | if let Some(inner_body) = block_params.body.take() {
142 | let inherent_data_providers = self
143 | .create_inherent_data_providers
144 | .create_inherent_data_providers(*block_params.header.parent_hash(), ())
145 | .await
146 | .map_err(|e| e.to_string())?;
147 |
148 | let inherent_data = inherent_data_providers
149 | .create_inherent_data()
150 | .await
151 | .map_err(|e| format!("{:?}", e))?;
152 |
153 | let block = Block::new(block_params.header.clone(), inner_body);
154 |
155 | let inherent_res = self
156 | .client
157 | .runtime_api()
158 | .check_inherents(
159 | *block_params.header.parent_hash(),
160 | block.clone(),
161 | inherent_data,
162 | )
163 | .map_err(|e| format!("{:?}", e))?;
164 |
165 | if !inherent_res.ok() {
166 | for (i, e) in inherent_res.into_errors() {
167 | match inherent_data_providers.try_handle_error(&i, &e).await {
168 | Some(r) => r.map_err(|e| format!("{:?}", e))?,
169 | None => Err(format!(
170 | "Unhandled inherent error from `{}`.",
171 | String::from_utf8_lossy(&i)
172 | ))?,
173 | }
174 | }
175 | }
176 |
177 | let (_, inner_body) = block.deconstruct();
178 | block_params.body = Some(inner_body);
179 | }
180 |
181 | block_params.post_digests.push(seal);
182 |
183 | // The standard is to use the longest chain rule. This is overridden by the `NimbusBlockImport` in the parachain context.
184 | block_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain);
185 |
186 | debug!(
187 | target: crate::LOG_TARGET,
188 | "🪲 Just finished verifier. posthash from params is {:?}",
189 | &block_params.post_hash()
190 | );
191 |
192 | Ok(block_params)
193 | }
194 | }
195 |
196 | /// Start an import queue for a Cumulus collator that does not use any special authoring logic.
197 | pub fn import_queue(
198 | client: Arc,
199 | block_import: I,
200 | create_inherent_data_providers: CIDP,
201 | spawner: &impl sp_core::traits::SpawnEssentialNamed,
202 | registry: Option<&substrate_prometheus_endpoint::Registry>,
203 | parachain: bool,
204 | ) -> ClientResult>
205 | where
206 | I: BlockImport + Send + Sync + 'static,
207 | I::Transaction: Send,
208 | Client: ProvideRuntimeApi + Send + Sync + 'static,
209 | >::Api: BlockBuilderApi,
210 | CIDP: CreateInherentDataProviders + 'static,
211 | {
212 | let verifier = Verifier {
213 | client,
214 | create_inherent_data_providers,
215 | _marker: PhantomData,
216 | };
217 |
218 | Ok(BasicQueue::new(
219 | verifier,
220 | Box::new(NimbusBlockImport::new(block_import, parachain)),
221 | None,
222 | spawner,
223 | registry,
224 | ))
225 | }
226 |
227 | /// Nimbus specific block import.
228 | ///
229 | /// Nimbus supports both parachain and non-parachain contexts. In the parachain
230 | /// context, new blocks should not be imported as best. Cumulus's ParachainBlockImport
231 | /// handles this correctly, but does not work in non-parachain contexts.
232 | /// This block import has a field indicating whether we should apply parachain rules or not.
233 | ///
234 | /// There may be additional nimbus-specific logic here in the future, but for now it is
235 | /// only the conditional parachain logic
236 | pub struct NimbusBlockImport {
237 | inner: I,
238 | parachain_context: bool,
239 | }
240 |
241 | impl NimbusBlockImport {
242 | /// Create a new instance.
243 | pub fn new(inner: I, parachain_context: bool) -> Self {
244 | Self {
245 | inner,
246 | parachain_context,
247 | }
248 | }
249 | }
250 |
251 | #[async_trait::async_trait]
252 | impl BlockImport for NimbusBlockImport
253 | where
254 | Block: BlockT,
255 | I: BlockImport + Send,
256 | {
257 | type Error = I::Error;
258 | type Transaction = I::Transaction;
259 |
260 | async fn check_block(
261 | &mut self,
262 | block: sc_consensus::BlockCheckParams,
263 | ) -> Result {
264 | self.inner.check_block(block).await
265 | }
266 |
267 | async fn import_block(
268 | &mut self,
269 | mut block_import_params: sc_consensus::BlockImportParams,
270 | ) -> Result {
271 | // If we are in the parachain context, best block is determined by the relay chain
272 | // except during initial sync
273 | if self.parachain_context {
274 | block_import_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom(
275 | block_import_params.origin == sp_consensus::BlockOrigin::NetworkInitialSync,
276 | ));
277 | }
278 |
279 | // Now continue on to the rest of the import pipeline.
280 | self.inner.import_block(block_import_params).await
281 | }
282 | }
283 |
--------------------------------------------------------------------------------
/nimbus-consensus/src/lib.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Nimbus.
3 |
4 | // Nimbus is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Nimbus is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Nimbus. If not, see .
16 |
17 | //! The nimbus consensus client-side worker
18 | //!
19 | //! It queries the in-runtime filter to determine whether any keys
20 | //! stored in its keystore are eligible to author at this slot. If it has an eligible
21 | //! key it authors.
22 |
23 | use cumulus_client_consensus_common::{
24 | ParachainBlockImport, ParachainCandidate, ParachainConsensus,
25 | };
26 | use cumulus_primitives_core::{relay_chain::Hash as PHash, ParaId, PersistedValidationData};
27 | pub use import_queue::import_queue;
28 | use log::{debug, info, warn};
29 | use nimbus_primitives::{
30 | CompatibleDigestItem, DigestsProvider, NimbusApi, NimbusId, NIMBUS_KEY_ID,
31 | };
32 | use parking_lot::Mutex;
33 | use sc_client_api::backend::Backend;
34 | use sc_consensus::{BlockImport, BlockImportParams};
35 | use sp_api::ProvideRuntimeApi;
36 | use sp_application_crypto::ByteArray;
37 | use sp_consensus::{
38 | BlockOrigin, EnableProofRecording, Environment, ProofRecording, Proposal, Proposer,
39 | };
40 | use sp_core::{crypto::CryptoTypeId, sr25519};
41 | use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider};
42 | use sp_keystore::{Keystore, KeystorePtr};
43 | use sp_runtime::{
44 | traits::{Block as BlockT, Header as HeaderT},
45 | DigestItem,
46 | };
47 | use std::convert::TryInto;
48 | use std::{marker::PhantomData, sync::Arc, time::Duration};
49 | use tracing::error;
50 | mod import_queue;
51 | mod manual_seal;
52 | pub use manual_seal::NimbusManualSealConsensusDataProvider;
53 |
54 | const LOG_TARGET: &str = "filtering-consensus";
55 |
56 | /// The implementation of the relay-chain provided consensus for parachains.
57 | pub struct NimbusConsensus {
58 | para_id: ParaId,
59 | proposer_factory: Arc>,
60 | create_inherent_data_providers: Arc,
61 | block_import: Arc>>,
62 | parachain_client: Arc,
63 | keystore: KeystorePtr,
64 | skip_prediction: bool,
65 | additional_digests_provider: Arc,
66 | _phantom: PhantomData,
67 | }
68 |
69 | impl Clone
70 | for NimbusConsensus
71 | {
72 | fn clone(&self) -> Self {
73 | Self {
74 | para_id: self.para_id,
75 | proposer_factory: self.proposer_factory.clone(),
76 | create_inherent_data_providers: self.create_inherent_data_providers.clone(),
77 | block_import: self.block_import.clone(),
78 | parachain_client: self.parachain_client.clone(),
79 | keystore: self.keystore.clone(),
80 | skip_prediction: self.skip_prediction,
81 | additional_digests_provider: self.additional_digests_provider.clone(),
82 | _phantom: PhantomData,
83 | }
84 | }
85 | }
86 |
87 | impl NimbusConsensus
88 | where
89 | B: BlockT,
90 | PF: 'static,
91 | BI: 'static,
92 | BE: Backend + 'static,
93 | ParaClient: ProvideRuntimeApi + 'static,
94 | CIDP: CreateInherentDataProviders + 'static,
95 | DP: DigestsProvider::Hash> + 'static,
96 | {
97 | /// Create a new instance of nimbus consensus.
98 | pub fn build(
99 | BuildNimbusConsensusParams {
100 | para_id,
101 | proposer_factory,
102 | create_inherent_data_providers,
103 | block_import,
104 | backend,
105 | parachain_client,
106 | keystore,
107 | skip_prediction,
108 | additional_digests_provider,
109 | }: BuildNimbusConsensusParams,
110 | ) -> Box>
111 | where
112 | Self: ParachainConsensus,
113 | {
114 | Box::new(Self {
115 | para_id,
116 | proposer_factory: Arc::new(Mutex::new(proposer_factory)),
117 | create_inherent_data_providers: Arc::new(create_inherent_data_providers),
118 | block_import: Arc::new(futures::lock::Mutex::new(ParachainBlockImport::new(
119 | block_import,
120 | backend,
121 | ))),
122 | parachain_client,
123 | keystore,
124 | skip_prediction,
125 | additional_digests_provider: Arc::new(additional_digests_provider),
126 | _phantom: PhantomData,
127 | })
128 | }
129 |
130 | //TODO Could this be a provided implementation now that we have this async inherent stuff?
131 | /// Create the data.
132 | async fn inherent_data(
133 | &self,
134 | parent: B::Hash,
135 | validation_data: &PersistedValidationData,
136 | relay_parent: PHash,
137 | author_id: NimbusId,
138 | ) -> Option {
139 | let inherent_data_providers = self
140 | .create_inherent_data_providers
141 | .create_inherent_data_providers(
142 | parent,
143 | (relay_parent, validation_data.clone(), author_id),
144 | )
145 | .await
146 | .map_err(|e| {
147 | tracing::error!(
148 | target: LOG_TARGET,
149 | error = ?e,
150 | "Failed to create inherent data providers.",
151 | )
152 | })
153 | .ok()?;
154 |
155 | inherent_data_providers
156 | .create_inherent_data()
157 | .await
158 | .map_err(|e| {
159 | tracing::error!(
160 | target: LOG_TARGET,
161 | error = ?e,
162 | "Failed to create inherent data.",
163 | )
164 | })
165 | .ok()
166 | }
167 | }
168 |
169 | /// Grabs any available nimbus key from the keystore.
170 | /// This may be useful in situations where you expect exactly one key
171 | /// and intend to perform an operation with it regardless of whether it is
172 | /// expected to be eligible. Concretely, this is used in the consensus worker
173 | /// to implement the `skip_prediction` feature.
174 | pub(crate) fn first_available_key(keystore: &dyn Keystore) -> Option> {
175 | // Get all the available keys
176 | match Keystore::keys(keystore, NIMBUS_KEY_ID) {
177 | Ok(available_keys) => {
178 | if available_keys.is_empty() {
179 | warn!(
180 | target: LOG_TARGET,
181 | "🔏 No Nimbus keys available. We will not be able to author."
182 | );
183 | None
184 | } else {
185 | Some(available_keys[0].clone())
186 | }
187 | }
188 | _ => None,
189 | }
190 | }
191 |
192 | /// Grab the first eligible nimbus key from the keystore
193 | /// If multiple keys are eligible this function still only returns one
194 | /// and makes no guarantees which one as that depends on the keystore's iterator behavior.
195 | /// This is the standard way of determining which key to author with.
196 | pub(crate) fn first_eligible_key(
197 | client: Arc,
198 | keystore: &dyn Keystore,
199 | parent: &B::Header,
200 | slot_number: u32,
201 | ) -> Option>
202 | where
203 | C: ProvideRuntimeApi,
204 | C::Api: NimbusApi,
205 | {
206 | // Get all the available keys
207 | let available_keys = Keystore::keys(keystore, NIMBUS_KEY_ID).ok()?;
208 |
209 | // Print a more helpful message than "not eligible" when there are no keys at all.
210 | if available_keys.is_empty() {
211 | warn!(
212 | target: LOG_TARGET,
213 | "🔏 No Nimbus keys available. We will not be able to author."
214 | );
215 | return None;
216 | }
217 |
218 | // Iterate keys until we find an eligible one, or run out of candidates.
219 | // If we are skipping prediction, then we author with the first key we find.
220 | // prediction skipping only really makes sense when there is a single key in the keystore.
221 | let maybe_key = available_keys.into_iter().find(|type_public_pair| {
222 | // Have to convert to a typed NimbusId to pass to the runtime API. Maybe this is a clue
223 | // That I should be passing Vec across the wasm boundary?
224 | if let Ok(nimbus_id) = NimbusId::from_slice(&type_public_pair) {
225 | NimbusApi::can_author(
226 | &*client.runtime_api(),
227 | parent.hash(),
228 | nimbus_id,
229 | slot_number,
230 | parent,
231 | )
232 | .unwrap_or_default()
233 | } else {
234 | false
235 | }
236 | });
237 |
238 | // If there are no eligible keys, print the log, and exit early.
239 | if maybe_key.is_none() {
240 | info!(
241 | target: LOG_TARGET,
242 | "🔮 Skipping candidate production because we are not eligible for slot {}", slot_number
243 | );
244 | }
245 |
246 | maybe_key
247 | }
248 |
249 | pub(crate) fn seal_header(
250 | header: &B::Header,
251 | keystore: &dyn Keystore,
252 | public_pair: &Vec,
253 | crypto_id: &CryptoTypeId,
254 | ) -> DigestItem
255 | where
256 | B: BlockT,
257 | {
258 | let pre_hash = header.hash();
259 |
260 | let raw_sig = Keystore::sign_with(
261 | &*keystore,
262 | NIMBUS_KEY_ID,
263 | *crypto_id,
264 | public_pair,
265 | pre_hash.as_ref(),
266 | )
267 | .expect("Keystore should be able to sign")
268 | .expect("We already checked that the key was present");
269 |
270 | debug!(target: LOG_TARGET, "The signature is \n{:?}", raw_sig);
271 |
272 | let signature = raw_sig
273 | .clone()
274 | .try_into()
275 | .expect("signature bytes produced by keystore should be right length");
276 |
277 | ::nimbus_seal(signature)
278 | }
279 |
280 | #[async_trait::async_trait]
281 | impl ParachainConsensus
282 | for NimbusConsensus
283 | where
284 | B: BlockT,
285 | BI: BlockImport + Send + Sync + 'static,
286 | BE: Backend + Send + Sync + 'static,
287 | PF: Environment + Send + Sync + 'static,
288 | PF::Proposer: Proposer<
289 | B,
290 | Transaction = BI::Transaction,
291 | ProofRecording = EnableProofRecording,
292 | Proof = ::Proof,
293 | >,
294 | ParaClient: ProvideRuntimeApi + Send + Sync + 'static,
295 | ParaClient::Api: NimbusApi,
296 | CIDP: CreateInherentDataProviders + 'static,
297 | DP: DigestsProvider::Hash> + 'static + Send + Sync,
298 | {
299 | async fn produce_candidate(
300 | &mut self,
301 | parent: &B::Header,
302 | relay_parent: PHash,
303 | validation_data: &PersistedValidationData,
304 | ) -> Option> {
305 | // Determine if runtime change
306 | let runtime_upgraded = if *parent.number() > sp_runtime::traits::Zero::zero() {
307 | use sp_api::Core as _;
308 | let previous_runtime_version: sp_api::RuntimeVersion = self
309 | .parachain_client
310 | .runtime_api()
311 | .version(parent.hash())
312 | .ok()?;
313 | let runtime_version: sp_api::RuntimeVersion = self
314 | .parachain_client
315 | .runtime_api()
316 | .version(parent.hash())
317 | .ok()?;
318 |
319 | previous_runtime_version != runtime_version
320 | } else {
321 | false
322 | };
323 |
324 | let maybe_key = if self.skip_prediction || runtime_upgraded {
325 | first_available_key(&*self.keystore)
326 | } else {
327 | first_eligible_key::(
328 | self.parachain_client.clone(),
329 | &*self.keystore,
330 | parent,
331 | validation_data.relay_parent_number,
332 | )
333 | };
334 |
335 | // If there are no eligible keys, print the log, and exit early.
336 | let type_public_pair = match maybe_key {
337 | Some(p) => p,
338 | None => {
339 | return None;
340 | }
341 | };
342 |
343 | let proposer_future = self.proposer_factory.lock().init(&parent);
344 |
345 | let proposer = proposer_future
346 | .await
347 | .map_err(|e| error!(target: LOG_TARGET, error = ?e, "Could not create proposer."))
348 | .ok()?;
349 |
350 | let nimbus_id = NimbusId::from_slice(&type_public_pair)
351 | .map_err(
352 | |e| error!(target: LOG_TARGET, error = ?e, "Invalid Nimbus ID (wrong length)."),
353 | )
354 | .ok()?;
355 |
356 | let inherent_data = self
357 | .inherent_data(
358 | parent.hash(),
359 | &validation_data,
360 | relay_parent,
361 | nimbus_id.clone(),
362 | )
363 | .await?;
364 |
365 | let mut logs = vec![CompatibleDigestItem::nimbus_pre_digest(nimbus_id.clone())];
366 | logs.extend(
367 | self.additional_digests_provider
368 | .provide_digests(nimbus_id, parent.hash()),
369 | );
370 | let inherent_digests = sp_runtime::generic::Digest { logs };
371 |
372 | let Proposal {
373 | block,
374 | storage_changes,
375 | proof,
376 | } = proposer
377 | .propose(
378 | inherent_data,
379 | inherent_digests,
380 | //TODO: Fix this.
381 | Duration::from_millis(500),
382 | // Set the block limit to 50% of the maximum PoV size.
383 | //
384 | // TODO: If we got benchmarking that includes that encapsulates the proof size,
385 | // we should be able to use the maximum pov size.
386 | Some((validation_data.max_pov_size / 2) as usize),
387 | )
388 | .await
389 | .map_err(|e| error!(target: LOG_TARGET, error = ?e, "Proposing failed."))
390 | .ok()?;
391 |
392 | let (header, extrinsics) = block.clone().deconstruct();
393 |
394 | let sig_digest = seal_header::(
395 | &header,
396 | &*self.keystore,
397 | &type_public_pair,
398 | &sr25519::CRYPTO_ID,
399 | );
400 |
401 | let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header.clone());
402 | block_import_params.post_digests.push(sig_digest.clone());
403 | block_import_params.body = Some(extrinsics.clone());
404 | block_import_params.state_action = sc_consensus::StateAction::ApplyChanges(
405 | sc_consensus::StorageChanges::Changes(storage_changes),
406 | );
407 |
408 | // Print the same log line as slots (aura and babe)
409 | info!(
410 | "🔖 Sealed block for proposal at {}. Hash now {:?}, previously {:?}.",
411 | *header.number(),
412 | block_import_params.post_hash(),
413 | header.hash(),
414 | );
415 |
416 | if let Err(err) = self
417 | .block_import
418 | .lock()
419 | .await
420 | .import_block(block_import_params)
421 | .await
422 | {
423 | error!(
424 | target: LOG_TARGET,
425 | at = ?parent.hash(),
426 | error = ?err,
427 | "Error importing built block.",
428 | );
429 |
430 | return None;
431 | }
432 |
433 | // Compute info about the block after the digest is added
434 | let mut post_header = header.clone();
435 | post_header.digest_mut().logs.push(sig_digest.clone());
436 | let post_block = B::new(post_header, extrinsics);
437 |
438 | // Returning the block WITH the seal for distribution around the network.
439 | Some(ParachainCandidate {
440 | block: post_block,
441 | proof,
442 | })
443 | }
444 | }
445 |
446 | /// Paramaters of [`build_relay_chain_consensus`].
447 | ///
448 | /// I briefly tried the async keystore approach, but decided to go sync so I can copy
449 | /// code from Aura. Maybe after it is working, Jeremy can help me go async.
450 | pub struct BuildNimbusConsensusParams {
451 | pub para_id: ParaId,
452 | pub proposer_factory: PF,
453 | pub create_inherent_data_providers: CIDP,
454 | pub block_import: BI,
455 | pub backend: Arc,
456 | pub parachain_client: Arc,
457 | pub keystore: KeystorePtr,
458 | pub skip_prediction: bool,
459 | pub additional_digests_provider: DP,
460 | }
461 |
--------------------------------------------------------------------------------
/nimbus-consensus/src/manual_seal.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Nimbus.
3 |
4 | // Nimbus is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Nimbus is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Nimbus. If not, see .
16 |
17 | use cumulus_primitives_parachain_inherent::{
18 | ParachainInherentData, INHERENT_IDENTIFIER as PARACHAIN_INHERENT_IDENTIFIER,
19 | };
20 | use nimbus_primitives::{
21 | CompatibleDigestItem, DigestsProvider, NimbusApi, NimbusId, NIMBUS_ENGINE_ID,
22 | };
23 | use sc_consensus::BlockImportParams;
24 | use sc_consensus_manual_seal::{ConsensusDataProvider, Error};
25 | use sp_api::{BlockT, HeaderT, ProvideRuntimeApi, TransactionFor};
26 | use sp_application_crypto::ByteArray;
27 | use sp_core::sr25519;
28 | use sp_inherents::InherentData;
29 | use sp_keystore::KeystorePtr;
30 | use sp_runtime::{Digest, DigestItem};
31 | use std::{marker::PhantomData, sync::Arc};
32 |
33 | /// Provides nimbus-compatible pre-runtime digests for use with manual seal consensus
34 | pub struct NimbusManualSealConsensusDataProvider {
35 | /// Shared reference to keystore
36 | pub keystore: KeystorePtr,
37 |
38 | /// Shared reference to the client
39 | pub client: Arc,
40 | // Could have a skip_prediction field here if it becomes desireable
41 | /// Additional digests provider
42 | pub additional_digests_provider: DP,
43 |
44 | pub _phantom: PhantomData
,
45 | }
46 |
47 | impl ConsensusDataProvider for NimbusManualSealConsensusDataProvider
48 | where
49 | B: BlockT,
50 | C: ProvideRuntimeApi + Send + Sync,
51 | C::Api: NimbusApi,
52 | DP: DigestsProvider::Hash> + Send + Sync,
53 | P: Send + Sync,
54 | {
55 | type Transaction = TransactionFor;
56 | type Proof = P;
57 |
58 | fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result {
59 | // Retrieve the relay chain block number to use as the slot number from the parachain inherent
60 | let slot_number = inherents
61 | .get_data::(&PARACHAIN_INHERENT_IDENTIFIER)
62 | .expect("Parachain inherent should decode correctly")
63 | .expect("Parachain inherent should be present because we are mocking it")
64 | .validation_data
65 | .relay_parent_number;
66 |
67 | // Fetch first eligible key from keystore
68 | let maybe_key = crate::first_eligible_key::(
69 | self.client.clone(),
70 | &*self.keystore,
71 | parent,
72 | // For now we author all blocks in slot zero, which is consistent with how we are
73 | // mocking the relay chain height which the runtime uses for slot beacon.
74 | // This should improve. See https://github.com/PureStake/nimbus/issues/3
75 | slot_number,
76 | );
77 |
78 | // If we aren't eligible, return an appropriate error
79 | match maybe_key {
80 | Some(key) => {
81 | let nimbus_id = NimbusId::from_slice(&key).map_err(|_| {
82 | Error::StringError(String::from("invalid nimbus id (wrong length)"))
83 | })?;
84 | let mut logs = vec![CompatibleDigestItem::nimbus_pre_digest(nimbus_id.clone())];
85 | logs.extend(
86 | self.additional_digests_provider
87 | .provide_digests(nimbus_id, parent.hash()),
88 | );
89 | Ok(Digest { logs })
90 | }
91 | None => Err(Error::StringError(String::from(
92 | "no nimbus keys available to manual seal",
93 | ))),
94 | }
95 | }
96 |
97 | // This is where we actually sign with the nimbus key and attach the seal
98 | fn append_block_import(
99 | &self,
100 | _parent: &B::Header,
101 | params: &mut BlockImportParams,
102 | _inherents: &InherentData,
103 | _proof: Self::Proof,
104 | ) -> Result<(), Error> {
105 | // We have to reconstruct the type-public pair which is only communicated through the pre-runtime digest
106 | let claimed_author = params
107 | .header
108 | .digest()
109 | .logs
110 | .iter()
111 | .find_map(|digest| {
112 | match *digest {
113 | // We do not support the older author inherent in manual seal
114 | DigestItem::PreRuntime(id, ref author_id) if id == NIMBUS_ENGINE_ID => {
115 | Some(author_id.clone())
116 | }
117 | _ => None,
118 | }
119 | })
120 | .expect("Expected one pre-runtime digest that contains author id bytes");
121 |
122 | let nimbus_public = NimbusId::from_slice(&claimed_author)
123 | .map_err(|_| Error::StringError(String::from("invalid nimbus id (wrong length)")))?;
124 |
125 | let sig_digest = crate::seal_header::(
126 | ¶ms.header,
127 | &*self.keystore,
128 | &nimbus_public.to_raw_vec(),
129 | &sr25519::CRYPTO_ID,
130 | );
131 |
132 | params.post_digests.push(sig_digest);
133 |
134 | Ok(())
135 | }
136 | }
137 |
--------------------------------------------------------------------------------
/nimbus-primitives/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "nimbus-primitives"
3 | authors = [ "PureStake" ]
4 | description = "Primitive types and traits used in the Nimbus consensus framework"
5 | edition = "2021"
6 | version = "0.9.0"
7 |
8 | [dependencies]
9 | async-trait = { version = "0.1", optional = true }
10 | parity-scale-codec = { version = "3.0.0", default-features = false, features = [ "derive" ] }
11 |
12 | frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
13 | frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
14 | scale-info = { version = "2.0.0", default-features = false, features = [ "derive" ] }
15 | sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
16 | sp-application-crypto = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
17 | sp-inherents = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
18 | sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
19 | sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
20 |
21 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", optional = true, default-features = false, branch = "polkadot-v0.9.43" }
22 |
23 | [features]
24 | default = [ "std" ]
25 | std = [
26 | "async-trait",
27 | "frame-support/std",
28 | "frame-system/std",
29 | "parity-scale-codec/std",
30 | "scale-info/std",
31 | "sp-api/std",
32 | "sp-application-crypto/std",
33 | "sp-inherents/std",
34 | "sp-runtime/std",
35 | "sp-std/std",
36 | ]
37 |
38 | runtime-benchmarks = [ "frame-benchmarking", "sp-runtime/runtime-benchmarks" ]
39 |
40 | try-runtime = [ "frame-support/try-runtime" ]
41 |
--------------------------------------------------------------------------------
/nimbus-primitives/src/digests.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Nimbus.
3 |
4 | // Nimbus is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Nimbus is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Nimbus. If not, see .
16 |
17 | //! A convenient interface over the digests used in nimbus.
18 | //!
19 | //! Currently Nimbus has two digests;
20 | //! 1. A consensus digest that contains the block author identity
21 | //! This information is copied from the author inherent.
22 | //! This may be replaced with a pre-runtime digest in the future.
23 | //! 2. A seal digest that contains a signature over the rest of the
24 | //! block including the first digest.
25 |
26 | use crate::{NimbusId, NimbusSignature, NIMBUS_ENGINE_ID};
27 | use parity_scale_codec::Encode;
28 | use sp_runtime::generic::DigestItem;
29 |
30 | /// A digest item which is usable with aura consensus.
31 | pub trait CompatibleDigestItem: Sized {
32 | /// Construct a pre-runtime digest from the given AuthorId
33 | fn nimbus_pre_digest(author: NimbusId) -> Self;
34 |
35 | /// If this item is a nimbus pre-runtime digest, return the author
36 | fn as_nimbus_pre_digest(&self) -> Option;
37 |
38 | /// Construct a seal digest item from the given signature
39 | fn nimbus_seal(signature: NimbusSignature) -> Self;
40 |
41 | /// If this item is a nimbus seal, return the signature.
42 | fn as_nimbus_seal(&self) -> Option;
43 |
44 | /// This will be deprecated in the future
45 | /// Construct a consensus digest from the given AuthorId
46 | fn nimbus_consensus_digest(author: NimbusId) -> Self;
47 |
48 | /// This will be deprecated in the future
49 | /// If this item is a nimbus consensus digest, return the author
50 | fn as_nimbus_consensus_digest(&self) -> Option;
51 | }
52 |
53 | impl CompatibleDigestItem for DigestItem {
54 | fn nimbus_pre_digest(author: NimbusId) -> Self {
55 | DigestItem::PreRuntime(NIMBUS_ENGINE_ID, author.encode())
56 | }
57 |
58 | fn as_nimbus_pre_digest(&self) -> Option {
59 | self.pre_runtime_try_to(&NIMBUS_ENGINE_ID)
60 | }
61 |
62 | fn nimbus_seal(signature: NimbusSignature) -> Self {
63 | DigestItem::Seal(NIMBUS_ENGINE_ID, signature.encode())
64 | }
65 |
66 | fn as_nimbus_seal(&self) -> Option {
67 | self.seal_try_to(&NIMBUS_ENGINE_ID)
68 | }
69 |
70 | // Remove this once deprecated
71 | fn nimbus_consensus_digest(author: NimbusId) -> Self {
72 | DigestItem::Consensus(NIMBUS_ENGINE_ID, author.encode())
73 | }
74 |
75 | // Remove this once deprecated. I don't think it is used anyway.
76 | // Notice that it calls the pre_runtime helper function.
77 | fn as_nimbus_consensus_digest(&self) -> Option {
78 | self.pre_runtime_try_to(&NIMBUS_ENGINE_ID)
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/nimbus-primitives/src/inherents.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Nimbus.
3 |
4 | // Nimbus is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Nimbus is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Nimbus. If not, see .
16 |
17 | use sp_inherents::{InherentData, InherentIdentifier};
18 |
19 | /// The InherentIdentifier for nimbus's author inherent
20 | pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"author__";
21 |
22 | /// A bare minimum inherent data provider that provides no real data.
23 | /// The inherent is simply used as a way to kick off some computation
24 | /// until https://github.com/paritytech/substrate/pull/10128 lands.
25 | pub struct InherentDataProvider;
26 |
#[cfg(feature = "std")]
#[async_trait::async_trait]
impl sp_inherents::InherentDataProvider for InherentDataProvider {
	async fn provide_inherent_data(
		&self,
		inherent_data: &mut InherentData,
	) -> Result<(), sp_inherents::Error> {
		// The inherent carries no payload; the unit value is enough for the
		// runtime to execute the author-inherent logic.
		inherent_data.put_data(INHERENT_IDENTIFIER, &())
	}

	async fn try_handle_error(
		&self,
		identifier: &InherentIdentifier,
		_error: &[u8],
	) -> Option<Result<(), sp_inherents::Error>> {
		// Don't process errors from other inherents
		if *identifier != INHERENT_IDENTIFIER {
			return None;
		}

		// All errors with the author inherent are fatal
		Some(Err(sp_inherents::Error::Application(Box::from(
			String::from("Error processing dummy nimbus inherent"),
		))))
	}
}
53 |
--------------------------------------------------------------------------------
/nimbus-primitives/src/lib.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Nimbus.
3 |
4 | // Nimbus is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Nimbus is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Nimbus. If not, see .
16 |
17 | //! Nimbus Consensus Primitives
18 | //!
19 | //! Primitive types and traits for working with the Nimbus consensus framework.
20 | //! This code can be built to no_std for use in the runtime
21 |
22 | #![cfg_attr(not(feature = "std"), no_std)]
23 |
24 | use sp_application_crypto::KeyTypeId;
25 | use sp_runtime::generic::DigestItem;
26 | use sp_runtime::traits::BlockNumberProvider;
27 | use sp_runtime::ConsensusEngineId;
28 | #[cfg(feature = "runtime-benchmarks")]
29 | use sp_std::vec::{self, Vec};
30 |
31 | pub mod digests;
32 | mod inherents;
33 |
34 | pub use digests::CompatibleDigestItem;
35 |
36 | pub use inherents::{InherentDataProvider, INHERENT_IDENTIFIER};
37 |
38 | pub trait DigestsProvider {
39 | type Digests: IntoIterator;
40 | fn provide_digests(&self, id: Id, parent: BlockHash) -> Self::Digests;
41 | }
42 |
43 | impl DigestsProvider for () {
44 | type Digests = [DigestItem; 0];
45 | fn provide_digests(&self, _id: Id, _parent: BlockHash) -> Self::Digests {
46 | []
47 | }
48 | }
49 |
50 | impl DigestsProvider for F
51 | where
52 | F: Fn(Id, BlockHash) -> D,
53 | D: IntoIterator,
54 | {
55 | type Digests = D;
56 |
57 | fn provide_digests(&self, id: Id, parent: BlockHash) -> Self::Digests {
58 | (*self)(id, parent)
59 | }
60 | }
61 |
/// The given account ID is the author of the current block.
pub trait EventHandler<Author> {
	//TODO should we be taking ownership here?
	fn note_author(author: Author);
}
67 |
68 | impl EventHandler for () {
69 | fn note_author(_author: T) {}
70 | }
71 |
72 | /// A mechanism for determining the current slot.
73 | /// For now we use u32 as the slot type everywhere. Let's see how long we can get away with that.
74 | pub trait SlotBeacon {
75 | fn slot() -> u32;
76 | #[cfg(feature = "runtime-benchmarks")]
77 | fn set_slot(_slot: u32) {}
78 | }
79 |
80 | /// Anything that can provide a block height can be used as a slot beacon. This could be
81 | /// used in at least two realistic ways.
82 | /// 1. Use your own chain's height as the slot number
83 | /// 2. If you're a parachain, use the relay chain's height as the slot number.
84 | impl> SlotBeacon for T {
85 | fn slot() -> u32 {
86 | Self::current_block_number()
87 | }
88 | #[cfg(feature = "runtime-benchmarks")]
89 | fn set_slot(slot: u32) {
90 | Self::set_block_number(slot);
91 | }
92 | }
93 |
94 | /// PLANNED: A SlotBeacon that starts a new slot based on the timestamp. Behaviorally, this is
95 | /// similar to what aura, babe and company do. Implementation-wise it is different because it
96 | /// depends on the timestamp pallet for its notion of time.
97 | pub struct IntervalBeacon;
98 |
99 | impl SlotBeacon for IntervalBeacon {
100 | fn slot() -> u32 {
101 | todo!()
102 | }
103 | }
104 |
105 | /// Trait to determine whether this author is eligible to author in this slot.
106 | /// This is the primary trait your nimbus filter needs to implement.
107 | ///
108 | /// This is the proposition-logic variant.
109 | /// That is to say the caller specifies an author and the implementation
110 | /// replies whether that author is eligible. This is useful in many cases and is
111 | /// particularly useful when the active set is unbounded.
112 | /// There may be another variant where the caller only supplies a slot and the
113 | /// implementation replies with a complete set of eligible authors.
pub trait CanAuthor<AuthorId> {
	#[cfg(feature = "try-runtime")]
	// With `try-runtime` the local author should always be able to author a block.
	// Parameters are underscored: they are unused in this default body.
	fn can_author(_author: &AuthorId, _slot: &u32) -> bool {
		true
	}
	#[cfg(not(feature = "try-runtime"))]
	fn can_author(author: &AuthorId, slot: &u32) -> bool;
	#[cfg(feature = "runtime-benchmarks")]
	fn get_authors(_slot: &u32) -> Vec<AuthorId> {
		vec![]
	}
	#[cfg(feature = "runtime-benchmarks")]
	fn set_eligible_author(_slot: &u32) {}
}
129 | /// Default implementation where anyone can author.
130 | ///
131 | /// This is identical to Cumulus's RelayChainConsensus
132 | impl CanAuthor for () {
133 | fn can_author(_: &T, _: &u32) -> bool {
134 | true
135 | }
136 | }
137 |
/// A Trait to lookup runtime AccountIds from AuthorIds (probably NimbusIds)
/// The trait is generic over the AccountId, because different runtimes use
/// different notions of AccountId. It is also generic over the AuthorId to
/// support the usecase where the author inherent is used for beneficiary info
/// and contains an AccountId directly.
pub trait AccountLookup {
	// Returns `None` when no account is associated with the given author key.
	fn lookup_account(author: &NimbusId) -> Option;
}
146 |
// A dummy impl used in simple tests.
// It never resolves any author to an account.
impl AccountLookup for () {
	fn lookup_account(_: &NimbusId) -> Option {
		None
	}
}
153 |
/// The ConsensusEngineId for nimbus consensus
/// this same identifier will be used regardless of the filters installed
pub const NIMBUS_ENGINE_ID: ConsensusEngineId = *b"nmbs";

/// The KeyTypeId used in the Nimbus consensus framework regardless of what filters are in place.
/// If this gets well adopted, we could move this definition to sp_core to avoid conflicts.
pub const NIMBUS_KEY_ID: KeyTypeId = KeyTypeId(*b"nmbs");
161 |
// The strongly-typed crypto wrappers to be used by Nimbus in the keystore.
// `app_crypto!` generates `Public`, `Signature`, and (with std) `Pair` types
// bound to the sr25519 scheme under the `NIMBUS_KEY_ID` key type.
mod nimbus_crypto {
	use sp_application_crypto::{app_crypto, sr25519};
	app_crypto!(sr25519, crate::NIMBUS_KEY_ID);
}
167 |
/// A nimbus author identifier (A public key).
pub type NimbusId = nimbus_crypto::Public;

/// A nimbus signature.
pub type NimbusSignature = nimbus_crypto::Signature;

sp_application_crypto::with_pair! {
	/// A nimbus keypair
	// Only available where pairing support is compiled in; the `with_pair!`
	// macro gates this item accordingly.
	pub type NimbusPair = nimbus_crypto::Pair;
}
178 |
sp_api::decl_runtime_apis! {
	/// The runtime api used to predict whether a Nimbus author will be eligible in the given slot
	pub trait NimbusApi {
		// NOTE(review): `relay_parent` is presumably the relay-chain parent block
		// number used as the slot context — confirm against the consensus worker.
		fn can_author(author: NimbusId, relay_parent: u32, parent_header: &Block::Header) -> bool;
	}
}
185 |
--------------------------------------------------------------------------------
/pallets/aura-style-filter/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-aura-style-filter"
3 | authors = [ "PureStake" ]
4 | description = "The Aura (authority round) consensus engine implemented in the Nimbus framework"
5 | edition = "2021"
6 | version = "0.9.0"
7 |
8 | [dependencies]
9 | frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
10 | frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
11 | nimbus-primitives = { path = "../../nimbus-primitives", default-features = false }
12 | parity-scale-codec = { version = "3.0.0", default-features = false, features = [ "derive" ] }
13 | scale-info = { version = "2.0.0", default-features = false, features = [ "derive" ] }
14 | serde = { version = "1.0.101", optional = true, features = [ "derive" ] }
15 | sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
16 | sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
17 | sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
18 |
19 | [features]
20 | default = [ "std" ]
21 | std = [
22 | "frame-support/std",
23 | "frame-system/std",
24 | "nimbus-primitives/std",
25 | "parity-scale-codec/std",
26 | "scale-info/std",
27 | "serde",
28 | "sp-core/std",
29 | "sp-runtime/std",
30 | "sp-std/std",
31 | ]
32 |
33 | try-runtime = [ "frame-support/try-runtime", "nimbus-primitives/try-runtime" ]
34 |
--------------------------------------------------------------------------------
/pallets/aura-style-filter/src/lib.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Nimbus.
3 |
4 | // Nimbus is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Nimbus is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Nimbus. If not, see .
16 |
17 | //! A Nimbus filter for the AuRa consensus algorithm. This filter does not use any entropy, it
18 | //! simply rotates authors in order. A single author is eligible at each slot.
19 | //!
20 | //! In the Substrate ecosystem, this algorithm is typically known as AuRa (authority round).
21 | //! There is a well known implementation in the main Substrate repository and published at
22 | //! https://crates.io/crates/sc-consensus-aura. There are two primary differences between
23 | //! the approaches:
24 | //!
25 | //! 1. This filter leverages all the heavy lifting of the Nimbus framework and consequently is
26 | //! capable of expressing Aura in < 100 lines of code.
27 | //!
28 | //! Whereas sc-consensus-aura includes the entire consensus stack including block signing, digest
29 | //! formats, and slot prediction. This is a lot of overhead for a simple round robin
30 | //! consensus that basically boils down to this function
31 | //! https://github.com/paritytech/substrate/blob/0f849efc/client/consensus/aura/src/lib.rs#L91-L106
32 | //!
33 | //! 2. The Nimbus framework places the author checking logic in the runtime which makes it relatively
34 | //! easy for relay chain validators to confirm the author is valid.
35 | //!
36 | //! Whereas sc-consensus-aura places the author checking offchain. The offchain approach is fine
37 | //! for standalone layer 1 blockchains, but not well suited for verification on the relay chain
38 | //! where validators only run a wasm blob.
39 |
40 | #![cfg_attr(not(feature = "std"), no_std)]
41 |
42 | use frame_support::pallet;
43 | pub use pallet::*;
44 |
#[pallet]
pub mod pallet {

	use frame_support::pallet_prelude::*;
	use sp_std::vec::Vec;

	//TODO Now that the CanAuthor trait takes a slot number, I don't think this even needs to be a pallet.
	// I think it could be just a simple type.
	/// The Author Filter pallet
	// NOTE(review): generic parameters (e.g. `<T>`, `Get<Vec<T::AccountId>>`) appear
	// to have been stripped from this dump — confirm against the original source.
	#[pallet::pallet]
	pub struct Pallet(PhantomData);

	/// Configuration trait of this pallet.
	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// A source for the complete set of potential authors.
		/// The starting point of the filtering.
		type PotentialAuthors: Get>;
	}

	// This code will be called by the author-inherent pallet to check whether the reported author
	// of this block is eligible at this slot. We calculate that result on demand and do not
	// record it in storage.
	impl nimbus_primitives::CanAuthor for Pallet {
		#[cfg(not(feature = "try-runtime"))]
		fn can_author(account: &T::AccountId, slot: &u32) -> bool {
			let active: Vec = T::PotentialAuthors::get();

			// This is the core Aura logic right here.
			// NOTE(review): this panics (remainder by zero / out-of-bounds index)
			// if the potential-author set is empty — confirm upstream guarantees
			// a non-empty set.
			let active_author = &active[*slot as usize % active.len()];

			account == active_author
		}
	}
}
80 |
--------------------------------------------------------------------------------
/pallets/author-inherent/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-author-inherent"
3 | authors = [ "PureStake" ]
4 | description = "This pallet is the core of the in-runtime portion of Nimbus."
5 | edition = "2021"
6 | license = "GPL-3.0-only"
7 | version = "0.9.0"
8 |
9 | [dependencies]
10 | frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
11 | frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
12 | log = { version = "0.4.17", default-features = false }
13 | nimbus-primitives = { path = "../../nimbus-primitives", default-features = false }
14 | parity-scale-codec = { version = "3.0.0", default-features = false, features = [ "derive" ] }
15 | scale-info = { version = "2.0.0", default-features = false, features = [ "derive" ] }
16 | sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
17 | sp-application-crypto = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
18 | sp-inherents = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
19 | sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
20 | sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
21 |
22 | # Benchmarks
23 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", optional = true, default-features = false, branch = "polkadot-v0.9.43" }
24 |
25 | [dev-dependencies]
26 | frame-support-test = { git = "https://github.com/paritytech/substrate", version = "3.0.0", branch = "polkadot-v0.9.43" }
27 | sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
28 | sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.43" }
29 |
30 | [features]
31 | default = [ "std" ]
32 | std = [
33 | 	"frame-benchmarking?/std",
34 | "frame-support/std",
35 | "frame-system/std",
36 | "log/std",
37 | "nimbus-primitives/std",
38 | "parity-scale-codec/std",
39 | "scale-info/std",
40 | "sp-api/std",
41 | "sp-application-crypto/std",
42 | "sp-inherents/std",
43 | "sp-runtime/std",
44 | "sp-std/std",
45 | ]
46 |
47 | runtime-benchmarks = [
48 | "frame-benchmarking",
49 | "nimbus-primitives/runtime-benchmarks",
50 | ]
51 |
--------------------------------------------------------------------------------
/pallets/author-inherent/src/benchmarks.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Moonbeam.
3 |
4 | // Moonbeam is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Moonbeam is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Moonbeam. If not, see .
16 |
17 | #![cfg(feature = "runtime-benchmarks")]
18 |
19 | use crate::{Call, Config, Pallet};
20 | use frame_benchmarking::benchmarks;
21 | use frame_system::RawOrigin;
22 | use nimbus_primitives::CanAuthor;
23 | use nimbus_primitives::SlotBeacon;
benchmarks! {
	// Benchmarks the mandatory inherent that validates authorship eligibility.
	kick_off_authorship_validation {
		// The slot inserted needs to be higher than that already in storage
		T::SlotBeacon::set_slot(100);
		// Make the benchmark's author eligible at that slot so the call takes the
		// happy path rather than panicking.
		// NOTE(review): the turbofish appears mangled in this dump (`Pallet::::` —
		// presumably `Pallet::<T>::`).
		Pallet::::set_eligible_author(&T::SlotBeacon::slot());
	}: _(RawOrigin::None)
}
31 |
--------------------------------------------------------------------------------
/pallets/author-inherent/src/exec.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Nimbus.
3 |
4 | // Nimbus is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Nimbus is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Nimbus. If not, see .
16 |
17 | //! Block executive to be used by relay chain validators when validating parachain blocks built
18 | //! with the nimbus consensus family.
19 |
20 | use frame_support::traits::ExecuteBlock;
21 | use sp_api::{BlockT, HeaderT};
22 | // For some reason I can't get these logs to actually print
23 | use log::debug;
24 | use nimbus_primitives::{digests::CompatibleDigestItem, NimbusId, NIMBUS_ENGINE_ID};
25 | use sp_application_crypto::ByteArray;
26 | use sp_runtime::{generic::DigestItem, RuntimeAppPublic};
27 |
/// Block executive to be used by relay chain validators when validating parachain blocks built
/// with the nimbus consensus family.
///
/// This will strip the seal digest, and confirm that it contains a valid signature
/// By the block author reported in the author inherent.
///
/// Essentially this contains the logic of the verifier plus the inner executive.
/// TODO Design improvement:
/// Can we share code with the verifier?
/// Can this struct take a verifier as an associated type?
/// Or maybe this will just get simpler in general when https://github.com/paritytech/polkadot/issues/2888 lands
// NOTE(review): the struct's generic parameter list (presumably `<T, I>`) appears
// to have been stripped from this dump.
pub struct BlockExecutor(sp_std::marker::PhantomData<(T, I)>);
40 |
// NOTE(review): generic parameters (presumably `impl<Block, I> ExecuteBlock<Block>
// for BlockExecutor<Block, I>`) appear stripped from this dump.
impl ExecuteBlock for BlockExecutor
where
	Block: BlockT,
	I: ExecuteBlock,
{
	/// Verify the Nimbus seal on `block`, then hand execution to the inner executor.
	///
	/// Panics (thereby rejecting the block) if the seal is missing or malformed,
	/// or if the signature does not verify against the author claimed in the
	/// pre-runtime digest.
	fn execute_block(block: Block) {
		let (mut header, extrinsics) = block.deconstruct();

		debug!(target: "executive", "In hacked Executive. Initial digests are {:?}", header.digest());

		// Set the seal aside for checking.
		// The seal must be removed before hashing: the signature below is
		// verified against the header hash *without* the seal digest.
		let seal = header
			.digest_mut()
			.pop()
			.expect("Seal digest is present and is last item");

		debug!(target: "executive", "In hacked Executive. digests after stripping {:?}", header.digest());
		debug!(target: "executive", "The seal we got {:?}", seal);

		// Decode the popped digest item as a Nimbus seal, or reject the block.
		let signature = seal
			.as_nimbus_seal()
			.unwrap_or_else(|| panic!("HeaderUnsealed"));

		debug!(target: "executive", "🪲 Header hash after popping digest {:?}", header.hash());

		debug!(target: "executive", "🪲 Signature according to executive is {:?}", signature);

		// Grab the author information from the preruntime digest
		//TODO use the trait
		let claimed_author = header
			.digest()
			.logs
			.iter()
			.find_map(|digest| match *digest {
				DigestItem::PreRuntime(id, ref author_id) if id == NIMBUS_ENGINE_ID => {
					Some(author_id.clone())
				}
				_ => None,
			})
			.expect("Expected pre-runtime digest that contains author id bytes");

		debug!(target: "executive", "🪲 Claimed Author according to executive is {:?}", claimed_author);

		// Verify the signature over the (seal-stripped) header hash.
		let valid_signature = NimbusId::from_slice(&claimed_author)
			.expect("Expected claimed author to be a valid NimbusId.")
			.verify(&header.hash(), &signature);

		debug!(target: "executive", "🪲 Valid signature? {:?}", valid_signature);

		if !valid_signature {
			panic!("Block signature invalid");
		}

		// Now that we've verified the signature, hand execution off to the inner executor
		// which is probably the normal frame executive.
		I::execute_block(Block::new(header, extrinsics));
	}
}
100 |
--------------------------------------------------------------------------------
/pallets/author-inherent/src/lib.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019-2022 PureStake Inc.
2 | // This file is part of Nimbus.
3 |
4 | // Nimbus is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 |
9 | // Nimbus is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 |
14 | // You should have received a copy of the GNU General Public License
15 | // along with Nimbus. If not, see .
16 |
17 | //! Pallet that allows block authors to include their identity in a block via an inherent.
18 | //! Currently the author does not _prove_ their identity, just states it. So it should not be used
19 | //! for things like equivocation slashing that require authenticated authorship information.
20 |
21 | #![cfg_attr(not(feature = "std"), no_std)]
22 |
23 | use frame_support::traits::{FindAuthor, Get};
24 | use nimbus_primitives::{
25 | AccountLookup, CanAuthor, NimbusId, SlotBeacon, INHERENT_IDENTIFIER, NIMBUS_ENGINE_ID,
26 | };
27 | use parity_scale_codec::{Decode, Encode, FullCodec};
28 | use sp_inherents::{InherentIdentifier, IsFatalError};
29 | use sp_runtime::{ConsensusEngineId, RuntimeString};
30 |
31 | mod exec;
32 | pub use exec::BlockExecutor;
33 |
34 | pub use pallet::*;
35 |
36 | #[cfg(any(test, feature = "runtime-benchmarks"))]
37 | mod benchmarks;
38 |
39 | pub mod weights;
40 |
41 | #[cfg(test)]
42 | mod mock;
43 | #[cfg(test)]
44 | mod tests;
45 |
46 | #[frame_support::pallet]
47 | pub mod pallet {
48 | use super::*;
49 | use crate::weights::WeightInfo;
50 | use frame_support::pallet_prelude::*;
51 | use frame_system::pallet_prelude::*;
52 |
/// The Author Inherent pallet. The core of the nimbus consensus framework's runtime presence.
// NOTE(review): the `<T>` generic parameter appears to have been stripped from this dump.
#[pallet::pallet]
pub struct Pallet(PhantomData);
56 |
#[pallet::config]
pub trait Config: frame_system::Config {
	/// Type used to refer to a block author.
	type AuthorId: sp_std::fmt::Debug + PartialEq + Clone + FullCodec + TypeInfo + MaxEncodedLen;

	/// A type to convert between NimbusId and AuthorId. This is useful when you want to associate
	/// Block authoring behavior with an AuthorId for rewards or slashing. If you do not need to
	/// hold an AuthorId responsible for authoring use `()` which acts as an identity mapping.
	type AccountLookup: AccountLookup;

	/// The final word on whether the reported author can author at this height.
	/// This will be used when executing the inherent. This check is often stricter than the
	/// Preliminary check, because it can use more data.
	/// If the pallet that implements this trait depends on an inherent, that inherent **must**
	/// be included before this one.
	type CanAuthor: CanAuthor;

	/// Some way of determining the current slot for purposes of verifying the author's eligibility
	type SlotBeacon: SlotBeacon;

	/// Weight information for this pallet's dispatchables.
	type WeightInfo: WeightInfo;
}
79 |
// Binds this pallet to the Nimbus application-crypto public key type.
impl sp_runtime::BoundToRuntimeAppPublic for Pallet {
	type Public = NimbusId;
}
83 |
// Errors surfaced by this pallet's author handling.
#[pallet::error]
pub enum Error {
	/// Author already set in block.
	AuthorAlreadySet,
	/// No AccountId was found to be associated with this author
	NoAccountId,
	/// The author in the inherent is not an eligible author.
	CannotBeAuthor,
}
93 |
/// Author of current block.
// Written in `on_initialize` from the block's pre-runtime digest.
#[pallet::storage]
pub type Author = StorageValue<_, T::AuthorId, OptionQuery>;

/// The highest slot that has been seen in the history of this chain.
/// This is a strictly-increasing value.
// Updated by `kick_off_authorship_validation` once the slot is validated.
#[pallet::storage]
pub type HighestSlotSeen = StorageValue<_, u32, ValueQuery>;
102 |
#[pallet::hooks]
// NOTE(review): generic arguments (e.g. `<T: Config>`, `BlockNumberFor<T>`,
// `<frame_system::Pallet<T>>`, `<Author<T>>`) appear stripped from this dump.
impl Hooks> for Pallet {
	fn on_initialize(_: T::BlockNumber) -> Weight {
		// Now extract the author from the digest
		let digest = >::digest();
		let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime());
		if let Some(author) = Self::find_author(pre_runtime_digests) {
			// Store the author so we can confirm eligibility after the inherents have executed
			>::put(&author);
		}

		// Accounts for the single storage write (`Author::put`) above.
		T::DbWeight::get().writes(1)
	}
}
117 |
#[pallet::call]
impl Pallet {
	/// This inherent is a workaround to run code after the "real" inherents have executed,
	/// but before transactions are executed.
	// This should go into on_post_inherents when it is ready https://github.com/paritytech/substrate/pull/10128
	// TODO better weight. For now we just set a somewhat conservative fudge factor
	#[pallet::call_index(0)]
	#[pallet::weight((T::WeightInfo::kick_off_authorship_validation(), DispatchClass::Mandatory))]
	pub fn kick_off_authorship_validation(origin: OriginFor) -> DispatchResultWithPostInfo {
		ensure_none(origin)?;

		// First check that the slot number is valid (greater than the previous highest)
		// Panicking here is deliberate: this is a Mandatory inherent, so an
		// invalid slot must invalidate the whole block, not just this extrinsic.
		let slot = T::SlotBeacon::slot();
		assert!(
			slot > HighestSlotSeen::::get(),
			"Block invalid; Supplied slot number is not high enough"
		);

		// Now check that the author is valid in this slot
		// (again panicking on failure to invalidate the block).
		assert!(
			T::CanAuthor::can_author(&Self::get(), &slot),
			"Block invalid, supplied author is not eligible."
		);

		// Once that is validated, update the stored slot number
		HighestSlotSeen::::put(slot);

		// Inherents are unsigned; no fee is charged.
		Ok(Pays::No.into())
	}
}
148 |
149 | #[pallet::inherent]
150 | impl ProvideInherent for Pallet {
151 | type Call = Call;
152 | type Error = InherentError;
153 | const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
154 |
155 | fn is_inherent_required(_: &InherentData) -> Result