├── .github ├── actions │ ├── check-version-bumped │ │ └── action.yml │ └── install-rust-toolchain │ │ └── action.yml ├── dependabot.yml ├── scripts │ └── readme-cargo-toml-versions-aligned-consensus.sh └── workflows │ ├── cargo-audit.yml │ ├── check-version-bumped-consensus.yml │ ├── check-version-bumped-crypto.yml │ ├── check-version-bumped-mock.yml │ ├── check-version-bumped-rmc.yml │ ├── check-version-bumped-types.yml │ ├── ci.yml │ ├── gh-pages.yml │ ├── post-dod-checklist.yml │ ├── publish-packages.yml │ ├── push-foundation-repo.yml │ └── readme-cargo-toml-versions-aligned-consensus.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── consensus ├── Cargo.toml └── src │ ├── alerts │ ├── handler.rs │ ├── mod.rs │ └── service.rs │ ├── backup │ ├── loader.rs │ ├── mod.rs │ └── saver.rs │ ├── collection │ ├── mod.rs │ └── service.rs │ ├── config.rs │ ├── consensus │ ├── handler.rs │ ├── mod.rs │ └── service.rs │ ├── creation │ ├── collector.rs │ ├── creator.rs │ ├── mod.rs │ └── packer.rs │ ├── dag │ ├── mod.rs │ ├── reconstruction │ │ ├── dag.rs │ │ ├── mod.rs │ │ └── parents.rs │ └── validation.rs │ ├── dissemination │ ├── mod.rs │ ├── responder.rs │ └── task.rs │ ├── extension │ ├── election.rs │ ├── extender.rs │ ├── mod.rs │ └── units.rs │ ├── interface.rs │ ├── lib.rs │ ├── network │ ├── hub.rs │ ├── mod.rs │ └── unit.rs │ ├── task_queue.rs │ ├── terminator.rs │ ├── testing │ ├── alerts.rs │ ├── behind.rs │ ├── byzantine.rs │ ├── crash.rs │ ├── crash_recovery.rs │ ├── creation.rs │ ├── dag.rs │ ├── mod.rs │ └── unreliable.rs │ └── units │ ├── control_hash.rs │ ├── mod.rs │ ├── store.rs │ ├── testing.rs │ └── validator.rs ├── conventions.md ├── cov_report.sh ├── crypto ├── Cargo.toml ├── README.md └── src │ ├── lib.rs │ ├── node.rs │ └── signature.rs ├── docs ├── book.toml └── src │ ├── SUMMARY.md │ ├── aleph_bft_api.md │ ├── differences.md │ ├── how_alephbft_does_it.md │ ├── internals.md │ ├── reliable_broadcast.md │ └── what_is_aleph_bft.md ├── examples ├── blockchain │ ├── .gitignore │ ├── Cargo.toml │ ├── run.sh │ └── src │ │ ├── chain.rs │ │ ├── data.rs │ │ ├── main.rs │ │ └── network.rs └── ordering │ ├── .gitignore │ ├── Cargo.toml │ ├── run.sh │ └── src │ ├── dataio.rs │ ├── main.rs │ └── network.rs ├── gen_cov_data.sh ├── install_cov_tools.sh ├── mock ├── Cargo.toml ├── README.md └── src │ ├── crypto │ ├── keychain.rs │ ├── mod.rs │ ├── signable.rs │ ├── signature.rs │ └── wrappers.rs │ ├── dataio.rs │ ├── hasher.rs │ ├── lib.rs │ ├── network.rs │ └── spawner.rs ├── rmc ├── Cargo.toml ├── README.md └── src │ ├── handler.rs │ ├── lib.rs │ ├── scheduler.rs │ └── service.rs ├── run_local_pipeline.sh ├── rust-toolchain.toml ├── rustfmt.toml └── types ├── Cargo.toml ├── README.md └── src ├── dataio.rs ├── lib.rs ├── network.rs └── tasks.rs /.github/actions/check-version-bumped/action.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Check if version bumped 3 | description: Checks if version in Cargo.toml is bumped 4 | 5 | inputs: 6 | cargo-toml-path: 7 | description: "Path to Cargo.toml" 8 | type: string 9 | required: true 10 | 11 | runs: 12 | using: composite 13 | steps: 14 | - name: Check 15 | run: | 16 | if ! git diff HEAD origin/main -- "${{ inputs.cargo-toml-path }}" | grep -q '^+version ='; then 17 | echo "None of commits in this PR has changed version in ${{ inputs.cargo-toml-path }}!" 
18 | exit 1 19 | fi 20 | shell: bash 21 | -------------------------------------------------------------------------------- /.github/actions/install-rust-toolchain/action.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Install rust toolchain 3 | description: | 4 | Parses rust-toolchain.toml file and installs rust toolchain based on its contents: 5 | * channel, e.g nightly-2022-10-30 6 | * list of targets, e.g. ["wasm32-unknown-unknown"] 7 | * list of components, e.g. ["clippy", "rustfmt"] 8 | Also, this action contains set of inputs that can override any of the above. 9 | Existence of rust-toolchain.toml file is not mandatory, yet channel is then required to be 10 | passed as an input. 11 | inputs: 12 | channel: 13 | description: | 14 | Toolchain channel. It's required only when rust-toolchain.toml file does not specify it. 15 | required: false 16 | targets: 17 | description: Optional. List of targets to install with the given channel. 18 | required: false 19 | components: 20 | description: Optional. List of cargo components to install. 21 | required: false 22 | 23 | runs: 24 | using: composite 25 | steps: 26 | # This step needs to be extracted either to docker image or to setup of self-hosted runner 27 | - name: Install rustup 28 | shell: bash 29 | run: | 30 | if ! command -v rustup &>/dev/null; then 31 | curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL \ 32 | "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y 33 | echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH 34 | fi 35 | 36 | - name: Read channel from rust-toolchain.toml 37 | id: toolchain-channel 38 | uses: SebRollen/toml-action@v1.0.2 39 | with: 40 | file: 'rust-toolchain.toml' 41 | field: 'toolchain.channel' 42 | 43 | - name: Read components from rust-toolchain.toml 44 | id: toolchain-components 45 | uses: SebRollen/toml-action@v1.0.2 46 | with: 47 | file: 'rust-toolchain.toml' 48 | field: 'toolchain.components' 49 | 50 | - name: Read targets from rust-toolchain.toml 51 | id: toolchain-targets 52 | uses: SebRollen/toml-action@v1.0.2 53 | with: 54 | file: 'rust-toolchain.toml' 55 | field: 'toolchain.targets' 56 | 57 | - name: Install rust toolchain 58 | id: install-rust-toolchain 59 | shell: bash 60 | env: 61 | CHANNEL: ${{ inputs.channel || steps.toolchain-channel.outputs.value }} 62 | run: | 63 | if [[ -z "${{ steps.toolchain-channel.outputs.value }}" ]]; then 64 | echo "Could not find value for toolchain.channel in rust-toolchain.toml!" 
65 | exit 1 66 | fi 67 | rustup toolchain install ${{ env.CHANNEL }} 68 | echo "channel=${{ env.CHANNEL }}" >> $GITHUB_OUTPUT 69 | 70 | - name: Add components (optional) 71 | if: inputs.components != '' || steps.toolchain-components.outputs.value != '' 72 | shell: bash 73 | env: 74 | COMPONENTS: ${{ inputs.components || steps.toolchain-components.outputs.value }} 75 | run: | 76 | components=$(echo ${{ env.COMPONENTS }} | tr -d '[]' | sed 's/,/ /g') 77 | for component in $components; do 78 | rustup component add $component 79 | done 80 | 81 | - name: Add targets (optional) 82 | if: inputs.targets != '' || steps.toolchain-targets.outputs.value != '' 83 | shell: bash 84 | env: 85 | TARGETS: ${{ inputs.targets || steps.toolchain-targets.outputs.value }} 86 | run: | 87 | targets=$(echo ${{ env.TARGETS }} | tr -d '[]' | sed 's/,/ /g') 88 | for target in $targets; do 89 | rustup target add $target \ 90 | --toolchain ${{ steps.install-rust-toolchain.outputs.channel }} 91 | done 92 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: cargo 5 | directory: / 6 | schedule: 7 | interval: daily 8 | # UTC time 9 | time: "06:00" 10 | rebase-strategy: disabled 11 | commit-message: 12 | prefix: "A0-3951: " 13 | groups: 14 | all-rust-deps: 15 | patterns: 16 | - "*" 17 | pull-request-branch-name: 18 | separator: "-" 19 | 20 | - package-ecosystem: github-actions 21 | directory: / 22 | schedule: 23 | interval: weekly 24 | day: sunday 25 | # UTC time 26 | time: "06:15" 27 | rebase-strategy: disabled 28 | commit-message: 29 | prefix: "A0-3952: " 30 | groups: 31 | all-github-actions: 32 | patterns: 33 | - "*" 34 | pull-request-branch-name: 35 | separator: "-" 36 | reviewers: 37 | - "Marcin-Radecki" 38 | 39 | -------------------------------------------------------------------------------- /.github/scripts/readme-cargo-toml-versions-aligned-consensus.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | function get_major_version() { 6 | echo "$1" | cut -d '.' -f 1 7 | } 8 | 9 | function get_minor_version() { 10 | echo "$1" | cut -d '.' -f 2 11 | } 12 | 13 | function trim_version() { 14 | grep -e "$1" "$2" | cut -d "=" -f 2 | tr -d "\"^ " 15 | } 16 | 17 | function check_versions() { 18 | if [ "$1" != "$2" ]; then 19 | echo "aleph-bft Cargo's toml $3 version $1 different than README.md's $3 version $2!" 20 | exit 1 21 | fi 22 | } 23 | 24 | cargo_toml_version=$(trim_version '^version =' "consensus/Cargo.toml") 25 | cargo_toml_major_version=$(get_major_version "${cargo_toml_version}") 26 | cargo_toml_minor_version=$(get_minor_version "${cargo_toml_version}") 27 | 28 | readme_version=$(trim_version '\s*aleph-bft =' "README.md") 29 | readme_major_version=$(get_major_version "${readme_version}") 30 | readme_minor_version=$(get_minor_version "${readme_version}") 31 | 32 | check_versions "${cargo_toml_major_version}" "${readme_major_version}" "major" 33 | check_versions "${cargo_toml_minor_version}" "${readme_minor_version}" "minor" 34 | echo "Versions from README and Cargo.toml match." 
35 | -------------------------------------------------------------------------------- /.github/workflows/cargo-audit.yml: -------------------------------------------------------------------------------- 1 | name: cargo-audit 2 | 3 | on: 4 | merge_group: 5 | pull_request: 6 | paths: 7 | - '**/Cargo.toml' 8 | - '**/Cargo.lock' 9 | - '.github/workflows/cargo-audit.yml' 10 | 11 | concurrency: 12 | group: ${{ github.ref }}-${{ github.workflow }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | build: 17 | name: Run `cargo-audit` 18 | runs-on: ubuntu-24.04 19 | steps: 20 | - name: Checkout source code 21 | uses: actions/checkout@v4 22 | 23 | - name: Install cargo audit 24 | shell: bash 25 | run: | 26 | cargo install cargo-audit --locked 27 | 28 | - name: Run `cargo-audit` 29 | uses: actions-rs/audit-check@v1 30 | with: 31 | token: ${{ secrets.GITHUB_TOKEN }} 32 | -------------------------------------------------------------------------------- /.github/workflows/check-version-bumped-consensus.yml: -------------------------------------------------------------------------------- 1 | name: Version bump check for code changes in the aleph-bft package 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | paths: 8 | - 'consensus/src/**' 9 | 10 | concurrency: 11 | group: ${{ github.ref }}-${{ github.workflow }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | main: 16 | runs-on: ubuntu-24.04 17 | steps: 18 | - name: GIT | Checkout 19 | uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 0 22 | 23 | - name: Check if version bumped for 'consensus' 24 | uses: ./.github/actions/check-version-bumped 25 | with: 26 | cargo-toml-path: consensus/Cargo.toml 27 | -------------------------------------------------------------------------------- /.github/workflows/check-version-bumped-crypto.yml: -------------------------------------------------------------------------------- 1 | name: Version bump check for code changes in the aleph-bft-crypto package 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | paths: 8 | - 'crypto/src/**' 9 | 10 | concurrency: 11 | group: ${{ github.ref }}-${{ github.workflow }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | main: 16 | runs-on: ubuntu-24.04 17 | steps: 18 | - name: GIT | Checkout 19 | uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 0 22 | 23 | - name: Check if version bumped for 'crypto' 24 | uses: ./.github/actions/check-version-bumped 25 | with: 26 | cargo-toml-path: crypto/Cargo.toml 27 | -------------------------------------------------------------------------------- /.github/workflows/check-version-bumped-mock.yml: -------------------------------------------------------------------------------- 1 | name: Version bump check for code changes in the aleph-bft-mock package 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | paths: 8 | - 'mock/src/**' 9 | 10 | concurrency: 11 | group: ${{ github.ref }}-${{ github.workflow }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | main: 16 | runs-on: ubuntu-24.04 17 | steps: 18 | - name: GIT | Checkout 19 | uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 0 22 | 23 | - name: Check if version bumped for 'mock' 24 | uses: ./.github/actions/check-version-bumped 25 | with: 26 | cargo-toml-path: mock/Cargo.toml 27 | -------------------------------------------------------------------------------- /.github/workflows/check-version-bumped-rmc.yml: -------------------------------------------------------------------------------- 1 | name: Version bump check for code changes in the aleph-bft-rmc 
package 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | paths: 8 | - 'rmc/src/**' 9 | 10 | concurrency: 11 | group: ${{ github.ref }}-${{ github.workflow }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | main: 16 | runs-on: ubuntu-24.04 17 | steps: 18 | - name: GIT | Checkout 19 | uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 0 22 | 23 | - name: Check if version bumped for 'rmc' 24 | uses: ./.github/actions/check-version-bumped 25 | with: 26 | cargo-toml-path: rmc/Cargo.toml 27 | -------------------------------------------------------------------------------- /.github/workflows/check-version-bumped-types.yml: -------------------------------------------------------------------------------- 1 | name: Version bump check for code changes in the aleph-bft-types package 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | paths: 8 | - 'types/src/**' 9 | 10 | concurrency: 11 | group: ${{ github.ref }}-${{ github.workflow }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | main: 16 | runs-on: ubuntu-24.04 17 | steps: 18 | - name: GIT | Checkout 19 | uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 0 22 | 23 | - name: Check if version bumped for 'types' 24 | uses: ./.github/actions/check-version-bumped 25 | with: 26 | cargo-toml-path: types/Cargo.toml 27 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | merge_group: 5 | push: 6 | pull_request: 7 | 8 | concurrency: 9 | group: ${{ github.ref }}-${{ github.workflow }} 10 | cancel-in-progress: true 11 | 12 | env: 13 | CARGO_TERM_COLOR: always 14 | 15 | defaults: 16 | run: 17 | shell: bash 18 | 19 | jobs: 20 | pr: 21 | name: pull_request 22 | if: "github.event_name == 'pull_request'" 23 | runs-on: ubuntu-24.04 24 | needs: lint 25 | steps: 26 | - name: checkout the source code 27 | uses: actions/checkout@v4 28 | - name: install rustup 29 | uses: ./.github/actions/install-rust-toolchain 30 | - name: check 31 | uses: actions-rs/cargo@v1 32 | with: 33 | command: check 34 | - name: test 35 | uses: actions-rs/cargo@v1 36 | with: 37 | command: test 38 | args: '--lib' 39 | master: 40 | name: push 41 | if: "github.event_name == 'push'" 42 | runs-on: ubuntu-24.04 43 | needs: lint 44 | steps: 45 | - name: checkout the source code 46 | uses: actions/checkout@v4 47 | - name: install rustup 48 | uses: ./.github/actions/install-rust-toolchain 49 | - name: check 50 | uses: actions-rs/cargo@v1 51 | with: 52 | command: check 53 | - name: test 54 | uses: actions-rs/cargo@v1 55 | with: 56 | command: test 57 | args: '--lib' 58 | lint: 59 | name: lint 60 | runs-on: ubuntu-24.04 61 | steps: 62 | - name: checkout the source code 63 | uses: actions/checkout@v4 64 | - name: install rustup 65 | uses: ./.github/actions/install-rust-toolchain 66 | with: 67 | components: rustfmt, clippy 68 | - name: fmt 69 | uses: actions-rs/cargo@v1 70 | with: 71 | command: fmt 72 | args: --all -- --check 73 | - name: clippy 74 | uses: actions-rs/cargo@v1 75 | with: 76 | command: clippy 77 | args: --all-targets --all-features -- -D warnings 78 | -------------------------------------------------------------------------------- /.github/workflows/gh-pages.yml: -------------------------------------------------------------------------------- 1 | name: github pages 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - 'docs/**' 9 | - '.github/workflows/gh-pages.yml' 10 | 11 | jobs: 12 | 
deploy: 13 | runs-on: ubuntu-24.04 14 | steps: 15 | - uses: actions/checkout@v4 16 | - name: Setup mdBook 17 | uses: peaceiris/actions-mdbook@v2 18 | with: 19 | mdbook-version: '0.4.8' 20 | - name: Build the book 21 | working-directory: ./docs 22 | run: mdbook build 23 | - name: Deploy 24 | uses: peaceiris/actions-gh-pages@v4 25 | if: github.ref == 'refs/heads/main' 26 | with: 27 | github_token: ${{ secrets.GITHUB_TOKEN }} 28 | publish_dir: ./docs/book 29 | -------------------------------------------------------------------------------- /.github/workflows/post-dod-checklist.yml: -------------------------------------------------------------------------------- 1 | name: Post DoD checklist 2 | on: [pull_request_target] 3 | jobs: 4 | run: 5 | runs-on: ubuntu-24.04 6 | steps: 7 | - uses: wow-actions/auto-comment@v1 8 | with: 9 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 10 | pullRequestOpened: | 11 | Please make sure the following happened 12 | - [ ] Appropriate tests created 13 | - [ ] Infrastructure updated accordingly 14 | - [ ] Updated existing documentation 15 | - [ ] New documentation created 16 | - [ ] Version bumped if breaking changes 17 | -------------------------------------------------------------------------------- /.github/workflows/publish-packages.yml: -------------------------------------------------------------------------------- 1 | name: Publish to crates.io 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | jobs: 10 | publish: 11 | environment: Autobump version 12 | runs-on: ubuntu-24.04 13 | if: ${{ github.repository == 'Cardinal-Cryptography/AlephBFT'}} 14 | steps: 15 | - name: Publish 16 | if: ${{ github.event_name == 'push' }} 17 | run: 18 | echo "Publishing crates." 19 | 20 | - name: Dry-run 21 | if: ${{ github.event_name != 'push' }} 22 | run: 23 | echo "Dry-run - not publishing crates."
24 | 25 | - uses: actions/checkout@v4 26 | 27 | - uses: ./.github/actions/install-rust-toolchain 28 | 29 | - uses: katyo/publish-crates@v2 30 | with: 31 | registry-token: ${{ secrets.CRATES_IO_TOKEN }} 32 | publish-delay: 30000 33 | dry-run: ${{ github.event_name != 'push' }} 34 | -------------------------------------------------------------------------------- /.github/workflows/push-foundation-repo.yml: -------------------------------------------------------------------------------- 1 | name: Sync Cardinal-Cryptography repo with Aleph-Zero-Foundation repo 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | sync: 10 | runs-on: ubuntu-24.04 11 | if: ${{ github.repository == 'Cardinal-Cryptography/AlephBFT'}} 12 | steps: 13 | - uses: actions/checkout@v4 14 | with: 15 | fetch-depth: 0 16 | ref: "main" 17 | token: ${{ secrets.SYNCAZF }} 18 | - name: Push to Aleph-Zero-Foundation 19 | run: git push https://x-access-token:${{ secrets.SYNCAZF }}@github.com/aleph-zero-foundation/AlephBFT.git 20 | -------------------------------------------------------------------------------- /.github/workflows/readme-cargo-toml-versions-aligned-consensus.yml: -------------------------------------------------------------------------------- 1 | name: Versions are aligned (consensus) 2 | 3 | on: 4 | merge_group: 5 | pull_request: 6 | branches: 7 | - main 8 | paths: 9 | - 'consensus/Cargo.toml' 10 | - 'README.md' 11 | - '.github/workflows/readme-cargo-toml-versions-aligned-consensus.yml' 12 | 13 | concurrency: 14 | group: ${{ github.ref }}-${{ github.workflow }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | readme-cargo-toml-versions-aligned-consensus: 19 | runs-on: ubuntu-24.04 20 | steps: 21 | - name: checkout 22 | uses: actions/checkout@v4 23 | - name: readme-cargo-toml-versions-aligned-consensus 24 | run: ./.github/scripts/readme-cargo-toml-versions-aligned-consensus.sh 25 | shell: bash 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # IDEs 2 | /.idea 3 | /.vscode 4 | .*.swp 5 | 6 | # Rust 7 | .cargo/config 8 | *target 9 | **/*.rs.bk 10 | 11 | # Profiler 12 | *.profdata 13 | 14 | /docs/book 15 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | 5 | # Published packages 6 | "consensus", 7 | "types", 8 | "crypto", 9 | "rmc", 10 | "mock", 11 | 12 | # Examples 13 | "examples/ordering", 14 | "examples/blockchain", 15 | 16 | ] 17 | 18 | [profile.test] 19 | opt-level = 3 20 | -------------------------------------------------------------------------------- /consensus/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aleph-bft" 3 | version = "0.45.4" 4 | edition = "2021" 5 | authors = ["Cardinal Cryptography"] 6 | categories = ["algorithms", "data-structures", "cryptography", "database"] 7 | documentation = "https://docs.rs/?" 8 | homepage = "https://alephzero.org" 9 | repository = "https://github.com/Cardinal-Cryptography/AlephBFT" 10 | keywords = ["asynchronous", "consensus", "bft", "blockchain", "distributed-systems"] 11 | license = "Apache-2.0" 12 | readme = "../README.md" 13 | description = "AlephBFT is an asynchronous and Byzantine fault tolerant consensus protocol aimed at ordering arbitrary messages (transactions). 
It has been designed to continuously operate even in the harshest conditions: with no bounds on message-delivery delays and in the presence of malicious actors. This makes it an excellent fit for blockchain-related applications." 14 | 15 | [dependencies] 16 | aleph-bft-rmc = { path = "../rmc", version = "0.16" } 17 | aleph-bft-types = { path = "../types", version = "0.16" } 18 | anyhow = "1.0" 19 | async-trait = "0.1" 20 | codec = { package = "parity-scale-codec", version = "3.0", default-features = false, features = ["derive"] } 21 | derivative = "2.2.0" 22 | futures = "0.3" 23 | futures-timer = "3.0" 24 | itertools = "0.13" 25 | log = "0.4" 26 | parking_lot = "0.12" 27 | rand = "0.8" 28 | thiserror = "2.0" 29 | 30 | [dev-dependencies] 31 | aleph-bft-mock = { path = "../mock" } 32 | env_logger = "0.11" 33 | tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread", "time"] } 34 | serial_test = "3.2.0" 35 | 36 | [features] 37 | default = ["initial_unit_collection"] 38 | initial_unit_collection = [] 39 | -------------------------------------------------------------------------------- /consensus/src/alerts/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | units::{UncheckedSignedUnit, Unit}, 3 | Data, Hasher, Index, Keychain, MultiKeychain, Multisigned, NodeIndex, PartialMultisignature, 4 | Signable, Signature, UncheckedSigned, 5 | }; 6 | use aleph_bft_rmc::Message as RmcMessage; 7 | use codec::{Decode, Encode}; 8 | use derivative::Derivative; 9 | use parking_lot::RwLock; 10 | use std::ops::Deref; 11 | 12 | mod handler; 13 | mod service; 14 | 15 | pub use handler::Handler; 16 | pub use service::{Service, IO}; 17 | 18 | pub type ForkProof = (UncheckedSignedUnit, UncheckedSignedUnit); 19 | 20 | pub type NetworkMessage = 21 | AlertMessage::Signature, ::PartialMultisignature>; 22 | 23 | #[derive(Debug, Decode, Derivative, Encode)] 24 | #[derivative(Eq, PartialEq, Hash)] 25 | pub struct Alert { 26 | sender: NodeIndex, 27 | proof: ForkProof, 28 | legit_units: Vec>, 29 | #[codec(skip)] 30 | #[derivative(PartialEq = "ignore")] 31 | #[derivative(Hash = "ignore")] 32 | hash: RwLock>, 33 | } 34 | 35 | impl Clone for Alert { 36 | fn clone(&self) -> Self { 37 | let hash = match self.hash.try_read() { 38 | None => None, 39 | Some(guard) => *guard.deref(), 40 | }; 41 | Alert { 42 | sender: self.sender, 43 | proof: self.proof.clone(), 44 | legit_units: self.legit_units.clone(), 45 | hash: RwLock::new(hash), 46 | } 47 | } 48 | } 49 | 50 | impl Alert { 51 | pub fn new( 52 | sender: NodeIndex, 53 | proof: ForkProof, 54 | legit_units: Vec>, 55 | ) -> Alert { 56 | Alert { 57 | sender, 58 | proof, 59 | legit_units, 60 | hash: RwLock::new(None), 61 | } 62 | } 63 | 64 | fn hash(&self) -> H::Hash { 65 | let hash = *self.hash.read(); 66 | match hash { 67 | Some(hash) => hash, 68 | None => { 69 | let hash = self.using_encoded(H::hash); 70 | *self.hash.write() = Some(hash); 71 | hash 72 | } 73 | } 74 | } 75 | 76 | /// Simplified forker check, should only be called for alerts that have already been checked to 77 | /// contain valid proofs. 78 | pub fn forker(&self) -> NodeIndex { 79 | self.proof.0.as_signable().creator() 80 | } 81 | 82 | pub fn included_data(&self) -> Vec { 83 | // Only legit units might end up in the DAG, we can ignore the fork proof. 
84 | self.legit_units 85 | .iter() 86 | .filter_map(|uu| uu.as_signable().data().clone()) 87 | .collect() 88 | } 89 | } 90 | 91 | impl Index for Alert { 92 | fn index(&self) -> NodeIndex { 93 | self.sender 94 | } 95 | } 96 | 97 | impl Signable for Alert { 98 | type Hash = H::Hash; 99 | fn hash(&self) -> Self::Hash { 100 | self.hash() 101 | } 102 | } 103 | 104 | /// A message concerning alerts. 105 | #[derive(Clone, Eq, PartialEq, Hash, Debug, Decode, Encode)] 106 | pub enum AlertMessage { 107 | /// Alert regarding forks, signed by the person claiming misconduct. 108 | ForkAlert(UncheckedSigned, S>), 109 | /// An internal RMC message, together with the id of the sender. 110 | RmcMessage(NodeIndex, RmcMessage), 111 | /// A request by a node for a fork alert identified by the given hash. 112 | AlertRequest(NodeIndex, H::Hash), 113 | } 114 | 115 | impl AlertMessage { 116 | pub fn included_data(&self) -> Vec { 117 | match self { 118 | Self::ForkAlert(unchecked_alert) => unchecked_alert.as_signable().included_data(), 119 | Self::RmcMessage(_, _) => Vec::new(), 120 | Self::AlertRequest(_, _) => Vec::new(), 121 | } 122 | } 123 | } 124 | 125 | // Notifications being sent to consensus, so that it can learn about proven forkers and receive 126 | // legitimized units. 127 | #[derive(Clone, Eq, PartialEq, Hash, Debug, Decode, Encode)] 128 | pub enum ForkingNotification { 129 | Forker(ForkProof), 130 | Units(Vec>), 131 | } 132 | 133 | #[derive(Clone, Debug, Decode, Encode, PartialEq)] 134 | pub enum AlertData { 135 | OwnAlert(Alert), 136 | NetworkAlert(Alert), 137 | MultisignedHash(Multisigned), 138 | } 139 | -------------------------------------------------------------------------------- /consensus/src/alerts/service.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | alerts::{ 3 | handler::{Handler, RmcResponse}, 4 | Alert, AlertMessage, ForkingNotification, NetworkMessage, 5 | }, 6 | Data, Hasher, MultiKeychain, Multisigned, NodeIndex, Receiver, Recipient, Sender, Terminator, 7 | }; 8 | use aleph_bft_rmc::{DoublingDelayScheduler, Message as RmcMessage}; 9 | use futures::{FutureExt, StreamExt}; 10 | use log::{debug, error, trace, warn}; 11 | use std::time::Duration; 12 | 13 | const LOG_TARGET: &str = "AlephBFT-alerter"; 14 | type RmcService = 15 | aleph_bft_rmc::Service>>; 16 | 17 | pub struct Service { 18 | messages_for_network: Sender<(NetworkMessage, Recipient)>, 19 | messages_from_network: Receiver>, 20 | notifications_for_units: Sender>, 21 | alerts_from_units: Receiver>, 22 | node_index: NodeIndex, 23 | exiting: bool, 24 | handler: Handler, 25 | rmc_service: RmcService, 26 | } 27 | 28 | pub struct IO { 29 | pub messages_for_network: Sender<(NetworkMessage, Recipient)>, 30 | pub messages_from_network: Receiver>, 31 | pub notifications_for_units: Sender>, 32 | pub alerts_from_units: Receiver>, 33 | } 34 | 35 | impl Service { 36 | pub fn new(keychain: MK, io: IO, handler: Handler) -> Service { 37 | let IO { 38 | messages_for_network, 39 | messages_from_network, 40 | notifications_for_units, 41 | alerts_from_units, 42 | } = io; 43 | 44 | let node_index = keychain.index(); 45 | let rmc_handler = aleph_bft_rmc::Handler::new(keychain); 46 | let rmc_service = aleph_bft_rmc::Service::new( 47 | DoublingDelayScheduler::new(Duration::from_millis(500)), 48 | rmc_handler, 49 | ); 50 | 51 | Service { 52 | messages_for_network, 53 | messages_from_network, 54 | notifications_for_units, 55 | alerts_from_units, 56 | node_index, 57 | exiting: false, 58 | 
handler, 59 | rmc_service, 60 | } 61 | } 62 | 63 | fn rmc_message_to_network( 64 | &mut self, 65 | message: RmcMessage, 66 | ) { 67 | self.send_message_for_network( 68 | AlertMessage::RmcMessage(self.node_index, message), 69 | Recipient::Everyone, 70 | ); 71 | } 72 | 73 | fn send_notification_for_units( 74 | &mut self, 75 | notification: ForkingNotification, 76 | ) { 77 | if self 78 | .notifications_for_units 79 | .unbounded_send(notification) 80 | .is_err() 81 | { 82 | warn!( 83 | target: LOG_TARGET, 84 | "Channel with forking notifications should be open" 85 | ); 86 | self.exiting = true; 87 | } 88 | } 89 | 90 | fn send_message_for_network( 91 | &mut self, 92 | message: AlertMessage, 93 | recipient: Recipient, 94 | ) { 95 | if self 96 | .messages_for_network 97 | .unbounded_send((message, recipient)) 98 | .is_err() 99 | { 100 | warn!( 101 | target: LOG_TARGET, 102 | "Channel with notifications for network should be open" 103 | ); 104 | self.exiting = true; 105 | } 106 | } 107 | 108 | fn handle_message_from_network( 109 | &mut self, 110 | message: AlertMessage, 111 | ) { 112 | match message { 113 | AlertMessage::ForkAlert(alert) => match self.handler.on_network_alert(alert.clone()) { 114 | Ok((maybe_notification, hash)) => { 115 | if let Some(multisigned) = self.rmc_service.start_rmc(hash) { 116 | self.handle_multisigned(multisigned); 117 | } 118 | if let Some(notification) = maybe_notification { 119 | self.send_notification_for_units(notification); 120 | } 121 | } 122 | Err(error) => debug!(target: LOG_TARGET, "{}", error), 123 | }, 124 | AlertMessage::RmcMessage(sender, message) => { 125 | match self.handler.on_rmc_message(sender, message) { 126 | RmcResponse::RmcMessage(message) => { 127 | if let Some(multisigned) = self.rmc_service.process_message(message) { 128 | self.handle_multisigned(multisigned); 129 | } 130 | } 131 | RmcResponse::AlertRequest(hash, recipient) => { 132 | let message = AlertMessage::AlertRequest(self.node_index, hash); 133 | self.send_message_for_network(message, recipient); 134 | } 135 | RmcResponse::Noop => {} 136 | } 137 | } 138 | AlertMessage::AlertRequest(node, hash) => { 139 | match self.handler.on_alert_request(node, hash) { 140 | Ok((alert, recipient)) => { 141 | self.send_message_for_network(AlertMessage::ForkAlert(alert), recipient); 142 | } 143 | Err(error) => debug!(target: LOG_TARGET, "{}", error), 144 | } 145 | } 146 | } 147 | } 148 | 149 | fn handle_alert_from_consensus(&mut self, alert: Alert) { 150 | trace!(target: LOG_TARGET, "Handling alert {:?}.", alert); 151 | let (message, recipient, hash) = self.handler.on_own_alert(alert.clone()); 152 | self.send_message_for_network(message, recipient); 153 | if let Some(multisigned) = self.rmc_service.start_rmc(hash) { 154 | self.handle_multisigned(multisigned); 155 | } 156 | } 157 | 158 | fn handle_multisigned(&mut self, multisigned: Multisigned) { 159 | match self.handler.alert_confirmed(multisigned.clone()) { 160 | Ok(notification) => { 161 | self.send_notification_for_units(notification); 162 | } 163 | Err(error) => warn!(target: LOG_TARGET, "{}", error), 164 | } 165 | } 166 | 167 | pub async fn run(&mut self, mut terminator: Terminator) { 168 | loop { 169 | futures::select! 
{ 170 | message = self.messages_from_network.next() => match message { 171 | Some(message) => self.handle_message_from_network(message), 172 | None => { 173 | error!(target: LOG_TARGET, "Message stream closed."); 174 | break; 175 | } 176 | }, 177 | alert = self.alerts_from_units.next() => match alert { 178 | Some(alert) => self.handle_alert_from_consensus(alert), 179 | None => { 180 | error!(target: LOG_TARGET, "Alert stream closed."); 181 | break; 182 | } 183 | }, 184 | message = self.rmc_service.next_message().fuse() => { 185 | self.rmc_message_to_network(message); 186 | }, 187 | _ = terminator.get_exit().fuse() => { 188 | debug!(target: LOG_TARGET, "Received exit signal."); 189 | self.exiting = true; 190 | }, 191 | } 192 | if self.exiting { 193 | debug!(target: LOG_TARGET, "Alerter decided to exit."); 194 | terminator.terminate_sync().await; 195 | break; 196 | } 197 | } 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /consensus/src/backup/mod.rs: -------------------------------------------------------------------------------- 1 | pub use loader::BackupLoader; 2 | pub use saver::BackupSaver; 3 | 4 | mod loader; 5 | mod saver; 6 | -------------------------------------------------------------------------------- /consensus/src/backup/saver.rs: -------------------------------------------------------------------------------- 1 | use std::pin::Pin; 2 | 3 | use crate::{ 4 | dag::DagUnit, 5 | units::{UncheckedSignedUnit, WrappedUnit}, 6 | Data, Hasher, MultiKeychain, Receiver, Sender, Terminator, 7 | }; 8 | use codec::Encode; 9 | use futures::{AsyncWrite, AsyncWriteExt, FutureExt, StreamExt}; 10 | use log::{debug, error}; 11 | 12 | const LOG_TARGET: &str = "AlephBFT-backup-saver"; 13 | 14 | /// Component responsible for saving units into backup. 15 | /// It waits for items to appear on its receivers, and writes them to backup. 16 | /// It announces a successful write through an appropriate response sender. 17 | pub struct BackupSaver { 18 | units_from_consensus: Receiver>, 19 | responses_for_consensus: Sender>, 20 | backup: Pin>, 21 | } 22 | 23 | impl BackupSaver { 24 | pub fn new( 25 | units_from_consensus: Receiver>, 26 | responses_for_consensus: Sender>, 27 | backup: W, 28 | ) -> BackupSaver { 29 | BackupSaver { 30 | units_from_consensus, 31 | responses_for_consensus, 32 | backup: Box::pin(backup), 33 | } 34 | } 35 | 36 | pub async fn save_unit(&mut self, unit: &DagUnit) -> Result<(), std::io::Error> { 37 | let unit: UncheckedSignedUnit<_, _, _> = unit.clone().unpack().into(); 38 | self.backup.write_all(&unit.encode()).await?; 39 | self.backup.flush().await 40 | } 41 | 42 | pub async fn run(&mut self, mut terminator: Terminator) { 43 | let mut terminator_exit = false; 44 | loop { 45 | futures::select! 
{ 46 | unit = self.units_from_consensus.next() => { 47 | let item = match unit { 48 | Some(unit) => unit, 49 | None => { 50 | error!(target: LOG_TARGET, "receiver of units to save closed early"); 51 | break; 52 | }, 53 | }; 54 | if let Err(e) = self.save_unit(&item).await { 55 | error!(target: LOG_TARGET, "couldn't save item to backup: {:?}", e); 56 | break; 57 | } 58 | if self.responses_for_consensus.unbounded_send(item).is_err() { 59 | error!(target: LOG_TARGET, "couldn't respond with saved unit to consensus"); 60 | break; 61 | } 62 | }, 63 | _ = terminator.get_exit().fuse() => { 64 | debug!(target: LOG_TARGET, "backup saver received exit signal."); 65 | terminator_exit = true; 66 | } 67 | } 68 | 69 | if terminator_exit { 70 | debug!(target: LOG_TARGET, "backup saver decided to exit."); 71 | terminator.terminate_sync().await; 72 | break; 73 | } 74 | } 75 | } 76 | } 77 | 78 | #[cfg(test)] 79 | mod tests { 80 | use futures::{ 81 | channel::{mpsc, oneshot}, 82 | StreamExt, 83 | }; 84 | 85 | use aleph_bft_mock::{Data, Hasher64, Keychain, Saver}; 86 | 87 | use crate::{ 88 | backup::BackupSaver, 89 | dag::ReconstructedUnit, 90 | units::{creator_set, preunit_to_signed_unit, TestingSignedUnit}, 91 | NodeCount, Terminator, 92 | }; 93 | 94 | type TestUnit = ReconstructedUnit; 95 | type TestBackupSaver = BackupSaver; 96 | struct PrepareSaverResponse { 97 | task: F, 98 | units_for_saver: mpsc::UnboundedSender, 99 | units_from_saver: mpsc::UnboundedReceiver, 100 | exit_tx: oneshot::Sender<()>, 101 | } 102 | 103 | fn prepare_saver() -> PrepareSaverResponse { 104 | let (units_for_saver, units_from_consensus) = mpsc::unbounded(); 105 | let (units_for_consensus, units_from_saver) = mpsc::unbounded(); 106 | let (exit_tx, exit_rx) = oneshot::channel(); 107 | let backup = Saver::new(); 108 | 109 | let task = { 110 | let mut saver: TestBackupSaver = 111 | BackupSaver::new(units_from_consensus, units_for_consensus, backup); 112 | 113 | async move { 114 | saver.run(Terminator::create_root(exit_rx, "saver")).await; 115 | } 116 | }; 117 | 118 | PrepareSaverResponse { 119 | task, 120 | units_for_saver, 121 | units_from_saver, 122 | exit_tx, 123 | } 124 | } 125 | 126 | #[tokio::test] 127 | async fn test_proper_relative_responses_ordering() { 128 | let node_count = NodeCount(5); 129 | let PrepareSaverResponse { 130 | task, 131 | units_for_saver, 132 | mut units_from_saver, 133 | exit_tx, 134 | } = prepare_saver(); 135 | 136 | let handle = tokio::spawn(async { 137 | task.await; 138 | }); 139 | 140 | let creators = creator_set(node_count); 141 | let keychains: Vec<_> = node_count 142 | .into_iterator() 143 | .map(|id| Keychain::new(node_count, id)) 144 | .collect(); 145 | let units: Vec = node_count 146 | .into_iterator() 147 | .map(|id| { 148 | ReconstructedUnit::initial(preunit_to_signed_unit( 149 | creators[id.0].create_unit(0).unwrap(), 150 | 0, 151 | &keychains[id.0], 152 | )) 153 | }) 154 | .collect(); 155 | 156 | for u in units.iter() { 157 | units_for_saver.unbounded_send(u.clone()).unwrap(); 158 | } 159 | 160 | for u in units { 161 | let u_backup = units_from_saver.next().await.unwrap(); 162 | assert_eq!(u, u_backup); 163 | } 164 | 165 | exit_tx.send(()).unwrap(); 166 | handle.await.unwrap(); 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /consensus/src/collection/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::DelaySchedule, 3 | network::UnitMessageTo, 4 | 
units::{UncheckedSignedUnit, Validator}, 5 | Data, Hasher, Keychain, MultiKeychain, NodeIndex, Receiver, Round, Sender, Signable, Signature, 6 | UncheckedSigned, 7 | }; 8 | use codec::{Decode, Encode}; 9 | use futures::{channel::oneshot, Future}; 10 | use std::{ 11 | collections::hash_map::DefaultHasher, 12 | hash::{Hash, Hasher as _}, 13 | }; 14 | 15 | mod service; 16 | 17 | pub use service::{Collection, IO}; 18 | 19 | const LOG_TARGET: &str = "AlephBFT-collection"; 20 | 21 | /// Salt uniquely identifying an initial unit collection instance. 22 | pub type Salt = u64; 23 | 24 | fn generate_salt() -> Salt { 25 | let mut hasher = DefaultHasher::new(); 26 | std::time::Instant::now().hash(&mut hasher); 27 | hasher.finish() 28 | } 29 | 30 | /// A response to the request for the newest unit. 31 | #[derive(Clone, Eq, PartialEq, Hash, Debug, Default, Decode, Encode)] 32 | pub struct NewestUnitResponse { 33 | requester: NodeIndex, 34 | responder: NodeIndex, 35 | unit: Option>, 36 | salt: Salt, 37 | } 38 | 39 | impl Signable for NewestUnitResponse { 40 | type Hash = Vec; 41 | 42 | fn hash(&self) -> Self::Hash { 43 | self.encode() 44 | } 45 | } 46 | 47 | impl crate::Index for NewestUnitResponse { 48 | fn index(&self) -> NodeIndex { 49 | self.responder 50 | } 51 | } 52 | 53 | impl NewestUnitResponse { 54 | /// Create a newest unit response. 55 | pub fn new( 56 | requester: NodeIndex, 57 | responder: NodeIndex, 58 | unit: Option>, 59 | salt: Salt, 60 | ) -> Self { 61 | NewestUnitResponse { 62 | requester, 63 | responder, 64 | unit, 65 | salt, 66 | } 67 | } 68 | 69 | /// The data included in this message, i.e. contents of the unit if any. 70 | pub fn included_data(&self) -> Vec { 71 | match &self.unit { 72 | Some(u) => u.as_signable().included_data(), 73 | None => Vec::new(), 74 | } 75 | } 76 | } 77 | 78 | pub type CollectionResponse = UncheckedSigned< 79 | NewestUnitResponse::Signature>, 80 | ::Signature, 81 | >; 82 | 83 | #[cfg(feature = "initial_unit_collection")] 84 | pub fn initial_unit_collection<'a, H: Hasher, D: Data, MK: MultiKeychain>( 85 | keychain: &'a MK, 86 | validator: &'a Validator, 87 | messages_for_network: Sender>, 88 | starting_round_sender: oneshot::Sender>, 89 | starting_round_from_backup: Round, 90 | responses_from_network: Receiver>, 91 | request_delay: DelaySchedule, 92 | ) -> Result + 'a, ()> { 93 | let collection = Collection::new(keychain, validator); 94 | 95 | let collection = IO::new( 96 | starting_round_sender, 97 | starting_round_from_backup, 98 | responses_from_network, 99 | messages_for_network, 100 | collection, 101 | request_delay, 102 | ); 103 | Ok(collection.run()) 104 | } 105 | 106 | /// A trivial start that doesn't actually perform the initial unit collection. 
107 | #[cfg(not(feature = "initial_unit_collection"))] 108 | pub fn initial_unit_collection( 109 | _keychain: &'a MK, 110 | _validator: &'a Validator, 111 | _messages_for_network: Sender>, 112 | starting_round_sender: oneshot::Sender>, 113 | starting_round_from_backup: Round, 114 | _responses_from_network: Receiver>, 115 | _request_delay: DelaySchedule, 116 | ) -> Result, ()> { 117 | if let Err(e) = starting_round_sender.send(Some(starting_round_from_backup)) { 118 | error!(target: LOG_TARGET, "Unable to send the starting round: {}", e); 119 | return Err(()); 120 | } 121 | Ok(async {}) 122 | } 123 | -------------------------------------------------------------------------------- /consensus/src/consensus/handler.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | alerts::{Alert, ForkingNotification}, 3 | collection::Salt, 4 | consensus::LOG_TARGET, 5 | dag::{Dag, DagResult, DagStatus, DagUnit, Request as ReconstructionRequest}, 6 | dissemination::{Addressed, DisseminationMessage, Responder, TaskManager, TaskManagerStatus}, 7 | extension::Ordering, 8 | units::{UncheckedSignedUnit, Unit, UnitStore, UnitStoreStatus, Validator}, 9 | Data, DelayConfig, Hasher, MultiKeychain, NodeIndex, UnitFinalizationHandler, 10 | }; 11 | use log::{debug, trace}; 12 | use std::{ 13 | cmp::max, 14 | fmt::{Display, Formatter, Result as FmtResult}, 15 | time::Duration, 16 | }; 17 | 18 | /// The main logic of the consensus, minus all the asynchronous components. 19 | pub struct Consensus 20 | where 21 | UFH: UnitFinalizationHandler, 22 | MK: MultiKeychain, 23 | { 24 | store: UnitStore>, 25 | dag: Dag, 26 | responder: Responder, 27 | ordering: Ordering, 28 | task_manager: TaskManager, 29 | } 30 | 31 | /// The status of the consensus, for logging purposes. 32 | pub struct Status { 33 | task_manager_status: TaskManagerStatus, 34 | dag_status: DagStatus, 35 | store_status: UnitStoreStatus, 36 | } 37 | 38 | impl Status { 39 | fn short_report(&self) -> String { 40 | let rounds_behind = max(self.dag_status.top_round(), self.store_status.top_round()) 41 | - self.store_status.top_round(); 42 | match rounds_behind { 43 | (0..=2) => "healthy".to_string(), 44 | (3..) => format!("behind by {rounds_behind} rounds"), 45 | } 46 | } 47 | } 48 | 49 | impl Display for Status { 50 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 51 | write!(f, "{}", self.short_report())?; 52 | write!(f, ";reconstructed DAG: {}", self.store_status)?; 53 | write!(f, ";additional information: {}", self.dag_status)?; 54 | write!(f, ";task manager: {}", self.task_manager_status)?; 55 | Ok(()) 56 | } 57 | } 58 | 59 | type AddressedDisseminationMessage = Addressed>; 60 | 61 | /// The result of some operation within the consensus, requiring either other components should get 62 | /// informed about it, or messages should be sent to the network. 63 | pub struct ConsensusResult { 64 | /// Units that should be sent for backup saving. 65 | pub units: Vec>, 66 | /// Alerts that should be sent to the alerting component. 67 | pub alerts: Vec>, 68 | /// Messages that should be sent to other committee members. 69 | pub messages: Vec>, 70 | } 71 | 72 | impl ConsensusResult { 73 | fn noop() -> Self { 74 | ConsensusResult { 75 | units: Vec::new(), 76 | alerts: Vec::new(), 77 | messages: Vec::new(), 78 | } 79 | } 80 | } 81 | 82 | impl Consensus 83 | where 84 | UFH: UnitFinalizationHandler, 85 | MK: MultiKeychain, 86 | { 87 | /// Create a new Consensus. 
88 | pub fn new( 89 | keychain: MK, 90 | validator: Validator, 91 | finalization_handler: UFH, 92 | delay_config: DelayConfig, 93 | ) -> Self { 94 | let n_members = keychain.node_count(); 95 | let index = keychain.index(); 96 | Consensus { 97 | store: UnitStore::new(n_members), 98 | dag: Dag::new(validator), 99 | responder: Responder::new(keychain), 100 | ordering: Ordering::new(finalization_handler), 101 | task_manager: TaskManager::new(index, n_members, delay_config), 102 | } 103 | } 104 | 105 | fn handle_dag_result( 106 | &mut self, 107 | result: DagResult, 108 | ) -> ConsensusResult { 109 | let DagResult { 110 | units, 111 | alerts, 112 | requests, 113 | } = result; 114 | for request in requests { 115 | self.task_manager.add_request(request); 116 | } 117 | let messages = self.trigger_tasks(); 118 | ConsensusResult { 119 | units, 120 | alerts, 121 | messages, 122 | } 123 | } 124 | 125 | /// Process a unit received (usually) from the network. 126 | pub fn process_incoming_unit( 127 | &mut self, 128 | unit: UncheckedSignedUnit, 129 | ) -> ConsensusResult { 130 | let result = self.dag.add_unit(unit, &self.store); 131 | self.handle_dag_result(result) 132 | } 133 | 134 | /// Process a request received from the network. 135 | pub fn process_request( 136 | &mut self, 137 | request: ReconstructionRequest, 138 | node_id: NodeIndex, 139 | ) -> Option> { 140 | match self.responder.handle_request(request, &self.store) { 141 | Ok(response) => Some(Addressed::addressed_to(response.into(), node_id)), 142 | Err(err) => { 143 | debug!(target: LOG_TARGET, "Not answering request from node {:?}: {}.", node_id, err); 144 | None 145 | } 146 | } 147 | } 148 | 149 | /// Process a parents response. 150 | pub fn process_parents( 151 | &mut self, 152 | u_hash: ::Hash, 153 | parents: Vec>, 154 | ) -> ConsensusResult { 155 | if self.store.unit(&u_hash).is_some() { 156 | trace!(target: LOG_TARGET, "We got parents response but already imported the unit."); 157 | return ConsensusResult::noop(); 158 | } 159 | let result = self.dag.add_parents(u_hash, parents, &self.store); 160 | self.handle_dag_result(result) 161 | } 162 | 163 | /// Process a newest unit request. 164 | pub fn process_newest_unit_request( 165 | &mut self, 166 | salt: Salt, 167 | node_id: NodeIndex, 168 | ) -> AddressedDisseminationMessage { 169 | Addressed::addressed_to( 170 | self.responder 171 | .handle_newest_unit_request(node_id, salt, &self.store) 172 | .into(), 173 | node_id, 174 | ) 175 | } 176 | 177 | /// Process a forking notification. 178 | pub fn process_forking_notification( 179 | &mut self, 180 | notification: ForkingNotification, 181 | ) -> ConsensusResult { 182 | let result = self 183 | .dag 184 | .process_forking_notification(notification, &self.store); 185 | self.handle_dag_result(result) 186 | } 187 | 188 | /// What to do once a unit has been securely backed up on disk. 189 | pub fn on_unit_backup_saved( 190 | &mut self, 191 | unit: DagUnit, 192 | ) -> Option> { 193 | let unit_hash = unit.hash(); 194 | self.store.insert(unit.clone()); 195 | self.dag.finished_processing(&unit_hash); 196 | self.ordering.add_unit(unit.clone()); 197 | self.task_manager.add_unit(&unit) 198 | } 199 | 200 | /// When should `trigger_tasks` be called next. 201 | pub fn next_tick(&self) -> Duration { 202 | self.task_manager.next_tick() 203 | } 204 | 205 | /// Trigger all the ready tasks and get all the messages that should be sent now. 
206 | pub fn trigger_tasks( 207 | &mut self, 208 | ) -> Vec> { 209 | self.task_manager 210 | .trigger_tasks(&self.store, self.dag.processing_units()) 211 | } 212 | 213 | /// The status of the consensus handler, for logging purposes. 214 | pub fn status(&self) -> Status { 215 | Status { 216 | dag_status: self.dag.status(), 217 | store_status: self.store.status(), 218 | task_manager_status: self.task_manager.status(), 219 | } 220 | } 221 | } 222 | -------------------------------------------------------------------------------- /consensus/src/creation/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::Config, 3 | units::{PreUnit, SignedUnit, Unit}, 4 | DataProvider, MultiKeychain, Receiver, Round, Sender, Terminator, 5 | }; 6 | use futures::{ 7 | channel::{ 8 | mpsc::{SendError, TrySendError}, 9 | oneshot, 10 | }, 11 | FutureExt, StreamExt, 12 | }; 13 | use futures_timer::Delay; 14 | use log::{debug, error, trace, warn}; 15 | 16 | mod collector; 17 | mod creator; 18 | mod packer; 19 | 20 | pub use creator::Creator; 21 | use packer::Packer; 22 | 23 | const LOG_TARGET: &str = "AlephBFT-creator"; 24 | 25 | enum CreatorError { 26 | OutChannelClosed(SendError), 27 | ParentsChannelClosed, 28 | } 29 | 30 | impl From> for CreatorError { 31 | fn from(e: TrySendError) -> Self { 32 | Self::OutChannelClosed(e.into_send_error()) 33 | } 34 | } 35 | 36 | pub struct IO { 37 | pub incoming_parents: Receiver, 38 | pub outgoing_units: Sender>, 39 | pub data_provider: DP, 40 | } 41 | 42 | async fn create_unit( 43 | round: Round, 44 | creator: &mut Creator, 45 | incoming_parents: &mut Receiver, 46 | ) -> Result, CreatorError> { 47 | loop { 48 | match creator.create_unit(round) { 49 | Ok(unit) => return Ok(unit), 50 | Err(err) => { 51 | trace!(target: LOG_TARGET, "Creator unable to create a new unit at round {}: {}.", round, err) 52 | } 53 | } 54 | process_unit(creator, incoming_parents).await?; 55 | } 56 | } 57 | 58 | /// Tries to process a single parent from given `incoming_parents` receiver. 59 | /// Returns error when `incoming_parents` channel is closed. 60 | async fn process_unit( 61 | creator: &mut Creator, 62 | incoming_parents: &mut Receiver, 63 | ) -> anyhow::Result<(), CreatorError> { 64 | let unit = incoming_parents 65 | .next() 66 | .await 67 | .ok_or(CreatorError::ParentsChannelClosed)?; 68 | creator.add_unit(&unit); 69 | Ok(()) 70 | } 71 | 72 | async fn keep_processing_units( 73 | creator: &mut Creator, 74 | incoming_parents: &mut Receiver, 75 | ) -> anyhow::Result<(), CreatorError> { 76 | loop { 77 | process_unit(creator, incoming_parents).await?; 78 | } 79 | } 80 | 81 | async fn keep_processing_units_until( 82 | creator: &mut Creator, 83 | incoming_parents: &mut Receiver, 84 | until: Delay, 85 | ) -> anyhow::Result<(), CreatorError> { 86 | futures::select! { 87 | result = keep_processing_units(creator, incoming_parents).fuse() => { 88 | result? 89 | }, 90 | _ = until.fuse() => { 91 | debug!(target: LOG_TARGET, "Delay passed."); 92 | }, 93 | } 94 | Ok(()) 95 | } 96 | 97 | /// A process responsible for creating new units. It receives all the units added locally to the Dag 98 | /// via the `incoming_parents` channel. 
It creates units according to an internal strategy respecting 99 | /// always the following constraints: if round is equal to 0, U has no parents, otherwise for a unit U of round r > 0 100 | /// - all U's parents are from round (r-1), 101 | /// - all U's parents are created by different nodes, 102 | /// - one of U's parents is the (r-1)-round unit by U's creator, 103 | /// - U has > floor(2*N/3) parents. 104 | /// - U will appear in the channel only if all U's parents appeared there before 105 | /// 106 | /// The currently implemented strategy creates the unit U according to a delay schedule and when enough 107 | /// candidates for parents are available for all the above constraints to be satisfied. 108 | /// 109 | /// We refer to the documentation https://cardinal-cryptography.github.io/AlephBFT/internals.html 110 | /// Section 5.1 for a discussion of this component. 111 | pub async fn run( 112 | conf: Config, 113 | mut io: IO, 114 | keychain: MK, 115 | mut starting_round: oneshot::Receiver>, 116 | mut terminator: Terminator, 117 | ) { 118 | futures::select! { 119 | _ = read_starting_round_and_run_creator(conf, &mut io, keychain, &mut starting_round).fuse() => 120 | debug!(target: LOG_TARGET, "Creator is about to finish."), 121 | _ = terminator.get_exit().fuse() => 122 | debug!(target: LOG_TARGET, "Received an exit signal."), 123 | } 124 | 125 | terminator.terminate_sync().await; 126 | } 127 | 128 | async fn read_starting_round_and_run_creator( 129 | conf: Config, 130 | io: &mut IO, 131 | keychain: MK, 132 | starting_round: &mut oneshot::Receiver>, 133 | ) { 134 | let maybe_round = starting_round.await; 135 | let starting_round = match maybe_round { 136 | Ok(Some(round)) => round, 137 | Ok(None) => { 138 | warn!(target: LOG_TARGET, "None starting round provided. Exiting."); 139 | return; 140 | } 141 | Err(e) => { 142 | error!(target: LOG_TARGET, "Starting round not provided: {}", e); 143 | return; 144 | } 145 | }; 146 | 147 | if let Err(err) = run_creator(conf, io, keychain, starting_round).await { 148 | match err { 149 | CreatorError::OutChannelClosed(e) => { 150 | warn!(target: LOG_TARGET, "Notification send error: {}. Exiting.", e) 151 | } 152 | CreatorError::ParentsChannelClosed => { 153 | debug!(target: LOG_TARGET, "Incoming parent channel closed, exiting.") 154 | } 155 | } 156 | } 157 | } 158 | 159 | async fn run_creator( 160 | conf: Config, 161 | io: &mut IO, 162 | keychain: MK, 163 | starting_round: Round, 164 | ) -> anyhow::Result<(), CreatorError> { 165 | let node_id = conf.node_ix(); 166 | let n_members = conf.n_members(); 167 | let create_delay = conf.delay_config().unit_creation_delay.clone(); 168 | let max_round = conf.max_round(); 169 | let session_id = conf.session_id(); 170 | let mut creator = Creator::new(node_id, n_members); 171 | let packer = Packer::new(keychain, session_id); 172 | let incoming_parents = &mut io.incoming_parents; 173 | let outgoing_units = &io.outgoing_units; 174 | let data_provider = &mut io.data_provider; 175 | 176 | debug!(target: LOG_TARGET, "Creator starting from round {}", starting_round); 177 | for round in starting_round..max_round { 178 | // Skip waiting if someone created a unit of a higher round. 179 | // In such a case at least 2/3 nodes created units from this round so we aren't skipping a 180 | // delay we should observe. 
181 | let skip_delay = creator.current_round() > round; 182 | if !skip_delay { 183 | let delay = Delay::new(create_delay(round.into())); 184 | 185 | keep_processing_units_until(&mut creator, incoming_parents, delay).await?; 186 | } 187 | 188 | let preunit = create_unit(round, &mut creator, incoming_parents).await?; 189 | trace!(target: LOG_TARGET, "Created a new preunit {:?} at round {:?}.", preunit, round); 190 | let data = data_provider.get_data().await; 191 | trace!(target: LOG_TARGET, "Received data: {:?}.", data); 192 | let unit = packer.pack(preunit, data); 193 | 194 | outgoing_units.unbounded_send(unit)?; 195 | } 196 | 197 | warn!(target: LOG_TARGET, "Maximum round reached. Not creating another unit."); 198 | Ok(()) 199 | } 200 | -------------------------------------------------------------------------------- /consensus/src/creation/packer.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | units::{FullUnit, PreUnit, SignedUnit}, 3 | Data, Hasher, MultiKeychain, SessionId, Signed, 4 | }; 5 | 6 | /// The component responsible for packing Data into PreUnits, 7 | /// and signing the outcome, thus creating SignedUnits that are sent back to consensus. 8 | pub struct Packer { 9 | keychain: MK, 10 | session_id: SessionId, 11 | } 12 | 13 | impl Packer { 14 | pub fn new(keychain: MK, session_id: SessionId) -> Self { 15 | Packer { 16 | keychain, 17 | session_id, 18 | } 19 | } 20 | 21 | pub fn pack( 22 | &self, 23 | preunit: PreUnit, 24 | data: Option, 25 | ) -> SignedUnit { 26 | Signed::sign( 27 | FullUnit::new(preunit, data, self.session_id), 28 | &self.keychain, 29 | ) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /consensus/src/dissemination/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | collection::{NewestUnitResponse, Salt}, 3 | dag::Request as ReconstructionRequest, 4 | network::UnitMessage, 5 | units::UncheckedSignedUnit, 6 | Data, Hasher, NodeIndex, Recipient, Signature, UncheckedSigned, 7 | }; 8 | 9 | mod responder; 10 | mod task; 11 | 12 | pub use responder::Responder; 13 | pub use task::{Manager as TaskManager, ManagerStatus as TaskManagerStatus}; 14 | 15 | const LOG_TARGET: &str = "AlephBFT-dissemination"; 16 | 17 | /// Some form of message with the intended recipients. 18 | #[derive(Eq, PartialEq, Debug, Clone)] 19 | pub struct Addressed { 20 | message: T, 21 | recipients: Vec, 22 | } 23 | 24 | impl Addressed { 25 | /// Message with the given recipients. 26 | pub fn new(message: T, recipients: Vec) -> Self { 27 | Addressed { 28 | message, 29 | recipients, 30 | } 31 | } 32 | 33 | /// Message with the single specified recipient. 34 | pub fn addressed_to(message: T, node_id: NodeIndex) -> Self { 35 | Addressed::new(message, vec![Recipient::Node(node_id)]) 36 | } 37 | 38 | /// Message that should be broadcast. 39 | pub fn broadcast(message: T) -> Self { 40 | Addressed::new(message, vec![Recipient::Everyone]) 41 | } 42 | 43 | /// All the recipients of this message. 44 | pub fn recipients(&self) -> &Vec { 45 | &self.recipients 46 | } 47 | 48 | /// The associated message. 49 | pub fn message(&self) -> &T { 50 | &self.message 51 | } 52 | 53 | /// Convert the underlying message. Cannot be done through a `From` implementation due to it 54 | /// overriding the blanked identity `From` implementation. 
55 | pub fn into>(self) -> Addressed { 56 | let Addressed { 57 | message, 58 | recipients, 59 | } = self; 60 | Addressed { 61 | message: message.into(), 62 | recipients, 63 | } 64 | } 65 | } 66 | 67 | /// Responses to requests. 68 | #[derive(Eq, PartialEq, Debug, Clone)] 69 | pub enum DisseminationResponse { 70 | /// Response to a coord request, just a single unit. 71 | Coord(UncheckedSignedUnit), 72 | /// All the parents of the specified unit. 73 | Parents(H::Hash, Vec>), 74 | /// The newest unit response for initial unit collection. 75 | NewestUnit(UncheckedSigned, S>), 76 | } 77 | 78 | /// A message that has to be passed between committee members for consensus to work. 79 | #[derive(Eq, PartialEq, Debug, Clone)] 80 | pub enum DisseminationMessage { 81 | /// Unit, either broadcast or in response to a coord request. 82 | Unit(UncheckedSignedUnit), 83 | /// Request coming from the specified node for something. 84 | Request(NodeIndex, ReconstructionRequest), 85 | /// Response to a parent request. 86 | ParentsResponse(H::Hash, Vec>), 87 | /// Initial unit collection request. 88 | NewestUnitRequest(NodeIndex, Salt), 89 | /// Response to initial unit collection. 90 | NewestUnitResponse(UncheckedSigned, S>), 91 | } 92 | 93 | impl From> 94 | for DisseminationMessage 95 | { 96 | fn from(message: UnitMessage) -> Self { 97 | use DisseminationMessage::*; 98 | match message { 99 | UnitMessage::Unit(u) => Unit(u), 100 | UnitMessage::CoordRequest(node_id, coord) => { 101 | Request(node_id, ReconstructionRequest::Coord(coord)) 102 | } 103 | UnitMessage::ParentsRequest(node_id, hash) => { 104 | Request(node_id, ReconstructionRequest::ParentsOf(hash)) 105 | } 106 | UnitMessage::ParentsResponse(h, units) => ParentsResponse(h, units), 107 | UnitMessage::NewestRequest(node_id, salt) => NewestUnitRequest(node_id, salt), 108 | UnitMessage::NewestResponse(response) => NewestUnitResponse(response), 109 | } 110 | } 111 | } 112 | 113 | impl From> 114 | for UnitMessage 115 | { 116 | fn from(message: DisseminationMessage) -> Self { 117 | use DisseminationMessage::*; 118 | match message { 119 | Unit(u) => UnitMessage::Unit(u), 120 | Request(node_id, ReconstructionRequest::Coord(coord)) => { 121 | UnitMessage::CoordRequest(node_id, coord) 122 | } 123 | Request(node_id, ReconstructionRequest::ParentsOf(hash)) => { 124 | UnitMessage::ParentsRequest(node_id, hash) 125 | } 126 | ParentsResponse(h, units) => UnitMessage::ParentsResponse(h, units), 127 | NewestUnitRequest(node_id, salt) => UnitMessage::NewestRequest(node_id, salt), 128 | NewestUnitResponse(response) => UnitMessage::NewestResponse(response), 129 | } 130 | } 131 | } 132 | 133 | impl From> 134 | for DisseminationMessage 135 | { 136 | fn from(message: DisseminationResponse) -> Self { 137 | use DisseminationMessage::*; 138 | use DisseminationResponse::*; 139 | match message { 140 | Coord(u) => Unit(u), 141 | Parents(h, units) => ParentsResponse(h, units), 142 | NewestUnit(response) => NewestUnitResponse(response), 143 | } 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /consensus/src/extension/extender.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | extension::{ 3 | election::{ElectionResult, RoundElection}, 4 | units::Units, 5 | }, 6 | units::UnitWithParents, 7 | Round, 8 | }; 9 | 10 | pub struct Extender { 11 | election: Option>, 12 | units: Units, 13 | round: Round, 14 | } 15 | 16 | impl Extender { 17 | /// Create a new extender with no units. 
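/// Elections start at round 0 and advance to the next round only once a head has been chosen.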
18 | pub fn new() -> Self { 19 | Extender { 20 | election: None, 21 | units: Units::new(), 22 | round: 0, 23 | } 24 | } 25 | 26 | fn handle_election_result(&mut self, result: ElectionResult) -> Option> { 27 | use ElectionResult::*; 28 | match result { 29 | // Wait for more voters for this election. 30 | Pending(election) => { 31 | self.election = Some(election); 32 | None 33 | } 34 | // Advance to the next round and return the ordered batch. 35 | Elected(head) => { 36 | self.round += 1; 37 | Some(self.units.remove_batch(&head)) 38 | } 39 | } 40 | } 41 | 42 | /// Add a unit to the extender. Might return several batches of ordered units as a result. 43 | pub fn add_unit(&mut self, u: U) -> Vec> { 44 | let hash = u.hash(); 45 | self.units.add_unit(u); 46 | let unit = self.units.get(&hash).expect("just added"); 47 | let mut result = Vec::new(); 48 | // If we have an ongoing election try to finish it. 49 | if let Some(election) = self.election.take() { 50 | if let Some(batch) = self.handle_election_result(election.add_voter(unit, &self.units)) 51 | { 52 | result.push(batch); 53 | } 54 | } 55 | // Try finding another election to be working on. 56 | while self.election.is_none() { 57 | match RoundElection::for_round(self.round, &self.units) { 58 | Ok(election_result) => { 59 | if let Some(batch) = self.handle_election_result(election_result) { 60 | result.push(batch); 61 | } 62 | } 63 | // Not enough voters yet. 64 | Err(()) => break, 65 | } 66 | } 67 | result 68 | } 69 | } 70 | 71 | #[cfg(test)] 72 | mod test { 73 | use crate::units::{minimal_reconstructed_dag_units_up_to, Unit, UnitWithParents}; 74 | use crate::{ 75 | extension::extender::Extender, units::random_full_parent_reconstrusted_units_up_to, 76 | NodeCount, Round, 77 | }; 78 | use aleph_bft_mock::Keychain; 79 | 80 | #[test] 81 | fn easy_elections() { 82 | let mut extender = Extender::new(); 83 | let n_members = NodeCount(4); 84 | let max_round: Round = 43; 85 | let session_id = 2137; 86 | let keychains = Keychain::new_vec(n_members); 87 | let mut batches = Vec::new(); 88 | for round_units in random_full_parent_reconstrusted_units_up_to( 89 | max_round, n_members, session_id, &keychains, 90 | ) { 91 | for unit in round_units { 92 | batches.append(&mut extender.add_unit(unit)); 93 | } 94 | } 95 | assert_eq!(batches.len(), (max_round - 3).into()); 96 | assert_eq!(batches[0].len(), 1); 97 | for batch in batches.iter().skip(1) { 98 | assert_eq!(batch.len(), n_members.0); 99 | } 100 | } 101 | 102 | #[test] 103 | fn given_minimal_dag_with_orphaned_node_when_producing_batches_have_correct_length() { 104 | let mut extender = Extender::new(); 105 | let n_members = NodeCount(4); 106 | let threshold = n_members.consensus_threshold(); 107 | let max_round: Round = 11; 108 | let session_id = 2137; 109 | let keychains = Keychain::new_vec(n_members); 110 | let mut batches = Vec::new(); 111 | let (dag, _) = 112 | minimal_reconstructed_dag_units_up_to(max_round, n_members, session_id, &keychains); 113 | for round in dag { 114 | for unit in round { 115 | batches.append(&mut extender.add_unit(unit)); 116 | } 117 | } 118 | assert_eq!(batches.len(), (max_round - 3).into()); 119 | assert_eq!(batches[0].len(), 1); 120 | assert_eq!(batches[0][0].round(), 0); 121 | for batch in batches.iter().skip(1) { 122 | assert!(batch.len() == threshold.0 || batch.len() == n_members.0); 123 | if batch.len() == n_members.0 { 124 | // the batch that should have ancient unit 125 | assert!(batch.iter().any(|unit| unit.parents().count() == 0)); 126 | } 127 | } 128 | } 129 | } 130 
| -------------------------------------------------------------------------------- /consensus/src/extension/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{dag::DagUnit, MultiKeychain, UnitFinalizationHandler}; 2 | 3 | mod election; 4 | mod extender; 5 | mod units; 6 | 7 | use extender::Extender; 8 | 9 | /// A struct responsible for executing the Consensus protocol on a local copy of the Dag. 10 | /// It receives units which are guaranteed to eventually appear in the Dags 11 | /// of all honest nodes. The static Aleph Consensus algorithm is then run on this Dag in order 12 | /// to finalize subsequent rounds of the Dag. More specifically whenever a new unit is received 13 | /// this process checks whether a new round can be finalized and if so, it computes the batch of 14 | /// units that should be finalized, and uses the finalization handler to report that to the user. 15 | /// 16 | /// We refer to the documentation https://cardinal-cryptography.github.io/AlephBFT/internals.html 17 | /// Section 5.4 for a discussion of this component. 18 | pub struct Ordering { 19 | extender: Extender>, 20 | finalization_handler: UFH, 21 | } 22 | 23 | impl Ordering { 24 | pub fn new(finalization_handler: UFH) -> Self { 25 | let extender = Extender::new(); 26 | Ordering { 27 | extender, 28 | finalization_handler, 29 | } 30 | } 31 | 32 | pub fn add_unit(&mut self, unit: DagUnit) { 33 | for batch in self.extender.add_unit(unit) { 34 | self.finalization_handler 35 | .batch_finalized(batch.into_iter().map(|unit| unit.into()).collect()); 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /consensus/src/extension/units.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{HashMap, VecDeque}; 2 | 3 | use crate::{ 4 | units::{HashFor, UnitWithParents}, 5 | Round, 6 | }; 7 | 8 | /// Units kept in a way optimized for easy batch extraction. 9 | pub struct Units { 10 | units: HashMap, U>, 11 | by_round: HashMap>>, 12 | highest_round: Round, 13 | } 14 | 15 | impl Units { 16 | /// Create empty unit store. 17 | pub fn new() -> Self { 18 | Units { 19 | units: HashMap::new(), 20 | by_round: HashMap::new(), 21 | highest_round: 0, 22 | } 23 | } 24 | 25 | /// Add a unit to the store. 26 | pub fn add_unit(&mut self, u: U) { 27 | let round = u.round(); 28 | if round > self.highest_round { 29 | self.highest_round = round; 30 | } 31 | 32 | self.by_round.entry(round).or_default().push(u.hash()); 33 | self.units.insert(u.hash(), u); 34 | } 35 | 36 | pub fn get(&self, hash: &HashFor) -> Option<&U> { 37 | self.units.get(hash) 38 | } 39 | 40 | /// Get the list of unit hashes from the given round. 41 | /// Panics if called for a round greater or equal to the round 42 | /// of the highest head of a removed batch. 43 | pub fn in_round(&self, round: Round) -> Option> { 44 | self.by_round.get(&round).map(|hashes| { 45 | hashes 46 | .iter() 47 | .map(|hash| self.units.get(hash).expect("we have all the units")) 48 | .collect() 49 | }) 50 | } 51 | 52 | /// The highest round among all added units, or 0 if there are none. 53 | pub fn highest_round(&self) -> Round { 54 | self.highest_round 55 | } 56 | 57 | /// Remove a batch of units, deterministically ordered based on the given head. 
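/// The batch is gathered by a breadth-first search from the head through parents and then
/// reversed, so it starts with the least recent units and respects the Dag partial order.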
58 | pub fn remove_batch(&mut self, head: &HashFor) -> Vec { 59 | let mut batch = Vec::new(); 60 | let mut queue = VecDeque::new(); 61 | queue.push_back( 62 | self.units 63 | .remove(head) 64 | .expect("head is picked among units we have"), 65 | ); 66 | while let Some(u) = queue.pop_front() { 67 | for u_hash in u.parents() { 68 | if let Some(v) = self.units.remove(u_hash) { 69 | queue.push_back(v); 70 | } 71 | } 72 | batch.push(u); 73 | } 74 | // Since we construct the batch using BFS, the ordering is canonical and respects the DAG partial order. 75 | 76 | // We reverse for the batch to start with least recent units. 77 | batch.reverse(); 78 | batch 79 | } 80 | } 81 | 82 | #[cfg(test)] 83 | mod test { 84 | use crate::{ 85 | extension::units::Units, 86 | units::{random_full_parent_reconstrusted_units_up_to, TestingDagUnit, Unit}, 87 | NodeCount, 88 | }; 89 | use aleph_bft_mock::Keychain; 90 | 91 | #[test] 92 | fn initially_empty() { 93 | let units = Units::::new(); 94 | assert!(units.in_round(0).is_none()); 95 | assert_eq!(units.highest_round(), 0); 96 | } 97 | 98 | #[test] 99 | fn accepts_unit() { 100 | let mut units = Units::new(); 101 | let n_members = NodeCount(4); 102 | let session_id = 2137; 103 | let keychains = Keychain::new_vec(n_members); 104 | let unit = 105 | &random_full_parent_reconstrusted_units_up_to(0, n_members, session_id, &keychains)[0] 106 | [0]; 107 | units.add_unit(unit.clone()); 108 | assert_eq!(units.highest_round(), 0); 109 | assert_eq!(units.in_round(0), Some(vec![unit])); 110 | assert_eq!(units.get(&unit.hash()), Some(unit)); 111 | } 112 | 113 | #[test] 114 | fn returns_batches_all_parents() { 115 | let mut units = Units::new(); 116 | let n_members = NodeCount(4); 117 | let max_round = 43; 118 | let session_id = 2137; 119 | let keychains = Keychain::new_vec(n_members); 120 | let mut heads = Vec::new(); 121 | for (round, round_units) in random_full_parent_reconstrusted_units_up_to( 122 | max_round, n_members, session_id, &keychains, 123 | ) 124 | .into_iter() 125 | .enumerate() 126 | { 127 | heads.push(round_units[round % n_members.0].clone()); 128 | for unit in round_units { 129 | units.add_unit(unit); 130 | } 131 | } 132 | assert_eq!(units.highest_round(), max_round); 133 | assert_eq!(units.in_round(max_round + 1), None); 134 | for head in heads { 135 | let mut batch = units.remove_batch(&head.hash()); 136 | assert_eq!(batch.pop(), Some(head)); 137 | } 138 | } 139 | 140 | #[test] 141 | fn batch_order_constant_with_different_insertion_order() { 142 | let mut units = Units::new(); 143 | let mut units_but_backwards = Units::new(); 144 | let n_members = NodeCount(4); 145 | let max_round = 43; 146 | let session_id = 2137; 147 | let keychains = Keychain::new_vec(n_members); 148 | let mut heads = Vec::new(); 149 | for (round, round_units) in random_full_parent_reconstrusted_units_up_to( 150 | max_round, n_members, session_id, &keychains, 151 | ) 152 | .into_iter() 153 | .enumerate() 154 | { 155 | heads.push(round_units[round % n_members.0].clone()); 156 | for unit in &round_units { 157 | units.add_unit(unit.clone()); 158 | } 159 | for unit in round_units.into_iter().rev() { 160 | units_but_backwards.add_unit(unit); 161 | } 162 | } 163 | for head in heads { 164 | let batch1 = units.remove_batch(&head.hash()); 165 | let batch2 = units_but_backwards.remove_batch(&head.hash()); 166 | assert_eq!(batch1, batch2); 167 | } 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /consensus/src/interface.rs: 
-------------------------------------------------------------------------------- 1 | use crate::{ 2 | Data, DataProvider, FinalizationHandler, Hasher, OrderedUnit, UnitFinalizationHandler, 3 | }; 4 | use futures::{AsyncRead, AsyncWrite}; 5 | use std::marker::PhantomData; 6 | 7 | /// This adapter allows mapping an implementation of [`FinalizationHandler`] onto an implementation of [`UnitFinalizationHandler`]. 8 | pub struct FinalizationHandlerAdapter { 9 | finalization_handler: FH, 10 | _phantom: PhantomData<(D, H)>, 11 | } 12 | 13 | impl From for FinalizationHandlerAdapter { 14 | fn from(value: FH) -> Self { 15 | Self { 16 | finalization_handler: value, 17 | _phantom: PhantomData, 18 | } 19 | } 20 | } 21 | 22 | impl> UnitFinalizationHandler 23 | for FinalizationHandlerAdapter 24 | { 25 | type Data = D; 26 | type Hasher = H; 27 | 28 | fn batch_finalized(&mut self, batch: Vec>) { 29 | for unit in batch { 30 | if let Some(data) = unit.data { 31 | self.finalization_handler.data_finalized(data) 32 | } 33 | } 34 | } 35 | } 36 | 37 | /// The local interface of the consensus algorithm. Contains a [`DataProvider`] as a source of data 38 | /// to order, a [`UnitFinalizationHandler`] for handling ordered units, and a pair of read/write 39 | /// structs intended for saving and restoring the state of the algorithm within the session, as a 40 | /// contingency in the case of a crash. 41 | #[derive(Clone)] 42 | pub struct LocalIO { 43 | data_provider: DP, 44 | finalization_handler: UFH, 45 | unit_saver: US, 46 | unit_loader: UL, 47 | } 48 | 49 | impl< 50 | H: Hasher, 51 | DP: DataProvider, 52 | FH: FinalizationHandler, 53 | US: AsyncWrite, 54 | UL: AsyncRead, 55 | > LocalIO, US, UL> 56 | { 57 | /// Create a new local interface. Note that this uses the simplified, and recommended, 58 | /// finalization handler that only deals with ordered data. 59 | pub fn new( 60 | data_provider: DP, 61 | finalization_handler: FH, 62 | unit_saver: US, 63 | unit_loader: UL, 64 | ) -> Self { 65 | Self { 66 | data_provider, 67 | finalization_handler: finalization_handler.into(), 68 | unit_saver, 69 | unit_loader, 70 | } 71 | } 72 | } 73 | 74 | impl 75 | LocalIO 76 | { 77 | /// Create a new local interface, providing a full implementation of a 78 | /// [`UnitFinalizationHandler`]. Implementing [`UnitFinalizationHandler`] directly is more 79 | /// complex, and should be unnecessary for most use cases. Implement [`FinalizationHandler`] 80 | /// and use `new` instead, unless you absolutely know what you are doing. 81 | pub fn new_with_unit_finalization_handler( 82 | data_provider: DP, 83 | finalization_handler: UFH, 84 | unit_saver: US, 85 | unit_loader: UL, 86 | ) -> Self { 87 | Self { 88 | data_provider, 89 | finalization_handler, 90 | unit_saver, 91 | unit_loader, 92 | } 93 | } 94 | 95 | /// Disassemble the interface into components. 96 | pub fn into_components(self) -> (DP, UFH, US, UL) { 97 | let LocalIO { 98 | data_provider, 99 | finalization_handler, 100 | unit_saver, 101 | unit_loader, 102 | } = self; 103 | (data_provider, finalization_handler, unit_saver, unit_loader) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /consensus/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Implements the Aleph BFT Consensus protocol as a "finality gadget". The [run_session] function 2 | //! requires access to a network layer, a cryptographic primitive, and a data provider that 3 | //! 
gives appropriate access to the set of available data that we need to make consensus on. 4 | 5 | mod alerts; 6 | mod collection; 7 | mod config; 8 | mod consensus; 9 | mod creation; 10 | mod dag; 11 | mod dissemination; 12 | mod extension; 13 | mod interface; 14 | mod network; 15 | mod terminator; 16 | mod units; 17 | 18 | mod backup; 19 | mod task_queue; 20 | #[cfg(test)] 21 | mod testing; 22 | 23 | pub use aleph_bft_types::{ 24 | Data, DataProvider, FinalizationHandler, Hasher, IncompleteMultisignatureError, Index, Indexed, 25 | Keychain, MultiKeychain, Multisigned, Network, NodeCount, NodeIndex, NodeMap, NodeSubset, 26 | OrderedUnit, PartialMultisignature, PartiallyMultisigned, Recipient, Round, SessionId, 27 | Signable, Signature, SignatureError, SignatureSet, Signed, SpawnHandle, TaskHandle, 28 | UncheckedSigned, UnitFinalizationHandler, 29 | }; 30 | pub use config::{ 31 | create_config, default_config, default_delay_config, exponential_slowdown, Config, DelayConfig, 32 | }; 33 | pub use consensus::run_session; 34 | pub use interface::LocalIO; 35 | pub use network::NetworkData; 36 | pub use terminator::{handle_task_termination, Terminator}; 37 | 38 | type Receiver = futures::channel::mpsc::UnboundedReceiver; 39 | type Sender = futures::channel::mpsc::UnboundedSender; 40 | -------------------------------------------------------------------------------- /consensus/src/network/hub.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | alerts::AlertMessage, 3 | network::{NetworkData, NetworkDataInner, UnitMessage}, 4 | Data, Hasher, Network, PartialMultisignature, Receiver, Recipient, Sender, Signature, 5 | Terminator, 6 | }; 7 | use futures::{FutureExt, StreamExt}; 8 | use log::{debug, error, warn}; 9 | 10 | pub struct Hub< 11 | H: Hasher, 12 | D: Data, 13 | S: Signature, 14 | MS: PartialMultisignature, 15 | N: Network>, 16 | > { 17 | network: N, 18 | units_to_send: Receiver<(UnitMessage, Recipient)>, 19 | units_received: Sender>, 20 | alerts_to_send: Receiver<(AlertMessage, Recipient)>, 21 | alerts_received: Sender>, 22 | } 23 | 24 | impl< 25 | H: Hasher, 26 | D: Data, 27 | S: Signature, 28 | MS: PartialMultisignature, 29 | N: Network>, 30 | > Hub 31 | { 32 | pub fn new( 33 | network: N, 34 | units_to_send: Receiver<(UnitMessage, Recipient)>, 35 | units_received: Sender>, 36 | alerts_to_send: Receiver<(AlertMessage, Recipient)>, 37 | alerts_received: Sender>, 38 | ) -> Self { 39 | Hub { 40 | network, 41 | units_to_send, 42 | units_received, 43 | alerts_to_send, 44 | alerts_received, 45 | } 46 | } 47 | 48 | fn send(&self, data: NetworkData, recipient: Recipient) { 49 | self.network.send(data, recipient); 50 | } 51 | 52 | fn handle_incoming(&self, network_data: NetworkData) { 53 | let NetworkData(network_data) = network_data; 54 | use NetworkDataInner::*; 55 | match network_data { 56 | Units(unit_message) => { 57 | if let Err(e) = self.units_received.unbounded_send(unit_message) { 58 | warn!(target: "AlephBFT-network-hub", "Error when sending units to consensus {:?}", e); 59 | } 60 | } 61 | 62 | Alert(alert_message) => { 63 | if let Err(e) = self.alerts_received.unbounded_send(alert_message) { 64 | warn!(target: "AlephBFT-network-hub", "Error when sending alerts to consensus {:?}", e); 65 | } 66 | } 67 | } 68 | } 69 | 70 | pub async fn run(mut self, mut terminator: Terminator) { 71 | loop { 72 | use NetworkDataInner::*; 73 | futures::select! 
{ 74 | unit_message = self.units_to_send.next() => match unit_message { 75 | Some((unit_message, recipient)) => self.send(NetworkData(Units(unit_message)), recipient), 76 | None => { 77 | error!(target: "AlephBFT-network-hub", "Outgoing units stream closed."); 78 | break; 79 | } 80 | }, 81 | alert_message = self.alerts_to_send.next() => match alert_message { 82 | Some((alert_message, recipient)) => self.send(NetworkData(Alert(alert_message)), recipient), 83 | None => { 84 | error!(target: "AlephBFT-network-hub", "Outgoing alerts stream closed."); 85 | break; 86 | } 87 | }, 88 | incoming_message = self.network.next_event().fuse() => match incoming_message { 89 | Some(incoming_message) => self.handle_incoming(incoming_message), 90 | None => { 91 | error!(target: "AlephBFT-network-hub", "Network stopped working."); 92 | break; 93 | } 94 | }, 95 | _ = terminator.get_exit().fuse() => { 96 | terminator.terminate_sync().await; 97 | break; 98 | } 99 | } 100 | } 101 | 102 | debug!(target: "AlephBFT-network-hub", "Network ended."); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /consensus/src/network/unit.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | collection::NewestUnitResponse, 3 | units::{UncheckedSignedUnit, UnitCoord}, 4 | Data, Hasher, NodeIndex, Signature, UncheckedSigned, 5 | }; 6 | use codec::{Decode, Encode}; 7 | 8 | /// A message concerning units, either about new units or some requests for them. 9 | #[derive(Clone, Eq, PartialEq, Debug, Decode, Encode)] 10 | pub enum UnitMessage { 11 | /// For disseminating newly created units. 12 | Unit(UncheckedSignedUnit), 13 | /// Request for a unit by its coord. 14 | CoordRequest(NodeIndex, UnitCoord), 15 | /// Request for the full list of parents of a unit. 16 | ParentsRequest(NodeIndex, H::Hash), 17 | /// Response to a request for a full list of parents. 18 | ParentsResponse(H::Hash, Vec>), 19 | /// Request by a node for the newest unit created by them, together with a u64 salt 20 | NewestRequest(NodeIndex, u64), 21 | /// Response to RequestNewest: (our index, maybe unit, salt) signed by us 22 | NewestResponse(UncheckedSigned, S>), 23 | } 24 | 25 | impl UnitMessage { 26 | pub fn included_data(&self) -> Vec { 27 | use UnitMessage::*; 28 | match self { 29 | Unit(uu) => uu.as_signable().included_data(), 30 | ParentsResponse(_, units) => units 31 | .iter() 32 | .flat_map(|uu| uu.as_signable().included_data()) 33 | .collect(), 34 | NewestResponse(response) => response.as_signable().included_data(), 35 | NewestRequest(_, _) | CoordRequest(_, _) | ParentsRequest(_, _) => Vec::new(), 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /consensus/src/task_queue.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cmp::Ordering, 3 | collections::{binary_heap::PeekMut, BinaryHeap}, 4 | fmt::{Debug, Formatter}, 5 | time, 6 | time::Duration, 7 | }; 8 | 9 | #[derive(Clone, Eq, PartialEq)] 10 | struct ScheduledTask { 11 | task: T, 12 | scheduled_time: time::Instant, 13 | } 14 | 15 | impl PartialOrd for ScheduledTask { 16 | fn partial_cmp(&self, other: &Self) -> Option { 17 | Some(self.cmp(other)) 18 | } 19 | } 20 | 21 | impl Ord for ScheduledTask { 22 | /// Compare tasks so that earlier times come first in a max-heap. 
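/// The comparison is intentionally reversed (`other` against `self`), which turns the
/// max-heap used by `TaskQueue` into a min-heap on `scheduled_time`.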
23 | fn cmp(&self, other: &Self) -> Ordering { 24 | other.scheduled_time.cmp(&self.scheduled_time) 25 | } 26 | } 27 | 28 | #[derive(Clone, Default)] 29 | pub struct TaskQueue { 30 | queue: BinaryHeap>, 31 | } 32 | 33 | impl Debug for TaskQueue { 34 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 35 | f.debug_struct("TaskQueue") 36 | .field("task count", &self.queue.len()) 37 | .finish() 38 | } 39 | } 40 | 41 | /// Implements a queue allowing for scheduling tasks for some time in the future. 42 | /// 43 | /// Note that this queue is passive - nothing will happen until you call `pop_due_task`. 44 | impl TaskQueue { 45 | /// Creates an empty queue. 46 | pub fn new() -> Self { 47 | Self { 48 | queue: BinaryHeap::new(), 49 | } 50 | } 51 | 52 | /// Schedules `task` for as soon as possible. 53 | pub fn schedule_now(&mut self, task: T) { 54 | self.schedule(task, time::Instant::now()); 55 | } 56 | 57 | /// Schedules `task` for execution after `delay`. 58 | pub fn schedule_in(&mut self, task: T, delay: Duration) { 59 | self.schedule(task, time::Instant::now() + delay) 60 | } 61 | 62 | /// Schedules `task` for execution at `scheduled_time`. 63 | pub fn schedule(&mut self, task: T, scheduled_time: time::Instant) { 64 | self.queue.push(ScheduledTask { 65 | task, 66 | scheduled_time, 67 | }) 68 | } 69 | 70 | /// Returns `Some(task)` if `task` is the most overdue task, and `None` if there are no overdue 71 | /// tasks. 72 | pub fn pop_due_task(&mut self) -> Option { 73 | let scheduled_task = self.queue.peek_mut()?; 74 | 75 | if scheduled_task.scheduled_time <= time::Instant::now() { 76 | Some(PeekMut::pop(scheduled_task).task) 77 | } else { 78 | None 79 | } 80 | } 81 | 82 | /// Returns an iterator over all pending tasks. 83 | pub fn iter(&self) -> impl Iterator { 84 | self.queue.iter().map(|x| &x.task) 85 | } 86 | } 87 | 88 | #[cfg(test)] 89 | mod tests { 90 | use super::*; 91 | use std::thread; 92 | 93 | #[test] 94 | fn test_scheduling() { 95 | let mut q = TaskQueue::new(); 96 | q.schedule_now(1); 97 | q.schedule_in(2, Duration::from_millis(5)); 98 | q.schedule_in(3, Duration::from_millis(30)); 99 | 100 | thread::sleep(Duration::from_millis(10)); 101 | 102 | assert_eq!(Some(1), q.pop_due_task()); 103 | assert_eq!(Some(2), q.pop_due_task()); 104 | assert_eq!(None, q.pop_due_task()); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /consensus/src/testing/behind.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{HashSet, VecDeque}, 3 | time::{Duration, Instant}, 4 | }; 5 | 6 | use crate::{ 7 | testing::{init_log, spawn_honest_member, HonestMember, NetworkData}, 8 | NodeCount, NodeIndex, SpawnHandle, 9 | }; 10 | use aleph_bft_mock::{DataProvider, NetworkHook, Router, Spawner}; 11 | use futures::StreamExt; 12 | use log::info; 13 | 14 | struct Latency { 15 | who: NodeIndex, 16 | buffer: VecDeque<(Instant, (NetworkData, NodeIndex, NodeIndex))>, 17 | } 18 | 19 | const LATENCY: Duration = Duration::from_millis(300); 20 | 21 | impl Latency { 22 | pub fn new(who: NodeIndex) -> Self { 23 | Latency { 24 | who, 25 | buffer: VecDeque::new(), 26 | } 27 | } 28 | 29 | fn add_message( 30 | &mut self, 31 | data: NetworkData, 32 | sender: NodeIndex, 33 | recipient: NodeIndex, 34 | ) -> Vec<(NetworkData, NodeIndex, NodeIndex)> { 35 | match sender == self.who || recipient == self.who { 36 | true => { 37 | self.buffer 38 | .push_back((Instant::now(), (data, sender, recipient))); 39 | Vec::new() 
40 | } 41 | false => vec![(data, sender, recipient)], 42 | } 43 | } 44 | 45 | fn messages_to_send(&mut self) -> Vec<(NetworkData, NodeIndex, NodeIndex)> { 46 | let mut result = Vec::new(); 47 | while !self.buffer.is_empty() { 48 | let (when, msg) = self 49 | .buffer 50 | .pop_front() 51 | .expect("just checked it is not empty"); 52 | if Instant::now().duration_since(when) < LATENCY { 53 | self.buffer.push_front((when, msg)); 54 | break; 55 | } 56 | result.push(msg); 57 | } 58 | result 59 | } 60 | } 61 | 62 | impl NetworkHook for Latency { 63 | fn process_message( 64 | &mut self, 65 | data: NetworkData, 66 | sender: NodeIndex, 67 | recipient: NodeIndex, 68 | ) -> Vec<(NetworkData, NodeIndex, NodeIndex)> { 69 | let mut result = self.add_message(data, sender, recipient); 70 | result.append(&mut self.messages_to_send()); 71 | result 72 | } 73 | } 74 | 75 | #[tokio::test(flavor = "multi_thread")] 76 | async fn delayed_finalized() { 77 | let n_members = NodeCount(7); 78 | let australian = NodeIndex(0); 79 | init_log(); 80 | let spawner = Spawner::new(); 81 | let mut batch_rxs = Vec::new(); 82 | let mut exits = Vec::new(); 83 | let mut handles = Vec::new(); 84 | let (mut net_hub, networks) = Router::new(n_members); 85 | 86 | net_hub.add_hook(Latency::new(australian)); 87 | 88 | spawner.spawn("network-hub", net_hub); 89 | 90 | for (network, _) in networks { 91 | let ix = network.index(); 92 | let HonestMember { 93 | finalization_rx, 94 | exit_tx, 95 | handle, 96 | .. 97 | } = spawn_honest_member( 98 | spawner, 99 | ix, 100 | n_members, 101 | vec![], 102 | DataProvider::new_range(ix.0 * 50, (ix.0 + 1) * 50), 103 | network, 104 | ); 105 | batch_rxs.push(finalization_rx); 106 | exits.push(exit_tx); 107 | handles.push(handle); 108 | } 109 | let to_finalize: HashSet = (0..((n_members.0) * 50)) 110 | .map(|number| number as u32) 111 | .collect(); 112 | 113 | for mut rx in batch_rxs.drain(..) { 114 | let mut to_finalize_local = to_finalize.clone(); 115 | while !to_finalize_local.is_empty() { 116 | let number = rx.next().await.unwrap(); 117 | info!("finalizing {}", number); 118 | assert!(to_finalize_local.remove(&number)); 119 | } 120 | info!("finished one node"); 121 | } 122 | 123 | for exit in exits { 124 | let _ = exit.send(()); 125 | } 126 | for handle in handles { 127 | let _ = handle.await; 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /consensus/src/testing/crash.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | testing::{init_log, spawn_honest_member, HonestMember}, 3 | NodeCount, SpawnHandle, 4 | }; 5 | use aleph_bft_mock::{DataProvider, Router, Spawner, UnreliableHook}; 6 | use futures::StreamExt; 7 | use serial_test::serial; 8 | 9 | async fn honest_members_agree_on_batches( 10 | n_members: NodeCount, 11 | n_alive: NodeCount, 12 | n_batches: usize, 13 | network_reliability: Option, 14 | ) { 15 | init_log(); 16 | let spawner = Spawner::new(); 17 | let mut exits = Vec::new(); 18 | let mut handles = Vec::new(); 19 | let mut batch_rxs = Vec::new(); 20 | let (mut net_hub, networks) = Router::new(n_members); 21 | if let Some(reliability) = network_reliability { 22 | net_hub.add_hook(UnreliableHook::new(reliability)); 23 | } 24 | spawner.spawn("network-hub", net_hub); 25 | 26 | for (network, _) in networks { 27 | let ix = network.index(); 28 | if n_alive.into_range().contains(&ix) { 29 | let HonestMember { 30 | finalization_rx, 31 | exit_tx, 32 | handle, 33 | .. 
34 | } = spawn_honest_member(spawner, ix, n_members, vec![], DataProvider::new(), network); 35 | batch_rxs.push(finalization_rx); 36 | exits.push(exit_tx); 37 | handles.push(handle); 38 | } 39 | } 40 | 41 | let mut batches = vec![]; 42 | for mut rx in batch_rxs.drain(..) { 43 | let mut batches_per_ix = vec![]; 44 | for _ in 0..n_batches { 45 | let batch = rx.next().await.unwrap(); 46 | batches_per_ix.push(batch); 47 | } 48 | batches.push(batches_per_ix); 49 | } 50 | 51 | for node_ix in n_alive.into_iterator().skip(1) { 52 | assert_eq!(batches[0], batches[node_ix.0]); 53 | } 54 | for exit in exits { 55 | let _ = exit.send(()); 56 | } 57 | for handle in handles { 58 | let _ = handle.await; 59 | } 60 | } 61 | 62 | #[tokio::test(flavor = "multi_thread")] 63 | #[serial] 64 | async fn small_honest_all_alive() { 65 | honest_members_agree_on_batches(4.into(), 4.into(), 5, None).await; 66 | } 67 | 68 | #[tokio::test(flavor = "multi_thread")] 69 | #[serial] 70 | async fn small_honest_one_crash() { 71 | honest_members_agree_on_batches(4.into(), 3.into(), 5, None).await; 72 | } 73 | 74 | #[tokio::test(flavor = "multi_thread")] 75 | #[serial] 76 | async fn small_honest_one_crash_unreliable_network() { 77 | honest_members_agree_on_batches(4.into(), 3.into(), 5, Some(0.9)).await; 78 | } 79 | 80 | #[tokio::test(flavor = "multi_thread")] 81 | #[serial] 82 | async fn medium_honest_all_alive() { 83 | honest_members_agree_on_batches(31.into(), 31.into(), 5, None).await; 84 | } 85 | 86 | #[tokio::test(flavor = "multi_thread")] 87 | #[serial] 88 | async fn medium_honest_ten_crashes() { 89 | honest_members_agree_on_batches(31.into(), 21.into(), 5, None).await; 90 | } 91 | 92 | #[tokio::test(flavor = "multi_thread")] 93 | #[serial] 94 | async fn medium_honest_ten_crashes_unreliable_network() { 95 | honest_members_agree_on_batches(31.into(), 21.into(), 5, Some(0.9)).await; 96 | } 97 | -------------------------------------------------------------------------------- /consensus/src/testing/mod.rs: -------------------------------------------------------------------------------- 1 | mod alerts; 2 | mod behind; 3 | mod byzantine; 4 | mod crash; 5 | mod crash_recovery; 6 | mod creation; 7 | mod dag; 8 | mod unreliable; 9 | 10 | use crate::{ 11 | create_config, run_session, Config, DelayConfig, LocalIO, Network as NetworkT, NodeCount, 12 | NodeIndex, SpawnHandle, TaskHandle, Terminator, 13 | }; 14 | use aleph_bft_mock::{ 15 | Data, DataProvider, FinalizationHandler, Hasher64, Keychain, Loader, Network as MockNetwork, 16 | PartialMultisignature, ReconnectSender as ReconnectSenderGeneric, Saver, Signature, Spawner, 17 | }; 18 | use futures::channel::{mpsc::UnboundedReceiver, oneshot}; 19 | use parking_lot::Mutex; 20 | use std::{sync::Arc, time::Duration}; 21 | 22 | pub type NetworkData = crate::NetworkData; 23 | 24 | pub type Network = MockNetwork; 25 | pub type ReconnectSender = ReconnectSenderGeneric; 26 | 27 | pub fn init_log() { 28 | let _ = env_logger::builder() 29 | .filter_level(log::LevelFilter::max()) 30 | .is_test(true) 31 | .try_init(); 32 | } 33 | 34 | pub fn gen_delay_config() -> DelayConfig { 35 | DelayConfig { 36 | tick_interval: Duration::from_millis(5), 37 | unit_rebroadcast_interval_min: Duration::from_millis(400), 38 | unit_rebroadcast_interval_max: Duration::from_millis(500), 39 | //50, 50, 50, 50, ... 40 | unit_creation_delay: Arc::new(|_| Duration::from_millis(50)), 41 | //100, 100, 100, ... 42 | coord_request_delay: Arc::new(|_| Duration::from_millis(100)), 43 | //3, 1, 1, 1, ... 
44 | coord_request_recipients: Arc::new(|t| if t == 0 { 3 } else { 1 }), 45 | // 50, 50, 50, 50, ... 46 | parent_request_delay: Arc::new(|_| Duration::from_millis(50)), 47 | // 1, 1, 1, ... 48 | parent_request_recipients: Arc::new(|_| 1), 49 | // 50, 50, 50, 50, ... 50 | newest_request_delay: Arc::new(|_| Duration::from_millis(50)), 51 | } 52 | } 53 | 54 | pub fn gen_config(node_ix: NodeIndex, n_members: NodeCount, delay_config: DelayConfig) -> Config { 55 | create_config(n_members, node_ix, 0, 5000, delay_config, Duration::ZERO) 56 | .expect("Should always succeed with Duration::ZERO") 57 | } 58 | 59 | pub struct HonestMember { 60 | finalization_rx: UnboundedReceiver, 61 | saved_state: Arc>>, 62 | exit_tx: oneshot::Sender<()>, 63 | handle: TaskHandle, 64 | } 65 | 66 | pub fn spawn_honest_member( 67 | spawner: Spawner, 68 | node_index: NodeIndex, 69 | n_members: NodeCount, 70 | units: Vec, 71 | data_provider: DataProvider, 72 | network: impl 'static + NetworkT, 73 | ) -> HonestMember { 74 | let (finalization_handler, finalization_rx) = FinalizationHandler::new(); 75 | let config = gen_config(node_index, n_members, gen_delay_config()); 76 | let (exit_tx, exit_rx) = oneshot::channel(); 77 | let spawner_inner = spawner; 78 | let unit_loader = Loader::new(units); 79 | let saved_state = Arc::new(Mutex::new(vec![])); 80 | let unit_saver: Saver = saved_state.clone().into(); 81 | let local_io = LocalIO::new(data_provider, finalization_handler, unit_saver, unit_loader); 82 | let member_task = async move { 83 | let keychain = Keychain::new(n_members, node_index); 84 | run_session( 85 | config, 86 | local_io, 87 | network, 88 | keychain, 89 | spawner_inner, 90 | Terminator::create_root(exit_rx, "AlephBFT-member"), 91 | ) 92 | .await 93 | }; 94 | let handle = spawner.spawn_essential("member", member_task); 95 | HonestMember { 96 | finalization_rx, 97 | saved_state, 98 | exit_tx, 99 | handle, 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /consensus/src/testing/unreliable.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | network::NetworkDataInner, 3 | network::UnitMessage, 4 | testing::{init_log, spawn_honest_member, HonestMember, NetworkData}, 5 | units::Unit, 6 | Index, NodeCount, NodeIndex, Round, Signed, SpawnHandle, 7 | }; 8 | use aleph_bft_mock::{BadSigning, DataProvider, Keychain, NetworkHook, Router, Spawner}; 9 | use futures::StreamExt; 10 | use parking_lot::Mutex; 11 | use std::sync::Arc; 12 | 13 | struct CorruptPacket { 14 | recipient: NodeIndex, 15 | sender: NodeIndex, 16 | creator: NodeIndex, 17 | round: Round, 18 | } 19 | 20 | impl NetworkHook for CorruptPacket { 21 | fn process_message( 22 | &mut self, 23 | mut data: NetworkData, 24 | sender: NodeIndex, 25 | recipient: NodeIndex, 26 | ) -> Vec<(NetworkData, NodeIndex, NodeIndex)> { 27 | if self.recipient != recipient || self.sender != sender { 28 | return vec![(data, sender, recipient)]; 29 | } 30 | if let crate::NetworkData(NetworkDataInner::Units(UnitMessage::Unit(us))) = &mut data { 31 | let full_unit = us.clone().into_signable(); 32 | let index = full_unit.index(); 33 | if full_unit.round() == self.round && full_unit.creator() == self.creator { 34 | let bad_keychain: BadSigning = Keychain::new(0.into(), index).into(); 35 | *us = Signed::sign(full_unit, &bad_keychain).into(); 36 | } 37 | } 38 | vec![(data, sender, recipient)] 39 | } 40 | } 41 | 42 | struct NoteRequest { 43 | sender: NodeIndex, 44 | creator: NodeIndex, 45 | 
round: Round, 46 | requested: Arc>, 47 | } 48 | 49 | impl NetworkHook for NoteRequest { 50 | fn process_message( 51 | &mut self, 52 | data: NetworkData, 53 | sender: NodeIndex, 54 | recipient: NodeIndex, 55 | ) -> Vec<(NetworkData, NodeIndex, NodeIndex)> { 56 | use NetworkDataInner::Units; 57 | use UnitMessage::CoordRequest; 58 | if sender == self.sender { 59 | if let crate::NetworkData(Units(CoordRequest(_, co))) = &data { 60 | if co.round() == self.round && co.creator() == self.creator { 61 | *self.requested.lock() = true; 62 | } 63 | } 64 | } 65 | vec![(data, sender, recipient)] 66 | } 67 | } 68 | 69 | #[tokio::test] 70 | async fn request_missing_coord() { 71 | init_log(); 72 | 73 | let n_members = NodeCount(4); 74 | let censored_node = NodeIndex(0); 75 | let censoring_node = NodeIndex(1); 76 | let censoring_round = 5; 77 | 78 | let (mut net_hub, networks) = Router::new(n_members); 79 | net_hub.add_hook(CorruptPacket { 80 | recipient: censored_node, 81 | sender: censoring_node, 82 | creator: censoring_node, 83 | round: censoring_round, 84 | }); 85 | let requested = Arc::new(Mutex::new(false)); 86 | net_hub.add_hook(NoteRequest { 87 | sender: censored_node, 88 | creator: censoring_node, 89 | round: censoring_round, 90 | requested: requested.clone(), 91 | }); 92 | let spawner = Spawner::new(); 93 | spawner.spawn("network-hub", net_hub); 94 | 95 | let mut exits = Vec::new(); 96 | let mut handles = Vec::new(); 97 | let mut batch_rxs = Vec::new(); 98 | for (network, _) in networks { 99 | let ix = network.index(); 100 | let HonestMember { 101 | finalization_rx, 102 | exit_tx, 103 | handle, 104 | .. 105 | } = spawn_honest_member(spawner, ix, n_members, vec![], DataProvider::new(), network); 106 | batch_rxs.push(finalization_rx); 107 | exits.push(exit_tx); 108 | handles.push(handle); 109 | } 110 | 111 | let n_batches = 10; 112 | let mut batches = vec![]; 113 | for mut rx in batch_rxs.drain(..) 
{ 114 | let mut batches_per_ix = vec![]; 115 | for _ in 0..n_batches { 116 | let batch = rx.next().await.unwrap(); 117 | batches_per_ix.push(batch); 118 | } 119 | batches.push(batches_per_ix); 120 | } 121 | for node_ix in n_members.into_iterator().skip(1) { 122 | assert_eq!(batches[0], batches[node_ix.0]); 123 | } 124 | for exit in exits { 125 | let _ = exit.send(()); 126 | } 127 | for handle in handles { 128 | let _ = handle.await; 129 | } 130 | 131 | assert!(*requested.lock()) 132 | } 133 | -------------------------------------------------------------------------------- /consensus/src/units/mod.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter, Result as FmtResult}; 2 | 3 | use crate::{ 4 | Data, Hasher, Index, MultiKeychain, NodeCount, NodeIndex, Round, SessionId, Signable, Signed, 5 | UncheckedSigned, 6 | }; 7 | use codec::{Decode, Encode}; 8 | use derivative::Derivative; 9 | use parking_lot::RwLock; 10 | 11 | mod control_hash; 12 | mod store; 13 | #[cfg(test)] 14 | mod testing; 15 | mod validator; 16 | 17 | pub use control_hash::{ControlHash, Error as ControlHashError}; 18 | pub(crate) use store::*; 19 | #[cfg(test)] 20 | pub use testing::{ 21 | create_preunits, creator_set, full_unit_to_unchecked_signed_unit, 22 | minimal_reconstructed_dag_units_up_to, preunit_to_full_unit, preunit_to_signed_unit, 23 | preunit_to_unchecked_signed_unit, random_full_parent_reconstrusted_units_up_to, 24 | random_full_parent_units_up_to, random_reconstructed_unit_with_parents, 25 | random_unit_with_parents, DagUnit as TestingDagUnit, FullUnit as TestingFullUnit, 26 | SignedUnit as TestingSignedUnit, WrappedSignedUnit, 27 | }; 28 | pub use validator::{ValidationError, Validator}; 29 | 30 | /// The coordinates of a unit, i.e. creator and round. In the absence of forks this uniquely 31 | /// determines a unit within a session. 
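/// A forking creator may produce more than one unit with the same coordinates; such units
/// are caught during validation and spread via alerts.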
32 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default, Encode, Decode)] 33 | pub struct UnitCoord { 34 | round: Round, 35 | creator: NodeIndex, 36 | } 37 | 38 | impl UnitCoord { 39 | pub fn new(round: Round, creator: NodeIndex) -> Self { 40 | Self { creator, round } 41 | } 42 | 43 | pub fn creator(&self) -> NodeIndex { 44 | self.creator 45 | } 46 | 47 | pub fn round(&self) -> Round { 48 | self.round 49 | } 50 | } 51 | 52 | impl Display for UnitCoord { 53 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 54 | write!(f, "(#{} by {})", self.round, self.creator.0) 55 | } 56 | } 57 | 58 | /// The simplest type representing a unit, consisting of coordinates and a control hash 59 | #[derive(Clone, Eq, PartialEq, Hash, Debug, Decode, Encode)] 60 | pub struct PreUnit { 61 | coord: UnitCoord, 62 | control_hash: ControlHash, 63 | } 64 | 65 | impl PreUnit { 66 | pub(crate) fn new(creator: NodeIndex, round: Round, control_hash: ControlHash) -> Self { 67 | PreUnit { 68 | coord: UnitCoord::new(round, creator), 69 | control_hash, 70 | } 71 | } 72 | 73 | pub(crate) fn n_members(&self) -> NodeCount { 74 | self.control_hash.n_members() 75 | } 76 | 77 | pub(crate) fn creator(&self) -> NodeIndex { 78 | self.coord.creator() 79 | } 80 | 81 | pub(crate) fn round(&self) -> Round { 82 | self.coord.round() 83 | } 84 | 85 | pub(crate) fn control_hash(&self) -> &ControlHash { 86 | &self.control_hash 87 | } 88 | } 89 | 90 | #[derive(Debug, Decode, Derivative, Encode)] 91 | #[derivative(Eq, PartialEq, Hash)] 92 | pub struct FullUnit { 93 | pre_unit: PreUnit, 94 | data: Option, 95 | session_id: SessionId, 96 | #[codec(skip)] 97 | #[derivative(PartialEq = "ignore", Hash = "ignore")] 98 | hash: RwLock>, 99 | } 100 | 101 | impl From> for Option { 102 | fn from(value: FullUnit) -> Self { 103 | value.data 104 | } 105 | } 106 | 107 | impl Clone for FullUnit { 108 | fn clone(&self) -> Self { 109 | let hash = self.hash.try_read().and_then(|guard| *guard); 110 | FullUnit { 111 | pre_unit: self.pre_unit.clone(), 112 | data: self.data.clone(), 113 | session_id: self.session_id, 114 | hash: RwLock::new(hash), 115 | } 116 | } 117 | } 118 | 119 | impl FullUnit { 120 | pub(crate) fn new(pre_unit: PreUnit, data: Option, session_id: SessionId) -> Self { 121 | FullUnit { 122 | pre_unit, 123 | data, 124 | session_id, 125 | hash: RwLock::new(None), 126 | } 127 | } 128 | pub(crate) fn as_pre_unit(&self) -> &PreUnit { 129 | &self.pre_unit 130 | } 131 | pub(crate) fn data(&self) -> &Option { 132 | &self.data 133 | } 134 | pub(crate) fn included_data(&self) -> Vec { 135 | self.data.iter().cloned().collect() 136 | } 137 | } 138 | 139 | impl Signable for FullUnit { 140 | type Hash = H::Hash; 141 | fn hash(&self) -> H::Hash { 142 | Unit::hash(self) 143 | } 144 | } 145 | 146 | impl Index for FullUnit { 147 | fn index(&self) -> NodeIndex { 148 | self.creator() 149 | } 150 | } 151 | 152 | pub(crate) type UncheckedSignedUnit = UncheckedSigned, S>; 153 | 154 | pub(crate) type SignedUnit = Signed, K>; 155 | 156 | /// Abstract representation of a unit from the Dag point of view. 
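/// Implementors provide `hash`, `coord`, `control_hash` and `session_id`; `creator` and
/// `round` are derived from the coordinate.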
157 | pub trait Unit: 'static + Send + Clone { 158 | type Hasher: Hasher; 159 | 160 | fn hash(&self) -> ::Hash; 161 | 162 | fn coord(&self) -> UnitCoord; 163 | 164 | fn control_hash(&self) -> &ControlHash; 165 | 166 | fn session_id(&self) -> SessionId; 167 | 168 | fn creator(&self) -> NodeIndex { 169 | self.coord().creator() 170 | } 171 | 172 | fn round(&self) -> Round { 173 | self.coord().round() 174 | } 175 | } 176 | 177 | pub trait WrappedUnit: Unit { 178 | type Wrapped: Unit; 179 | 180 | fn unpack(self) -> Self::Wrapped; 181 | } 182 | 183 | pub trait UnitWithParents: Unit { 184 | fn parents(&self) -> impl Iterator>; 185 | fn direct_parents(&self) -> impl Iterator>; 186 | fn parent_for(&self, index: NodeIndex) -> Option<&HashFor>; 187 | 188 | fn node_count(&self) -> NodeCount; 189 | } 190 | 191 | impl Unit for FullUnit { 192 | type Hasher = H; 193 | 194 | fn hash(&self) -> H::Hash { 195 | let hash = *self.hash.read(); 196 | match hash { 197 | Some(hash) => hash, 198 | None => { 199 | let hash = self.using_encoded(H::hash); 200 | *self.hash.write() = Some(hash); 201 | hash 202 | } 203 | } 204 | } 205 | 206 | fn coord(&self) -> UnitCoord { 207 | self.pre_unit.coord 208 | } 209 | 210 | fn control_hash(&self) -> &ControlHash { 211 | self.pre_unit.control_hash() 212 | } 213 | 214 | fn session_id(&self) -> SessionId { 215 | self.session_id 216 | } 217 | } 218 | 219 | impl Unit for SignedUnit { 220 | type Hasher = H; 221 | 222 | fn hash(&self) -> H::Hash { 223 | Unit::hash(self.as_signable()) 224 | } 225 | 226 | fn coord(&self) -> UnitCoord { 227 | self.as_signable().coord() 228 | } 229 | 230 | fn control_hash(&self) -> &ControlHash { 231 | self.as_signable().control_hash() 232 | } 233 | 234 | fn session_id(&self) -> SessionId { 235 | self.as_signable().session_id() 236 | } 237 | } 238 | 239 | pub type HashFor = <::Hasher as Hasher>::Hash; 240 | 241 | #[cfg(test)] 242 | pub mod tests { 243 | use crate::{ 244 | units::{random_full_parent_units_up_to, FullUnit, Unit}, 245 | Hasher, NodeCount, 246 | }; 247 | use aleph_bft_mock::{Data, Hasher64}; 248 | use codec::{Decode, Encode}; 249 | 250 | pub type TestFullUnit = FullUnit; 251 | 252 | #[test] 253 | fn test_full_unit_hash_is_correct() { 254 | for full_unit in random_full_parent_units_up_to(3, NodeCount(4), 43) 255 | .into_iter() 256 | .flatten() 257 | { 258 | let hash = full_unit.using_encoded(Hasher64::hash); 259 | assert_eq!(full_unit.hash(), hash); 260 | } 261 | } 262 | 263 | #[test] 264 | fn test_full_unit_codec() { 265 | for full_unit in random_full_parent_units_up_to(3, NodeCount(4), 43) 266 | .into_iter() 267 | .flatten() 268 | { 269 | let encoded = full_unit.encode(); 270 | let decoded = 271 | TestFullUnit::decode(&mut encoded.as_slice()).expect("should decode correctly"); 272 | assert_eq!(decoded, full_unit); 273 | } 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /conventions.md: -------------------------------------------------------------------------------- 1 | # Conventions 2 | 3 | A basic overview of (semi-)arbitrary conventions we follow in this repository. 4 | 5 | ## Logging 6 | 7 | We use all the logging macros, for the purposes outlined below: 8 | 9 | 1. `error!` -- for errors that are fatal to the program or a significant subsystem (at least leaving them in a broken state). 10 | 2. `warn!` -- for errors that can be handled or unexpected states of the program that are not fatal. 11 | 3. `info!` -- for changes in the state of the program (e.g. started/stopped a subservice). 
12 | 4. `debug!` -- for everything lower level than the above, but without many details. 13 | 5. `trace!` -- for extremely noisy logging, including writing out details about the state of the program. 14 | 15 | A rule of thumb to decide between `debug!` and `trace!` is that the former should never include data more detailed than simple numeric identifiers. 16 | No `warn!` logs should ever show up in a run that does not include Byzantine nodes. 17 | 18 | All log calls should include a `target` parameter, with a `AlephBFT` prefix for messages in the core library. 19 | -------------------------------------------------------------------------------- /cov_report.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cargo cov -- show \ 4 | --use-color \ 5 | --ignore-filename-regex='/rustc' \ 6 | --ignore-filename-regex='/.cargo/registry' \ 7 | --instr-profile=aleph_bft.profdata \ 8 | --object target/debug/deps/aleph_bft-coverage \ 9 | --show-instantiations --show-line-counts-or-regions \ 10 | --Xdemangler=rustfilt | less -R 11 | -------------------------------------------------------------------------------- /crypto/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aleph-bft-crypto" 3 | version = "0.11.0" 4 | edition = "2021" 5 | authors = ["Cardinal Cryptography"] 6 | documentation = "https://docs.rs/?" 7 | homepage = "https://alephzero.org" 8 | license = "Apache-2.0" 9 | categories = ["cryptography"] 10 | repository = "https://github.com/Cardinal-Cryptography/AlephBFT" 11 | readme = "./README.md" 12 | description = "Utilities for node addressing and message signing in the aleph-bft package." 13 | 14 | [dependencies] 15 | async-trait = "0.1" 16 | bit-vec = "0.8" 17 | codec = { package = "parity-scale-codec", version = "3.0", default-features = false, features = ["derive"] } 18 | derive_more = { version = "1.0", features = ["full"] } 19 | log = "0.4" 20 | 21 | [dev-dependencies] 22 | tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] } 23 | -------------------------------------------------------------------------------- /crypto/README.md: -------------------------------------------------------------------------------- 1 | [![Crate][crate-image]][crate-link] 2 | [![Docs][docs-image]][docs-link] 3 | [![Apache 2.0 Licensed][license-image]][license-link] 4 | 5 | ### Overview 6 | 7 | This package is a part of the AlephBFT toolset. For more information, see the README 8 | in the top-level directory. 9 | 10 | Utilities for node addressing and message signing. 11 | 12 | [crate-image]: https://img.shields.io/crates/v/aleph-bft-crypto.svg 13 | [crate-link]: https://crates.io/crates/aleph-bft-crypto 14 | [docs-image]: https://docs.rs/aleph-bft-crypto/badge.svg 15 | [docs-link]: https://docs.rs/aleph-bft-crypto 16 | [license-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg 17 | [license-link]: https://github.com/Cardinal-Cryptography/AlephBFT/blob/main/LICENSE 18 | -------------------------------------------------------------------------------- /crypto/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Utilities for node addressing and message signing. 
2 | 3 | mod node; 4 | mod signature; 5 | 6 | pub use node::{Index, NodeCount, NodeIndex, NodeMap, NodeSubset}; 7 | pub use signature::{ 8 | IncompleteMultisignatureError, Indexed, Keychain, MultiKeychain, Multisigned, 9 | PartialMultisignature, PartiallyMultisigned, Signable, Signature, SignatureError, SignatureSet, 10 | Signed, UncheckedSigned, 11 | }; 12 | -------------------------------------------------------------------------------- /docs/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["Cardinal Cryptography"] 3 | language = "en" 4 | description = "Documentation of the Rust implementation of Aleph protocol" 5 | multilingual = false 6 | title = "AlephBFT" 7 | 8 | [build] 9 | create-missing = false 10 | -------------------------------------------------------------------------------- /docs/src/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | - [What is AlephBFT?](./what_is_aleph_bft.md) 4 | - [What is Aleph?](./how_alephbft_does_it.md) 5 | - [API of AlephBFT](./aleph_bft_api.md) 6 | - [Differences between Aleph and AlephBFT](./differences.md) 7 | - [AlephBFT Internals](./internals.md) 8 | - [Reliable Broadcast](./reliable_broadcast.md) 9 | -------------------------------------------------------------------------------- /docs/src/differences.md: -------------------------------------------------------------------------------- 1 | ## 4 Differences between the implementation and the paper. 2 | 3 | There are several differences between the Aleph as described in the [paper](https://arxiv.org/abs/1908.05156) and the version implemented in AlephBFT. Many of them are already described in previous sections but for completeness we briefly list the differences here. 4 | 5 | 1. The main version of Aleph uses Reliable Broadcast to disseminate units. The AlephBFT implementation is closer to QuickAleph (in the Appendix of the paper) that uses Reliable Broadcast only for Alerts. 6 | 2. The specifics of alerts are different in the AlephBFT implementation -- in particular they do not require to freeze the protocol at any moment and are generally simpler. 7 | 3. AlephBFT uses its own variant of Reliable Broadcast -- see the section [Reliable Broadcast](reliable_broadcast.md##reliable-broadcast). 8 | 4. Differences in the use of randomness -- see [Randomness in AlephBFT](how_alephbft_does_it.md#24-randomness-in-alephbft). 9 | 5. The main version in the paper uses a full list of parent hashes instead of control hashes -- the latter is described in the Appendix as an optimization. 10 | 6. The paper's Appendix proposes the use of random gossip as a method of disseminating units -- AlephBFT uses repeated broadcast + a request/response mechanism instead, which according to our experience performs much better in practice. 11 | -------------------------------------------------------------------------------- /docs/src/internals.md: -------------------------------------------------------------------------------- 1 | ## 5 AlephBFT Internals 2 | 3 | To explain the inner workings of AlephBFT it is instructive to follow the path of a unit: from the very start when it is created to the moment when its round is decided and its data is placed in one of the output batches. Here we give a brief overview and subsequently go more into details of specific components in dedicated subsections. 4 | 5 | 1. The unit is created by one of the node's `Creator` component -- implemented in `creation/`. 
Creator sends the produced unit to `consensus/`. 6 | 2. A recurring task of broadcasting this unit is put in the task queue. The unit will be broadcast to all other nodes a few times (with some delays in between). 7 | 3. The unit is received by another node -- this happens in `consensus/`, where it is sent for further processing in `dag/`. 8 | 4. Dag validates and reconstructs a unit's parents in several steps: 9 | 1. Validation, implemented in `dag/validation.rs`, checks signatures and basic unit properties, plus catches forks. This means that only **legit units**, in the sense defined in [the section on alerts](how_alephbft_does_it.md#25-alerts----dealing-with-fork-spam), are sent further. Thus no fork is ever passed on unless coming from an alert. 10 | 2. The units are further moved to a component responsible for reconstructing the explicit parents for these units -- implemented in `dag/reconstruction/parents.rs`. 11 | 3. Each unit whose parents are successfully decoded is passed on to `dag/reconstruction/dag.rs`, which ensures that units are passed on only when their parents already were. They are then returned to `consensus/`. 12 | 5. In `consensus/` such units are put in a store. Each unit in the store is legit + has all its parents in the store. 13 | 6. Such units are passed to a component called the `Extender` -- see the files in `extension/`. The role of the extender is to efficiently run the `OrderData` algorithm, described in the [section on AlephBFT](how_alephbft_does_it.md). 14 | 7. Once a unit's data is placed in one of the batches by the `Extender`, its path is over, although we keep it in the consensus store to be able to send it to other nodes on request. 15 | 16 | The above description omits backup saving for simplicity. It is injected just before a unit is placed in the store or broadcast. 17 | 18 | ### 5.1 Creator 19 | 20 | The creator produces units according to the AlephBFT protocol rules. It will wait until the prespecified delay has passed and attempt to create a unit using a maximal number of parents. If this is not possible yet, it will wait till the first moment enough parents are available. After creating the last unit, the creator stops producing new ones, although this is never expected to happen during correct execution. 21 | 22 | ### 5.2 Dag 23 | 24 | The Dag receives units from the network and returns any that were successfully reconstructed with parents. It does that in several steps, starting with validation. 25 | 26 | #### 5.2.1 Validation 27 | 28 | The validation process consists of checking basic properties of units (correct number of parents, correct session, etc.), the signatures, and whether the unit is a fork based on the units that the node either already has or at least started processing. As mentioned, the idea is that only legit units are passed to the reconstructing component. In case a fork by a node `i` is detected, all of `i`'s units are attached to the appropriate alert, so that other nodes can accept them as legit. 29 | 30 | The next step is to reconstruct the structure of the Dag from the somewhat compressed information in the units. 31 | 32 | #### 5.2.2 Parents 33 | 34 | The reconstruction service receives legit units, but the information about their parents is only present as a control hash, i.e. which nodes created the parents and what was the combined hash of all the parents' hashes.
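For intuition only, a heavily simplified sketch of such a check -- plain `u64` stand-ins for real hashes and no `ControlHash` type; the actual logic lives in `units/control_hash.rs` and differs in detail -- could look like this:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for a unit hash; the real code is generic over the `Hasher` trait's output.
type UnitHash = u64;

// Combine the per-creator (optional) parent hashes into a single digest.
fn combined_hash(parents: &[Option<UnitHash>]) -> UnitHash {
    let mut hasher = DefaultHasher::new();
    parents.hash(&mut hasher);
    hasher.finish()
}

// Candidate parents are accepted only if their combined digest matches the one
// carried in the unit's control hash.
fn parents_match_control_hash(claimed: UnitHash, candidates: &[Option<UnitHash>]) -> bool {
    combined_hash(candidates) == claimed
}
```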
42 | 43 | #### 5.2.3 Dag 44 | 45 | The units' parents might, for many reasons, not be reconstructed in an order agreeing with the Dag order, i.e. some of their ancestors might not yet be reconstructed. The Dag component ensures that units are only added to the store after their parents have already been added, and thus any units emitted by the Dag component are in an order agreeing with the Dag order. 46 | 47 | ### 5.3 Extender 48 | 49 | The `Extender`'s role is to receive Dag units (in an order agreeing with the Dag order) and extend the output stream. Towards this end it elects the `Head` for each `round`. Such an election works by going through candidate units from this round, either eliminating them or eventually electing one. Votes are computed and cached for each candidate until a decision on it is made, after which the election moves on to the next round (if the candidate was elected as `Head`) or to the next unit (otherwise). After electing each `Head`, the `Extender` deterministically orders all of its not-yet-ordered ancestors together with the `Head` itself and returns the resulting batch. 50 |
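As a rough illustration of that last step -- turning an elected `Head` into a batch -- consider the following sketch. The types and the exact ordering key are assumptions made for this example (the real `extension/` code works on richer unit data); it only demonstrates that deterministically ordering the not-yet-ordered ancestors of each `Head` yields the same batches on every node.

```rust
use std::collections::{HashMap, HashSet};

type UnitHash = u64;

struct DagUnit {
    hash: UnitHash,
    round: u32,
    creator: usize,
    parents: Vec<UnitHash>,
}

/// Illustrative batch construction: once `head` is elected, order all of its
/// ancestors that were not ordered by earlier heads, then the head itself.
fn batch_for_head(
    head: UnitHash,
    store: &HashMap<UnitHash, DagUnit>,
    already_ordered: &mut HashSet<UnitHash>,
) -> Vec<UnitHash> {
    // Collect the not-yet-ordered ancestors (including the head) with a DFS.
    let mut stack = vec![head];
    let mut collected = Vec::new();
    while let Some(hash) = stack.pop() {
        if already_ordered.contains(&hash) || collected.contains(&hash) {
            continue;
        }
        collected.push(hash);
        if let Some(unit) = store.get(&hash) {
            stack.extend(unit.parents.iter().copied());
        }
    }
    // One concrete deterministic order: by round, then creator, then hash;
    // every honest node computes the same batch for the same head.
    collected.sort_by_key(|hash| {
        let unit = &store[hash];
        (unit.round, unit.creator, unit.hash)
    });
    already_ordered.extend(collected.iter().copied());
    collected
}
```

Sorting by `(round, creator, hash)` is just one possible deterministic rule; any rule that all nodes apply identically works equally well.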
-------------------------------------------------------------------------------- /docs/src/reliable_broadcast.md: -------------------------------------------------------------------------------- 1 | ## 6 Reliable Broadcast 2 | 3 | Recall that Reliable Broadcast is the primitive we use to broadcast `fork alerts` among nodes -- see [the section on alerts](how_alephbft_does_it.md#25-alerts----dealing-with-fork-spam). There are two requirements for a protocol realizing reliable broadcast: 4 | 5 | 1. If an honest sender initiates a reliable broadcast with some message `m` then the protocol terminates and as a result all honest nodes receive `m`. 6 | 2. If a malicious sender initiates a reliable broadcast then either it terminates for all honest nodes and they receive the same message `m`, or it does not terminate for any honest node. 7 | 8 | So, roughly speaking, we want a broadcast primitive that is consistent even if a malicious node is the sender. There is the possibility that a malicious broadcast will not terminate, but it is not hard to see that this is the best we can hope for. 9 | 10 | ### 6.1 Consistent Broadcast using multisignatures -- RMC 11 | 12 | The main idea behind the reliable broadcast implementation in AlephBFT is the use of multisignatures. Without going too much into detail, think of a multisignature over a message `m` as a list of `N-f` signatures by `N-f` different committee nodes over the same message `m` (more efficient ways to achieve such functionality are possible, like threshold signatures or signature aggregates, but they are beyond the scope of this section). Someone holding a multisignature over `m` can be sure that a large fraction of nodes "agree" (with the meaning of "agree" depending on the particular application) on `m`. 13 | 14 | The RMC (Reliable MultiCast) protocol is a way to reliably disseminate a single hash `h` among all nodes (in the next section we explain how to extend it to disseminating arbitrary data and not only a hash). The idea is as follows: 15 | 16 | 1. Whenever a node `i` wishes to disseminate a hash `h` it initiates a reliable multicast instance by signing `h` and sending such a signed hash `SIG(h, i, sig_i)` to all other nodes. 17 | 2. Upon receiving such a signed hash, each node `j` signs `h` and sends its signed hash `SIG(h, j, sig_j)` to all other nodes. 18 | 3. Each node keeps receiving signatures under `h` from different nodes. Upon receiving `N-f` of them, this node combines the signatures into a single multisignature `msig` and sends to all nodes a message `MULTISIG(h, msig)`. 19 | 4. Upon receiving `MULTISIG(h, msig)` under `h`, each node passes it on to all other nodes as well. 20 | 21 | The moment a node receives `MULTISIG(h, msig)` is considered the completion of the multicast for this node; even though the node still keeps resubmitting messages, this instance of RMC is considered successful. If an RMC succeeds for some honest node then it is guaranteed to succeed for all honest nodes (but maybe with some delay). We refer to the file `/src/rmc.rs` for thorough documentation of this component. 22 | 23 | ### 6.2 Reliable Broadcast based on RMC 24 | 25 | Having the idea of RMC, one can modify it quite easily to achieve reliable broadcast. A naive way to do so would be to let the sender node hash the message `m` it intends to reliably broadcast into `h=hash(m)` and use RMC on the hash `h`. This almost works, except for the data availability problem -- a malicious sender might simply send a random meaningless hash `h` and then the honest nodes would never be able to recover the underlying data. 26 | 27 | To circumvent the data availability problem we instruct the sender to send the data `m` to all the nodes and only then to initiate RMC on `h = hash(m)`. If we make sure that no honest node proceeds with RMC before it receives the data `m`, then a successful RMC guarantees that most of the honest nodes hold the data `m`. This is the basic idea behind the protocol implemented for fork alerts in AlephBFT; we refer to `/src/alerts.rs` for details. 28 |
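To illustrate the counting logic at the heart of RMC, here is a minimal sketch. The message and state types (`RmcMessage`, `RmcInstance`, `on_message`) and the representation of a signature as a bare node index are assumptions made for this example -- they are not the API of the `aleph-bft-rmc` crate, which deals with real signatures, scheduling and resubmission.

```rust
use std::collections::{HashMap, HashSet};

type Hash = u64;
type NodeId = usize;

/// Messages of the sketched RMC instance; "signatures" are reduced to node ids.
enum RmcMessage {
    SignedHash { hash: Hash, signer: NodeId },
    Multisigned { hash: Hash, signers: Vec<NodeId> },
}

struct RmcInstance {
    n_members: usize,
    my_id: NodeId,
    gathered: HashMap<Hash, HashSet<NodeId>>,
    completed: HashSet<Hash>,
}

impl RmcInstance {
    /// N - f signatures are required, with f = (N - 1) / 3 faults tolerated.
    fn threshold(&self) -> usize {
        self.n_members - (self.n_members - 1) / 3
    }

    /// Handle an incoming message and return the messages to broadcast in response.
    fn on_message(&mut self, message: RmcMessage) -> Vec<RmcMessage> {
        let (threshold, my_id) = (self.threshold(), self.my_id);
        match message {
            RmcMessage::SignedHash { hash, signer } => {
                let signers = self.gathered.entry(hash).or_default();
                let mut out = Vec::new();
                // Steps 1-2: the first time we see `hash`, add and broadcast our own signature.
                if signers.insert(my_id) {
                    out.push(RmcMessage::SignedHash { hash, signer: my_id });
                }
                signers.insert(signer);
                // Step 3: enough signatures -- combine them and broadcast the multisignature.
                if signers.len() >= threshold && !self.completed.contains(&hash) {
                    self.completed.insert(hash);
                    out.push(RmcMessage::Multisigned {
                        hash,
                        signers: signers.iter().copied().collect(),
                    });
                }
                out
            }
            RmcMessage::Multisigned { hash, signers } => {
                // Step 4: the multicast of `hash` is complete for us; pass the proof on.
                self.completed.insert(hash);
                vec![RmcMessage::Multisigned { hash, signers }]
            }
        }
    }
}
```

The sketch omits the resubmission of messages until completion (handled by a scheduler in the real crate, cf. `rmc/src/scheduler.rs`), which is what gives RMC its all-or-nothing termination behaviour.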
-------------------------------------------------------------------------------- /docs/src/what_is_aleph_bft.md: -------------------------------------------------------------------------------- 1 | ## 1. What is AlephBFT? 2 | 3 | AlephBFT is a Rust implementation of the [Aleph Consensus Protocol](https://arxiv.org/abs/1908.05156) that offers a convenient API allowing it to be easily applied to various problems. The prime application of AlephBFT is to be the consensus engine (sometimes called the "finality gadget") of the [Aleph Zero blockchain](https://alephzero.org/). 4 | 5 | ### 1.1 High level idea of what AlephBFT does. 6 | 7 | From the high-level perspective, AlephBFT allows a set of `N` prespecified nodes to agree on an ordering of items arriving in some unreliable streams of data that these nodes locally observe. An illustrative example to have in mind here is when these nodes all observe a growing blockchain that does not have a built-in notion of finality, and would like to finalize blocks. Then the above-mentioned "streams of data" are simply the advancing sequences of blockchain "tips" that each of the nodes sees. Note that in this (and in all the interesting examples), because of possible forks or network delays, the data streams of individual nodes might not be consistent. 8 | The goal of a consensus protocol is to ensure consistency of the decisions, even when relying on an unreliable data source. Consequently, what AlephBFT produces is a single stream of data that "combines" all the individual streams of the `N` nodes and, importantly, **is consistent** among the nodes. Thus, in the example above, all the nodes would produce a unique sequence of **finalized** blocks (see also the corresponding [AlephBFT API section](aleph_bft_api.md#321-blockchain-finality-gadget) for a more detailed description of how to use AlephBFT as a finality gadget for a blockchain). 9 | 10 | ### 1.2 High level idea of AlephBFT requirements. 11 | 12 | Let us index the nodes taking part in the protocol as `i = 0, 1, ..., N-1` and call them "the committee". Below we list some high-level requirements for running AlephBFT among these nodes: 13 | 14 | 1. The nodes are connected via a network (that is not assumed to be 100% reliable) allowing them to send arbitrary messages to each other. 15 | 2. Each node knows the identities of all other nodes (via their public keys) and holds a private key allowing it to sign messages. 16 | 3. Each node `i` holds a data source object (to be explained in detail in the [DataIO subsection of the AlephBFT API section](aleph_bft_api.md#311-dataio) -- see `DataIO`) that allows it 1) to receive fresh pieces of data (we refer to it as the input stream `in_i`), and 2) to check that a piece of data received from another node is "available". The availability is best understood when thinking about the blockchain example and data being block hashes. Then the availability question for a block hash is essentially whether we locally hold a block with such a hash. 17 | 4. At most `(N-1)/3` of the nodes can be malicious (act with the intent to break the protocol guarantees). 18 | 19 | ### 1.3 High level idea of AlephBFT guarantees. 20 | 21 | AlephBFT guarantees (as long as at most `(N-1)/3` nodes act maliciously) that each node `i` running the protocol produces a stream of data `out_i` such that: 22 | 23 | 1. The output streams of any two nodes `out_i` and `out_j` are consistent, in the sense that at any given time one is a prefix of the other. Moreover, none of the streams gets "stuck": they keep producing items until the protocol is shut down. So, intuitively, all the streams produce the same items, but just at possibly different paces. 24 | 2. Each item in the output stream is "available" to at least half the honest (= not malicious) nodes in the committee. 25 | 3. 
Roughly speaking, all of the data items "proposed" by honest nodes (i.e., data coming from `in` streams of honest nodes) eventually land in the output stream. 26 | -------------------------------------------------------------------------------- /examples/blockchain/.gitignore: -------------------------------------------------------------------------------- 1 | node*.log 2 | -------------------------------------------------------------------------------- /examples/blockchain/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aleph-bft-examples-blockchain" 3 | version = "0.0.3" 4 | edition = "2021" 5 | authors = ["Cardinal Cryptography"] 6 | license = "Apache-2.0" 7 | publish = false 8 | 9 | [dependencies] 10 | aleph-bft = { path = "../../consensus", version = "*" } 11 | aleph-bft-mock = { path = "../../mock", version = "*" } 12 | async-trait = "0.1" 13 | clap = { version = "4", features = ["derive"] } 14 | codec = { package = "parity-scale-codec", version = "3.0", default-features = false, features = ["derive"] } 15 | env_logger = "0.11" 16 | futures = "0.3" 17 | futures-timer = "3.0" 18 | log = "0.4" 19 | parking_lot = "0.12" 20 | sha3 = "0.10" 21 | time = { version = "0.3", features = ["formatting", "macros", "local-offset"] } 22 | tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread", "io-util", "net", "time"] } 23 | unsigned-varint = { version = "0.8.0", features = ["futures", "asynchronous_codec"] } 24 | -------------------------------------------------------------------------------- /examples/blockchain/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cargo build --release 6 | 7 | clear 8 | 9 | n_members="$1" 10 | 11 | cargo run --release -- --my-id 0 --n-members $n_members --n-finalized 50 --ip-addr 127.0.0.1:43000 --bootnodes-id 0 --bootnodes-ip-addr 127.0.0.1:43000 2> node0.log & 12 | 13 | for i in $(seq 1 $(expr $n_members - 1)); do 14 | cargo run --release -- --my-id $i --n-members $n_members --n-finalized 50 --bootnodes-id 0 --bootnodes-ip-addr 127.0.0.1:43000 2> node$i.log & 15 | done 16 | 17 | echo "Running blockchain example... (Ctrl+C to exit)" 18 | trap 'kill $(jobs -p)' SIGINT SIGTERM 19 | wait $(jobs -p) 20 | -------------------------------------------------------------------------------- /examples/blockchain/src/chain.rs: -------------------------------------------------------------------------------- 1 | use crate::{network::NetworkData, DataStore}; 2 | use aleph_bft::{NodeIndex, Terminator}; 3 | use codec::{Decode, Encode}; 4 | use futures::{ 5 | channel::mpsc::{UnboundedReceiver, UnboundedSender}, 6 | FutureExt, StreamExt, 7 | }; 8 | use futures_timer::Delay; 9 | use log::{debug, info}; 10 | use parking_lot::Mutex; 11 | use std::{ 12 | fmt, 13 | fmt::{Debug, Formatter}, 14 | sync::Arc, 15 | time::{self, Duration}, 16 | }; 17 | 18 | pub type BlockNum = u32; 19 | 20 | #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Default, Encode, Decode)] 21 | pub struct Block { 22 | pub num: BlockNum, 23 | pub data: Vec, 24 | } 25 | 26 | impl Debug for Block { 27 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 28 | f.debug_struct("Block").field("num", &self.num).finish() 29 | } 30 | } 31 | 32 | impl Block { 33 | pub fn new(num: BlockNum, size: usize) -> Self { 34 | debug!(target: "Blockchain-chain", "Started creating block {:?}", num); 35 | // Not extremely random, but good enough. 
36 | let data: Vec = (0..size) 37 | .map(|i| ((i + i / 999 + (i >> 12)) % 8) as u8) 38 | .collect(); 39 | debug!(target: "Blockchain-chain", "Finished creating block {:?}", num); 40 | Block { num, data } 41 | } 42 | } 43 | 44 | pub type BlockPlan = Arc NodeIndex + Sync + Send + 'static>; 45 | 46 | #[derive(Clone)] 47 | pub struct ChainConfig { 48 | // Our NodeIndex. 49 | pub node_ix: NodeIndex, 50 | // Number of random bytes to include in the block. 51 | pub data_size: usize, 52 | // Delay between blocks 53 | pub blocktime: Duration, 54 | // Delay before the first block should be created 55 | pub init_delay: Duration, 56 | // f(k) means who should author the kth block 57 | pub authorship_plan: BlockPlan, 58 | } 59 | 60 | impl Debug for ChainConfig { 61 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 62 | f.debug_struct("ChainConfig") 63 | .field("node index", &self.node_ix) 64 | .field("data size", &self.data_size) 65 | .field("blocktime", &self.blocktime) 66 | .field("initial delay", &self.init_delay) 67 | .finish() 68 | } 69 | } 70 | 71 | impl ChainConfig { 72 | pub fn new( 73 | node_ix: NodeIndex, 74 | n_members: usize, 75 | data_size: usize, 76 | blocktime: Duration, 77 | init_delay: Duration, 78 | ) -> ChainConfig { 79 | //Round robin block authorship plan. 80 | let authorship_plan = Arc::new(move |num: BlockNum| NodeIndex((num as usize) % n_members)); 81 | ChainConfig { 82 | node_ix, 83 | data_size, 84 | blocktime, 85 | init_delay, 86 | authorship_plan, 87 | } 88 | } 89 | } 90 | 91 | // Runs a process that maintains a simple blockchain. The blocks are created every config.blocktime_ms 92 | // milliseconds and the block authors are determined by config.authorship_plan. The default config 93 | // uses round robin authorship: node k creates blocks number n if n%n_members = k. 94 | // A node will create a block n only if: 95 | // 1) it received the previous block (n-1) 96 | // 2) it is the nth block author 97 | // 3) enough time has passed -- to maintain blocktime of roughly config.blocktime_ms milliseconds. 98 | // This process holds two channel endpoints: block_rx to receive blocks from the network and 99 | // block_tx to push created blocks to the network (to send them to all the remaining nodes). 100 | pub async fn run_blockchain( 101 | config: ChainConfig, 102 | mut data_store: DataStore, 103 | current_block: Arc>, 104 | mut blocks_from_network: UnboundedReceiver, 105 | blocks_for_network: UnboundedSender, 106 | mut messages_from_network: UnboundedReceiver, 107 | mut terminator: Terminator, 108 | ) { 109 | let start_time = time::Instant::now(); 110 | for block_num in 1.. { 111 | while *current_block.lock() < block_num { 112 | let curr_author = (config.authorship_plan)(block_num); 113 | if curr_author == config.node_ix { 114 | // We need to create the block, but at the right time 115 | let curr_time = time::Instant::now(); 116 | let block_delay = (block_num - 1) * config.blocktime + config.init_delay; 117 | let block_creation_time = start_time + block_delay; 118 | if curr_time >= block_creation_time { 119 | let block = Block::new(block_num, config.data_size); 120 | blocks_for_network 121 | .unbounded_send(block) 122 | .expect("network should accept blocks"); 123 | data_store.add_block(block_num); 124 | } 125 | } 126 | // We tick every 10ms. 127 | let mut delay_fut = Delay::new(Duration::from_millis(10)).fuse(); 128 | 129 | futures::select! 
{ 130 | maybe_block = blocks_from_network.next() => { 131 | if let Some(block) = maybe_block { 132 | data_store.add_block(block.num); 133 | //We drop the block at this point, only keep track of the fact that we received it. 134 | } 135 | } 136 | maybe_message = messages_from_network.next() => { 137 | if let Some(message) = maybe_message { 138 | data_store.add_message(message); 139 | } 140 | } 141 | _ = &mut delay_fut => { 142 | //We do nothing, but this takes us out of the select. 143 | } 144 | _ = terminator.get_exit().fuse() => { 145 | info!(target: "Blockchain-chain", "Received exit signal."); 146 | terminator.terminate_sync().await; 147 | return; 148 | }, 149 | } 150 | } 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /examples/blockchain/src/data.rs: -------------------------------------------------------------------------------- 1 | use crate::{BlockNum, NetworkData}; 2 | use async_trait::async_trait; 3 | use futures::channel::mpsc::UnboundedSender; 4 | use log::debug; 5 | use parking_lot::Mutex; 6 | use std::{ 7 | collections::{HashMap, HashSet}, 8 | fmt::{Debug, Formatter}, 9 | sync::Arc, 10 | }; 11 | 12 | pub type Data = BlockNum; 13 | 14 | #[derive(Clone)] 15 | pub struct DataStore { 16 | next_message_id: u32, 17 | current_block: Arc>, 18 | available_blocks: HashSet, 19 | message_requirements: HashMap, 20 | dependent_messages: HashMap>, 21 | pending_messages: HashMap, 22 | messages_for_member: UnboundedSender, 23 | } 24 | 25 | impl Debug for DataStore { 26 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 27 | f.debug_struct("DataStore") 28 | .field("next message id", &self.next_message_id) 29 | .field("available block count", &self.available_blocks.len()) 30 | .field( 31 | "message requirement count", 32 | &self.message_requirements.len(), 33 | ) 34 | .field("dependent message count", &self.dependent_messages.len()) 35 | .field("pending message count", &self.pending_messages.len()) 36 | .finish() 37 | } 38 | } 39 | 40 | impl DataStore { 41 | pub fn new( 42 | current_block: Arc>, 43 | messages_for_member: UnboundedSender, 44 | ) -> Self { 45 | let available_blocks = (0..=*current_block.lock()).collect(); 46 | DataStore { 47 | next_message_id: 0, 48 | current_block, 49 | available_blocks, 50 | message_requirements: HashMap::new(), 51 | dependent_messages: HashMap::new(), 52 | pending_messages: HashMap::new(), 53 | messages_for_member, 54 | } 55 | } 56 | 57 | fn add_pending_message(&mut self, message: NetworkData, requirements: Vec) { 58 | let message_id = self.next_message_id; 59 | // Whatever test you are running should end before this becomes a problem. 
60 | self.next_message_id += 1; 61 | for block_num in requirements.iter() { 62 | self.dependent_messages 63 | .entry(*block_num) 64 | .or_default() 65 | .push(message_id); 66 | } 67 | self.message_requirements 68 | .insert(message_id, requirements.len()); 69 | self.pending_messages.insert(message_id, message); 70 | } 71 | 72 | pub fn add_message(&mut self, message: NetworkData) { 73 | let requirements: Vec<_> = message 74 | .included_data() 75 | .into_iter() 76 | .filter(|b| !self.available_blocks.contains(b)) 77 | .collect(); 78 | if requirements.is_empty() { 79 | self.messages_for_member 80 | .unbounded_send(message) 81 | .expect("member accept messages"); 82 | } else { 83 | self.add_pending_message(message, requirements); 84 | } 85 | } 86 | 87 | fn push_messages(&mut self, num: BlockNum) { 88 | for message_id in self.dependent_messages.entry(num).or_default().iter() { 89 | *self 90 | .message_requirements 91 | .get_mut(message_id) 92 | .expect("there are some requirements") -= 1; 93 | if self.message_requirements[message_id] == 0 { 94 | let message = self 95 | .pending_messages 96 | .remove(message_id) 97 | .expect("there is a pending message"); 98 | self.messages_for_member 99 | .unbounded_send(message) 100 | .expect("member accept messages"); 101 | self.message_requirements.remove(message_id); 102 | } 103 | } 104 | self.dependent_messages.remove(&num); 105 | } 106 | 107 | pub fn add_block(&mut self, num: BlockNum) { 108 | debug!(target: "data-store", "Added block {:?}.", num); 109 | self.available_blocks.insert(num); 110 | self.push_messages(num); 111 | while self 112 | .available_blocks 113 | .contains(&(*self.current_block.lock() + 1)) 114 | { 115 | *self.current_block.lock() += 1; 116 | } 117 | } 118 | } 119 | 120 | #[derive(Clone)] 121 | pub struct DataProvider { 122 | current_block: Arc>, 123 | } 124 | 125 | #[async_trait] 126 | impl aleph_bft::DataProvider for DataProvider { 127 | type Output = Data; 128 | 129 | async fn get_data(&mut self) -> Option { 130 | Some(*self.current_block.lock()) 131 | } 132 | } 133 | 134 | impl DataProvider { 135 | pub fn new() -> (Self, Arc>) { 136 | let current_block = Arc::new(Mutex::new(0)); 137 | ( 138 | DataProvider { 139 | current_block: current_block.clone(), 140 | }, 141 | current_block, 142 | ) 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /examples/blockchain/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | io::Write, 4 | str::FromStr, 5 | time::{Duration, Instant}, 6 | }; 7 | 8 | use clap::Parser; 9 | use futures::{channel::oneshot, StreamExt}; 10 | use log::{debug, error, info}; 11 | use time::{macros::format_description, OffsetDateTime}; 12 | 13 | use aleph_bft::{run_session, NodeIndex, Terminator}; 14 | use aleph_bft_mock::{FinalizationHandler, Keychain, Loader, Saver, Spawner}; 15 | use chain::{run_blockchain, Block, BlockNum, ChainConfig}; 16 | use data::{Data, DataProvider, DataStore}; 17 | use network::{Address, NetworkData, NetworkManager}; 18 | 19 | mod chain; 20 | mod data; 21 | mod network; 22 | 23 | const TXS_PER_BLOCK: usize = 50000; 24 | const TX_SIZE: usize = 300; 25 | const BLOCK_TIME: Duration = Duration::from_millis(1000); 26 | const INITIAL_DELAY: Duration = Duration::from_millis(5000); 27 | 28 | /// Blockchain example. 
29 | #[derive(Parser, Debug)] 30 | #[clap(author, version, about, long_about = None)] 31 | struct Args { 32 | /// Our index 33 | #[clap(long, value_parser)] 34 | my_id: usize, 35 | 36 | /// IP address of the node 37 | #[clap(default_value = "127.0.0.1:0", long, value_parser)] 38 | ip_addr: String, 39 | 40 | /// Bootnodes indices 41 | #[clap(long, value_parser, value_delimiter = ',')] 42 | bootnodes_id: Vec, 43 | 44 | /// Bootnodes addresses 45 | #[clap(long, value_parser, value_delimiter = ',')] 46 | bootnodes_ip_addr: Vec, 47 | 48 | /// Size of the committee 49 | #[clap(long, value_parser)] 50 | n_members: usize, 51 | 52 | /// Number of data to be finalized 53 | #[clap(long, value_parser)] 54 | n_finalized: usize, 55 | } 56 | 57 | #[tokio::main] 58 | async fn main() { 59 | let time_format = 60 | format_description!("[year]-[month]-[day] [hour]:[minute]:[second].[subsecond digits:3]"); 61 | env_logger::builder() 62 | .format(move |buf, record| { 63 | writeln!( 64 | buf, 65 | "{} {} {}: {}", 66 | record.level(), 67 | OffsetDateTime::now_local() 68 | .unwrap_or_else(|_| OffsetDateTime::now_utc()) 69 | .format(&time_format) 70 | .unwrap(), 71 | record.target(), 72 | record.args() 73 | ) 74 | }) 75 | .filter(None, log::LevelFilter::Debug) 76 | .init(); 77 | 78 | let args = Args::parse(); 79 | let start_time = Instant::now(); 80 | info!(target: "Blockchain-main", "Getting network up."); 81 | let bootnodes: HashMap = args 82 | .bootnodes_id 83 | .into_iter() 84 | .zip(args.bootnodes_ip_addr) 85 | .map(|(id, addr)| (id.into(), Address::from_str(&addr).unwrap())) 86 | .collect(); 87 | let ( 88 | mut manager, 89 | network, 90 | block_from_data_io_tx, 91 | block_from_network_rx, 92 | message_for_network, 93 | message_from_network, 94 | ) = NetworkManager::new(args.my_id.into(), args.ip_addr, args.n_members, bootnodes) 95 | .await 96 | .expect("Network set-up should succeed."); 97 | let (data_provider, current_block) = DataProvider::new(); 98 | let (finalization_handler, mut finalized_rx) = FinalizationHandler::new(); 99 | let data_store = DataStore::new(current_block.clone(), message_for_network); 100 | 101 | let (terminator_tx, terminator_rx) = oneshot::channel(); 102 | let mut terminator = Terminator::create_root(terminator_rx, "Blockchain example"); 103 | let network_terminator = terminator.add_offspring_connection("blockchain network"); 104 | let network_handle = tokio::spawn(async move { manager.run(network_terminator).await }); 105 | 106 | let data_size: usize = TXS_PER_BLOCK * TX_SIZE; 107 | let chain_config = ChainConfig::new( 108 | args.my_id.into(), 109 | args.n_members, 110 | data_size, 111 | BLOCK_TIME, 112 | INITIAL_DELAY, 113 | ); 114 | let chain_terminator = terminator.add_offspring_connection("chain"); 115 | let chain_handle = tokio::spawn(async move { 116 | run_blockchain( 117 | chain_config, 118 | data_store, 119 | current_block, 120 | block_from_network_rx, 121 | block_from_data_io_tx, 122 | message_from_network, 123 | chain_terminator, 124 | ) 125 | .await 126 | }); 127 | 128 | let member_terminator = terminator.add_offspring_connection("AlephBFT-member"); 129 | let member_handle = tokio::spawn(async move { 130 | let keychain = Keychain::new(args.n_members.into(), args.my_id.into()); 131 | let config = aleph_bft::default_config( 132 | args.n_members.into(), 133 | args.my_id.into(), 134 | 0, 135 | 5000, 136 | Duration::ZERO, 137 | ) 138 | .expect("Should always succeed with Duration::ZERO"); 139 | let backup_loader = Loader::new(vec![]); 140 | let backup_saver = Saver::new(); 141 
| let local_io = aleph_bft::LocalIO::new( 142 | data_provider, 143 | finalization_handler, 144 | backup_saver, 145 | backup_loader, 146 | ); 147 | run_session( 148 | config, 149 | local_io, 150 | network, 151 | keychain, 152 | Spawner {}, 153 | member_terminator, 154 | ) 155 | .await 156 | }); 157 | 158 | let mut max_block_finalized = 0; 159 | while let Some(block_num) = finalized_rx.next().await { 160 | if max_block_finalized < block_num { 161 | max_block_finalized = block_num; 162 | } 163 | debug!(target: "Blockchain-main", 164 | "Got new batch. Highest finalized = {:?}", 165 | max_block_finalized 166 | ); 167 | if max_block_finalized >= args.n_finalized as u32 { 168 | break; 169 | } 170 | } 171 | if max_block_finalized < args.n_finalized as u32 { 172 | error!(target: "Blockchain-main", "Finalization stream finished too soon. Highest finalized = {:?}, expected {:?}", max_block_finalized, args.n_finalized); 173 | panic!("Finalization stream finished too soon."); 174 | } 175 | 176 | let stop_time = Instant::now(); 177 | let tot_millis = (stop_time - start_time - INITIAL_DELAY).as_millis(); 178 | let tps = (args.n_finalized as f64) * (TXS_PER_BLOCK as f64) / (0.001 * (tot_millis as f64)); 179 | info!(target: "Blockchain-main", "Achieved {:?} tps.", tps); 180 | 181 | terminator_tx.send(()).expect("should send"); 182 | terminator.get_exit().await.expect("should receive"); 183 | terminator.terminate_sync().await; 184 | 185 | member_handle.await.unwrap(); 186 | chain_handle.await.unwrap(); 187 | network_handle.await.unwrap(); 188 | } 189 | -------------------------------------------------------------------------------- /examples/ordering/.gitignore: -------------------------------------------------------------------------------- 1 | node*.log 2 | aleph-bft-examples-ordering-backup 3 | -------------------------------------------------------------------------------- /examples/ordering/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aleph-bft-examples-ordering" 3 | version = "0.1.0" 4 | edition = "2021" 5 | authors = ["Cardinal Cryptography"] 6 | license = "Apache-2.0" 7 | publish = false 8 | 9 | [dependencies] 10 | aleph-bft = { path = "../../consensus", version = "*" } 11 | aleph-bft-mock = { path = "../../mock", version = "*" } 12 | aleph-bft-types = { path = "../../types", version = "*" } 13 | async-trait = "0.1" 14 | clap = { version = "4", features = ["derive"] } 15 | codec = { package = "parity-scale-codec", version = "3.0", default-features = false, features = ["derive"] } 16 | env_logger = "0.11" 17 | futures = "0.3" 18 | log = "0.4" 19 | parking_lot = "0.12" 20 | time = { version = "0.3", features = ["formatting", "macros", "local-offset"] } 21 | tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread", "io-util", "net", "time", "fs"] } 22 | tokio-util = { version = "0.7.13", features = ["compat"] } 23 | -------------------------------------------------------------------------------- /examples/ordering/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | function usage() { 6 | cat << EOF 7 | Usage: 8 | This script is a demonstration usage of AlephBFT protocol, in which there are N nodes and they want to achieve 9 | a consensus with regards to provided data. The data sent to AlephBFT from each node is a stream of integers from range 10 | [0, N * DATA_ITEMS), where DATA_ITEMS is configurable. 
Each node of index 'i' sends to the consensus 11 | integers from range [i * DATA_ITEMS; (i + 1) * DATA_ITEMS). where 0 <= i < N. At the end, each node makes 12 | sure that it receives all integers from range [0, N * DATA_ITEMS), each integer exactly once. 13 | 14 | N nodes are started on your machine, and they communicate via UDP. Not all nodes behave correctly - some of them crash 15 | or are stuck while providing data. 16 | 17 | This script is using aleph-bft-examples-ordering and assumes to be available in a relative folder from this script path 18 | ../../target/release/aleph-bft-examples-ordering 19 | 20 | $0 21 | [-n|--nodes NODES] 22 | number of all non-crashing nodes; some of them can have stalled data provider 23 | [-c|--crashing-nodes CRASHING_NODES] 24 | number of nodes that crash while providing data 25 | [-s|--stalling-data-providers STALLING_DATA_PROVIDERS] 26 | number of nodes that eventually stall while providing data; must be less than --nodes 27 | [--crashes-count CRASHES_COUNT] 28 | how many times a crashing node should crash 29 | [--data-items DATA_ITEMS] 30 | how many data items each node should order 31 | [--crash-restart-delay-seconds CRASH_RESTART_DELAY_SECONDS] 32 | delay (seconds) between subsequent node crashes 33 | [--unit-creation-delay UNIT_CREATION_DELAY] 34 | unit creation delay (milliseconds), default 200 35 | EOF 36 | exit 0 37 | } 38 | 39 | NORMAL=$(tput sgr0) 40 | GREEN=$(tput setaf 2; tput bold) 41 | YELLOW=$(tput setaf 3) 42 | RED=$(tput setaf 1) 43 | 44 | function get_timestamp() { 45 | echo "$(date +'%Y-%m-%d %T:%3N')" 46 | } 47 | 48 | function error() { 49 | echo -e "$(get_timestamp) $RED$*$NORMAL" 50 | exit 1 51 | } 52 | 53 | function info() { 54 | echo -e "$(get_timestamp) $GREEN$*$NORMAL" 55 | } 56 | 57 | function warning() { 58 | echo -e "$(get_timestamp) $YELLOW$*$NORMAL" 59 | } 60 | 61 | function run_ordering_binary() { 62 | local id="$1" 63 | local starting_data_item="$2" 64 | local data_items=$3 65 | local should_stall="${4:-no}" 66 | 67 | local binary_args=( 68 | --id "$id" 69 | --ports "${PORTS}" 70 | --starting-data-item "${starting_data_item}" 71 | --data-items "${data_items}" 72 | --required-finalization-value "${EXPECTED_FINALIZED_DATA_ITEMS}" 73 | --unit-creation-delay "${UNIT_CREATION_DELAY}" 74 | ) 75 | if [[ "${should_stall}" == "yes-stall" ]]; then 76 | binary_args+=(--should-stall) 77 | fi 78 | 79 | info "Starting node ${id} to provide items from ${starting_data_item} to $(( starting_data_item + data_items - 1 )), inclusive" 80 | "${ordering_binary}" "${binary_args[@]}" 2>> "node${id}.log" > /dev/null & 81 | } 82 | 83 | function run_crash_node () { 84 | id="$1" 85 | for run_attempt_index in $(seq 0 $(( CRASHES_COUNT - 1 ))); do 86 | run_ordering_binary "${id}" "${DATA_ITEMS_COUNTER}" "${DATA_ITEMS}" 87 | pid=$! 88 | info "Waiting ${CRASH_RESTART_DELAY_SECONDS} seconds..." 
89 | sleep "${CRASH_RESTART_DELAY_SECONDS}" 90 | info "Killing node with pid ${pid}" 91 | kill -9 "${pid}" 2> /dev/null 92 | done 93 | run_ordering_binary "${id}" "${DATA_ITEMS_COUNTER}" "${DATA_ITEMS}" 94 | } 95 | 96 | NODES=2 97 | CRASHING_NODES=2 98 | STALLING_DATA_PROVIDERS=1 99 | CRASHES_COUNT=3 100 | DATA_ITEMS=25 101 | CRASH_RESTART_DELAY_SECONDS=5 102 | DATA_ITEMS_COUNTER=0 103 | UNIT_CREATION_DELAY=200 104 | 105 | while [[ $# -gt 0 ]]; do 106 | case "$1" in 107 | -n|--nodes) 108 | NODES="$2" 109 | shift;shift 110 | ;; 111 | -c|--crashing-nodes) 112 | CRASHING_NODES="$2" 113 | shift;shift 114 | ;; 115 | -s|--stalling-data-providers) 116 | STALLING_DATA_PROVIDERS="$2" 117 | shift;shift 118 | ;; 119 | --crashes-count) 120 | CRASHES_COUNT="$2" 121 | shift;shift 122 | ;; 123 | --data-items) 124 | DATA_ITEMS="$2" 125 | shift;shift 126 | ;; 127 | --crash-restart-delay-seconds) 128 | CRASH_RESTART_DELAY_SECONDS="$2" 129 | shift;shift 130 | ;; 131 | --unit-creation-delay) 132 | UNIT_CREATION_DELAY="$2" 133 | shift;shift 134 | ;; 135 | --help) 136 | usage 137 | shift 138 | ;; 139 | *) 140 | error "Unrecognized argument $1!" 141 | ;; 142 | esac 143 | done 144 | 145 | script_path="${BASH_SOURCE[0]}" 146 | script_dir=$(dirname "${script_path}") 147 | ordering_binary_dir=$(realpath "${script_dir}/../../") 148 | ordering_binary="${ordering_binary_dir}/target/release/aleph-bft-examples-ordering" 149 | 150 | if [[ ! -x "${ordering_binary}" ]]; then 151 | error "${ordering_binary} does not exist or it's not an executable file!" 152 | fi 153 | 154 | ALL_NODES=$(( NODES + CRASHING_NODES )) 155 | PORTS=($(seq -s , 10000 $(( 10000 + ALL_NODES - 1 )))) 156 | EXPECTED_FINALIZED_DATA_ITEMS=$(( ALL_NODES * DATA_ITEMS )) 157 | 158 | for id in $(seq 0 $(( ALL_NODES - 1 ))); do 159 | rm -f "aleph-bft-examples-ordering-backup/${id}.units" 160 | rm -f "node${id}.log" 161 | done 162 | 163 | info "Starting $0 164 | PARAMETERS 165 | number of nodes: ${NODES} 166 | number of crashing nodes: ${CRASHING_NODES} 167 | number of nodes with stalling DataProviders: ${STALLING_DATA_PROVIDERS} 168 | number of forced crashes: ${CRASHES_COUNT} 169 | number of ordered data per batch: ${DATA_ITEMS} 170 | restart delay: ${CRASH_RESTART_DELAY_SECONDS} second(s) 171 | " 172 | 173 | for id in $(seq 0 $(( NODES - 1 ))); do 174 | if [[ "${id}" -lt "${STALLING_DATA_PROVIDERS}" ]]; then 175 | run_ordering_binary "${id}" "${DATA_ITEMS_COUNTER}" "${DATA_ITEMS}" "yes-stall" 176 | else 177 | run_ordering_binary "${id}" "${DATA_ITEMS_COUNTER}" "${DATA_ITEMS}" 178 | fi 179 | DATA_ITEMS_COUNTER=$(( DATA_ITEMS_COUNTER + DATA_ITEMS )) 180 | done 181 | 182 | for id in $(seq $(( NODES )) $(( ALL_NODES - 1 ))); do 183 | run_crash_node "${id}" & 184 | DATA_ITEMS_COUNTER=$(( DATA_ITEMS_COUNTER + DATA_ITEMS )) 185 | done 186 | 187 | trap 'kill $(jobs -p); wait' SIGINT SIGTERM 188 | wait 189 | -------------------------------------------------------------------------------- /examples/ordering/src/dataio.rs: -------------------------------------------------------------------------------- 1 | use aleph_bft_types::{ 2 | DataProvider as DataProviderT, FinalizationHandler as FinalizationHandlerT, NodeIndex, 3 | }; 4 | use async_trait::async_trait; 5 | use codec::{Decode, Encode}; 6 | use futures::{channel::mpsc::unbounded, future::pending}; 7 | use log::{error, info}; 8 | 9 | type Receiver = futures::channel::mpsc::UnboundedReceiver; 10 | type Sender = futures::channel::mpsc::UnboundedSender; 11 | 12 | pub type Data = (NodeIndex, u32); 13 | 14 | 
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default, Decode, Encode)] 15 | pub struct DataProvider { 16 | id: NodeIndex, 17 | starting_data_item: u32, 18 | data_items: u32, 19 | current_data: u32, 20 | stalled: bool, 21 | } 22 | 23 | impl DataProvider { 24 | pub fn new(id: NodeIndex, starting_data_item: u32, data_items: u32, stalled: bool) -> Self { 25 | Self { 26 | id, 27 | starting_data_item, 28 | current_data: starting_data_item, 29 | data_items, 30 | stalled, 31 | } 32 | } 33 | } 34 | 35 | #[async_trait] 36 | impl DataProviderT for DataProvider { 37 | type Output = Data; 38 | 39 | async fn get_data(&mut self) -> Option { 40 | if self.starting_data_item + self.data_items == self.current_data { 41 | if self.stalled { 42 | info!("Awaiting DataProvider::get_data forever"); 43 | pending::<()>().await; 44 | } 45 | info!("Providing None"); 46 | None 47 | } else { 48 | let data = (self.id, self.current_data); 49 | info!("Providing data: {}", self.current_data); 50 | self.current_data += 1; 51 | Some(data) 52 | } 53 | } 54 | } 55 | 56 | #[derive(Clone)] 57 | pub struct FinalizationHandler { 58 | tx: Sender, 59 | } 60 | 61 | impl FinalizationHandlerT for FinalizationHandler { 62 | fn data_finalized(&mut self, data: Data) { 63 | if let Err(e) = self.tx.unbounded_send(data) { 64 | error!(target: "finalization-handler", "Error when sending data from FinalizationHandler {:?}.", e); 65 | } 66 | } 67 | } 68 | 69 | impl FinalizationHandler { 70 | pub fn new() -> (Self, Receiver) { 71 | let (tx, rx) = unbounded(); 72 | (Self { tx }, rx) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /examples/ordering/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::io::Write; 2 | mod dataio; 3 | mod network; 4 | 5 | use aleph_bft::{default_delay_config, run_session, NodeIndex, Terminator}; 6 | use aleph_bft_mock::{Keychain, Spawner}; 7 | use clap::Parser; 8 | use dataio::{Data, DataProvider, FinalizationHandler}; 9 | use futures::{channel::oneshot, io, StreamExt}; 10 | use log::{debug, error, info}; 11 | use network::Network; 12 | use std::{path::Path, sync::Arc, time::Duration}; 13 | use time::{macros::format_description, OffsetDateTime}; 14 | use tokio::fs::{self, File}; 15 | use tokio_util::compat::{Compat, TokioAsyncWriteCompatExt}; 16 | 17 | /// Example node producing linear order. 
18 | #[derive(Parser, Debug)] 19 | #[clap(author, version, about, long_about = None)] 20 | struct Args { 21 | /// Index of the node 22 | #[clap(long, value_parser)] 23 | id: usize, 24 | 25 | /// Ports 26 | #[clap(long, value_parser, value_delimiter = ',')] 27 | ports: Vec, 28 | 29 | /// Number of items to be ordered 30 | #[clap(long, value_parser)] 31 | data_items: u32, 32 | 33 | /// Number of the first created item 34 | #[clap(long, value_parser)] 35 | starting_data_item: u32, 36 | 37 | /// Should the node stall after providing all its items 38 | #[clap(long, value_parser)] 39 | should_stall: bool, 40 | 41 | /// Value which denotes range of integers that must be seen as finalized from all nodes 42 | /// ie all nodes must finalize integer sequence [0; required_finalization_value) 43 | #[clap(long, value_parser)] 44 | required_finalization_value: u32, 45 | 46 | /// Unit creation delay (milliseconds) 47 | #[clap(long, default_value = "200", value_parser)] 48 | unit_creation_delay: u64, 49 | } 50 | 51 | async fn create_backup( 52 | node_id: NodeIndex, 53 | ) -> Result<(Compat, io::Cursor>), io::Error> { 54 | let stash_path = Path::new("./aleph-bft-examples-ordering-backup"); 55 | fs::create_dir_all(stash_path).await?; 56 | let file_path = stash_path.join(format!("{}.units", node_id.0)); 57 | let loader = if file_path.exists() { 58 | io::Cursor::new(fs::read(&file_path).await?) 59 | } else { 60 | io::Cursor::new(Vec::new()) 61 | }; 62 | let saver = fs::OpenOptions::new() 63 | .create(true) 64 | .append(true) 65 | .open(file_path) 66 | .await?; 67 | Ok((saver.compat_write(), loader)) 68 | } 69 | 70 | #[tokio::main] 71 | async fn main() { 72 | let time_format = 73 | format_description!("[year]-[month]-[day] [hour]:[minute]:[second].[subsecond digits:3]"); 74 | env_logger::builder() 75 | .format(move |buf, record| { 76 | writeln!( 77 | buf, 78 | "{} {} {}: {}", 79 | record.level(), 80 | OffsetDateTime::now_local() 81 | .unwrap_or_else(|_| OffsetDateTime::now_utc()) 82 | .format(&time_format) 83 | .unwrap(), 84 | record.target(), 85 | record.args() 86 | ) 87 | }) 88 | .filter(None, log::LevelFilter::Debug) 89 | .init(); 90 | 91 | let Args { 92 | id, 93 | ports, 94 | data_items, 95 | starting_data_item, 96 | should_stall, 97 | required_finalization_value, 98 | unit_creation_delay, 99 | } = Args::parse(); 100 | 101 | let id: NodeIndex = id.into(); 102 | 103 | info!("Getting network up."); 104 | let network = Network::new(id, &ports) 105 | .await 106 | .expect("Could not create a Network instance."); 107 | let n_members = ports.len().into(); 108 | let data_provider = DataProvider::new(id, starting_data_item, data_items, should_stall); 109 | let (finalization_handler, mut finalized_rx) = FinalizationHandler::new(); 110 | let (backup_saver, backup_loader) = create_backup(id) 111 | .await 112 | .expect("Error setting up unit saving"); 113 | let local_io = aleph_bft::LocalIO::new( 114 | data_provider, 115 | finalization_handler, 116 | backup_saver, 117 | backup_loader, 118 | ); 119 | 120 | let (exit_tx, exit_rx) = oneshot::channel(); 121 | let member_terminator = Terminator::create_root(exit_rx, "AlephBFT-member"); 122 | let mut delay_config = default_delay_config(); 123 | delay_config.unit_creation_delay = 124 | Arc::new(move |_| Duration::from_millis(unit_creation_delay)); 125 | let member_handle = tokio::spawn(async move { 126 | let keychain = Keychain::new(n_members, id); 127 | let config = aleph_bft::create_config(n_members, id, 0, 5000, delay_config, Duration::ZERO) 128 | .expect("Should always 
succeed with Duration::ZERO"); 129 | run_session( 130 | config, 131 | local_io, 132 | network, 133 | keychain, 134 | Spawner {}, 135 | member_terminator, 136 | ) 137 | .await 138 | }); 139 | 140 | let node_count = ports.len(); 141 | let mut count_finalized = vec![0; node_count]; 142 | 143 | let mut finalized_items = vec![0; required_finalization_value as usize]; 144 | 145 | loop { 146 | match finalized_rx.next().await { 147 | Some((id, number)) => { 148 | count_finalized[id.0] += 1; 149 | finalized_items[number as usize] += 1; 150 | debug!( 151 | "Finalized new item: node {:?}, number {:?}; total: {:?}", 152 | id.0, number, &count_finalized, 153 | ); 154 | } 155 | None => { 156 | error!( 157 | "Finalization stream finished too soon. Got {:?} items, wanted {:?} items", 158 | &count_finalized, data_items 159 | ); 160 | panic!("Finalization stream finished too soon."); 161 | } 162 | } 163 | if finalized_items.iter().all(|item| *item >= 1) { 164 | info!( 165 | "Finalized all items from 0 to {}, at least once.", 166 | required_finalization_value - 1 167 | ); 168 | info!("Waiting 10 seconds for other nodes..."); 169 | tokio::time::sleep(Duration::from_secs(10)).await; 170 | info!("Shutdown."); 171 | break; 172 | } 173 | } 174 | 175 | exit_tx.send(()).expect("should send"); 176 | member_handle.await.unwrap(); 177 | } 178 | -------------------------------------------------------------------------------- /examples/ordering/src/network.rs: -------------------------------------------------------------------------------- 1 | use crate::Data; 2 | use aleph_bft::{NodeIndex, Recipient}; 3 | use aleph_bft_mock::{Hasher64, PartialMultisignature, Signature}; 4 | use codec::{Decode, Encode}; 5 | use log::error; 6 | use std::net::SocketAddr; 7 | use tokio::{ 8 | io, 9 | net::UdpSocket, 10 | time::{sleep, Duration}, 11 | }; 12 | 13 | const MAX_UDP_DATAGRAM_BYTES: usize = 65536; 14 | 15 | pub type NetworkData = aleph_bft::NetworkData; 16 | 17 | #[derive(Debug)] 18 | pub struct Network { 19 | my_id: usize, 20 | addresses: Vec, 21 | socket: UdpSocket, 22 | /// Buffer for incoming data. 23 | /// 24 | /// It's allocated on the heap, because otherwise it overflows the stack when used inside a future. 
25 | buffer: Box<[u8; MAX_UDP_DATAGRAM_BYTES]>, 26 | } 27 | 28 | impl Network { 29 | pub async fn new( 30 | my_id: NodeIndex, 31 | ports: &[usize], 32 | ) -> Result> { 33 | let my_id = my_id.0; 34 | assert!(my_id < ports.len()); 35 | 36 | let addresses = ports 37 | .iter() 38 | .map(|p| format!("127.0.0.1:{}", p).parse::()) 39 | .collect::, _>>()?; 40 | 41 | let socket = Self::bind_socket(addresses[my_id]).await; 42 | Ok(Network { 43 | my_id, 44 | addresses, 45 | socket, 46 | buffer: Box::new([0; MAX_UDP_DATAGRAM_BYTES]), 47 | }) 48 | } 49 | 50 | async fn bind_socket(address: SocketAddr) -> UdpSocket { 51 | loop { 52 | match UdpSocket::bind(address).await { 53 | Ok(socket) => { 54 | return socket; 55 | } 56 | Err(e) => { 57 | error!("{}", e); 58 | error!("Waiting 10 seconds before the next attempt..."); 59 | sleep(Duration::from_secs(10)).await; 60 | } 61 | }; 62 | } 63 | } 64 | 65 | fn send_to_peer(&self, data: NetworkData, recipient: usize) { 66 | if let Err(e) = self.try_send_to_peer(data, recipient) { 67 | error!("Sending failed, recipient: {:?}, error: {:?}", recipient, e); 68 | } 69 | } 70 | 71 | fn try_send_to_peer(&self, data: NetworkData, recipient: usize) -> io::Result<()> { 72 | let encoded = data.encode(); 73 | assert!(encoded.len() <= MAX_UDP_DATAGRAM_BYTES); 74 | 75 | self.socket 76 | .try_send_to(&encoded, self.addresses[recipient])?; 77 | Ok(()) 78 | } 79 | } 80 | 81 | #[async_trait::async_trait] 82 | impl aleph_bft::Network for Network { 83 | fn send(&self, data: NetworkData, recipient: Recipient) { 84 | match recipient { 85 | Recipient::Everyone => { 86 | for r in 0..self.addresses.len() { 87 | if r != self.my_id { 88 | self.send_to_peer(data.clone(), r); 89 | } 90 | } 91 | } 92 | Recipient::Node(r) => { 93 | if r.0 < self.addresses.len() { 94 | self.send_to_peer(data, r.0); 95 | } else { 96 | error!("Recipient unknown: {}", r.0); 97 | } 98 | } 99 | } 100 | } 101 | 102 | async fn next_event(&mut self) -> Option { 103 | match self.socket.recv_from(self.buffer.as_mut()).await { 104 | Ok((_len, _addr)) => NetworkData::decode(&mut &self.buffer[..]).ok(), 105 | Err(e) => { 106 | error!("Couldn't receive datagram: {:?}", e); 107 | None 108 | } 109 | } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /gen_cov_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | RUSTFLAGS="-Z instrument-coverage" \ 6 | LLVM_PROFILE_FILE="aleph_bft-%m.profraw" \ 7 | cargo test --tests $1 2> covtest.out 8 | 9 | version=$(grep Running covtest.out | sed -e "s/.*aleph_bft-\(.*\))/\1/") 10 | rm covtest.out 11 | cp target/debug/deps/aleph_bft-"$version" target/debug/deps/aleph_bft-coverage 12 | 13 | cargo profdata -- merge -sparse aleph_bft-*.profraw -o aleph_bft.profdata 14 | rm aleph_bft-*.profraw 15 | 16 | cargo cov -- report \ 17 | --use-color \ 18 | --ignore-filename-regex='/rustc' \ 19 | --ignore-filename-regex='/.cargo/registry' \ 20 | --instr-profile=aleph_bft.profdata \ 21 | --object target/debug/deps/aleph_bft-coverage 22 | -------------------------------------------------------------------------------- /install_cov_tools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rustup component add llvm-tools-preview 4 | 5 | cargo install rustfilt --version 0.2.1 6 | cargo install cargo-binutils --version 0.3.5 7 | -------------------------------------------------------------------------------- /mock/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "aleph-bft-mock" 3 | version = "0.18.0" 4 | edition = "2021" 5 | authors = ["Cardinal Cryptography"] 6 | documentation = "https://docs.rs/?" 7 | homepage = "https://alephzero.org" 8 | license = "Apache-2.0" 9 | repository = "https://github.com/Cardinal-Cryptography/AlephBFT" 10 | readme = "./README.md" 11 | description = "Mock implementations of traits required by the aleph-bft package. Do NOT use outside of testing!" 12 | 13 | [dependencies] 14 | aleph-bft-types = { path = "../types", version = "0.16" } 15 | async-trait = "0.1" 16 | codec = { package = "parity-scale-codec", version = "3.0", default-features = false, features = ["derive"] } 17 | futures = "0.3" 18 | log = "0.4" 19 | parking_lot = "0.12" 20 | rand = "0.8" 21 | tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] } 22 | -------------------------------------------------------------------------------- /mock/README.md: -------------------------------------------------------------------------------- 1 | [![Crate][crate-image]][crate-link] 2 | [![Docs][docs-image]][docs-link] 3 | [![Apache 2.0 Licensed][license-image]][license-link] 4 | 5 | ### Overview 6 | 7 | This package is a part of the AlephBFT toolset. For more information, see the README 8 | in the top-level directory. 9 | 10 | Mock implementations of required traits. Do NOT use outside of testing! 11 | 12 | [crate-image]: https://img.shields.io/crates/v/aleph-bft-mock.svg 13 | [crate-link]: https://crates.io/crates/aleph-bft-mock 14 | [docs-image]: https://docs.rs/aleph-bft-mock/badge.svg 15 | [docs-link]: https://docs.rs/aleph-bft-mock 16 | [license-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg 17 | [license-link]: https://github.com/Cardinal-Cryptography/AlephBFT/blob/main/LICENSE 18 | -------------------------------------------------------------------------------- /mock/src/crypto/keychain.rs: -------------------------------------------------------------------------------- 1 | use crate::crypto::{PartialMultisignature, Signature}; 2 | use aleph_bft_types::{ 3 | Index, Keychain as KeychainT, MultiKeychain as MultiKeychainT, NodeCount, NodeIndex, 4 | PartialMultisignature as PartialMultisignatureT, SignatureSet, 5 | }; 6 | 7 | #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)] 8 | pub struct Keychain { 9 | count: NodeCount, 10 | index: NodeIndex, 11 | } 12 | 13 | impl Keychain { 14 | pub fn new(count: NodeCount, index: NodeIndex) -> Self { 15 | Keychain { count, index } 16 | } 17 | 18 | pub fn new_vec(node_count: NodeCount) -> Vec { 19 | (0..node_count.0) 20 | .map(|i| Self::new(node_count, i.into())) 21 | .collect() 22 | } 23 | } 24 | 25 | impl Index for Keychain { 26 | fn index(&self) -> NodeIndex { 27 | self.index 28 | } 29 | } 30 | 31 | impl KeychainT for Keychain { 32 | type Signature = Signature; 33 | 34 | fn node_count(&self) -> NodeCount { 35 | self.count 36 | } 37 | 38 | fn sign(&self, msg: &[u8]) -> Self::Signature { 39 | Signature::new(msg.to_vec(), self.index) 40 | } 41 | 42 | fn verify(&self, msg: &[u8], sgn: &Self::Signature, index: NodeIndex) -> bool { 43 | index == sgn.index() && msg == sgn.msg() 44 | } 45 | } 46 | 47 | impl MultiKeychainT for Keychain { 48 | type PartialMultisignature = PartialMultisignature; 49 | 50 | fn bootstrap_multi( 51 | &self, 52 | signature: &Self::Signature, 53 | index: NodeIndex, 54 | ) -> Self::PartialMultisignature { 55 | 
SignatureSet::add_signature(SignatureSet::with_size(self.node_count()), signature, index) 56 | } 57 | 58 | fn is_complete(&self, msg: &[u8], partial: &Self::PartialMultisignature) -> bool { 59 | let signature_count = partial.iter().count(); 60 | if signature_count < self.node_count().consensus_threshold().0 { 61 | return false; 62 | } 63 | partial.iter().all(|(i, sgn)| self.verify(msg, sgn, i)) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /mock/src/crypto/mod.rs: -------------------------------------------------------------------------------- 1 | mod keychain; 2 | mod signable; 3 | mod signature; 4 | mod wrappers; 5 | 6 | pub use keychain::Keychain; 7 | pub use signable::Signable; 8 | pub use signature::{PartialMultisignature, Signature}; 9 | pub use wrappers::BadSigning; 10 | -------------------------------------------------------------------------------- /mock/src/crypto/signable.rs: -------------------------------------------------------------------------------- 1 | use aleph_bft_types::Signable as SignableT; 2 | 3 | #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)] 4 | pub struct Signable(String); 5 | 6 | impl SignableT for Signable { 7 | type Hash = Vec; 8 | fn hash(&self) -> Self::Hash { 9 | self.0.clone().into() 10 | } 11 | } 12 | 13 | impl> From for Signable { 14 | fn from(x: T) -> Self { 15 | Self(x.into()) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /mock/src/crypto/signature.rs: -------------------------------------------------------------------------------- 1 | use aleph_bft_types::{Index, NodeIndex, SignatureSet}; 2 | use codec::{Decode, Encode}; 3 | use std::hash::Hash; 4 | 5 | #[derive(Clone, Eq, PartialEq, Hash, Debug, Default, Encode, Decode)] 6 | pub struct Signature { 7 | msg: Vec, 8 | index: NodeIndex, 9 | } 10 | 11 | impl Signature { 12 | pub fn new(msg: Vec, index: NodeIndex) -> Self { 13 | Self { msg, index } 14 | } 15 | 16 | pub fn msg(&self) -> &Vec { 17 | &self.msg 18 | } 19 | } 20 | 21 | impl Index for Signature { 22 | fn index(&self) -> NodeIndex { 23 | self.index 24 | } 25 | } 26 | 27 | pub type PartialMultisignature = SignatureSet; 28 | -------------------------------------------------------------------------------- /mock/src/crypto/wrappers.rs: -------------------------------------------------------------------------------- 1 | use crate::crypto::{PartialMultisignature, Signature}; 2 | use aleph_bft_types::{ 3 | Index, Keychain as KeychainT, MultiKeychain as MultiKeychainT, NodeCount, NodeIndex, 4 | }; 5 | use codec::{Decode, Encode}; 6 | use std::fmt::Debug; 7 | 8 | pub trait MK: 9 | KeychainT + MultiKeychainT 10 | { 11 | } 12 | 13 | impl< 14 | T: KeychainT 15 | + MultiKeychainT, 16 | > MK for T 17 | { 18 | } 19 | 20 | /// Keychain wrapper which produces incorrect signatures 21 | #[derive(Clone, Eq, PartialEq, Hash, Debug, Default, Encode, Decode)] 22 | pub struct BadSigning(T); 23 | 24 | impl From for BadSigning { 25 | fn from(mk: T) -> Self { 26 | Self(mk) 27 | } 28 | } 29 | 30 | impl Index for BadSigning { 31 | fn index(&self) -> NodeIndex { 32 | self.0.index() 33 | } 34 | } 35 | 36 | impl KeychainT for BadSigning { 37 | type Signature = T::Signature; 38 | 39 | fn node_count(&self) -> NodeCount { 40 | self.0.node_count() 41 | } 42 | 43 | fn sign(&self, msg: &[u8]) -> Self::Signature { 44 | let signature = self.0.sign(msg); 45 | let mut msg = b"BAD".to_vec(); 46 | msg.extend(signature.msg().clone()); 47 | Signature::new(msg, 
signature.index()) 48 | } 49 | 50 | fn verify(&self, msg: &[u8], sgn: &Self::Signature, index: NodeIndex) -> bool { 51 | self.0.verify(msg, sgn, index) 52 | } 53 | } 54 | 55 | impl MultiKeychainT for BadSigning { 56 | type PartialMultisignature = T::PartialMultisignature; 57 | 58 | fn bootstrap_multi( 59 | &self, 60 | signature: &Self::Signature, 61 | index: NodeIndex, 62 | ) -> Self::PartialMultisignature { 63 | self.0.bootstrap_multi(signature, index) 64 | } 65 | 66 | fn is_complete(&self, msg: &[u8], partial: &Self::PartialMultisignature) -> bool { 67 | self.0.is_complete(msg, partial) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /mock/src/dataio.rs: -------------------------------------------------------------------------------- 1 | use aleph_bft_types::{DataProvider as DataProviderT, FinalizationHandler as FinalizationHandlerT}; 2 | use async_trait::async_trait; 3 | use codec::{Decode, Encode}; 4 | use futures::{channel::mpsc::unbounded, future::pending, AsyncWrite}; 5 | use log::error; 6 | use parking_lot::Mutex; 7 | use std::{ 8 | io::{self}, 9 | pin::Pin, 10 | sync::Arc, 11 | task::{self, Poll}, 12 | }; 13 | 14 | type Receiver = futures::channel::mpsc::UnboundedReceiver; 15 | type Sender = futures::channel::mpsc::UnboundedSender; 16 | 17 | pub type Data = u32; 18 | 19 | #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)] 20 | pub struct DataProvider { 21 | counter: usize, 22 | n_data: Option, 23 | } 24 | 25 | impl DataProvider { 26 | pub fn new() -> Self { 27 | Self { 28 | counter: 0, 29 | n_data: None, 30 | } 31 | } 32 | 33 | pub fn new_finite(n_data: usize) -> Self { 34 | Self { 35 | counter: 0, 36 | n_data: Some(n_data), 37 | } 38 | } 39 | pub fn new_range(start: usize, end: usize) -> Self { 40 | Self { 41 | counter: start, 42 | n_data: Some(end), 43 | } 44 | } 45 | } 46 | 47 | #[async_trait] 48 | impl DataProviderT for DataProvider { 49 | type Output = Data; 50 | 51 | async fn get_data(&mut self) -> Option { 52 | let result = self.counter as u32; 53 | self.counter += 1; 54 | if let Some(n_data) = self.n_data { 55 | if n_data < self.counter { 56 | return None; 57 | } 58 | } 59 | Some(result) 60 | } 61 | } 62 | 63 | #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default, Decode, Encode)] 64 | pub struct StalledDataProvider {} 65 | 66 | impl StalledDataProvider { 67 | pub fn new() -> Self { 68 | Self {} 69 | } 70 | } 71 | 72 | #[async_trait] 73 | impl DataProviderT for StalledDataProvider { 74 | type Output = Data; 75 | 76 | async fn get_data(&mut self) -> Option { 77 | pending().await 78 | } 79 | } 80 | 81 | #[derive(Clone, Debug)] 82 | pub struct FinalizationHandler { 83 | tx: Sender, 84 | } 85 | 86 | impl FinalizationHandlerT for FinalizationHandler { 87 | fn data_finalized(&mut self, data: Data) { 88 | if let Err(e) = self.tx.unbounded_send(data) { 89 | error!(target: "finalization-handler", "Error when sending data from FinalizationHandler {:?}.", e); 90 | } 91 | } 92 | } 93 | 94 | impl FinalizationHandler { 95 | pub fn new() -> (Self, Receiver) { 96 | let (tx, rx) = unbounded(); 97 | 98 | (Self { tx }, rx) 99 | } 100 | } 101 | 102 | #[derive(Clone, Debug, Default)] 103 | pub struct Saver { 104 | data: Arc>>, 105 | } 106 | 107 | impl Saver { 108 | pub fn new() -> Self { 109 | Self { 110 | data: Arc::new(Mutex::new(vec![])), 111 | } 112 | } 113 | } 114 | 115 | impl AsyncWrite for Saver { 116 | fn poll_write( 117 | self: Pin<&mut Self>, 118 | _: &mut task::Context<'_>, 119 | buf: 
&[u8], 120 | ) -> Poll> { 121 | self.data.lock().extend_from_slice(buf); 122 | Poll::Ready(Ok(buf.len())) 123 | } 124 | 125 | fn poll_flush(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll> { 126 | Poll::Ready(Ok(())) 127 | } 128 | 129 | fn poll_close(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll> { 130 | Poll::Ready(Ok(())) 131 | } 132 | } 133 | 134 | impl From>>> for Saver { 135 | fn from(data: Arc>>) -> Self { 136 | Self { data } 137 | } 138 | } 139 | 140 | pub type Loader = futures::io::Cursor>; 141 | -------------------------------------------------------------------------------- /mock/src/hasher.rs: -------------------------------------------------------------------------------- 1 | use aleph_bft_types::Hasher; 2 | use std::{collections::hash_map::DefaultHasher, hash::Hasher as StdHasher}; 3 | 4 | // A hasher from the standard library that hashes to u64, should be enough to 5 | // avoid collisions in testing. 6 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)] 7 | pub struct Hasher64; 8 | 9 | impl Hasher for Hasher64 { 10 | type Hash = [u8; 8]; 11 | 12 | fn hash(x: &[u8]) -> Self::Hash { 13 | let mut hasher = DefaultHasher::new(); 14 | hasher.write(x); 15 | hasher.finish().to_ne_bytes() 16 | } 17 | } 18 | 19 | pub type Hash64 = ::Hash; 20 | -------------------------------------------------------------------------------- /mock/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Mock implementations of required traits. Do NOT use outside of testing! 2 | 3 | mod crypto; 4 | mod dataio; 5 | mod hasher; 6 | mod network; 7 | mod spawner; 8 | 9 | pub use crypto::{BadSigning, Keychain, PartialMultisignature, Signable, Signature}; 10 | pub use dataio::{Data, DataProvider, FinalizationHandler, Loader, Saver, StalledDataProvider}; 11 | pub use hasher::{Hash64, Hasher64}; 12 | pub use network::{ 13 | Network, NetworkHook, NetworkReceiver, NetworkSender, Peer, ReconnectSender, Router, 14 | UnreliableHook, 15 | }; 16 | pub use spawner::Spawner; 17 | -------------------------------------------------------------------------------- /mock/src/network.rs: -------------------------------------------------------------------------------- 1 | use aleph_bft_types::{Network as NetworkT, NodeCount, NodeIndex, Recipient}; 2 | use futures::{ 3 | channel::{ 4 | mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, 5 | oneshot, 6 | }, 7 | Future, StreamExt, 8 | }; 9 | use log::debug; 10 | use std::{ 11 | cell::RefCell, 12 | collections::HashMap, 13 | fmt::{Debug, Formatter}, 14 | pin::Pin, 15 | task::{Context, Poll}, 16 | }; 17 | 18 | pub type NetworkReceiver = UnboundedReceiver<(D, NodeIndex)>; 19 | pub type NetworkSender = UnboundedSender<(D, NodeIndex)>; 20 | 21 | #[derive(Debug)] 22 | pub struct Network { 23 | rx: NetworkReceiver, 24 | tx: NetworkSender, 25 | peers: Vec, 26 | index: NodeIndex, 27 | } 28 | 29 | impl Network { 30 | pub fn new( 31 | rx: NetworkReceiver, 32 | tx: NetworkSender, 33 | peers: Vec, 34 | index: NodeIndex, 35 | ) -> Self { 36 | Network { 37 | rx, 38 | tx, 39 | peers, 40 | index, 41 | } 42 | } 43 | 44 | pub fn index(&self) -> NodeIndex { 45 | self.index 46 | } 47 | 48 | pub fn peers(&self) -> Vec { 49 | self.peers.clone() 50 | } 51 | } 52 | 53 | #[async_trait::async_trait] 54 | impl NetworkT for Network { 55 | fn send(&self, data: D, recipient: Recipient) { 56 | use Recipient::*; 57 | match recipient { 58 | Node(node) => self 59 | .tx 60 | .unbounded_send((data, node)) 61 | .expect("send on channel should 
work"), 62 | Everyone => { 63 | for peer in self.peers.iter() { 64 | if *peer != self.index { 65 | self.send(data.clone(), Node(*peer)); 66 | } 67 | } 68 | } 69 | } 70 | } 71 | 72 | async fn next_event(&mut self) -> Option { 73 | Some(self.rx.next().await?.0) 74 | } 75 | } 76 | 77 | pub struct Peer { 78 | tx: NetworkSender, 79 | rx: NetworkReceiver, 80 | } 81 | 82 | pub trait NetworkHook: Send { 83 | fn process_message( 84 | &mut self, 85 | data: D, 86 | sender: NodeIndex, 87 | recipient: NodeIndex, 88 | ) -> Vec<(D, NodeIndex, NodeIndex)>; 89 | } 90 | 91 | pub struct UnreliableHook { 92 | reliability: f64, 93 | } 94 | 95 | impl UnreliableHook { 96 | // reliability - a number in the range [0, 1], 1.0 means perfect reliability, 0.0 means no message gets through 97 | pub fn new(reliability: f64) -> Self { 98 | UnreliableHook { reliability } 99 | } 100 | } 101 | 102 | impl NetworkHook for UnreliableHook { 103 | fn process_message( 104 | &mut self, 105 | data: D, 106 | sender: NodeIndex, 107 | recipient: NodeIndex, 108 | ) -> Vec<(D, NodeIndex, NodeIndex)> { 109 | let rand_sample = rand::random::(); 110 | if rand_sample > self.reliability { 111 | debug!("Simulated network fail."); 112 | Vec::new() 113 | } else { 114 | vec![(data, sender, recipient)] 115 | } 116 | } 117 | } 118 | 119 | type ReconnectReceiver = UnboundedReceiver<(NodeIndex, oneshot::Sender>)>; 120 | pub type ReconnectSender = UnboundedSender<(NodeIndex, oneshot::Sender>)>; 121 | 122 | pub struct Router { 123 | peers: RefCell>>, 124 | peer_list: Vec, 125 | hook_list: RefCell>>>, 126 | peer_reconnect_rx: ReconnectReceiver, 127 | } 128 | 129 | impl Debug for Router { 130 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 131 | f.debug_struct("Router") 132 | .field("peers", &self.peer_list) 133 | .field("hook count", &self.hook_list.borrow().len()) 134 | .finish() 135 | } 136 | } 137 | 138 | type RouterWithNetworks = (Router, Vec<(Network, ReconnectSender)>); 139 | 140 | impl Router { 141 | pub fn new(n_members: NodeCount) -> RouterWithNetworks { 142 | let peer_list = n_members.into_iterator().collect(); 143 | let (reconnect_tx, peer_reconnect_rx) = unbounded(); 144 | let mut router = Router { 145 | peers: RefCell::new(HashMap::new()), 146 | peer_list, 147 | hook_list: RefCell::new(Vec::new()), 148 | peer_reconnect_rx, 149 | }; 150 | let mut networks = Vec::new(); 151 | for ix in n_members.into_iterator() { 152 | let network = router.connect_peer(ix); 153 | networks.push((network, reconnect_tx.clone())); 154 | } 155 | (router, networks) 156 | } 157 | 158 | pub fn add_hook + 'static>(&mut self, hook: HK) { 159 | self.hook_list.borrow_mut().push(Box::new(hook)); 160 | } 161 | 162 | pub fn connect_peer(&mut self, peer: NodeIndex) -> Network { 163 | assert!( 164 | self.peer_list.iter().any(|p| *p == peer), 165 | "Must connect a peer in the list." 166 | ); 167 | assert!( 168 | !self.peers.borrow().contains_key(&peer), 169 | "Cannot connect a peer twice." 
170 | ); 171 | let (tx_in_hub, rx_in_hub) = unbounded(); 172 | let (tx_out_hub, rx_out_hub) = unbounded(); 173 | let peer_entry = Peer { 174 | tx: tx_out_hub, 175 | rx: rx_in_hub, 176 | }; 177 | self.peers.borrow_mut().insert(peer, peer_entry); 178 | Network::new(rx_out_hub, tx_in_hub, self.peer_list.clone(), peer) 179 | } 180 | 181 | pub fn peer_list(&self) -> Vec { 182 | self.peer_list.clone() 183 | } 184 | } 185 | 186 | impl Future for Router { 187 | type Output = (); 188 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 189 | let this = &mut self; 190 | let mut disconnected_peers: Vec = Vec::new(); 191 | let mut buffer = Vec::new(); 192 | for (peer_id, peer) in this.peers.borrow_mut().iter_mut() { 193 | loop { 194 | // this call is responsible for waking this Future 195 | match peer.rx.poll_next_unpin(cx) { 196 | Poll::Ready(Some((data, recipient))) => { 197 | buffer.push((data, *peer_id, recipient)); 198 | } 199 | Poll::Ready(None) => { 200 | disconnected_peers.push(*peer_id); 201 | break; 202 | } 203 | Poll::Pending => { 204 | break; 205 | } 206 | } 207 | } 208 | } 209 | for peer_id in disconnected_peers { 210 | this.peers.borrow_mut().remove(&peer_id); 211 | } 212 | loop { 213 | // this call is responsible for waking this Future 214 | match this.peer_reconnect_rx.poll_next_unpin(cx) { 215 | Poll::Ready(Some((node_id, sender))) => { 216 | sender 217 | .send(this.connect_peer(node_id)) 218 | .expect("channel should be open"); 219 | } 220 | Poll::Ready(None) => { 221 | break; 222 | } 223 | Poll::Pending => { 224 | break; 225 | } 226 | } 227 | } 228 | let mut new_buffer = Vec::new(); 229 | for hook in this.hook_list.borrow_mut().iter_mut() { 230 | for (data, sender, recipient) in buffer { 231 | new_buffer.append(&mut hook.process_message(data, sender, recipient)); 232 | } 233 | buffer = new_buffer; 234 | new_buffer = Vec::new(); 235 | } 236 | for (data, sender, recipient) in buffer { 237 | if let Some(peer) = this.peers.borrow().get(&recipient) { 238 | peer.tx.unbounded_send((data, sender)).ok(); 239 | } 240 | } 241 | if this.peers.borrow().is_empty() { 242 | Poll::Ready(()) 243 | } else { 244 | Poll::Pending 245 | } 246 | } 247 | } 248 | -------------------------------------------------------------------------------- /mock/src/spawner.rs: -------------------------------------------------------------------------------- 1 | use aleph_bft_types::{SpawnHandle, TaskHandle}; 2 | use codec::{Decode, Encode}; 3 | use futures::{channel::oneshot, Future}; 4 | 5 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default, Decode, Encode)] 6 | pub struct Spawner; 7 | 8 | impl SpawnHandle for Spawner { 9 | fn spawn(&self, _name: &str, task: impl Future + Send + 'static) { 10 | tokio::spawn(task); 11 | } 12 | 13 | fn spawn_essential( 14 | &self, 15 | _: &str, 16 | task: impl Future + Send + 'static, 17 | ) -> TaskHandle { 18 | let (res_tx, res_rx) = oneshot::channel(); 19 | tokio::spawn(async move { 20 | task.await; 21 | res_tx.send(()).expect("We own the rx."); 22 | }); 23 | Box::pin(async move { res_rx.await.map_err(|_| ()) }) 24 | } 25 | } 26 | 27 | impl Spawner { 28 | pub fn new() -> Self { 29 | Spawner {} 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /rmc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aleph-bft-rmc" 3 | version = "0.16.0" 4 | edition = "2021" 5 | authors = ["Cardinal Cryptography"] 6 | categories = ["algorithms", "cryptography"] 7 | 
documentation = "https://docs.rs/?" 8 | homepage = "https://alephzero.org" 9 | repository = "https://github.com/Cardinal-Cryptography/AlephBFT" 10 | keywords = ["asynchronous", "consensus", "bft", "distributed-systems"] 11 | license = "Apache-2.0" 12 | readme = "./README.md" 13 | description = "Reliable MultiCast - a primitive for Reliable Broadcast protocol." 14 | 15 | [dependencies] 16 | aleph-bft-crypto = { path = "../crypto", version = "0.11" } 17 | aleph-bft-types = { path = "../types", version = "0.16" } 18 | async-trait = "0.1" 19 | codec = { package = "parity-scale-codec", version = "3.0", default-features = false, features = ["derive"] } 20 | futures = "0.3" 21 | futures-timer = "3.0" 22 | log = "0.4" 23 | 24 | [dev-dependencies] 25 | aleph-bft-mock = { path = "../mock" } 26 | rand = "0.8" 27 | tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread", "time"] } 28 | -------------------------------------------------------------------------------- /rmc/README.md: -------------------------------------------------------------------------------- 1 | [![Crate][crate-image]][crate-link] 2 | [![Docs][docs-image]][docs-link] 3 | [![Apache 2.0 Licensed][license-image]][license-link] 4 | 5 | ### Overview 6 | 7 | This package is a part of the AlephBFT toolset. For more information, see the README 8 | in the top-level directory. 9 | 10 | Reliable MultiCast - a primitive for Reliable Broadcast protocol. 11 | 12 | For more information, check [the documentation][reference-link-rmc]. 13 | 14 | [crate-image]: https://img.shields.io/crates/v/aleph-bft-rmc.svg 15 | [crate-link]: https://crates.io/crates/aleph-bft-rmc 16 | [docs-image]: https://docs.rs/aleph-bft-rmc/badge.svg 17 | [docs-link]: https://docs.rs/aleph-bft-rmc 18 | [license-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg 19 | [license-link]: https://github.com/Cardinal-Cryptography/AlephBFT/blob/main/LICENSE 20 | [reference-link-rmc]: https://cardinal-cryptography.github.io/AlephBFT/reliable_broadcast.html 21 | -------------------------------------------------------------------------------- /rmc/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub use aleph_bft_crypto::{ 2 | Indexed, MultiKeychain, Multisigned, NodeCount, PartialMultisignature, PartiallyMultisigned, 3 | Signable, Signature, Signed, UncheckedSigned, 4 | }; 5 | use codec::{Decode, Encode}; 6 | use core::fmt::Debug; 7 | use std::hash::Hash; 8 | 9 | mod handler; 10 | mod scheduler; 11 | mod service; 12 | 13 | pub use handler::Handler; 14 | pub use scheduler::DoublingDelayScheduler; 15 | pub use service::Service; 16 | 17 | /// An RMC message consisting of either a signed (indexed) hash, or a multisigned hash. 
18 | #[derive(Debug, Encode, Decode, Clone, PartialEq, Eq, Hash)] 19 | pub enum Message<H: Signable + Hash, S: Signature, M: PartialMultisignature> { 20 | SignedHash(UncheckedSigned<Indexed<H>, S>), 21 | MultisignedHash(UncheckedSigned<H, M>), 22 | } 23 | 24 | impl<H: Signable + Hash, S: Signature, M: PartialMultisignature> Message<H, S, M> { 25 | pub fn hash(&self) -> &H { 26 | match self { 27 | Message::SignedHash(unchecked) => unchecked.as_signable_strip_index(), 28 | Message::MultisignedHash(unchecked) => unchecked.as_signable(), 29 | } 30 | } 31 | pub fn is_complete(&self) -> bool { 32 | matches!(self, Message::MultisignedHash(_)) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /rmc/src/scheduler.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use core::fmt::Debug; 3 | use futures::future::pending; 4 | use futures_timer::Delay; 5 | use std::{ 6 | cmp::Reverse, 7 | collections::BinaryHeap, 8 | fmt::Formatter, 9 | ops::{Add, Div, Mul}, 10 | time::{Duration, Instant}, 11 | }; 12 | 13 | /// Abstraction of a task-scheduling logic 14 | /// 15 | /// Because the network can be faulty, the task of sending a message must be performed multiple 16 | /// times to ensure that the recipient receives each message. 17 | /// The trait [`TaskScheduler`] describes in what intervals some abstract task of type `T` 18 | /// should be performed. 19 | #[async_trait::async_trait] 20 | pub trait TaskScheduler<T>: Send + Sync { 21 | fn add_task(&mut self, task: T); 22 | async fn next_task(&mut self) -> T; 23 | } 24 | 25 | #[derive(Clone, Debug, Eq, PartialEq)] 26 | struct ScheduledTask<T> { 27 | task: T, 28 | delay: Duration, 29 | } 30 | 31 | impl<T> ScheduledTask<T> { 32 | fn new(task: T, delay: Duration) -> Self { 33 | ScheduledTask { task, delay } 34 | } 35 | } 36 | 37 | #[derive(Ord, PartialOrd, Eq, PartialEq)] 38 | struct IndexedInstant(Instant, usize); 39 | 40 | impl IndexedInstant { 41 | fn at(instant: Instant, i: usize) -> Self { 42 | IndexedInstant(instant, i) 43 | } 44 | } 45 | 
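// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of scheduler.rs. It shows
// how a caller might drive the `DoublingDelayScheduler` defined just below to
// keep re-sending tasks with exponential backoff; the `send_to_network`
// callback and the `_resend_loop` helper are hypothetical names, not APIs of
// this crate.
async fn _resend_loop<T, F>(mut scheduler: DoublingDelayScheduler<T>, send_to_network: F)
where
    T: Clone + Send + Sync,
    F: Fn(&T),
{
    // Every task added via `add_task` is yielded immediately, then again after
    // `initial_delay`, then after 2x, 4x, ... that delay, indefinitely.
    loop {
        let task = scheduler.next_task().await;
        send_to_network(&task);
    }
}
// ---------------------------------------------------------------------------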
46 | /// A basic task scheduler scheduling tasks with an exponential slowdown 47 | /// 48 | /// A scheduler parameterized by a duration `initial_delay`. When a task is added to the scheduler 49 | /// it is first scheduled immediately, then it is scheduled indefinitely, where the first delay is 50 | /// `initial_delay`, and each following delay for that task is two times longer than the previous 51 | /// one. 52 | pub struct DoublingDelayScheduler<T> { 53 | initial_delay: Duration, 54 | scheduled_instants: BinaryHeap<Reverse<IndexedInstant>>, 55 | scheduled_tasks: Vec<ScheduledTask<T>>, 56 | } 57 | 58 | impl<T> Debug for DoublingDelayScheduler<T> { 59 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 60 | f.debug_struct("DoublingDelayScheduler") 61 | .field("initial delay", &self.initial_delay) 62 | .field("scheduled instant count", &self.scheduled_instants.len()) 63 | .field("scheduled task count", &self.scheduled_tasks.len()) 64 | .finish() 65 | } 66 | } 67 | 68 | impl<T> DoublingDelayScheduler<T> { 69 | pub fn new(initial_delay: Duration) -> Self { 70 | DoublingDelayScheduler::with_tasks(vec![], initial_delay) 71 | } 72 | 73 | pub fn with_tasks(initial_tasks: Vec<T>, initial_delay: Duration) -> Self { 74 | let mut scheduler = DoublingDelayScheduler { 75 | initial_delay, 76 | scheduled_instants: BinaryHeap::new(), 77 | scheduled_tasks: Vec::new(), 78 | }; 79 | if initial_tasks.is_empty() { 80 | return scheduler; 81 | } 82 | let delta = initial_delay.div((initial_tasks.len()) as u32); // safety: len is non-zero 83 | for (i, task) in initial_tasks.into_iter().enumerate() { 84 | scheduler.add_task_after(task, delta.mul(i as u32)); 85 | } 86 | scheduler 87 | } 88 | 89 | fn add_task_after(&mut self, task: T, delta: Duration) { 90 | let i = self.scheduled_tasks.len(); 91 | let instant = Instant::now().add(delta); 92 | let indexed_instant = IndexedInstant::at(instant, i); 93 | self.scheduled_instants.push(Reverse(indexed_instant)); 94 | let scheduled_task = ScheduledTask::new(task, self.initial_delay); 95 | self.scheduled_tasks.push(scheduled_task); 96 | } 97 | } 98 | 99 | #[async_trait] 100 | impl<T: Send + Sync + Clone> TaskScheduler<T> for DoublingDelayScheduler<T> { 101 | fn add_task(&mut self, task: T) { 102 | self.add_task_after(task, Duration::ZERO); 103 | } 104 | 105 | async fn next_task(&mut self) -> T { 106 | match self.scheduled_instants.peek() { 107 | Some(&Reverse(IndexedInstant(instant, _))) => { 108 | let now = Instant::now(); 109 | if now < instant { 110 | Delay::new(instant - now).await; 111 | } 112 | } 113 | None => pending().await, 114 | } 115 | 116 | let Reverse(IndexedInstant(instant, i)) = self 117 | .scheduled_instants 118 | .pop() 119 | .expect("By the logic of the function, there is an instant available"); 120 | let scheduled_task = &mut self.scheduled_tasks[i]; 121 | 122 | let task = scheduled_task.task.clone(); 123 | self.scheduled_instants 124 | .push(Reverse(IndexedInstant(instant + scheduled_task.delay, i))); 125 | 126 | scheduled_task.delay *= 2; 127 | task 128 | } 129 | } 130 | 131 | #[cfg(test)] 132 | mod tests { 133 | use crate::scheduler::{DoublingDelayScheduler, TaskScheduler}; 134 | use std::{ 135 | ops::{Add, Mul}, 136 | time::Duration, 137 | }; 138 | use tokio::time::Instant; 139 | 140 | #[tokio::test] 141 | async fn scheduler_yields_proper_order_of_tasks() { 142 | let mut scheduler = DoublingDelayScheduler::new(Duration::from_millis(25)); 143 | 144 | scheduler.add_task(0); 145 | tokio::time::sleep(Duration::from_millis(2)).await; 146 | scheduler.add_task(1); 147 | 148 | let task = scheduler.next_task().await; 149 | assert_eq!(task, 0); 150 | let task = scheduler.next_task().await; 151 | assert_eq!(task, 1); 152 | let task = scheduler.next_task().await; 153 | assert_eq!(task, 0); 154 | let task = scheduler.next_task().await; 155 | assert_eq!(task, 1); 156 | 157 | tokio::time::sleep(Duration::from_millis(2)).await; 158 | scheduler.add_task(2); 159 | 160 | let task = scheduler.next_task().await; 161 | assert_eq!(task, 2); 162 | let task =
scheduler.next_task().await; 163 | assert_eq!(task, 2); 164 | let task = scheduler.next_task().await; 165 | assert_eq!(task, 0); 166 | let task = scheduler.next_task().await; 167 | assert_eq!(task, 1); 168 | let task = scheduler.next_task().await; 169 | assert_eq!(task, 2); 170 | } 171 | 172 | #[tokio::test] 173 | async fn scheduler_properly_handles_initial_bunch_of_tasks() { 174 | let tasks = (0..5).collect(); 175 | let before = Instant::now(); 176 | let mut scheduler = DoublingDelayScheduler::with_tasks(tasks, Duration::from_millis(25)); 177 | 178 | for i in 0..5 { 179 | let task = scheduler.next_task().await; 180 | assert_eq!(task, i); 181 | let now = Instant::now(); 182 | // 0, 5, 10, 15, 20 183 | assert!(now - before >= Duration::from_millis(5).mul(i)); 184 | } 185 | 186 | for i in 0..5 { 187 | let task = scheduler.next_task().await; 188 | assert_eq!(task, i); 189 | let now = Instant::now(); 190 | // 25, 30, 35, 40, 45 191 | assert!( 192 | now - before 193 | >= Duration::from_millis(5) 194 | .mul(i) 195 | .add(Duration::from_millis(25)) 196 | ); 197 | } 198 | } 199 | 200 | #[tokio::test] 201 | async fn asking_empty_scheduler_for_next_task_blocks() { 202 | let mut scheduler: DoublingDelayScheduler = 203 | DoublingDelayScheduler::new(Duration::from_millis(25)); 204 | let future = tokio::time::timeout(Duration::from_millis(30), scheduler.next_task()); 205 | let result = future.await; 206 | assert!(result.is_err()); // elapsed 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /run_local_pipeline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cargo clippy --all-targets --all-features -- -D warnings 6 | cargo +nightly fmt --all 7 | cargo test --lib -- --skip medium 8 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.85.1" 3 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | imports_granularity = "Crate" 2 | use_field_init_shorthand = true 3 | reorder_imports = true 4 | reorder_modules = true 5 | edition = "2018" 6 | -------------------------------------------------------------------------------- /types/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aleph-bft-types" 3 | version = "0.16.0" 4 | edition = "2021" 5 | authors = ["Cardinal Cryptography"] 6 | documentation = "https://docs.rs/?" 7 | homepage = "https://alephzero.org" 8 | license = "Apache-2.0" 9 | repository = "https://github.com/Cardinal-Cryptography/AlephBFT" 10 | readme = "./README.md" 11 | description = "Traits that need to be implemented by the user of the aleph-bft package." 
12 | 13 | [dependencies] 14 | aleph-bft-crypto = { path = "../crypto", version = "0.11" } 15 | async-trait = "0.1" 16 | codec = { package = "parity-scale-codec", version = "3.0", default-features = false, features = ["derive"] } 17 | futures = "0.3" 18 | -------------------------------------------------------------------------------- /types/README.md: -------------------------------------------------------------------------------- 1 | [![Crate][crate-image]][crate-link] 2 | [![Docs][docs-image]][docs-link] 3 | [![Apache 2.0 Licensed][license-image]][license-link] 4 | 5 | ### Overview 6 | 7 | This package is a part of the AlephBFT toolset. For more information, see the README 8 | in the top-level directory. 9 | 10 | Contains traits that need to be implemented by the user. 11 | 12 | [crate-image]: https://img.shields.io/crates/v/aleph-bft-types.svg 13 | [crate-link]: https://crates.io/crates/aleph-bft-types 14 | [docs-image]: https://docs.rs/aleph-bft-types/badge.svg 15 | [docs-link]: https://docs.rs/aleph-bft-types 16 | [license-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg 17 | [license-link]: https://github.com/Cardinal-Cryptography/AlephBFT/blob/main/LICENSE 18 | -------------------------------------------------------------------------------- /types/src/dataio.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 3 | use crate::{Data, Hasher, NodeIndex, Round}; 4 | 5 | /// The source of data items that consensus should order. 6 | /// 7 | /// AlephBFT internally calls [`DataProvider::get_data`] whenever a new unit is created and data 8 | /// needs to be placed inside. 9 | /// 10 | /// We refer to the documentation 11 | /// https://cardinal-cryptography.github.io/AlephBFT/aleph_bft_api.html for a discussion and 12 | /// examples of how this trait can be implemented. 13 | #[async_trait] 14 | pub trait DataProvider: Sync + Send + 'static { 15 | /// Type of data returned by this provider. 16 | type Output: Data; 17 | /// Outputs a new data item to be ordered. 18 | async fn get_data(&mut self) -> Option<Self::Output>; 19 | } 20 | 21 | /// The source of finalization of the units that consensus produces. 22 | /// 23 | /// The [`FinalizationHandler::data_finalized`] method is called whenever a piece of data input 24 | /// to the algorithm using [`DataProvider::get_data`] has been finalized, in order of finalization. 25 | pub trait FinalizationHandler<D: Data>: Sync + Send + 'static { 26 | /// Data, provided by [`DataProvider::get_data`], has been finalized. 27 | /// The calls to this function follow the order of finalization. 28 | fn data_finalized(&mut self, data: D); 29 | } 30 | 31 | /// Represents the state of the main internal data structure of AlephBFT (i.e. the directed acyclic graph) used for 32 | /// achieving consensus. 33 | /// 34 | /// Instances of this type are returned indirectly by the [`member::run_session`] method via the 35 | /// [`UnitFinalizationHandler`] trait. This allows the DAG structure used by AlephBFT to be reconstructed, 36 | /// which can then be used, for example, to evaluate a node's performance. 37 | pub struct OrderedUnit<D: Data, H: Hasher> { 38 | pub data: Option<D>, 39 | pub parents: Vec<H::Hash>, 40 | pub hash: H::Hash, 41 | pub creator: NodeIndex, 42 | pub round: Round, 43 | } 44 | 
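// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of dataio.rs. It shows one
// minimal way the two traits above could be implemented, assuming plain `u32`
// data items; `CountingProvider` and `VecFinalizer` are hypothetical names. A
// real node would typically pull data from a mempool and forward finalized
// items into a channel.
struct CountingProvider {
    next: u32,
}

#[async_trait]
impl DataProvider for CountingProvider {
    type Output = u32;

    // Called by AlephBFT whenever a new unit is created; returning `None`
    // results in a unit that carries no data.
    async fn get_data(&mut self) -> Option<u32> {
        self.next += 1;
        Some(self.next)
    }
}

struct VecFinalizer {
    finalized: Vec<u32>,
}

impl FinalizationHandler<u32> for VecFinalizer {
    // Invoked once per finalized data item, in finalization order.
    fn data_finalized(&mut self, data: u32) {
        self.finalized.push(data);
    }
}
// ---------------------------------------------------------------------------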
45 | /// The source of finalization of the units that consensus produces. 46 | /// 47 | /// The [`UnitFinalizationHandler::batch_finalized`] method is called whenever a batch of units 48 | /// has been finalized, in order of finalization. 49 | pub trait UnitFinalizationHandler: Sync + Send + 'static { 50 | type Data: Data; 51 | type Hasher: Hasher; 52 | 53 | /// A batch of units that contains data provided by [`DataProvider::get_data`] has been finalized. 54 | /// The calls to this function follow the order of finalization. 55 | fn batch_finalized(&mut self, batch: Vec<OrderedUnit<Self::Data, Self::Hasher>>); 56 | } 57 | -------------------------------------------------------------------------------- /types/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Traits that need to be implemented by the user. 2 | 3 | mod dataio; 4 | mod network; 5 | mod tasks; 6 | 7 | pub use aleph_bft_crypto::{ 8 | IncompleteMultisignatureError, Index, Indexed, Keychain, MultiKeychain, Multisigned, NodeCount, 9 | NodeIndex, NodeMap, NodeSubset, PartialMultisignature, PartiallyMultisigned, Signable, 10 | Signature, SignatureError, SignatureSet, Signed, UncheckedSigned, 11 | }; 12 | pub use dataio::{DataProvider, FinalizationHandler, OrderedUnit, UnitFinalizationHandler}; 13 | pub use network::{Network, Recipient}; 14 | pub use tasks::{SpawnHandle, TaskHandle}; 15 | 16 | use codec::Codec; 17 | use std::{fmt::Debug, hash::Hash as StdHash}; 18 | 19 | /// Data type that we want to order. 20 | pub trait Data: Eq + Clone + Send + Sync + Debug + StdHash + Codec + 'static {} 21 | 22 | impl<T> Data for T where T: Eq + Clone + Send + Sync + Debug + StdHash + Codec + 'static {} 23 | 24 | /// A hasher, used for creating identifiers for blocks or units. 25 | pub trait Hasher: Eq + Clone + Send + Sync + Debug + 'static { 26 | /// A hash, as an identifier for a block or unit. 27 | type Hash: AsRef<[u8]> + Eq + Ord + Copy + Clone + Send + Sync + Debug + StdHash + Codec; 28 | 29 | fn hash(s: &[u8]) -> Self::Hash; 30 | } 31 | 32 | /// The number of a session for which the consensus is run. 33 | pub type SessionId = u64; 34 | 35 | /// An asynchronous round of the protocol. 36 | pub type Round = u16; 37 | 
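// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of lib.rs. It shows an
// implementation of the `Hasher` trait above, mirroring the `Hasher64` mock
// that appears earlier in this repository; `StdHasher64` is a hypothetical
// name, and a production node would use a cryptographic hash instead.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
struct StdHasher64;

impl Hasher for StdHasher64 {
    // An 8-byte hash satisfies the required bounds (AsRef<[u8]>, Ord, Copy,
    // Codec, ...) and is enough to avoid collisions in small tests.
    type Hash = [u8; 8];

    fn hash(s: &[u8]) -> Self::Hash {
        use std::{collections::hash_map::DefaultHasher, hash::Hasher as _};
        let mut hasher = DefaultHasher::new();
        hasher.write(s);
        hasher.finish().to_ne_bytes()
    }
}
// ---------------------------------------------------------------------------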
-------------------------------------------------------------------------------- /types/src/network.rs: -------------------------------------------------------------------------------- 1 | use crate::NodeIndex; 2 | 3 | use codec::{Decode, Encode}; 4 | 5 | /// A recipient of a message, either a specific node or everyone. 6 | #[derive(Clone, Eq, PartialEq, Hash, Debug, Decode, Encode)] 7 | pub enum Recipient { 8 | Everyone, 9 | Node(NodeIndex), 10 | } 11 | 12 | /// Network represents an interface for sending and receiving NetworkData. 13 | /// 14 | /// Note on Rate Control: it is assumed that Network implements a rate control mechanism guaranteeing 15 | /// that no node is allowed to spam messages without limits. We do not specify details yet, but in 16 | /// future releases we plan to publish recommended upper bounds for the amounts of bandwidth and 17 | /// number of messages allowed per node per unit of time. These bounds must be carefully crafted 18 | /// based upon the number of nodes N and the configured delays between subsequent Dag rounds, so 19 | /// that at the same time spammers are cut off but honest nodes are able to function correctly within 20 | /// these bounds. 21 | /// 22 | /// Note on Network Reliability: it is not assumed that each message that AlephBFT orders to send 23 | /// reaches its intended recipient; there are some built-in reliability mechanisms within AlephBFT 24 | /// that will automatically detect certain failures and resend messages as needed. Clearly, the less 25 | /// reliable the network is, the worse the performance of AlephBFT will be (generally slower to 26 | /// produce output). Also, not surprisingly, if the percentage of dropped messages is too high 27 | /// AlephBFT might stop making progress, but from what we observe in tests, this happens only when 28 | /// the reliability is extremely bad, i.e., drops below 50% (which means there is some significant 29 | /// issue with the network). 30 | /// 31 | /// We refer to the documentation https://cardinal-cryptography.github.io/AlephBFT/aleph_bft_api.html 32 | /// Section 3.1.2 for a discussion of the required guarantees of this trait's implementation. 33 | #[async_trait::async_trait] 34 | pub trait Network<D>: Send + 'static { 35 | /// Send a message to a single node or everyone, depending on the value of the recipient 36 | /// argument. 37 | /// 38 | /// Note on the implementation: this function should be implemented in a non-blocking manner. 39 | /// Otherwise, the performance might be affected negatively or the execution may end up in a deadlock. 40 | fn send(&self, data: D, recipient: Recipient); 41 | /// Receive a message from the network. 42 | async fn next_event(&mut self) -> Option<D>; 43 | } 44 | -------------------------------------------------------------------------------- /types/src/tasks.rs: -------------------------------------------------------------------------------- 1 | use futures::Future; 2 | use std::pin::Pin; 3 | 4 | /// A handle for awaiting the task's completion. 5 | pub type TaskHandle = Pin<Box<dyn Future<Output = Result<(), ()>> + Send>>; 6 | 7 | /// An abstraction for an execution engine for Rust's asynchronous tasks. 8 | pub trait SpawnHandle: Clone + Send + 'static { 9 | /// Run a new task. 10 | fn spawn(&self, name: &'static str, task: impl Future<Output = ()> + Send + 'static); 11 | /// Run a new task and return a handle to it. If there is some error or panic during 12 | /// execution of the task, the handle should return an error. 13 | fn spawn_essential( 14 | &self, 15 | name: &'static str, 16 | task: impl Future<Output = ()> + Send + 'static, 17 | ) -> TaskHandle; 18 | } 19 | --------------------------------------------------------------------------------
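Closing illustration (added by the editor, not a file from this repository): a minimal sketch of how the `Network` and `SpawnHandle` traits from the last two files above might be implemented on top of tokio. The `tokio` and `async-trait` dependencies, the `MyData` alias, and the `ChannelNetwork`/`TokioSpawner` types are assumptions made for the sketch, not APIs of this codebase.

use aleph_bft_types::{Network, Recipient, SpawnHandle, TaskHandle};
use futures::{channel::oneshot, Future};
use tokio::sync::mpsc;

type MyData = u32; // any type satisfying the `Data` trait would work

struct ChannelNetwork {
    outgoing: mpsc::UnboundedSender<(MyData, Recipient)>,
    incoming: mpsc::UnboundedReceiver<MyData>,
}

#[async_trait::async_trait]
impl Network<MyData> for ChannelNetwork {
    // Non-blocking, as the trait documentation requires; a background task is
    // assumed to drain `outgoing` and deliver the messages.
    fn send(&self, data: MyData, recipient: Recipient) {
        let _ = self.outgoing.send((data, recipient));
    }

    async fn next_event(&mut self) -> Option<MyData> {
        self.incoming.recv().await
    }
}

#[derive(Clone)]
struct TokioSpawner;

impl SpawnHandle for TokioSpawner {
    fn spawn(&self, _name: &'static str, task: impl Future<Output = ()> + Send + 'static) {
        tokio::spawn(task);
    }

    fn spawn_essential(
        &self,
        _name: &'static str,
        task: impl Future<Output = ()> + Send + 'static,
    ) -> TaskHandle {
        // Mirrors the mock `Spawner`: the handle resolves to `Err(())` if the
        // task is dropped or panics before completing, and `Ok(())` otherwise.
        let (tx, rx) = oneshot::channel();
        tokio::spawn(async move {
            task.await;
            let _ = tx.send(());
        });
        Box::pin(async move { rx.await.map_err(|_| ()) })
    }
}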