├── .cargo └── config.toml ├── .devcontainer └── devcontainer.json ├── .editorconfig ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── custom.md │ ├── feature_request.md │ └── future_task.md ├── release-draft.md └── workflows │ ├── checks-and-tests.yaml │ ├── release.yaml │ └── static-analysis.yaml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── client └── consensus │ └── manual-seal │ ├── Cargo.toml │ ├── README.md │ └── src │ ├── consensus.rs │ ├── consensus │ ├── aura.rs │ ├── babe.rs │ └── timestamp.rs │ ├── error.rs │ ├── finalize_block.rs │ ├── lib.rs │ ├── rpc.rs │ └── seal_block.rs ├── docker-compose.yml ├── docs └── rust-setup.md ├── frame └── balances │ ├── Cargo.toml │ ├── README.md │ ├── rpc │ ├── Cargo.toml │ ├── runtime-api │ │ ├── Cargo.toml │ │ └── src │ │ │ └── lib.rs │ └── src │ │ └── lib.rs │ └── src │ ├── benchmarking.rs │ ├── impl_currency.rs │ ├── impl_fungible.rs │ ├── lib.rs │ ├── migration.rs │ ├── tests │ ├── currency_tests.rs │ ├── dispatchable_tests.rs │ ├── fungible_conformance_tests.rs │ ├── fungible_tests.rs │ ├── mod.rs │ └── reentrancy_tests.rs │ ├── types.rs │ └── weights.rs ├── node ├── Cargo.toml ├── build.rs └── src │ ├── chain_spec.rs │ ├── cli.rs │ ├── command.rs │ ├── lib.rs │ ├── main.rs │ ├── rpc.rs │ └── service.rs ├── runtime ├── Cargo.toml ├── build.rs └── src │ ├── chain_extensions.rs │ └── lib.rs ├── rust-toolchain.toml ├── rustfmt.toml ├── scripts ├── docker_run.sh └── init.sh └── taplo.toml /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | # https://github.com/rust-lang/rust-clippy#clippy 2 | # 3 | # An auto defined `clippy` feature was introduced, 4 | # but it was found to clash with user defined features, 5 | # so was renamed to `cargo-clippy`. 
6 | # 7 | # If you want standard clippy run: 8 | # RUSTFLAGS= cargo clippy 9 | [target.'cfg(feature = "cargo-clippy")'] 10 | rustflags = [ 11 | # Substrate config 12 | # https://github.com/paritytech/substrate/blob/master/.cargo/config.toml 13 | "-Aclippy::all", 14 | "-Dclippy::correctness", 15 | "-Aclippy::if-same-then-else", 16 | "-Aclippy::clone-double-ref", 17 | "-Dclippy::complexity", 18 | "-Aclippy::clone_on_copy", # Too common 19 | "-Aclippy::needless_lifetimes", # Backward compat? 20 | "-Aclippy::zero-prefixed-literal", # 00_1000_000 21 | "-Aclippy::type_complexity", # raison d'etre 22 | "-Aclippy::nonminimal-bool", # maybe 23 | "-Aclippy::borrowed-box", # Reasonable to fix this one 24 | "-Aclippy::too-many-arguments", # (Turning this on would lead to) 25 | "-Aclippy::unnecessary_cast", # Types may change 26 | "-Aclippy::identity-op", # One case where we do 0 + 27 | "-Aclippy::useless_conversion", # Types may change 28 | "-Aclippy::unit_arg", # styalistic. 29 | "-Aclippy::option-map-unit-fn", # styalistic 30 | "-Aclippy::bind_instead_of_map", # styalistic 31 | "-Aclippy::erasing_op", # E.g. 0 * DOLLARS 32 | "-Aclippy::eq_op", # In tests we test equality. 
33 | "-Aclippy::while_immutable_condition", # false positives 34 | "-Aclippy::needless_option_as_deref", # false positives 35 | "-Aclippy::derivable_impls", # false positives 36 | # Custom config 37 | "-Aclippy::boxed_local", 38 | "-Aclippy::double_must_use", 39 | "-Aclippy::unused_unit", 40 | ] 41 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Substrate Node template", 3 | "context": "..", 4 | "settings": { 5 | "terminal.integrated.shell.linux": "/bin/bash", 6 | "lldb.executable": "/usr/bin/lldb" 7 | }, 8 | "extensions": [ 9 | "rust-lang.rust", 10 | "bungcip.better-toml", 11 | "vadimcn.vscode-lldb" 12 | ], 13 | "forwardPorts": [ 14 | 3000, 15 | 9944 16 | ], 17 | "onCreateCommand": ["cargo build", "cargo check"], 18 | "postStartCommand": "./target/debug/swanky-node --dev --ws-external", 19 | "menuActions": [ 20 | {"id": "polkadotjs", 21 | "label": "Open PolkadotJS Apps", 22 | "type": "external-preview", 23 | "args": ["https://polkadot.js.org/apps/?rpc=wss%3A%2F%2F/$HOST/wss"]} 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_style=space 5 | indent_size=2 6 | tab_width=2 7 | end_of_line=lf 8 | charset=utf-8 9 | trim_trailing_whitespace=true 10 | insert_final_newline = true 11 | 12 | [*.{rs,toml}] 13 | indent_style=tab 14 | indent_size=tab 15 | tab_width=4 16 | max_line_length=100 17 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Report a Bug 3 | about: Report a problem with this project. 
4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | --- 8 | 9 | **Description** 10 | 11 | > Tell us what happened. In particular, tell us how and why you are using this project, and describe the bug that you encountered. Please note that we are not able to support all conceivable use cases, but the more information you are able to provide the more equipped we will be to help. 12 | 13 | **Steps to Reproduce** 14 | 15 | > Replace the example steps below with actual steps to reproduce the bug you're reporting. 16 | 17 | 1. Go to '...' 18 | 2. Click on '....' 19 | 3. Scroll down to '....' 20 | 4. See error 21 | 22 | **Expected vs. Actual Behavior** 23 | 24 | > What did you expect to happen after you followed the steps you described in the last section? What actually happened? 25 | 26 | **Environment** 27 | 28 | > Describe the environment in which you encountered this bug. Use the list below as a starting point and add additional information if you think it's relevant. 29 | 30 | - Operating system: 31 | - Project version/tag: 32 | - Rust version (run `rustup show`): 33 | 34 | **Logs, Errors or Screenshots** 35 | 36 | > Please provide the text of any logs or errors that you experienced; if applicable, provide screenshots to help illustrate the problem. 37 | 38 | **Additional Information** 39 | 40 | > Please add any other details that you think may help us solve your problem. 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/custom.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Custom issue template 3 | about: Describe this issue template's purpose here. 
4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | # Description 2 | {write description of what the feature should be} 3 | 4 | ## Required Tasks 5 | - [ ] {task 1} 6 | - [ ] {task 2} 7 | - [ ] ... 8 | 9 | ## Expired(Estimating). 10 | {When finished this task?} 11 | 12 | ## Dependencies 13 | {Dependencies issue or PR.} 14 | 15 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/future_task.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Future task issue template 3 | about: Describe this issue template's purpose here. 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | # Description 11 | {write description of what the feature should be} 12 | 13 | ## Required Tasks 14 | - [ ] {task 1} 15 | - [ ] {task 2} 16 | - [ ] ... 17 | 18 | ## Estimated done. 
19 | {When will this task be finished?}
-------------------------------------------------------------------------------- 1 | name: Release Build 2 | on: 3 | push: 4 | tags: 5 | - v[0-9]+.[0-9]+.[0-9]+* 6 | workflow_dispatch: 7 | env: 8 | SUBWASM_VERSION: 0.16.1 9 | jobs: 10 | checks-and-tests: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout the source code 14 | uses: actions/checkout@v3 15 | with: 16 | submodules: true 17 | 18 | - name: Install Protoc 19 | run: sudo apt -y install protobuf-compiler 20 | 21 | - name: Install & display rust toolchain 22 | run: rustup show 23 | 24 | - name: Check targets are installed correctly 25 | run: rustup target list --installed 26 | 27 | - name: Check all features compilation 28 | run: cargo check --verbose 29 | 30 | - name: Run all tests 31 | run: cargo test --verbose 32 | 33 | native-linux: 34 | needs: checks-and-tests 35 | runs-on: ubuntu-latest 36 | steps: 37 | - name: Checkout the source code 38 | uses: actions/checkout@v3 39 | with: 40 | submodules: true 41 | 42 | - name: Install Protoc 43 | run: sudo apt -y install protobuf-compiler 44 | 45 | - name: Install & display rust toolchain 46 | run: | 47 | rustup target add aarch64-unknown-linux-gnu 48 | rustup show 49 | 50 | - name: Check targets are installed correctly 51 | run: rustup target list --installed 52 | 53 | - name: Install zig 54 | uses: korandoru/setup-zig@v1 55 | with: 56 | zig-version: "master" 57 | 58 | - name: Install zigbuild 59 | run: cargo install cargo-zigbuild 60 | 61 | - name: Build optimized binary 62 | run: | 63 | cargo build --release --target x86_64-unknown-linux-gnu 64 | cargo zigbuild --release --target aarch64-unknown-linux-gnu 65 | 66 | - uses: actions/upload-artifact@master 67 | with: 68 | name: swanky-node-ubuntu-latest-x86_64 69 | path: target/x86_64-unknown-linux-gnu/release/swanky-node 70 | 71 | - uses: actions/upload-artifact@master 72 | with: 73 | name: swanky-node-ubuntu-latest-aarch64 74 | path: target/aarch64-unknown-linux-gnu/release/swanky-node 75 | 76 | 
native-macos: 77 | needs: checks-and-tests 78 | runs-on: macos-latest 79 | steps: 80 | - name: Checkout the source code 81 | uses: actions/checkout@v3 82 | with: 83 | submodules: true 84 | 85 | - name: Install Protoc 86 | run: brew install protobuf 87 | 88 | - name: Install & display rust toolchain 89 | run: | 90 | rustup target add aarch64-apple-darwin 91 | rustup show 92 | 93 | - name: Check targets are installed correctly 94 | run: rustup target list --installed 95 | 96 | - name: Build optimized binary 97 | run: | 98 | cargo build --release --target x86_64-apple-darwin 99 | cargo build --release --target aarch64-apple-darwin 100 | mkdir -p ./artifacts/swanky-node-mac/ 101 | lipo ./target/x86_64-apple-darwin/release/swanky-node ./target/aarch64-apple-darwin/release/swanky-node -create -output ./artifacts/swanky-node-mac/swanky-node 102 | 103 | - uses: actions/upload-artifact@master 104 | with: 105 | name: swanky-node-macOS-latest-universal 106 | path: artifacts/swanky-node-mac/swanky-node 107 | 108 | publish-release-draft: 109 | needs: [native-linux, native-macOS] 110 | runs-on: ubuntu-latest 111 | outputs: 112 | release_url: ${{ steps.create-release.outputs.html_url }} 113 | upload_url: ${{ steps.create-release.outputs.upload_url }} 114 | steps: 115 | - uses: actions/checkout@v3 116 | with: 117 | fetch-depth: 0 118 | 119 | - name: Create Release Draft 120 | id: create-release 121 | uses: actions/create-release@v1 122 | env: 123 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 124 | with: 125 | tag_name: ${{ github.ref_name }} 126 | release_name: ${{ github.ref_name }} 127 | body_path: .github/release-draft.md 128 | draft: true 129 | 130 | upload-binaries: 131 | needs: publish-release-draft 132 | runs-on: ubuntu-latest 133 | strategy: 134 | matrix: 135 | os: [ubuntu] 136 | arch: [aarch64, x86_64] 137 | include: 138 | - os: macOS 139 | arch: universal 140 | steps: 141 | - name: Create download folder 142 | run: | 143 | mkdir -p ${{ matrix.os }}-${{ matrix.arch }}-bin 
144 | 145 | - name: Download pre-built collator binary 146 | uses: actions/download-artifact@v3 147 | with: 148 | name: swanky-node-${{ matrix.os }}-latest-${{ matrix.arch }} 149 | path: ${{ matrix.os }}-${{ matrix.arch }}-bin 150 | 151 | - name: Make binary executable and tar gzip 152 | run: | 153 | cd ${{ matrix.os }}-${{ matrix.arch }}-bin 154 | chmod +x swanky-node 155 | tar zcvf swanky-node.tar.gz swanky-node 156 | 157 | - name: Upload binary artifact 158 | uses: actions/upload-release-asset@v1 159 | env: 160 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 161 | with: 162 | upload_url: ${{ needs.publish-release-draft.outputs.upload_url }} 163 | asset_path: ${{ matrix.os }}-${{ matrix.arch }}-bin/swanky-node.tar.gz 164 | asset_name: swanky-node-${{ github.ref_name }}-${{ matrix.os }}-${{ matrix.arch }}.tar.gz 165 | asset_content_type: application/gzip 166 | -------------------------------------------------------------------------------- /.github/workflows/static-analysis.yaml: -------------------------------------------------------------------------------- 1 | name: Static Analysis 2 | on: 3 | pull_request: 4 | types: [opened, reopened, synchronize, ready_for_review] 5 | workflow_dispatch: 6 | jobs: 7 | fmt: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout the source code 11 | uses: actions/checkout@v3 12 | 13 | - name: Install & display rust toolchain 14 | run: rustup show 15 | 16 | - name: Install Protoc 17 | uses: arduino/setup-protoc@v1 18 | with: 19 | version: '3.x' 20 | 21 | - name: Check targets are installed correctly 22 | run: rustup target list --installed 23 | 24 | - name: Check fmt 25 | run: cargo fmt -- --check 26 | 27 | - name: Check Cargo.toml format 28 | run: | 29 | if taplo --version &> /dev/null; then 30 | echo "taplo-cli is already installed" 31 | else 32 | cargo install taplo-cli 33 | fi 34 | taplo fmt --check 35 | 36 | clippy: 37 | runs-on: ubuntu-latest 38 | steps: 39 | - name: Checkout the source code 40 | uses: actions/checkout@v3 
41 | 42 | - name: Install Protoc 43 | uses: arduino/setup-protoc@v1 44 | with: 45 | version: '3.x' 46 | 47 | - name: Install & display rust toolchain 48 | run: rustup show 49 | 50 | - name: Check targets are installed correctly 51 | run: rustup target list --installed 52 | 53 | - uses: actions-rs/clippy-check@v1 54 | with: 55 | token: ${{ secrets.GITHUB_TOKEN }} 56 | args: -- -D warnings 57 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | **/target/ 4 | # These are backup files generated by rustfmt 5 | **/*.rs.bk 6 | 7 | .DS_Store 8 | 9 | # The cache for chain data in container 10 | .local 11 | 12 | # direnv cache 13 | .direnv 14 | 15 | .vscode 16 | vendor/ 17 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "node", 4 | "runtime", 5 | "frame/balances", 6 | "frame/balances/rpc", 7 | "frame/balances/rpc/runtime-api", 8 | "client/consensus/manual-seal", 9 | ] 10 | resolver = "2" 11 | exclude = [ 12 | "contracts", 13 | ] 14 | 15 | [profile.release] 16 | panic = "unwind" 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 
7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | 24 | For more information, please refer to 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Swanky Node :sunglasses: 2 | 3 | Swanky node is a Substrate based blockchain configured to enable `pallet-contracts` (a smart contract module) and more features to help WASM smart contract development locally. 4 | 5 | ## Features 6 | - [pallet-contracts](https://github.com/paritytech/substrate/tree/master/frame/contracts) (polkadot-0.9.39). 7 | - `grandpa` & `aura` consensus were removed. Instead, [`instant-seal`/`manual-seal`](https://github.com/AstarNetwork/swanky-node#consensus-manual-seal--instant-seal) & [`delayed-finalize`](https://github.com/AstarNetwork/swanky-node#consensus-delayed-finalize) are used. 8 | Blocks are sealed (1) as soon as a transaction get in the pool (2) when `engine_createBlock` RPC called. Blocks are finalized configured delay sec after blocks are sealed. 
9 | - Users' account Balance manipulation 10 | - Block height manipulation. Developers can forward and revert blocks via RPC. 11 | - [pallet-dapps-staking](https://github.com/AstarNetwork/Astar/tree/v5.15.0/pallets/dapps-staking) and ChainExtension to interact with it. 12 | - [pallet-assets](https://github.com/paritytech/substrate/tree/polkadot-v0.9.43/frame/assets). 13 | - Pallet-assets chain-extension 14 | - dApps-staking chain-extension 15 | 16 | Swanky Node is optimized for local development, while removing unnecessary components such as P2P. Additional features and pallets, such as to interact between (Contract <-> Runtime), will be added in the future. 17 | 18 | ## Compatible ink! version 19 | 20 | Any ink! version from `v4.0.0` or `higher` is supported by pallet-contract polkadot-0.9.43 branch. 21 | 22 | ## Installation 23 | ### Download Binary 24 | The easiest method of installation is by downloading and executing a precompiled binary from the [Release Page](https://github.com/AstarNetwork/swanky-node/releases) 25 | 26 | ### Build Locally 27 | If you would like to build the source locally, you should first complete the [basic Rust setup instructions](https://github.com/AstarNetwork/swanky-node/blob/main/docs/rust-setup.md). 28 | Once Rust is installed and configured, you will be able to build the node with: 29 | ```bash 30 | cargo build --release 31 | ``` 32 | 33 | ### Embedded Docs :book: 34 | 35 | Once the project has been built, the following command can be used to explore all parameters and 36 | subcommands: 37 | 38 | ```bash 39 | ./target/release/swanky-node -h 40 | ``` 41 | 42 | ## Usage 43 | This command will start the single-node development chain with a persistent state. 44 | ```bash 45 | ./target/release/swanky-node 46 | ``` 47 | If you would prefer to run the node in non-persistent mode, use tmp option. 
48 | ``` 49 | ./target/release/swanky-node --tmp 50 | # or 51 | ./target/release/swanky-node --dev 52 | ``` 53 | 54 | Purge the development chain's state. 55 | ```bash 56 | ./target/release/swanky-node purge-chain 57 | ``` 58 | 59 | > The **alice** development account will be the authority and sudo account as declared in the 60 | > [genesis state](https://github.com/AstarNetwork/swanky-node/blob/main/node/src/chain_spec.rs#L44). 61 | > While at the same time, the following accounts will be pre-funded: 62 | > 63 | > - Alice 64 | > - Bob 65 | > - Charlie 66 | > - Dave 67 | > - Eve 68 | > - Ferdie 69 | > - Alice//stash 70 | > - Bob//stash 71 | > - Charlie//stash 72 | > - Dave//stash 73 | > - Eve//stash 74 | > - Ferdie//stash 75 | 76 | ### Show only Errors and Contract Debug Output 77 | To print errors and contract debug output to the console log, supply `-lerror,runtime::contracts=debug` when starting the node. 78 | ``` 79 | -lerror,runtime::contracts=debug 80 | ``` 81 | 82 | Important: Debug output is only printed for RPC calls or off-chain tests ‒ not for transactions. 83 | 84 | See the ink! [FAQ](https://ink.substrate.io/faq/#how-do-i-print-something-to-the-console-from-the-runtime) for more details: How do I print something to the console from the runtime?. 85 | 86 | ### Connect with Polkadot-JS Apps Front-end 87 | 88 | Once the Swanky Node is running locally, you will be able to connect to it from the **Polkadot-JS Apps** front-end, 89 | in order to interact with your chain. [Click 90 | here](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944) connecting the Apps to your 91 | local Swanky Node. 92 | 93 | ### Run in Docker 94 | 95 | First, install [Docker](https://docs.docker.com/get-docker/) and 96 | [Docker Compose](https://docs.docker.com/compose/install/). 97 | 98 | Then run the following command to start a single node development chain. 
99 | 100 | ```bash 101 | mkdir .local # this is mounted by container 102 | ./scripts/docker_run.sh 103 | ``` 104 | 105 | This command will compile the code, and then start a local development network. You can 106 | also replace the default command 107 | (`cargo build --release && ./target/release/swanky-node --dev --ws-external`) 108 | by appending your own. A few useful commands are shown below: 109 | 110 | ```bash 111 | # Run Substrate node without re-compiling 112 | ./scripts/docker_run.sh ./target/release/swanky-node --ws-external 113 | 114 | # Purge the local dev chain 115 | ./scripts/docker_run.sh ./target/release/swanky-node purge-chain 116 | 117 | # Check whether the code is compilable 118 | ./scripts/docker_run.sh cargo check 119 | ``` 120 | 121 | ## Consensus (Manual Seal & Instant Seal) 122 | Unlike other blockchains, Swanky Node adopts block authoring and finality gadgets referred to as Manual Seal and Instant Seal, consensus mechanisms suitable for contract development and testing. 123 | 124 | Manual seal - Blocks are authored whenever RPC is called. 125 | Instant seal - Blocks are authored as soon as transactions enter the pool, most often resulting in one transaction per block. 126 | 127 | Swanky Node enables both Manual seal and Instant seal. 128 | 129 | ### Manual Sealing via RPC call 130 | We can tell the node to author a block by calling the `engine_createBlock` RPC. 131 | 132 | ```bash 133 | $ curl http://127.0.0.1:9944 -H "Content-Type:application/json;charset=utf-8" -d '{ 134 | "jsonrpc":"2.0", 135 | "id":1, 136 | "method":"engine_createBlock", 137 | "params": [true, false, null] 138 | }' 139 | ``` 140 | 141 | #### Params 142 | - **Create Empty** 143 | `create_empty` is a Boolean value indicating whether empty blocks may be created. Setting `create-empty` to true does not mean that an empty block will necessarily be created. Rather, it means that the engine should go ahead creating a block even if no transactions are present. 
If transactions are present in the queue, they will be included regardless of the value of `create_empty`. 144 | 145 | - **Finalize** 146 | `finalize` is a Boolean value indicating whether the block (and its ancestors, recursively) should be finalized after creation. 147 | 148 | - **Parent Hash** 149 | `parent_hash` is an optional hash of a block to use as a parent. To set the parent, use the format `"0x0e0626477621754200486f323e3858cd5f28fcbe52c69b2581aecb622e384764"`. To omit the parent, use `null`. When the parent is omitted the block will be built on the current best block. Manually specifying the parent is useful for constructing fork scenarios, and demonstrating chain reorganizations. 150 | 151 | ### Finalizing Blocks Manually 152 | In addition to finalizing blocks at the time of creating them, they may also be finalized later by using the RPC call `engine_finalizeBlock`. 153 | 154 | ```bash 155 | $ curl http://127.0.0.1:9944 -H "Content-Type:application/json;charset=utf-8" -d '{ 156 | "jsonrpc":"2.0", 157 | "id":1, 158 | "method":"engine_finalizeBlock", 159 | "params": ["0x0e0626477621754200486f323e3858cd5f28fcbe52c69b2581aecb622e384764", null] 160 | }' 161 | ``` 162 | 163 | ## Consensus (Delayed Finalize) 164 | By default, either manual or instant seal does not result in block finalization unless the `engine_finalizeBlock` RPC is executed. However, it is possible to configure the finalization of sealed blocks to occur after a certain amount of time by setting the `--finalize-delay-sec` option to a specific value, which specifies the number of seconds to delay before finalizing the blocks. 165 | 166 | ```bash 167 | ./target/release/swanky-node --finalize-delay-sec 5 168 | ``` 169 | 170 | In the above example, a setting of `5` seconds would result in the blocks being finalized five seconds after being sealed. In contrast, setting the value to `0` would lead to instant finalization, with the blocks being finalized immediately upon being sealed. 
171 | 172 | ## Block height manipulation 173 | Developers can forward blocks and revert blocks to requested block heights. 174 | 175 | ### Forward blocks via RPC 176 | Forwarding blocks to requested block height by calling `engine_forwardBlocksTo`. 177 | 178 | ```bash 179 | $ curl http://127.0.0.1:9944 -H "Content-Type:application/json;charset=utf-8" -d '{ 180 | "jsonrpc":"2.0", 181 | "id":1, 182 | "method":"engine_forwardBlocksTo", 183 | "params": [120] 184 | }' 185 | ``` 186 | 187 | #### Params 188 | - **Height** 189 | `height` denotes an integral value that signifies the desired block height towards which the user intends to progress. If the value is lower than current height, RPC returns an error. 190 | 191 | ### Revert blocks via RPC 192 | Reverting blocks to requested block height by calling `engine_revertBlocksTo`. 193 | 194 | Note that reverting finalized blocks only works when node is launched with archive mode `--state-pruning archive` (or `--pruning archive`) since reverting blocks requires past blocks' states. 195 | When blocks' states are pruned, **RPC won't revert finalized blocks**. 196 | 197 | ```bash 198 | $ curl http://127.0.0.1:9944 -H "Content-Type:application/json;charset=utf-8" -d '{ 199 | "jsonrpc":"2.0", 200 | "id":1, 201 | "method":"engine_revertBlocksTo", 202 | "params": [50] 203 | }' 204 | ``` 205 | 206 | #### Params 207 | - **Height** 208 | `height` denotes an integral value that represents the desired block height which the user intends to revert to. If the value is higher than current height, RPC returns an error. 209 | 210 | ## Account Balance manipulation 211 | For local development purpose, developers can manipulate any users' account balance via RPC without requiring their accounts' signatures and transaction cost to pay. 212 | 213 | ### Get Account Balance 214 | Getting users' account balance by `balance_getAccount` method. 
215 | ```bash 216 | curl http://localhost:9933 -H "Content-Type:application/json;charset=utf-8" -d '{ 217 | "jsonrpc":"2.0", 218 | "id":1, 219 | "method":"balance_getAccount", 220 | "params": ["5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", null] 221 | }' 222 | ``` 223 | 224 | #### Params 225 | - **Account ID** 226 | `account_id` is AccountID whose balance information you would like to check. 227 | 228 | ### Set Free Balance 229 | Free balance is amount of unreserved token owner can freely spend. `balance_setFreeBalance` alters the amount of free token a specified account has. 230 | ```bash 231 | curl http://localhost:9933 -H "Content-Type:application/json;charset=utf-8" -d '{ 232 | "jsonrpc":"2.0", 233 | "id":1, 234 | "method":"balance_setFreeBalance", 235 | "params": ["5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", 120000000000000000000, null] 236 | }' 237 | ``` 238 | 239 | #### Params 240 | - **Account ID** 241 | `account_id` is `AccountID` whose balance you would like to modify. 242 | 243 | - **Free Balance** 244 | `free_balance` is new Balance value you would like to set to accounts. 
245 | -------------------------------------------------------------------------------- /client/consensus/manual-seal/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sc-consensus-manual-seal" 3 | version = "1.7.0" 4 | authors = ["Astar Network"] 5 | description = "Manual sealing engine for Substrate" 6 | edition = "2021" 7 | license = "Unlicense" 8 | homepage = "https://astar.network" 9 | repository = "https://github.com/shunsukew/swanky-node/" 10 | readme = "README.md" 11 | 12 | [package.metadata.docs.rs] 13 | targets = ["x86_64-unknown-linux-gnu"] 14 | 15 | [dependencies] 16 | assert_matches = "1.3.0" 17 | async-trait = "0.1.57" 18 | codec = { package = "parity-scale-codec", version = "3.2.2" } 19 | futures = "0.3.21" 20 | futures-timer = "3.0.1" 21 | jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } 22 | log = "0.4.17" 23 | sc-client-api = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 24 | sc-consensus = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 25 | sc-consensus-aura = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 26 | sc-consensus-babe = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 27 | sc-consensus-epochs = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 28 | sc-service = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 29 | sc-transaction-pool = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 30 | sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", 
default-features = false } 31 | serde = { version = "1.0", features = ["derive"] } 32 | sp-api = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 33 | sp-blockchain = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 34 | sp-consensus = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 35 | sp-consensus-aura = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 36 | sp-consensus-babe = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 37 | sp-consensus-slots = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 38 | sp-core = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 39 | sp-inherents = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 40 | sp-keystore = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 41 | sp-runtime = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 42 | sp-timestamp = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 43 | substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 44 | thiserror = "1.0" 45 | 46 | [dev-dependencies] 47 | sc-basic-authorship = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 48 | substrate-test-runtime-client = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", 
default-features = false } 49 | substrate-test-runtime-transaction-pool = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 50 | tokio = { version = "1.22.0", features = ["rt-multi-thread", "macros"] } 51 | -------------------------------------------------------------------------------- /client/consensus/manual-seal/README.md: -------------------------------------------------------------------------------- 1 | A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. 2 | This is suitable for a testing environment. 3 | 4 | License: GPL-3.0-or-later WITH Classpath-exception-2.0 -------------------------------------------------------------------------------- /client/consensus/manual-seal/src/consensus.rs: -------------------------------------------------------------------------------- 1 | //! Extensions for manual seal to produce blocks valid for any runtime. 2 | use super::Error; 3 | 4 | use sc_consensus::BlockImportParams; 5 | use sp_inherents::InherentData; 6 | use sp_runtime::{traits::Block as BlockT, Digest}; 7 | 8 | pub mod aura; 9 | pub mod babe; 10 | pub mod timestamp; 11 | 12 | /// Consensus data provider, manual seal uses this trait object for authoring blocks valid 13 | /// for any runtime. 14 | pub trait ConsensusDataProvider: Send + Sync { 15 | /// Block import transaction type 16 | type Transaction; 17 | 18 | /// The proof type. 19 | type Proof; 20 | 21 | /// Attempt to create a consensus digest. 22 | fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result; 23 | 24 | /// Set up the necessary import params. 
25 | fn append_block_import( 26 | &self, 27 | parent: &B::Header, 28 | params: &mut BlockImportParams, 29 | inherents: &InherentData, 30 | proof: Self::Proof, 31 | ) -> Result<(), Error>; 32 | } 33 | -------------------------------------------------------------------------------- /client/consensus/manual-seal/src/consensus/aura.rs: -------------------------------------------------------------------------------- 1 | use crate::{ConsensusDataProvider, Error}; 2 | use sc_client_api::{AuxStore, UsageProvider}; 3 | use sc_consensus::BlockImportParams; 4 | use sp_api::{ProvideRuntimeApi, TransactionFor}; 5 | use sp_blockchain::{HeaderBackend, HeaderMetadata}; 6 | use sp_consensus_aura::{ 7 | digests::CompatibleDigestItem, 8 | sr25519::{AuthorityId, AuthoritySignature}, 9 | AuraApi, Slot, SlotDuration, 10 | }; 11 | use sp_inherents::InherentData; 12 | use sp_runtime::{traits::Block as BlockT, Digest, DigestItem}; 13 | use sp_timestamp::TimestampInherentData; 14 | use std::{marker::PhantomData, sync::Arc}; 15 | 16 | /// Consensus data provider for Aura. 
17 | pub struct AuraConsensusDataProvider { 18 | // slot duration 19 | slot_duration: SlotDuration, 20 | // phantom data for required generics 21 | _phantom: PhantomData<(B, C, P)>, 22 | } 23 | 24 | impl AuraConsensusDataProvider 25 | where 26 | B: BlockT, 27 | C: AuxStore + ProvideRuntimeApi + UsageProvider, 28 | C::Api: AuraApi, 29 | { 30 | /// Creates a new instance of the [`AuraConsensusDataProvider`], requires that `client` 31 | /// implements [`sp_consensus_aura::AuraApi`] 32 | pub fn new(client: Arc) -> Self { 33 | let slot_duration = sc_consensus_aura::slot_duration(&*client) 34 | .expect("slot_duration is always present; qed."); 35 | 36 | Self { slot_duration, _phantom: PhantomData } 37 | } 38 | } 39 | 40 | impl ConsensusDataProvider for AuraConsensusDataProvider 41 | where 42 | B: BlockT, 43 | C: AuxStore 44 | + HeaderBackend 45 | + HeaderMetadata 46 | + UsageProvider 47 | + ProvideRuntimeApi, 48 | C::Api: AuraApi, 49 | P: Send + Sync, 50 | { 51 | type Transaction = TransactionFor; 52 | type Proof = P; 53 | 54 | fn create_digest( 55 | &self, 56 | _parent: &B::Header, 57 | inherents: &InherentData, 58 | ) -> Result { 59 | let timestamp = 60 | inherents.timestamp_inherent_data()?.expect("Timestamp is always present; qed"); 61 | 62 | // we always calculate the new slot number based on the current time-stamp and the slot 63 | // duration. 
64 | let digest_item = >::aura_pre_digest( 65 | Slot::from_timestamp(timestamp, self.slot_duration), 66 | ); 67 | 68 | Ok(Digest { logs: vec![digest_item] }) 69 | } 70 | 71 | fn append_block_import( 72 | &self, 73 | _parent: &B::Header, 74 | _params: &mut BlockImportParams, 75 | _inherents: &InherentData, 76 | _proof: Self::Proof, 77 | ) -> Result<(), Error> { 78 | Ok(()) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /client/consensus/manual-seal/src/consensus/babe.rs: -------------------------------------------------------------------------------- 1 | use super::ConsensusDataProvider; 2 | use crate::{Error, LOG_TARGET}; 3 | use codec::Encode; 4 | use sc_client_api::{AuxStore, UsageProvider}; 5 | use sc_consensus_babe::{ 6 | authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Epoch, INTERMEDIATE_KEY, 7 | }; 8 | use sc_consensus_epochs::{ 9 | descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, 10 | }; 11 | use sp_keystore::KeystorePtr; 12 | use std::{marker::PhantomData, sync::Arc}; 13 | 14 | use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; 15 | use sp_api::{ProvideRuntimeApi, TransactionFor}; 16 | use sp_blockchain::{HeaderBackend, HeaderMetadata}; 17 | use sp_consensus_babe::{ 18 | digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, 19 | inherents::BabeInherentData, 20 | AuthorityId, BabeApi, BabeAuthorityWeight, BabeConfiguration, ConsensusLog, BABE_ENGINE_ID, 21 | }; 22 | use sp_consensus_slots::Slot; 23 | use sp_inherents::InherentData; 24 | use sp_runtime::{ 25 | generic::Digest, 26 | traits::{Block as BlockT, Header}, 27 | DigestItem, 28 | }; 29 | use sp_timestamp::TimestampInherentData; 30 | 31 | /// Provides BABE-compatible predigests and BlockImportParams. 32 | /// Intended for use with BABE runtimes. 
33 | pub struct BabeConsensusDataProvider { 34 | /// shared reference to keystore 35 | keystore: KeystorePtr, 36 | 37 | /// Shared reference to the client. 38 | client: Arc, 39 | 40 | /// Shared epoch changes 41 | epoch_changes: SharedEpochChanges, 42 | 43 | /// BABE config, gotten from the runtime. 44 | /// NOTE: This is used to fetch `slot_duration` and `epoch_length` in the 45 | /// `ConsensusDataProvider` implementation. Correct as far as these values 46 | /// are not changed during an epoch change. 47 | config: BabeConfiguration, 48 | 49 | /// Authorities to be used for this babe chain. 50 | authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, 51 | _phantom: PhantomData

, 52 | } 53 | 54 | /// Verifier to be used for babe chains 55 | pub struct BabeVerifier { 56 | /// Shared epoch changes 57 | epoch_changes: SharedEpochChanges, 58 | 59 | /// Shared reference to the client. 60 | client: Arc, 61 | } 62 | 63 | impl BabeVerifier { 64 | /// create a nrew verifier 65 | pub fn new(epoch_changes: SharedEpochChanges, client: Arc) -> BabeVerifier { 66 | BabeVerifier { epoch_changes, client } 67 | } 68 | } 69 | 70 | /// The verifier for the manual seal engine; instantly finalizes. 71 | #[async_trait::async_trait] 72 | impl Verifier for BabeVerifier 73 | where 74 | B: BlockT, 75 | C: HeaderBackend + HeaderMetadata, 76 | { 77 | async fn verify( 78 | &mut self, 79 | mut import_params: BlockImportParams, 80 | ) -> Result, String> { 81 | import_params.finalized = false; 82 | import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); 83 | 84 | let pre_digest = find_pre_digest::(&import_params.header)?; 85 | 86 | let parent_hash = import_params.header.parent_hash(); 87 | let parent = self 88 | .client 89 | .header(*parent_hash) 90 | .ok() 91 | .flatten() 92 | .ok_or_else(|| format!("header for block {} not found", parent_hash))?; 93 | let epoch_changes = self.epoch_changes.shared_data(); 94 | let epoch_descriptor = epoch_changes 95 | .epoch_descriptor_for_child_of( 96 | descendent_query(&*self.client), 97 | &parent.hash(), 98 | *parent.number(), 99 | pre_digest.slot(), 100 | ) 101 | .map_err(|e| format!("failed to fetch epoch_descriptor: {}", e))? 
102 | .ok_or_else(|| format!("{}", sp_consensus::Error::InvalidAuthoritiesSet))?; 103 | // drop the lock 104 | drop(epoch_changes); 105 | 106 | import_params 107 | .insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); 108 | 109 | Ok(import_params) 110 | } 111 | } 112 | 113 | impl BabeConsensusDataProvider 114 | where 115 | B: BlockT, 116 | C: AuxStore 117 | + HeaderBackend 118 | + ProvideRuntimeApi 119 | + HeaderMetadata 120 | + UsageProvider, 121 | C::Api: BabeApi, 122 | { 123 | pub fn new( 124 | client: Arc, 125 | keystore: KeystorePtr, 126 | epoch_changes: SharedEpochChanges, 127 | authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, 128 | ) -> Result { 129 | if authorities.is_empty() { 130 | return Err(Error::StringError("Cannot supply empty authority set!".into())); 131 | } 132 | 133 | let config = sc_consensus_babe::configuration(&*client)?; 134 | 135 | Ok(Self { 136 | config, 137 | client, 138 | keystore, 139 | epoch_changes, 140 | authorities, 141 | _phantom: Default::default(), 142 | }) 143 | } 144 | 145 | fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { 146 | let epoch_changes = self.epoch_changes.shared_data(); 147 | let epoch_descriptor = epoch_changes 148 | .epoch_descriptor_for_child_of( 149 | descendent_query(&*self.client), 150 | &parent.hash(), 151 | *parent.number(), 152 | slot, 153 | ) 154 | .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? 
155 | .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?; 156 | 157 | let epoch = epoch_changes 158 | .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) 159 | .ok_or_else(|| { 160 | log::info!(target: LOG_TARGET, "create_digest: no viable_epoch :("); 161 | sp_consensus::Error::InvalidAuthoritiesSet 162 | })?; 163 | 164 | Ok(epoch.as_ref().clone()) 165 | } 166 | } 167 | 168 | impl ConsensusDataProvider for BabeConsensusDataProvider 169 | where 170 | B: BlockT, 171 | C: AuxStore 172 | + HeaderBackend 173 | + HeaderMetadata 174 | + UsageProvider 175 | + ProvideRuntimeApi, 176 | C::Api: BabeApi, 177 | P: Send + Sync, 178 | { 179 | type Transaction = TransactionFor; 180 | type Proof = P; 181 | 182 | fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { 183 | let slot = inherents 184 | .babe_inherent_data()? 185 | .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; 186 | let epoch = self.epoch(parent, slot)?; 187 | 188 | // this is a dev node environment, we should always be able to claim a slot. 189 | let logs = if let Some((predigest, _)) = 190 | authorship::claim_slot(slot, &epoch, &self.keystore) 191 | { 192 | vec![::babe_pre_digest(predigest)] 193 | } else { 194 | // well we couldn't claim a slot because this is an existing chain and we're not in the 195 | // authorities. we need to tell BabeBlockImport that the epoch has changed, and we put 196 | // ourselves in the authorities. 197 | let predigest = 198 | PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot, authority_index: 0_u32 }); 199 | 200 | let mut epoch_changes = self.epoch_changes.shared_data(); 201 | let epoch_descriptor = epoch_changes 202 | .epoch_descriptor_for_child_of( 203 | descendent_query(&*self.client), 204 | &parent.hash(), 205 | *parent.number(), 206 | slot, 207 | ) 208 | .map_err(|e| { 209 | Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)) 210 | })? 
211 | .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?; 212 | 213 | match epoch_descriptor { 214 | ViableEpochDescriptor::Signaled(identifier, _epoch_header) => { 215 | let epoch_mut = epoch_changes 216 | .epoch_mut(&identifier) 217 | .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?; 218 | 219 | // mutate the current epoch 220 | epoch_mut.authorities = self.authorities.clone(); 221 | 222 | let next_epoch = ConsensusLog::NextEpochData(NextEpochDescriptor { 223 | authorities: self.authorities.clone(), 224 | // copy the old randomness 225 | randomness: epoch_mut.randomness, 226 | }); 227 | 228 | vec![ 229 | DigestItem::PreRuntime(BABE_ENGINE_ID, predigest.encode()), 230 | DigestItem::Consensus(BABE_ENGINE_ID, next_epoch.encode()), 231 | ] 232 | }, 233 | ViableEpochDescriptor::UnimportedGenesis(_) => { 234 | // since this is the genesis, secondary predigest works for now. 235 | vec![DigestItem::PreRuntime(BABE_ENGINE_ID, predigest.encode())] 236 | }, 237 | } 238 | }; 239 | 240 | Ok(Digest { logs }) 241 | } 242 | 243 | fn append_block_import( 244 | &self, 245 | parent: &B::Header, 246 | params: &mut BlockImportParams, 247 | inherents: &InherentData, 248 | _proof: Self::Proof, 249 | ) -> Result<(), Error> { 250 | let slot = inherents 251 | .babe_inherent_data()? 252 | .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; 253 | let epoch_changes = self.epoch_changes.shared_data(); 254 | let mut epoch_descriptor = epoch_changes 255 | .epoch_descriptor_for_child_of( 256 | descendent_query(&*self.client), 257 | &parent.hash(), 258 | *parent.number(), 259 | slot, 260 | ) 261 | .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? 
262 | .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?; 263 | // drop the lock 264 | drop(epoch_changes); 265 | // a quick check to see if we're in the authorities 266 | let epoch = self.epoch(parent, slot)?; 267 | let (authority, _) = self.authorities.first().expect("authorities is non-emptyp; qed"); 268 | let has_authority = epoch.authorities.iter().any(|(id, _)| *id == *authority); 269 | 270 | if !has_authority { 271 | log::info!(target: LOG_TARGET, "authority not found"); 272 | let timestamp = inherents 273 | .timestamp_inherent_data()? 274 | .ok_or_else(|| Error::StringError("No timestamp inherent data".into()))?; 275 | 276 | let slot = Slot::from_timestamp(timestamp, self.config.slot_duration()); 277 | 278 | // manually hard code epoch descriptor 279 | epoch_descriptor = match epoch_descriptor { 280 | ViableEpochDescriptor::Signaled(identifier, _header) => { 281 | ViableEpochDescriptor::Signaled( 282 | identifier, 283 | EpochHeader { 284 | start_slot: slot, 285 | end_slot: (*slot * self.config.epoch_length).into(), 286 | }, 287 | ) 288 | }, 289 | _ => unreachable!( 290 | "we're not in the authorities, so this isn't the genesis epoch; qed" 291 | ), 292 | }; 293 | } 294 | 295 | params.insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); 296 | 297 | Ok(()) 298 | } 299 | } 300 | -------------------------------------------------------------------------------- /client/consensus/manual-seal/src/consensus/timestamp.rs: -------------------------------------------------------------------------------- 1 | use crate::Error; 2 | use sc_client_api::{AuxStore, UsageProvider}; 3 | use sp_api::ProvideRuntimeApi; 4 | use sp_blockchain::HeaderBackend; 5 | use sp_consensus_aura::{ 6 | sr25519::{AuthorityId, AuthoritySignature}, 7 | AuraApi, 8 | }; 9 | use sp_consensus_babe::BabeApi; 10 | use sp_consensus_slots::{Slot, SlotDuration}; 11 | use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier}; 12 | use sp_runtime::traits::{Block 
as BlockT, Zero}; 13 | use sp_timestamp::{InherentType, INHERENT_IDENTIFIER}; 14 | use std::{ 15 | sync::{atomic, Arc}, 16 | time::SystemTime, 17 | }; 18 | 19 | /// Provide duration since unix epoch in millisecond for timestamp inherent. 20 | /// Mocks the timestamp inherent to always produce a valid timestamp for the next slot. 21 | /// 22 | /// This works by either fetching the `slot_number` from the most recent header and dividing 23 | /// that value by `slot_duration` in order to fork chains that expect this inherent. 24 | /// 25 | /// It produces timestamp inherents that are increased by `slot_duration` whenever 26 | /// `provide_inherent_data` is called. 27 | pub struct SlotTimestampProvider { 28 | // holds the unix millisecond timestamp for the most recent block 29 | unix_millis: atomic::AtomicU64, 30 | // configured slot_duration in the runtime 31 | slot_duration: SlotDuration, 32 | } 33 | 34 | impl SlotTimestampProvider { 35 | /// Create a new mocked time stamp provider, for babe. 36 | pub fn new_babe(client: Arc) -> Result 37 | where 38 | B: BlockT, 39 | C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider, 40 | C::Api: BabeApi, 41 | { 42 | let slot_duration = sc_consensus_babe::configuration(&*client)?.slot_duration(); 43 | 44 | let time = Self::with_header(&client, slot_duration, |header| { 45 | let slot_number = *sc_consensus_babe::find_pre_digest::(&header) 46 | .map_err(|err| format!("{}", err))? 
47 | .slot(); 48 | Ok(slot_number) 49 | })?; 50 | 51 | Ok(Self { unix_millis: atomic::AtomicU64::new(time), slot_duration }) 52 | } 53 | 54 | /// Create a new mocked time stamp provider, for aura 55 | pub fn new_aura(client: Arc) -> Result 56 | where 57 | B: BlockT, 58 | C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider, 59 | C::Api: AuraApi, 60 | { 61 | let slot_duration = sc_consensus_aura::slot_duration(&*client)?; 62 | 63 | let time = Self::with_header(&client, slot_duration, |header| { 64 | let slot_number = *sc_consensus_aura::find_pre_digest::(&header) 65 | .map_err(|err| format!("{}", err))?; 66 | Ok(slot_number) 67 | })?; 68 | 69 | Ok(Self { unix_millis: atomic::AtomicU64::new(time), slot_duration }) 70 | } 71 | 72 | fn with_header( 73 | client: &Arc, 74 | slot_duration: SlotDuration, 75 | func: F, 76 | ) -> Result 77 | where 78 | B: BlockT, 79 | C: AuxStore + HeaderBackend + UsageProvider, 80 | F: Fn(B::Header) -> Result, 81 | { 82 | let info = client.info(); 83 | 84 | // looks like this isn't the first block, rehydrate the fake time. 85 | // otherwise we'd be producing blocks for older slots. 86 | let time = if info.best_number != Zero::zero() { 87 | let header = client 88 | .header(info.best_hash)? 89 | .ok_or_else(|| "best header not found in the db!".to_string())?; 90 | let slot = func(header)?; 91 | // add the slot duration so there's no collision of slots 92 | (slot * slot_duration.as_millis() as u64) + slot_duration.as_millis() as u64 93 | } else { 94 | // this is the first block, use the correct time. 95 | let now = SystemTime::now(); 96 | now.duration_since(SystemTime::UNIX_EPOCH) 97 | .map_err(|err| Error::StringError(format!("{}", err)))? 
98 | .as_millis() as u64 99 | }; 100 | 101 | Ok(time) 102 | } 103 | 104 | /// Get the current slot number 105 | pub fn slot(&self) -> Slot { 106 | Slot::from_timestamp( 107 | self.unix_millis.load(atomic::Ordering::SeqCst).into(), 108 | self.slot_duration, 109 | ) 110 | } 111 | 112 | /// Gets the current time stamp. 113 | pub fn timestamp(&self) -> sp_timestamp::Timestamp { 114 | sp_timestamp::Timestamp::new(self.unix_millis.load(atomic::Ordering::SeqCst)) 115 | } 116 | } 117 | 118 | #[async_trait::async_trait] 119 | impl InherentDataProvider for SlotTimestampProvider { 120 | async fn provide_inherent_data( 121 | &self, 122 | inherent_data: &mut InherentData, 123 | ) -> Result<(), sp_inherents::Error> { 124 | // we update the time here. 125 | let new_time: InherentType = self 126 | .unix_millis 127 | .fetch_add(self.slot_duration.as_millis() as u64, atomic::Ordering::SeqCst) 128 | .into(); 129 | inherent_data.put_data(INHERENT_IDENTIFIER, &new_time)?; 130 | Ok(()) 131 | } 132 | 133 | async fn try_handle_error( 134 | &self, 135 | _: &InherentIdentifier, 136 | _: &[u8], 137 | ) -> Option> { 138 | None 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /client/consensus/manual-seal/src/error.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::{mpsc::SendError, oneshot}; 2 | use jsonrpsee::{ 3 | core::Error as JsonRpseeError, 4 | types::error::{CallError, ErrorObject}, 5 | }; 6 | use sc_consensus::ImportResult; 7 | use sp_blockchain::Error as BlockchainError; 8 | use sp_consensus::Error as ConsensusError; 9 | use sp_inherents::Error as InherentsError; 10 | 11 | /// Error code for rpc 12 | mod codes { 13 | pub const SERVER_SHUTTING_DOWN: i32 = 10_000; 14 | pub const BLOCK_IMPORT_FAILED: i32 = 11_000; 15 | pub const EMPTY_TRANSACTION_POOL: i32 = 12_000; 16 | pub const BLOCK_NOT_FOUND: i32 = 13_000; 17 | pub const CONSENSUS_ERROR: i32 = 14_000; 18 | pub const 
INHERENTS_ERROR: i32 = 15_000; 19 | pub const BLOCKCHAIN_ERROR: i32 = 16_000; 20 | pub const UNKNOWN_ERROR: i32 = 20_000; 21 | } 22 | 23 | /// errors encountered by background block authorship task 24 | #[derive(Debug, thiserror::Error)] 25 | pub enum Error { 26 | /// An error occurred while importing the block 27 | #[error("Block import failed: {0:?}")] 28 | BlockImportError(ImportResult), 29 | /// Transaction pool is empty, cannot create a block 30 | #[error( 31 | "Transaction pool is empty, set create_empty to true, if you want to create empty blocks" 32 | )] 33 | EmptyTransactionPool, 34 | /// encountered during creation of Proposer. 35 | #[error("Consensus Error: {0}")] 36 | ConsensusError(#[from] ConsensusError), 37 | /// Failed to create Inherents data 38 | #[error("Inherents Error: {0}")] 39 | InherentError(#[from] InherentsError), 40 | /// error encountered during finalization 41 | #[error("Finalization Error: {0}")] 42 | BlockchainError(#[from] BlockchainError), 43 | /// Supplied parent_hash doesn't exist in chain 44 | #[error("Supplied parent_hash: {0} doesn't exist in chain")] 45 | BlockNotFound(String), 46 | /// Some string error 47 | #[error("{0}")] 48 | StringError(String), 49 | /// send error 50 | #[error("Consensus process is terminating")] 51 | Canceled(#[from] oneshot::Canceled), 52 | /// send error 53 | #[error("Consensus process is terminating")] 54 | SendError(#[from] SendError), 55 | /// Some other error. 
56 | #[error("Other error: {0}")] 57 | Other(Box), 58 | } 59 | 60 | impl From for Error { 61 | fn from(err: ImportResult) -> Self { 62 | Error::BlockImportError(err) 63 | } 64 | } 65 | 66 | impl From for Error { 67 | fn from(s: String) -> Self { 68 | Error::StringError(s) 69 | } 70 | } 71 | 72 | impl Error { 73 | fn to_code(&self) -> i32 { 74 | use Error::*; 75 | match self { 76 | BlockImportError(_) => codes::BLOCK_IMPORT_FAILED, 77 | BlockNotFound(_) => codes::BLOCK_NOT_FOUND, 78 | EmptyTransactionPool => codes::EMPTY_TRANSACTION_POOL, 79 | ConsensusError(_) => codes::CONSENSUS_ERROR, 80 | InherentError(_) => codes::INHERENTS_ERROR, 81 | BlockchainError(_) => codes::BLOCKCHAIN_ERROR, 82 | SendError(_) | Canceled(_) => codes::SERVER_SHUTTING_DOWN, 83 | _ => codes::UNKNOWN_ERROR, 84 | } 85 | } 86 | } 87 | 88 | impl From for JsonRpseeError { 89 | fn from(err: Error) -> Self { 90 | CallError::Custom(ErrorObject::owned(err.to_code(), err.to_string(), None::<()>)).into() 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /client/consensus/manual-seal/src/finalize_block.rs: -------------------------------------------------------------------------------- 1 | //! Block finalization utilities 2 | 3 | use crate::rpc; 4 | use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; 5 | use sp_runtime::{traits::Block as BlockT, Justification}; 6 | use std::{marker::PhantomData, sync::Arc}; 7 | 8 | /// params for block finalization. 9 | pub struct FinalizeBlockParams { 10 | /// hash of the block 11 | pub hash: ::Hash, 12 | /// sender to report errors/success to the rpc. 13 | pub sender: rpc::Sender<()>, 14 | /// finalization justification 15 | pub justification: Option, 16 | /// Finalizer trait object. 17 | pub finalizer: Arc, 18 | /// phantom type to pin the Backend type 19 | pub _phantom: PhantomData, 20 | } 21 | 22 | /// finalizes a block in the backend with the given params. 
23 | pub async fn finalize_block(params: FinalizeBlockParams) 24 | where 25 | B: BlockT, 26 | F: Finalizer, 27 | CB: ClientBackend, 28 | { 29 | let FinalizeBlockParams { hash, mut sender, justification, finalizer, .. } = params; 30 | 31 | match finalizer.finalize_block(hash, justification, true) { 32 | Err(e) => { 33 | log::warn!("Failed to finalize block {}", e); 34 | rpc::send_result(&mut sender, Err(e.into())) 35 | }, 36 | Ok(()) => { 37 | log::info!("✅ Successfully finalized block: {}", hash); 38 | rpc::send_result(&mut sender, Ok(())) 39 | }, 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /client/consensus/manual-seal/src/rpc.rs: -------------------------------------------------------------------------------- 1 | //! RPC interface for the `ManualSeal` Engine. 2 | 3 | use crate::error::Error; 4 | use futures::{ 5 | channel::{mpsc, oneshot}, 6 | prelude::*, 7 | stream::StreamExt, 8 | SinkExt, 9 | }; 10 | use jsonrpsee::{ 11 | core::{async_trait, Error as JsonRpseeError, RpcResult}, 12 | proc_macros::rpc, 13 | }; 14 | use sc_consensus::ImportedAux; 15 | use serde::{Deserialize, Serialize}; 16 | use sp_blockchain::HeaderBackend; 17 | use sp_runtime::{ 18 | traits::{Block as BlockT, Header}, 19 | EncodedJustification, SaturatedConversion, 20 | }; 21 | use std::sync::Arc; 22 | 23 | /// Sender passed to the authorship task to report errors or successes. 24 | pub type Sender = Option>>; 25 | 26 | /// Message sent to the background authorship task, usually by RPC. 27 | pub enum EngineCommand { 28 | /// Tells the engine to propose a new block 29 | /// 30 | /// if create_empty == true, it will create empty blocks if there are no transactions 31 | /// in the transaction pool. 32 | /// 33 | /// if finalize == true, the block will be instantly finalized. 34 | SealNewBlock { 35 | /// if true, empty blocks(without extrinsics) will be created. 36 | /// otherwise, will return Error::EmptyTransactionPool. 
37 | create_empty: bool, 38 | /// instantly finalize this block? 39 | finalize: bool, 40 | /// specify the parent hash of the about-to-created block 41 | parent_hash: Option, 42 | /// sender to report errors/success to the rpc. 43 | sender: Sender>, 44 | }, 45 | /// Tells the engine to finalize the block with the supplied hash 46 | FinalizeBlock { 47 | /// hash of the block 48 | hash: Hash, 49 | /// sender to report errors/success to the rpc. 50 | sender: Sender<()>, 51 | /// finalization justification 52 | justification: Option, 53 | }, 54 | } 55 | 56 | /// RPC trait that provides methods for interacting with the manual-seal authorship task over rpc. 57 | #[rpc(client, server)] 58 | pub trait ManualSealApi 59 | where 60 | Block: BlockT, 61 | { 62 | /// Instructs the manual-seal authorship task to create a new block 63 | #[method(name = "engine_createBlock")] 64 | async fn create_block( 65 | &self, 66 | create_empty: bool, 67 | finalize: bool, 68 | parent_hash: Option, 69 | ) -> RpcResult>; 70 | 71 | /// Instructs the manual-seal authorship task to finalize a block 72 | #[method(name = "engine_finalizeBlock")] 73 | async fn finalize_block( 74 | &self, 75 | hash: Block::Hash, 76 | justification: Option, 77 | ) -> RpcResult; 78 | 79 | #[method(name = "engine_forwardBlocksTo")] 80 | async fn forward_blocks_to( 81 | &self, 82 | height: <::Header as Header>::Number, 83 | ) -> RpcResult<()>; 84 | 85 | #[method(name = "engine_revertBlocksTo")] 86 | async fn revert_blocks_to( 87 | &self, 88 | height: <::Header as Header>::Number, 89 | ) -> RpcResult<()>; 90 | } 91 | 92 | /// A struct that implements the [`ManualSealApiServer`]. 93 | pub struct ManualSeal { 94 | client: Arc, 95 | backend: Arc, 96 | import_block_channel: mpsc::Sender>, 97 | } 98 | 99 | /// return type of `engine_createBlock` 100 | #[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] 101 | pub struct CreatedBlock { 102 | /// hash of the created block. 
103 | pub hash: Hash, 104 | /// some extra details about the import operation 105 | pub aux: ImportedAux, 106 | } 107 | 108 | impl ManualSeal { 109 | /// Create new `ManualSeal` with the given reference to the client. 110 | pub fn new( 111 | client: Arc, 112 | backend: Arc, 113 | import_block_channel: mpsc::Sender>, 114 | ) -> Self { 115 | Self { client, backend, import_block_channel } 116 | } 117 | } 118 | 119 | #[async_trait] 120 | impl ManualSealApiServer for ManualSeal 121 | where 122 | Block: BlockT, 123 | Client: sp_api::ProvideRuntimeApi, 124 | Client: HeaderBackend, 125 | Client: Send + Sync + 'static, 126 | Backend: sc_client_api::backend::Backend + Send + Sync + 'static, 127 | { 128 | async fn create_block( 129 | &self, 130 | create_empty: bool, 131 | finalize: bool, 132 | parent_hash: Option, 133 | ) -> RpcResult> { 134 | let mut sink = self.import_block_channel.clone(); 135 | let (sender, receiver) = oneshot::channel(); 136 | // NOTE: this sends a Result over the channel. 137 | let command = EngineCommand::SealNewBlock { 138 | create_empty, 139 | finalize, 140 | parent_hash, 141 | sender: Some(sender), 142 | }; 143 | 144 | sink.send(command).await?; 145 | 146 | match receiver.await { 147 | Ok(Ok(rx)) => Ok(rx), 148 | Ok(Err(e)) => Err(e.into()), 149 | Err(e) => Err(JsonRpseeError::to_call_error(e)), 150 | } 151 | } 152 | 153 | async fn finalize_block( 154 | &self, 155 | hash: Block::Hash, 156 | justification: Option, 157 | ) -> RpcResult { 158 | let mut sink = self.import_block_channel.clone(); 159 | let (sender, receiver) = oneshot::channel(); 160 | let command = EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification }; 161 | sink.send(command).await?; 162 | receiver.await.map(|_| true).map_err(|e| JsonRpseeError::to_call_error(e)) 163 | } 164 | 165 | async fn forward_blocks_to( 166 | &self, 167 | height: <::Header as Header>::Number, 168 | ) -> RpcResult<()> { 169 | let best_number = self.client.info().best_number; 170 | if height <= 
best_number { 171 | return Err(JsonRpseeError::Custom( 172 | "Target height is lower than current best height".into(), 173 | )); 174 | } 175 | 176 | let diff = height - best_number; 177 | let to_height = (0..diff.saturated_into::()) 178 | .into_iter() 179 | .map(|_| EngineCommand::SealNewBlock { 180 | create_empty: true, 181 | finalize: false, 182 | parent_hash: None, 183 | sender: None, 184 | }) 185 | .collect::>>(); 186 | 187 | let mut forward_blocks_stream = stream::iter(to_height).map(Ok); 188 | 189 | let mut sink = self.import_block_channel.clone(); 190 | sink.send_all(&mut forward_blocks_stream).await?; 191 | 192 | Ok(()) 193 | } 194 | 195 | async fn revert_blocks_to( 196 | &self, 197 | height: <::Header as Header>::Number, 198 | ) -> RpcResult<()> { 199 | let best_number = self.client.info().best_number; 200 | if height >= best_number { 201 | return Err(JsonRpseeError::Custom( 202 | "Target height is higher than current best height".into(), 203 | )); 204 | } 205 | 206 | let diff = best_number - height; 207 | 208 | println!("Diff: {:?}", diff); 209 | 210 | let reverted = self 211 | .backend 212 | .revert(diff, true) 213 | .map_err(|e| JsonRpseeError::Custom(format!("Backend Revert Error: {}", e)))?; 214 | 215 | println!("Reverted: {:?}", reverted); 216 | 217 | Ok(()) 218 | } 219 | } 220 | 221 | /// report any errors or successes encountered by the authorship task back 222 | /// to the rpc 223 | pub fn send_result( 224 | sender: &mut Sender, 225 | result: std::result::Result, 226 | ) { 227 | if let Some(sender) = sender.take() { 228 | if let Err(err) = sender.send(result) { 229 | match err { 230 | Ok(value) => log::warn!("Server is shutting down: {:?}", value), 231 | Err(error) => log::warn!("Server is shutting down with error: {}", error), 232 | } 233 | } 234 | } else { 235 | // No RPC sender sealing/finalization such as instant seal or delayed finalize doesn't 236 | // report errors over rpc, simply log them. 
237 | match result { 238 | Ok(r) => log::info!("Consensus with no RPC sender success: {:?}", r), 239 | Err(e) => log::error!("Consensus with no RPC sender encountered an error: {}", e), 240 | } 241 | } 242 | } 243 | -------------------------------------------------------------------------------- /client/consensus/manual-seal/src/seal_block.rs: -------------------------------------------------------------------------------- 1 | //! Block sealing utilities 2 | 3 | use crate::{rpc, ConsensusDataProvider, CreatedBlock, Error}; 4 | use futures::prelude::*; 5 | use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction}; 6 | use sc_transaction_pool_api::TransactionPool; 7 | use sp_api::{ProvideRuntimeApi, TransactionFor}; 8 | use sp_blockchain::HeaderBackend; 9 | use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain}; 10 | use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; 11 | use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; 12 | use std::{sync::Arc, time::Duration}; 13 | 14 | /// max duration for creating a proposal in secs 15 | pub const MAX_PROPOSAL_DURATION: u64 = 180; 16 | 17 | /// params for sealing a new block 18 | pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP, P> { 19 | /// if true, empty blocks(without extrinsics) will be created. 20 | /// otherwise, will return Error::EmptyTransactionPool. 21 | pub create_empty: bool, 22 | /// instantly finalize this block? 23 | pub finalize: bool, 24 | /// specify the parent hash of the about-to-created block 25 | pub parent_hash: Option<::Hash>, 26 | /// sender to report errors/success to the rpc. 
27 | pub sender: rpc::Sender::Hash>>, 28 | /// transaction pool 29 | pub pool: Arc, 30 | /// header backend 31 | pub client: Arc, 32 | /// Environment trait object for creating a proposer 33 | pub env: &'a mut E, 34 | /// SelectChain object 35 | pub select_chain: &'a SC, 36 | /// Digest provider for inclusion in blocks. 37 | pub consensus_data_provider: 38 | Option<&'a dyn ConsensusDataProvider>>, 39 | /// block import object 40 | pub block_import: &'a mut BI, 41 | /// Something that can create the inherent data providers. 42 | pub create_inherent_data_providers: &'a CIDP, 43 | } 44 | 45 | /// seals a new block with the given params 46 | pub async fn seal_block( 47 | SealBlockParams { 48 | create_empty, 49 | finalize, 50 | pool, 51 | parent_hash, 52 | client, 53 | select_chain, 54 | block_import, 55 | env, 56 | create_inherent_data_providers, 57 | consensus_data_provider: digest_provider, 58 | mut sender, 59 | }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP, P>, 60 | ) where 61 | B: BlockT, 62 | BI: BlockImport> 63 | + Send 64 | + Sync 65 | + 'static, 66 | C: HeaderBackend + ProvideRuntimeApi, 67 | E: Environment, 68 | E::Proposer: Proposer>, 69 | TP: TransactionPool, 70 | SC: SelectChain, 71 | TransactionFor: 'static, 72 | CIDP: CreateInherentDataProviders, 73 | P: Send + Sync + 'static, 74 | { 75 | let future = async { 76 | if pool.status().ready == 0 && !create_empty { 77 | return Err(Error::EmptyTransactionPool); 78 | } 79 | 80 | // get the header to build this new block on. 81 | // use the parent_hash supplied via `EngineCommand` 82 | // or fetch the best_block. 83 | let parent = match parent_hash { 84 | Some(hash) => { 85 | client.header(hash)?.ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))? 
86 | }, 87 | None => select_chain.best_chain().await?, 88 | }; 89 | 90 | let inherent_data_providers = create_inherent_data_providers 91 | .create_inherent_data_providers(parent.hash(), ()) 92 | .await 93 | .map_err(|e| Error::Other(e))?; 94 | 95 | let inherent_data = inherent_data_providers.create_inherent_data().await?; 96 | 97 | let proposer = env.init(&parent).map_err(|err| Error::StringError(err.to_string())).await?; 98 | let inherents_len = inherent_data.len(); 99 | 100 | let digest = if let Some(digest_provider) = digest_provider { 101 | digest_provider.create_digest(&parent, &inherent_data)? 102 | } else { 103 | Default::default() 104 | }; 105 | 106 | let proposal = proposer 107 | .propose( 108 | inherent_data.clone(), 109 | digest, 110 | Duration::from_secs(MAX_PROPOSAL_DURATION), 111 | None, 112 | ) 113 | .map_err(|err| Error::StringError(err.to_string())) 114 | .await?; 115 | 116 | if proposal.block.extrinsics().len() == inherents_len && !create_empty { 117 | return Err(Error::EmptyTransactionPool); 118 | } 119 | 120 | let (header, body) = proposal.block.deconstruct(); 121 | let proof = proposal.proof; 122 | let mut params = BlockImportParams::new(BlockOrigin::Own, header.clone()); 123 | params.body = Some(body); 124 | params.finalized = finalize; 125 | params.fork_choice = Some(ForkChoiceStrategy::LongestChain); 126 | params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes( 127 | proposal.storage_changes, 128 | )); 129 | 130 | if let Some(digest_provider) = digest_provider { 131 | digest_provider.append_block_import(&parent, &mut params, &inherent_data, proof)?; 132 | } 133 | 134 | // Make sure we return the same post-hash that will be calculated when importing the block 135 | // This is important in case the digest_provider added any signature, seal, ect. 
136 | let mut post_header = header.clone(); 137 | post_header.digest_mut().logs.extend(params.post_digests.iter().cloned()); 138 | 139 | match block_import.import_block(params).await? { 140 | ImportResult::Imported(aux) => { 141 | Ok(CreatedBlock { hash: ::Header::hash(&post_header), aux }) 142 | }, 143 | other => Err(other.into()), 144 | } 145 | }; 146 | 147 | rpc::send_result(&mut sender, future.await) 148 | } 149 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.2" 2 | 3 | services: 4 | dev: 5 | container_name: swanky-node 6 | image: paritytech/ci-linux:eb1f6a26-20220517 7 | working_dir: /var/www/swanky-node 8 | ports: 9 | - "9944:9944" 10 | environment: 11 | - CARGO_HOME=/var/www/swanky-node/.cargo 12 | volumes: 13 | - .:/var/www/swanky-node 14 | - type: bind 15 | source: ./.local 16 | target: /root/.local 17 | command: bash -c "cargo build --release && ./target/release/swanky-node --dev --ws-external" 18 | -------------------------------------------------------------------------------- /docs/rust-setup.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Installation 3 | --- 4 | 5 | This guide is for reference only, please check the latest information on getting starting with Substrate 6 | [here](https://docs.substrate.io/v3/getting-started/installation/). 7 | 8 | This page will guide you through the **2 steps** needed to prepare a computer for **Substrate** development. 9 | Since Substrate is built with [the Rust programming language](https://www.rust-lang.org/), the first 10 | thing you will need to do is prepare the computer for Rust development - these steps will vary based 11 | on the computer's operating system. 
Once Rust is configured, you will use its toolchains to interact 12 | with Rust projects; the commands for Rust's toolchains will be the same for all supported, 13 | Unix-based operating systems. 14 | 15 | ## Build dependencies 16 | 17 | Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples 18 | in the [Substrate Docs](https://docs.substrate.io) use Unix-style terminals to demonstrate how to 19 | interact with Substrate from the command line. 20 | 21 | ### Ubuntu/Debian 22 | 23 | Use a terminal shell to execute the following commands: 24 | 25 | ```bash 26 | sudo apt update 27 | # May prompt for location information 28 | sudo apt install -y git clang curl libssl-dev llvm libudev-dev cmake make 29 | ``` 30 | 31 | ### Arch Linux 32 | 33 | Run these commands from a terminal: 34 | 35 | ```bash 36 | pacman -Syu --needed --noconfirm curl git clang 37 | ``` 38 | 39 | ### Fedora 40 | 41 | Run these commands from a terminal: 42 | 43 | ```bash 44 | sudo dnf update 45 | sudo dnf install clang curl git openssl-devel 46 | ``` 47 | 48 | ### OpenSUSE 49 | 50 | Run these commands from a terminal: 51 | 52 | ```bash 53 | sudo zypper install clang curl git openssl-devel llvm-devel libudev-devel 54 | ``` 55 | 56 | ### macOS 57 | 58 | > **Apple M1 ARM** 59 | > If you have an Apple M1 ARM system on a chip, make sure that you have Apple Rosetta 2 60 | > installed through `softwareupdate --install-rosetta`. This is only needed to run the 61 | > `protoc` tool during the build. The build itself and the target binaries would remain native. 
62 | 63 | Open the Terminal application and execute the following commands: 64 | 65 | ```bash 66 | # Install Homebrew if necessary https://brew.sh/ 67 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" 68 | 69 | # Make sure Homebrew is up-to-date, install openssl 70 | brew update 71 | brew install openssl 72 | ``` 73 | 74 | ### Windows 75 | 76 | **_PLEASE NOTE:_** Native development of Substrate is _not_ very well supported! It is _highly_ 77 | recommend to use [Windows Subsystem Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10) 78 | (WSL) and follow the instructions for [Ubuntu/Debian](#ubuntudebian). 79 | Please refer to the separate 80 | [guide for native Windows development](https://docs.substrate.io/v3/getting-started/windows-users/). 81 | 82 | ## Rust developer environment 83 | 84 | This guide uses installer and the `rustup` tool to manage the Rust toolchain. 85 | First install and configure `rustup`: 86 | 87 | ```bash 88 | # Install 89 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 90 | # Configure 91 | source ~/.cargo/env 92 | ``` 93 | 94 | Configure the Rust toolchain to default to the latest stable version, add nightly and the nightly wasm target: 95 | 96 | ```bash 97 | rustup default stable 98 | rustup update 99 | rustup update nightly 100 | rustup target add wasm32-unknown-unknown --toolchain nightly 101 | ``` 102 | 103 | ## Test your set-up 104 | 105 | Now the best way to ensure that you have successfully prepared a computer for Substrate 106 | development is to follow the steps in [our first Substrate tutorial](https://docs.substrate.io/tutorials/v3/create-your-first-substrate-chain/). 107 | 108 | ## Troubleshooting Substrate builds 109 | 110 | Sometimes you can't get the Substrate node template 111 | to compile out of the box. Here are some tips to help you work through that. 
112 | 113 | ### Rust configuration check 114 | 115 | To see what Rust toolchain you are presently using, run: 116 | 117 | ```bash 118 | rustup show 119 | ``` 120 | 121 | This will show something like this (Ubuntu example) output: 122 | 123 | ```text 124 | Default host: x86_64-unknown-linux-gnu 125 | rustup home: /home/user/.rustup 126 | 127 | installed toolchains 128 | -------------------- 129 | 130 | stable-x86_64-unknown-linux-gnu (default) 131 | nightly-2020-10-06-x86_64-unknown-linux-gnu 132 | nightly-x86_64-unknown-linux-gnu 133 | 134 | installed targets for active toolchain 135 | -------------------------------------- 136 | 137 | wasm32-unknown-unknown 138 | x86_64-unknown-linux-gnu 139 | 140 | active toolchain 141 | ---------------- 142 | 143 | stable-x86_64-unknown-linux-gnu (default) 144 | rustc 1.50.0 (cb75ad5db 2021-02-10) 145 | ``` 146 | 147 | As you can see above, the default toolchain is stable, and the 148 | `nightly-x86_64-unknown-linux-gnu` toolchain as well as its `wasm32-unknown-unknown` target is installed. 149 | You also see that `nightly-2020-10-06-x86_64-unknown-linux-gnu` is installed, but is not used unless explicitly defined as illustrated in the [specify your nightly version](#specifying-nightly-version) 150 | section. 151 | 152 | ### WebAssembly compilation 153 | 154 | Substrate uses [WebAssembly](https://webassembly.org) (Wasm) to produce portable blockchain 155 | runtimes. You will need to configure your Rust compiler to use 156 | [`nightly` builds](https://doc.rust-lang.org/book/appendix-07-nightly-rust.html) to allow you to 157 | compile Substrate runtime code to the Wasm target. 158 | 159 | > There are upstream issues in Rust that need to be resolved before all of Substrate can use the stable Rust toolchain. 160 | > [This is our tracking issue](https://github.com/paritytech/substrate/issues/1252) if you're curious as to why and how this will be resolved. 
161 | 162 | #### Latest nightly for Substrate `master` 163 | 164 | Developers who are building Substrate _itself_ should always use the latest bug-free versions of 165 | Rust stable and nightly. This is because the Substrate codebase follows the tip of Rust nightly, 166 | which means that changes in Substrate often depend on upstream changes in the Rust nightly compiler. 167 | To ensure your Rust compiler is always up to date, you should run: 168 | 169 | ```bash 170 | rustup update 171 | rustup update nightly 172 | rustup target add wasm32-unknown-unknown --toolchain nightly 173 | ``` 174 | 175 | > NOTE: It may be necessary to occasionally rerun `rustup update` if a change in the upstream Substrate 176 | > codebase depends on a new feature of the Rust compiler. When you do this, both your nightly 177 | > and stable toolchains will be pulled to the most recent release, and for nightly, it is 178 | > generally _not_ expected to compile WASM without error (although it very often does). 179 | > Be sure to [specify your nightly version](#specifying-nightly-version) if you get WASM build errors 180 | > from `rustup` and [downgrade nightly as needed](#downgrading-rust-nightly). 181 | 182 | #### Rust nightly toolchain 183 | 184 | If you want to guarantee that your build works on your computer as you update Rust and other 185 | dependencies, you should use a specific Rust nightly version that is known to be 186 | compatible with the version of Substrate they are using; this version will vary from project to 187 | project and different projects may use different mechanisms to communicate this version to 188 | developers. For instance, the Polkadot client specifies this information in its 189 | [release notes](https://github.com/paritytech/polkadot/releases). 
190 | 191 | ```bash 192 | # Specify the specific nightly toolchain in the date below: 193 | rustup install nightly- 194 | ``` 195 | 196 | #### Wasm toolchain 197 | 198 | Now, configure the nightly version to work with the Wasm compilation target: 199 | 200 | ```bash 201 | rustup target add wasm32-unknown-unknown --toolchain nightly- 202 | ``` 203 | 204 | ### Specifying nightly version 205 | 206 | Use the `WASM_BUILD_TOOLCHAIN` environment variable to specify the Rust nightly version a Substrate 207 | project should use for Wasm compilation: 208 | 209 | ```bash 210 | WASM_BUILD_TOOLCHAIN=nightly- cargo build --release 211 | ``` 212 | 213 | > Note that this only builds _the runtime_ with the specified nightly. The rest of project will be 214 | > compiled with **your default toolchain**, i.e. the latest installed stable toolchain. 215 | 216 | ### Downgrading Rust nightly 217 | 218 | If your computer is configured to use the latest Rust nightly and you would like to downgrade to a 219 | specific nightly version, follow these steps: 220 | 221 | ```bash 222 | rustup uninstall nightly 223 | rustup install nightly- 224 | rustup target add wasm32-unknown-unknown --toolchain nightly- 225 | ``` 226 | -------------------------------------------------------------------------------- /frame/balances/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-balances" 3 | version = "4.0.0-dev" 4 | authors = ["Parity Technologies "] 5 | edition = "2021" 6 | license = "Apache-2.0" 7 | homepage = "https://substrate.io" 8 | repository = "https://github.com/paritytech/substrate/" 9 | description = "FRAME pallet to manage balances" 10 | readme = "README.md" 11 | 12 | [package.metadata.docs.rs] 13 | targets = ["x86_64-unknown-linux-gnu"] 14 | 15 | [dependencies] 16 | codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive", "max-encoded-len"] } 17 | frame-benchmarking = { 
git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false, optional = true } 18 | frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 19 | frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 20 | log = { version = "0.4.17", default-features = false } 21 | scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } 22 | serde = { version = "1.0.151", features = ["derive"] } 23 | sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 24 | sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 25 | 26 | [dev-dependencies] 27 | pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 28 | paste = "1.0.12" 29 | sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 30 | sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 31 | 32 | [features] 33 | default = ["std"] 34 | std = [ 35 | "codec/std", 36 | "frame-benchmarking?/std", 37 | "frame-support/std", 38 | "frame-system/std", 39 | "log/std", 40 | "scale-info/std", 41 | "sp-runtime/std", 42 | "sp-std/std", 43 | ] 44 | # Enable support for setting the existential deposit to zero. 45 | insecure_zero_ed = [] 46 | runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] 47 | try-runtime = ["frame-support/try-runtime"] 48 | -------------------------------------------------------------------------------- /frame/balances/README.md: -------------------------------------------------------------------------------- 1 | Modifications to the original Balances pallet are added. 
2 | ※ Comment out [swanky node specific] is added to those custom implementations. Source code under frame/balances/src other than [swanky node specific] is copied from Substrate. 3 | 4 | # Balances Module 5 | 6 | The Balances module provides functionality for handling accounts and balances. 7 | 8 | - [`Config`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/trait.Config.html) 9 | - [`Call`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/enum.Call.html) 10 | - [`Pallet`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/struct.Pallet.html) 11 | 12 | ## Overview 13 | 14 | The Balances module provides functions for: 15 | 16 | - Getting and setting free balances. 17 | - Retrieving total, reserved and unreserved balances. 18 | - Repatriating a reserved balance to a beneficiary account that exists. 19 | - Transferring a balance between accounts (when not reserved). 20 | - Slashing an account balance. 21 | - Account creation and removal. 22 | - Managing total issuance. 23 | - Setting and managing locks. 24 | 25 | ### Terminology 26 | 27 | - **Existential Deposit:** The minimum balance required to create or keep an account open. This prevents 28 | "dust accounts" from filling storage. When the free plus the reserved balance (i.e. the total balance) 29 | fall below this, then the account is said to be dead; and it loses its functionality as well as any 30 | prior history and all information on it is removed from the chain's state. 31 | No account should ever have a total balance that is strictly between 0 and the existential 32 | deposit (exclusive). If this ever happens, it indicates either a bug in this module or an 33 | erroneous raw mutation of storage. 34 | 35 | - **Total Issuance:** The total number of units in existence in a system. 36 | 37 | - **Reaping an account:** The act of removing an account by resetting its nonce. 
Happens after its 38 | total balance has become zero (or, strictly speaking, less than the Existential Deposit). 39 | 40 | - **Free Balance:** The portion of a balance that is not reserved. The free balance is the only 41 | balance that matters for most operations. 42 | 43 | - **Reserved Balance:** Reserved balance still belongs to the account holder, but is suspended. 44 | Reserved balance can still be slashed, but only after all the free balance has been slashed. 45 | 46 | - **Imbalance:** A condition when some funds were credited or debited without equal and opposite accounting 47 | (i.e. a difference between total issuance and account balances). Functions that result in an imbalance will 48 | return an object of the `Imbalance` trait that can be managed within your runtime logic. (If an imbalance is 49 | simply dropped, it should automatically maintain any book-keeping such as total issuance.) 50 | 51 | - **Lock:** A freeze on a specified amount of an account's free balance until a specified block number. Multiple 52 | locks always operate over the same funds, so they "overlay" rather than "stack". 53 | 54 | ### Implementations 55 | 56 | The Balances module provides implementations for the following traits. If these traits provide the functionality 57 | that you need, then you can avoid coupling with the Balances module. 58 | 59 | - [`Currency`](https://docs.rs/frame-support/latest/frame_support/traits/trait.Currency.html): Functions for dealing with a 60 | fungible assets system. 61 | - [`ReservableCurrency`](https://docs.rs/frame-support/latest/frame_support/traits/trait.ReservableCurrency.html): 62 | Functions for dealing with assets that can be reserved from an account. 63 | - [`LockableCurrency`](https://docs.rs/frame-support/latest/frame_support/traits/trait.LockableCurrency.html): Functions for 64 | dealing with accounts that allow liquidity restrictions. 
65 | - [`Imbalance`](https://docs.rs/frame-support/latest/frame_support/traits/trait.Imbalance.html): Functions for handling 66 | imbalances between total issuance in the system and account balances. Must be used when a function 67 | creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). 68 | - [`IsDeadAccount`](https://docs.rs/frame-support/latest/frame_support/traits/trait.IsDeadAccount.html): Determiner to say whether a 69 | given account is unused. 70 | 71 | ## Interface 72 | 73 | ### Dispatchable Functions 74 | 75 | - `transfer` - Transfer some liquid free balance to another account. 76 | - `force_set_balance` - Set the balances of a given account. The origin of this call must be root. 77 | 78 | ## Usage 79 | 80 | The following examples show how to use the Balances module in your custom module. 81 | 82 | ### Examples from the FRAME 83 | 84 | The Contract module uses the `Currency` trait to handle gas payment, and its types inherit from `Currency`: 85 | 86 | ```rust 87 | use frame_support::traits::Currency; 88 | 89 | pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; 90 | pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; 91 | 92 | ``` 93 | 94 | The Staking module uses the `LockableCurrency` trait to lock a stash account's funds: 95 | 96 | ```rust 97 | use frame_support::traits::{WithdrawReasons, LockableCurrency}; 98 | use sp_runtime::traits::Bounded; 99 | pub trait Config: frame_system::Config { 100 | type Currency: LockableCurrency; 101 | } 102 | 103 | fn update_ledger( 104 | controller: &T::AccountId, 105 | ledger: &StakingLedger 106 | ) { 107 | T::Currency::set_lock( 108 | STAKING_ID, 109 | &ledger.stash, 110 | ledger.total, 111 | WithdrawReasons::all() 112 | ); 113 | // >::insert(controller, ledger); // Commented out as we don't have access to Staking's storage here. 
114 | } 115 | ``` 116 | 117 | ## Genesis config 118 | 119 | The Balances module depends on the [`GenesisConfig`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/struct.GenesisConfig.html). 120 | 121 | ## Assumptions 122 | 123 | * Total issued balanced of all accounts should be less than `Config::Balance::max_value()`. 124 | 125 | License: Apache-2.0 126 | -------------------------------------------------------------------------------- /frame/balances/rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-balances-rpc" 3 | version = "4.0.0-dev" 4 | authors = ["Astar Network"] 5 | edition = "2021" 6 | license = "Apache-2.0" 7 | homepage = "https://substrate.io" 8 | repository = "https://github.com/paritytech/substrate/" 9 | description = "RPC interface for balances pallet." 10 | readme = "README.md" 11 | 12 | [package.metadata.docs.rs] 13 | targets = ["x86_64-unknown-linux-gnu"] 14 | 15 | [dependencies] 16 | codec = { package = "parity-scale-codec", version = "3.2.2" } 17 | futures = "0.3.21" 18 | hex = "0.4.3" 19 | jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } 20 | node-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 21 | pallet-balances = { path = "../../balances", default-features = false } 22 | pallet-balances-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } 23 | sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 24 | sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 25 | sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 26 | sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = 
false } 27 | sp-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 28 | sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 29 | sp-weights = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 30 | swanky-runtime = { version = "1.7.0", path = "../../../runtime" } 31 | -------------------------------------------------------------------------------- /frame/balances/rpc/runtime-api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pallet-balances-rpc-runtime-api" 3 | version = "4.0.0-dev" 4 | authors = ["Astar Network"] 5 | edition = "2021" 6 | license = "Apache-2.0" 7 | homepage = "https://substrate.io" 8 | repository = "https://github.com/paritytech/substrate/" 9 | description = "RPC runtime API for transaction payment FRAME pallet" 10 | readme = "README.md" 11 | 12 | [package.metadata.docs.rs] 13 | targets = ["x86_64-unknown-linux-gnu"] 14 | 15 | [dependencies] 16 | codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } 17 | pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../balances" } 18 | sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 19 | sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 20 | sp-weights = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 21 | 22 | [features] 23 | default = ["std"] 24 | std = [ 25 | "codec/std", 26 | "pallet-balances/std", 27 | "sp-api/std", 28 | "sp-runtime/std", 29 | "sp-weights/std", 30 | ] 31 | -------------------------------------------------------------------------------- 
/frame/balances/rpc/runtime-api/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "std"), no_std)] 2 | 3 | use codec::Codec; 4 | pub use pallet_balances::AccountData; 5 | use sp_runtime::traits::Block as BlockT; 6 | 7 | sp_api::decl_runtime_apis! { 8 | pub trait BalancesApi 9 | where 10 | AccountId: Codec, 11 | Balance: Codec, 12 | { 13 | fn account(account_id: AccountId) -> AccountData; 14 | 15 | fn get_set_free_balance_extrinsic(account_id: AccountId, free_balance: Balance) -> ::Extrinsic; 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /frame/balances/rpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::{convert::TryInto, sync::Arc}; 2 | 3 | use codec::Codec; 4 | use futures::future::TryFutureExt; 5 | use jsonrpsee::{ 6 | core::{async_trait, RpcResult}, 7 | proc_macros::rpc, 8 | types::error::{CallError, ErrorObject}, 9 | }; 10 | use pallet_balances_rpc_runtime_api::AccountData; 11 | use sc_transaction_pool_api::TransactionPool; 12 | use sp_api::ProvideRuntimeApi; 13 | use sp_blockchain::HeaderBackend; 14 | use sp_rpc::number::NumberOrHex; 15 | use sp_runtime::{ 16 | generic::BlockId, 17 | traits::{Block as BlockT, MaybeDisplay}, 18 | }; 19 | use std::marker::{Send, Sync}; 20 | 21 | pub use pallet_balances_rpc_runtime_api::BalancesApi as BalancesRuntimeApi; 22 | 23 | /// RPC trait that provides methods for interacting with the dev balances functionalities. 24 | #[rpc(server)] 25 | #[async_trait] 26 | pub trait BalancesApi { 27 | #[method(name = "balance_getAccount")] 28 | fn get_account( 29 | &self, 30 | account_id: AccountId, 31 | at: Option, 32 | ) -> RpcResult>; 33 | 34 | #[method(name = "balance_setFreeBalance")] 35 | async fn set_free_balance(&self, account_id: AccountId, free_balance: Balance) 36 | -> RpcResult<()>; 37 | } 38 | 39 | /// Error type of this RPC api. 
40 | pub enum Error { 41 | /// The transaction was not decodable. 42 | DecodeError, 43 | /// The call to runtime failed. 44 | RuntimeError, 45 | } 46 | 47 | impl From for i32 { 48 | fn from(e: Error) -> i32 { 49 | match e { 50 | Error::RuntimeError => 1, 51 | Error::DecodeError => 2, 52 | } 53 | } 54 | } 55 | 56 | /// Provides RPC methods to query a dispatchable's class, weight and fee. 57 | pub struct Balances { 58 | /// Shared reference to the client. 59 | client: Arc, 60 | /// Shared reference to the transaction pool. 61 | pool: Arc

, 62 | } 63 | 64 | impl Balances { 65 | /// Creates a new instance of the TransactionPayment Rpc helper. 66 | pub fn new(client: Arc, pool: Arc

) -> Self { 67 | Self { client, pool } 68 | } 69 | } 70 | 71 | #[async_trait] 72 | impl 73 | BalancesApiServer<::Hash, AccountId, Balance> for Balances 74 | where 75 | Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, 76 | Client::Api: BalancesRuntimeApi, 77 | Pool: TransactionPool + 'static, 78 | AccountId: Clone + MaybeDisplay + Codec + Send + 'static, 79 | Balance: Codec + MaybeDisplay + Copy + TryInto + Send + Sync + 'static, 80 | { 81 | fn get_account( 82 | &self, 83 | account_id: AccountId, 84 | at: Option<::Hash>, 85 | ) -> RpcResult> { 86 | let runtime_api = self.client.runtime_api(); 87 | let at_hash = at.unwrap_or_else(|| self.client.info().best_hash); 88 | 89 | let account_data = runtime_api.account(at_hash, account_id).map_err(|e| { 90 | CallError::Custom(ErrorObject::owned( 91 | Error::DecodeError.into(), 92 | "Unable to get account data.", 93 | Some(e.to_string()), 94 | )) 95 | })?; 96 | 97 | Ok(account_data) 98 | } 99 | 100 | async fn set_free_balance( 101 | &self, 102 | account_id: AccountId, 103 | free_balance: Balance, 104 | ) -> RpcResult<()> { 105 | let best_block_hash = self.client.info().best_hash; 106 | 107 | // TODO: Find a way to construct Balances Call which can casted to `<::Block as BlockT>::Extrinsic` without using runtime_api. Is that 109 | // possible? 
110 | let extrinsic: <::Block as BlockT>::Extrinsic = match self 111 | .client 112 | .runtime_api() 113 | .get_set_free_balance_extrinsic(best_block_hash, account_id, free_balance) 114 | { 115 | Ok(extrinsic) => extrinsic, 116 | Err(_) => return RpcResult::Err(internal_err("cannot access runtime api")), 117 | }; 118 | 119 | self.pool 120 | .submit_one( 121 | &BlockId::Hash(best_block_hash), 122 | sc_transaction_pool_api::TransactionSource::Local, 123 | extrinsic, 124 | ) 125 | .map_ok(move |_| ()) 126 | .map_err(|err| internal_err(err)) 127 | .await 128 | } 129 | } 130 | 131 | pub fn err(code: i32, message: T, data: Option<&[u8]>) -> jsonrpsee::core::Error { 132 | jsonrpsee::core::Error::Call(jsonrpsee::types::error::CallError::Custom( 133 | jsonrpsee::types::error::ErrorObject::owned( 134 | code, 135 | message.to_string(), 136 | data.map(|bytes| { 137 | jsonrpsee::core::to_json_raw_value(&format!("0x{}", hex::encode(bytes))) 138 | .expect("fail to serialize data") 139 | }), 140 | ), 141 | )) 142 | } 143 | 144 | pub fn internal_err(message: T) -> jsonrpsee::core::Error { 145 | err(jsonrpsee::types::error::INTERNAL_ERROR_CODE, message, None) 146 | } 147 | 148 | pub fn internal_err_with_data(message: T, data: &[u8]) -> jsonrpsee::core::Error { 149 | err(jsonrpsee::types::error::INTERNAL_ERROR_CODE, message, Some(data)) 150 | } 151 | -------------------------------------------------------------------------------- /frame/balances/src/benchmarking.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 
8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! Balances pallet benchmarking. 19 | 20 | #![cfg(feature = "runtime-benchmarks")] 21 | 22 | use super::*; 23 | use crate::Pallet as Balances; 24 | 25 | use frame_benchmarking::v2::*; 26 | use frame_system::RawOrigin; 27 | use sp_runtime::traits::Bounded; 28 | use types::ExtraFlags; 29 | 30 | const SEED: u32 = 0; 31 | // existential deposit multiplier 32 | const ED_MULTIPLIER: u32 = 10; 33 | 34 | #[instance_benchmarks] 35 | mod benchmarks { 36 | use super::*; 37 | 38 | // Benchmark `transfer` extrinsic with the worst possible conditions: 39 | // * Transfer will kill the sender account. 40 | // * Transfer will create the recipient account. 41 | #[benchmark] 42 | fn transfer_allow_death() { 43 | let existential_deposit = T::ExistentialDeposit::get(); 44 | let caller = whitelisted_caller(); 45 | 46 | // Give some multiple of the existential deposit 47 | let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); 48 | let _ = as Currency<_>>::make_free_balance_be(&caller, balance); 49 | 50 | // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, 51 | // and reap this user. 
52 | let recipient: T::AccountId = account("recipient", 0, SEED); 53 | let recipient_lookup = T::Lookup::unlookup(recipient.clone()); 54 | let transfer_amount = 55 | existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); 56 | 57 | #[extrinsic_call] 58 | _(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); 59 | 60 | assert_eq!(Balances::::free_balance(&caller), Zero::zero()); 61 | assert_eq!(Balances::::free_balance(&recipient), transfer_amount); 62 | } 63 | 64 | // Benchmark `transfer` with the best possible condition: 65 | // * Both accounts exist and will continue to exist. 66 | #[benchmark(extra)] 67 | fn transfer_best_case() { 68 | let caller = whitelisted_caller(); 69 | let recipient: T::AccountId = account("recipient", 0, SEED); 70 | let recipient_lookup = T::Lookup::unlookup(recipient.clone()); 71 | 72 | // Give the sender account max funds for transfer (their account will never reasonably be 73 | // killed). 74 | let _ = 75 | as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); 76 | 77 | // Give the recipient account existential deposit (thus their account already exists). 78 | let existential_deposit = T::ExistentialDeposit::get(); 79 | let _ = 80 | as Currency<_>>::make_free_balance_be(&recipient, existential_deposit); 81 | let transfer_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); 82 | 83 | #[extrinsic_call] 84 | transfer_allow_death(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); 85 | 86 | assert!(!Balances::::free_balance(&caller).is_zero()); 87 | assert!(!Balances::::free_balance(&recipient).is_zero()); 88 | } 89 | 90 | // Benchmark `transfer_keep_alive` with the worst possible condition: 91 | // * The recipient account is created. 
92 | #[benchmark] 93 | fn transfer_keep_alive() { 94 | let caller = whitelisted_caller(); 95 | let recipient: T::AccountId = account("recipient", 0, SEED); 96 | let recipient_lookup = T::Lookup::unlookup(recipient.clone()); 97 | 98 | // Give the sender account max funds, thus a transfer will not kill account. 99 | let _ = 100 | as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); 101 | let existential_deposit = T::ExistentialDeposit::get(); 102 | let transfer_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); 103 | 104 | #[extrinsic_call] 105 | _(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); 106 | 107 | assert!(!Balances::::free_balance(&caller).is_zero()); 108 | assert_eq!(Balances::::free_balance(&recipient), transfer_amount); 109 | } 110 | 111 | // Benchmark `force_set_balance` coming from ROOT account. This always creates an account. 112 | #[benchmark] 113 | fn force_set_balance_creating() { 114 | let user: T::AccountId = account("user", 0, SEED); 115 | let user_lookup = T::Lookup::unlookup(user.clone()); 116 | 117 | // Give the user some initial balance. 118 | let existential_deposit = T::ExistentialDeposit::get(); 119 | let balance_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); 120 | let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); 121 | 122 | #[extrinsic_call] 123 | force_set_balance(RawOrigin::Root, user_lookup, balance_amount); 124 | 125 | assert_eq!(Balances::::free_balance(&user), balance_amount); 126 | } 127 | 128 | // Benchmark `force_set_balance` coming from ROOT account. This always kills an account. 129 | #[benchmark] 130 | fn force_set_balance_killing() { 131 | let user: T::AccountId = account("user", 0, SEED); 132 | let user_lookup = T::Lookup::unlookup(user.clone()); 133 | 134 | // Give the user some initial balance. 
135 | let existential_deposit = T::ExistentialDeposit::get(); 136 | let balance_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); 137 | let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); 138 | 139 | #[extrinsic_call] 140 | force_set_balance(RawOrigin::Root, user_lookup, Zero::zero()); 141 | 142 | assert!(Balances::::free_balance(&user).is_zero()); 143 | } 144 | 145 | // Benchmark `force_transfer` extrinsic with the worst possible conditions: 146 | // * Transfer will kill the sender account. 147 | // * Transfer will create the recipient account. 148 | #[benchmark] 149 | fn force_transfer() { 150 | let existential_deposit = T::ExistentialDeposit::get(); 151 | let source: T::AccountId = account("source", 0, SEED); 152 | let source_lookup = T::Lookup::unlookup(source.clone()); 153 | 154 | // Give some multiple of the existential deposit 155 | let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); 156 | let _ = as Currency<_>>::make_free_balance_be(&source, balance); 157 | 158 | // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, 159 | // and reap this user. 160 | let recipient: T::AccountId = account("recipient", 0, SEED); 161 | let recipient_lookup = T::Lookup::unlookup(recipient.clone()); 162 | let transfer_amount = existential_deposit 163 | .saturating_mul((ED_MULTIPLIER - 1).into()) 164 | .saturating_add(1u32.into()); 165 | 166 | #[extrinsic_call] 167 | _(RawOrigin::Root, source_lookup, recipient_lookup, transfer_amount); 168 | 169 | assert_eq!(Balances::::free_balance(&source), Zero::zero()); 170 | assert_eq!(Balances::::free_balance(&recipient), transfer_amount); 171 | } 172 | 173 | // This benchmark performs the same operation as `transfer` in the worst case scenario, 174 | // but additionally introduces many new users into the storage, increasing the the merkle 175 | // trie and PoV size. 
176 | #[benchmark(extra)] 177 | fn transfer_increasing_users(u: Linear<0, 1_000>) { 178 | // 1_000 is not very much, but this upper bound can be controlled by the CLI. 179 | let existential_deposit = T::ExistentialDeposit::get(); 180 | let caller = whitelisted_caller(); 181 | 182 | // Give some multiple of the existential deposit 183 | let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); 184 | let _ = as Currency<_>>::make_free_balance_be(&caller, balance); 185 | 186 | // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, 187 | // and reap this user. 188 | let recipient: T::AccountId = account("recipient", 0, SEED); 189 | let recipient_lookup = T::Lookup::unlookup(recipient.clone()); 190 | let transfer_amount = 191 | existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); 192 | 193 | // Create a bunch of users in storage. 194 | for i in 0..u { 195 | // The `account` function uses `blake2_256` to generate unique accounts, so these 196 | // should be quite random and evenly distributed in the trie. 
197 | let new_user: T::AccountId = account("new_user", i, SEED); 198 | let _ = as Currency<_>>::make_free_balance_be(&new_user, balance); 199 | } 200 | 201 | #[extrinsic_call] 202 | transfer_allow_death(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); 203 | 204 | assert_eq!(Balances::::free_balance(&caller), Zero::zero()); 205 | assert_eq!(Balances::::free_balance(&recipient), transfer_amount); 206 | } 207 | 208 | // Benchmark `transfer_all` with the worst possible condition: 209 | // * The recipient account is created 210 | // * The sender is killed 211 | #[benchmark] 212 | fn transfer_all() { 213 | let caller = whitelisted_caller(); 214 | let recipient: T::AccountId = account("recipient", 0, SEED); 215 | let recipient_lookup = T::Lookup::unlookup(recipient.clone()); 216 | 217 | // Give some multiple of the existential deposit 218 | let existential_deposit = T::ExistentialDeposit::get(); 219 | let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); 220 | let _ = as Currency<_>>::make_free_balance_be(&caller, balance); 221 | 222 | #[extrinsic_call] 223 | _(RawOrigin::Signed(caller.clone()), recipient_lookup, false); 224 | 225 | assert!(Balances::::free_balance(&caller).is_zero()); 226 | assert_eq!(Balances::::free_balance(&recipient), balance); 227 | } 228 | 229 | #[benchmark] 230 | fn force_unreserve() -> Result<(), BenchmarkError> { 231 | let user: T::AccountId = account("user", 0, SEED); 232 | let user_lookup = T::Lookup::unlookup(user.clone()); 233 | 234 | // Give some multiple of the existential deposit 235 | let ed = T::ExistentialDeposit::get(); 236 | let balance = ed.saturating_add(ed); 237 | let _ = as Currency<_>>::make_free_balance_be(&user, balance); 238 | 239 | // Reserve the balance 240 | as ReservableCurrency<_>>::reserve(&user, ed)?; 241 | assert_eq!(Balances::::reserved_balance(&user), ed); 242 | assert_eq!(Balances::::free_balance(&user), ed); 243 | 244 | #[extrinsic_call] 245 | _(RawOrigin::Root, user_lookup, 
balance); 246 | 247 | assert!(Balances::::reserved_balance(&user).is_zero()); 248 | assert_eq!(Balances::::free_balance(&user), ed.saturating_add(ed)); 249 | 250 | Ok(()) 251 | } 252 | 253 | #[benchmark] 254 | fn upgrade_accounts(u: Linear<1, 1_000>) { 255 | let caller: T::AccountId = whitelisted_caller(); 256 | let who = (0..u) 257 | .into_iter() 258 | .map(|i| -> T::AccountId { 259 | let user = account("old_user", i, SEED); 260 | let account = AccountData { 261 | free: T::ExistentialDeposit::get(), 262 | reserved: T::ExistentialDeposit::get(), 263 | frozen: Zero::zero(), 264 | flags: ExtraFlags::old_logic(), 265 | }; 266 | frame_system::Pallet::::inc_providers(&user); 267 | assert!(T::AccountStore::try_mutate_exists(&user, |a| -> DispatchResult { 268 | *a = Some(account); 269 | Ok(()) 270 | }) 271 | .is_ok()); 272 | assert!(!Balances::::account(&user).flags.is_new_logic()); 273 | assert_eq!(frame_system::Pallet::::providers(&user), 1); 274 | assert_eq!(frame_system::Pallet::::consumers(&user), 0); 275 | user 276 | }) 277 | .collect(); 278 | 279 | #[extrinsic_call] 280 | _(RawOrigin::Signed(caller.clone()), who); 281 | 282 | for i in 0..u { 283 | let user: T::AccountId = account("old_user", i, SEED); 284 | assert!(Balances::::account(&user).flags.is_new_logic()); 285 | assert_eq!(frame_system::Pallet::::providers(&user), 1); 286 | assert_eq!(frame_system::Pallet::::consumers(&user), 1); 287 | } 288 | } 289 | 290 | impl_benchmark_test_suite! { 291 | Balances, 292 | crate::tests::ExtBuilder::default().build(), 293 | crate::tests::Test, 294 | } 295 | } 296 | -------------------------------------------------------------------------------- /frame/balances/src/impl_fungible.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 
4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! Implementation of `fungible` traits for Balances pallet. 19 | use super::*; 20 | use frame_support::traits::tokens::{ 21 | Fortitude, 22 | Preservation::{self, Preserve, Protect}, 23 | Provenance::{self, Minted}, 24 | }; 25 | 26 | impl, I: 'static> fungible::Inspect for Pallet { 27 | type Balance = T::Balance; 28 | 29 | fn total_issuance() -> Self::Balance { 30 | TotalIssuance::::get() 31 | } 32 | fn active_issuance() -> Self::Balance { 33 | TotalIssuance::::get().saturating_sub(InactiveIssuance::::get()) 34 | } 35 | fn minimum_balance() -> Self::Balance { 36 | T::ExistentialDeposit::get() 37 | } 38 | fn total_balance(who: &T::AccountId) -> Self::Balance { 39 | Self::account(who).total() 40 | } 41 | fn balance(who: &T::AccountId) -> Self::Balance { 42 | Self::account(who).free 43 | } 44 | fn reducible_balance( 45 | who: &T::AccountId, 46 | preservation: Preservation, 47 | force: Fortitude, 48 | ) -> Self::Balance { 49 | let a = Self::account(who); 50 | let mut untouchable = Zero::zero(); 51 | if force == Polite { 52 | // Frozen balance applies to total. Anything on hold therefore gets discounted from the 53 | // limit given by the freezes. 54 | untouchable = a.frozen.saturating_sub(a.reserved); 55 | } 56 | // If we want to keep our provider ref.. 
57 | if preservation == Preserve 58 | // ..or we don't want the account to die and our provider ref is needed for it to live.. 59 | || preservation == Protect && !a.free.is_zero() && 60 | frame_system::Pallet::::providers(who) == 1 61 | // ..or we don't care about the account dying but our provider ref is required.. 62 | || preservation == Expendable && !a.free.is_zero() && 63 | !frame_system::Pallet::::can_dec_provider(who) 64 | { 65 | // ..then the ED needed.. 66 | untouchable = untouchable.max(T::ExistentialDeposit::get()); 67 | } 68 | // Liquid balance is what is neither on hold nor frozen/required for provider. 69 | a.free.saturating_sub(untouchable) 70 | } 71 | fn can_deposit( 72 | who: &T::AccountId, 73 | amount: Self::Balance, 74 | provenance: Provenance, 75 | ) -> DepositConsequence { 76 | if amount.is_zero() { 77 | return DepositConsequence::Success; 78 | } 79 | 80 | if provenance == Minted && TotalIssuance::::get().checked_add(&amount).is_none() { 81 | return DepositConsequence::Overflow; 82 | } 83 | 84 | let account = Self::account(who); 85 | let new_free = match account.free.checked_add(&amount) { 86 | None => return DepositConsequence::Overflow, 87 | Some(x) if x < T::ExistentialDeposit::get() => return DepositConsequence::BelowMinimum, 88 | Some(x) => x, 89 | }; 90 | 91 | match account.reserved.checked_add(&new_free) { 92 | Some(_) => {}, 93 | None => return DepositConsequence::Overflow, 94 | }; 95 | 96 | // NOTE: We assume that we are a provider, so don't need to do any checks in the 97 | // case of account creation. 
98 | 99 | DepositConsequence::Success 100 | } 101 | fn can_withdraw( 102 | who: &T::AccountId, 103 | amount: Self::Balance, 104 | ) -> WithdrawConsequence { 105 | if amount.is_zero() { 106 | return WithdrawConsequence::Success; 107 | } 108 | 109 | if TotalIssuance::::get().checked_sub(&amount).is_none() { 110 | return WithdrawConsequence::Underflow; 111 | } 112 | 113 | let account = Self::account(who); 114 | let new_free_balance = match account.free.checked_sub(&amount) { 115 | Some(x) => x, 116 | None => return WithdrawConsequence::BalanceLow, 117 | }; 118 | 119 | let liquid = Self::reducible_balance(who, Expendable, Polite); 120 | if amount > liquid { 121 | return WithdrawConsequence::Frozen; 122 | } 123 | 124 | // Provider restriction - total account balance cannot be reduced to zero if it cannot 125 | // sustain the loss of a provider reference. 126 | // NOTE: This assumes that the pallet is a provider (which is true). Is this ever changes, 127 | // then this will need to adapt accordingly. 128 | let ed = T::ExistentialDeposit::get(); 129 | let success = if new_free_balance < ed { 130 | if frame_system::Pallet::::can_dec_provider(who) { 131 | WithdrawConsequence::ReducedToZero(new_free_balance) 132 | } else { 133 | return WithdrawConsequence::WouldDie; 134 | } 135 | } else { 136 | WithdrawConsequence::Success 137 | }; 138 | 139 | let new_total_balance = new_free_balance.saturating_add(account.reserved); 140 | 141 | // Eventual free funds must be no less than the frozen balance. 
142 | if new_total_balance < account.frozen { 143 | return WithdrawConsequence::Frozen; 144 | } 145 | 146 | success 147 | } 148 | } 149 | 150 | impl, I: 'static> fungible::Unbalanced for Pallet { 151 | fn handle_dust(dust: fungible::Dust) { 152 | T::DustRemoval::on_unbalanced(dust.into_credit()); 153 | } 154 | fn write_balance( 155 | who: &T::AccountId, 156 | amount: Self::Balance, 157 | ) -> Result, DispatchError> { 158 | let max_reduction = 159 | >::reducible_balance(who, Expendable, Force); 160 | let (result, maybe_dust) = Self::mutate_account(who, |account| -> DispatchResult { 161 | // Make sure the reduction (if there is one) is no more than the maximum allowed. 162 | let reduction = account.free.saturating_sub(amount); 163 | ensure!(reduction <= max_reduction, Error::::InsufficientBalance); 164 | 165 | account.free = amount; 166 | Ok(()) 167 | })?; 168 | result?; 169 | Ok(maybe_dust) 170 | } 171 | 172 | fn set_total_issuance(amount: Self::Balance) { 173 | TotalIssuance::::mutate(|t| *t = amount); 174 | } 175 | 176 | fn deactivate(amount: Self::Balance) { 177 | InactiveIssuance::::mutate(|b| b.saturating_accrue(amount)); 178 | } 179 | 180 | fn reactivate(amount: Self::Balance) { 181 | InactiveIssuance::::mutate(|b| b.saturating_reduce(amount)); 182 | } 183 | } 184 | 185 | impl, I: 'static> fungible::Mutate for Pallet { 186 | fn done_mint_into(who: &T::AccountId, amount: Self::Balance) { 187 | Self::deposit_event(Event::::Minted { who: who.clone(), amount }); 188 | } 189 | fn done_burn_from(who: &T::AccountId, amount: Self::Balance) { 190 | Self::deposit_event(Event::::Burned { who: who.clone(), amount }); 191 | } 192 | fn done_shelve(who: &T::AccountId, amount: Self::Balance) { 193 | Self::deposit_event(Event::::Suspended { who: who.clone(), amount }); 194 | } 195 | fn done_restore(who: &T::AccountId, amount: Self::Balance) { 196 | Self::deposit_event(Event::::Restored { who: who.clone(), amount }); 197 | } 198 | fn done_transfer(source: &T::AccountId, dest: 
&T::AccountId, amount: Self::Balance) { 199 | Self::deposit_event(Event::::Transfer { 200 | from: source.clone(), 201 | to: dest.clone(), 202 | amount, 203 | }); 204 | } 205 | } 206 | 207 | impl, I: 'static> fungible::MutateHold for Pallet {} 208 | 209 | impl, I: 'static> fungible::InspectHold for Pallet { 210 | type Reason = T::HoldIdentifier; 211 | 212 | fn total_balance_on_hold(who: &T::AccountId) -> T::Balance { 213 | Self::account(who).reserved 214 | } 215 | fn reducible_total_balance_on_hold(who: &T::AccountId, force: Fortitude) -> Self::Balance { 216 | // The total balance must never drop below the freeze requirements if we're not forcing: 217 | let a = Self::account(who); 218 | let unavailable = if force == Force { 219 | Self::Balance::zero() 220 | } else { 221 | // The freeze lock applies to the total balance, so we can discount the free balance 222 | // from the amount which the total reserved balance must provide to satisfy it. 223 | a.frozen.saturating_sub(a.free) 224 | }; 225 | a.reserved.saturating_sub(unavailable) 226 | } 227 | fn balance_on_hold(reason: &Self::Reason, who: &T::AccountId) -> T::Balance { 228 | Holds::::get(who) 229 | .iter() 230 | .find(|x| &x.id == reason) 231 | .map_or_else(Zero::zero, |x| x.amount) 232 | } 233 | fn hold_available(reason: &Self::Reason, who: &T::AccountId) -> bool { 234 | if frame_system::Pallet::::providers(who) == 0 { 235 | return false; 236 | } 237 | let holds = Holds::::get(who); 238 | if holds.is_full() && !holds.iter().any(|x| &x.id == reason) { 239 | return false; 240 | } 241 | true 242 | } 243 | } 244 | 245 | impl, I: 'static> fungible::UnbalancedHold for Pallet { 246 | fn set_balance_on_hold( 247 | reason: &Self::Reason, 248 | who: &T::AccountId, 249 | amount: Self::Balance, 250 | ) -> DispatchResult { 251 | let mut new_account = Self::account(who); 252 | let mut holds = Holds::::get(who); 253 | let mut increase = true; 254 | let mut delta = amount; 255 | 256 | if let Some(item) = holds.iter_mut().find(|x| 
&x.id == reason) { 257 | delta = item.amount.max(amount) - item.amount.min(amount); 258 | increase = amount > item.amount; 259 | item.amount = amount; 260 | holds.retain(|x| !x.amount.is_zero()); 261 | } else { 262 | if !amount.is_zero() { 263 | holds 264 | .try_push(IdAmount { id: *reason, amount }) 265 | .map_err(|_| Error::::TooManyHolds)?; 266 | } 267 | } 268 | 269 | new_account.reserved = if increase { 270 | new_account.reserved.checked_add(&delta).ok_or(ArithmeticError::Overflow)? 271 | } else { 272 | new_account.reserved.checked_sub(&delta).ok_or(ArithmeticError::Underflow)? 273 | }; 274 | 275 | let (result, maybe_dust) = Self::try_mutate_account(who, |a, _| -> DispatchResult { 276 | *a = new_account; 277 | Ok(()) 278 | })?; 279 | debug_assert!( 280 | maybe_dust.is_none(), 281 | "Does not alter main balance; dust only happens when it is altered; qed" 282 | ); 283 | Holds::::insert(who, holds); 284 | Ok(result) 285 | } 286 | } 287 | 288 | impl, I: 'static> fungible::InspectFreeze for Pallet { 289 | type Id = T::FreezeIdentifier; 290 | 291 | fn balance_frozen(id: &Self::Id, who: &T::AccountId) -> Self::Balance { 292 | let locks = Freezes::::get(who); 293 | locks.into_iter().find(|l| &l.id == id).map_or(Zero::zero(), |l| l.amount) 294 | } 295 | 296 | fn can_freeze(id: &Self::Id, who: &T::AccountId) -> bool { 297 | let l = Freezes::::get(who); 298 | !l.is_full() || l.iter().any(|x| &x.id == id) 299 | } 300 | } 301 | 302 | impl, I: 'static> fungible::MutateFreeze for Pallet { 303 | fn set_freeze(id: &Self::Id, who: &T::AccountId, amount: Self::Balance) -> DispatchResult { 304 | if amount.is_zero() { 305 | return Self::thaw(id, who); 306 | } 307 | let mut locks = Freezes::::get(who); 308 | if let Some(i) = locks.iter_mut().find(|x| &x.id == id) { 309 | i.amount = amount; 310 | } else { 311 | locks 312 | .try_push(IdAmount { id: *id, amount }) 313 | .map_err(|_| Error::::TooManyFreezes)?; 314 | } 315 | Self::update_freezes(who, locks.as_bounded_slice()) 316 | } 317 
| 318 | fn extend_freeze(id: &Self::Id, who: &T::AccountId, amount: Self::Balance) -> DispatchResult { 319 | if amount.is_zero() { 320 | return Ok(()); 321 | } 322 | let mut locks = Freezes::::get(who); 323 | if let Some(i) = locks.iter_mut().find(|x| &x.id == id) { 324 | i.amount = i.amount.max(amount); 325 | } else { 326 | locks 327 | .try_push(IdAmount { id: *id, amount }) 328 | .map_err(|_| Error::::TooManyFreezes)?; 329 | } 330 | Self::update_freezes(who, locks.as_bounded_slice()) 331 | } 332 | 333 | fn thaw(id: &Self::Id, who: &T::AccountId) -> DispatchResult { 334 | let mut locks = Freezes::::get(who); 335 | locks.retain(|l| &l.id != id); 336 | Self::update_freezes(who, locks.as_bounded_slice()) 337 | } 338 | } 339 | 340 | impl, I: 'static> fungible::Balanced for Pallet { 341 | type OnDropCredit = fungible::DecreaseIssuance; 342 | type OnDropDebt = fungible::IncreaseIssuance; 343 | 344 | fn done_deposit(who: &T::AccountId, amount: Self::Balance) { 345 | Self::deposit_event(Event::::Deposit { who: who.clone(), amount }); 346 | } 347 | fn done_withdraw(who: &T::AccountId, amount: Self::Balance) { 348 | Self::deposit_event(Event::::Withdraw { who: who.clone(), amount }); 349 | } 350 | fn done_issue(amount: Self::Balance) { 351 | Self::deposit_event(Event::::Issued { amount }); 352 | } 353 | fn done_rescind(amount: Self::Balance) { 354 | Self::deposit_event(Event::::Rescinded { amount }); 355 | } 356 | } 357 | 358 | impl, I: 'static> fungible::BalancedHold for Pallet {} 359 | -------------------------------------------------------------------------------- /frame/balances/src/migration.rs: -------------------------------------------------------------------------------- 1 | // Copyright Parity Technologies (UK) Ltd. 2 | // This file is part of Polkadot. 
3 | 4 | // Polkadot is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Polkadot is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Polkadot. If not, see . 16 | 17 | use super::*; 18 | use frame_support::{ 19 | pallet_prelude::*, 20 | traits::{OnRuntimeUpgrade, PalletInfoAccess}, 21 | weights::Weight, 22 | }; 23 | 24 | fn migrate_v0_to_v1, I: 'static>(accounts: &[T::AccountId]) -> Weight { 25 | let onchain_version = Pallet::::on_chain_storage_version(); 26 | 27 | if onchain_version == 0 { 28 | let total = accounts 29 | .iter() 30 | .map(|a| Pallet::::total_balance(a)) 31 | .fold(T::Balance::zero(), |a, e| a.saturating_add(e)); 32 | Pallet::::deactivate(total); 33 | 34 | // Remove the old `StorageVersion` type. 35 | frame_support::storage::unhashed::kill(&frame_support::storage::storage_prefix( 36 | Pallet::::name().as_bytes(), 37 | "StorageVersion".as_bytes(), 38 | )); 39 | 40 | // Set storage version to `1`. 41 | StorageVersion::new(1).put::>(); 42 | 43 | log::info!(target: LOG_TARGET, "Storage to version 1"); 44 | T::DbWeight::get().reads_writes(2 + accounts.len() as u64, 3) 45 | } else { 46 | log::info!( 47 | target: LOG_TARGET, 48 | "Migration did not execute. This probably should be removed" 49 | ); 50 | T::DbWeight::get().reads(1) 51 | } 52 | } 53 | 54 | // NOTE: This must be used alongside the account whose balance is expected to be inactive. 55 | // Generally this will be used for the XCM teleport checking account. 
56 | pub struct MigrateToTrackInactive(PhantomData<(T, A, I)>); 57 | impl, A: Get, I: 'static> OnRuntimeUpgrade 58 | for MigrateToTrackInactive 59 | { 60 | fn on_runtime_upgrade() -> Weight { 61 | migrate_v0_to_v1::(&[A::get()]) 62 | } 63 | } 64 | 65 | // NOTE: This must be used alongside the accounts whose balance is expected to be inactive. 66 | // Generally this will be used for the XCM teleport checking accounts. 67 | pub struct MigrateManyToTrackInactive(PhantomData<(T, A, I)>); 68 | impl, A: Get>, I: 'static> OnRuntimeUpgrade 69 | for MigrateManyToTrackInactive 70 | { 71 | fn on_runtime_upgrade() -> Weight { 72 | migrate_v0_to_v1::(&A::get()) 73 | } 74 | } 75 | 76 | pub struct ResetInactive(PhantomData<(T, I)>); 77 | impl, I: 'static> OnRuntimeUpgrade for ResetInactive { 78 | fn on_runtime_upgrade() -> Weight { 79 | let onchain_version = Pallet::::on_chain_storage_version(); 80 | 81 | if onchain_version == 1 { 82 | // Remove the old `StorageVersion` type. 83 | frame_support::storage::unhashed::kill(&frame_support::storage::storage_prefix( 84 | Pallet::::name().as_bytes(), 85 | "StorageVersion".as_bytes(), 86 | )); 87 | 88 | InactiveIssuance::::kill(); 89 | 90 | // Set storage version to `0`. 91 | StorageVersion::new(0).put::>(); 92 | 93 | log::info!(target: LOG_TARGET, "Storage to version 0"); 94 | T::DbWeight::get().reads_writes(1, 2) 95 | } else { 96 | log::info!( 97 | target: LOG_TARGET, 98 | "Migration did not execute. This probably should be removed" 99 | ); 100 | T::DbWeight::get().reads(1) 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /frame/balances/src/tests/dispatchable_tests.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 
4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! Tests regarding the functionality of the dispatchables/extrinsics. 19 | 20 | use super::*; 21 | use frame_support::traits::tokens::Preservation::Expendable; 22 | use fungible::{hold::Mutate as HoldMutate, Inspect, Mutate}; 23 | 24 | #[test] 25 | fn default_indexing_on_new_accounts_should_not_work2() { 26 | ExtBuilder::default() 27 | .existential_deposit(10) 28 | .monied(true) 29 | .build_and_execute_with(|| { 30 | // account 5 should not exist 31 | // ext_deposit is 10, value is 9, not satisfies for ext_deposit 32 | assert_noop!( 33 | Balances::transfer_allow_death(Some(1).into(), 5, 9), 34 | TokenError::BelowMinimum, 35 | ); 36 | assert_eq!(Balances::free_balance(1), 100); 37 | }); 38 | } 39 | 40 | #[test] 41 | fn dust_account_removal_should_work() { 42 | ExtBuilder::default() 43 | .existential_deposit(100) 44 | .monied(true) 45 | .build_and_execute_with(|| { 46 | System::inc_account_nonce(&2); 47 | assert_eq!(System::account_nonce(&2), 1); 48 | assert_eq!(Balances::total_balance(&2), 2000); 49 | // index 1 (account 2) becomes zombie 50 | assert_ok!(Balances::transfer_allow_death(Some(2).into(), 5, 1901)); 51 | assert_eq!(Balances::total_balance(&2), 0); 52 | assert_eq!(Balances::total_balance(&5), 1901); 53 | assert_eq!(System::account_nonce(&2), 0); 54 | }); 55 | } 56 | 57 | #[test] 58 | fn 
balance_transfer_works() { 59 | ExtBuilder::default().build_and_execute_with(|| { 60 | let _ = Balances::mint_into(&1, 111); 61 | assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 69)); 62 | assert_eq!(Balances::total_balance(&1), 42); 63 | assert_eq!(Balances::total_balance(&2), 69); 64 | }); 65 | } 66 | 67 | #[test] 68 | fn force_transfer_works() { 69 | ExtBuilder::default().build_and_execute_with(|| { 70 | let _ = Balances::mint_into(&1, 111); 71 | assert_noop!(Balances::force_transfer(Some(2).into(), 1, 2, 69), BadOrigin,); 72 | assert_ok!(Balances::force_transfer(RawOrigin::Root.into(), 1, 2, 69)); 73 | assert_eq!(Balances::total_balance(&1), 42); 74 | assert_eq!(Balances::total_balance(&2), 69); 75 | }); 76 | } 77 | 78 | #[test] 79 | fn balance_transfer_when_on_hold_should_not_work() { 80 | ExtBuilder::default().build_and_execute_with(|| { 81 | let _ = Balances::mint_into(&1, 111); 82 | assert_ok!(Balances::hold(&TestId::Foo, &1, 69)); 83 | assert_noop!( 84 | Balances::transfer_allow_death(Some(1).into(), 2, 69), 85 | TokenError::FundsUnavailable, 86 | ); 87 | }); 88 | } 89 | 90 | #[test] 91 | fn transfer_keep_alive_works() { 92 | ExtBuilder::default().existential_deposit(1).build_and_execute_with(|| { 93 | let _ = Balances::mint_into(&1, 100); 94 | assert_noop!( 95 | Balances::transfer_keep_alive(Some(1).into(), 2, 100), 96 | TokenError::NotExpendable 97 | ); 98 | assert_eq!(Balances::total_balance(&1), 100); 99 | assert_eq!(Balances::total_balance(&2), 0); 100 | }); 101 | } 102 | 103 | #[test] 104 | fn transfer_keep_alive_all_free_succeed() { 105 | ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { 106 | assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 300)); 107 | assert_ok!(Balances::hold(&TestId::Foo, &1, 100)); 108 | assert_ok!(Balances::transfer_keep_alive(Some(1).into(), 2, 100)); 109 | assert_eq!(Balances::total_balance(&1), 200); 110 | assert_eq!(Balances::total_balance(&2), 100); 111 | }); 112 | } 
113 | 114 | #[test] 115 | fn transfer_all_works_1() { 116 | ExtBuilder::default().existential_deposit(100).build().execute_with(|| { 117 | // setup 118 | assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 200)); 119 | assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 2, 0)); 120 | // transfer all and allow death 121 | assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); 122 | assert_eq!(Balances::total_balance(&1), 0); 123 | assert_eq!(Balances::total_balance(&2), 200); 124 | }); 125 | } 126 | 127 | #[test] 128 | fn transfer_all_works_2() { 129 | ExtBuilder::default().existential_deposit(100).build().execute_with(|| { 130 | // setup 131 | assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 200)); 132 | assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 2, 0)); 133 | // transfer all and keep alive 134 | assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); 135 | assert_eq!(Balances::total_balance(&1), 100); 136 | assert_eq!(Balances::total_balance(&2), 100); 137 | }); 138 | } 139 | 140 | #[test] 141 | fn transfer_all_works_3() { 142 | ExtBuilder::default().existential_deposit(100).build().execute_with(|| { 143 | // setup 144 | assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 210)); 145 | assert_ok!(Balances::hold(&TestId::Foo, &1, 10)); 146 | assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 2, 0)); 147 | // transfer all and allow death w/ reserved 148 | assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); 149 | assert_eq!(Balances::total_balance(&1), 110); 150 | assert_eq!(Balances::total_balance(&2), 100); 151 | }); 152 | } 153 | 154 | #[test] 155 | fn transfer_all_works_4() { 156 | ExtBuilder::default().existential_deposit(100).build().execute_with(|| { 157 | // setup 158 | assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1, 210)); 159 | assert_ok!(Balances::hold(&TestId::Foo, &1, 10)); 160 | 
assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 2, 0)); 161 | // transfer all and keep alive w/ reserved 162 | assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); 163 | assert_eq!(Balances::total_balance(&1), 110); 164 | assert_eq!(Balances::total_balance(&2), 100); 165 | }); 166 | } 167 | 168 | #[test] 169 | fn set_balance_handles_killing_account() { 170 | ExtBuilder::default().build_and_execute_with(|| { 171 | let _ = Balances::mint_into(&1, 111); 172 | assert_ok!(frame_system::Pallet::::inc_consumers(&1)); 173 | assert_noop!( 174 | Balances::force_set_balance(RuntimeOrigin::root(), 1, 0), 175 | DispatchError::ConsumerRemaining, 176 | ); 177 | }); 178 | } 179 | 180 | #[test] 181 | fn set_balance_handles_total_issuance() { 182 | ExtBuilder::default().build_and_execute_with(|| { 183 | let old_total_issuance = Balances::total_issuance(); 184 | assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1337, 69)); 185 | assert_eq!(Balances::total_issuance(), old_total_issuance + 69); 186 | assert_eq!(Balances::total_balance(&1337), 69); 187 | assert_eq!(Balances::free_balance(&1337), 69); 188 | }); 189 | } 190 | 191 | #[test] 192 | fn upgrade_accounts_should_work() { 193 | ExtBuilder::default() 194 | .existential_deposit(1) 195 | .monied(true) 196 | .build_and_execute_with(|| { 197 | System::inc_providers(&7); 198 | assert_ok!(::AccountStore::try_mutate_exists( 199 | &7, 200 | |a| -> DispatchResult { 201 | *a = Some(AccountData { 202 | free: 5, 203 | reserved: 5, 204 | frozen: Zero::zero(), 205 | flags: crate::types::ExtraFlags::old_logic(), 206 | }); 207 | Ok(()) 208 | } 209 | )); 210 | assert!(!Balances::account(&7).flags.is_new_logic()); 211 | assert_eq!(System::providers(&7), 1); 212 | assert_eq!(System::consumers(&7), 0); 213 | assert_ok!(Balances::upgrade_accounts(Some(1).into(), vec![7])); 214 | assert!(Balances::account(&7).flags.is_new_logic()); 215 | assert_eq!(System::providers(&7), 1); 216 | assert_eq!(System::consumers(&7), 1); 
217 | 218 | >::unreserve(&7, 5); 219 | assert_ok!(>::transfer(&7, &1, 10, Expendable)); 220 | assert_eq!(Balances::total_balance(&7), 0); 221 | assert_eq!(System::providers(&7), 0); 222 | assert_eq!(System::consumers(&7), 0); 223 | }); 224 | } 225 | -------------------------------------------------------------------------------- /frame/balances/src/tests/fungible_conformance_tests.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | use super::*; 19 | use frame_support::traits::fungible::{conformance_tests, Inspect, Mutate}; 20 | use paste::paste; 21 | 22 | macro_rules! run_tests { 23 | ($path:path, $ext_deposit:expr, $($name:ident),*) => { 24 | $( 25 | paste! 
{ 26 | #[test] 27 | fn [< $name _existential_deposit_ $ext_deposit _dust_trap_on >]() { 28 | let trap_account = ::AccountId::from(65174286u64); 29 | let builder = ExtBuilder::default().existential_deposit($ext_deposit).dust_trap(trap_account); 30 | builder.build_and_execute_with(|| { 31 | Balances::set_balance(&trap_account, Balances::minimum_balance()); 32 | $path::$name::< 33 | Balances, 34 | ::AccountId, 35 | >(Some(trap_account)); 36 | }); 37 | } 38 | 39 | #[test] 40 | fn [< $name _existential_deposit_ $ext_deposit _dust_trap_off >]() { 41 | let builder = ExtBuilder::default().existential_deposit($ext_deposit); 42 | builder.build_and_execute_with(|| { 43 | $path::$name::< 44 | Balances, 45 | ::AccountId, 46 | >(None); 47 | }); 48 | } 49 | } 50 | )* 51 | }; 52 | ($path:path, $ext_deposit:expr) => { 53 | run_tests!( 54 | $path, 55 | $ext_deposit, 56 | mint_into_success, 57 | mint_into_overflow, 58 | mint_into_below_minimum, 59 | burn_from_exact_success, 60 | burn_from_best_effort_success, 61 | burn_from_exact_insufficient_funds, 62 | restore_success, 63 | restore_overflow, 64 | restore_below_minimum, 65 | shelve_success, 66 | shelve_insufficient_funds, 67 | transfer_success, 68 | transfer_expendable_all, 69 | transfer_expendable_dust, 70 | transfer_protect_preserve, 71 | set_balance_mint_success, 72 | set_balance_burn_success, 73 | can_deposit_success, 74 | can_deposit_below_minimum, 75 | can_deposit_overflow, 76 | can_withdraw_success, 77 | can_withdraw_reduced_to_zero, 78 | can_withdraw_balance_low, 79 | reducible_balance_expendable, 80 | reducible_balance_protect_preserve 81 | ); 82 | }; 83 | } 84 | 85 | run_tests!(conformance_tests::inspect_mutate, 1); 86 | run_tests!(conformance_tests::inspect_mutate, 2); 87 | run_tests!(conformance_tests::inspect_mutate, 5); 88 | run_tests!(conformance_tests::inspect_mutate, 1000); 89 | -------------------------------------------------------------------------------- /frame/balances/src/tests/fungible_tests.rs: 
-------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! Tests regarding the functionality of the `fungible` trait set implementations. 19 | 20 | use super::*; 21 | use frame_support::traits::tokens::{ 22 | Fortitude::{Force, Polite}, 23 | Precision::{BestEffort, Exact}, 24 | Preservation::{Expendable, Preserve, Protect}, 25 | Restriction::Free, 26 | }; 27 | use fungible::{Inspect, InspectFreeze, InspectHold, Mutate, MutateFreeze, MutateHold, Unbalanced}; 28 | 29 | #[test] 30 | fn inspect_trait_reducible_balance_basic_works() { 31 | ExtBuilder::default().existential_deposit(10).build_and_execute_with(|| { 32 | Balances::set_balance(&1, 100); 33 | assert_eq!(Balances::reducible_balance(&1, Expendable, Polite), 100); 34 | assert_eq!(Balances::reducible_balance(&1, Protect, Polite), 90); 35 | assert_eq!(Balances::reducible_balance(&1, Preserve, Polite), 90); 36 | assert_eq!(Balances::reducible_balance(&1, Expendable, Force), 100); 37 | assert_eq!(Balances::reducible_balance(&1, Protect, Force), 90); 38 | assert_eq!(Balances::reducible_balance(&1, Preserve, Force), 90); 39 | }); 40 | } 41 | 42 | #[test] 43 | fn inspect_trait_reducible_balance_other_provide_works() { 44 | 
ExtBuilder::default().existential_deposit(10).build_and_execute_with(|| { 45 | Balances::set_balance(&1, 100); 46 | System::inc_providers(&1); 47 | assert_eq!(Balances::reducible_balance(&1, Expendable, Polite), 100); 48 | assert_eq!(Balances::reducible_balance(&1, Protect, Polite), 100); 49 | assert_eq!(Balances::reducible_balance(&1, Preserve, Polite), 90); 50 | assert_eq!(Balances::reducible_balance(&1, Expendable, Force), 100); 51 | assert_eq!(Balances::reducible_balance(&1, Protect, Force), 100); 52 | assert_eq!(Balances::reducible_balance(&1, Preserve, Force), 90); 53 | }); 54 | } 55 | 56 | #[test] 57 | fn inspect_trait_reducible_balance_frozen_works() { 58 | ExtBuilder::default().existential_deposit(10).build_and_execute_with(|| { 59 | Balances::set_balance(&1, 100); 60 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 50)); 61 | assert_eq!(Balances::reducible_balance(&1, Expendable, Polite), 50); 62 | assert_eq!(Balances::reducible_balance(&1, Protect, Polite), 50); 63 | assert_eq!(Balances::reducible_balance(&1, Preserve, Polite), 50); 64 | assert_eq!(Balances::reducible_balance(&1, Expendable, Force), 90); 65 | assert_eq!(Balances::reducible_balance(&1, Protect, Force), 90); 66 | assert_eq!(Balances::reducible_balance(&1, Preserve, Force), 90); 67 | }); 68 | } 69 | 70 | #[test] 71 | fn unbalanced_trait_set_balance_works() { 72 | ExtBuilder::default().build_and_execute_with(|| { 73 | assert_eq!(>::balance(&1337), 0); 74 | assert_ok!(Balances::write_balance(&1337, 100)); 75 | assert_eq!(>::balance(&1337), 100); 76 | 77 | assert_ok!(>::hold(&TestId::Foo, &1337, 60)); 78 | assert_eq!(>::balance(&1337), 40); 79 | assert_eq!(>::total_balance_on_hold(&1337), 60); 80 | assert_eq!( 81 | >::balance_on_hold(&TestId::Foo, &1337), 82 | 60 83 | ); 84 | 85 | assert_noop!(Balances::write_balance(&1337, 0), Error::::InsufficientBalance); 86 | 87 | assert_ok!(Balances::write_balance(&1337, 1)); 88 | assert_eq!(>::balance(&1337), 1); 89 | assert_eq!( 90 | 
>::balance_on_hold(&TestId::Foo, &1337), 91 | 60 92 | ); 93 | 94 | assert_ok!(>::release(&TestId::Foo, &1337, 60, Exact)); 95 | assert_eq!(>::balance_on_hold(&TestId::Foo, &1337), 0); 96 | assert_eq!(>::total_balance_on_hold(&1337), 0); 97 | }); 98 | } 99 | 100 | #[test] 101 | fn unbalanced_trait_set_total_issuance_works() { 102 | ExtBuilder::default().build_and_execute_with(|| { 103 | assert_eq!(>::total_issuance(), 0); 104 | Balances::set_total_issuance(100); 105 | assert_eq!(>::total_issuance(), 100); 106 | }); 107 | } 108 | 109 | #[test] 110 | fn unbalanced_trait_decrease_balance_simple_works() { 111 | ExtBuilder::default().build_and_execute_with(|| { 112 | // An Account that starts at 100 113 | assert_ok!(Balances::write_balance(&1337, 100)); 114 | assert_eq!(>::balance(&1337), 100); 115 | // and reserves 50 116 | assert_ok!(>::hold(&TestId::Foo, &1337, 50)); 117 | assert_eq!(>::balance(&1337), 50); 118 | // and is decreased by 20 119 | assert_ok!(Balances::decrease_balance(&1337, 20, Exact, Expendable, Polite)); 120 | assert_eq!(>::balance(&1337), 30); 121 | }); 122 | } 123 | 124 | #[test] 125 | fn unbalanced_trait_decrease_balance_works_1() { 126 | ExtBuilder::default().build_and_execute_with(|| { 127 | assert_ok!(Balances::write_balance(&1337, 100)); 128 | assert_eq!(>::balance(&1337), 100); 129 | 130 | assert_noop!( 131 | Balances::decrease_balance(&1337, 101, Exact, Expendable, Polite), 132 | TokenError::FundsUnavailable 133 | ); 134 | assert_eq!(Balances::decrease_balance(&1337, 100, Exact, Expendable, Polite), Ok(100)); 135 | assert_eq!(>::balance(&1337), 0); 136 | }); 137 | } 138 | 139 | #[test] 140 | fn unbalanced_trait_decrease_balance_works_2() { 141 | ExtBuilder::default().build_and_execute_with(|| { 142 | // free: 40, reserved: 60 143 | assert_ok!(Balances::write_balance(&1337, 100)); 144 | assert_ok!(Balances::hold(&TestId::Foo, &1337, 60)); 145 | assert_eq!(>::balance(&1337), 40); 146 | assert_eq!(Balances::total_balance_on_hold(&1337), 60); 147 
| assert_noop!( 148 | Balances::decrease_balance(&1337, 40, Exact, Expendable, Polite), 149 | Error::::InsufficientBalance 150 | ); 151 | assert_eq!(Balances::decrease_balance(&1337, 39, Exact, Expendable, Polite), Ok(39)); 152 | assert_eq!(>::balance(&1337), 1); 153 | assert_eq!(Balances::total_balance_on_hold(&1337), 60); 154 | }); 155 | } 156 | 157 | #[test] 158 | fn unbalanced_trait_decrease_balance_at_most_works_1() { 159 | ExtBuilder::default().build_and_execute_with(|| { 160 | assert_ok!(Balances::write_balance(&1337, 100)); 161 | assert_eq!(>::balance(&1337), 100); 162 | 163 | assert_eq!(Balances::decrease_balance(&1337, 101, BestEffort, Expendable, Polite), Ok(100)); 164 | assert_eq!(>::balance(&1337), 0); 165 | }); 166 | } 167 | 168 | #[test] 169 | fn unbalanced_trait_decrease_balance_at_most_works_2() { 170 | ExtBuilder::default().build_and_execute_with(|| { 171 | assert_ok!(Balances::write_balance(&1337, 99)); 172 | assert_eq!(Balances::decrease_balance(&1337, 99, BestEffort, Expendable, Polite), Ok(99)); 173 | assert_eq!(>::balance(&1337), 0); 174 | }); 175 | } 176 | 177 | #[test] 178 | fn unbalanced_trait_decrease_balance_at_most_works_3() { 179 | ExtBuilder::default().build_and_execute_with(|| { 180 | // free: 40, reserved: 60 181 | assert_ok!(Balances::write_balance(&1337, 100)); 182 | assert_ok!(Balances::hold(&TestId::Foo, &1337, 60)); 183 | assert_eq!(Balances::free_balance(1337), 40); 184 | assert_eq!(Balances::total_balance_on_hold(&1337), 60); 185 | assert_eq!(Balances::decrease_balance(&1337, 0, BestEffort, Expendable, Polite), Ok(0)); 186 | assert_eq!(Balances::free_balance(1337), 40); 187 | assert_eq!(Balances::total_balance_on_hold(&1337), 60); 188 | assert_eq!(Balances::decrease_balance(&1337, 10, BestEffort, Expendable, Polite), Ok(10)); 189 | assert_eq!(Balances::free_balance(1337), 30); 190 | assert_eq!(Balances::decrease_balance(&1337, 200, BestEffort, Expendable, Polite), Ok(29)); 191 | assert_eq!(>::balance(&1337), 1); 192 | 
assert_eq!(Balances::free_balance(1337), 1); 193 | assert_eq!(Balances::total_balance_on_hold(&1337), 60); 194 | }); 195 | } 196 | 197 | #[test] 198 | fn unbalanced_trait_increase_balance_works() { 199 | ExtBuilder::default().build_and_execute_with(|| { 200 | assert_noop!(Balances::increase_balance(&1337, 0, Exact), TokenError::BelowMinimum); 201 | assert_eq!(Balances::increase_balance(&1337, 1, Exact), Ok(1)); 202 | assert_noop!(Balances::increase_balance(&1337, u64::MAX, Exact), ArithmeticError::Overflow); 203 | }); 204 | } 205 | 206 | #[test] 207 | fn unbalanced_trait_increase_balance_at_most_works() { 208 | ExtBuilder::default().build_and_execute_with(|| { 209 | assert_eq!(Balances::increase_balance(&1337, 0, BestEffort), Ok(0)); 210 | assert_eq!(Balances::increase_balance(&1337, 1, BestEffort), Ok(1)); 211 | assert_eq!(Balances::increase_balance(&1337, u64::MAX, BestEffort), Ok(u64::MAX - 1)); 212 | }); 213 | } 214 | 215 | #[test] 216 | fn freezing_and_holds_should_overlap() { 217 | ExtBuilder::default() 218 | .existential_deposit(1) 219 | .monied(true) 220 | .build_and_execute_with(|| { 221 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 10)); 222 | assert_ok!(Balances::hold(&TestId::Foo, &1, 9)); 223 | assert_eq!(Balances::account(&1).free, 1); 224 | assert_eq!(System::consumers(&1), 1); 225 | assert_eq!(Balances::account(&1).free, 1); 226 | assert_eq!(Balances::account(&1).frozen, 10); 227 | assert_eq!(Balances::account(&1).reserved, 9); 228 | assert_eq!(Balances::total_balance_on_hold(&1), 9); 229 | }); 230 | } 231 | 232 | #[test] 233 | fn frozen_hold_balance_cannot_be_moved_without_force() { 234 | ExtBuilder::default() 235 | .existential_deposit(1) 236 | .monied(true) 237 | .build_and_execute_with(|| { 238 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 10)); 239 | assert_ok!(Balances::hold(&TestId::Foo, &1, 9)); 240 | assert_eq!(Balances::reducible_total_balance_on_hold(&1, Force), 9); 241 | 
assert_eq!(Balances::reducible_total_balance_on_hold(&1, Polite), 0); 242 | let e = TokenError::Frozen; 243 | assert_noop!( 244 | Balances::transfer_on_hold(&TestId::Foo, &1, &2, 1, Exact, Free, Polite), 245 | e 246 | ); 247 | assert_ok!(Balances::transfer_on_hold(&TestId::Foo, &1, &2, 1, Exact, Free, Force)); 248 | }); 249 | } 250 | 251 | #[test] 252 | fn frozen_hold_balance_best_effort_transfer_works() { 253 | ExtBuilder::default() 254 | .existential_deposit(1) 255 | .monied(true) 256 | .build_and_execute_with(|| { 257 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); 258 | assert_ok!(Balances::hold(&TestId::Foo, &1, 9)); 259 | assert_eq!(Balances::reducible_total_balance_on_hold(&1, Force), 9); 260 | assert_eq!(Balances::reducible_total_balance_on_hold(&1, Polite), 5); 261 | assert_ok!(Balances::transfer_on_hold( 262 | &TestId::Foo, 263 | &1, 264 | &2, 265 | 10, 266 | BestEffort, 267 | Free, 268 | Polite 269 | )); 270 | assert_eq!(Balances::total_balance(&1), 5); 271 | assert_eq!(Balances::total_balance(&2), 25); 272 | }); 273 | } 274 | 275 | #[test] 276 | fn partial_freezing_should_work() { 277 | ExtBuilder::default() 278 | .existential_deposit(1) 279 | .monied(true) 280 | .build_and_execute_with(|| { 281 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); 282 | assert_eq!(System::consumers(&1), 1); 283 | assert_ok!(>::transfer(&1, &2, 5, Expendable)); 284 | assert_noop!( 285 | >::transfer(&1, &2, 1, Expendable), 286 | TokenError::Frozen 287 | ); 288 | }); 289 | } 290 | 291 | #[test] 292 | fn thaw_should_work() { 293 | ExtBuilder::default() 294 | .existential_deposit(1) 295 | .monied(true) 296 | .build_and_execute_with(|| { 297 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, u64::MAX)); 298 | assert_ok!(Balances::thaw(&TestId::Foo, &1)); 299 | assert_eq!(System::consumers(&1), 0); 300 | assert_eq!(Balances::balance_frozen(&TestId::Foo, &1), 0); 301 | assert_eq!(Balances::account(&1).frozen, 0); 302 | assert_ok!(>::transfer(&1, &2, 10, 
Expendable)); 303 | }); 304 | } 305 | 306 | #[test] 307 | fn set_freeze_zero_should_work() { 308 | ExtBuilder::default() 309 | .existential_deposit(1) 310 | .monied(true) 311 | .build_and_execute_with(|| { 312 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, u64::MAX)); 313 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 0)); 314 | assert_eq!(System::consumers(&1), 0); 315 | assert_eq!(Balances::balance_frozen(&TestId::Foo, &1), 0); 316 | assert_eq!(Balances::account(&1).frozen, 0); 317 | assert_ok!(>::transfer(&1, &2, 10, Expendable)); 318 | }); 319 | } 320 | 321 | #[test] 322 | fn set_freeze_should_work() { 323 | ExtBuilder::default() 324 | .existential_deposit(1) 325 | .monied(true) 326 | .build_and_execute_with(|| { 327 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, u64::MAX)); 328 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); 329 | assert_ok!(>::transfer(&1, &2, 5, Expendable)); 330 | assert_noop!( 331 | >::transfer(&1, &2, 1, Expendable), 332 | TokenError::Frozen 333 | ); 334 | }); 335 | } 336 | 337 | #[test] 338 | fn extend_freeze_should_work() { 339 | ExtBuilder::default() 340 | .existential_deposit(1) 341 | .monied(true) 342 | .build_and_execute_with(|| { 343 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); 344 | assert_ok!(Balances::extend_freeze(&TestId::Foo, &1, 10)); 345 | assert_eq!(Balances::account(&1).frozen, 10); 346 | assert_eq!(Balances::balance_frozen(&TestId::Foo, &1), 10); 347 | assert_noop!( 348 | >::transfer(&1, &2, 1, Expendable), 349 | TokenError::Frozen 350 | ); 351 | }); 352 | } 353 | 354 | #[test] 355 | fn double_freezing_should_work() { 356 | ExtBuilder::default() 357 | .existential_deposit(1) 358 | .monied(true) 359 | .build_and_execute_with(|| { 360 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 5)); 361 | assert_ok!(Balances::set_freeze(&TestId::Bar, &1, 5)); 362 | assert_eq!(System::consumers(&1), 1); 363 | assert_ok!(>::transfer(&1, &2, 5, Expendable)); 364 | assert_noop!( 365 | >::transfer(&1, 
&2, 1, Expendable), 366 | TokenError::Frozen 367 | ); 368 | }); 369 | } 370 | 371 | #[test] 372 | fn can_hold_entire_balance_when_second_provider() { 373 | ExtBuilder::default() 374 | .existential_deposit(1) 375 | .monied(false) 376 | .build_and_execute_with(|| { 377 | >::set_balance(&1, 100); 378 | assert_noop!(Balances::hold(&TestId::Foo, &1, 100), TokenError::FundsUnavailable); 379 | System::inc_providers(&1); 380 | assert_eq!(System::providers(&1), 2); 381 | assert_ok!(Balances::hold(&TestId::Foo, &1, 100)); 382 | assert_eq!(System::providers(&1), 1); 383 | assert_noop!(System::dec_providers(&1), DispatchError::ConsumerRemaining); 384 | }); 385 | } 386 | 387 | #[test] 388 | fn unholding_frees_hold_slot() { 389 | ExtBuilder::default() 390 | .existential_deposit(1) 391 | .monied(false) 392 | .build_and_execute_with(|| { 393 | >::set_balance(&1, 100); 394 | assert_ok!(Balances::hold(&TestId::Foo, &1, 10)); 395 | assert_ok!(Balances::hold(&TestId::Bar, &1, 10)); 396 | assert_ok!(Balances::release(&TestId::Foo, &1, 10, Exact)); 397 | assert_ok!(Balances::hold(&TestId::Baz, &1, 10)); 398 | }); 399 | } 400 | 401 | #[test] 402 | fn sufficients_work_properly_with_reference_counting() { 403 | ExtBuilder::default() 404 | .existential_deposit(1) 405 | .monied(true) 406 | .build_and_execute_with(|| { 407 | // Only run PoC when the system pallet is enabled, since the underlying bug is in the 408 | // system pallet it won't work with BalancesAccountStore 409 | if UseSystem::get() { 410 | // Start with a balance of 100 411 | >::set_balance(&1, 100); 412 | // Emulate a sufficient, in reality this could be reached by transferring a 413 | // sufficient asset to the account 414 | System::inc_sufficients(&1); 415 | // Spend the same balance multiple times 416 | assert_ok!(>::transfer(&1, &1337, 100, Expendable)); 417 | assert_eq!(Balances::free_balance(&1), 0); 418 | assert_noop!( 419 | >::transfer(&1, &1337, 100, Expendable), 420 | TokenError::FundsUnavailable 421 | ); 422 | } 423 
| }); 424 | } 425 | 426 | #[test] 427 | fn emit_events_with_changing_freezes() { 428 | ExtBuilder::default().build_and_execute_with(|| { 429 | let _ = Balances::set_balance(&1, 100); 430 | System::reset_events(); 431 | 432 | // Freeze = [] --> [10] 433 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 10)); 434 | assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Frozen { who: 1, amount: 10 })]); 435 | 436 | // Freeze = [10] --> [15] 437 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 15)); 438 | assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Frozen { who: 1, amount: 5 })]); 439 | 440 | // Freeze = [15] --> [15, 20] 441 | assert_ok!(Balances::set_freeze(&TestId::Bar, &1, 20)); 442 | assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Frozen { who: 1, amount: 5 })]); 443 | 444 | // Freeze = [15, 20] --> [17, 20] 445 | assert_ok!(Balances::set_freeze(&TestId::Foo, &1, 17)); 446 | for event in events() { 447 | match event { 448 | RuntimeEvent::Balances(crate::Event::Frozen { .. }) => { 449 | assert!(false, "unexpected freeze event") 450 | }, 451 | RuntimeEvent::Balances(crate::Event::Thawed { .. 
}) => { 452 | assert!(false, "unexpected thaw event") 453 | }, 454 | _ => continue, 455 | } 456 | } 457 | 458 | // Freeze = [17, 20] --> [17, 15] 459 | assert_ok!(Balances::set_freeze(&TestId::Bar, &1, 15)); 460 | assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Thawed { who: 1, amount: 3 })]); 461 | 462 | // Freeze = [17, 15] --> [15] 463 | assert_ok!(Balances::thaw(&TestId::Foo, &1)); 464 | assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Thawed { who: 1, amount: 2 })]); 465 | 466 | // Freeze = [15] --> [] 467 | assert_ok!(Balances::thaw(&TestId::Bar, &1)); 468 | assert_eq!(events(), [RuntimeEvent::Balances(crate::Event::Thawed { who: 1, amount: 15 })]); 469 | }); 470 | } 471 | -------------------------------------------------------------------------------- /frame/balances/src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! Tests. 
19 | 20 | #![cfg(test)] 21 | 22 | use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet}; 23 | use codec::{Decode, Encode, MaxEncodedLen}; 24 | use frame_support::{ 25 | assert_err, assert_noop, assert_ok, assert_storage_noop, 26 | dispatch::{DispatchInfo, GetDispatchInfo}, 27 | parameter_types, 28 | traits::{ 29 | tokens::fungible, ConstU32, ConstU64, ConstU8, Imbalance as ImbalanceT, OnUnbalanced, 30 | StorageMapShim, StoredMap, WhitelistedStorageKeys, 31 | }, 32 | weights::{IdentityFee, Weight}, 33 | RuntimeDebug, 34 | }; 35 | use frame_system::{self as system, RawOrigin}; 36 | use pallet_transaction_payment::{ChargeTransactionPayment, CurrencyAdapter, Multiplier}; 37 | use scale_info::TypeInfo; 38 | use sp_core::{hexdisplay::HexDisplay, H256}; 39 | use sp_io; 40 | use sp_runtime::{ 41 | testing::Header, 42 | traits::{BadOrigin, IdentityLookup, SignedExtension, Zero}, 43 | ArithmeticError, DispatchError, DispatchResult, FixedPointNumber, TokenError, 44 | }; 45 | use std::collections::BTreeSet; 46 | 47 | mod currency_tests; 48 | mod dispatchable_tests; 49 | mod fungible_conformance_tests; 50 | mod fungible_tests; 51 | mod reentrancy_tests; 52 | 53 | type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; 54 | type Block = frame_system::mocking::MockBlock; 55 | 56 | #[derive( 57 | Encode, 58 | Decode, 59 | Copy, 60 | Clone, 61 | Eq, 62 | PartialEq, 63 | Ord, 64 | PartialOrd, 65 | MaxEncodedLen, 66 | TypeInfo, 67 | RuntimeDebug, 68 | )] 69 | pub enum TestId { 70 | Foo, 71 | Bar, 72 | Baz, 73 | } 74 | 75 | frame_support::construct_runtime!( 76 | pub struct Test where 77 | Block = Block, 78 | NodeBlock = Block, 79 | UncheckedExtrinsic = UncheckedExtrinsic, 80 | { 81 | System: frame_system::{Pallet, Call, Config, Storage, Event}, 82 | Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, 83 | TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, 84 | } 85 | ); 86 | 87 | parameter_types! 
{ 88 | pub BlockWeights: frame_system::limits::BlockWeights = 89 | frame_system::limits::BlockWeights::simple_max( 90 | frame_support::weights::Weight::from_parts(1024, u64::MAX), 91 | ); 92 | pub static ExistentialDeposit: u64 = 1; 93 | } 94 | impl frame_system::Config for Test { 95 | type BaseCallFilter = frame_support::traits::Everything; 96 | type BlockWeights = BlockWeights; 97 | type BlockLength = (); 98 | type DbWeight = (); 99 | type RuntimeOrigin = RuntimeOrigin; 100 | type Index = u64; 101 | type BlockNumber = u64; 102 | type RuntimeCall = RuntimeCall; 103 | type Hash = H256; 104 | type Hashing = ::sp_runtime::traits::BlakeTwo256; 105 | type AccountId = u64; 106 | type Lookup = IdentityLookup; 107 | type Header = Header; 108 | type RuntimeEvent = RuntimeEvent; 109 | type BlockHashCount = ConstU64<250>; 110 | type Version = (); 111 | type PalletInfo = PalletInfo; 112 | type AccountData = super::AccountData; 113 | type OnNewAccount = (); 114 | type OnKilledAccount = (); 115 | type SystemWeightInfo = (); 116 | type SS58Prefix = (); 117 | type OnSetCode = (); 118 | type MaxConsumers = frame_support::traits::ConstU32<16>; 119 | } 120 | 121 | impl pallet_transaction_payment::Config for Test { 122 | type RuntimeEvent = RuntimeEvent; 123 | type OnChargeTransaction = CurrencyAdapter, ()>; 124 | type OperationalFeeMultiplier = ConstU8<5>; 125 | type WeightToFee = IdentityFee; 126 | type LengthToFee = IdentityFee; 127 | type FeeMultiplierUpdate = (); 128 | } 129 | 130 | impl Config for Test { 131 | type Balance = u64; 132 | type DustRemoval = DustTrap; 133 | type RuntimeEvent = RuntimeEvent; 134 | type ExistentialDeposit = ExistentialDeposit; 135 | type AccountStore = TestAccountStore; 136 | type MaxLocks = ConstU32<50>; 137 | type MaxReserves = ConstU32<2>; 138 | type ReserveIdentifier = TestId; 139 | type WeightInfo = (); 140 | type HoldIdentifier = TestId; 141 | type FreezeIdentifier = TestId; 142 | type MaxFreezes = ConstU32<2>; 143 | type MaxHolds = 
ConstU32<2>; 144 | } 145 | 146 | #[derive(Clone)] 147 | pub struct ExtBuilder { 148 | existential_deposit: u64, 149 | monied: bool, 150 | dust_trap: Option, 151 | } 152 | impl Default for ExtBuilder { 153 | fn default() -> Self { 154 | Self { existential_deposit: 1, monied: false, dust_trap: None } 155 | } 156 | } 157 | impl ExtBuilder { 158 | pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { 159 | self.existential_deposit = existential_deposit; 160 | self 161 | } 162 | pub fn monied(mut self, monied: bool) -> Self { 163 | self.monied = monied; 164 | if self.existential_deposit == 0 { 165 | self.existential_deposit = 1; 166 | } 167 | self 168 | } 169 | pub fn dust_trap(mut self, account: u64) -> Self { 170 | self.dust_trap = Some(account); 171 | self 172 | } 173 | pub fn set_associated_consts(&self) { 174 | DUST_TRAP_TARGET.with(|v| v.replace(self.dust_trap)); 175 | EXISTENTIAL_DEPOSIT.with(|v| v.replace(self.existential_deposit)); 176 | } 177 | pub fn build(self) -> sp_io::TestExternalities { 178 | self.set_associated_consts(); 179 | let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); 180 | pallet_balances::GenesisConfig:: { 181 | balances: if self.monied { 182 | vec![ 183 | (1, 10 * self.existential_deposit), 184 | (2, 20 * self.existential_deposit), 185 | (3, 30 * self.existential_deposit), 186 | (4, 40 * self.existential_deposit), 187 | (12, 10 * self.existential_deposit), 188 | ] 189 | } else { 190 | vec![] 191 | }, 192 | } 193 | .assimilate_storage(&mut t) 194 | .unwrap(); 195 | 196 | let mut ext = sp_io::TestExternalities::new(t); 197 | ext.execute_with(|| System::set_block_number(1)); 198 | ext 199 | } 200 | pub fn build_and_execute_with(self, f: impl Fn()) { 201 | let other = self.clone(); 202 | UseSystem::set(false); 203 | other.build().execute_with(|| f()); 204 | UseSystem::set(true); 205 | self.build().execute_with(|| f()); 206 | } 207 | } 208 | 209 | parameter_types! 
{ 210 | static DustTrapTarget: Option = None; 211 | } 212 | 213 | pub struct DustTrap; 214 | 215 | impl OnUnbalanced> for DustTrap { 216 | fn on_nonzero_unbalanced(amount: CreditOf) { 217 | match DustTrapTarget::get() { 218 | None => drop(amount), 219 | Some(a) => { 220 | let result = >::resolve(&a, amount); 221 | debug_assert!(result.is_ok()); 222 | }, 223 | } 224 | } 225 | } 226 | 227 | parameter_types! { 228 | pub static UseSystem: bool = false; 229 | } 230 | 231 | type BalancesAccountStore = StorageMapShim, u64, super::AccountData>; 232 | type SystemAccountStore = frame_system::Pallet; 233 | 234 | pub struct TestAccountStore; 235 | impl StoredMap> for TestAccountStore { 236 | fn get(k: &u64) -> super::AccountData { 237 | if UseSystem::get() { 238 | >::get(k) 239 | } else { 240 | >::get(k) 241 | } 242 | } 243 | fn try_mutate_exists>( 244 | k: &u64, 245 | f: impl FnOnce(&mut Option>) -> Result, 246 | ) -> Result { 247 | if UseSystem::get() { 248 | >::try_mutate_exists(k, f) 249 | } else { 250 | >::try_mutate_exists(k, f) 251 | } 252 | } 253 | fn mutate( 254 | k: &u64, 255 | f: impl FnOnce(&mut super::AccountData) -> R, 256 | ) -> Result { 257 | if UseSystem::get() { 258 | >::mutate(k, f) 259 | } else { 260 | >::mutate(k, f) 261 | } 262 | } 263 | fn mutate_exists( 264 | k: &u64, 265 | f: impl FnOnce(&mut Option>) -> R, 266 | ) -> Result { 267 | if UseSystem::get() { 268 | >::mutate_exists(k, f) 269 | } else { 270 | >::mutate_exists(k, f) 271 | } 272 | } 273 | fn insert(k: &u64, t: super::AccountData) -> Result<(), DispatchError> { 274 | if UseSystem::get() { 275 | >::insert(k, t) 276 | } else { 277 | >::insert(k, t) 278 | } 279 | } 280 | fn remove(k: &u64) -> Result<(), DispatchError> { 281 | if UseSystem::get() { 282 | >::remove(k) 283 | } else { 284 | >::remove(k) 285 | } 286 | } 287 | } 288 | 289 | pub fn events() -> Vec { 290 | let evt = System::events().into_iter().map(|evt| evt.event).collect::>(); 291 | System::reset_events(); 292 | evt 293 | } 294 | 295 | 
/// create a transaction info struct from weight. Handy to avoid building the whole struct. 296 | pub fn info_from_weight(w: Weight) -> DispatchInfo { 297 | DispatchInfo { weight: w, ..Default::default() } 298 | } 299 | 300 | #[test] 301 | fn weights_sane() { 302 | let info = crate::Call::::transfer_allow_death { dest: 10, value: 4 }.get_dispatch_info(); 303 | assert_eq!(<() as crate::WeightInfo>::transfer_allow_death(), info.weight); 304 | 305 | let info = crate::Call::::force_unreserve { who: 10, amount: 4 }.get_dispatch_info(); 306 | assert_eq!(<() as crate::WeightInfo>::force_unreserve(), info.weight); 307 | } 308 | 309 | #[test] 310 | fn check_whitelist() { 311 | let whitelist: BTreeSet = AllPalletsWithSystem::whitelisted_storage_keys() 312 | .iter() 313 | .map(|s| HexDisplay::from(&s.key).to_string()) 314 | .collect(); 315 | // Inactive Issuance 316 | assert!(whitelist.contains("c2261276cc9d1f8598ea4b6a74b15c2f1ccde6872881f893a21de93dfe970cd5")); 317 | // Total Issuance 318 | assert!(whitelist.contains("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80")); 319 | } 320 | -------------------------------------------------------------------------------- /frame/balances/src/tests/reentrancy_tests.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! Tests regarding the reentrancy functionality. 19 | 20 | use super::*; 21 | use frame_support::traits::tokens::{ 22 | Fortitude::Force, 23 | Precision::BestEffort, 24 | Preservation::{Expendable, Protect}, 25 | }; 26 | use fungible::Balanced; 27 | 28 | #[test] 29 | fn transfer_dust_removal_tst1_should_work() { 30 | ExtBuilder::default() 31 | .existential_deposit(100) 32 | .dust_trap(1) 33 | .build_and_execute_with(|| { 34 | // Verification of reentrancy in dust removal 35 | assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 1000)); 36 | assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 2, 500)); 37 | 38 | // In this transaction, account 2 free balance 39 | // drops below existential balance 40 | // and dust balance is removed from account 2 41 | assert_ok!(Balances::transfer_allow_death(RawOrigin::Signed(2).into(), 3, 450)); 42 | 43 | // As expected dust balance is removed. 44 | assert_eq!(Balances::free_balance(&2), 0); 45 | 46 | // As expected beneficiary account 3 47 | // received the transfered fund. 48 | assert_eq!(Balances::free_balance(&3), 450); 49 | 50 | // Dust balance is deposited to account 1 51 | // during the process of dust removal. 
52 | assert_eq!(Balances::free_balance(&1), 1050); 53 | 54 | // Verify the events 55 | assert_eq!(System::events().len(), 12); 56 | 57 | System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { 58 | from: 2, 59 | to: 3, 60 | amount: 450, 61 | })); 62 | System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { 63 | account: 2, 64 | amount: 50, 65 | })); 66 | System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { 67 | who: 1, 68 | amount: 50, 69 | })); 70 | }); 71 | } 72 | 73 | #[test] 74 | fn transfer_dust_removal_tst2_should_work() { 75 | ExtBuilder::default() 76 | .existential_deposit(100) 77 | .dust_trap(1) 78 | .build_and_execute_with(|| { 79 | // Verification of reentrancy in dust removal 80 | assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 1000)); 81 | assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 2, 500)); 82 | 83 | // In this transaction, account 2 free balance 84 | // drops below existential balance 85 | // and dust balance is removed from account 2 86 | assert_ok!(Balances::transfer_allow_death(RawOrigin::Signed(2).into(), 1, 450)); 87 | 88 | // As expected dust balance is removed. 89 | assert_eq!(Balances::free_balance(&2), 0); 90 | 91 | // Dust balance is deposited to account 1 92 | // during the process of dust removal. 
93 | assert_eq!(Balances::free_balance(&1), 1500); 94 | 95 | // Verify the events 96 | assert_eq!(System::events().len(), 10); 97 | 98 | System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { 99 | from: 2, 100 | to: 1, 101 | amount: 450, 102 | })); 103 | System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { 104 | account: 2, 105 | amount: 50, 106 | })); 107 | System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { 108 | who: 1, 109 | amount: 50, 110 | })); 111 | }); 112 | } 113 | 114 | #[test] 115 | fn repatriating_reserved_balance_dust_removal_should_work() { 116 | ExtBuilder::default() 117 | .existential_deposit(100) 118 | .dust_trap(1) 119 | .build_and_execute_with(|| { 120 | // Verification of reentrancy in dust removal 121 | assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 1000)); 122 | assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 2, 500)); 123 | 124 | // Reserve a value on account 2, 125 | // Such that free balance is lower than 126 | // Exestintial deposit. 127 | assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), 1, 450)); 128 | 129 | // Since free balance of account 2 is lower than 130 | // existential deposit, dust amount is 131 | // removed from the account 2 132 | assert_eq!(Balances::reserved_balance(2), 0); 133 | assert_eq!(Balances::free_balance(2), 0); 134 | 135 | // account 1 is credited with reserved amount 136 | // together with dust balance during dust 137 | // removal. 
138 | assert_eq!(Balances::reserved_balance(1), 0); 139 | assert_eq!(Balances::free_balance(1), 1500); 140 | 141 | // Verify the events 142 | assert_eq!(System::events().len(), 10); 143 | 144 | System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { 145 | from: 2, 146 | to: 1, 147 | amount: 450, 148 | })); 149 | System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { 150 | account: 2, 151 | amount: 50, 152 | })); 153 | System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { 154 | who: 1, 155 | amount: 50, 156 | })); 157 | }); 158 | } 159 | 160 | #[test] 161 | fn emit_events_with_no_existential_deposit_suicide_with_dust() { 162 | ExtBuilder::default().existential_deposit(2).build_and_execute_with(|| { 163 | assert_ok!(Balances::force_set_balance(RawOrigin::Root.into(), 1, 100)); 164 | 165 | assert_eq!( 166 | events(), 167 | [ 168 | RuntimeEvent::System(system::Event::NewAccount { account: 1 }), 169 | RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), 170 | RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100 }), 171 | ] 172 | ); 173 | 174 | let res = Balances::withdraw(&1, 98, BestEffort, Protect, Force); 175 | assert_eq!(res.unwrap().peek(), 98); 176 | 177 | // no events 178 | assert_eq!( 179 | events(), 180 | [RuntimeEvent::Balances(crate::Event::Withdraw { who: 1, amount: 98 })] 181 | ); 182 | 183 | let res = Balances::withdraw(&1, 1, BestEffort, Expendable, Force); 184 | assert_eq!(res.unwrap().peek(), 1); 185 | 186 | assert_eq!( 187 | events(), 188 | [ 189 | RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), 190 | RuntimeEvent::Balances(crate::Event::DustLost { account: 1, amount: 1 }), 191 | RuntimeEvent::Balances(crate::Event::Withdraw { who: 1, amount: 1 }) 192 | ] 193 | ); 194 | }); 195 | } 196 | -------------------------------------------------------------------------------- /frame/balances/src/types.rs: 
-------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! Types used in the pallet. 19 | 20 | use crate::{Config, CreditOf, Event, Pallet}; 21 | use codec::{Decode, Encode, MaxEncodedLen}; 22 | use core::ops::BitOr; 23 | use frame_support::{ 24 | traits::{Imbalance, LockIdentifier, OnUnbalanced, WithdrawReasons}, 25 | RuntimeDebug, 26 | }; 27 | use scale_info::TypeInfo; 28 | #[cfg(feature = "std")] 29 | use serde::{Deserialize, Serialize}; 30 | use sp_runtime::Saturating; 31 | 32 | /// Simplified reasons for withdrawing balance. 33 | #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] 34 | pub enum Reasons { 35 | /// Paying system transaction fees. 36 | Fee = 0, 37 | /// Any reason other than paying system transaction fees. 38 | Misc = 1, 39 | /// Any reason at all. 
40 | All = 2, 41 | } 42 | 43 | impl From for Reasons { 44 | fn from(r: WithdrawReasons) -> Reasons { 45 | if r == WithdrawReasons::TRANSACTION_PAYMENT { 46 | Reasons::Fee 47 | } else if r.contains(WithdrawReasons::TRANSACTION_PAYMENT) { 48 | Reasons::All 49 | } else { 50 | Reasons::Misc 51 | } 52 | } 53 | } 54 | 55 | impl BitOr for Reasons { 56 | type Output = Reasons; 57 | fn bitor(self, other: Reasons) -> Reasons { 58 | if self == other { 59 | return self; 60 | } 61 | Reasons::All 62 | } 63 | } 64 | 65 | /// A single lock on a balance. There can be many of these on an account and they "overlap", so the 66 | /// same balance is frozen by multiple locks. 67 | #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] 68 | pub struct BalanceLock { 69 | /// An identifier for this lock. Only one lock may be in existence for each identifier. 70 | pub id: LockIdentifier, 71 | /// The amount which the free balance may not drop below when this lock is in effect. 72 | pub amount: Balance, 73 | /// If true, then the lock remains in effect even for payment of transaction fees. 74 | pub reasons: Reasons, 75 | } 76 | 77 | /// Store named reserved balance. 78 | #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] 79 | pub struct ReserveData { 80 | /// The identifier for the named reserve. 81 | pub id: ReserveIdentifier, 82 | /// The amount of the named reserve. 83 | pub amount: Balance, 84 | } 85 | 86 | /// An identifier and balance. 87 | #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] 88 | pub struct IdAmount { 89 | /// An identifier for this item. 90 | pub id: Id, 91 | /// Some amount for this item. 92 | pub amount: Balance, 93 | } 94 | 95 | /// All balance information for an account. 
96 | #[derive(Encode, Decode, Clone, PartialEq, Eq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo)] 97 | #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] 98 | pub struct AccountData { 99 | /// Non-reserved part of the balance which the account holder may be able to control. 100 | /// 101 | /// This is the only balance that matters in terms of most operations on tokens. 102 | pub free: Balance, 103 | /// Balance which is has active holds on it and may not be used at all. 104 | /// 105 | /// This is the sum of all individual holds together with any sums still under the (deprecated) 106 | /// reserves API. 107 | pub reserved: Balance, 108 | /// The amount that `free + reserved` may not drop below when reducing the balance, except for 109 | /// actions where the account owner cannot reasonably benefit from the balance reduction, such 110 | /// as slashing. 111 | pub frozen: Balance, 112 | /// Extra information about this account. The MSB is a flag indicating whether the new ref- 113 | /// counting logic is in place for this account. 114 | pub flags: ExtraFlags, 115 | } 116 | 117 | const IS_NEW_LOGIC: u128 = 0x80000000_00000000_00000000_00000000u128; 118 | 119 | #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] 120 | #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] 121 | pub struct ExtraFlags(u128); 122 | impl Default for ExtraFlags { 123 | fn default() -> Self { 124 | Self(IS_NEW_LOGIC) 125 | } 126 | } 127 | impl ExtraFlags { 128 | pub fn old_logic() -> Self { 129 | Self(0) 130 | } 131 | pub fn set_new_logic(&mut self) { 132 | self.0 = self.0 | IS_NEW_LOGIC 133 | } 134 | pub fn is_new_logic(&self) -> bool { 135 | (self.0 & IS_NEW_LOGIC) == IS_NEW_LOGIC 136 | } 137 | } 138 | 139 | impl AccountData { 140 | pub fn usable(&self) -> Balance { 141 | self.free.saturating_sub(self.frozen) 142 | } 143 | 144 | /// The total balance in this account including any that is reserved and ignoring any frozen. 
145 | pub fn total(&self) -> Balance { 146 | self.free.saturating_add(self.reserved) 147 | } 148 | } 149 | 150 | pub struct DustCleaner, I: 'static = ()>( 151 | pub(crate) Option<(T::AccountId, CreditOf)>, 152 | ); 153 | 154 | impl, I: 'static> Drop for DustCleaner { 155 | fn drop(&mut self) { 156 | if let Some((who, dust)) = self.0.take() { 157 | Pallet::::deposit_event(Event::DustLost { account: who, amount: dust.peek() }); 158 | T::DustRemoval::on_unbalanced(dust); 159 | } 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /frame/balances/src/weights.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! Autogenerated weights for pallet_balances 19 | //! 20 | //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev 21 | //! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` 22 | //! WORST CASE MAP SIZE: `1000000` 23 | //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` 24 | //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 25 | 26 | // Executed Command: 27 | // ./target/production/substrate 28 | // benchmark 29 | // pallet 30 | // --chain=dev 31 | // --steps=50 32 | // --repeat=20 33 | // --pallet=pallet_balances 34 | // --extrinsic=* 35 | // --execution=wasm 36 | // --wasm-execution=compiled 37 | // --heap-pages=4096 38 | // --output=./frame/balances/src/weights.rs 39 | // --header=./HEADER-APACHE2 40 | // --template=./.maintain/frame-weight-template.hbs 41 | 42 | #![cfg_attr(rustfmt, rustfmt_skip)] 43 | #![allow(unused_parens)] 44 | #![allow(unused_imports)] 45 | 46 | use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; 47 | use sp_std::marker::PhantomData; 48 | 49 | /// Weight functions needed for pallet_balances. 50 | pub trait WeightInfo { 51 | fn transfer_allow_death() -> Weight; 52 | fn transfer_keep_alive() -> Weight; 53 | fn force_set_balance_creating() -> Weight; 54 | fn force_set_balance_killing() -> Weight; 55 | fn force_transfer() -> Weight; 56 | fn transfer_all() -> Weight; 57 | fn force_unreserve() -> Weight; 58 | fn upgrade_accounts(u: u32, ) -> Weight; 59 | } 60 | 61 | /// Weights for pallet_balances using the Substrate node and recommended hardware. 62 | pub struct SubstrateWeight(PhantomData); 63 | impl WeightInfo for SubstrateWeight { 64 | /// Storage: System Account (r:1 w:1) 65 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 66 | fn transfer_allow_death() -> Weight { 67 | // Proof Size summary in bytes: 68 | // Measured: `0` 69 | // Estimated: `3593` 70 | // Minimum execution time: 59_458_000 picoseconds. 
71 | Weight::from_parts(60_307_000, 3593) 72 | .saturating_add(T::DbWeight::get().reads(1_u64)) 73 | .saturating_add(T::DbWeight::get().writes(1_u64)) 74 | } 75 | /// Storage: System Account (r:1 w:1) 76 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 77 | fn transfer_keep_alive() -> Weight { 78 | // Proof Size summary in bytes: 79 | // Measured: `0` 80 | // Estimated: `3593` 81 | // Minimum execution time: 43_056_000 picoseconds. 82 | Weight::from_parts(43_933_000, 3593) 83 | .saturating_add(T::DbWeight::get().reads(1_u64)) 84 | .saturating_add(T::DbWeight::get().writes(1_u64)) 85 | } 86 | /// Storage: System Account (r:1 w:1) 87 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 88 | fn force_set_balance_creating() -> Weight { 89 | // Proof Size summary in bytes: 90 | // Measured: `174` 91 | // Estimated: `3593` 92 | // Minimum execution time: 17_428_000 picoseconds. 93 | Weight::from_parts(17_731_000, 3593) 94 | .saturating_add(T::DbWeight::get().reads(1_u64)) 95 | .saturating_add(T::DbWeight::get().writes(1_u64)) 96 | } 97 | /// Storage: System Account (r:1 w:1) 98 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 99 | fn force_set_balance_killing() -> Weight { 100 | // Proof Size summary in bytes: 101 | // Measured: `174` 102 | // Estimated: `3593` 103 | // Minimum execution time: 22_809_000 picoseconds. 104 | Weight::from_parts(23_225_000, 3593) 105 | .saturating_add(T::DbWeight::get().reads(1_u64)) 106 | .saturating_add(T::DbWeight::get().writes(1_u64)) 107 | } 108 | /// Storage: System Account (r:2 w:2) 109 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 110 | fn force_transfer() -> Weight { 111 | // Proof Size summary in bytes: 112 | // Measured: `103` 113 | // Estimated: `6196` 114 | // Minimum execution time: 56_929_000 picoseconds. 
115 | Weight::from_parts(57_688_000, 6196) 116 | .saturating_add(T::DbWeight::get().reads(2_u64)) 117 | .saturating_add(T::DbWeight::get().writes(2_u64)) 118 | } 119 | /// Storage: System Account (r:1 w:1) 120 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 121 | fn transfer_all() -> Weight { 122 | // Proof Size summary in bytes: 123 | // Measured: `0` 124 | // Estimated: `3593` 125 | // Minimum execution time: 49_820_000 picoseconds. 126 | Weight::from_parts(50_832_000, 3593) 127 | .saturating_add(T::DbWeight::get().reads(1_u64)) 128 | .saturating_add(T::DbWeight::get().writes(1_u64)) 129 | } 130 | /// Storage: System Account (r:1 w:1) 131 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 132 | fn force_unreserve() -> Weight { 133 | // Proof Size summary in bytes: 134 | // Measured: `174` 135 | // Estimated: `3593` 136 | // Minimum execution time: 20_270_000 picoseconds. 137 | Weight::from_parts(20_597_000, 3593) 138 | .saturating_add(T::DbWeight::get().reads(1_u64)) 139 | .saturating_add(T::DbWeight::get().writes(1_u64)) 140 | } 141 | /// Storage: System Account (r:999 w:999) 142 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 143 | /// The range of component `u` is `[1, 1000]`. 144 | fn upgrade_accounts(u: u32, ) -> Weight { 145 | // Proof Size summary in bytes: 146 | // Measured: `0 + u * (135 ±0)` 147 | // Estimated: `990 + u * (2603 ±0)` 148 | // Minimum execution time: 19_847_000 picoseconds. 
149 | Weight::from_parts(20_053_000, 990) 150 | // Standard Error: 11_643 151 | .saturating_add(Weight::from_parts(14_563_782, 0).saturating_mul(u.into())) 152 | .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) 153 | .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) 154 | .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) 155 | } 156 | } 157 | 158 | // For backwards compatibility and tests 159 | impl WeightInfo for () { 160 | /// Storage: System Account (r:1 w:1) 161 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 162 | fn transfer_allow_death() -> Weight { 163 | // Proof Size summary in bytes: 164 | // Measured: `0` 165 | // Estimated: `3593` 166 | // Minimum execution time: 59_458_000 picoseconds. 167 | Weight::from_parts(60_307_000, 3593) 168 | .saturating_add(RocksDbWeight::get().reads(1_u64)) 169 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 170 | } 171 | /// Storage: System Account (r:1 w:1) 172 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 173 | fn transfer_keep_alive() -> Weight { 174 | // Proof Size summary in bytes: 175 | // Measured: `0` 176 | // Estimated: `3593` 177 | // Minimum execution time: 43_056_000 picoseconds. 178 | Weight::from_parts(43_933_000, 3593) 179 | .saturating_add(RocksDbWeight::get().reads(1_u64)) 180 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 181 | } 182 | /// Storage: System Account (r:1 w:1) 183 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 184 | fn force_set_balance_creating() -> Weight { 185 | // Proof Size summary in bytes: 186 | // Measured: `174` 187 | // Estimated: `3593` 188 | // Minimum execution time: 17_428_000 picoseconds. 
189 | Weight::from_parts(17_731_000, 3593) 190 | .saturating_add(RocksDbWeight::get().reads(1_u64)) 191 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 192 | } 193 | /// Storage: System Account (r:1 w:1) 194 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 195 | fn force_set_balance_killing() -> Weight { 196 | // Proof Size summary in bytes: 197 | // Measured: `174` 198 | // Estimated: `3593` 199 | // Minimum execution time: 22_809_000 picoseconds. 200 | Weight::from_parts(23_225_000, 3593) 201 | .saturating_add(RocksDbWeight::get().reads(1_u64)) 202 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 203 | } 204 | /// Storage: System Account (r:2 w:2) 205 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 206 | fn force_transfer() -> Weight { 207 | // Proof Size summary in bytes: 208 | // Measured: `103` 209 | // Estimated: `6196` 210 | // Minimum execution time: 56_929_000 picoseconds. 211 | Weight::from_parts(57_688_000, 6196) 212 | .saturating_add(RocksDbWeight::get().reads(2_u64)) 213 | .saturating_add(RocksDbWeight::get().writes(2_u64)) 214 | } 215 | /// Storage: System Account (r:1 w:1) 216 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 217 | fn transfer_all() -> Weight { 218 | // Proof Size summary in bytes: 219 | // Measured: `0` 220 | // Estimated: `3593` 221 | // Minimum execution time: 49_820_000 picoseconds. 
222 | Weight::from_parts(50_832_000, 3593) 223 | .saturating_add(RocksDbWeight::get().reads(1_u64)) 224 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 225 | } 226 | /// Storage: System Account (r:1 w:1) 227 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 228 | fn force_unreserve() -> Weight { 229 | // Proof Size summary in bytes: 230 | // Measured: `174` 231 | // Estimated: `3593` 232 | // Minimum execution time: 20_270_000 picoseconds. 233 | Weight::from_parts(20_597_000, 3593) 234 | .saturating_add(RocksDbWeight::get().reads(1_u64)) 235 | .saturating_add(RocksDbWeight::get().writes(1_u64)) 236 | } 237 | /// Storage: System Account (r:999 w:999) 238 | /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) 239 | /// The range of component `u` is `[1, 1000]`. 240 | fn upgrade_accounts(u: u32, ) -> Weight { 241 | // Proof Size summary in bytes: 242 | // Measured: `0 + u * (135 ±0)` 243 | // Estimated: `990 + u * (2603 ±0)` 244 | // Minimum execution time: 19_847_000 picoseconds. 
245 | Weight::from_parts(20_053_000, 990) 246 | // Standard Error: 11_643 247 | .saturating_add(Weight::from_parts(14_563_782, 0).saturating_mul(u.into())) 248 | .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) 249 | .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) 250 | .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /node/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "swanky-node" 3 | version = "1.7.0" 4 | description = "Local Substrate node for wasm contract development & testing" 5 | authors = ["Astar Network"] 6 | homepage = "https://astar.network" 7 | edition = "2021" 8 | license = "Unlicense" 9 | publish = false 10 | repository = "https://github.com/shunsukew/swanky-node/" 11 | build = "build.rs" 12 | 13 | [package.metadata.docs.rs] 14 | targets = ["x86_64-unknown-linux-gnu"] 15 | 16 | [[bin]] 17 | name = "swanky-node" 18 | 19 | [dependencies] 20 | clap = { version = "=4.2.5", features = ["derive"] } 21 | 22 | futures = { version = '0.3.21' } 23 | log = { version = "0.4.17" } 24 | serde_json = "1.0" 25 | 26 | frame-support = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 27 | frame-system = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 28 | pallet-transaction-payment = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 29 | sc-cli = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 30 | sc-client-api = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 31 | sc-consensus = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 32 | 
sc-consensus-manual-seal = { path = "../client/consensus/manual-seal" } 33 | sc-executor = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 34 | sc-keystore = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 35 | sc-network = { git = "https://github.com/paritytech/substrate", package = "sc-network", branch = "polkadot-v0.9.43" } 36 | sc-service = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 37 | sc-telemetry = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 38 | sc-transaction-pool = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 39 | sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 40 | sp-consensus = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 41 | sp-core = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 42 | sp-database = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 43 | sp-inherents = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 44 | sp-keyring = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 45 | sp-runtime = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 46 | sp-timestamp = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 47 | 48 | # These dependencies are used for the node template's RPCs 49 | pallet-balances-rpc = { path = "../frame/balances/rpc" } 50 | pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 51 | sc-basic-authorship = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 52 | sc-rpc = { git = "https://github.com/paritytech/substrate.git", 
branch = "polkadot-v0.9.43" } 53 | sc-rpc-api = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 54 | sp-api = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 55 | sp-block-builder = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 56 | sp-blockchain = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 57 | substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 58 | 59 | # These dependencies are used for runtime benchmarking 60 | frame-benchmarking = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", default-features = false } 61 | frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 62 | 63 | # Local Dependencies 64 | swanky-runtime = { version = "1.7.0", path = "../runtime" } 65 | 66 | # RPC related dependencies 67 | jsonrpsee = { version = "0.16.2", features = ["server"] } 68 | 69 | # CLI-specific dependencies 70 | try-runtime-cli = { optional = true, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 71 | 72 | [build-dependencies] 73 | substrate-build-script-utils = { version = "3.0.0", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 74 | 75 | [features] 76 | default = [] 77 | runtime-benchmarks = ["swanky-runtime/runtime-benchmarks"] 78 | # Enable features that allow the runtime to be tried and debugged. Name might be subject to change 79 | # in the near future. 
80 | try-runtime = ["swanky-runtime/try-runtime", "try-runtime-cli"] 81 | -------------------------------------------------------------------------------- /node/build.rs: -------------------------------------------------------------------------------- 1 | use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; 2 | 3 | fn main() { 4 | generate_cargo_keys(); 5 | 6 | rerun_if_git_head_changed(); 7 | } 8 | -------------------------------------------------------------------------------- /node/src/chain_spec.rs: -------------------------------------------------------------------------------- 1 | use sc_service::ChainType; 2 | use sp_core::{sr25519, Pair, Public}; 3 | use sp_runtime::traits::{IdentifyAccount, Verify}; 4 | use swanky_runtime::{ 5 | AccountId, BalancesConfig, GenesisConfig, Signature, SudoConfig, SystemConfig, WASM_BINARY, 6 | }; 7 | 8 | // The URL for the telemetry server. 9 | // const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; 10 | 11 | /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. 12 | pub type ChainSpec = sc_service::GenericChainSpec; 13 | 14 | /// Generate a crypto pair from seed. 15 | pub fn get_from_seed(seed: &str) -> ::Public { 16 | TPublic::Pair::from_string(&format!("//{}", seed), None) 17 | .expect("static values are valid; qed") 18 | .public() 19 | } 20 | 21 | type AccountPublic = ::Signer; 22 | 23 | /// Generate an account ID from seed. 
24 | pub fn get_account_id_from_seed(seed: &str) -> AccountId 25 | where 26 | AccountPublic: From<::Public>, 27 | { 28 | AccountPublic::from(get_from_seed::(seed)).into_account() 29 | } 30 | 31 | pub fn development_config() -> Result { 32 | let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; 33 | let mut properties = serde_json::map::Map::new(); 34 | properties.insert("tokenDecimals".into(), 18.into()); 35 | 36 | Ok(ChainSpec::from_genesis( 37 | // Name 38 | "Development", 39 | // ID 40 | "dev", 41 | ChainType::Development, 42 | move || { 43 | testnet_genesis( 44 | wasm_binary, 45 | // Sudo account 46 | get_account_id_from_seed::("Alice"), 47 | // Pre-funded accounts 48 | vec![ 49 | get_account_id_from_seed::("Alice"), 50 | get_account_id_from_seed::("Bob"), 51 | get_account_id_from_seed::("Charlie"), 52 | get_account_id_from_seed::("Dave"), 53 | get_account_id_from_seed::("Eve"), 54 | get_account_id_from_seed::("Ferdie"), 55 | get_account_id_from_seed::("Alice//stash"), 56 | get_account_id_from_seed::("Bob//stash"), 57 | get_account_id_from_seed::("Charlie//stash"), 58 | get_account_id_from_seed::("Dave//stash"), 59 | get_account_id_from_seed::("Eve//stash"), 60 | get_account_id_from_seed::("Ferdie//stash"), 61 | ], 62 | true, 63 | ) 64 | }, 65 | // Bootnodes 66 | vec![], 67 | // Telemetry 68 | None, 69 | // Protocol ID 70 | None, 71 | None, 72 | // Properties 73 | Some(properties), 74 | // Extensions 75 | None, 76 | )) 77 | } 78 | 79 | /// Configure initial storage state for FRAME modules. 80 | fn testnet_genesis( 81 | wasm_binary: &[u8], 82 | root_key: AccountId, 83 | endowed_accounts: Vec, 84 | _enable_println: bool, 85 | ) -> GenesisConfig { 86 | GenesisConfig { 87 | system: SystemConfig { 88 | // Add Wasm runtime to storage. 89 | code: wasm_binary.to_vec(), 90 | }, 91 | balances: BalancesConfig { 92 | // Configure endowed accounts with initial balance of 1 << 100. 
93 | balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 100)).collect(), 94 | }, 95 | sudo: SudoConfig { 96 | // Assign network admin rights. 97 | key: Some(root_key), 98 | }, 99 | transaction_payment: Default::default(), 100 | assets: Default::default(), 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /node/src/cli.rs: -------------------------------------------------------------------------------- 1 | use sc_cli::RunCmd; 2 | 3 | #[derive(Debug, clap::Parser)] 4 | pub struct Cli { 5 | #[clap(subcommand)] 6 | pub subcommand: Option, 7 | 8 | #[clap(flatten)] 9 | pub run: RunCmd, 10 | 11 | #[clap(long)] 12 | pub finalize_delay_sec: Option, 13 | } 14 | 15 | #[derive(Debug, clap::Subcommand)] 16 | pub enum Subcommand { 17 | /// Key management cli utilities 18 | #[clap(subcommand)] 19 | Key(sc_cli::KeySubcommand), 20 | 21 | /// Build a chain specification. 22 | BuildSpec(sc_cli::BuildSpecCmd), 23 | 24 | /// Validate blocks. 25 | CheckBlock(sc_cli::CheckBlockCmd), 26 | 27 | /// Export blocks. 28 | ExportBlocks(sc_cli::ExportBlocksCmd), 29 | 30 | /// Export the state of a given block into a chain spec. 31 | ExportState(sc_cli::ExportStateCmd), 32 | 33 | /// Import blocks. 34 | ImportBlocks(sc_cli::ImportBlocksCmd), 35 | 36 | /// Remove the whole chain. 37 | PurgeChain(sc_cli::PurgeChainCmd), 38 | 39 | /// Revert the chain to a previous state. 40 | Revert(sc_cli::RevertCmd), 41 | 42 | /// Sub-commands concerned with benchmarking. 43 | #[cfg(feature = "frame-benchmarking")] 44 | #[clap(subcommand)] 45 | Benchmark(frame_benchmarking_cli::BenchmarkCmd), 46 | 47 | /// Try some command against runtime state. 48 | #[cfg(feature = "try-runtime")] 49 | TryRuntime(try_runtime_cli::TryRuntimeCmd), 50 | 51 | /// Try some command against runtime state. Note: `try-runtime` feature must be enabled. 
52 | #[cfg(not(feature = "try-runtime"))] 53 | TryRuntime, 54 | } 55 | -------------------------------------------------------------------------------- /node/src/command.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | chain_spec, 3 | cli::{Cli, Subcommand}, 4 | service, 5 | }; 6 | use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; 7 | use sc_service::PartialComponents; 8 | 9 | impl SubstrateCli for Cli { 10 | fn impl_name() -> String { 11 | "Swanky Node".into() 12 | } 13 | 14 | fn impl_version() -> String { 15 | env!("SUBSTRATE_CLI_IMPL_VERSION").into() 16 | } 17 | 18 | fn description() -> String { 19 | env!("CARGO_PKG_DESCRIPTION").into() 20 | } 21 | 22 | fn author() -> String { 23 | env!("CARGO_PKG_AUTHORS").into() 24 | } 25 | 26 | fn support_url() -> String { 27 | "support.anonymous.an".into() 28 | } 29 | 30 | fn copyright_start_year() -> i32 { 31 | 2022 32 | } 33 | 34 | fn load_spec(&self, id: &str) -> Result, String> { 35 | Ok(match id { 36 | "dev" => Box::new(chain_spec::development_config()?), 37 | _ => Box::new(chain_spec::development_config()?), 38 | }) 39 | } 40 | 41 | fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { 42 | &swanky_runtime::VERSION 43 | } 44 | } 45 | 46 | /// Parse and run command line arguments 47 | pub fn run() -> sc_cli::Result<()> { 48 | let cli = Cli::from_args(); 49 | 50 | match &cli.subcommand { 51 | Some(Subcommand::Key(cmd)) => cmd.run(&cli), 52 | Some(Subcommand::BuildSpec(cmd)) => { 53 | let runner = cli.create_runner(cmd)?; 54 | runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) 55 | }, 56 | Some(Subcommand::CheckBlock(cmd)) => { 57 | let runner = cli.create_runner(cmd)?; 58 | runner.async_run(|config| { 59 | let PartialComponents { client, task_manager, import_queue, .. 
} = 60 | service::new_partial(&config)?; 61 | Ok((cmd.run(client, import_queue), task_manager)) 62 | }) 63 | }, 64 | Some(Subcommand::ExportBlocks(cmd)) => { 65 | let runner = cli.create_runner(cmd)?; 66 | runner.async_run(|config| { 67 | let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; 68 | Ok((cmd.run(client, config.database), task_manager)) 69 | }) 70 | }, 71 | Some(Subcommand::ExportState(cmd)) => { 72 | let runner = cli.create_runner(cmd)?; 73 | runner.async_run(|config| { 74 | let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; 75 | Ok((cmd.run(client, config.chain_spec), task_manager)) 76 | }) 77 | }, 78 | Some(Subcommand::ImportBlocks(cmd)) => { 79 | let runner = cli.create_runner(cmd)?; 80 | runner.async_run(|config| { 81 | let PartialComponents { client, task_manager, import_queue, .. } = 82 | service::new_partial(&config)?; 83 | Ok((cmd.run(client, import_queue), task_manager)) 84 | }) 85 | }, 86 | Some(Subcommand::PurgeChain(cmd)) => { 87 | let runner = cli.create_runner(cmd)?; 88 | runner.sync_run(|config| cmd.run(config.database)) 89 | }, 90 | Some(Subcommand::Revert(cmd)) => { 91 | let runner = cli.create_runner(cmd)?; 92 | runner.async_run(|config| { 93 | let PartialComponents { client, task_manager, backend, .. } = 94 | service::new_partial(&config)?; 95 | Ok((cmd.run(client, backend, None), task_manager)) 96 | }) 97 | }, 98 | #[cfg(feature = "frame-benchmarking")] 99 | Some(Subcommand::Benchmark(cmd)) => { 100 | let runner = cli.create_runner(cmd)?; 101 | 102 | runner.sync_run(|config| { 103 | // This switch needs to be in the client, since the client decides 104 | // which sub-commands it wants to support. 105 | match cmd { 106 | BenchmarkCmd::Pallet(cmd) => { 107 | if !cfg!(feature = "runtime-benchmarks") { 108 | return Err( 109 | "Runtime benchmarking wasn't enabled when building the node. \ 110 | You can enable it with `--features runtime-benchmarks`." 
111 | .into(), 112 | ); 113 | } 114 | 115 | cmd.run::(config) 116 | }, 117 | BenchmarkCmd::Block(cmd) => { 118 | let PartialComponents { client, .. } = service::new_partial(&config)?; 119 | cmd.run(client) 120 | }, 121 | BenchmarkCmd::Storage(cmd) => { 122 | let PartialComponents { client, backend, .. } = 123 | service::new_partial(&config)?; 124 | let db = backend.expose_db(); 125 | let storage = backend.expose_storage(); 126 | 127 | cmd.run(config, client, db, storage) 128 | }, 129 | BenchmarkCmd::Overhead(_) => Err("Benchmark overhead not supported.".into()), 130 | BenchmarkCmd::Extrinsic(_) => Err("Benchmark extrinsic not supported.".into()), 131 | BenchmarkCmd::Machine(cmd) => { 132 | cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()) 133 | }, 134 | } 135 | }) 136 | }, 137 | #[cfg(feature = "try-runtime")] 138 | Some(Subcommand::TryRuntime(cmd)) => { 139 | let runner = cli.create_runner(cmd)?; 140 | runner.async_run(|config| { 141 | // we don't need any of the components of new_partial, just a runtime, or a task 142 | // manager to do `async_run`. 143 | let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); 144 | let task_manager = 145 | sc_service::TaskManager::new(config.tokio_handle.clone(), registry) 146 | .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; 147 | Ok(( 148 | cmd.run::(config), 149 | task_manager, 150 | )) 151 | }) 152 | }, 153 | #[cfg(not(feature = "try-runtime"))] 154 | Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ 155 | You can enable it with `--features try-runtime`." 
156 | .into()), 157 | None => { 158 | let runner = cli.create_runner(&cli.run)?; 159 | runner.run_node_until_exit(|config| async move { 160 | service::new_full(config, cli.finalize_delay_sec).map_err(sc_cli::Error::Service) 161 | }) 162 | }, 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /node/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod chain_spec; 2 | pub mod rpc; 3 | pub mod service; 4 | -------------------------------------------------------------------------------- /node/src/main.rs: -------------------------------------------------------------------------------- 1 | //! Substrate Node Template CLI library. 2 | #![warn(missing_docs)] 3 | 4 | mod chain_spec; 5 | #[macro_use] 6 | mod service; 7 | mod cli; 8 | mod command; 9 | mod rpc; 10 | 11 | fn main() -> sc_cli::Result<()> { 12 | command::run() 13 | } 14 | -------------------------------------------------------------------------------- /node/src/rpc.rs: -------------------------------------------------------------------------------- 1 | //! A collection of node-specific RPC methods. 2 | //! Substrate provides the `sc-rpc` crate, which defines the core RPC layer 3 | //! used by Substrate nodes. This file extends those RPC definitions with 4 | //! capabilities that are specific to this project's runtime configuration. 
5 | 6 | #![warn(missing_docs)] 7 | 8 | use std::sync::Arc; 9 | 10 | use futures::channel::mpsc::Sender; 11 | use jsonrpsee::RpcModule; 12 | use sp_runtime::traits::Block as BlockT; 13 | use swanky_runtime::{opaque::Block, AccountId, Balance, Hash, Index}; 14 | 15 | use sc_consensus_manual_seal::{ 16 | rpc::{ManualSeal, ManualSealApiServer}, 17 | EngineCommand, 18 | }; 19 | pub use sc_rpc_api::DenyUnsafe; 20 | use sc_transaction_pool_api::TransactionPool; 21 | use sp_api::ProvideRuntimeApi; 22 | use sp_block_builder::BlockBuilder; 23 | use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; 24 | 25 | /// Full client dependencies. 26 | pub struct FullDeps { 27 | /// The client instance to use. 28 | pub client: Arc, 29 | /// The backend instance to use. 30 | pub backend: Arc, 31 | /// Transaction pool instance. 32 | pub pool: Arc

, 33 | /// Whether to deny unsafe calls 34 | pub deny_unsafe: DenyUnsafe, 35 | /// A command stream to send authoring commands to manual seal consensus engine 36 | pub command_sink: Sender>, 37 | } 38 | 39 | /// Instantiate all full RPC extensions. 40 | pub fn create_full( 41 | deps: FullDeps, 42 | ) -> Result, Box> 43 | where 44 | C: ProvideRuntimeApi, 45 | C: HeaderBackend + HeaderMetadata + 'static, 46 | C: Send + Sync + 'static, 47 | C::Api: substrate_frame_rpc_system::AccountNonceApi, 48 | C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, 49 | C::Api: pallet_balances_rpc::BalancesRuntimeApi, 50 | C::Api: BlockBuilder, 51 | P: TransactionPool::Hash> + 'static, 52 | B: sc_client_api::backend::Backend + Send + Sync + 'static, 53 | P: TransactionPool + 'static, 54 | { 55 | use pallet_balances_rpc::{Balances, BalancesApiServer}; 56 | use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; 57 | use substrate_frame_rpc_system::{System, SystemApiServer}; 58 | 59 | let mut io = RpcModule::new(()); 60 | let FullDeps { client, backend, pool, deny_unsafe, command_sink } = deps; 61 | 62 | io.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; 63 | io.merge(TransactionPayment::new(client.clone()).into_rpc())?; 64 | io.merge(Balances::new(client.clone(), pool.clone()).into_rpc())?; 65 | 66 | // The final RPC extension receives commands for the manual seal consensus engine. 67 | io.merge(ManualSeal::new(client, backend, command_sink).into_rpc())?; 68 | 69 | Ok(io) 70 | } 71 | -------------------------------------------------------------------------------- /node/src/service.rs: -------------------------------------------------------------------------------- 1 | //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
2 | 3 | use futures::prelude::*; 4 | 5 | pub use sc_executor::NativeElseWasmExecutor; 6 | use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; 7 | use sc_telemetry::{Telemetry, TelemetryWorker}; 8 | use sc_transaction_pool_api::TransactionPool; 9 | use std::sync::Arc; 10 | use swanky_runtime::{self, opaque::Block, RuntimeApi}; 11 | // Our native executor instance. 12 | pub struct ExecutorDispatch; 13 | 14 | impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { 15 | /// Only enable the benchmarking host functions when we actually want to benchmark. 16 | #[cfg(feature = "runtime-benchmarks")] 17 | type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; 18 | /// Otherwise we only use the default Substrate host functions. 19 | #[cfg(not(feature = "runtime-benchmarks"))] 20 | type ExtendHostFunctions = (); 21 | 22 | fn dispatch(method: &str, data: &[u8]) -> Option> { 23 | swanky_runtime::api::dispatch(method, data) 24 | } 25 | 26 | fn native_version() -> sc_executor::NativeVersion { 27 | swanky_runtime::native_version() 28 | } 29 | } 30 | 31 | pub(crate) type FullClient = 32 | sc_service::TFullClient>; 33 | type FullBackend = sc_service::TFullBackend; 34 | type FullSelectChain = sc_consensus::LongestChain; 35 | 36 | #[allow(clippy::type_complexity)] 37 | pub fn new_partial( 38 | config: &Configuration, 39 | ) -> Result< 40 | sc_service::PartialComponents< 41 | FullClient, 42 | FullBackend, 43 | FullSelectChain, 44 | sc_consensus::DefaultImportQueue, 45 | sc_transaction_pool::FullPool, 46 | (Option,), 47 | >, 48 | ServiceError, 49 | > { 50 | let telemetry = config 51 | .telemetry_endpoints 52 | .clone() 53 | .filter(|x| !x.is_empty()) 54 | .map(|endpoints| -> Result<_, sc_telemetry::Error> { 55 | let worker = TelemetryWorker::new(16)?; 56 | let telemetry = worker.handle().new_telemetry(endpoints); 57 | Ok((worker, telemetry)) 58 | }) 59 | .transpose()?; 60 | 61 | let executor = 
sc_service::new_native_or_wasm_executor(config); 62 | 63 | let (client, backend, keystore_container, task_manager) = 64 | sc_service::new_full_parts::( 65 | &config, 66 | telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), 67 | executor, 68 | )?; 69 | let client = Arc::new(client); 70 | 71 | let telemetry = telemetry.map(|(worker, telemetry)| { 72 | task_manager.spawn_handle().spawn("telemetry", None, worker.run()); 73 | telemetry 74 | }); 75 | 76 | let select_chain = sc_consensus::LongestChain::new(backend.clone()); 77 | 78 | let transaction_pool = sc_transaction_pool::BasicPool::new_full( 79 | config.transaction_pool.clone(), 80 | config.role.is_authority().into(), 81 | config.prometheus_registry(), 82 | task_manager.spawn_essential_handle(), 83 | client.clone(), 84 | ); 85 | 86 | let import_queue = sc_consensus_manual_seal::import_queue( 87 | Box::new(client.clone()), 88 | &task_manager.spawn_essential_handle(), 89 | config.prometheus_registry(), 90 | ); 91 | 92 | Ok(sc_service::PartialComponents { 93 | client, 94 | backend, 95 | task_manager, 96 | import_queue, 97 | keystore_container, 98 | select_chain, 99 | transaction_pool, 100 | other: (telemetry,), 101 | }) 102 | } 103 | 104 | /// Builds a new service for a full client. 
105 | pub fn new_full( 106 | config: Configuration, 107 | finalize_delay_sec: Option, 108 | ) -> Result { 109 | let sc_service::PartialComponents { 110 | client, 111 | backend, 112 | mut task_manager, 113 | import_queue, 114 | keystore_container, 115 | select_chain, 116 | transaction_pool, 117 | other: (mut telemetry,), 118 | } = new_partial(&config)?; 119 | 120 | let net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); 121 | 122 | let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = 123 | sc_service::build_network(sc_service::BuildNetworkParams { 124 | config: &config, 125 | net_config, 126 | client: client.clone(), 127 | transaction_pool: transaction_pool.clone(), 128 | spawn_handle: task_manager.spawn_handle(), 129 | import_queue, 130 | block_announce_validator_builder: None, 131 | warp_sync_params: None, 132 | })?; 133 | 134 | if config.offchain_worker.enabled { 135 | sc_service::build_offchain_workers( 136 | &config, 137 | task_manager.spawn_handle(), 138 | client.clone(), 139 | network.clone(), 140 | ); 141 | } 142 | 143 | let prometheus_registry = config.prometheus_registry().cloned(); 144 | let (rpc_command_sink, rpc_commands_stream) = futures::channel::mpsc::channel(1000); 145 | 146 | let rpc_extensions_builder = { 147 | let client = client.clone(); 148 | let backend = backend.clone(); 149 | let pool = transaction_pool.clone(); 150 | 151 | Box::new(move |deny_unsafe, _| { 152 | let deps = crate::rpc::FullDeps { 153 | client: client.clone(), 154 | backend: backend.clone(), 155 | pool: pool.clone(), 156 | deny_unsafe, 157 | command_sink: rpc_command_sink.clone(), 158 | }; 159 | crate::rpc::create_full(deps).map_err(Into::into) 160 | }) 161 | }; 162 | 163 | let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { 164 | network: network.clone(), 165 | client: client.clone(), 166 | keystore: keystore_container.keystore(), 167 | task_manager: &mut task_manager, 168 | transaction_pool: 
transaction_pool.clone(), 169 | rpc_builder: rpc_extensions_builder, 170 | backend, 171 | system_rpc_tx, 172 | tx_handler_controller, 173 | sync_service: sync_service.clone(), 174 | config, 175 | telemetry: telemetry.as_mut(), 176 | })?; 177 | 178 | let proposer = sc_basic_authorship::ProposerFactory::new( 179 | task_manager.spawn_handle(), 180 | client.clone(), 181 | transaction_pool.clone(), 182 | prometheus_registry.as_ref(), 183 | telemetry.as_ref().map(|x| x.handle()), 184 | ); 185 | 186 | let pool_import_commands_stream = 187 | transaction_pool.clone().import_notification_stream().map(|_| { 188 | sc_consensus_manual_seal::EngineCommand::SealNewBlock { 189 | create_empty: false, 190 | finalize: false, 191 | parent_hash: None, 192 | sender: None, 193 | } 194 | }); 195 | 196 | let commands_stream = stream::select(rpc_commands_stream, pool_import_commands_stream); 197 | 198 | let params = sc_consensus_manual_seal::ManualSealParams { 199 | block_import: client.clone(), 200 | env: proposer, 201 | client: client.clone(), 202 | pool: transaction_pool, 203 | commands_stream, 204 | select_chain, 205 | consensus_data_provider: None, 206 | create_inherent_data_providers: move |_, ()| async move { 207 | Ok(sp_timestamp::InherentDataProvider::from_system_time()) 208 | }, 209 | }; 210 | 211 | task_manager.spawn_essential_handle().spawn_blocking( 212 | "instant-and-manual-seal", 213 | None, 214 | sc_consensus_manual_seal::run_manual_seal(params), 215 | ); 216 | 217 | if let Some(sec) = finalize_delay_sec { 218 | let delayed_finalize_params = sc_consensus_manual_seal::DelayedFinalizeParams { 219 | client, 220 | spawn_handle: task_manager.spawn_handle(), 221 | delay_sec: sec, 222 | }; 223 | 224 | task_manager.spawn_essential_handle().spawn_blocking( 225 | "delayed_finalize", 226 | None, 227 | sc_consensus_manual_seal::run_delayed_finalize(delayed_finalize_params), 228 | ); 229 | } 230 | 231 | network_starter.start_network(); 232 | Ok(task_manager) 233 | } 234 | 
-------------------------------------------------------------------------------- /runtime/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "swanky-runtime" 3 | version = "1.7.0" 4 | description = "A fresh FRAME-based Substrate runtime, ready for hacking." 5 | authors = ["Astar Network"] 6 | homepage = "https://astar.network" 7 | edition = "2021" 8 | license = "Unlicense" 9 | publish = false 10 | repository = "https://github.com/shunsukew/swanky-node/" 11 | build = "build.rs" 12 | 13 | [package.metadata.docs.rs] 14 | targets = ["x86_64-unknown-linux-gnu"] 15 | 16 | [dependencies] 17 | codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } 18 | scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } 19 | 20 | frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 21 | frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 22 | frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 23 | frame-try-runtime = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", optional = true } 24 | pallet-assets = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 25 | pallet-balances = { path = "../frame/balances", default-features = false } 26 | pallet-insecure-randomness-collective-flip = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 27 | pallet-sudo = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 28 | pallet-timestamp = { default-features = false, git = 
"https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 29 | pallet-transaction-payment = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 30 | pallet-uniques = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 31 | pallet-utility = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 32 | sp-api = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 33 | sp-block-builder = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 34 | sp-core = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 35 | sp-inherents = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 36 | sp-offchain = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 37 | sp-runtime = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 38 | sp-session = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 39 | sp-std = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 40 | sp-transaction-pool = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 41 | sp-version = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 42 | 43 | # Contracts specific packages 44 | pallet-contracts = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 45 | pallet-contracts-primitives = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false } 46 | 47 | # dApps staking 48 | pallet-dapps-staking = { git = "https://github.com/AstarNetwork/Astar", tag = "v5.32.1", default-features = false } 49 | 50 | # pallet-asset chain-extension 51 | pallet-chain-extension-assets = { git = "https://github.com/AstarNetwork/Astar", tag = "v5.32.1", default-features = false } 52 | 53 | # Used for the node template's RPCs 54 | frame-system-rpc-runtime-api = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 55 | pallet-balances-rpc-runtime-api = { path = "../frame/balances/rpc/runtime-api", default-features = false } 56 | pallet-transaction-payment-rpc-runtime-api = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 57 | 58 | # Used for runtime benchmarking 59 | frame-benchmarking = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", optional = true } 60 | frame-system-benchmarking = { default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43", optional = true } 61 | hex-literal = { version = "0.3.4", optional = true } 62 | log = { version = "0.4.17", optional = true } 63 | 64 | [build-dependencies] 65 | substrate-wasm-builder = { version = "5.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.43" } 66 | 67 | [features] 68 | default = [ 69 | "std", 70 | ] 71 | std = [ 72 | "codec/std", 73 | "scale-info/std", 74 | "frame-executive/std", 75 | "frame-support/std", 76 | "frame-system-rpc-runtime-api/std", 77 | "frame-system/std", 78 | "pallet-assets/std", 79 | "pallet-balances/std", 80 | "pallet-contracts-primitives/std", 81 | "pallet-contracts/std", 82 | "pallet-dapps-staking/std", 83 | "pallet-insecure-randomness-collective-flip/std", 84 | "pallet-sudo/std", 85 | 
"pallet-timestamp/std", 86 | "pallet-transaction-payment-rpc-runtime-api/std", 87 | "pallet-balances-rpc-runtime-api/std", 88 | "pallet-transaction-payment/std", 89 | "pallet-utility/std", 90 | "sp-api/std", 91 | "sp-block-builder/std", 92 | "sp-core/std", 93 | "sp-inherents/std", 94 | "sp-offchain/std", 95 | "sp-runtime/std", 96 | "sp-session/std", 97 | "sp-std/std", 98 | "sp-transaction-pool/std", 99 | "sp-version/std", 100 | "pallet-chain-extension-assets/std", 101 | ] 102 | runtime-benchmarks = [ 103 | "frame-benchmarking/runtime-benchmarks", 104 | "frame-support/runtime-benchmarks", 105 | "frame-system-benchmarking/runtime-benchmarks", 106 | "frame-system/runtime-benchmarks", 107 | "hex-literal", 108 | "pallet-balances/runtime-benchmarks", 109 | "pallet-dapps-staking/runtime-benchmarks", 110 | "pallet-timestamp/runtime-benchmarks", 111 | "sp-runtime/runtime-benchmarks", 112 | "pallet-contracts/runtime-benchmarks", 113 | "pallet-assets/runtime-benchmarks", 114 | ] 115 | try-runtime = [ 116 | "log", 117 | "frame-system/try-runtime", 118 | "frame-executive/try-runtime", 119 | "frame-try-runtime", 120 | "frame-system/try-runtime", 121 | "pallet-balances/try-runtime", 122 | "pallet-insecure-randomness-collective-flip/try-runtime", 123 | "pallet-dapps-staking/try-runtime", 124 | "pallet-sudo/try-runtime", 125 | "pallet-timestamp/try-runtime", 126 | "pallet-transaction-payment/try-runtime", 127 | "pallet-assets/try-runtime", 128 | "pallet-contracts/try-runtime", 129 | "pallet-uniques/try-runtime", 130 | ] 131 | -------------------------------------------------------------------------------- /runtime/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | #[cfg(feature = "std")] 3 | { 4 | substrate_wasm_builder::WasmBuilder::new() 5 | .with_current_project() 6 | .export_heap_base() 7 | .import_memory() 8 | .build(); 9 | } 10 | } 11 | -------------------------------------------------------------------------------- 
/runtime/src/chain_extensions.rs: -------------------------------------------------------------------------------- 1 | //! 2 | use super::Runtime; 3 | pub use pallet_chain_extension_assets::AssetsExtension; 4 | /// Registered WASM contracts chain extensions. 5 | use pallet_contracts::chain_extension::RegisteredChainExtension; 6 | 7 | // Following impls defines chain extension IDs. 8 | 9 | impl RegisteredChainExtension for AssetsExtension { 10 | const ID: u16 = 2; 11 | } 12 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.76.0" 3 | components = [ "rustfmt", "clippy" ] 4 | targets = [ "wasm32-unknown-unknown"] 5 | profile = "minimal" -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Basic 2 | hard_tabs = true 3 | max_width = 100 4 | use_small_heuristics = "Max" 5 | # Imports 6 | imports_granularity = "Crate" 7 | reorder_imports = true 8 | # Consistency 9 | newline_style = "Unix" 10 | # Format comments 11 | comment_width = 100 12 | wrap_comments = true 13 | # Misc 14 | chain_width = 80 15 | spaces_around_ranges = false 16 | binop_separator = "Back" 17 | reorder_impl_items = false 18 | match_arm_leading_pipes = "Preserve" 19 | match_arm_blocks = false 20 | match_block_trailing_comma = true 21 | trailing_comma = "Vertical" 22 | trailing_semicolon = false 23 | use_field_init_shorthand = true 24 | -------------------------------------------------------------------------------- /scripts/docker_run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script is meant to be run on Unix/Linux based systems 3 | set -e 4 | 5 | echo "*** Start Swanky node ***" 6 | 7 | cd $(dirname ${BASH_SOURCE[0]})/.. 
8 | 9 | docker-compose down --remove-orphans 10 | docker-compose run --rm --service-ports dev $@ 11 | -------------------------------------------------------------------------------- /scripts/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script is meant to be run on Unix/Linux based systems 3 | set -e 4 | 5 | echo "*** Initializing WASM build environment" 6 | 7 | if [ -z $CI_PROJECT_NAME ] ; then 8 | rustup update nightly 9 | rustup update stable 10 | fi 11 | 12 | rustup target add wasm32-unknown-unknown --toolchain nightly 13 | -------------------------------------------------------------------------------- /taplo.toml: -------------------------------------------------------------------------------- 1 | ## https://taplo.tamasfe.dev/configuration/file.html 2 | 3 | include = ["**/Cargo.toml"] 4 | exclude = [".maintain/**/Cargo.toml", "target/**/Cargo.toml"] 5 | 6 | [formatting] 7 | # Align consecutive entries vertically. 8 | align_entries = false 9 | # Append trailing commas for multi-line arrays. 10 | array_trailing_comma = true 11 | # Expand arrays to multiple lines that exceed the maximum column width. 12 | array_auto_expand = false 13 | # Collapse arrays that don't exceed the maximum column width and don't contain comments. 14 | array_auto_collapse = false 15 | # Omit white space padding from single-line arrays 16 | compact_arrays = true 17 | # Omit white space padding from the start and end of inline tables. 18 | compact_inline_tables = false 19 | # Maximum column width in characters, affects array expansion and collapse, this doesn't take whitespace into account. 20 | # Note that this is not set in stone, and works on a best-effort basis. 21 | column_width = 160 22 | # Indent based on tables and arrays of tables and their subtables, subtables out of order are not indented. 
23 | indent_tables = false 24 | # The substring that is used for indentation, should be tabs or spaces (but technically can be anything). 25 | indent_string = ' ' 26 | # Add trailing newline at the end of the file if not present. 27 | trailing_newline = true 28 | # Alphabetically reorder keys that are not separated by empty lines. 29 | reorder_keys = false 30 | # Maximum amount of allowed consecutive blank lines. This does not affect the whitespace at the end of the document, as it is always stripped. 31 | allowed_blank_lines = 1 32 | # Use CRLF for line endings. 33 | crlf = false 34 | 35 | [[rule]] 36 | keys = ["dependencies", "dev-dependencies", "build-dependencies"] 37 | formatting = { reorder_keys = true } 38 | --------------------------------------------------------------------------------