├── .github └── workflows │ └── pr.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── bridge └── dai-home │ ├── .babelrc │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── contracts │ ├── Migrations.sol │ ├── helpers │ │ └── ValidatorsOperations.sol │ ├── impl │ │ └── ValidatorsOperationsImpl.sol │ └── main │ │ └── DAIBridge.sol │ ├── migrations │ ├── 1_initial_migration.js │ └── 2_deploy_bridge.js │ ├── package-lock.json │ ├── package.json │ ├── test │ ├── ValidatorsOperations.js │ └── helpers │ │ ├── EVMRevert.js │ │ ├── EVMThrow.js │ │ ├── advanceToBlock.js │ │ ├── assertJump.js │ │ ├── assertRevert.js │ │ ├── decodeLogs.js │ │ ├── ether.js │ │ ├── expectEvent.js │ │ ├── expectThrow.js │ │ ├── increaseTime.js │ │ ├── latestTime.js │ │ ├── merkleTree.js │ │ ├── sendTransaction.js │ │ ├── sign.js │ │ ├── toPromise.js │ │ └── transactionMined.js │ └── truffle-config.js ├── cli ├── Cargo.toml ├── build.rs ├── res │ ├── akropolisos.json │ └── akropolisos_syracuse.json └── src │ ├── browser.rs │ ├── chain_spec.rs │ ├── cli.rs │ ├── command.rs │ ├── factory_impl.rs │ ├── lib.rs │ └── service.rs ├── executor ├── Cargo.toml ├── benches │ └── bench.rs ├── src │ └── lib.rs └── tests │ ├── basic.rs │ ├── common.rs │ ├── fees.rs │ └── submit_transaction.rs ├── inspect ├── Cargo.toml └── src │ ├── cli.rs │ ├── command.rs │ └── lib.rs ├── rpc ├── Cargo.toml └── src │ └── lib.rs ├── runtime ├── Cargo.lock ├── Cargo.toml ├── build.rs ├── src │ ├── bridge.rs │ ├── constants.rs │ ├── dao.rs │ ├── impls.rs │ ├── lib.rs │ ├── marketplace.rs │ ├── price_oracle.rs │ ├── token.rs │ └── types.rs └── wasm │ ├── Cargo.lock │ ├── Cargo.toml │ ├── build.sh │ └── src │ └── lib.rs ├── scripts ├── build.sh └── init.sh ├── src └── main.rs ├── testing ├── Cargo.toml ├── benches │ └── import.rs └── src │ ├── bench.rs │ ├── client.rs │ ├── genesis.rs │ ├── keyring.rs │ └── lib.rs └── transaction-factory ├── Cargo.toml └── src └── lib.rs /.github/workflows/pr.yml: -------------------------------------------------------------------------------- 1 | name: PR 2 | on: [pull_request] 3 | 4 | jobs: 5 | build: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@v1 9 | - name: Install latest nightly and wasm utils 10 | run: sh scripts/init.sh 11 | - name: Build runtime 12 | run: cargo build --verbose 13 | - name: Runtime tests 14 | run: cargo test -p akropolisos-runtime 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | **/target/ 4 | 5 | # These are backup files generated by rustfmt 6 | **/*.rs.bk 7 | 8 | # Ignore swap files 9 | *.swp 10 | 11 | # Bridge frontend 12 | /bridge/frontend/.cache 13 | /bridge/frontend/node_modules 14 | /bridge/frontend/dist 15 | 16 | # Code editors 17 | .vscode 18 | 19 | # local helper scripts 20 | *.sh 21 | 22 | # justfile 23 | justfile -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ['Akropolis '] 3 | edition = '2018' 4 | name = 'akropolisos-node' 5 | version = '0.3.3' 6 | 7 | [dependencies] 8 | error-chain = '0.12' 9 | exit-future = '0.1' 10 | futures = '0.3.1' 11 | hex-literal = "0.2.1" 12 | log = '0.4.8' 13 | parity-codec = '3.2' 14 | parking_lot = '0.9.0' 15 | tokio = '0.1.22' 16 | jsonrpc-core = 
"14.0.3" 17 | trie-root = '0.15.2' 18 | js-sys = '0.3.35' 19 | chrono = '0.4.7' 20 | #dotenv = "0.15.0" 21 | structopt = { version = "0.3.8", optional = true } 22 | serde = { version = "1.0.102", features = ["derive"] } 23 | codec = { package = "parity-scale-codec", version = "1.1.0", default-features = false } 24 | node-cli = { path = "./cli", features = ["cli"] } 25 | akropolisos-runtime = { path = 'runtime' } 26 | 27 | [profile.release] 28 | panic = 'unwind' 29 | 30 | [build-dependencies] 31 | vergen = '3.0.4' 32 | chrono = '0.4.10' 33 | 34 | [dependencies.benchmarking] 35 | git = 'https://github.com/paritytech/substrate.git' 36 | package = 'frame-benchmarking-cli' 37 | version = '2.0.0-alpha.5' 38 | optional = true 39 | 40 | [dependencies.grandpa-primitives] 41 | git = 'https://github.com/paritytech/substrate.git' 42 | package = 'sp-finality-grandpa' 43 | version = '2.0.0-alpha.5' 44 | 45 | [dependencies.sp-core] 46 | git = 'https://github.com/paritytech/substrate.git' 47 | version = '2.0.0-alpha.5' 48 | 49 | [dependencies.sp-runtime] 50 | git = 'https://github.com/paritytech/substrate.git' 51 | version = '2.0.0-alpha.5' 52 | 53 | [dependencies.sp-transaction-pool] 54 | git = 'https://github.com/paritytech/substrate.git' 55 | version = '2.0.0-alpha.5' 56 | 57 | [dependencies.sp-timestamp] 58 | git = 'https://github.com/paritytech/substrate.git' 59 | version = '2.0.0-alpha.5' 60 | 61 | [dependencies.sp-finality-tracker] 62 | git = 'https://github.com/paritytech/substrate.git' 63 | version = '2.0.0-alpha.5' 64 | 65 | [dependencies.sp-inherents] 66 | git = 'https://github.com/paritytech/substrate.git' 67 | version = '2.0.0-alpha.5' 68 | 69 | [dependencies.sp-keyring] 70 | git = 'https://github.com/paritytech/substrate.git' 71 | version = '2.0.0-alpha.5' 72 | 73 | [dependencies.sp-io] 74 | git = 'https://github.com/paritytech/substrate.git' 75 | version = '2.0.0-alpha.5' 76 | 77 | [dependencies.sp-consensus] 78 | git = 'https://github.com/paritytech/substrate.git' 79 | version = '0.8.0-alpha.5' 80 | 81 | [dependencies.grandpa] 82 | git = 'https://github.com/paritytech/substrate.git' 83 | package = 'sc-finality-grandpa' 84 | version = '0.8.0-alpha.5' 85 | 86 | [dependencies.sc-authority-discovery] 87 | git = 'https://github.com/paritytech/substrate.git' 88 | version = '0.8.0-alpha.5' 89 | 90 | [dependencies.sc-basic-authorship] 91 | git = 'https://github.com/paritytech/substrate.git' 92 | version = '0.8.0-alpha.5' 93 | 94 | [dependencies.sc-consensus-babe] 95 | git = 'https://github.com/paritytech/substrate.git' 96 | version = '0.8.0-alpha.5' 97 | 98 | [dependencies.sc-client-db] 99 | default-features = false 100 | git = 'https://github.com/paritytech/substrate.git' 101 | version = '0.8.0-alpha.5' 102 | 103 | [dependencies.sc-client-api] 104 | default-features = false 105 | git = 'https://github.com/paritytech/substrate.git' 106 | version = '2.0.0-alpha.5' 107 | 108 | [dependencies.sc-offchain] 109 | git = 'https://github.com/paritytech/substrate.git' 110 | version = '2.0.0-alpha.5' 111 | 112 | [dependencies.sc-rpc] 113 | git = 'https://github.com/paritytech/substrate.git' 114 | version = '2.0.0-alpha.5' 115 | 116 | [dependencies.sc-cli] 117 | git = 'https://github.com/paritytech/substrate.git' 118 | version = '0.8.0-alpha.5' 119 | 120 | [dependencies.sc-client] 121 | git = 'https://github.com/paritytech/substrate.git' 122 | version = '0.8.0-alpha.5' 123 | 124 | [dependencies.sc-chain-spec] 125 | git = 'https://github.com/paritytech/substrate.git' 126 | version = '2.0.0-alpha.5' 127 | 128 | 
[dependencies.sc-executor] 129 | git = 'https://github.com/paritytech/substrate.git' 130 | version = '0.8.0-alpha.5' 131 | 132 | [dependencies.sc-network] 133 | git = 'https://github.com/paritytech/substrate.git' 134 | version = '0.8.0-alpha.5' 135 | 136 | [dependencies.sc-service] 137 | git = 'https://github.com/paritytech/substrate.git' 138 | version = '0.8.0-alpha.5' 139 | 140 | [dependencies.sc-transaction-pool] 141 | git = 'https://github.com/paritytech/substrate.git' 142 | version = '2.0.0-alpha.5' 143 | 144 | [dependencies.telemetry] 145 | git = 'https://github.com/paritytech/substrate.git' 146 | package = 'sc-telemetry' 147 | version = '2.0.0-alpha.5' 148 | 149 | [dependencies.pallet-contracts] 150 | default-features = false 151 | git = 'https://github.com/paritytech/substrate.git' 152 | version = '2.0.0-alpha.5' 153 | 154 | [dependencies.pallet-transaction-payment] 155 | default-features = false 156 | git = 'https://github.com/paritytech/substrate.git' 157 | version = '2.0.0-alpha.5' 158 | 159 | [dependencies.pallet-im-online] 160 | default-features = false 161 | git = 'https://github.com/paritytech/substrate.git' 162 | version = '2.0.0-alpha.5' 163 | 164 | [dependencies.system] 165 | default-features = false 166 | git = 'https://github.com/paritytech/substrate.git' 167 | package = 'frame-system' 168 | version = '2.0.0-alpha.5' 169 | 170 | [dependencies.node-transaction-factory] 171 | git = 'https://github.com/paritytech/substrate.git' 172 | version = "0.8.0-alpha.5" 173 | 174 | [workspace] 175 | members = ['runtime', 'cli', 'rpc', 'executor'] 176 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Akropolis.io 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # akropolisos-chain-node 2 | 3 | A new SRML-based Substrate node, ready for hacking. 
4 | 5 | # Building 6 | 7 | Install Rust: 8 | 9 | ```bash 10 | curl https://sh.rustup.rs -sSf | sh 11 | ``` 12 | 13 | Install required tools: 14 | 15 | ```bash 16 | ./scripts/init.sh 17 | ``` 18 | 19 | Build the WebAssembly binary: 20 | 21 | ```bash 22 | ./scripts/build.sh 23 | ``` 24 | 25 | Build all native code: 26 | 27 | ```bash 28 | cargo build 29 | ``` 30 | 31 | # Run 32 | 33 | You can start a full node in the AkropolisOS chain with: 34 | 35 | ```bash 36 | cargo run -- --name node-name 37 | ``` 38 | 39 | You can start a validator node in the AkropolisOS chain with: 40 | 41 | ```bash 42 | cargo run -- --name node-name --validator 43 | ``` 44 | 45 | # Development 46 | 47 | You can start a development chain with: 48 | 49 | ```bash 50 | cargo run -- --dev 51 | ``` 52 | 53 | Detailed logs may be shown by running the node with the following environment variables set: `RUST_LOG=debug RUST_BACKTRACE=1 cargo run -- --dev`. 54 | 55 | If you want to see the multi-node consensus algorithm in action locally, you can create a local testnet with two validator nodes for Alice and Bob, who are the initial authorities of the genesis chain and have been endowed with testnet units. Give each node a name and expose them so they are listed on the Polkadot [telemetry site](https://telemetry.polkadot.io/#/Local%20Testnet). You'll need two terminal windows open. 56 | 57 | We'll start Alice's substrate node first on the default TCP port 30333 with her chain database stored locally at `/tmp/alice`. The bootnode ID of her node is `QmQZ8TjTqeDj3ciwr93EJ95hxfDsb9pEYDizUAbWpigtQN`, which is generated from the `--node-key` value that we specify below: 58 | 59 | ```bash 60 | cargo run -- \ 61 | --base-path /tmp/alice \ 62 | --chain=local \ 63 | --alice \ 64 | --node-key 0000000000000000000000000000000000000000000000000000000000000001 \ 65 | --telemetry-url ws://telemetry.polkadot.io:1024 \ 66 | --validator 67 | ``` 68 | 69 | In the second terminal, we'll start Bob's substrate node on a different TCP port, 30334, with his chain database stored locally at `/tmp/bob`. We'll specify a value for the `--bootnodes` option to connect his node to Alice's bootnode ID on TCP port 30333: 70 | 71 | ```bash 72 | cargo run -- \ 73 | --base-path /tmp/bob \ 74 | --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/QmQZ8TjTqeDj3ciwr93EJ95hxfDsb9pEYDizUAbWpigtQN \ 75 | --chain=local \ 76 | --bob \ 77 | --port 30334 \ 78 | --telemetry-url ws://telemetry.polkadot.io:1024 \ 79 | --validator 80 | ``` 81 | 82 | Additional CLI usage options are available and may be shown by running `cargo run -- --help`. 83 | 84 | 85 | # How it works 86 | 87 | ## Account creation 88 | 89 | 90 | This guide will walk you through how to create an account and how to connect to the AkropolisOS Chain testnet. 91 | 92 | 1) Open the [Akropolis UI](https://wallet.akropolis.io) (it's a PolkadotJS app working with Substrate v1.0). You can also use the [Polkadot UI](https://polkadot.js.org/apps/#/explorer). 93 | 94 | 2) Go to *Settings* and open the *Developer* tab. Insert the description of types below into the textbox (copy & paste from here) and save it. 
95 | 96 | 97 | ```json 98 | 99 | { 100 | "Count": "u64", 101 | "DaoId": "u64", 102 | "MemberId": "u64", 103 | "ProposalId": "u64", 104 | "TokenBalance": "u128", 105 | "VotesCount": "MemberId", 106 | "TokenId": "u32", 107 | "Days": "u32", 108 | "Rate": "u32", 109 | "Dao": { 110 | "address": "AccountId", 111 | "name": "Text", 112 | "description": "Bytes", 113 | "founder": "AccountId" 114 | }, 115 | "Action": { 116 | "_enum": { 117 | "EmptyAction": null, 118 | "AddMember": "AccountId", 119 | "RemoveMember": "AccountId", 120 | "GetLoan": "(Vec<u8>, Days, Rate, Balance)", 121 | "Withdraw": "(AccountId, Balance, Vec<u8>)", 122 | "ChangeTimeout": "(DaoId, BlockNumber)", 123 | "ChangeMaximumNumberOfMembers": "(DaoId, MemberId)" 124 | } 125 | }, 126 | "Proposal": { 127 | "dao_id": "DaoId", 128 | "action": "Action", 129 | "open": "bool", 130 | "accepted": "bool", 131 | "voting_deadline": "BlockNumber", 132 | "yes_count": "VotesCount", 133 | "no_count": "VotesCount" 134 | }, 135 | "Token": { 136 | "token_id": "u32", 137 | "decimals": "u16", 138 | "symbol": "Vec<u8>" 139 | }, 140 | "Limits": { 141 | "max_tx_value": "u128", 142 | "day_max_limit": "u128", 143 | "day_max_limit_for_one_address": "u128", 144 | "max_pending_tx_limit": "u128", 145 | "min_tx_value": "u128" 146 | }, 147 | "Status": { 148 | "_enum": [ 149 | "Revoked", 150 | "Pending", 151 | "PauseTheBridge", 152 | "ResumeTheBridge", 153 | "UpdateValidatorSet", 154 | "UpdateLimits", 155 | "Deposit", 156 | "Withdraw", 157 | "Approved", 158 | "Canceled", 159 | "Confirmed" 160 | ] 161 | }, 162 | "Kind": { 163 | "_enum": [ 164 | "Transfer", 165 | "Limits", 166 | "Validator", 167 | "Bridge" 168 | ] 169 | }, 170 | "TransferMessage": { 171 | "message_id": "H256", 172 | "eth_address": "H160", 173 | "substrate_address": "AccountId", 174 | "amount": "TokenBalance", 175 | "status": "Status", 176 | "direction": "Status" 177 | }, 178 | "LimitMessage": { 179 | "id": "H256", 180 | "limits": "Limits", 181 | "status": "Status" 182 | }, 183 | "BridgeMessage": { 184 | "message_id": "H256", 185 | "account": "AccountId", 186 | "status": "Status", 187 | "action": "Status" 188 | }, 189 | "ValidatorMessage": { 190 | "message_id": "H256", 191 | "quorum": "u64", 192 | "accounts": "Vec<AccountId>", 193 | "status": "Status", 194 | "action": "Status" 195 | }, 196 | "BridgeTransfer": { 197 | "transfer_id": "ProposalId", 198 | "message_id": "H256", 199 | "open": "bool", 200 | "votes": "MemberId", 201 | "kind": "Kind" 202 | } 203 | } 204 | 205 | 206 | 207 | ``` 208 | 209 | 210 | 3) If you use the [Akropolis UI](https://wallet.akropolis.io), skip this step and go to step 4. If you use the [Polkadot UI](https://polkadot.js.org/apps/#/explorer), go to the *General* tab in *Settings*, choose *custom endpoint* (top right corner), and set: 211 | 212 | - remote node/endpoint to connect to: wss://node1-chain.akropolis.io or wss://node2-chain.akropolis.io, 213 | 214 | - address prefix: Default for the connected node, 215 | 216 | - default interface theme: Substrate, 217 | 218 | - interface operation mode: Fully featured. 219 | 220 | Then push the *Save&Reload* button. 221 | 222 | 4) Create an account: 223 | 224 | - Navigate to the *Accounts* tab and click on the *Add account* button. 225 | 226 | - Enter a name for your account and create a secure password. This password will be used to decrypt your account. 227 | 228 | - Click *Save* and *Create and backup account*. 229 | 230 | - Save your encrypted keystore locally. 231 | 232 | - The account now appears in your *Accounts* tab and is backed up to the keystore you just saved. 233 | 234 | 5) Fill in [the form](https://forms.gle/QjcccF6WWxSrbe9Z7) to get test AKRO tokens. 
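If you prefer to talk to the chain from a script instead of the UI, the same type definitions have to be registered with the client. The following is a minimal sketch, assuming the `@polkadot/api` JavaScript client; `types.json` is a hypothetical local file holding the JSON object from step 2, and the endpoint is one of the nodes listed in step 3.

```js
// Minimal connection sketch (assumes `npm install @polkadot/api`).
// `types.json` is a hypothetical local copy of the custom types shown in step 2.
const { ApiPromise, WsProvider } = require('@polkadot/api');
const types = require('./types.json');

async function main () {
  // Register the custom types so the client can decode chain state and events.
  const api = await ApiPromise.create({
    provider: new WsProvider('wss://node1-chain.akropolis.io'),
    types,
  });

  const [chain, nodeVersion] = await Promise.all([
    api.rpc.system.chain(),
    api.rpc.system.version(),
  ]);
  console.log(`Connected to ${chain} (node version ${nodeVersion})`);
}

main().catch(console.error).finally(() => process.exit());
```

Keeping the types in one local file and reusing it both for the UI and for scripts avoids the two copies drifting apart.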
235 | 236 | ## Staking 237 | 238 | This guide will walk you through how to nominate your AKROs to a validator node so that you can take part in the staking system. 239 | 240 | We will assume that you are starting with two fresh accounts. Click [here](https://wiki.polkadot.network/en/latest/polkadot/learn/staking/#accounts) to learn more about what stash and controller accounts mean. 241 | 242 | 1) The first step is to create two accounts by going to the *Accounts* tab. Make sure to use *stash* and *controller* in the names of your accounts to identify them easily. 243 | 244 | 2) Once you've created your accounts you will need to acquire some AKROs. Each of your accounts should have at least 150 milliAKROs to cover the existential deposit and transaction fees. 245 | 246 | To nominate and validate, follow [these instructions](https://wiki.polkadot.network/en/latest/polkadot/node/guides/how-to-nominate/#nominating). 247 | 248 | 249 | 250 | ## Working with DAOs 251 | 252 | 253 | ### Creation of DAOs 254 | 255 | To create a DAO you will need an account with some AKROs. 256 | 257 | 1) Go to the *Extrinsics* tab and select your account address in *using the selected account*. 258 | 259 | 2) Select "dao" in *submit the following extrinsic*. 260 | 261 | 3) Insert the *name* of the new DAO and its *description* in HEX format. Use [this utility](https://www.rapidtables.com/convert/number/ascii-to-hex.html) to convert ASCII symbols to HEX (please remove space symbols); for example, "my_dao" becomes `6d795f64616f`. The DAO name may contain only the symbols "a" - "z", "A" - "Z", "0" - "9", "_" and "-". The name must be between 10 and 255 symbols long, and the description between 10 and 4096 symbols. 262 | 263 | 4) Click the *Submit* button. 264 | 265 | After the DAO is created you will see the DAO page with a minimal balance. 266 | 267 | 268 | 5) You can see DAO stats in the *Chain state* tab. Select *dao* in *selected state query* and select what kind of data you want to get: 269 | 270 | - daosCount(): the number of DAOs 271 | 272 | - daos(DaoId): information about a DAO. DaoId is a number, starting from 0. 273 | 274 | - membersCount(): the number of members in a DAO 275 | 276 | - members(DaoId, MemberId): information about a DAO member, where DaoId and MemberId are numeric identifiers. 277 | 278 | ### Add new members to DAO 279 | 280 | Adding new members to a DAO works through voting. To start a vote you make a proposal to add a candidate. The candidate needs an account with some AKROs, and this account must not already be a member of the DAO when the proposal is made. 281 | 282 | 1) Go to the *Extrinsics* tab and insert the candidate's address in "using the selected account", select "dao" in "submit the following extrinsic" and the "proposeToAddMemeber(dao_id)" function. Then insert the dao id and click "Submit Transaction". 283 | 284 | 2) You can check the status of the proposal in the *Chain state* tab. Select *dao* in *selected state query* (a scripted version of the same queries is sketched after this list): 285 | 286 | - daoProposalsCount(DaoId) shows the number of existing proposals 287 | 288 | - daoProposals(DaoId, ProposalId) shows the status of proposal ProposalId in DAO DaoId: open: true/false, voting_deadline - the block number when voting is over, yes_count & no_count - the number of DAO members who voted yes or no on the proposal. 289 | 
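The proposal queries above can also be issued from a script. This is a minimal sketch, assuming the `@polkadot/api` client set up as in the connection example earlier, and assuming the storage items listed above are exposed under `api.query.dao.*`; the DAO id used here is just a placeholder.

```js
// Sketch of the chain-state queries listed above (assumes @polkadot/api and that
// the runtime exposes daoProposalsCount / daoProposals under the `dao` module).
const { ApiPromise, WsProvider } = require('@polkadot/api');
const types = require('./types.json'); // hypothetical local copy of the custom types

async function checkProposals (daoId) {
  const api = await ApiPromise.create({
    provider: new WsProvider('wss://node1-chain.akropolis.io'),
    types,
  });

  // daoProposalsCount(DaoId): how many proposals this DAO has seen so far.
  const count = await api.query.dao.daoProposalsCount(daoId);
  console.log(`DAO ${daoId} has ${count.toString()} proposal(s)`);

  // daoProposals(DaoId, ProposalId): open flag, voting_deadline, yes/no counts.
  for (let proposalId = 0; proposalId < count.toNumber(); proposalId++) {
    const proposal = await api.query.dao.daoProposals(daoId, proposalId);
    console.log(`proposal ${proposalId}:`, proposal.toString());
  }
}

checkProposals(0).catch(console.error).finally(() => process.exit());
```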
290 | ### Remove member from DAO 291 | 292 | Excluding a DAO member happens through voting. Only existing DAO members can be removed from a DAO. If a DAO has only one member, that member can't be removed. 293 | 294 | 1) Go to the *Extrinsics* tab and insert the candidate's address in "using the selected account", select "dao" in "submit the following extrinsic" and the "proposeToRemoveMemeber(dao_id)" function. Then insert the dao id and click "Submit Transaction". 295 | 296 | 2) You can check the status of the proposal in the *Chain state* tab. Select *dao* in *selected state query*: 297 | 298 | - daoProposalsCount(DaoId) shows the number of existing proposals 299 | 300 | - daoProposals(DaoId, ProposalId) shows the status of proposal ProposalId in DAO DaoId: open: true/false, voting_deadline - the block number when voting is over, yes_count & no_count - the number of DAO members who voted yes or no on the proposal. 301 | 302 | 303 | 304 | ### Voting 305 | 306 | Only DAO members can take part in voting (one vote per proposal). 307 | 308 | To take part in voting, go to the *Extrinsics* tab and insert your address in "using the selected account", select "dao" in "submit the following extrinsic" and the "vote(dao_id, proposal_id, vote)" function, where vote is a boolean (Yes/No). Then insert the dao id and click "Submit Transaction". 309 | -------------------------------------------------------------------------------- /bridge/dai-home/.babelrc: -------------------------------------------------------------------------------- 1 | { 2 | "presets": ["flow", "env"] 3 | } 4 | -------------------------------------------------------------------------------- /bridge/dai-home/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | .env 3 | build 4 | -------------------------------------------------------------------------------- /bridge/dai-home/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Akropolis.io 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /bridge/dai-home/README.md: -------------------------------------------------------------------------------- 1 | # akropolis-vesting 2 | Akropolis Token Vesting Contract 3 | -------------------------------------------------------------------------------- /bridge/dai-home/contracts/Migrations.sol: -------------------------------------------------------------------------------- 1 | pragma solidity >=0.4.21 <0.6.0; 2 | 3 | contract Migrations { 4 | address public owner; 5 | uint public last_completed_migration; 6 | 7 | constructor() public { 8 | owner = msg.sender; 9 | } 10 | 11 | modifier restricted() { 12 | if (msg.sender == owner) _; 13 | } 14 | 15 | function setCompleted(uint completed) public restricted { 16 | last_completed_migration = completed; 17 | } 18 | 19 | function upgrade(address new_address) public restricted { 20 | Migrations upgraded = Migrations(new_address); 21 | upgraded.setCompleted(last_completed_migration); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /bridge/dai-home/contracts/helpers/ValidatorsOperations.sol: -------------------------------------------------------------------------------- 1 | /* 2 | License: MIT 3 | Copyright Bitclave, 2018 4 | It's modified contract ValidatorOperations from https://github.com/bitclave/ValidatorOperations 5 | */ 6 | 7 | pragma solidity ^0.5.9; 8 | 9 | import "openzeppelin-solidity/contracts/math/SafeMath.sol"; 10 | 11 | contract ValidatorsOperations { 12 | 13 | using SafeMath for uint256; 14 | 15 | using SafeMath for uint8; 16 | // VARIABLES 17 | 18 | uint256 public validatorsGeneration; 19 | uint256 public howManyValidatorsDecide; 20 | address[] public validators; 21 | bytes32[] public allOperations; 22 | address internal insideCallSender; 23 | uint256 internal insideCallCount; 24 | 25 | 26 | // Reverse lookup tables for validators and allOperations 27 | mapping(address => uint8) public validatorsIndices; // Starts from 1, size 255 28 | mapping(bytes32 => uint) public allOperationsIndicies; 29 | 30 | 31 | // validators voting mask per operations 32 | mapping(bytes32 => uint256) public votesMaskByOperation; 33 | mapping(bytes32 => uint256) public votesCountByOperation; 34 | 35 | //operation -> ValidatorIndex 36 | mapping(bytes32 => uint8) internal operationsByValidatorIndex; 37 | mapping(uint8 => uint8) internal operationsCountByValidatorIndex; 38 | // EVENTS 39 | 40 | event ValidatorShipTransferred(address[] previousValidators, uint howManyValidatorsDecide, address[] newvalidators, uint newHowManyValidatorsDecide); 41 | event OperationCreated(bytes32 operation, uint howMany, uint validatorsCount, address proposer); 42 | event OperationUpvoted(bytes32 operation, uint votes, uint howMany, uint validatorsCount, address upvoter); 43 | event OperationPerformed(bytes32 operation, uint howMany, uint validatorsCount, address performer); 44 | event OperationDownvoted(bytes32 operation, uint votes, uint validatorsCount, address downvoter); 45 | event OperationCancelled(bytes32 operation, address lastCanceller); 46 | 47 | // ACCESSORS 48 | 49 | function isExistValidator(address wallet) public view returns(bool) { 50 | return validatorsIndices[wallet] > 0; 51 | } 52 | 53 | 54 | function validatorsCount() public view returns(uint) { 55 | return validators.length; 56 | } 57 | 58 | function allOperationsCount() public view returns(uint) { 59 | return allOperations.length; 60 | } 61 | 62 | /* 63 | Internal 
functions 64 | */ 65 | 66 | function _operationLimitByValidatorIndex(uint8 ValidatorIndex) internal view returns(bool) { 67 | return (operationsCountByValidatorIndex[ValidatorIndex] <= 3); 68 | } 69 | 70 | function _cancelAllPending() internal { 71 | for (uint i = 0; i < allOperations.length; i++) { 72 | delete(allOperationsIndicies[allOperations[i]]); 73 | delete(votesMaskByOperation[allOperations[i]]); 74 | delete(votesCountByOperation[allOperations[i]]); 75 | //delete operation->ValidatorIndex 76 | delete(operationsByValidatorIndex[allOperations[i]]); 77 | } 78 | 79 | allOperations.length = 0; 80 | //delete operations count for Validator 81 | for (uint8 j = 0; j < validators.length; j++) { 82 | operationsCountByValidatorIndex[j] = 0; 83 | } 84 | } 85 | 86 | 87 | // MODIFIERS 88 | 89 | /** 90 | * @dev Allows to perform method by any of the validators 91 | */ 92 | modifier onlyAnyValidator { 93 | if (checkHowManyValidators(1)) { 94 | bool update = (insideCallSender == address(0)); 95 | if (update) { 96 | insideCallSender = msg.sender; 97 | insideCallCount = 1; 98 | } 99 | _; 100 | if (update) { 101 | insideCallSender = address(0); 102 | insideCallCount = 0; 103 | } 104 | } 105 | } 106 | 107 | /** 108 | * @dev Allows to perform method only after many validators call it with the same arguments 109 | */ 110 | modifier onlyManyValidators { 111 | if (checkHowManyValidators(howManyValidatorsDecide)) { 112 | bool update = (insideCallSender == address(0)); 113 | if (update) { 114 | insideCallSender = msg.sender; 115 | insideCallCount = howManyValidatorsDecide; 116 | } 117 | _; 118 | if (update) { 119 | insideCallSender = address(0); 120 | insideCallCount = 0; 121 | } 122 | } 123 | } 124 | 125 | /** 126 | * @dev Allows to perform method only after all validators call it with the same arguments 127 | */ 128 | modifier onlyAllValidators { 129 | if (checkHowManyValidators(validators.length)) { 130 | bool update = (insideCallSender == address(0)); 131 | if (update) { 132 | insideCallSender = msg.sender; 133 | insideCallCount = validators.length; 134 | } 135 | _; 136 | if (update) { 137 | insideCallSender = address(0); 138 | insideCallCount = 0; 139 | } 140 | } 141 | } 142 | 143 | /** 144 | * @dev Allows to perform method only after some validators call it with the same arguments 145 | */ 146 | modifier onlySomeValidators(uint howMany) { 147 | require(howMany > 0, "onlySomevalidators: howMany argument is zero"); 148 | require(howMany <= validators.length, "onlySomevalidators: howMany argument exceeds the number of validators"); 149 | 150 | if (checkHowManyValidators(howMany)) { 151 | bool update = (insideCallSender == address(0)); 152 | if (update) { 153 | insideCallSender = msg.sender; 154 | insideCallCount = howMany; 155 | } 156 | _; 157 | if (update) { 158 | insideCallSender = address(0); 159 | insideCallCount = 0; 160 | } 161 | } 162 | } 163 | 164 | // CONSTRUCTOR 165 | 166 | constructor() public { 167 | validators.push(msg.sender); 168 | validatorsIndices[msg.sender] = 1; 169 | howManyValidatorsDecide = 1; 170 | } 171 | 172 | // INTERNAL METHODS 173 | 174 | /** 175 | * @dev onlyManyvalidators modifier helper 176 | */ 177 | function checkHowManyValidators(uint howMany) internal returns(bool) { 178 | if (insideCallSender == msg.sender) { 179 | require(howMany <= insideCallCount, "checkHowManyvalidators: nested validators modifier check require more Validators"); 180 | return true; 181 | } 182 | 183 | 184 | require((isExistValidator(msg.sender) && (validatorsIndices[msg.sender] <= 
validators.length)), "checkHowManyvalidators: msg.sender is not an Validator"); 185 | 186 | uint ValidatorIndex = validatorsIndices[msg.sender].sub(1); 187 | 188 | bytes32 operation = keccak256(abi.encodePacked(msg.data, validatorsGeneration)); 189 | 190 | require((votesMaskByOperation[operation] & (2 ** ValidatorIndex)) == 0, "checkHowManyvalidators: Validator already voted for the operation"); 191 | //check limit for operation 192 | require(_operationLimitByValidatorIndex(uint8(ValidatorIndex)), "checkHowManyvalidators: operation limit is reached for this Validator"); 193 | 194 | votesMaskByOperation[operation] |= (2 ** ValidatorIndex); 195 | uint operationVotesCount = votesCountByOperation[operation].add(1); 196 | votesCountByOperation[operation] = operationVotesCount; 197 | 198 | if (operationVotesCount == 1) { 199 | allOperationsIndicies[operation] = allOperations.length; 200 | 201 | operationsByValidatorIndex[operation] = uint8(ValidatorIndex); 202 | 203 | operationsCountByValidatorIndex[uint8(ValidatorIndex)] = uint8(operationsCountByValidatorIndex[uint8(ValidatorIndex)].add(1)); 204 | 205 | allOperations.push(operation); 206 | 207 | 208 | emit OperationCreated(operation, howMany, validators.length, msg.sender); 209 | } 210 | emit OperationUpvoted(operation, operationVotesCount, howMany, validators.length, msg.sender); 211 | 212 | // If enough validators confirmed the same operation 213 | if (votesCountByOperation[operation] == howMany) { 214 | deleteOperation(operation); 215 | emit OperationPerformed(operation, howMany, validators.length, msg.sender); 216 | return true; 217 | } 218 | 219 | return false; 220 | } 221 | 222 | /** 223 | * @dev Used to delete cancelled or performed operation 224 | * @param operation defines which operation to delete 225 | */ 226 | function deleteOperation(bytes32 operation) internal { 227 | uint index = allOperationsIndicies[operation]; 228 | if (index < allOperations.length - 1) { // Not last 229 | allOperations[index] = allOperations[allOperations.length.sub(1)]; 230 | allOperationsIndicies[allOperations[index]] = index; 231 | } 232 | allOperations.length = allOperations.length.sub(1); 233 | 234 | uint8 ValidatorIndex = uint8(operationsByValidatorIndex[operation]); 235 | operationsCountByValidatorIndex[ValidatorIndex] = uint8(operationsCountByValidatorIndex[ValidatorIndex].sub(1)); 236 | 237 | delete votesMaskByOperation[operation]; 238 | delete votesCountByOperation[operation]; 239 | delete allOperationsIndicies[operation]; 240 | delete operationsByValidatorIndex[operation]; 241 | } 242 | 243 | // PUBLIC METHODS 244 | 245 | /** 246 | * @dev Allows validators to change their mind by cancelling votesMaskByOperation operations 247 | * @param operation defines which operation to delete 248 | */ 249 | function cancelPending(bytes32 operation) public onlyAnyValidator { 250 | 251 | require((isExistValidator(msg.sender) && (validatorsIndices[msg.sender] <= validators.length)), "checkHowManyvalidators: msg.sender is not an Validator"); 252 | 253 | uint ValidatorIndex = validatorsIndices[msg.sender].sub(1); 254 | require((votesMaskByOperation[operation] & (2 ** ValidatorIndex)) != 0, "cancelPending: operation not found for this user"); 255 | votesMaskByOperation[operation] &= ~(2 ** ValidatorIndex); 256 | uint operationVotesCount = votesCountByOperation[operation].sub(1); 257 | votesCountByOperation[operation] = operationVotesCount; 258 | emit OperationDownvoted(operation, operationVotesCount, validators.length, msg.sender); 259 | if (operationVotesCount == 0) 
{ 260 | deleteOperation(operation); 261 | emit OperationCancelled(operation, msg.sender); 262 | } 263 | } 264 | 265 | /** 266 | * @dev Allows validators to change their mind by cancelling all operations 267 | */ 268 | 269 | function cancelAllPending() public onlyManyValidators { 270 | _cancelAllPending(); 271 | } 272 | 273 | 274 | 275 | /**Переписать*/ 276 | 277 | /** 278 | * @dev Allows validators to change validatorsship 279 | * @param newValidators defines array of addresses of new validators 280 | */ 281 | function transferValidatorShip(address[] memory newValidators) public { 282 | transferValidatorShipWithHowMany(newValidators, newValidators.length); 283 | } 284 | 285 | /** 286 | * @dev Allows validators to change ValidatorShip 287 | * @param newValidators defines array of addresses of new validators 288 | * @param newHowManyValidatorsDecide defines how many validators can decide 289 | */ 290 | function transferValidatorShipWithHowMany(address[] memory newValidators, uint256 newHowManyValidatorsDecide) public onlyManyValidators { 291 | require(newValidators.length > 0, "transferValidatorShipWithHowMany: validators array is empty"); 292 | require(newValidators.length < 256, "transferValidatorShipWithHowMany: validators count is greater then 255"); 293 | require(newHowManyValidatorsDecide > 0, "transferValidatorShipWithHowMany: newHowManyValidatorsDecide equal to 0"); 294 | require(newHowManyValidatorsDecide <= newValidators.length, "transferValidatorShipWithHowMany: newHowManyValidatorsDecide exceeds the number of Validators"); 295 | 296 | // Reset validators reverse lookup table 297 | for (uint j = 0; j < validators.length; j++) { 298 | delete validatorsIndices[validators[j]]; 299 | } 300 | for (uint i = 0; i < newValidators.length; i++) { 301 | require(newValidators[i] != address(0), "transferValidatorShipWithHowMany: validators array contains zero"); 302 | require(validatorsIndices[newValidators[i]] == 0, "transferValidatorShipWithHowMany: validators array contains duplicates"); 303 | validatorsIndices[newValidators[i]] = uint8(i.add(1)); 304 | } 305 | 306 | emit ValidatorShipTransferred(validators, howManyValidatorsDecide, newValidators, newHowManyValidatorsDecide); 307 | validators = newValidators; 308 | howManyValidatorsDecide = newHowManyValidatorsDecide; 309 | 310 | _cancelAllPending(); 311 | 312 | validatorsGeneration++; 313 | } 314 | } -------------------------------------------------------------------------------- /bridge/dai-home/contracts/impl/ValidatorsOperationsImpl.sol: -------------------------------------------------------------------------------- 1 | pragma solidity ^0.5.0; 2 | 3 | import "../helpers/ValidatorsOperations.sol"; 4 | 5 | 6 | contract ValidatorOperationsImpl is ValidatorsOperations { 7 | 8 | uint public value; 9 | 10 | function setValue(uint _value) public onlyManyValidators { 11 | value = _value; 12 | } 13 | 14 | function setValueAny(uint _value) public onlyAnyValidator { 15 | value = _value; 16 | } 17 | 18 | function setValueAll(uint _value) public onlyAllValidators { 19 | value = _value; 20 | } 21 | 22 | function setValueSome(uint _value, uint howMany) public onlySomeValidators(howMany) { 23 | value = _value; 24 | } 25 | 26 | function nestedFirst(uint _value) public onlyManyValidators { 27 | nestedSecond(_value); 28 | } 29 | 30 | function nestedSecond(uint _value) public onlyManyValidators { 31 | value = _value; 32 | } 33 | 34 | // 35 | 36 | function nestedFirstAllToAll(uint _value) public onlyAllValidators { 37 | nestedSecondAllToAll(_value); 38 | } 
39 | 40 | function nestedFirstAllToAll2(uint _value) public onlyAllValidators { 41 | this.nestedSecondAllToAll(_value); // this. 42 | } 43 | 44 | function nestedSecondAllToAll(uint _value) public onlyAllValidators { 45 | value = _value; 46 | } 47 | 48 | // 49 | 50 | function nestedFirstAnyToAny(uint _value) public onlyAnyValidator { 51 | nestedSecondAnyToAny(_value); 52 | } 53 | 54 | function nestedFirstAnyToAny2(uint _value) public onlyAnyValidator { 55 | this.nestedSecondAnyToAny(_value); // this. 56 | } 57 | 58 | function nestedSecondAnyToAny(uint _value) public onlyAnyValidator { 59 | value = _value; 60 | } 61 | 62 | // 63 | 64 | function nestedFirstManyToSome(uint _value, uint howMany) public onlyManyValidators { 65 | nestedSecondSome(_value, howMany); 66 | } 67 | 68 | function nestedFirstAnyToSome(uint _value, uint howMany) public onlyAnyValidator { 69 | nestedSecondSome(_value, howMany); 70 | } 71 | 72 | function nestedSecondSome(uint _value, uint howMany) public onlySomeValidators(howMany) { 73 | value = _value; 74 | } 75 | 76 | } -------------------------------------------------------------------------------- /bridge/dai-home/contracts/main/DAIBridge.sol: -------------------------------------------------------------------------------- 1 | pragma solidity ^0.5.9; 2 | 3 | import 'openzeppelin-solidity/contracts/token/ERC20/SafeERC20.sol'; 4 | 5 | //Beneficieries (validators) template 6 | import "../helpers/ValidatorsOperations.sol"; 7 | 8 | contract DAIBridge is ValidatorsOperations { 9 | 10 | IERC20 private token; 11 | 12 | enum Status {PENDING,WITHDRAW,APPROVED, CANCELED, CONFIRMED} 13 | 14 | struct Message { 15 | bytes32 messageID; 16 | address spender; 17 | bytes32 substrateAddress; 18 | uint availableAmount; 19 | Status status; 20 | } 21 | 22 | event RelayMessage(bytes32 messageID, address sender, bytes32 recipient, uint amount); 23 | event RevertMessage(bytes32 messageID, address sender, uint amount); 24 | event WithdrawMessage(bytes32 MessageID); 25 | event ApprovedRelayMessage(bytes32 messageID, address sender, bytes32 recipient, uint amount); 26 | 27 | 28 | mapping(bytes32 => Message) messages; 29 | mapping(address => Message) messagesBySender; 30 | 31 | /** 32 | * @notice Constructor. 
33 | * @param _token Address of DAI token 34 | */ 35 | 36 | constructor (IERC20 _token) public 37 | ValidatorsOperations() { 38 | token = _token; 39 | } 40 | 41 | // MODIFIERS 42 | /** 43 | * @dev Allows to perform method by existing Validator 44 | */ 45 | 46 | modifier onlyExistingValidator(address _Validator) { 47 | require(isExistValidator(_Validator), "address is not in Validator array"); 48 | _; 49 | } 50 | 51 | /* 52 | check available amount 53 | */ 54 | 55 | modifier messageHasAmount(bytes32 messageID) { 56 | require((messages[messageID].availableAmount > 0), "Amount withdraw"); 57 | _; 58 | } 59 | 60 | /* 61 | check that message is valid 62 | */ 63 | modifier validMessage(bytes32 messageID, address spender, bytes32 substrateAddress, uint availableAmount) { 64 | require((messages[messageID].spender == spender) 65 | && (messages[messageID].substrateAddress == substrateAddress) 66 | && (messages[messageID].availableAmount == availableAmount), "Data is not valid"); 67 | _; 68 | } 69 | 70 | modifier pendingMessage(bytes32 messageID) { 71 | require(messages[messageID].status == Status.PENDING, "Message is not pending"); 72 | _; 73 | } 74 | 75 | modifier approvedMessage(bytes32 messageID) { 76 | require(messages[messageID].status == Status.APPROVED, "Message is not approved"); 77 | _; 78 | } 79 | 80 | function setTransfer(uint amount, bytes32 substrateAddress) public { 81 | require(token.allowance(msg.sender, address(this)) >= amount, "contract is not allowed to this amount"); 82 | token.transferFrom(msg.sender, address(this), amount); 83 | 84 | bytes32 messageID = keccak256(abi.encodePacked(now)); 85 | 86 | Message memory message = Message(messageID, msg.sender, substrateAddress, amount, Status.PENDING); 87 | messages[messageID] = message; 88 | 89 | emit RelayMessage(messageID, msg.sender, substrateAddress, amount); 90 | } 91 | 92 | /* 93 | * Widthdraw finance by message ID when transfer pending 94 | */ 95 | function revertTransfer(bytes32 messageID) public pendingMessage(messageID) { 96 | Message storage message = messages[messageID]; 97 | 98 | message.status = Status.CANCELED; 99 | 100 | token.transfer(msg.sender, message.availableAmount); 101 | 102 | emit RevertMessage(messageID, msg.sender, message.availableAmount); 103 | } 104 | 105 | 106 | /* 107 | * Approve finance by message ID when transfer pending 108 | */ 109 | function approveTransfer(bytes32 messageID, address spender, bytes32 substrateAddress, uint availableAmount) 110 | public validMessage(messageID, spender, substrateAddress, availableAmount) pendingMessage(messageID) onlyManyValidators { 111 | Message storage message = messages[messageID]; 112 | message.status = Status.APPROVED; 113 | 114 | emit ApprovedRelayMessage(messageID, spender, substrateAddress, availableAmount); 115 | } 116 | 117 | /* 118 | * Confirm tranfer by message ID when transfer pending 119 | */ 120 | function confirmTransfer(bytes32 messageID) public approvedMessage(messageID) onlyManyValidators { 121 | Message storage message = messages[messageID]; 122 | message.status = Status.CONFIRMED; 123 | } 124 | 125 | 126 | /* 127 | * Withdraw tranfer by message ID after approve from Substrate 128 | */ 129 | function withdrawTransfer(bytes32 messageID, bytes32 substrateSender, address recipient, uint availableAmount) public onlyManyValidators { 130 | require(token.balanceOf(address(this)) >= availableAmount, "Balance is not enough"); 131 | token.transfer(recipient, availableAmount); 132 | Message memory message = Message(messageID, msg.sender, substrateSender, 
availableAmount, Status.WITHDRAW); 133 | messages[messageID] = message; 134 | emit WithdrawMessage(messageID); 135 | } 136 | 137 | } -------------------------------------------------------------------------------- /bridge/dai-home/migrations/1_initial_migration.js: -------------------------------------------------------------------------------- 1 | const Migrations = artifacts.require("Migrations"); 2 | 3 | module.exports = function(deployer) { 4 | deployer.deploy(Migrations); 5 | }; 6 | -------------------------------------------------------------------------------- /bridge/dai-home/migrations/2_deploy_bridge.js: -------------------------------------------------------------------------------- 1 | var DAIBridge = artifacts.require("./DAIBridge.sol"); 2 | 3 | 4 | module.exports = function(deployer, network, accounts) { 5 | let owner = accounts[0]; 6 | 7 | let token = "0xc4375b7de8af5a38a93548eb8453a498222c4ff2"; //DAI 8 | 9 | 10 | //console.log('owner of storage contracts: ' + owner) 11 | 12 | deployer.deploy(DAIBridge, token, {from: owner}); 13 | 14 | }; 15 | 16 | -------------------------------------------------------------------------------- /bridge/dai-home/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "akropolis-substrate-ethereum-bridge", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "y", 6 | "directories": { 7 | "test": "test" 8 | }, 9 | "scripts": { 10 | "test": "truffle test", 11 | "rpc": "scripts/rpc.sh" 12 | }, 13 | "author": "am@akropolis.io", 14 | "license": "MIT", 15 | "dependencies": { 16 | "babel-eslint": "^8.2.3", 17 | "babel-polyfill": "^6.26.0", 18 | "babel-preset-env": "^1.6.1", 19 | "babel-preset-flow": "^6.23.0", 20 | "babel-register": "^6.26.0", 21 | "chai": "^4.1.2", 22 | "chai-as-promised": "^7.1.1", 23 | "chai-bignumber": "^2.0.2", 24 | "dotenv": "^6.0.0", 25 | "eth-gas-reporter": "^0.2.3", 26 | "ethjs-abi": "^0.2.1", 27 | "openzeppelin-solidity": "2.3.0", 28 | "truffle-contract": "4.0.23", 29 | "truffle-hdwallet-provider": "1.0.8", 30 | "web3": "v1.0.0-beta.37", 31 | "zos-lib": "2.4.1" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/EVMRevert.js: -------------------------------------------------------------------------------- 1 | export default 'revert'; 2 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/EVMThrow.js: -------------------------------------------------------------------------------- 1 | export default 'invalid opcode'; 2 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/advanceToBlock.js: -------------------------------------------------------------------------------- 1 | export function advanceBlock () { 2 | return new Promise((resolve, reject) => { 3 | web3.currentProvider.sendAsync({ 4 | jsonrpc: '2.0', 5 | method: 'evm_mine', 6 | id: Date.now(), 7 | }, (err, res) => { 8 | return err ? reject(err) : resolve(res); 9 | }); 10 | }); 11 | } 12 | 13 | // Advances the block number so that the last mined block is `number`. 
14 | export default async function advanceToBlock (number) { 15 | if (web3.eth.blockNumber > number) { 16 | throw Error(`block number ${number} is in the past (current is ${web3.eth.blockNumber})`); 17 | } 18 | 19 | while (web3.eth.blockNumber < number) { 20 | await advanceBlock(); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/assertJump.js: -------------------------------------------------------------------------------- 1 | export default async promise => { 2 | try { 3 | await promise; 4 | assert.fail('Expected invalid opcode not received'); 5 | } catch (error) { 6 | const invalidOpcodeReceived = error.message.search('invalid opcode') >= 0; 7 | assert(invalidOpcodeReceived, `Expected "invalid opcode", got ${error} instead`); 8 | } 9 | }; 10 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/assertRevert.js: -------------------------------------------------------------------------------- 1 | export default async promise => { 2 | try { 3 | await promise; 4 | assert.fail('Expected revert not received'); 5 | } catch (error) { 6 | const revertFound = error.message.search('revert') >= 0; 7 | assert(revertFound, `Expected "revert", got ${error} instead`); 8 | } 9 | }; 10 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/decodeLogs.js: -------------------------------------------------------------------------------- 1 | const SolidityEvent = require('web3'); 2 | 3 | export default function decodeLogs (logs, contract, address) { 4 | return logs.map(log => { 5 | const event = new SolidityEvent(null, contract.events[log.topics[0]], address); 6 | return event.decode(log); 7 | }); 8 | } 9 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/ether.js: -------------------------------------------------------------------------------- 1 | export default function ether (n) { 2 | return new web3.BigNumber(web3.toWei(n, 'ether')); 3 | } 4 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/expectEvent.js: -------------------------------------------------------------------------------- 1 | const assert = require('chai').assert; 2 | 3 | const inLogs = async (logs, eventName) => { 4 | const event = logs.find(e => e.event === eventName); 5 | assert.exists(event); 6 | return event; 7 | }; 8 | 9 | const inTransaction = async (tx, eventName) => { 10 | const { logs } = await tx; 11 | return inLogs(logs, eventName); 12 | }; 13 | 14 | module.exports = { 15 | inLogs, 16 | inTransaction, 17 | }; 18 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/expectThrow.js: -------------------------------------------------------------------------------- 1 | export default async promise => { 2 | try { 3 | await promise; 4 | } catch (error) { 5 | // TODO: Check jump destination to destinguish between a throw 6 | // and an actual invalid jump. 7 | const invalidOpcode = error.message.search('invalid opcode') >= 0; 8 | // TODO: When we contract A calls contract B, and B throws, instead 9 | // of an 'invalid jump', we get an 'out of gas' error. How do 10 | // we distinguish this from an actual out of gas event? (The 11 | // ganache log actually show an 'invalid jump' event.) 
12 | const outOfGas = error.message.search('out of gas') >= 0; 13 | const revert = error.message.search('revert') >= 0; 14 | assert( 15 | invalidOpcode || outOfGas || revert, 16 | 'Expected throw, got \'' + error + '\' instead', 17 | ); 18 | return; 19 | } 20 | assert.fail('Expected throw not received'); 21 | }; 22 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/increaseTime.js: -------------------------------------------------------------------------------- 1 | import latestTime from './latestTime'; 2 | 3 | // Increases ganache time by the passed duration in seconds 4 | export default function increaseTime (duration) { 5 | const id = Date.now(); 6 | 7 | return new Promise((resolve, reject) => { 8 | web3.currentProvider.sendAsync({ 9 | jsonrpc: '2.0', 10 | method: 'evm_increaseTime', 11 | params: [duration], 12 | id: id, 13 | }, err1 => { 14 | if (err1) return reject(err1); 15 | 16 | web3.currentProvider.sendAsync({ 17 | jsonrpc: '2.0', 18 | method: 'evm_mine', 19 | id: id + 1, 20 | }, (err2, res) => { 21 | return err2 ? reject(err2) : resolve(res); 22 | }); 23 | }); 24 | }); 25 | } 26 | 27 | /** 28 | * Beware that due to the need of calling two separate ganache methods and rpc calls overhead 29 | * it's hard to increase time precisely to a target point so design your test to tolerate 30 | * small fluctuations from time to time. 31 | * 32 | * @param target time in seconds 33 | */ 34 | export function increaseTimeTo (target) { 35 | let now = latestTime(); 36 | if (target < now) throw Error(`Cannot increase current time(${now}) to a moment in the past(${target})`); 37 | let diff = target - now; 38 | return increaseTime(diff); 39 | } 40 | 41 | export const duration = { 42 | seconds: function (val) { return val; }, 43 | minutes: function (val) { return val * this.seconds(60); }, 44 | hours: function (val) { return val * this.minutes(60); }, 45 | days: function (val) { return val * this.hours(24); }, 46 | weeks: function (val) { return val * this.days(7); }, 47 | years: function (val) { return val * this.days(365); }, 48 | }; 49 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/latestTime.js: -------------------------------------------------------------------------------- 1 | // Returns the time of the last mined block in seconds 2 | export default function latestTime () { 3 | return web3.eth.getBlock('latest').timestamp; 4 | } 5 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/merkleTree.js: -------------------------------------------------------------------------------- 1 | import { sha3, bufferToHex } from 'ethereumjs-util'; 2 | 3 | export default class MerkleTree { 4 | constructor (elements) { 5 | // Filter empty strings and hash elements 6 | this.elements = elements.filter(el => el).map(el => sha3(el)); 7 | 8 | // Deduplicate elements 9 | this.elements = this.bufDedup(this.elements); 10 | // Sort elements 11 | this.elements.sort(Buffer.compare); 12 | 13 | // Create layers 14 | this.layers = this.getLayers(this.elements); 15 | } 16 | 17 | getLayers (elements) { 18 | if (elements.length === 0) { 19 | return [['']]; 20 | } 21 | 22 | const layers = []; 23 | layers.push(elements); 24 | 25 | // Get next layer until we reach the root 26 | while (layers[layers.length - 1].length > 1) { 27 | layers.push(this.getNextLayer(layers[layers.length - 1])); 28 | } 29 | 30 | return layers; 31 | } 32 | 33 | getNextLayer 
(elements) { 34 | return elements.reduce((layer, el, idx, arr) => { 35 | if (idx % 2 === 0) { 36 | // Hash the current element with its pair element 37 | layer.push(this.combinedHash(el, arr[idx + 1])); 38 | } 39 | 40 | return layer; 41 | }, []); 42 | } 43 | 44 | combinedHash (first, second) { 45 | if (!first) { return second; } 46 | if (!second) { return first; } 47 | 48 | return sha3(this.sortAndConcat(first, second)); 49 | } 50 | 51 | getRoot () { 52 | return this.layers[this.layers.length - 1][0]; 53 | } 54 | 55 | getHexRoot () { 56 | return bufferToHex(this.getRoot()); 57 | } 58 | 59 | getProof (el) { 60 | let idx = this.bufIndexOf(el, this.elements); 61 | 62 | if (idx === -1) { 63 | throw new Error('Element does not exist in Merkle tree'); 64 | } 65 | 66 | return this.layers.reduce((proof, layer) => { 67 | const pairElement = this.getPairElement(idx, layer); 68 | 69 | if (pairElement) { 70 | proof.push(pairElement); 71 | } 72 | 73 | idx = Math.floor(idx / 2); 74 | 75 | return proof; 76 | }, []); 77 | } 78 | 79 | getHexProof (el) { 80 | const proof = this.getProof(el); 81 | 82 | return this.bufArrToHexArr(proof); 83 | } 84 | 85 | getPairElement (idx, layer) { 86 | const pairIdx = idx % 2 === 0 ? idx + 1 : idx - 1; 87 | 88 | if (pairIdx < layer.length) { 89 | return layer[pairIdx]; 90 | } else { 91 | return null; 92 | } 93 | } 94 | 95 | bufIndexOf (el, arr) { 96 | let hash; 97 | 98 | // Convert element to 32 byte hash if it is not one already 99 | if (el.length !== 32 || !Buffer.isBuffer(el)) { 100 | hash = sha3(el); 101 | } else { 102 | hash = el; 103 | } 104 | 105 | for (let i = 0; i < arr.length; i++) { 106 | if (hash.equals(arr[i])) { 107 | return i; 108 | } 109 | } 110 | 111 | return -1; 112 | } 113 | 114 | bufDedup (elements) { 115 | return elements.filter((el, idx) => { 116 | return this.bufIndexOf(el, elements) === idx; 117 | }); 118 | } 119 | 120 | bufArrToHexArr (arr) { 121 | if (arr.some(el => !Buffer.isBuffer(el))) { 122 | throw new Error('Array is not an array of buffers'); 123 | } 124 | 125 | return arr.map(el => '0x' + el.toString('hex')); 126 | } 127 | 128 | sortAndConcat (...args) { 129 | return Buffer.concat([...args].sort(Buffer.compare)); 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/sendTransaction.js: -------------------------------------------------------------------------------- 1 | const _ = require('lodash'); 2 | const ethjsABI = require('ethjs-abi'); 3 | 4 | export function findMethod (abi, name, args) { 5 | for (var i = 0; i < abi.length; i++) { 6 | const methodArgs = _.map(abi[i].inputs, 'type').join(','); 7 | if ((abi[i].name === name) && (methodArgs === args)) { 8 | return abi[i]; 9 | } 10 | } 11 | } 12 | 13 | export default function sendTransaction (target, name, argsTypes, argsValues, opts) { 14 | const abiMethod = findMethod(target.abi, name, argsTypes); 15 | const encodedData = ethjsABI.encodeMethod(abiMethod, argsValues); 16 | return target.sendTransaction(Object.assign({ data: encodedData }, opts)); 17 | } 18 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/sign.js: -------------------------------------------------------------------------------- 1 | import utils from 'ethereumjs-util'; 2 | 3 | /** 4 | * Hash and add same prefix to the hash that ganache use. 
5 | * @param {string} message the plaintext/ascii/original message 6 | * @return {string} the hash of the message, prefixed, and then hashed again 7 | */ 8 | export const hashMessage = (message) => { 9 | const messageHex = Buffer.from(utils.sha3(message).toString('hex'), 'hex'); 10 | const prefix = utils.toBuffer('\u0019Ethereum Signed Message:\n' + messageHex.length.toString()); 11 | return utils.bufferToHex(utils.sha3(Buffer.concat([prefix, messageHex]))); 12 | }; 13 | 14 | // signs message using web3 (auto-applies prefix) 15 | export const signMessage = (signer, message = '', options = {}) => { 16 | return web3.eth.sign(signer, web3.sha3(message, options)); 17 | }; 18 | 19 | // signs hex string using web3 (auto-applies prefix) 20 | export const signHex = (signer, message = '') => { 21 | return signMessage(signer, message, { encoding: 'hex' }); 22 | }; 23 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/toPromise.js: -------------------------------------------------------------------------------- 1 | export default func => 2 | (...args) => 3 | new Promise((resolve, reject) => 4 | func(...args, (error, data) => error ? reject(error) : resolve(data))); 5 | -------------------------------------------------------------------------------- /bridge/dai-home/test/helpers/transactionMined.js: -------------------------------------------------------------------------------- 1 | 2 | // from https://gist.github.com/xavierlepretre/88682e871f4ad07be4534ae560692ee6 3 | module.exports = web3.eth.transactionMined = function (txnHash, interval) { 4 | var transactionReceiptAsync; 5 | interval = interval || 500; 6 | transactionReceiptAsync = function (txnHash, resolve, reject) { 7 | try { 8 | var receipt = web3.eth.getTransactionReceipt(txnHash); 9 | if (receipt === null) { 10 | setTimeout(function () { 11 | transactionReceiptAsync(txnHash, resolve, reject); 12 | }, interval); 13 | } else { 14 | resolve(receipt); 15 | } 16 | } catch (e) { 17 | reject(e); 18 | } 19 | }; 20 | 21 | if (Array.isArray(txnHash)) { 22 | var promises = []; 23 | txnHash.forEach(function (oneTxHash) { 24 | promises.push( 25 | web3.eth.transactionMined(oneTxHash, interval)); 26 | }); 27 | return Promise.all(promises); 28 | } else { 29 | return new Promise(function (resolve, reject) { 30 | transactionReceiptAsync(txnHash, resolve, reject); 31 | }); 32 | } 33 | }; 34 | -------------------------------------------------------------------------------- /bridge/dai-home/truffle-config.js: -------------------------------------------------------------------------------- 1 | 2 | const HDWalletProvider = require("truffle-hdwallet-provider"); 3 | require('dotenv').config() // Loads environment-specific variables from '.env' into process.env 4 | require('babel-register'); 5 | require('babel-polyfill'); 6 | 7 | 8 | console.log(process.env.METAMASK_MNEMONIC); 9 | console.log(process.env.INFURA_API_KEY); 10 | module.exports = { 11 | // See 12 | // to customize your Truffle configuration!
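  // For illustration only: the mainnet/kovan networks below read their secrets from a
  // local `.env` file via dotenv. A hypothetical `.env` (placeholder values, never commit
  // real credentials) would look like:
  //   METAMASK_MNEMONIC="candy maple cake sugar pudding cream honey rich smooth crumble sweet treat"
  //   INFURA_API_KEY=your-infura-project-id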
13 | compilers: { 14 | solc: { 15 | version: "0.5.9" // Change this to whatever you need 16 | } 17 | }, 18 | networks: { 19 | development: { 20 | host: 'localhost', 21 | port: 8545, 22 | network_id: '*', 23 | gas: 4600000 24 | }, 25 | 26 | mainnet: { 27 | provider: function () { 28 | return new HDWalletProvider(process.env.METAMASK_MNEMONIC, "https://mainnet.infura.io/v3/" + process.env.INFURA_API_KEY) 29 | }, 30 | network_id: 1, 31 | network_id: 1, 32 | gas: 7000000, 33 | skipDryRun:true 34 | }, 35 | 36 | kovan: { 37 | provider: function () { 38 | return new HDWalletProvider(process.env.METAMASK_MNEMONIC, "https://kovan.infura.io/v3/" + process.env.INFURA_API_KEY) 39 | }, 40 | network_id: 42, 41 | gas: 7000000 42 | }, 43 | }, 44 | mocha: { 45 | reporter: 'eth-gas-reporter', 46 | reporterOptions: { 47 | gasPrice: 21 48 | } 49 | }, 50 | }; -------------------------------------------------------------------------------- /cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ['Akropolis '] 3 | build = 'build.rs' 4 | edition = '2018' 5 | name = 'node-cli' 6 | version = '0.3.1' 7 | 8 | [package.metadata.wasm-pack.profile.release] 9 | # `wasm-opt` has some problems on linux, see 10 | # https://github.com/rustwasm/wasm-pack/issues/781 etc. 11 | wasm-opt = false 12 | 13 | [dependencies] 14 | # third-party dependencies 15 | codec = { package = "parity-scale-codec", version = "1.2.0" } 16 | serde = { version = "1.0.102", features = ["derive"] } 17 | futures = { version = "0.3.1", features = ["compat"] } 18 | hex-literal = "0.2.1" 19 | jsonrpc-core = "14.0.3" 20 | log = "0.4.8" 21 | rand = "0.7.2" 22 | structopt = { version = "0.3.8", optional = true } 23 | tracing = "0.1.10" 24 | 25 | # WASM-specific dependencies 26 | wasm-bindgen = { version = "0.2.57", optional = true } 27 | wasm-bindgen-futures = { version = "0.4.7", optional = true } 28 | browser-utils = { package = "substrate-browser-utils", git = 'https://github.com/paritytech/substrate.git', optional = true, version = "0.8.0-alpha.5"} 29 | 30 | node-executor = { path = "../executor" } 31 | node-rpc = { path = "../rpc" } 32 | 33 | node-transaction-factory = { optional = true, path = "../transaction-factory" } 34 | node-inspect = { optional = true, path = "../inspect" } 35 | frame-benchmarking-cli = {version = '2.0.0-alpha.5', git = 'https://github.com/paritytech/substrate.git', optional = true} 36 | akropolisos-runtime = { version = '0.5.0', path = '../runtime' } 37 | 38 | sp-blockchain = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 39 | 40 | [dependencies.ctrlc] 41 | features = ['termination'] 42 | version = '3.1.3' 43 | 44 | [dependencies.futures01] 45 | package = 'futures' 46 | version = '0.1.29' 47 | 48 | [dependencies.sp-authority-discovery] 49 | git = 'https://github.com/paritytech/substrate.git' 50 | version = '2.0.0-alpha.5' 51 | 52 | [dependencies.sp-consensus-babe] 53 | git = 'https://github.com/paritytech/substrate.git' 54 | version = '0.8.0-alpha.5' 55 | 56 | [dependencies.grandpa-primitives] 57 | git = 'https://github.com/paritytech/substrate.git' 58 | package = 'sp-finality-grandpa' 59 | version = '2.0.0-alpha.5' 60 | 61 | [dependencies.sp-core] 62 | git = 'https://github.com/paritytech/substrate.git' 63 | version = '2.0.0-alpha.5' 64 | 65 | [dependencies.sp-runtime] 66 | git = 'https://github.com/paritytech/substrate.git' 67 | version = '2.0.0-alpha.5' 68 | 69 | [dependencies.sp-transaction-pool] 70 | git = 
'https://github.com/paritytech/substrate.git' 71 | version = '2.0.0-alpha.5' 72 | 73 | [dependencies.sp-timestamp] 74 | git = 'https://github.com/paritytech/substrate.git' 75 | version = '2.0.0-alpha.5' 76 | 77 | [dependencies.sp-finality-tracker] 78 | git = 'https://github.com/paritytech/substrate.git' 79 | version = '2.0.0-alpha.5' 80 | 81 | [dependencies.sp-inherents] 82 | git = 'https://github.com/paritytech/substrate.git' 83 | version = '2.0.0-alpha.5' 84 | 85 | [dependencies.sp-keyring] 86 | git = 'https://github.com/paritytech/substrate.git' 87 | version = '2.0.0-alpha.5' 88 | 89 | [dependencies.sp-io] 90 | git = 'https://github.com/paritytech/substrate.git' 91 | version = '2.0.0-alpha.5' 92 | 93 | [dependencies.sp-consensus] 94 | git = 'https://github.com/paritytech/substrate.git' 95 | version = '0.8.0-alpha.5' 96 | 97 | [dependencies.grandpa] 98 | git = 'https://github.com/paritytech/substrate.git' 99 | package = 'sc-finality-grandpa' 100 | version = '0.8.0-alpha.5' 101 | 102 | [dependencies.sc-authority-discovery] 103 | git = 'https://github.com/paritytech/substrate.git' 104 | version = '0.8.0-alpha.5' 105 | 106 | [dependencies.sc-basic-authorship] 107 | git = 'https://github.com/paritytech/substrate.git' 108 | version = '0.8.0-alpha.5' 109 | 110 | [dependencies.sc-consensus-babe] 111 | git = 'https://github.com/paritytech/substrate.git' 112 | version = '0.8.0-alpha.5' 113 | 114 | [dependencies.sc-client-db] 115 | default-features = false 116 | git = 'https://github.com/paritytech/substrate.git' 117 | version = '0.8.0-alpha.5' 118 | 119 | [dependencies.sc-client-api] 120 | default-features = false 121 | git = 'https://github.com/paritytech/substrate.git' 122 | version = '2.0.0-alpha.5' 123 | 124 | [dependencies.sc-offchain] 125 | git = 'https://github.com/paritytech/substrate.git' 126 | version = '2.0.0-alpha.5' 127 | 128 | [dependencies.sc-rpc] 129 | git = 'https://github.com/paritytech/substrate.git' 130 | version = '2.0.0-alpha.5' 131 | 132 | [dependencies.sc-cli] 133 | git = 'https://github.com/paritytech/substrate.git' 134 | version = '0.8.0-alpha.5' 135 | 136 | [dependencies.sc-client] 137 | git = 'https://github.com/paritytech/substrate.git' 138 | version = '0.8.0-alpha.5' 139 | 140 | [dependencies.sc-chain-spec] 141 | git = 'https://github.com/paritytech/substrate.git' 142 | version = '2.0.0-alpha.5' 143 | 144 | [dependencies.sc-executor] 145 | git = 'https://github.com/paritytech/substrate.git' 146 | version = '0.8.0-alpha.5' 147 | 148 | [dependencies.sc-network] 149 | git = 'https://github.com/paritytech/substrate.git' 150 | version = '0.8.0-alpha.5' 151 | 152 | [dependencies.sc-service] 153 | git = 'https://github.com/paritytech/substrate.git' 154 | version = '0.8.0-alpha.5' 155 | 156 | [dependencies.sc-tracing] 157 | git = 'https://github.com/paritytech/substrate.git' 158 | version = '2.0.0-alpha.5' 159 | 160 | [dependencies.sc-transaction-pool] 161 | git = 'https://github.com/paritytech/substrate.git' 162 | version = '2.0.0-alpha.5' 163 | 164 | [dependencies.telemetry] 165 | git = 'https://github.com/paritytech/substrate.git' 166 | package = 'sc-telemetry' 167 | version = '2.0.0-alpha.5' 168 | 169 | [dependencies.pallet-indices] 170 | default-features = false 171 | git = 'https://github.com/paritytech/substrate.git' 172 | version = '2.0.0-alpha.5' 173 | 174 | [dependencies.pallet-contracts] 175 | default-features = false 176 | git = 'https://github.com/paritytech/substrate.git' 177 | version = '2.0.0-alpha.5' 178 | 179 | [dependencies.pallet-transaction-payment] 180 
| default-features = false 181 | git = 'https://github.com/paritytech/substrate.git' 182 | version = '2.0.0-alpha.5' 183 | 184 | [dependencies.pallet-im-online] 185 | default-features = false 186 | git = 'https://github.com/paritytech/substrate.git' 187 | version = '2.0.0-alpha.5' 188 | 189 | [dependencies.system] 190 | default-features = false 191 | git = 'https://github.com/paritytech/substrate.git' 192 | package = 'frame-system' 193 | version = '2.0.0-alpha.5' 194 | 195 | [dev-dependencies] 196 | sc-keystore = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git'} 197 | sc-consensus-babe = { version = "0.8.0-alpha.5", features = ["test-helpers"], git = 'https://github.com/paritytech/substrate.git' } 198 | sc-consensus-epochs = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 199 | sc-service-test = { version = "2.0.0-dev", git = 'https://github.com/paritytech/substrate.git' } 200 | futures = "0.3.1" 201 | tempfile = "3.1.0" 202 | assert_cmd = "0.12" 203 | nix = "0.17" 204 | serde_json = "1.0" 205 | 206 | [build-dependencies] 207 | chrono = '0.4.10' 208 | vergen = {version = '3.0.4', optional = true} 209 | structopt = { version = "0.3.8", optional = true } 210 | sc-cli = { version = "0.8.0-alpha.5", optional = true, git = 'https://github.com/paritytech/substrate.git'} 211 | node-transaction-factory = { optional = true, path = "../transaction-factory" } 212 | node-inspect = { optional = true, path = "../inspect" } 213 | frame-benchmarking-cli = {version = '2.0.0-alpha.5', git = 'https://github.com/paritytech/substrate.git', optional = true} 214 | 215 | 216 | [build-dependencies.build-script-utils] 217 | git = 'https://github.com/paritytech/substrate.git' 218 | package = 'substrate-build-script-utils' 219 | version = '2.0.0-alpha.5' 220 | 221 | [lib] 222 | crate-type = ["cdylib", "rlib"] 223 | 224 | [features] 225 | default = ["cli", "wasmtime"] 226 | browser = [ 227 | "browser-utils", 228 | "wasm-bindgen", 229 | "wasm-bindgen-futures", 230 | ] 231 | cli = [ 232 | "node-executor/wasmi-errno", 233 | "node-inspect", 234 | "node-transaction-factory", 235 | "sc-cli", 236 | "frame-benchmarking-cli", 237 | "sc-service/rocksdb", 238 | "structopt", 239 | "vergen", 240 | ] 241 | wasmtime = [ 242 | "cli", 243 | "node-executor/wasmtime", 244 | "sc-cli/wasmtime", 245 | "sc-service/wasmtime", 246 | ] 247 | runtime-benchmarks = [ "akropolisos-runtime/runtime-benchmarks" ] -------------------------------------------------------------------------------- /cli/build.rs: -------------------------------------------------------------------------------- 1 | 2 | fn main() { 3 | #[cfg(feature = "cli")] 4 | cli::main(); 5 | } 6 | 7 | #[cfg(feature = "cli")] 8 | mod cli { 9 | include!("src/cli.rs"); 10 | 11 | use std::{fs, env, path::Path}; 12 | use sc_cli::{structopt::clap::Shell}; 13 | use vergen::{ConstantsFlags, generate_cargo_keys}; 14 | 15 | pub fn main() { 16 | build_shell_completion(); 17 | generate_cargo_keys(ConstantsFlags::all()).expect("Failed to generate metadata files"); 18 | 19 | build_script_utils::rerun_if_git_head_changed(); 20 | } 21 | 22 | /// Build shell completion scripts for all known shells 23 | /// Full list in https://github.com/kbknapp/clap-rs/blob/e9d0562a1dc5dfe731ed7c767e6cee0af08f0cf9/src/app/parser.rs#L123 24 | fn build_shell_completion() { 25 | for shell in &[Shell::Bash, Shell::Fish, Shell::Zsh, Shell::Elvish, Shell::PowerShell] { 26 | build_completion(shell); 27 | } 28 | } 29 | 30 | /// Build the shell auto-completion 
for a given Shell 31 | fn build_completion(shell: &Shell) { 32 | let outdir = match env::var_os("OUT_DIR") { 33 | None => return, 34 | Some(dir) => dir, 35 | }; 36 | let path = Path::new(&outdir) 37 | .parent().unwrap() 38 | .parent().unwrap() 39 | .parent().unwrap() 40 | .join("completion-scripts"); 41 | 42 | fs::create_dir(&path).ok(); 43 | 44 | Cli::clap().gen_completions("akropolisos-node", *shell, &path); 45 | } 46 | } -------------------------------------------------------------------------------- /cli/src/browser.rs: -------------------------------------------------------------------------------- 1 | 2 | use crate::chain_spec::ChainSpec; 3 | use log::info; 4 | use wasm_bindgen::prelude::*; 5 | use sc_service::Configuration; 6 | use browser_utils::{ 7 | Client, 8 | browser_configuration, set_console_error_panic_hook, init_console_log, 9 | }; 10 | use std::str::FromStr; 11 | 12 | /// Starts the client. 13 | #[wasm_bindgen] 14 | pub async fn start_client(chain_spec: String, log_level: String) -> Result { 15 | start_inner(chain_spec, log_level) 16 | .await 17 | .map_err(|err| JsValue::from_str(&err.to_string())) 18 | } 19 | 20 | async fn start_inner(chain_spec: String, log_level: String) -> Result> { 21 | set_console_error_panic_hook(); 22 | init_console_log(log::Level::from_str(&log_level)?)?; 23 | let chain_spec = ChainSpec::from_json_bytes(chain_spec.as_bytes().to_vec()) 24 | .map_err(|e| format!("{:?}", e))?; 25 | 26 | let config = browser_configuration(chain_spec).await?; 27 | 28 | info!("Akropolis OS browser node"); 29 | info!(" version {}", config.full_version()); 30 | info!(" by Akropolis Decentralized LTD, 2017-2020"); 31 | info!("Chain specification: {}", config.expect_chain_spec().name()); 32 | info!("Node name: {}", config.name); 33 | info!("Roles: {:?}", config.roles); 34 | 35 | // Create the service. This is the most heavy initialization step. 36 | let service = crate::service::new_light(config) 37 | .map_err(|e| format!("{:?}", e))?; 38 | 39 | Ok(browser_utils::start_client(service)) 40 | } -------------------------------------------------------------------------------- /cli/src/cli.rs: -------------------------------------------------------------------------------- 1 | use sc_cli::{SharedParams, ImportParams, RunCmd}; 2 | use structopt::StructOpt; 3 | 4 | /// An overarching CLI command definition. 5 | #[derive(Clone, Debug, StructOpt)] 6 | pub struct Cli { 7 | /// Possible subcommand with parameters. 8 | #[structopt(subcommand)] 9 | pub subcommand: Option, 10 | #[allow(missing_docs)] 11 | #[structopt(flatten)] 12 | pub run: RunCmd, 13 | } 14 | 15 | /// Possible subcommands of the main binary. 16 | #[derive(Clone, Debug, StructOpt)] 17 | pub enum Subcommand { 18 | /// A set of base subcommands handled by `sc_cli`. 19 | #[structopt(flatten)] 20 | Base(sc_cli::Subcommand), 21 | /// The custom factory subcommmand for manufacturing transactions. 22 | #[structopt( 23 | name = "factory", 24 | about = "Manufactures num transactions from Alice to random accounts. \ 25 | Only supported for development or local testnet." 26 | )] 27 | Factory(FactoryCmd), 28 | 29 | /// The custom inspect subcommmand for decoding blocks and extrinsics. 30 | #[structopt( 31 | name = "inspect", 32 | about = "Decode given block or extrinsic using current native runtime." 33 | )] 34 | Inspect(node_inspect::cli::InspectCmd), 35 | 36 | /// The custom benchmark subcommmand benchmarking runtime pallets. 37 | #[structopt( 38 | name = "benchmark", 39 | about = "Benchmark runtime pallets." 
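	// A hypothetical invocation (the node must be built with `--features runtime-benchmarks`;
	// the flags below are provided by frame-benchmarking-cli):
	//   akropolisos-node benchmark --chain dev --pallet pallet_balances --extrinsic transfer --steps 50 --repeat 20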
40 | )] 41 | Benchmark(frame_benchmarking_cli::BenchmarkCmd), 42 | } 43 | 44 | /// The `factory` command used to generate transactions. 45 | /// Please note: this command currently only works on an empty database! 46 | #[derive(Debug, StructOpt, Clone)] 47 | pub struct FactoryCmd { 48 | /// Number of blocks to generate. 49 | #[structopt(long="blocks", default_value = "1")] 50 | pub blocks: u32, 51 | 52 | /// Number of transactions to push per block. 53 | #[structopt(long="transactions", default_value = "8")] 54 | pub transactions: u32, 55 | 56 | #[allow(missing_docs)] 57 | #[structopt(flatten)] 58 | pub shared_params: SharedParams, 59 | 60 | #[allow(missing_docs)] 61 | #[structopt(flatten)] 62 | pub import_params: ImportParams, 63 | } -------------------------------------------------------------------------------- /cli/src/command.rs: -------------------------------------------------------------------------------- 1 | use sc_cli::VersionInfo; 2 | use sc_service::{Roles as ServiceRoles}; 3 | use node_transaction_factory::RuntimeAdapter; 4 | use crate::{Cli, service, ChainSpec, load_spec, Subcommand, factory_impl::FactoryState}; 5 | 6 | /// Parse command line arguments into service configuration. 7 | pub fn run(args: I, version: VersionInfo) -> sc_cli::Result<()> 8 | where 9 | I: Iterator, 10 | T: Into + Clone, 11 | { 12 | let args: Vec<_> = args.collect(); 13 | let opt = sc_cli::from_iter::(args.clone(), &version); 14 | 15 | let mut config = sc_service::Configuration::from_version(&version); 16 | 17 | match opt.subcommand { 18 | None => { 19 | opt.run.init(&version)?; 20 | opt.run.update_config(&mut config, load_spec, &version)?; 21 | opt.run.run( 22 | config, 23 | service::new_light, 24 | service::new_full, 25 | &version, 26 | ) 27 | }, 28 | Some(Subcommand::Inspect(cmd)) => { 29 | cmd.init(&version)?; 30 | cmd.update_config(&mut config, load_spec, &version)?; 31 | 32 | let client = sc_service::new_full_client::< 33 | akropolisos_runtime::Block, akropolisos_runtime::RuntimeApi, node_executor::Executor, 34 | >(&config)?; 35 | let inspect = node_inspect::Inspector::::new(client); 36 | 37 | cmd.run(inspect) 38 | }, 39 | Some(Subcommand::Benchmark(cmd)) => { 40 | cmd.init(&version)?; 41 | cmd.update_config(&mut config, load_spec, &version)?; 42 | 43 | cmd.run::(config) 44 | }, 45 | Some(Subcommand::Factory(cli_args)) => { 46 | cli_args.shared_params.init(&version)?; 47 | cli_args.shared_params.update_config(&mut config, load_spec, &version)?; 48 | cli_args.import_params.update_config( 49 | &mut config, 50 | ServiceRoles::FULL, 51 | cli_args.shared_params.dev, 52 | )?; 53 | 54 | config.use_in_memory_keystore()?; 55 | 56 | match ChainSpec::from(config.expect_chain_spec().id()) { 57 | Some(ref c) if c == &ChainSpec::Development || c == &ChainSpec::LocalTestnet => {}, 58 | _ => return Err( 59 | "Factory is only supported for development and local testnet.".into() 60 | ), 61 | } 62 | 63 | // Setup tracing. 
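			// Tracing is optional here; as a sketch (flag names assumed from ImportParams and
			// the FactoryCmd fields defined in cli.rs), a profiled factory run would look like:
			//   akropolisos-node factory --dev --blocks 10 --transactions 50 --tracing-targets "sync,txpool"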
64 | if let Some(tracing_targets) = cli_args.import_params.tracing_targets.as_ref() { 65 | let subscriber = sc_tracing::ProfilingSubscriber::new( 66 | cli_args.import_params.tracing_receiver.into(), tracing_targets 67 | ); 68 | if let Err(e) = tracing::subscriber::set_global_default(subscriber) { 69 | return Err( 70 | format!("Unable to set global default subscriber {}", e).into() 71 | ); 72 | } 73 | } 74 | 75 | let factory_state = FactoryState::new( 76 | cli_args.blocks, 77 | cli_args.transactions, 78 | ); 79 | 80 | let service_builder = new_full_start!(config).0; 81 | node_transaction_factory::factory( 82 | factory_state, 83 | service_builder.client(), 84 | service_builder.select_chain() 85 | .expect("The select_chain is always initialized by new_full_start!; QED") 86 | ).map_err(|e| format!("Error in transaction factory: {}", e))?; 87 | 88 | Ok(()) 89 | }, 90 | Some(Subcommand::Base(subcommand)) => { 91 | subcommand.init(&version)?; 92 | subcommand.update_config(&mut config, load_spec, &version)?; 93 | subcommand.run( 94 | config, 95 | |config: sc_service::Configuration| Ok(new_full_start!(config).0), 96 | ) 97 | }, 98 | } 99 | } -------------------------------------------------------------------------------- /cli/src/factory_impl.rs: -------------------------------------------------------------------------------- 1 | //! Implementation of the transaction factory trait, which enables 2 | //! using the cli to manufacture transactions and distribute them 3 | //! to accounts. 4 | 5 | use rand::{Rng, SeedableRng}; 6 | use rand::rngs::StdRng; 7 | 8 | use codec::{Encode, Decode}; 9 | use sp_keyring::sr25519::Keyring; 10 | use akropolisos_runtime::{Signature, 11 | Call, CheckedExtrinsic, UncheckedExtrinsic, SignedExtra, BalancesCall, ExistentialDeposit, 12 | MinimumPeriod 13 | }; 14 | use sp_core::{sr25519, crypto::Pair}; 15 | use sp_runtime::{ 16 | generic::Era, traits::{Block as BlockT, Header as HeaderT, SignedExtension, Verify, IdentifyAccount} 17 | }; 18 | use node_transaction_factory::RuntimeAdapter; 19 | use sp_inherents::InherentData; 20 | use sp_timestamp; 21 | use sp_finality_tracker; 22 | 23 | type AccountPublic = ::Signer; 24 | 25 | pub struct FactoryState { 26 | blocks: u32, 27 | transactions: u32, 28 | block_number: N, 29 | index: u32, 30 | } 31 | 32 | type Number = <::Header as HeaderT>::Number; 33 | 34 | impl FactoryState { 35 | fn build_extra(index: akropolisos_runtime::Index, phase: u64) -> akropolisos_runtime::SignedExtra { 36 | ( 37 | system::CheckVersion::new(), 38 | system::CheckGenesis::new(), 39 | system::CheckEra::from(Era::mortal(256, phase)), 40 | system::CheckNonce::from(index), 41 | system::CheckWeight::new(), 42 | pallet_transaction_payment::ChargeTransactionPayment::from(0), 43 | Default::default(), 44 | ) 45 | } 46 | } 47 | 48 | impl RuntimeAdapter for FactoryState { 49 | type AccountId = akropolisos_runtime::AccountId; 50 | type Balance = akropolisos_runtime::Balance; 51 | type Block = akropolisos_runtime::Block; 52 | type Phase = sp_runtime::generic::Phase; 53 | type Secret = sr25519::Pair; 54 | type Index = akropolisos_runtime::Index; 55 | 56 | type Number = Number; 57 | 58 | fn new( 59 | blocks: u32, 60 | transactions: u32, 61 | ) -> FactoryState { 62 | FactoryState { 63 | blocks, 64 | transactions, 65 | block_number: 0, 66 | index: 0, 67 | } 68 | } 69 | 70 | fn block_number(&self) -> u32 { 71 | self.block_number 72 | } 73 | 74 | fn blocks(&self) -> u32 { 75 | self.blocks 76 | } 77 | 78 | fn transactions(&self) -> u32 { 79 | self.transactions 80 | } 81 | 82 
| fn set_block_number(&mut self, value: u32) { 83 | self.block_number = value; 84 | } 85 | 86 | fn transfer_extrinsic( 87 | &mut self, 88 | sender: &Self::AccountId, 89 | key: &Self::Secret, 90 | destination: &Self::AccountId, 91 | amount: &Self::Balance, 92 | version: u32, 93 | genesis_hash: &::Hash, 94 | prior_block_hash: &::Hash, 95 | ) -> ::Extrinsic { 96 | let phase = self.block_number() as Self::Phase; 97 | let extra = Self::build_extra(self.index, phase); 98 | self.index += 1; 99 | 100 | sign::(CheckedExtrinsic { 101 | signed: Some((sender.clone(), extra)), 102 | function: Call::Balances( 103 | BalancesCall::transfer( 104 | pallet_indices::address::Address::Id(destination.clone().into()), 105 | (*amount).into() 106 | ) 107 | ) 108 | }, key, (version, genesis_hash.clone(), prior_block_hash.clone(), (), (), (), ())) 109 | } 110 | 111 | fn inherent_extrinsics(&self) -> InherentData { 112 | let timestamp = (self.block_number as u64 + 1) * MinimumPeriod::get(); 113 | 114 | let mut inherent = InherentData::new(); 115 | inherent.put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) 116 | .expect("Failed putting timestamp inherent"); 117 | inherent.put_data(sp_finality_tracker::INHERENT_IDENTIFIER, &self.block_number) 118 | .expect("Failed putting finalized number inherent"); 119 | inherent 120 | } 121 | 122 | fn minimum_balance() -> Self::Balance { 123 | ExistentialDeposit::get() 124 | } 125 | 126 | fn master_account_id() -> Self::AccountId { 127 | Keyring::Alice.to_account_id() 128 | } 129 | 130 | fn master_account_secret() -> Self::Secret { 131 | Keyring::Alice.pair() 132 | } 133 | 134 | /// Generates a random `AccountId` from `seed`. 135 | fn gen_random_account_id(seed: u32) -> Self::AccountId { 136 | let pair: sr25519::Pair = sr25519::Pair::from_seed(&gen_seed_bytes(seed)); 137 | AccountPublic::from(pair.public()).into_account() 138 | } 139 | 140 | /// Generates a random `Secret` from `seed`. 141 | fn gen_random_account_secret(seed: u32) -> Self::Secret { 142 | let pair: sr25519::Pair = sr25519::Pair::from_seed(&gen_seed_bytes(seed)); 143 | pair 144 | } 145 | } 146 | 147 | fn gen_seed_bytes(seed: u32) -> [u8; 32] { 148 | let mut rng: StdRng = SeedableRng::seed_from_u64(seed as u64); 149 | 150 | let mut seed_bytes = [0u8; 32]; 151 | for i in 0..32 { 152 | seed_bytes[i] = rng.gen::(); 153 | } 154 | seed_bytes 155 | } 156 | 157 | /// Creates an `UncheckedExtrinsic` containing the appropriate signature for 158 | /// a `CheckedExtrinsics`. 159 | fn sign( 160 | xt: CheckedExtrinsic, 161 | key: &sr25519::Pair, 162 | additional_signed: ::AdditionalSigned, 163 | ) -> ::Extrinsic { 164 | let s = match xt.signed { 165 | Some((signed, extra)) => { 166 | let payload = (xt.function, extra.clone(), additional_signed); 167 | let signature = payload.using_encoded(|b| { 168 | if b.len() > 256 { 169 | key.sign(&sp_io::hashing::blake2_256(b)) 170 | } else { 171 | key.sign(b) 172 | } 173 | }).into(); 174 | UncheckedExtrinsic { 175 | signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), 176 | function: payload.0, 177 | } 178 | } 179 | None => UncheckedExtrinsic { 180 | signature: None, 181 | function: xt.function, 182 | }, 183 | }; 184 | 185 | let e = Encode::encode(&s); 186 | Decode::decode(&mut &e[..]).expect("Failed to decode signed unchecked extrinsic") 187 | } -------------------------------------------------------------------------------- /cli/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Substrate CLI library. 2 | //! 
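//! As a sketch of how this crate is consumed (the workspace's actual `main.rs` may differ),
//! the `akropolisos-node` binary is expected to be a thin wrapper around `run_cli`:
//! `fn main() -> sc_cli::Result<()> { node_cli::run_cli() }`
//!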
3 | //! This package has two Cargo features: 4 | //! 5 | //! - `cli` (default): exposes functions that parse command-line options, then start and run the 6 | //! node as a CLI application. 7 | //! 8 | //! - `browser`: exposes the content of the `browser` module, which consists of exported symbols 9 | //! that are meant to be passed through the `wasm-bindgen` utility and called from JavaScript. 10 | //! Despite its name the produced WASM can theoretically also be used from NodeJS, although this 11 | //! hasn't been tested. 12 | 13 | pub mod chain_spec; 14 | 15 | #[macro_use] 16 | pub mod service; 17 | #[cfg(feature = "browser")] 18 | mod browser; 19 | #[cfg(feature = "cli")] 20 | mod cli; 21 | #[cfg(feature = "cli")] 22 | mod command; 23 | #[cfg(feature = "cli")] 24 | mod factory_impl; 25 | 26 | #[cfg(feature = "browser")] 27 | pub use browser::*; 28 | #[cfg(feature = "cli")] 29 | pub use cli::*; 30 | #[cfg(feature = "cli")] 31 | pub use command::*; 32 | 33 | /// The chain specification option. 34 | #[derive(Clone, Debug, PartialEq)] 35 | pub enum ChainSpec { 36 | /// Whatever the current runtime is, with just Alice as an auth. 37 | Development, 38 | /// Whatever the current runtime is, with simple Alice/Bob auths. 39 | LocalTestnet, 40 | /// Whatever the current runtime is with the "global testnet" defaults. 41 | AkropolisOSStaging, 42 | /// Syracuse testnet 43 | AkropolisOSSyracuse, 44 | /// Akropolis OS Mainnet 45 | AkropolisOS, 46 | } 47 | 48 | /// Get a chain config from a spec setting. 49 | impl ChainSpec { 50 | pub(crate) fn load(self) -> Result { 51 | Ok(match self { 52 | ChainSpec::Development => chain_spec::development_config(), 53 | ChainSpec::LocalTestnet => chain_spec::local_testnet_config(), 54 | ChainSpec::AkropolisOSSyracuse => chain_spec::syracuse_testnet_config()?, 55 | ChainSpec::AkropolisOSStaging => chain_spec::staging_testnet_config(), 56 | ChainSpec::AkropolisOS => chain_spec::akropolisos_config()?, 57 | }) 58 | } 59 | 60 | pub(crate) fn from(s: &str) -> Option { 61 | match s { 62 | "dev" => Some(ChainSpec::Development), 63 | "local" => Some(ChainSpec::LocalTestnet), 64 | "syracuse" => Some(ChainSpec::AkropolisOSSyracuse), 65 | "" | "akro" | "akropolisos" => Some(ChainSpec::AkropolisOS), 66 | "staging" => Some(ChainSpec::AkropolisOSStaging), 67 | _ => None, 68 | } 69 | } 70 | } 71 | 72 | fn load_spec(id: &str) -> Result, String> { 73 | Ok(match ChainSpec::from(id) { 74 | Some(spec) => Box::new(spec.load()?), 75 | None => Box::new(chain_spec::ChainSpec::from_json_file( 76 | std::path::PathBuf::from(id), 77 | )?), 78 | }) 79 | } 80 | 81 | pub fn run_cli() -> sc_cli::Result<()> { 82 | use std::env; 83 | let version = sc_cli::VersionInfo { 84 | name: "AkropolisOS", 85 | commit: env!("VERGEN_SHA_SHORT"), 86 | version: env!("CARGO_PKG_VERSION"), 87 | executable_name: "akropolisos-node", 88 | author: "Akropolis", 89 | description: "Akropolis OS Node", 90 | support_url: "admin@akropolis.io", 91 | copyright_start_year: 2019, 92 | }; 93 | 94 | crate::run(env::args(), version) 95 | } 96 | -------------------------------------------------------------------------------- /executor/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "node-executor" 3 | version = "0.3.0" 4 | authors = ['Akropolis '] 5 | description = "Substrate node implementation in Rust." 
6 | edition = "2018" 7 | license = "MIT" 8 | 9 | [dependencies] 10 | codec = { package = "parity-scale-codec", version = "1.2.0" } 11 | akropolisos-runtime = { path = "../runtime" } 12 | sc-executor = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 13 | sp-core = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 14 | sp-io = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 15 | sp-state-machine = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 16 | sp-trie = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 17 | trie-root = "0.16.0" 18 | frame-benchmarking = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 19 | 20 | [dev-dependencies] 21 | criterion = "0.3.0" 22 | node-testing = { path = "../testing" } 23 | frame-support = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 24 | frame-system = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 25 | pallet-balances = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 26 | pallet-contracts = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 27 | pallet-grandpa = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 28 | pallet-im-online = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 29 | pallet-indices = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 30 | pallet-session = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 31 | pallet-timestamp = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 32 | pallet-transaction-payment = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 33 | pallet-treasury = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 34 | sp-application-crypto = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 35 | sp-runtime = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 36 | sp-externalities = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 37 | substrate-test-client = { version = "2.0.0-dev", git = 'https://github.com/paritytech/substrate.git' } 38 | wabt = "0.9.2" 39 | 40 | [features] 41 | wasmtime = [ 42 | "sc-executor/wasmtime", 43 | ] 44 | wasmi-errno = [ 45 | "sc-executor/wasmi-errno", 46 | ] 47 | stress-test = [] 48 | 49 | [[bench]] 50 | name = "bench" 51 | harness = false 52 | -------------------------------------------------------------------------------- /executor/benches/bench.rs: -------------------------------------------------------------------------------- 1 | use codec::{Decode, Encode}; 2 | use criterion::{BatchSize, Criterion, criterion_group, criterion_main}; 3 | use node_executor::Executor; 4 | use node_primitives::{BlockNumber, Hash}; 5 | use node_runtime::{ 6 | Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, UncheckedExtrinsic, 7 | }; 8 | use node_runtime::constants::currency::*; 9 | use node_testing::keyring::*; 10 | use sp_core::{NativeOrEncoded, NeverNativeValue}; 11 | use sp_core::storage::well_known_keys; 12 | use sp_core::traits::{CodeExecutor, RuntimeCode}; 13 | use frame_support::Hashable; 14 | use 
sp_state_machine::TestExternalities as CoreTestExternalities; 15 | use sc_executor::{NativeExecutor, RuntimeInfo, WasmExecutionMethod, Externalities}; 16 | use sp_runtime::traits::BlakeTwo256; 17 | 18 | criterion_group!(benches, bench_execute_block); 19 | criterion_main!(benches); 20 | 21 | /// The wasm runtime code. 22 | const COMPACT_CODE: &[u8] = node_runtime::WASM_BINARY; 23 | 24 | const GENESIS_HASH: [u8; 32] = [69u8; 32]; 25 | 26 | const VERSION: u32 = node_runtime::VERSION.spec_version; 27 | 28 | const HEAP_PAGES: u64 = 20; 29 | 30 | type TestExternalities = CoreTestExternalities; 31 | 32 | #[derive(Debug)] 33 | enum ExecutionMethod { 34 | Native, 35 | Wasm(WasmExecutionMethod), 36 | } 37 | 38 | fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { 39 | node_testing::keyring::sign(xt, VERSION, GENESIS_HASH) 40 | } 41 | 42 | fn new_test_ext(genesis_config: &GenesisConfig) -> TestExternalities { 43 | let mut test_ext = TestExternalities::new_with_code( 44 | COMPACT_CODE, 45 | genesis_config.build_storage().unwrap(), 46 | ); 47 | test_ext.ext().place_storage(well_known_keys::HEAP_PAGES.to_vec(), Some(HEAP_PAGES.encode())); 48 | test_ext 49 | } 50 | 51 | fn construct_block( 52 | executor: &NativeExecutor, 53 | ext: &mut E, 54 | number: BlockNumber, 55 | parent_hash: Hash, 56 | extrinsics: Vec, 57 | ) -> (Vec, Hash) { 58 | use sp_trie::{TrieConfiguration, trie_types::Layout}; 59 | 60 | // sign extrinsics. 61 | let extrinsics = extrinsics.into_iter().map(sign).collect::>(); 62 | 63 | // calculate the header fields that we can. 64 | let extrinsics_root = Layout::::ordered_trie_root( 65 | extrinsics.iter().map(Encode::encode) 66 | ).to_fixed_bytes() 67 | .into(); 68 | 69 | let header = Header { 70 | parent_hash, 71 | number, 72 | extrinsics_root, 73 | state_root: Default::default(), 74 | digest: Default::default(), 75 | }; 76 | 77 | let runtime_code = RuntimeCode { 78 | code_fetcher: &sp_core::traits::WrappedRuntimeCode(COMPACT_CODE.into()), 79 | hash: vec![1, 2, 3], 80 | heap_pages: None, 81 | }; 82 | 83 | // execute the block to get the real header. 
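	// The three runtime-API calls below follow the standard block-authoring sequence:
	//   Core_initialize_block(header)      - open the block with the draft header
	//   BlockBuilder_apply_extrinsic(xt)   - applied once per extrinsic, in order
	//   BlockBuilder_finalize_block()      - returns the sealed header with the real roots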
84 | executor.call:: _>( 85 | ext, 86 | &runtime_code, 87 | "Core_initialize_block", 88 | &header.encode(), 89 | true, 90 | None, 91 | ).0.unwrap(); 92 | 93 | for i in extrinsics.iter() { 94 | executor.call:: _>( 95 | ext, 96 | &runtime_code, 97 | "BlockBuilder_apply_extrinsic", 98 | &i.encode(), 99 | true, 100 | None, 101 | ).0.unwrap(); 102 | } 103 | 104 | let header = match executor.call:: _>( 105 | ext, 106 | &runtime_code, 107 | "BlockBuilder_finalize_block", 108 | &[0u8;0], 109 | true, 110 | None, 111 | ).0.unwrap() { 112 | NativeOrEncoded::Native(_) => unreachable!(), 113 | NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), 114 | }; 115 | 116 | let hash = header.blake2_256(); 117 | (Block { header, extrinsics }.encode(), hash.into()) 118 | } 119 | 120 | 121 | fn test_blocks(genesis_config: &GenesisConfig, executor: &NativeExecutor) 122 | -> Vec<(Vec, Hash)> 123 | { 124 | let mut test_ext = new_test_ext(genesis_config); 125 | let mut block1_extrinsics = vec![ 126 | CheckedExtrinsic { 127 | signed: None, 128 | function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), 129 | }, 130 | ]; 131 | block1_extrinsics.extend((0..20).map(|i| { 132 | CheckedExtrinsic { 133 | signed: Some((alice(), signed_extra(i, 0))), 134 | function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 1 * DOLLARS)), 135 | } 136 | })); 137 | let block1 = construct_block( 138 | executor, 139 | &mut test_ext.ext(), 140 | 1, 141 | GENESIS_HASH.into(), 142 | block1_extrinsics, 143 | ); 144 | 145 | vec![block1] 146 | } 147 | 148 | fn bench_execute_block(c: &mut Criterion) { 149 | c.bench_function_over_inputs( 150 | "execute blocks", 151 | |b, strategy| { 152 | let genesis_config = node_testing::genesis::config(false, Some(COMPACT_CODE)); 153 | let (use_native, wasm_method) = match strategy { 154 | ExecutionMethod::Native => (true, WasmExecutionMethod::Interpreted), 155 | ExecutionMethod::Wasm(wasm_method) => (false, *wasm_method), 156 | }; 157 | 158 | let executor = NativeExecutor::new(wasm_method, None, 8); 159 | let runtime_code = RuntimeCode { 160 | code_fetcher: &sp_core::traits::WrappedRuntimeCode(COMPACT_CODE.into()), 161 | hash: vec![1, 2, 3], 162 | heap_pages: None, 163 | }; 164 | 165 | // Get the runtime version to initialize the runtimes cache. 166 | { 167 | let mut test_ext = new_test_ext(&genesis_config); 168 | executor.runtime_version(&mut test_ext.ext(), &runtime_code).unwrap(); 169 | } 170 | 171 | let blocks = test_blocks(&genesis_config, &executor); 172 | 173 | b.iter_batched_ref( 174 | || new_test_ext(&genesis_config), 175 | |test_ext| { 176 | for block in blocks.iter() { 177 | executor.call:: _>( 178 | &mut test_ext.ext(), 179 | &runtime_code, 180 | "Core_execute_block", 181 | &block.0, 182 | use_native, 183 | None, 184 | ).0.unwrap(); 185 | } 186 | }, 187 | BatchSize::LargeInput, 188 | ); 189 | }, 190 | vec![ 191 | ExecutionMethod::Native, 192 | ExecutionMethod::Wasm(WasmExecutionMethod::Interpreted), 193 | #[cfg(feature = "wasmtime")] 194 | ExecutionMethod::Wasm(WasmExecutionMethod::Compiled), 195 | ], 196 | ); 197 | } 198 | -------------------------------------------------------------------------------- /executor/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A `CodeExecutor` specialization which uses natively compiled runtime when the wasm to be 2 | //! executed is equivalent to the natively compiled code. 
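//!
//! As used by the tests and benches in this repository (see `executor/tests/common.rs`),
//! an instance is created with
//! `NativeExecutor::<Executor>::new(WasmExecutionMethod::Interpreted, None, 8)`, where the
//! arguments are the wasm execution method, the default heap pages (`None` selects the
//! default) and, in this Substrate version, the maximum number of runtime instances.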
3 | 4 | pub use sc_executor::NativeExecutor; 5 | use sc_executor::native_executor_instance; 6 | 7 | // Declare an instance of the native executor named `Executor`. Include the wasm binary as the 8 | // equivalent wasm code. 9 | native_executor_instance!( 10 | pub Executor, 11 | akropolisos_runtime::api::dispatch, 12 | akropolisos_runtime::native_version, 13 | frame_benchmarking::benchmarking::HostFunctions, 14 | ); 15 | -------------------------------------------------------------------------------- /executor/tests/common.rs: -------------------------------------------------------------------------------- 1 | use codec::{Encode, Decode}; 2 | use frame_support::Hashable; 3 | use sp_state_machine::TestExternalities as CoreTestExternalities; 4 | use sp_core::{NeverNativeValue, NativeOrEncoded, traits::{CodeExecutor, RuntimeCode}}; 5 | use sp_runtime::{ApplyExtrinsicResult, traits::{Header as HeaderT, BlakeTwo256}}; 6 | use sc_executor::{NativeExecutor, WasmExecutionMethod}; 7 | use sc_executor::error::Result; 8 | 9 | use node_executor::Executor; 10 | use node_runtime::{ 11 | Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Runtime, BuildStorage, 12 | constants::currency::*, 13 | }; 14 | use node_primitives::{Hash, BlockNumber}; 15 | use node_testing::keyring::*; 16 | use sp_externalities::Externalities; 17 | 18 | /// The wasm runtime code. 19 | /// 20 | /// `compact` since it is after post-processing with wasm-gc which performs tree-shaking thus 21 | /// making the binary slimmer. There is a convention to use compact version of the runtime 22 | /// as canonical. This is why `native_executor_instance` also uses the compact version of the 23 | /// runtime. 24 | pub const COMPACT_CODE: &[u8] = node_runtime::WASM_BINARY; 25 | 26 | pub const GENESIS_HASH: [u8; 32] = [69u8; 32]; 27 | 28 | pub const VERSION: u32 = node_runtime::VERSION.spec_version; 29 | 30 | pub type TestExternalities = CoreTestExternalities; 31 | 32 | pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { 33 | node_testing::keyring::sign(xt, VERSION, GENESIS_HASH) 34 | } 35 | 36 | pub fn default_transfer_call() -> pallet_balances::Call { 37 | pallet_balances::Call::transfer::(bob().into(), 69 * DOLLARS) 38 | } 39 | 40 | pub fn from_block_number(n: u32) -> Header { 41 | Header::new(n, Default::default(), Default::default(), [69; 32].into(), Default::default()) 42 | } 43 | 44 | pub fn executor() -> NativeExecutor { 45 | NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) 46 | } 47 | 48 | pub fn executor_call< 49 | R:Decode + Encode + PartialEq, 50 | NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe 51 | >( 52 | t: &mut TestExternalities, 53 | method: &str, 54 | data: &[u8], 55 | use_native: bool, 56 | native_call: Option, 57 | ) -> (Result>, bool) { 58 | let mut t = t.ext(); 59 | 60 | let code = t.storage(sp_core::storage::well_known_keys::CODE).unwrap(); 61 | let heap_pages = t.storage(sp_core::storage::well_known_keys::HEAP_PAGES); 62 | let runtime_code = RuntimeCode { 63 | code_fetcher: &sp_core::traits::WrappedRuntimeCode(code.as_slice().into()), 64 | hash: sp_core::blake2_256(&code).to_vec(), 65 | heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()), 66 | }; 67 | 68 | executor().call::( 69 | &mut t, 70 | &runtime_code, 71 | method, 72 | data, 73 | use_native, 74 | native_call, 75 | ) 76 | } 77 | 78 | pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { 79 | let mut ext = TestExternalities::new_with_code( 80 | code, 81 | 
node_testing::genesis::config(support_changes_trie, Some(code)).build_storage().unwrap(), 82 | ); 83 | ext.changes_trie_storage().insert(0, GENESIS_HASH.into(), Default::default()); 84 | ext 85 | } 86 | 87 | /// Construct a fake block. 88 | /// 89 | /// `extrinsics` must be a list of valid extrinsics, i.e. none of the extrinsics for example 90 | /// can report `ExhaustResources`. Otherwise, this function panics. 91 | pub fn construct_block( 92 | env: &mut TestExternalities, 93 | number: BlockNumber, 94 | parent_hash: Hash, 95 | extrinsics: Vec, 96 | ) -> (Vec, Hash) { 97 | use sp_trie::{TrieConfiguration, trie_types::Layout}; 98 | 99 | // sign extrinsics. 100 | let extrinsics = extrinsics.into_iter().map(sign).collect::>(); 101 | 102 | // calculate the header fields that we can. 103 | let extrinsics_root = 104 | Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) 105 | .to_fixed_bytes() 106 | .into(); 107 | 108 | let header = Header { 109 | parent_hash, 110 | number, 111 | extrinsics_root, 112 | state_root: Default::default(), 113 | digest: Default::default(), 114 | }; 115 | 116 | // execute the block to get the real header. 117 | executor_call:: _>( 118 | env, 119 | "Core_initialize_block", 120 | &header.encode(), 121 | true, 122 | None, 123 | ).0.unwrap(); 124 | 125 | for extrinsic in extrinsics.iter() { 126 | // Try to apply the `extrinsic`. It should be valid, in the sense that it passes 127 | // all pre-inclusion checks. 128 | let r = executor_call:: _>( 129 | env, 130 | "BlockBuilder_apply_extrinsic", 131 | &extrinsic.encode(), 132 | true, 133 | None, 134 | ).0.expect("application of an extrinsic failed").into_encoded(); 135 | match ApplyExtrinsicResult::decode(&mut &r[..]).expect("apply result deserialization failed") { 136 | Ok(_) => {}, 137 | Err(e) => panic!("Applying extrinsic failed: {:?}", e), 138 | } 139 | } 140 | 141 | let header = match executor_call:: _>( 142 | env, 143 | "BlockBuilder_finalize_block", 144 | &[0u8;0], 145 | true, 146 | None, 147 | ).0.unwrap() { 148 | NativeOrEncoded::Native(_) => unreachable!(), 149 | NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), 150 | }; 151 | 152 | let hash = header.blake2_256(); 153 | (Block { header, extrinsics }.encode(), hash.into()) 154 | } 155 | -------------------------------------------------------------------------------- /executor/tests/fees.rs: -------------------------------------------------------------------------------- 1 | use codec::{Encode, Joiner}; 2 | use frame_support::{ 3 | StorageValue, StorageMap, 4 | traits::Currency, 5 | weights::GetDispatchInfo, 6 | }; 7 | use sp_core::{NeverNativeValue, map, storage::Storage}; 8 | use sp_runtime::{Fixed64, Perbill, traits::{Convert, BlakeTwo256}}; 9 | use node_runtime::{ 10 | CheckedExtrinsic, Call, Runtime, Balances, TransactionPayment, TransactionBaseFee, 11 | TransactionByteFee, WeightFeeCoefficient, 12 | constants::currency::*, 13 | }; 14 | use node_runtime::impls::LinearWeightToFee; 15 | use node_primitives::Balance; 16 | use node_testing::keyring::*; 17 | 18 | pub mod common; 19 | use self::common::{*, sign}; 20 | 21 | #[test] 22 | fn fee_multiplier_increases_and_decreases_on_big_weight() { 23 | let mut t = new_test_ext(COMPACT_CODE, false); 24 | 25 | // initial fee multiplier must be zero 26 | let mut prev_multiplier = Fixed64::from_parts(0); 27 | 28 | t.execute_with(|| { 29 | assert_eq!(TransactionPayment::next_fee_multiplier(), prev_multiplier); 30 | }); 31 | 32 | let mut tt = new_test_ext(COMPACT_CODE, false); 33 | 34 | // big 
one in terms of weight. 35 | let block1 = construct_block( 36 | &mut tt, 37 | 1, 38 | GENESIS_HASH.into(), 39 | vec![ 40 | CheckedExtrinsic { 41 | signed: None, 42 | function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), 43 | }, 44 | CheckedExtrinsic { 45 | signed: Some((charlie(), signed_extra(0, 0))), 46 | function: Call::System(frame_system::Call::fill_block(Perbill::from_percent(90))), 47 | } 48 | ] 49 | ); 50 | 51 | // small one in terms of weight. 52 | let block2 = construct_block( 53 | &mut tt, 54 | 2, 55 | block1.1.clone(), 56 | vec![ 57 | CheckedExtrinsic { 58 | signed: None, 59 | function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), 60 | }, 61 | CheckedExtrinsic { 62 | signed: Some((charlie(), signed_extra(1, 0))), 63 | function: Call::System(frame_system::Call::remark(vec![0; 1])), 64 | } 65 | ] 66 | ); 67 | 68 | println!( 69 | "++ Block 1 size: {} / Block 2 size {}", 70 | block1.0.encode().len(), 71 | block2.0.encode().len(), 72 | ); 73 | 74 | // execute a big block. 75 | executor_call:: _>( 76 | &mut t, 77 | "Core_execute_block", 78 | &block1.0, 79 | true, 80 | None, 81 | ).0.unwrap(); 82 | 83 | // weight multiplier is increased for next block. 84 | t.execute_with(|| { 85 | let fm = TransactionPayment::next_fee_multiplier(); 86 | println!("After a big block: {:?} -> {:?}", prev_multiplier, fm); 87 | assert!(fm > prev_multiplier); 88 | prev_multiplier = fm; 89 | }); 90 | 91 | // execute a big block. 92 | executor_call:: _>( 93 | &mut t, 94 | "Core_execute_block", 95 | &block2.0, 96 | true, 97 | None, 98 | ).0.unwrap(); 99 | 100 | // weight multiplier is increased for next block. 101 | t.execute_with(|| { 102 | let fm = TransactionPayment::next_fee_multiplier(); 103 | println!("After a small block: {:?} -> {:?}", prev_multiplier, fm); 104 | assert!(fm < prev_multiplier); 105 | }); 106 | } 107 | 108 | #[test] 109 | fn transaction_fee_is_correct_ultimate() { 110 | // This uses the exact values of substrate-node. 111 | // 112 | // weight of transfer call as of now: 1_000_000 113 | // if weight of the cheapest weight would be 10^7, this would be 10^9, which is: 114 | // - 1 MILLICENTS in substrate node. 115 | // - 1 milli-dot based on current polkadot runtime. 
116 | // (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`) 117 | let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { 118 | top: map![ 119 | >::hashed_key_for(alice()) => { 120 | (0u32, 0u8, 100 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() 121 | }, 122 | >::hashed_key_for(bob()) => { 123 | (0u32, 0u8, 10 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() 124 | }, 125 | >::hashed_key().to_vec() => { 126 | (110 * DOLLARS).encode() 127 | }, 128 | >::hashed_key_for(0) => vec![0u8; 32] 129 | ], 130 | children: map![], 131 | }); 132 | 133 | let tip = 1_000_000; 134 | let xt = sign(CheckedExtrinsic { 135 | signed: Some((alice(), signed_extra(0, tip))), 136 | function: Call::Balances(default_transfer_call()), 137 | }); 138 | 139 | let r = executor_call:: _>( 140 | &mut t, 141 | "Core_initialize_block", 142 | &vec![].and(&from_block_number(1u32)), 143 | true, 144 | None, 145 | ).0; 146 | 147 | assert!(r.is_ok()); 148 | let r = executor_call:: _>( 149 | &mut t, 150 | "BlockBuilder_apply_extrinsic", 151 | &vec![].and(&xt.clone()), 152 | true, 153 | None, 154 | ).0; 155 | assert!(r.is_ok()); 156 | 157 | t.execute_with(|| { 158 | assert_eq!(Balances::total_balance(&bob()), (10 + 69) * DOLLARS); 159 | // Components deducted from alice's balances: 160 | // - Weight fee 161 | // - Length fee 162 | // - Tip 163 | // - Creation-fee of bob's account. 164 | let mut balance_alice = (100 - 69) * DOLLARS; 165 | 166 | let length_fee = TransactionBaseFee::get() + 167 | TransactionByteFee::get() * 168 | (xt.clone().encode().len() as Balance); 169 | balance_alice -= length_fee; 170 | 171 | let weight = default_transfer_call().get_dispatch_info().weight; 172 | let weight_fee = LinearWeightToFee::::convert(weight); 173 | 174 | // we know that weight to fee multiplier is effect-less in block 1. 175 | assert_eq!(weight_fee as Balance, MILLICENTS); 176 | balance_alice -= weight_fee; 177 | balance_alice -= tip; 178 | 179 | assert_eq!(Balances::total_balance(&alice()), balance_alice); 180 | }); 181 | } 182 | 183 | #[test] 184 | #[should_panic] 185 | #[cfg(feature = "stress-test")] 186 | fn block_weight_capacity_report() { 187 | // Just report how many transfer calls you could fit into a block. The number should at least 188 | // be a few hundred (250 at the time of writing but can change over time). Runs until panic. 189 | use node_primitives::Index; 190 | 191 | // execution ext. 192 | let mut t = new_test_ext(COMPACT_CODE, false); 193 | // setup ext. 194 | let mut tt = new_test_ext(COMPACT_CODE, false); 195 | 196 | let factor = 50; 197 | let mut time = 10; 198 | let mut nonce: Index = 0; 199 | let mut block_number = 1; 200 | let mut previous_hash: Hash = GENESIS_HASH.into(); 201 | 202 | loop { 203 | let num_transfers = block_number * factor; 204 | let mut xts = (0..num_transfers).map(|i| CheckedExtrinsic { 205 | signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), 206 | function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 0)), 207 | }).collect::>(); 208 | 209 | xts.insert(0, CheckedExtrinsic { 210 | signed: None, 211 | function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), 212 | }); 213 | 214 | // NOTE: this is super slow. Can probably be improved. 215 | let block = construct_block( 216 | &mut tt, 217 | block_number, 218 | previous_hash, 219 | xts 220 | ); 221 | 222 | let len = block.0.len(); 223 | print!( 224 | "++ Executing block with {} transfers. 
Block size = {} bytes / {} kb / {} mb", 225 | num_transfers, 226 | len, 227 | len / 1024, 228 | len / 1024 / 1024, 229 | ); 230 | 231 | let r = executor_call:: _>( 232 | &mut t, 233 | "Core_execute_block", 234 | &block.0, 235 | true, 236 | None, 237 | ).0; 238 | 239 | println!(" || Result = {:?}", r); 240 | assert!(r.is_ok()); 241 | 242 | previous_hash = block.1; 243 | nonce += num_transfers; 244 | time += 10; 245 | block_number += 1; 246 | } 247 | } 248 | 249 | #[test] 250 | #[should_panic] 251 | #[cfg(feature = "stress-test")] 252 | fn block_length_capacity_report() { 253 | // Just report how big a block can get. Executes until panic. Should be ignored unless if 254 | // manually inspected. The number should at least be a few megabytes (5 at the time of 255 | // writing but can change over time). 256 | use node_primitives::Index; 257 | 258 | // execution ext. 259 | let mut t = new_test_ext(COMPACT_CODE, false); 260 | // setup ext. 261 | let mut tt = new_test_ext(COMPACT_CODE, false); 262 | 263 | let factor = 256 * 1024; 264 | let mut time = 10; 265 | let mut nonce: Index = 0; 266 | let mut block_number = 1; 267 | let mut previous_hash: Hash = GENESIS_HASH.into(); 268 | 269 | loop { 270 | // NOTE: this is super slow. Can probably be improved. 271 | let block = construct_block( 272 | &mut tt, 273 | block_number, 274 | previous_hash, 275 | vec![ 276 | CheckedExtrinsic { 277 | signed: None, 278 | function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), 279 | }, 280 | CheckedExtrinsic { 281 | signed: Some((charlie(), signed_extra(nonce, 0))), 282 | function: Call::System(frame_system::Call::remark(vec![0u8; (block_number * factor) as usize])), 283 | }, 284 | ] 285 | ); 286 | 287 | let len = block.0.len(); 288 | print!( 289 | "++ Executing block with big remark. 
Block size = {} bytes / {} kb / {} mb", 290 | len, 291 | len / 1024, 292 | len / 1024 / 1024, 293 | ); 294 | 295 | let r = executor_call:: _>( 296 | &mut t, 297 | "Core_execute_block", 298 | &block.0, 299 | true, 300 | None, 301 | ).0; 302 | 303 | println!(" || Result = {:?}", r); 304 | assert!(r.is_ok()); 305 | 306 | previous_hash = block.1; 307 | nonce += 1; 308 | time += 10; 309 | block_number += 1; 310 | } 311 | } 312 | -------------------------------------------------------------------------------- /executor/tests/submit_transaction.rs: -------------------------------------------------------------------------------- 1 | use node_runtime::{ 2 | Call, Executive, Indices, Runtime, SubmitTransaction, UncheckedExtrinsic, 3 | }; 4 | use sp_application_crypto::AppKey; 5 | use sp_core::testing::KeyStore; 6 | use sp_core::traits::KeystoreExt; 7 | use sp_core::offchain::{ 8 | TransactionPoolExt, 9 | testing::TestTransactionPoolExt, 10 | }; 11 | use frame_system::offchain::{SubmitSignedTransaction, SubmitUnsignedTransaction}; 12 | use pallet_im_online::sr25519::AuthorityPair as Key; 13 | use codec::Decode; 14 | 15 | pub mod common; 16 | use self::common::*; 17 | 18 | #[test] 19 | fn should_submit_unsigned_transaction() { 20 | let mut t = new_test_ext(COMPACT_CODE, false); 21 | let (pool, state) = TestTransactionPoolExt::new(); 22 | t.register_extension(TransactionPoolExt::new(pool)); 23 | 24 | t.execute_with(|| { 25 | let signature = Default::default(); 26 | let heartbeat_data = pallet_im_online::Heartbeat { 27 | block_number: 1, 28 | network_state: Default::default(), 29 | session_index: 1, 30 | authority_index: 0, 31 | }; 32 | 33 | let call = pallet_im_online::Call::heartbeat(heartbeat_data, signature); 34 | > 35 | ::submit_unsigned(call) 36 | .unwrap(); 37 | 38 | assert_eq!(state.read().transactions.len(), 1) 39 | }); 40 | } 41 | 42 | const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; 43 | 44 | #[test] 45 | fn should_submit_signed_transaction() { 46 | let mut t = new_test_ext(COMPACT_CODE, false); 47 | let (pool, state) = TestTransactionPoolExt::new(); 48 | t.register_extension(TransactionPoolExt::new(pool)); 49 | 50 | let keystore = KeyStore::new(); 51 | keystore.write().sr25519_generate_new(Key::ID, Some(&format!("{}/hunter1", PHRASE))).unwrap(); 52 | keystore.write().sr25519_generate_new(Key::ID, Some(&format!("{}/hunter2", PHRASE))).unwrap(); 53 | keystore.write().sr25519_generate_new(Key::ID, Some(&format!("{}/hunter3", PHRASE))).unwrap(); 54 | t.register_extension(KeystoreExt(keystore)); 55 | 56 | t.execute_with(|| { 57 | let keys = > 58 | ::find_all_local_keys(); 59 | assert_eq!(keys.len(), 3, "Missing keys: {:?}", keys); 60 | 61 | let can_sign = > 62 | ::can_sign(); 63 | assert!(can_sign, "Since there are keys, `can_sign` should return true"); 64 | 65 | let call = pallet_balances::Call::transfer(Default::default(), Default::default()); 66 | let results = 67 | >::submit_signed(call); 68 | 69 | let len = results.len(); 70 | assert_eq!(len, 3); 71 | assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); 72 | assert_eq!(state.read().transactions.len(), len); 73 | }); 74 | } 75 | 76 | #[test] 77 | fn should_submit_signed_twice_from_the_same_account() { 78 | let mut t = new_test_ext(COMPACT_CODE, false); 79 | let (pool, state) = TestTransactionPoolExt::new(); 80 | t.register_extension(TransactionPoolExt::new(pool)); 81 | 82 | let keystore = KeyStore::new(); 83 | keystore.write().sr25519_generate_new(Key::ID, 
Some(&format!("{}/hunter1", PHRASE))).unwrap(); 84 | t.register_extension(KeystoreExt(keystore)); 85 | 86 | t.execute_with(|| { 87 | let call = pallet_balances::Call::transfer(Default::default(), Default::default()); 88 | let results = 89 | >::submit_signed(call); 90 | 91 | let len = results.len(); 92 | assert_eq!(len, 1); 93 | assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); 94 | assert_eq!(state.read().transactions.len(), 1); 95 | 96 | // submit another one from the same account. The nonce should be incremented. 97 | let call = pallet_balances::Call::transfer(Default::default(), Default::default()); 98 | let results = 99 | >::submit_signed(call); 100 | 101 | let len = results.len(); 102 | assert_eq!(len, 1); 103 | assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); 104 | assert_eq!(state.read().transactions.len(), 2); 105 | 106 | // now check that the transaction nonces are not equal 107 | let s = state.read(); 108 | fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { 109 | let extra = tx.signature.unwrap().2; 110 | extra.3 111 | } 112 | let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); 113 | let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap()); 114 | assert!( 115 | nonce1 != nonce2, 116 | "Transactions should have different nonces. Got: {:?}", nonce1 117 | ); 118 | }); 119 | } 120 | 121 | #[test] 122 | fn submitted_transaction_should_be_valid() { 123 | use codec::Encode; 124 | use frame_support::storage::StorageMap; 125 | use sp_runtime::transaction_validity::ValidTransaction; 126 | use sp_runtime::traits::StaticLookup; 127 | 128 | let mut t = new_test_ext(COMPACT_CODE, false); 129 | let (pool, state) = TestTransactionPoolExt::new(); 130 | t.register_extension(TransactionPoolExt::new(pool)); 131 | 132 | let keystore = KeyStore::new(); 133 | keystore.write().sr25519_generate_new(Key::ID, Some(&format!("{}/hunter1", PHRASE))).unwrap(); 134 | t.register_extension(KeystoreExt(keystore)); 135 | 136 | t.execute_with(|| { 137 | let call = pallet_balances::Call::transfer(Default::default(), Default::default()); 138 | let results = 139 | >::submit_signed(call); 140 | let len = results.len(); 141 | assert_eq!(len, 1); 142 | assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); 143 | }); 144 | 145 | // check that transaction is valid, but reset environment storage, 146 | // since CreateTransaction increments the nonce 147 | let tx0 = state.read().transactions[0].clone(); 148 | let mut t = new_test_ext(COMPACT_CODE, false); 149 | t.execute_with(|| { 150 | let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap(); 151 | // add balance to the account 152 | let author = extrinsic.signature.clone().unwrap().0; 153 | let address = Indices::lookup(author).unwrap(); 154 | let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; 155 | let account = frame_system::AccountInfo { nonce: 0u32, refcount: 0u8, data }; 156 | >::insert(&address, account); 157 | 158 | // check validity 159 | let res = Executive::validate_transaction(extrinsic); 160 | 161 | assert_eq!(res.unwrap(), ValidTransaction { 162 | priority: 2_411_002_000_000, 163 | requires: vec![], 164 | provides: vec![(address, 0).encode()], 165 | longevity: 127, 166 | propagate: true, 167 | }); 168 | }); 169 | } 170 | 171 | -------------------------------------------------------------------------------- /inspect/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "node-inspect" 3 | version = "0.3.0" 4 | authors = ['Akropolis '] 5 | edition = "2018" 6 | license = "MIT" 7 | 8 | [dependencies] 9 | codec = { package = "parity-scale-codec", version = "1.2.0" } 10 | derive_more = "0.99" 11 | log = "0.4.8" 12 | sc-cli = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 13 | sc-client-api = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 14 | sc-service = { version = "0.8.0-alpha.5", default-features = false, git = 'https://github.com/paritytech/substrate.git' } 15 | sp-blockchain = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 16 | sp-core = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 17 | sp-runtime = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 18 | structopt = "0.3.8" 19 | -------------------------------------------------------------------------------- /inspect/src/cli.rs: -------------------------------------------------------------------------------- 1 | //! Structs to easily compose inspect sub-command for CLI. 2 | 3 | use std::fmt::Debug; 4 | use sc_cli::{ImportParams, SharedParams}; 5 | use structopt::StructOpt; 6 | 7 | /// The `inspect` command used to print decoded chain data. 8 | #[derive(Debug, StructOpt, Clone)] 9 | pub struct InspectCmd { 10 | #[allow(missing_docs)] 11 | #[structopt(flatten)] 12 | pub command: InspectSubCmd, 13 | 14 | #[allow(missing_docs)] 15 | #[structopt(flatten)] 16 | pub shared_params: SharedParams, 17 | 18 | #[allow(missing_docs)] 19 | #[structopt(flatten)] 20 | pub import_params: ImportParams, 21 | } 22 | 23 | /// A possible inspect sub-commands. 24 | #[derive(Debug, StructOpt, Clone)] 25 | pub enum InspectSubCmd { 26 | /// Decode block with native version of runtime and print out the details. 27 | Block { 28 | /// Address of the block to print out. 29 | /// 30 | /// Can be either a block hash (no 0x prefix) or a number to retrieve existing block, 31 | /// or a 0x-prefixed bytes hex string, representing SCALE encoding of 32 | /// a block. 33 | #[structopt(value_name = "HASH or NUMBER or BYTES")] 34 | input: String, 35 | }, 36 | /// Decode extrinsic with native version of runtime and print out the details. 37 | Extrinsic { 38 | /// Address of an extrinsic to print out. 39 | /// 40 | /// Can be either a block hash (no 0x prefix) or number and the index, in the form 41 | /// of `{block}:{index}` or a 0x-prefixed bytes hex string, 42 | /// representing SCALE encoding of an extrinsic. 43 | #[structopt(value_name = "BLOCK:INDEX or BYTES")] 44 | input: String, 45 | }, 46 | } 47 | -------------------------------------------------------------------------------- /inspect/src/command.rs: -------------------------------------------------------------------------------- 1 | //! Command ran by the CLI 2 | 3 | use std::{ 4 | fmt::Debug, 5 | str::FromStr, 6 | }; 7 | 8 | use crate::cli::{InspectCmd, InspectSubCmd}; 9 | use crate::{Inspector, PrettyPrinter}; 10 | 11 | impl InspectCmd { 12 | /// Initialize 13 | pub fn init(&self, version: &sc_cli::VersionInfo) -> sc_cli::Result<()> { 14 | self.shared_params.init(version) 15 | } 16 | 17 | /// Parse CLI arguments and initialize given config. 
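/// (As the body below shows, this forces an in-memory keystore and applies the import
/// params, in particular the pruning mode, which has to match the database metadata.)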
18 | pub fn update_config( 19 | &self, 20 | mut config: &mut sc_service::config::Configuration, 21 | spec_factory: impl FnOnce(&str) -> Result, String>, 22 | version: &sc_cli::VersionInfo, 23 | ) -> sc_cli::Result<()> { 24 | self.shared_params.update_config(config, spec_factory, version)?; 25 | 26 | // make sure to configure keystore 27 | config.use_in_memory_keystore()?; 28 | 29 | // and all import params (especially pruning that has to match db meta) 30 | self.import_params.update_config( 31 | &mut config, 32 | sc_service::Roles::FULL, 33 | self.shared_params.dev, 34 | )?; 35 | 36 | Ok(()) 37 | } 38 | 39 | /// Run the inspect command, passing the inspector. 40 | pub fn run( 41 | self, 42 | inspect: Inspector, 43 | ) -> sc_cli::Result<()> where 44 | B: sp_runtime::traits::Block, 45 | B::Hash: FromStr, 46 | P: PrettyPrinter, 47 | { 48 | match self.command { 49 | InspectSubCmd::Block { input } => { 50 | let input = input.parse()?; 51 | let res = inspect.block(input) 52 | .map_err(|e| format!("{}", e))?; 53 | println!("{}", res); 54 | Ok(()) 55 | }, 56 | InspectSubCmd::Extrinsic { input } => { 57 | let input = input.parse()?; 58 | let res = inspect.extrinsic(input) 59 | .map_err(|e| format!("{}", e))?; 60 | println!("{}", res); 61 | Ok(()) 62 | }, 63 | } 64 | } 65 | } 66 | 67 | /// A block to retrieve. 68 | #[derive(Debug, Clone, PartialEq)] 69 | pub enum BlockAddress { 70 | /// Get block by hash. 71 | Hash(Hash), 72 | /// Get block by number. 73 | Number(Number), 74 | /// Raw SCALE-encoded bytes. 75 | Bytes(Vec), 76 | } 77 | 78 | impl FromStr for BlockAddress { 79 | type Err = String; 80 | 81 | fn from_str(s: &str) -> Result { 82 | // try to parse hash first 83 | if let Ok(hash) = s.parse() { 84 | return Ok(Self::Hash(hash)) 85 | } 86 | 87 | // then number 88 | if let Ok(number) = s.parse() { 89 | return Ok(Self::Number(number)) 90 | } 91 | 92 | // then assume it's bytes (hex-encoded) 93 | sp_core::bytes::from_hex(s) 94 | .map(Self::Bytes) 95 | .map_err(|e| format!( 96 | "Given string does not look like hash or number. It could not be parsed as bytes either: {}", 97 | e 98 | )) 99 | } 100 | } 101 | 102 | /// An extrinsic address to decode and print out. 103 | #[derive(Debug, Clone, PartialEq)] 104 | pub enum ExtrinsicAddress { 105 | /// Extrinsic as part of existing block. 106 | Block(BlockAddress, usize), 107 | /// Raw SCALE-encoded extrinsic bytes. 108 | Bytes(Vec), 109 | } 110 | 111 | impl FromStr for ExtrinsicAddress { 112 | type Err = String; 113 | 114 | fn from_str(s: &str) -> Result { 115 | // first try raw bytes 116 | if let Ok(bytes) = sp_core::bytes::from_hex(s).map(Self::Bytes) { 117 | return Ok(bytes) 118 | } 119 | 120 | // split by a bunch of different characters 121 | let mut it = s.split(|c| c == '.' || c == ':' || c == ' '); 122 | let block = it.next() 123 | .expect("First element of split iterator is never empty; qed") 124 | .parse()?; 125 | 126 | let index = it.next() 127 | .ok_or_else(|| format!("Extrinsic index missing: example \"5:0\""))? 
128 | .parse() 129 | .map_err(|e| format!("Invalid index format: {}", e))?; 130 | 131 | Ok(Self::Block(block, index)) 132 | } 133 | } 134 | 135 | #[cfg(test)] 136 | mod tests { 137 | use super::*; 138 | use sp_core::hash::H160 as Hash; 139 | 140 | #[test] 141 | fn should_parse_block_strings() { 142 | type BlockAddress = super::BlockAddress; 143 | 144 | let b0 = BlockAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258"); 145 | let b1 = BlockAddress::from_str("1234"); 146 | let b2 = BlockAddress::from_str("0"); 147 | let b3 = BlockAddress::from_str("0x0012345f"); 148 | 149 | 150 | assert_eq!(b0, Ok(BlockAddress::Hash( 151 | "3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap() 152 | ))); 153 | assert_eq!(b1, Ok(BlockAddress::Number(1234))); 154 | assert_eq!(b2, Ok(BlockAddress::Number(0))); 155 | assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); 156 | } 157 | 158 | #[test] 159 | fn should_parse_extrinsic_address() { 160 | type BlockAddress = super::BlockAddress; 161 | type ExtrinsicAddress = super::ExtrinsicAddress; 162 | 163 | let e0 = ExtrinsicAddress::from_str("1234"); 164 | let b0 = ExtrinsicAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258:5"); 165 | let b1 = ExtrinsicAddress::from_str("1234:0"); 166 | let b2 = ExtrinsicAddress::from_str("0 0"); 167 | let b3 = ExtrinsicAddress::from_str("0x0012345f"); 168 | 169 | 170 | assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); 171 | assert_eq!(b0, Ok(ExtrinsicAddress::Block( 172 | BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), 173 | 5 174 | ))); 175 | assert_eq!(b1, Ok(ExtrinsicAddress::Block( 176 | BlockAddress::Number(1234), 177 | 0 178 | ))); 179 | assert_eq!(b2, Ok(ExtrinsicAddress::Block( 180 | BlockAddress::Number(0), 181 | 0 182 | ))); 183 | assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /inspect/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A CLI extension for substrate node, adding sub-command to pretty print debug info 2 | //! about blocks and extrinsics. 3 | //! 4 | //! The blocks and extrinsics can either be retrieved from the database (on-chain), 5 | //! or a raw SCALE-encoding can be provided. 6 | 7 | #![warn(missing_docs)] 8 | 9 | pub mod cli; 10 | pub mod command; 11 | 12 | use std::{ 13 | fmt, 14 | marker::PhantomData 15 | }; 16 | use codec::{Encode, Decode}; 17 | use sc_client_api::BlockBackend; 18 | use sp_blockchain::HeaderBackend; 19 | use sp_core::hexdisplay::HexDisplay; 20 | use sp_runtime::{ 21 | generic::BlockId, 22 | traits::{Block, HashFor, NumberFor, Hash} 23 | }; 24 | 25 | use command::{BlockAddress, ExtrinsicAddress}; 26 | 27 | /// A helper type for a generic block input. 28 | pub type BlockAddressFor = BlockAddress< 29 | as Hash>::Output, 30 | NumberFor 31 | >; 32 | 33 | /// A Pretty formatter implementation. 34 | pub trait PrettyPrinter { 35 | /// Nicely format block. 36 | fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result; 37 | /// Nicely format extrinsic. 38 | fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) -> fmt::Result; 39 | } 40 | 41 | /// Default dummy debug printer. 
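/// (It prints the decoded header, the raw SCALE-encoded block bytes and every extrinsic
/// in `Debug` form, so it needs no runtime-specific decoding.)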
42 | #[derive(Default)] 43 | pub struct DebugPrinter; 44 | impl PrettyPrinter for DebugPrinter { 45 | fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result { 46 | writeln!(fmt, "Header:")?; 47 | writeln!(fmt, "{:?}", block.header())?; 48 | writeln!(fmt, "Block bytes: {:?}", HexDisplay::from(&block.encode()))?; 49 | writeln!(fmt, "Extrinsics ({})", block.extrinsics().len())?; 50 | for (idx, ex) in block.extrinsics().iter().enumerate() { 51 | writeln!(fmt, "- {}:", idx)?; 52 | >::fmt_extrinsic(self, fmt, ex)?; 53 | } 54 | Ok(()) 55 | } 56 | 57 | fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) -> fmt::Result { 58 | writeln!(fmt, " {:?}", extrinsic)?; 59 | writeln!(fmt, " Bytes: {:?}", HexDisplay::from(&extrinsic.encode()))?; 60 | Ok(()) 61 | } 62 | } 63 | 64 | /// Aggregated error for `Inspector` operations. 65 | #[derive(Debug, derive_more::From, derive_more::Display)] 66 | pub enum Error { 67 | /// Could not decode Block or Extrinsic. 68 | Codec(codec::Error), 69 | /// Error accessing blockchain DB. 70 | Blockchain(sp_blockchain::Error), 71 | /// Given block has not been found. 72 | NotFound(String), 73 | } 74 | 75 | impl std::error::Error for Error { 76 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 77 | match *self { 78 | Self::Codec(ref e) => Some(e), 79 | Self::Blockchain(ref e) => Some(e), 80 | Self::NotFound(_) => None, 81 | } 82 | } 83 | } 84 | 85 | /// A helper trait to access block headers and bodies. 86 | pub trait ChainAccess: 87 | HeaderBackend + 88 | BlockBackend 89 | {} 90 | 91 | impl ChainAccess for T where 92 | TBlock: Block, 93 | T: sp_blockchain::HeaderBackend + sc_client_api::BlockBackend, 94 | {} 95 | 96 | /// Blockchain inspector. 97 | pub struct Inspector = DebugPrinter> { 98 | printer: TPrinter, 99 | chain: Box>, 100 | _block: PhantomData, 101 | } 102 | 103 | impl> Inspector { 104 | /// Create new instance of the inspector with default printer. 105 | pub fn new( 106 | chain: impl ChainAccess + 'static, 107 | ) -> Self where TPrinter: Default { 108 | Self::with_printer(chain, Default::default()) 109 | } 110 | 111 | /// Customize pretty-printing of the data. 112 | pub fn with_printer( 113 | chain: impl ChainAccess + 'static, 114 | printer: TPrinter, 115 | ) -> Self { 116 | Inspector { 117 | chain: Box::new(chain) as _, 118 | printer, 119 | _block: Default::default(), 120 | } 121 | } 122 | 123 | /// Get a pretty-printed block. 124 | pub fn block(&self, input: BlockAddressFor) -> Result { 125 | struct BlockPrinter<'a, A, B>(A, &'a B); 126 | impl<'a, A: Block, B: PrettyPrinter> fmt::Display for BlockPrinter<'a, A, B> { 127 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 128 | self.1.fmt_block(fmt, &self.0) 129 | } 130 | } 131 | 132 | let block = self.get_block(input)?; 133 | Ok(format!("{}", BlockPrinter(block, &self.printer))) 134 | } 135 | 136 | fn get_block(&self, input: BlockAddressFor) -> Result { 137 | Ok(match input { 138 | BlockAddress::Bytes(bytes) => { 139 | TBlock::decode(&mut &*bytes)? 140 | }, 141 | BlockAddress::Number(number) => { 142 | let id = BlockId::number(number); 143 | let not_found = format!("Could not find block {:?}", id); 144 | let body = self.chain.block_body(&id)? 145 | .ok_or_else(|| Error::NotFound(not_found.clone()))?; 146 | let header = self.chain.header(id)? 
147 | .ok_or_else(|| Error::NotFound(not_found.clone()))?; 148 | TBlock::new(header, body) 149 | }, 150 | BlockAddress::Hash(hash) => { 151 | let id = BlockId::hash(hash); 152 | let not_found = format!("Could not find block {:?}", id); 153 | let body = self.chain.block_body(&id)? 154 | .ok_or_else(|| Error::NotFound(not_found.clone()))?; 155 | let header = self.chain.header(id)? 156 | .ok_or_else(|| Error::NotFound(not_found.clone()))?; 157 | TBlock::new(header, body) 158 | }, 159 | }) 160 | } 161 | 162 | /// Get a pretty-printed extrinsic. 163 | pub fn extrinsic( 164 | &self, 165 | input: ExtrinsicAddress< as Hash>::Output, NumberFor>, 166 | ) -> Result { 167 | struct ExtrinsicPrinter<'a, A: Block, B>(A::Extrinsic, &'a B); 168 | impl<'a, A: Block, B: PrettyPrinter> fmt::Display for ExtrinsicPrinter<'a, A, B> { 169 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 170 | self.1.fmt_extrinsic(fmt, &self.0) 171 | } 172 | } 173 | 174 | let ext = match input { 175 | ExtrinsicAddress::Block(block, index) => { 176 | let block = self.get_block(block)?; 177 | block.extrinsics() 178 | .get(index) 179 | .cloned() 180 | .ok_or_else(|| Error::NotFound(format!( 181 | "Could not find extrinsic {} in block {:?}", index, block 182 | )))? 183 | }, 184 | ExtrinsicAddress::Bytes(bytes) => { 185 | TBlock::Extrinsic::decode(&mut &*bytes)? 186 | } 187 | }; 188 | 189 | Ok(format!("{}", ExtrinsicPrinter(ext, &self.printer))) 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "node-rpc" 3 | version = "0.3.0" 4 | authors = ['Akropolis '] 5 | edition = "2018" 6 | license = "MIT" 7 | 8 | [dependencies] 9 | sc-client = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 10 | jsonrpc-core = "14.0.3" 11 | akropolisos-runtime = { version = "0.5.0", path = "../runtime" } 12 | sp-runtime = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 13 | sp-api = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 14 | pallet-contracts-rpc = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 15 | pallet-transaction-payment-rpc = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 16 | substrate-frame-rpc-system = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 17 | sp-transaction-pool = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 18 | sc-consensus-babe = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 19 | sc-consensus-babe-rpc = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 20 | sp-consensus-babe = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 21 | sc-keystore = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 22 | sc-consensus-epochs = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 23 | sp-consensus = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 24 | sp-blockchain = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 25 | 26 | sc-rpc = { version = "2.0.0-alpha.5", git = "https://github.com/paritytech/substrate" } 27 | codec = { package = "parity-scale-codec", version = "1.2.0", 
default-features = false } -------------------------------------------------------------------------------- /rpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A collection of node-specific RPC methods. 2 | //! 3 | //! Since `substrate` core functionality makes no assumptions 4 | //! about the modules used inside the runtime, so do 5 | //! RPC methods defined in `sc-rpc` crate. 6 | //! It means that `client/rpc` can't have any methods that 7 | //! need some strong assumptions about the particular runtime. 8 | //! 9 | //! The RPCs available in this crate however can make some assumptions 10 | //! about how the runtime is constructed and what FRAME pallets 11 | //! are part of it. Therefore all node-runtime-specific RPCs can 12 | //! be placed here or imported from corresponding FRAME RPC definitions. 13 | #![warn(missing_docs)] 14 | 15 | use std::{fmt, sync::Arc}; 16 | 17 | use akropolisos_runtime::{opaque::PrimitiveBlock as Block, BlockNumber, AccountId, Index, Balance}; 18 | use sc_consensus_babe::{Config, Epoch}; 19 | use sc_consensus_babe_rpc::BabeRPCHandler; 20 | use sc_consensus_epochs::SharedEpochChanges; 21 | use sc_keystore::KeyStorePtr; 22 | use sp_api::ProvideRuntimeApi; 23 | use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; 24 | use sp_consensus::SelectChain; 25 | use sp_consensus_babe::BabeApi; 26 | use sp_transaction_pool::TransactionPool; 27 | 28 | /// Light client extra dependencies. 29 | pub struct LightDeps { 30 | /// The client instance to use. 31 | pub client: Arc, 32 | /// Transaction pool instance. 33 | pub pool: Arc
<P>
, 34 | /// Remote access to the blockchain (async). 35 | pub remote_blockchain: Arc>, 36 | /// Fetcher instance. 37 | pub fetcher: Arc, 38 | } 39 | 40 | /// Extra dependencies for BABE. 41 | pub struct BabeDeps { 42 | /// BABE protocol config. 43 | pub babe_config: Config, 44 | /// BABE pending epoch changes. 45 | pub shared_epoch_changes: SharedEpochChanges, 46 | /// The keystore that manages the keys of the node. 47 | pub keystore: KeyStorePtr, 48 | } 49 | 50 | /// Full client dependencies. 51 | pub struct FullDeps { 52 | /// The client instance to use. 53 | pub client: Arc, 54 | /// Transaction pool instance. 55 | pub pool: Arc
<P>
, 56 | /// The SelectChain Strategy 57 | pub select_chain: SC, 58 | /// BABE specific dependencies. 59 | pub babe: BabeDeps, 60 | } 61 | 62 | /// Instantiate all Full RPC extensions. 63 | pub fn create_full(deps: FullDeps) -> jsonrpc_core::IoHandler 64 | where 65 | C: ProvideRuntimeApi, 66 | C: HeaderBackend + HeaderMetadata, 67 | C: Send + Sync + 'static, 68 | C::Api: substrate_frame_rpc_system::AccountNonceApi, 69 | C::Api: pallet_contracts_rpc::ContractsRuntimeApi, 70 | C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, 71 | C::Api: BabeApi, 72 | ::Error: fmt::Debug, 73 | P: TransactionPool + 'static, 74 | M: jsonrpc_core::Metadata + Default, 75 | SC: SelectChain + 'static, 76 | { 77 | use pallet_contracts_rpc::{Contracts, ContractsApi}; 78 | use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; 79 | use substrate_frame_rpc_system::{FullSystem, SystemApi}; 80 | 81 | let mut io = jsonrpc_core::IoHandler::default(); 82 | let FullDeps { 83 | client, 84 | pool, 85 | select_chain, 86 | babe, 87 | } = deps; 88 | let BabeDeps { 89 | keystore, 90 | babe_config, 91 | shared_epoch_changes, 92 | } = babe; 93 | 94 | io.extend_with(SystemApi::to_delegate(FullSystem::new( 95 | client.clone(), 96 | pool, 97 | ))); 98 | // Making synchronous calls in light client freezes the browser currently, 99 | // more context: https://github.com/paritytech/substrate/pull/3480 100 | // These RPCs should use an asynchronous caller instead. 101 | io.extend_with(ContractsApi::to_delegate(Contracts::new(client.clone()))); 102 | io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( 103 | client.clone(), 104 | ))); 105 | io.extend_with(sc_consensus_babe_rpc::BabeApi::to_delegate( 106 | BabeRPCHandler::new( 107 | client, 108 | shared_epoch_changes, 109 | keystore, 110 | babe_config, 111 | select_chain, 112 | ), 113 | )); 114 | 115 | io 116 | 117 | } 118 | 119 | /// Instantiate all Light RPC extensions. 
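/// (Only the System RPC is wired up for light clients here, backed by the remote
/// blockchain and fetcher. A hypothetical call site, shown purely as a sketch; the local
/// bindings and the `sc_rpc::Metadata` choice are assumptions, not part of this crate:)
///
/// ```ignore
/// let io: jsonrpc_core::IoHandler<sc_rpc::Metadata> =
///     node_rpc::create_light(LightDeps { client, pool, remote_blockchain, fetcher });
/// ```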
120 | pub fn create_light(deps: LightDeps) -> jsonrpc_core::IoHandler 121 | where 122 | C: HeaderBackend, 123 | C: Send + Sync + 'static, 124 | F: sc_client::light::fetcher::Fetcher + 'static, 125 | P: TransactionPool + 'static, 126 | M: jsonrpc_core::Metadata + Default, 127 | { 128 | use substrate_frame_rpc_system::{LightSystem, SystemApi}; 129 | 130 | let LightDeps { 131 | client, 132 | pool, 133 | remote_blockchain, 134 | fetcher, 135 | } = deps; 136 | let mut io = jsonrpc_core::IoHandler::default(); 137 | io.extend_with(SystemApi::::to_delegate( 138 | LightSystem::new(client, remote_blockchain, fetcher, pool), 139 | )); 140 | 141 | io 142 | } 143 | -------------------------------------------------------------------------------- /runtime/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ['Akropolis '] 3 | edition = '2018' 4 | name = 'akropolisos-runtime' 5 | version = '0.5.1' 6 | 7 | [build-dependencies] 8 | wasm-builder-runner = {git = 'https://github.com/paritytech/substrate.git', package = 'substrate-wasm-builder-runner', version = '1.0.5', default-features = false} 9 | wasm-builder = {git = 'https://github.com/paritytech/substrate.git', package = 'substrate-wasm-builder', version = '1.0.9', default-features = false} 10 | build-script-utils = { version = '2.0.0-alpha.5', package = 'substrate-build-script-utils', git = 'https://github.com/paritytech/substrate.git', default-features = false} 11 | 12 | [dependencies] 13 | safe-mix = { default-features = false, version = '1.0.0'} 14 | serde = {features = ['derive'], default-features = false, optional = true, version = '1.0.101'} 15 | num-traits = { version = "0.2.8", default-features = false } 16 | simple-json = {git = 'https://github.com/jimmychu0807/simple-json', version = '0.1.5', default-features = false} 17 | rustc-hex = { version = "2.0", optional = true } 18 | 19 | [dependencies.sp-authority-discovery] 20 | default-features = false 21 | git = 'https://github.com/paritytech/substrate.git' 22 | version = '2.0.0-alpha.5' 23 | 24 | [dependencies.sp-consensus-babe] 25 | default-features = false 26 | git = 'https://github.com/paritytech/substrate.git' 27 | version = '0.8.0-alpha.5' 28 | 29 | [dependencies.sp-block-builder] 30 | default-features = false 31 | git = 'https://github.com/paritytech/substrate.git' 32 | version = '2.0.0-alpha.5' 33 | 34 | [dependencies.sp-inherents] 35 | default-features = false 36 | git = 'https://github.com/paritytech/substrate.git' 37 | version = '2.0.0-alpha.5' 38 | 39 | [dependencies.sp-offchain] 40 | default-features = false 41 | git = 'https://github.com/paritytech/substrate.git' 42 | version = '2.0.0-alpha.5' 43 | 44 | [dependencies.sp-core] 45 | default-features = false 46 | git = 'https://github.com/paritytech/substrate.git' 47 | version = '2.0.0-alpha.5' 48 | 49 | [dependencies.sp-std] 50 | default-features = false 51 | git = 'https://github.com/paritytech/substrate.git' 52 | version = '2.0.0-alpha.5' 53 | 54 | [dependencies.sp-api] 55 | default-features = false 56 | git = 'https://github.com/paritytech/substrate.git' 57 | version = '2.0.0-alpha.5' 58 | 59 | [dependencies.sp-io] 60 | default-features = false 61 | features = ["disable_panic_handler", "disable_oom"] 62 | git = 'https://github.com/paritytech/substrate.git' 63 | version = '2.0.0-alpha.5' 64 | 65 | [dependencies.sp-runtime] 66 | default-features = false 67 | git = 'https://github.com/paritytech/substrate.git' 68 | version = '2.0.0-alpha.5' 69 | 70 | 
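# Note: nearly every [dependencies.*] table below pins the same Substrate git source at
# 2.0.0-alpha.5 (0.8.0-alpha.5 for pallet-contracts-rpc-runtime-api) with default features
# off; the `std` feature list at the bottom of this manifest re-enables std for each crate.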
[dependencies.sp-staking] 71 | default-features = false 72 | git = 'https://github.com/paritytech/substrate.git' 73 | version = '2.0.0-alpha.5' 74 | 75 | [dependencies.sp-keyring] 76 | default-features = false 77 | optional = true 78 | git = 'https://github.com/paritytech/substrate.git' 79 | version = '2.0.0-alpha.5' 80 | 81 | [dependencies.sp-session] 82 | default-features = false 83 | git = 'https://github.com/paritytech/substrate.git' 84 | version = '2.0.0-alpha.5' 85 | 86 | [dependencies.sp-transaction-pool] 87 | default-features = false 88 | git = 'https://github.com/paritytech/substrate.git' 89 | version = '2.0.0-alpha.5' 90 | 91 | [dependencies.sp-version] 92 | default-features = false 93 | git = 'https://github.com/paritytech/substrate.git' 94 | version = '2.0.0-alpha.5' 95 | 96 | [dependencies.codec] 97 | default-features = false 98 | features = ['derive'] 99 | package = 'parity-scale-codec' 100 | version = '1.2.0' 101 | 102 | [dependencies.frame-executive] 103 | default-features = false 104 | git = 'https://github.com/paritytech/substrate.git' 105 | version = '2.0.0-alpha.5' 106 | 107 | [dependencies.frame-benchmarking] 108 | default-features = false 109 | optional = true 110 | git = 'https://github.com/paritytech/substrate.git' 111 | version = '2.0.0-alpha.5' 112 | 113 | [dependencies.frame-support] 114 | default-features = false 115 | git = 'https://github.com/paritytech/substrate.git' 116 | version = '2.0.0-alpha.5' 117 | 118 | [dependencies.system] 119 | default-features = false 120 | git = 'https://github.com/paritytech/substrate.git' 121 | package = 'frame-system' 122 | version = '2.0.0-alpha.5' 123 | 124 | [dependencies.frame-system-rpc-runtime-api] 125 | default-features = false 126 | git = 'https://github.com/paritytech/substrate.git' 127 | version = '2.0.0-alpha.5' 128 | 129 | [dependencies.pallet-authority-discovery] 130 | default-features = false 131 | git = 'https://github.com/paritytech/substrate.git' 132 | version = '2.0.0-alpha.5' 133 | 134 | [dependencies.pallet-authorship] 135 | default-features = false 136 | git = 'https://github.com/paritytech/substrate.git' 137 | version = '2.0.0-alpha.5' 138 | 139 | [dependencies.pallet-babe] 140 | default-features = false 141 | git = 'https://github.com/paritytech/substrate.git' 142 | version = '2.0.0-alpha.5' 143 | 144 | [dependencies.balances] 145 | default-features = false 146 | git = 'https://github.com/paritytech/substrate.git' 147 | package = 'pallet-balances' 148 | version = '2.0.0-alpha.5' 149 | 150 | [dependencies.pallet-collective] 151 | default-features = false 152 | git = 'https://github.com/paritytech/substrate.git' 153 | version = '2.0.0-alpha.5' 154 | 155 | [dependencies.pallet-contracts] 156 | default-features = false 157 | git = 'https://github.com/paritytech/substrate.git' 158 | version = '2.0.0-alpha.5' 159 | 160 | [dependencies.pallet-contracts-primitives] 161 | default-features = false 162 | git = 'https://github.com/paritytech/substrate.git' 163 | version = '2.0.0-alpha.5' 164 | 165 | [dependencies.pallet-contracts-rpc-runtime-api] 166 | default-features = false 167 | git = 'https://github.com/paritytech/substrate.git' 168 | version = '0.8.0-alpha.5' 169 | 170 | [dependencies.pallet-democracy] 171 | default-features = false 172 | git = 'https://github.com/paritytech/substrate.git' 173 | version = '2.0.0-alpha.5' 174 | 175 | [dependencies.pallet-elections-phragmen] 176 | default-features = false 177 | git = 'https://github.com/paritytech/substrate.git' 178 | version = '2.0.0-alpha.5' 179 | 180 | 
[dependencies.pallet-finality-tracker] 181 | default-features = false 182 | git = 'https://github.com/paritytech/substrate.git' 183 | version = '2.0.0-alpha.5' 184 | 185 | [dependencies.grandpa] 186 | default-features = false 187 | git = 'https://github.com/paritytech/substrate.git' 188 | package = 'pallet-grandpa' 189 | version = '2.0.0-alpha.5' 190 | 191 | [dependencies.pallet-im-online] 192 | default-features = false 193 | git = 'https://github.com/paritytech/substrate.git' 194 | version = '2.0.0-alpha.5' 195 | 196 | [dependencies.pallet-indices] 197 | default-features = false 198 | git = 'https://github.com/paritytech/substrate.git' 199 | version = '2.0.0-alpha.5' 200 | 201 | [dependencies.pallet-identity] 202 | default-features = false 203 | git = 'https://github.com/paritytech/substrate.git' 204 | version = '2.0.0-alpha.5' 205 | 206 | [dependencies.pallet-membership] 207 | default-features = false 208 | git = 'https://github.com/paritytech/substrate.git' 209 | version = '2.0.0-alpha.5' 210 | 211 | [dependencies.pallet-offences] 212 | default-features = false 213 | git = 'https://github.com/paritytech/substrate.git' 214 | version = '2.0.0-alpha.5' 215 | 216 | [dependencies.randomness-collective-flip] 217 | default-features = false 218 | git = 'https://github.com/paritytech/substrate.git' 219 | package = 'pallet-randomness-collective-flip' 220 | version = '2.0.0-alpha.5' 221 | 222 | [dependencies.pallet-recovery] 223 | default-features = false 224 | git = 'https://github.com/paritytech/substrate.git' 225 | version = '2.0.0-alpha.5' 226 | 227 | [dependencies.pallet-session] 228 | default-features = false 229 | git = 'https://github.com/paritytech/substrate.git' 230 | version = '2.0.0-alpha.5' 231 | 232 | [dependencies.pallet-session-benchmarking] 233 | default-features = false 234 | optional = true 235 | git = 'https://github.com/paritytech/substrate.git' 236 | version = '2.0.0-alpha.5' 237 | 238 | [dependencies.pallet-staking] 239 | default-features = false 240 | git = 'https://github.com/paritytech/substrate.git' 241 | version = '2.0.0-alpha.5' 242 | 243 | [dependencies.pallet-staking-reward-curve] 244 | default-features = false 245 | git = 'https://github.com/paritytech/substrate.git' 246 | version = '2.0.0-alpha.5' 247 | 248 | [dependencies.sudo] 249 | default-features = false 250 | git = 'https://github.com/paritytech/substrate.git' 251 | package = 'pallet-sudo' 252 | version = '2.0.0-alpha.5' 253 | 254 | [dependencies.pallet-society] 255 | default-features = false 256 | git = 'https://github.com/paritytech/substrate.git' 257 | version = '2.0.0-alpha.5' 258 | 259 | [dependencies.timestamp] 260 | default-features = false 261 | git = 'https://github.com/paritytech/substrate.git' 262 | package = 'pallet-timestamp' 263 | version = '2.0.0-alpha.5' 264 | 265 | [dependencies.pallet-treasury] 266 | default-features = false 267 | git = 'https://github.com/paritytech/substrate.git' 268 | version = '2.0.0-alpha.5' 269 | 270 | [dependencies.pallet-utility] 271 | default-features = false 272 | git = 'https://github.com/paritytech/substrate.git' 273 | version = '2.0.0-alpha.5' 274 | 275 | [dependencies.pallet-transaction-payment] 276 | default-features = false 277 | git = 'https://github.com/paritytech/substrate.git' 278 | version = '2.0.0-alpha.5' 279 | 280 | [dependencies.pallet-transaction-payment-rpc-runtime-api] 281 | default-features = false 282 | git = 'https://github.com/paritytech/substrate.git' 283 | version = '2.0.0-alpha.5' 284 | 285 | [dependencies.pallet-vesting] 286 | 
default-features = false 287 | git = 'https://github.com/paritytech/substrate.git' 288 | version = '2.0.0-alpha.5' 289 | 290 | [features] 291 | default = ['std'] 292 | std = [ 293 | 'simple-json/std', 294 | 'num-traits/std', 295 | 'safe-mix/std', 296 | "serde", 297 | 'serde/std', 298 | "rustc-hex", 299 | "sp-authority-discovery/std", 300 | "pallet-authority-discovery/std", 301 | "pallet-authorship/std", 302 | "sp-consensus-babe/std", 303 | "pallet-babe/std", 304 | "balances/std", 305 | "sp-block-builder/std", 306 | "codec/std", 307 | "pallet-collective/std", 308 | "pallet-contracts/std", 309 | "pallet-contracts-primitives/std", 310 | "pallet-contracts-rpc-runtime-api/std", 311 | "pallet-democracy/std", 312 | "pallet-elections-phragmen/std", 313 | "frame-executive/std", 314 | "pallet-finality-tracker/std", 315 | "grandpa/std", 316 | "pallet-im-online/std", 317 | "pallet-indices/std", 318 | "sp-inherents/std", 319 | "pallet-membership/std", 320 | "pallet-identity/std", 321 | "sp-offchain/std", 322 | "pallet-offences/std", 323 | "sp-core/std", 324 | "randomness-collective-flip/std", 325 | "sp-std/std", 326 | "pallet-session/std", 327 | "sp-api/std", 328 | "sp-runtime/std", 329 | "sp-staking/std", 330 | "pallet-staking/std", 331 | "sp-keyring", 332 | "sp-session/std", 333 | "sudo/std", 334 | "frame-support/std", 335 | "frame-system-rpc-runtime-api/std", 336 | "system/std", 337 | "timestamp/std", 338 | "pallet-transaction-payment-rpc-runtime-api/std", 339 | "pallet-transaction-payment/std", 340 | "pallet-treasury/std", 341 | "sp-transaction-pool/std", 342 | "pallet-utility/std", 343 | "sp-version/std", 344 | "pallet-society/std", 345 | "pallet-recovery/std", 346 | "pallet-vesting/std", 347 | ] 348 | runtime-benchmarks = [ 349 | "frame-benchmarking", 350 | "frame-support/runtime-benchmarks", 351 | "balances/runtime-benchmarks", 352 | "pallet-elections-phragmen/runtime-benchmarks", 353 | "pallet-identity/runtime-benchmarks", 354 | "timestamp/runtime-benchmarks", 355 | "pallet-treasury/runtime-benchmarks", 356 | "pallet-session-benchmarking", 357 | "pallet-staking/runtime-benchmarks", 358 | "pallet-vesting/runtime-benchmarks", 359 | "pallet-session-benchmarking", 360 | "pallet-staking/runtime-benchmarks", 361 | "pallet-im-online/runtime-benchmarks", 362 | ] -------------------------------------------------------------------------------- /runtime/build.rs: -------------------------------------------------------------------------------- 1 | use wasm_builder_runner::WasmBuilder; 2 | 3 | fn main() { 4 | WasmBuilder::new() 5 | .with_current_project() 6 | .with_wasm_builder_from_crates("1.0.9") 7 | .export_heap_base() 8 | .import_memory() 9 | .build() 10 | } -------------------------------------------------------------------------------- /runtime/src/constants.rs: -------------------------------------------------------------------------------- 1 | /// Money matters. 2 | pub mod currency { 3 | /// Balance of an account. 4 | pub type Balance = u128; 5 | 6 | pub const MILLICENTS: Balance = 1_000_000_000; 7 | pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent. 8 | pub const DOLLARS: Balance = 100 * CENTS; 9 | } 10 | 11 | /// Time. 12 | pub mod time { 13 | /// An index to a block. 14 | pub type Moment = u64; 15 | /// An index to a block. 16 | pub type BlockNumber = u32; 17 | 18 | /// Since BABE is probabilistic this is the average expected block time that 19 | /// we are targetting. 
Blocks will be produced at a minimum duration defined 20 | /// by `SLOT_DURATION`, but some slots will not be allocated to any 21 | /// authority and hence no block will be produced. We expect to have this 22 | /// block time on average following the defined slot duration and the value 23 | /// of `c` configured for BABE (where `1 - c` represents the probability of 24 | /// a slot being empty). 25 | /// This value is only used indirectly to define the unit constants below 26 | /// that are expressed in blocks. The rest of the code should use 27 | /// `SLOT_DURATION` instead (like the Timestamp pallet for calculating the 28 | /// minimum period). 29 | /// 30 | /// If using BABE with secondary slots (default) then all of the slots will 31 | /// always be assigned, in which case `MILLISECS_PER_BLOCK` and 32 | /// `SLOT_DURATION` should have the same value. 33 | /// 34 | /// 35 | pub const MILLISECS_PER_BLOCK: Moment = 3000; 36 | pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; 37 | 38 | pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; 39 | 40 | // 1 in 4 blocks (on average, not counting collisions) will be primary BABE blocks. 41 | pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); 42 | 43 | pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10 * MINUTES; 44 | pub const EPOCH_DURATION_IN_SLOTS: u64 = { 45 | const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64; 46 | 47 | (EPOCH_DURATION_IN_BLOCKS as f64 * SLOT_FILL_RATE) as u64 48 | }; 49 | 50 | // These time units are defined in number of blocks. 51 | pub const MINUTES: BlockNumber = 60 / (SECS_PER_BLOCK as BlockNumber); 52 | pub const HOURS: BlockNumber = MINUTES * 60; 53 | pub const DAYS: BlockNumber = HOURS * 24; 54 | } 55 | -------------------------------------------------------------------------------- /runtime/src/impls.rs: -------------------------------------------------------------------------------- 1 | use sp_runtime::traits::{Convert, Saturating}; 2 | use sp_runtime::{Fixed64, Perbill}; 3 | use frame_support::{traits::{OnUnbalanced, Currency, Get}, weights::Weight}; 4 | use crate::{Balances, Balance, System, Authorship, MaximumBlockWeight, NegativeImbalance}; 5 | 6 | pub struct Author; 7 | impl OnUnbalanced for Author { 8 | fn on_nonzero_unbalanced(amount: NegativeImbalance) { 9 | Balances::resolve_creating(&Authorship::author(), amount); 10 | } 11 | } 12 | 13 | /// Struct that handles the conversion of Balance -> `u64`. This is used for staking's election 14 | /// calculation. 
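/// (With `factor()` = max(total_issuance / u64::MAX, 1), a balance `x` converts down to
/// `x / factor()` as a `u64` and back up by multiplying with the same factor, so the
/// round trip loses at most `factor() - 1` units to integer division.)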
15 | pub struct CurrencyToVoteHandler; 16 | 17 | impl CurrencyToVoteHandler { 18 | fn factor() -> Balance { (Balances::total_issuance() / u64::max_value() as Balance).max(1) } 19 | } 20 | 21 | impl Convert for CurrencyToVoteHandler { 22 | fn convert(x: Balance) -> u64 { (x / Self::factor()) as u64 } 23 | } 24 | 25 | impl Convert for CurrencyToVoteHandler { 26 | fn convert(x: u128) -> Balance { x * Self::factor() } 27 | } 28 | 29 | /// Convert from weight to balance via a simple coefficient multiplication 30 | /// The associated type C encapsulates a constant in units of balance per weight 31 | pub struct LinearWeightToFee(sp_std::marker::PhantomData); 32 | 33 | impl> Convert for LinearWeightToFee { 34 | fn convert(w: Weight) -> Balance { 35 | // substrate-node a weight of 10_000 (smallest non-zero weight) to be mapped to 10^7 units of 36 | // fees, hence: 37 | let coefficient = C::get(); 38 | Balance::from(w).saturating_mul(coefficient) 39 | } 40 | } 41 | 42 | /// Update the given multiplier based on the following formula 43 | /// 44 | /// diff = (previous_block_weight - target_weight) 45 | /// v = 0.00004 46 | /// next_weight = weight * (1 + (v . diff) + (v . diff)^2 / 2) 47 | /// 48 | /// Where `target_weight` must be given as the `Get` implementation of the `T` generic type. 49 | /// https://research.web3.foundation/en/latest/polkadot/Token%20Economics/#relay-chain-transaction-fees 50 | pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData); 51 | 52 | impl> Convert for TargetedFeeAdjustment { 53 | fn convert(multiplier: Fixed64) -> Fixed64 { 54 | let block_weight = System::all_extrinsics_weight(); 55 | let max_weight = MaximumBlockWeight::get(); 56 | let target_weight = (T::get() * max_weight) as u128; 57 | let block_weight = block_weight as u128; 58 | 59 | // determines if the first_term is positive 60 | let positive = block_weight >= target_weight; 61 | let diff_abs = block_weight.max(target_weight) - block_weight.min(target_weight); 62 | // diff is within u32, safe. 63 | let diff = Fixed64::from_rational(diff_abs as i64, max_weight as u64); 64 | let diff_squared = diff.saturating_mul(diff); 65 | 66 | // 0.00004 = 4/100_000 = 40_000/10^9 67 | let v = Fixed64::from_rational(4, 100_000); 68 | // 0.00004^2 = 16/10^10 ~= 2/10^9. Taking the future /2 into account, then it is just 1 69 | // parts from a billionth. 70 | let v_squared_2 = Fixed64::from_rational(1, 1_000_000_000); 71 | 72 | let first_term = v.saturating_mul(diff); 73 | // It is very unlikely that this will exist (in our poor perbill estimate) but we are giving 74 | // it a shot. 75 | let second_term = v_squared_2.saturating_mul(diff_squared); 76 | 77 | if positive { 78 | // Note: this is merely bounded by how big the multiplier and the inner value can go, 79 | // not by any economical reasoning. 80 | let excess = first_term.saturating_add(second_term); 81 | multiplier.saturating_add(excess) 82 | } else { 83 | // Proof: first_term > second_term. Safe subtraction. 84 | let negative = first_term - second_term; 85 | multiplier.saturating_sub(negative) 86 | // despite the fact that apply_to saturates weight (final fee cannot go below 0) 87 | // it is crucially important to stop here and don't further reduce the weight fee 88 | // multiplier. While at -1, it means that the network is so un-congested that all 89 | // transactions have no weight fee. We stop here and only increase if the network 90 | // became more busy. 
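// (Concretely, the `.max(Fixed64::from_rational(-1, 1))` on the next line is what clamps the multiplier at -1.)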
91 | .max(Fixed64::from_rational(-1, 1)) 92 | } 93 | } 94 | } 95 | 96 | #[cfg(test)] 97 | mod tests { 98 | use super::*; 99 | use sp_runtime::assert_eq_error_rate; 100 | use crate::{MaximumBlockWeight, AvailableBlockRatio, Runtime}; 101 | use crate::{constants::currency::*, TransactionPayment, TargetBlockFullness}; 102 | use frame_support::weights::Weight; 103 | 104 | fn max() -> Weight { 105 | MaximumBlockWeight::get() 106 | } 107 | 108 | fn target() -> Weight { 109 | TargetBlockFullness::get() * max() 110 | } 111 | 112 | // poc reference implementation. 113 | fn fee_multiplier_update(block_weight: Weight, previous: Fixed64) -> Fixed64 { 114 | let block_weight = block_weight as f32; 115 | let v: f32 = 0.00004; 116 | 117 | // maximum tx weight 118 | let m = max() as f32; 119 | // Ideal saturation in terms of weight 120 | let ss = target() as f32; 121 | // Current saturation in terms of weight 122 | let s = block_weight; 123 | 124 | let fm = v * (s/m - ss/m) + v.powi(2) * (s/m - ss/m).powi(2) / 2.0; 125 | let addition_fm = Fixed64::from_parts((fm * 1_000_000_000_f32).round() as i64); 126 | previous.saturating_add(addition_fm) 127 | } 128 | 129 | fn feemul(parts: i64) -> Fixed64 { 130 | Fixed64::from_parts(parts) 131 | } 132 | 133 | fn run_with_system_weight(w: Weight, assertions: F) where F: Fn() -> () { 134 | let mut t: sp_io::TestExternalities = 135 | system::GenesisConfig::default().build_storage::().unwrap().into(); 136 | t.execute_with(|| { 137 | System::set_block_limits(w, 0); 138 | assertions() 139 | }); 140 | } 141 | 142 | #[test] 143 | fn fee_multiplier_update_poc_works() { 144 | let fm = Fixed64::from_rational(0, 1); 145 | let test_set = vec![ 146 | (0, fm.clone()), 147 | (100, fm.clone()), 148 | (target(), fm.clone()), 149 | (max() / 2, fm.clone()), 150 | (max(), fm.clone()), 151 | ]; 152 | test_set.into_iter().for_each(|(w, fm)| { 153 | run_with_system_weight(w, || { 154 | assert_eq_error_rate!( 155 | fee_multiplier_update(w, fm).into_inner(), 156 | TargetedFeeAdjustment::::convert(fm).into_inner(), 157 | 5, 158 | ); 159 | }) 160 | }) 161 | } 162 | 163 | #[test] 164 | fn empty_chain_simulation() { 165 | // just a few txs per_block. 166 | let block_weight = 0; 167 | run_with_system_weight(block_weight, || { 168 | let mut fm = Fixed64::default(); 169 | let mut iterations: u64 = 0; 170 | loop { 171 | let next = TargetedFeeAdjustment::::convert(fm); 172 | fm = next; 173 | if fm == Fixed64::from_rational(-1, 1) { break; } 174 | iterations += 1; 175 | } 176 | println!("iteration {}, new fm = {:?}. Weight fee is now zero", iterations, fm); 177 | assert!(iterations > 50_000, "This assertion is just a warning; Don't panic. \ 178 | Current substrate/polkadot node are configured with a _slow adjusting fee_ \ 179 | mechanism. Hence, it is really unlikely that fees collapse to zero even on an \ 180 | empty chain in less than at least of couple of thousands of empty blocks. But this \ 181 | simulation indicates that fees collapsed to zero after {} almost-empty blocks. \ 182 | Check it", 183 | iterations, 184 | ); 185 | }) 186 | } 187 | 188 | #[test] 189 | #[ignore] // for it is a time consuming test 190 | fn congested_chain_simulation() { 191 | // `cargo test congested_chain_simulation -- --nocapture` to get some insight. 192 | 193 | // almost full. The entire quota of normal transactions is taken. 194 | let block_weight = AvailableBlockRatio::get() * max() - 100; 195 | 196 | // Default substrate minimum. 
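// (10_000 is the smallest non-zero extrinsic weight, the same base value the
// `LinearWeightToFee` comment above refers to.)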
197 | let tx_weight = 10_000; 198 | 199 | run_with_system_weight(block_weight, || { 200 | // initial value configured on module 201 | let mut fm = Fixed64::default(); 202 | assert_eq!(fm, TransactionPayment::next_fee_multiplier()); 203 | 204 | let mut iterations: u64 = 0; 205 | loop { 206 | let next = TargetedFeeAdjustment::::convert(fm); 207 | // if no change, panic. This should never happen in this case. 208 | if fm == next { panic!("The fee should ever increase"); } 209 | fm = next; 210 | iterations += 1; 211 | let fee = ::WeightToFee::convert(tx_weight); 212 | let adjusted_fee = fm.saturated_multiply_accumulate(fee); 213 | println!( 214 | "iteration {}, new fm = {:?}. Fee at this point is: {} units / {} millicents, \ 215 | {} cents, {} dollars", 216 | iterations, 217 | fm, 218 | adjusted_fee, 219 | adjusted_fee / MILLICENTS, 220 | adjusted_fee / CENTS, 221 | adjusted_fee / DOLLARS, 222 | ); 223 | } 224 | }); 225 | } 226 | 227 | #[test] 228 | fn stateless_weight_mul() { 229 | run_with_system_weight(target() / 4, || { 230 | // Light block. Fee is reduced a little. 231 | assert_eq!( 232 | TargetedFeeAdjustment::::convert(Fixed64::default()), 233 | feemul(-7500), 234 | ); 235 | }); 236 | run_with_system_weight(target() / 2, || { 237 | // a bit more. Fee is decreased less, meaning that the fee increases as the block grows. 238 | assert_eq!( 239 | TargetedFeeAdjustment::::convert(Fixed64::default()), 240 | feemul(-5000), 241 | ); 242 | 243 | }); 244 | run_with_system_weight(target(), || { 245 | // ideal. Original fee. No changes. 246 | assert_eq!( 247 | TargetedFeeAdjustment::::convert(Fixed64::default()), 248 | feemul(0), 249 | ); 250 | }); 251 | run_with_system_weight(target() * 2, || { 252 | // // More than ideal. Fee is increased. 253 | assert_eq!( 254 | TargetedFeeAdjustment::::convert(Fixed64::default()), 255 | feemul(10000), 256 | ); 257 | }); 258 | } 259 | 260 | #[test] 261 | fn stateful_weight_mul_grow_to_infinity() { 262 | run_with_system_weight(target() * 2, || { 263 | assert_eq!( 264 | TargetedFeeAdjustment::::convert(Fixed64::default()), 265 | feemul(10000) 266 | ); 267 | assert_eq!( 268 | TargetedFeeAdjustment::::convert(feemul(10000)), 269 | feemul(20000) 270 | ); 271 | assert_eq!( 272 | TargetedFeeAdjustment::::convert(feemul(20000)), 273 | feemul(30000) 274 | ); 275 | // ... 276 | assert_eq!( 277 | TargetedFeeAdjustment::::convert(feemul(1_000_000_000)), 278 | feemul(1_000_000_000 + 10000) 279 | ); 280 | }); 281 | } 282 | 283 | #[test] 284 | fn stateful_weight_mil_collapse_to_minus_one() { 285 | run_with_system_weight(0, || { 286 | assert_eq!( 287 | TargetedFeeAdjustment::::convert(Fixed64::default()), 288 | feemul(-10000) 289 | ); 290 | assert_eq!( 291 | TargetedFeeAdjustment::::convert(feemul(-10000)), 292 | feemul(-20000) 293 | ); 294 | assert_eq!( 295 | TargetedFeeAdjustment::::convert(feemul(-20000)), 296 | feemul(-30000) 297 | ); 298 | // ... 299 | assert_eq!( 300 | TargetedFeeAdjustment::::convert(feemul(1_000_000_000 * -1)), 301 | feemul(-1_000_000_000) 302 | ); 303 | }) 304 | } 305 | 306 | #[test] 307 | fn weight_to_fee_should_not_overflow_on_large_weights() { 308 | let kb = 1024 as Weight; 309 | let mb = kb * kb; 310 | let max_fm = Fixed64::from_natural(i64::max_value()); 311 | 312 | // check that for all values it can compute, correctly. 
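// (weights from 0 up to Weight::max_value(), each compared against the f32 reference
// implementation `fee_multiplier_update` above with an allowed error of 5)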
313 | vec![ 314 | 0, 315 | 1, 316 | 10, 317 | 1000, 318 | kb, 319 | 10 * kb, 320 | 100 * kb, 321 | mb, 322 | 10 * mb, 323 | Weight::max_value() / 2, 324 | Weight::max_value(), 325 | ].into_iter().for_each(|i| { 326 | run_with_system_weight(i, || { 327 | let next = TargetedFeeAdjustment::::convert(Fixed64::default()); 328 | let truth = fee_multiplier_update(i, Fixed64::default()); 329 | assert_eq_error_rate!(truth.into_inner(), next.into_inner(), 5); 330 | }); 331 | }); 332 | 333 | // Some values that are all above the target and will cause an increase. 334 | let t = target(); 335 | vec![t + 100, t * 2, t * 4] 336 | .into_iter() 337 | .for_each(|i| { 338 | run_with_system_weight(i, || { 339 | let fm = TargetedFeeAdjustment::::convert(max_fm); 340 | // won't grow. The convert saturates everything. 341 | assert_eq!(fm, max_fm); 342 | }) 343 | }); 344 | } 345 | } -------------------------------------------------------------------------------- /runtime/src/marketplace.rs: -------------------------------------------------------------------------------- 1 | use crate::types::{DaoId, Days, Rate, TokenId}; 2 | use frame_support::{ 3 | decl_event, decl_module, decl_storage, dispatch::DispatchResult, weights::SimpleDispatchInfo, 4 | StorageValue, 5 | }; 6 | use sp_std::prelude::Vec; 7 | use system::ensure_signed; 8 | 9 | pub trait Trait: balances::Trait + system::Trait { 10 | type Event: From> + Into<::Event>; 11 | } 12 | 13 | // This module's storage items. 14 | decl_storage! { 15 | trait Store for Module as marketplace { 16 | Something get(something): Option; 17 | } 18 | } 19 | 20 | decl_module! { 21 | /// The module declaration. 22 | pub struct Module for enum Call where origin: T::Origin { 23 | fn deposit_event() = default; 24 | 25 | #[weight = SimpleDispatchInfo::FixedNormal(10_000)] 26 | fn make_investment(origin, proposal_id: u64) -> DispatchResult { 27 | let who = ensure_signed(origin)?; 28 | 29 | ::put(proposal_id); 30 | 31 | Self::deposit_event(RawEvent::NewInvsetment(proposal_id, who)); 32 | Ok(()) 33 | } 34 | } 35 | } 36 | 37 | impl Module { 38 | pub fn propose_investment( 39 | dao_id: DaoId, 40 | description: Vec, 41 | days: Days, 42 | rate: Rate, 43 | token: TokenId, 44 | price: T::Balance, 45 | value: T::Balance, 46 | ) -> DispatchResult { 47 | // TODO: do usefull stuff :D 48 | Self::deposit_event(RawEvent::ProposeInvestment( 49 | dao_id, 50 | description, 51 | days, 52 | rate, 53 | token, 54 | price, 55 | value, 56 | )); 57 | Ok(()) 58 | } 59 | } 60 | 61 | decl_event!( 62 | pub enum Event 63 | where 64 | AccountId = ::AccountId, 65 | Balance = ::Balance, 66 | { 67 | NewInvsetment(u64, AccountId), 68 | ProposeInvestment(DaoId, Vec, Days, Rate, TokenId, Balance, Balance), 69 | } 70 | ); 71 | 72 | /// tests for this module 73 | #[cfg(test)] 74 | mod tests { 75 | use super::*; 76 | 77 | use frame_support::{ 78 | assert_ok, impl_outer_origin, parameter_types, traits::Get, weights::Weight, 79 | }; 80 | use sp_core::H256; 81 | use sp_runtime::{ 82 | testing::Header, 83 | traits::{BlakeTwo256, IdentityLookup}, 84 | Perbill, 85 | }; 86 | use std::cell::RefCell; 87 | 88 | pub type Balance = u128; 89 | 90 | thread_local! { 91 | static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(500); 92 | } 93 | 94 | impl_outer_origin! { 95 | pub enum Origin for Test {} 96 | } 97 | pub struct ExistentialDeposit; 98 | impl Get for ExistentialDeposit { 99 | fn get() -> u128 { 100 | EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) 101 | } 102 | } 103 | 104 | // For testing the module, we construct most of a mock runtime. 
This means 105 | // first constructing a configuration type (`Test`) which `impl`s each of the 106 | // configuration traits of modules we want to use. 107 | #[derive(Clone, Eq, PartialEq)] 108 | pub struct Test; 109 | parameter_types! { 110 | pub const BlockHashCount: u64 = 250; 111 | pub const MaximumBlockWeight: Weight = 1024; 112 | pub const MaximumBlockLength: u32 = 2 * 1024; 113 | pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); 114 | } 115 | impl system::Trait for Test { 116 | type Origin = Origin; 117 | type Call = (); 118 | type Index = u64; 119 | type BlockNumber = u64; 120 | type Hash = H256; 121 | type Hashing = BlakeTwo256; 122 | type AccountId = u64; 123 | type Lookup = IdentityLookup; 124 | type Header = Header; 125 | type Event = (); 126 | type BlockHashCount = BlockHashCount; 127 | type MaximumBlockWeight = MaximumBlockWeight; 128 | type MaximumBlockLength = MaximumBlockLength; 129 | type AvailableBlockRatio = AvailableBlockRatio; 130 | type Version = (); 131 | type ModuleToIndex = (); 132 | type AccountData = balances::AccountData; 133 | type OnNewAccount = (); 134 | type OnKilledAccount = (); 135 | } 136 | 137 | impl balances::Trait for Test { 138 | type Balance = Balance; 139 | type DustRemoval = (); 140 | type Event = (); 141 | type ExistentialDeposit = ExistentialDeposit; 142 | type AccountStore = system::Module; 143 | } 144 | 145 | impl Trait for Test { 146 | type Event = (); 147 | } 148 | type Marketplace = Module; 149 | 150 | const DAO_DESC: &[u8; 10] = b"Desc-1234_"; 151 | 152 | pub struct ExtBuilder { 153 | existential_deposit: u128, 154 | } 155 | 156 | impl Default for ExtBuilder { 157 | fn default() -> Self { 158 | Self { 159 | existential_deposit: 500, 160 | } 161 | } 162 | } 163 | 164 | impl ExtBuilder { 165 | pub fn set_associated_consts(&self) { 166 | EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); 167 | } 168 | pub fn build(self) -> sp_io::TestExternalities { 169 | self.set_associated_consts(); 170 | let mut storage = system::GenesisConfig::default() 171 | .build_storage::() 172 | .unwrap(); 173 | 174 | let _ = balances::GenesisConfig:: { 175 | balances: vec![ 176 | (2, 20000), 177 | (3, 30000), 178 | (4, 400000), 179 | (11, 500), 180 | (21, 2000), 181 | (31, 2000), 182 | (41, 2000), 183 | (100, 2000), 184 | (101, 2000), 185 | // This allow us to have a total_payout different from 0. 
186 | (999, 1_000_000_000_000), 187 | ], 188 | } 189 | .assimilate_storage(&mut storage); 190 | 191 | let ext = sp_io::TestExternalities::from(storage); 192 | ext 193 | } 194 | } 195 | 196 | #[test] 197 | fn make_investment_should_work() { 198 | ExtBuilder::default().build().execute_with(|| { 199 | assert_ok!(Marketplace::make_investment(Origin::signed(31), 42)); 200 | assert_eq!(Marketplace::something(), Some(42)); 201 | }); 202 | } 203 | 204 | #[test] 205 | fn propose_tokenized_investment_should_work() { 206 | const DAO_ID: DaoId = 11; 207 | const DAYS: Days = 181; 208 | const RATE: Rate = 1000; 209 | const TOKEN: TokenId = 0; 210 | const TOKEN_PRICE: Balance = 1; 211 | const VALUE: u128 = 42; 212 | 213 | ExtBuilder::default().build().execute_with(|| { 214 | assert_ok!(Marketplace::propose_investment( 215 | DAO_ID, 216 | DAO_DESC.to_vec(), 217 | DAYS, 218 | RATE, 219 | TOKEN, 220 | TOKEN_PRICE, 221 | VALUE 222 | )); 223 | }); 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /runtime/src/types.rs: -------------------------------------------------------------------------------- 1 | use codec::{Decode, Encode}; 2 | use sp_core::H160; 3 | use sp_std::prelude::Vec; 4 | 5 | #[cfg(feature = "std")] 6 | use serde::{Deserialize, Serialize}; 7 | 8 | //dao 9 | pub type Count = u64; 10 | pub type DaoId = u64; 11 | pub type MemberId = u64; 12 | pub type ProposalId = u64; 13 | pub type VotesCount = MemberId; 14 | pub type Days = u32; 15 | pub type Rate = u32; 16 | 17 | #[derive(Encode, Decode, Default, Clone, PartialEq)] 18 | #[cfg_attr(feature = "std", derive(Debug))] 19 | pub struct Dao { 20 | pub address: AccountId, 21 | pub name: Vec, 22 | pub description: Vec, 23 | pub founder: AccountId, 24 | } 25 | 26 | #[derive(Encode, Decode, Clone, PartialEq)] 27 | #[cfg_attr(feature = "std", derive(Debug))] 28 | pub struct Proposal { 29 | pub dao_id: DaoId, 30 | pub action: Action, 31 | pub open: bool, 32 | pub accepted: bool, 33 | pub voting_deadline: VotingDeadline, 34 | pub yes_count: MemberId, 35 | pub no_count: MemberId, 36 | } 37 | 38 | impl Default for Proposal 39 | where 40 | D: Default, 41 | A: Default, 42 | B: Default, 43 | V: Default, 44 | M: Default, 45 | { 46 | fn default() -> Self { 47 | Proposal { 48 | dao_id: D::default(), 49 | action: Action::EmptyAction, 50 | open: true, 51 | accepted: false, 52 | voting_deadline: V::default(), 53 | yes_count: M::default(), 54 | no_count: M::default(), 55 | } 56 | } 57 | } 58 | 59 | #[derive(Encode, Decode, Clone, PartialEq)] 60 | #[cfg_attr(feature = "std", derive(Debug))] 61 | pub enum Action { 62 | EmptyAction, 63 | AddMember(AccountId), 64 | RemoveMember(AccountId), 65 | GetLoan(Vec, Days, Rate, TokenId, Balance), 66 | ChangeTimeout(DaoId, Timeout), 67 | ChangeMaximumNumberOfMembers(DaoId, MemberId), 68 | } 69 | 70 | //token factory 71 | pub type TokenId = u32; 72 | 73 | #[derive(Encode, Decode, Default, Clone, PartialEq)] 74 | #[cfg_attr(feature = "std", derive(Deserialize, Serialize, Debug))] 75 | pub struct Token { 76 | pub id: TokenId, 77 | pub decimals: u16, 78 | pub symbol: Vec, 79 | } 80 | 81 | //bridge 82 | #[derive(Encode, Decode, Clone, PartialEq)] 83 | #[cfg_attr(feature = "std", derive(Debug))] 84 | pub struct Limits { 85 | pub max_tx_value: Balance, 86 | pub day_max_limit: Balance, 87 | pub day_max_limit_for_one_address: Balance, 88 | pub max_pending_tx_limit: Balance, 89 | pub min_tx_value: Balance, 90 | } 91 | 92 | // bridge types 93 | #[derive(Encode, Decode, Clone)] 94 | 
#[cfg_attr(feature = "std", derive(Debug))] 95 | pub struct BridgeTransfer { 96 | pub transfer_id: ProposalId, 97 | pub message_id: Hash, 98 | pub open: bool, 99 | pub votes: MemberId, 100 | pub kind: Kind, 101 | } 102 | 103 | #[derive(Encode, Decode, Clone, PartialEq)] 104 | #[cfg_attr(feature = "std", derive(Debug))] 105 | pub enum Status { 106 | Revoked, 107 | Pending, 108 | PauseTheBridge, 109 | ResumeTheBridge, 110 | UpdateValidatorSet, 111 | UpdateLimits, 112 | Deposit, 113 | Withdraw, 114 | Approved, 115 | Canceled, 116 | Confirmed, 117 | } 118 | 119 | #[derive(Encode, Decode, Clone, PartialEq)] 120 | #[cfg_attr(feature = "std", derive(Debug))] 121 | pub enum Kind { 122 | Transfer, 123 | Limits, 124 | Validator, 125 | Bridge, 126 | } 127 | 128 | #[derive(Encode, Decode, Clone)] 129 | #[cfg_attr(feature = "std", derive(Debug))] 130 | pub struct TransferMessage { 131 | pub message_id: Hash, 132 | pub token: TokenId, 133 | pub eth_address: H160, 134 | pub substrate_address: AccountId, 135 | pub amount: Balance, 136 | pub status: Status, 137 | pub action: Status, 138 | } 139 | 140 | #[derive(Encode, Decode, Clone)] 141 | #[cfg_attr(feature = "std", derive(Debug))] 142 | pub struct LimitMessage { 143 | pub id: Hash, 144 | pub limits: Limits, 145 | pub status: Status, 146 | } 147 | 148 | #[derive(Encode, Decode, Clone)] 149 | #[cfg_attr(feature = "std", derive(Debug))] 150 | pub struct BridgeMessage { 151 | pub message_id: Hash, 152 | pub account: AccountId, 153 | pub action: Status, 154 | pub status: Status, 155 | } 156 | 157 | #[derive(Encode, Decode, Clone)] 158 | #[cfg_attr(feature = "std", derive(Debug))] 159 | pub struct ValidatorMessage { 160 | pub message_id: Hash, 161 | pub quorum: u64, 162 | pub accounts: Vec, 163 | pub action: Status, 164 | pub status: Status, 165 | } 166 | 167 | impl Default for TransferMessage 168 | where 169 | A: Default, 170 | H: Default, 171 | B: Default, 172 | { 173 | fn default() -> Self { 174 | TransferMessage { 175 | message_id: H::default(), 176 | token: TokenId::default(), 177 | eth_address: H160::default(), 178 | substrate_address: A::default(), 179 | amount: B::default(), 180 | status: Status::Withdraw, 181 | action: Status::Withdraw, 182 | } 183 | } 184 | } 185 | 186 | impl Default for LimitMessage 187 | where 188 | H: Default, 189 | B: Default, 190 | { 191 | fn default() -> Self { 192 | LimitMessage { 193 | id: H::default(), 194 | limits: Limits::default(), 195 | status: Status::UpdateLimits, 196 | } 197 | } 198 | } 199 | 200 | impl Default for BridgeMessage 201 | where 202 | A: Default, 203 | H: Default, 204 | { 205 | fn default() -> Self { 206 | BridgeMessage { 207 | message_id: H::default(), 208 | account: A::default(), 209 | action: Status::Revoked, 210 | status: Status::Revoked, 211 | } 212 | } 213 | } 214 | 215 | impl Default for ValidatorMessage 216 | where 217 | A: Default, 218 | H: Default, 219 | { 220 | fn default() -> Self { 221 | ValidatorMessage { 222 | message_id: H::default(), 223 | quorum: u64::default(), 224 | accounts: Vec::default(), 225 | action: Status::Revoked, 226 | status: Status::Revoked, 227 | } 228 | } 229 | } 230 | 231 | impl Default for BridgeTransfer 232 | where 233 | H: Default, 234 | { 235 | fn default() -> Self { 236 | BridgeTransfer { 237 | transfer_id: ProposalId::default(), 238 | message_id: H::default(), 239 | open: true, 240 | votes: MemberId::default(), 241 | kind: Kind::Transfer, 242 | } 243 | } 244 | } 245 | 246 | impl Default for Limits 247 | where 248 | B: Default, 249 | { 250 | fn default() -> Self { 
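// Every limit field starts at the Balance type's default value (zero for the runtime's numeric balance types).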
251 | Limits { 252 | max_tx_value: B::default(), 253 | day_max_limit: B::default(), 254 | day_max_limit_for_one_address: B::default(), 255 | max_pending_tx_limit: B::default(), 256 | min_tx_value: B::default(), 257 | } 258 | } 259 | } 260 | 261 | pub trait IntoArray<T> { 262 | fn into_array(&self) -> [T; 5]; 263 | } 264 | 265 | impl<B: Clone> IntoArray<B> for Limits<B> { 266 | fn into_array(&self) -> [B; 5] { 267 | [ 268 | self.max_tx_value.clone(), 269 | self.day_max_limit.clone(), 270 | self.day_max_limit_for_one_address.clone(), 271 | self.max_pending_tx_limit.clone(), 272 | self.min_tx_value.clone(), 273 | ] 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /runtime/wasm/Cargo.toml: -------------------------------------------------------------------------------- 1 | [lib] 2 | crate-type = ['cdylib'] 3 | 4 | [features] 5 | default = [] 6 | std = ['akropolisos-runtime/std'] 7 | 8 | [workspace] 9 | members = [] 10 | 11 | [profile.release] 12 | lto = true 13 | panic = 'abort' 14 | 15 | [package] 16 | authors = ['Akropolis '] 17 | edition = '2018' 18 | name = 'akropolisos-runtime-wasm' 19 | version = '1.0.0' 20 | 21 | [dependencies.akropolisos-runtime] 22 | default-features = false 23 | path = '..' 24 | -------------------------------------------------------------------------------- /runtime/wasm/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | if cargo --version | grep -q "nightly"; then 5 | CARGO_CMD="cargo" 6 | else 7 | CARGO_CMD="cargo +nightly" 8 | fi 9 | CARGO_INCREMENTAL=0 RUSTFLAGS="-C link-arg=--export-table" $CARGO_CMD build --target=wasm32-unknown-unknown --release 10 | for i in akropolisos_substrate_node_runtime_wasm 11 | do 12 | wasm-gc target/wasm32-unknown-unknown/release/$i.wasm target/wasm32-unknown-unknown/release/$i.compact.wasm 13 | done 14 | -------------------------------------------------------------------------------- /runtime/wasm/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! The Akropolis OS runtime reexported for WebAssembly compilation. 2 | 3 | #![cfg_attr(not(feature = "std"), no_std)] 4 | 5 | pub use akropolisos_runtime::*; 6 | -------------------------------------------------------------------------------- /scripts/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | set -e 4 | 5 | export CARGO_INCREMENTAL=0 6 | 7 | for SRC in runtime/wasm 8 | do 9 | echo "Building webassembly binary in $SRC..." 10 | cd "$SRC" 11 | 12 | ./build.sh 13 | 14 | cd - >> /dev/null 15 | done -------------------------------------------------------------------------------- /scripts/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | echo "*** Initializing WASM build environment" 6 | 7 | if [ -z $CI_PROJECT_NAME ] ; then 8 | rustup update nightly 9 | rustup update stable 10 | fi 11 | 12 | rustup target add wasm32-unknown-unknown --toolchain nightly 13 | 14 | # Install wasm-gc. It's useful for slimming down wasm binaries. 15 | command -v wasm-gc || \ 16 | cargo +nightly install --git https://github.com/alexcrichton/wasm-gc --force 17 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | //!
Substrate Node CLI 2 | #![warn(missing_docs)] 3 | 4 | fn main() -> sc_cli::Result<()> { 5 | node_cli::run_cli() 6 | } -------------------------------------------------------------------------------- /testing/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "node-testing" 3 | version = "0.3.0" 4 | authors = ['Akropolis '] 5 | description = "Test utilities for Substrate node." 6 | edition = "2018" 7 | license = "MIT" 8 | 9 | [dependencies] 10 | pallet-balances = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git'} 11 | sc-client = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 12 | sc-client-db = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git', features = ["kvdb-rocksdb"] } 13 | sc-client-api = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 14 | codec = { package = "parity-scale-codec", version = "1.2.0" } 15 | pallet-contracts = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 16 | pallet-grandpa = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 17 | pallet-indices = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 18 | sp-keyring = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 19 | node-executor = { path = "../executor" } 20 | akropolisos-runtime = { path = "../runtime" } 21 | sp-core = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 22 | sp-io = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 23 | frame-support = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 24 | pallet-session = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 25 | pallet-society = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 26 | sp-runtime = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 27 | pallet-staking = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 28 | sc-executor = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git', features = ["wasmtime"] } 29 | sp-consensus = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 30 | frame-system = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 31 | substrate-test-client = { version = "2.0.0-dev", git = 'https://github.com/paritytech/substrate.git' } 32 | pallet-timestamp = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 33 | pallet-transaction-payment = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 34 | pallet-treasury = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 35 | wabt = "0.9.2" 36 | sp-api = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 37 | sp-finality-tracker = { version = "2.0.0-alpha.5", default-features = false, git = 'https://github.com/paritytech/substrate.git' } 38 | sp-timestamp = { version = "2.0.0-alpha.5", default-features = false, git = 'https://github.com/paritytech/substrate.git' } 39 | sp-block-builder = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 40 | sc-block-builder = { version = 
"0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 41 | sp-inherents = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 42 | sp-blockchain = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 43 | log = "0.4.8" 44 | tempfile = "3.1.0" 45 | fs_extra = "1" 46 | 47 | [dev-dependencies] 48 | criterion = "0.3.0" 49 | sc-cli = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 50 | sc-service = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git', features = ["rocksdb"] } 51 | 52 | [[bench]] 53 | name = "import" 54 | harness = false 55 | -------------------------------------------------------------------------------- /testing/benches/import.rs: -------------------------------------------------------------------------------- 1 | //! Block import benchmark. 2 | //! 3 | //! This benchmark is expected to measure block import operation of 4 | //! some more or less full block. 5 | //! 6 | //! As we also want to protect against cold-cache attacks, this 7 | //! benchmark should not rely on any caching (except those that 8 | //! DO NOT depend on user input). Thus block generation should be 9 | //! based on randomized operation. 10 | //! 11 | //! This is supposed to be very simple benchmark and is not subject 12 | //! to much configuring - just block full of randomized transactions. 13 | //! It is not supposed to measure runtime modules weight correctness 14 | 15 | use std::fmt; 16 | use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes}; 17 | use node_primitives::Block; 18 | use sp_runtime::generic::BlockId; 19 | use criterion::{Criterion, criterion_group, criterion_main}; 20 | use sc_client_api::backend::Backend; 21 | 22 | criterion_group!( 23 | name = benches; 24 | config = Criterion::default().sample_size(50).warm_up_time(std::time::Duration::from_secs(20)); 25 | targets = bench_block_import, bench_account_reaping, bench_account_ed25519 26 | ); 27 | criterion_group!( 28 | name = wasm_size; 29 | config = Criterion::default().sample_size(10); 30 | targets = bench_wasm_size_import 31 | ); 32 | criterion_group!( 33 | name = profile; 34 | config = Criterion::default().sample_size(10); 35 | targets = profile_block_import 36 | ); 37 | criterion_main!(benches, profile); 38 | 39 | fn bench_block_import(c: &mut Criterion) { 40 | sc_cli::init_logger(""); 41 | // for future uses, uncomment if something wrong. 42 | // sc_cli::init_logger("sc_client=debug"); 43 | 44 | let mut bench_db = BenchDb::new(100); 45 | let block = bench_db.generate_block(BlockType::RandomTransfers(100)); 46 | 47 | log::trace!( 48 | target: "bench-logistics", 49 | "Seed database directory: {}", 50 | bench_db.path().display(), 51 | ); 52 | 53 | c.bench_function_over_inputs("import-block-B-0001", 54 | move |bencher, profile| { 55 | bencher.iter_batched( 56 | || { 57 | let context = bench_db.create_context(*profile); 58 | 59 | // mostly to just launch compiler before benching! 
60 | let version = context.client.runtime_version_at(&BlockId::Number(0)) 61 | .expect("Failed to get runtime version") 62 | .spec_version; 63 | 64 | log::trace!( 65 | target: "bench-logistics", 66 | "Next iteration database directory: {}, runtime version: {}", 67 | context.path().display(), version, 68 | ); 69 | 70 | context 71 | }, 72 | |mut context| { 73 | let start = std::time::Instant::now(); 74 | context.import_block(block.clone()); 75 | let elapsed = start.elapsed(); 76 | 77 | log::info!( 78 | target: "bench-logistics", 79 | "imported block with {} tx, took: {:#?}", 80 | block.extrinsics.len(), 81 | elapsed, 82 | ); 83 | 84 | log::info!( 85 | target: "bench-logistics", 86 | "usage info: {}", 87 | context.backend.usage_info() 88 | .expect("RocksDB backend always provides usage info!"), 89 | ); 90 | }, 91 | criterion::BatchSize::LargeInput, 92 | ); 93 | }, 94 | vec![Profile::Wasm, Profile::Native], 95 | ); 96 | } 97 | 98 | fn bench_account_reaping(c: &mut Criterion) { 99 | sc_cli::init_logger(""); 100 | 101 | let mut bench_db = BenchDb::new(100); 102 | let block = bench_db.generate_block(BlockType::RandomTransfersReaping(100)); 103 | 104 | c.bench_function_over_inputs("import-block-reaping-B-0002", 105 | move |bencher, profile| { 106 | bencher.iter_batched( 107 | || { 108 | let context = bench_db.create_context(*profile); 109 | 110 | // mostly to just launch compiler before benching! 111 | context.client.runtime_version_at(&BlockId::Number(0)) 112 | .expect("Failed to get runtime version"); 113 | 114 | context 115 | }, 116 | |mut context| { 117 | context.import_block(block.clone()); 118 | }, 119 | criterion::BatchSize::LargeInput, 120 | ); 121 | }, 122 | vec![Profile::Wasm, Profile::Native], 123 | ); 124 | } 125 | 126 | fn bench_account_ed25519(c: &mut Criterion) { 127 | sc_cli::init_logger(""); 128 | 129 | let mut bench_db = BenchDb::with_key_types(100, KeyTypes::Ed25519); 130 | let block = bench_db.generate_block(BlockType::RandomTransfers(100)); 131 | 132 | c.bench_function_over_inputs("import-block-ed25519-B-0003", 133 | move |bencher, profile| { 134 | bencher.iter_batched( 135 | || { 136 | let context = bench_db.create_context(*profile); 137 | context.client.runtime_version_at(&BlockId::Number(0)) 138 | .expect("Failed to get runtime version"); 139 | 140 | context 141 | }, 142 | |mut context| { 143 | context.import_block(block.clone()); 144 | }, 145 | criterion::BatchSize::LargeInput, 146 | ); 147 | }, 148 | vec![Profile::Wasm, Profile::Native], 149 | ); 150 | } 151 | 152 | // This is not an actual benchmark, so don't use it to measure anything. 153 | // It just produces special pattern of cpu load that allows easy picking 154 | // the part of block import for the profiling in the tool of choice. 
155 | fn profile_block_import(c: &mut Criterion) { 156 | sc_cli::init_logger(""); 157 | 158 | let mut bench_db = BenchDb::new(128); 159 | let block = bench_db.generate_block(BlockType::RandomTransfers(100)); 160 | 161 | c.bench_function("profile block", 162 | move |bencher| { 163 | bencher.iter_batched( 164 | || { 165 | bench_db.create_context(Profile::Native) 166 | }, 167 | |mut context| { 168 | // until better osx signpost/callgrind signal is possible to use 169 | // in rust, we just pause everything completely to help choosing 170 | // actual profiling interval 171 | std::thread::park_timeout(std::time::Duration::from_secs(2)); 172 | context.import_block(block.clone()); 173 | // and here as well 174 | std::thread::park_timeout(std::time::Duration::from_secs(2)); 175 | log::info!( 176 | target: "bench-logistics", 177 | "imported block, usage info: {}", 178 | context.backend.usage_info() 179 | .expect("RocksDB backend always provides usage info!"), 180 | ) 181 | }, 182 | criterion::BatchSize::PerIteration, 183 | ); 184 | }, 185 | ); 186 | } 187 | 188 | struct Setup { 189 | db: BenchDb, 190 | block: Block, 191 | } 192 | 193 | struct SetupIterator { 194 | current: usize, 195 | finish: usize, 196 | multiplier: usize, 197 | } 198 | 199 | impl SetupIterator { 200 | fn new(current: usize, finish: usize, multiplier: usize) -> Self { 201 | SetupIterator { current, finish, multiplier } 202 | } 203 | } 204 | 205 | impl Iterator for SetupIterator { 206 | type Item = Setup; 207 | 208 | fn next(&mut self) -> Option { 209 | if self.current >= self.finish { return None } 210 | 211 | self.current += 1; 212 | 213 | let size = self.current * self.multiplier; 214 | let mut db = BenchDb::new(size); 215 | let block = db.generate_block(BlockType::RandomTransfers(size)); 216 | Some(Setup { db, block }) 217 | } 218 | } 219 | 220 | impl fmt::Debug for Setup { 221 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 222 | write!(f, "Setup: {} tx/block", self.block.extrinsics.len()) 223 | } 224 | } 225 | 226 | fn bench_wasm_size_import(c: &mut Criterion) { 227 | sc_cli::init_logger(""); 228 | 229 | c.bench_function_over_inputs("wasm_size_import", 230 | move |bencher, setup| { 231 | bencher.iter_batched( 232 | || { 233 | setup.db.create_context(Profile::Wasm) 234 | }, 235 | |mut context| { 236 | context.import_block(setup.block.clone()); 237 | }, 238 | criterion::BatchSize::PerIteration, 239 | ); 240 | }, 241 | SetupIterator::new(5, 15, 50), 242 | ); 243 | } 244 | -------------------------------------------------------------------------------- /testing/src/bench.rs: -------------------------------------------------------------------------------- 1 | //! Benchmarking module. 2 | //! 3 | //! Utilities to do full-scale benchmarks involving database. With `BenchDb` you 4 | //! can pregenerate seed database and `clone` it for every iteration of your benchmarks 5 | //! or tests to get consistent, smooth benchmark experience! 
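//!
//! A minimal usage sketch (illustrative only; the names are the ones defined in this module):
//!
//! ```ignore
//! let mut db = BenchDb::new(100);                                 // seed database with 100 endowed accounts
//! let block = db.generate_block(BlockType::RandomTransfers(100));
//! let mut context = db.create_context(Profile::Native);           // runs against a fresh copy of the seed db
//! context.import_block(block);
//! ```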
6 | 7 | use std::{sync::Arc, path::Path, collections::BTreeMap}; 8 | 9 | use crate::client::{Client, Backend}; 10 | use crate::keyring::*; 11 | use sc_client_db::PruningMode; 12 | use sc_executor::{NativeExecutor, WasmExecutionMethod}; 13 | use sp_consensus::{ 14 | BlockOrigin, BlockImport, BlockImportParams, 15 | ForkChoiceStrategy, ImportResult, ImportedAux 16 | }; 17 | use sp_runtime::{ 18 | generic::BlockId, 19 | OpaqueExtrinsic, 20 | traits::{Block as BlockT, Verify, Zero, IdentifyAccount}, 21 | }; 22 | use codec::{Decode, Encode}; 23 | use akropolisos_runtime::{ 24 | Call, 25 | Block, 26 | CheckedExtrinsic, 27 | constants::currency::DOLLARS, 28 | UncheckedExtrinsic, 29 | MinimumPeriod, 30 | BalancesCall, 31 | AccountId, 32 | Signature, 33 | }; 34 | use sp_core::{ExecutionContext, blake2_256}; 35 | use sp_api::ProvideRuntimeApi; 36 | use sp_block_builder::BlockBuilder; 37 | use sp_inherents::InherentData; 38 | use sc_client_api::{ 39 | ExecutionStrategy, 40 | execution_extensions::{ExecutionExtensions, ExecutionStrategies}, 41 | }; 42 | use sp_core::{Pair, Public, sr25519, ed25519}; 43 | use sc_block_builder::BlockBuilderProvider; 44 | 45 | /// Keyring full of accounts for benching. 46 | /// 47 | /// Accounts are ordered: 48 | /// //endowed-user//00 49 | /// //endowed-user//01 50 | /// ... 51 | /// //endowed-user//N 52 | #[derive(Clone)] 53 | pub struct BenchKeyring { 54 | accounts: BTreeMap, 55 | } 56 | 57 | #[derive(Clone)] 58 | enum BenchPair { 59 | Sr25519(sr25519::Pair), 60 | Ed25519(ed25519::Pair), 61 | } 62 | 63 | impl BenchPair { 64 | fn sign(&self, payload: &[u8]) -> Signature { 65 | match self { 66 | Self::Sr25519(pair) => pair.sign(payload).into(), 67 | Self::Ed25519(pair) => pair.sign(payload).into(), 68 | } 69 | } 70 | } 71 | 72 | /// Pre-initialized benchmarking database. 73 | /// 74 | /// This is prepared database with genesis and keyring 75 | /// that can be cloned and then used for any benchmarking. 76 | pub struct BenchDb { 77 | keyring: BenchKeyring, 78 | directory_guard: Guard, 79 | } 80 | 81 | impl Clone for BenchDb { 82 | fn clone(&self) -> Self { 83 | let keyring = self.keyring.clone(); 84 | let dir = tempfile::tempdir().expect("temp dir creation failed"); 85 | 86 | let seed_dir = self.directory_guard.0.path(); 87 | 88 | log::trace!( 89 | target: "bench-logistics", 90 | "Copying seed db from {} to {}", 91 | seed_dir.to_string_lossy(), 92 | dir.path().to_string_lossy(), 93 | ); 94 | let seed_db_files = std::fs::read_dir(seed_dir) 95 | .expect("failed to list file in seed dir") 96 | .map(|f_result| 97 | f_result.expect("failed to read file in seed db") 98 | .path() 99 | .clone() 100 | ).collect(); 101 | fs_extra::copy_items( 102 | &seed_db_files, 103 | dir.path(), 104 | &fs_extra::dir::CopyOptions::new(), 105 | ).expect("Copy of seed database is ok"); 106 | 107 | BenchDb { keyring, directory_guard: Guard(dir) } 108 | } 109 | } 110 | 111 | /// Type of block for generation 112 | #[derive(Debug, PartialEq, Clone, Copy)] 113 | pub enum BlockType { 114 | /// Bunch of random transfers. 115 | RandomTransfers(usize), 116 | /// Bunch of random transfers that drain all of the source balance. 117 | RandomTransfersReaping(usize), 118 | } 119 | 120 | impl BlockType { 121 | /// Number of transactions for this block type. 122 | pub fn transactions(&self) -> usize { 123 | match self { 124 | Self::RandomTransfers(v) | Self::RandomTransfersReaping(v) => *v, 125 | } 126 | } 127 | } 128 | 129 | impl BenchDb { 130 | /// New immutable benchmarking database. 
131 | /// 132 | /// See [`new`] method documentation for more information about the purpose 133 | /// of this structure. 134 | pub fn with_key_types(keyring_length: usize, key_types: KeyTypes) -> Self { 135 | let keyring = BenchKeyring::new(keyring_length, key_types); 136 | 137 | let dir = tempfile::tempdir().expect("temp dir creation failed"); 138 | log::trace!( 139 | target: "bench-logistics", 140 | "Created seed db at {}", 141 | dir.path().to_string_lossy(), 142 | ); 143 | let (_client, _backend) = Self::bench_client(dir.path(), Profile::Native, &keyring); 144 | let directory_guard = Guard(dir); 145 | 146 | BenchDb { keyring, directory_guard } 147 | } 148 | 149 | /// New immutable benchmarking database. 150 | /// 151 | /// This will generate database files in random temporary directory 152 | /// and keep it there until struct is dropped. 153 | /// 154 | /// You can `clone` this database or you can `create_context` from it 155 | /// (which also do `clone`) to run actual operation against new database 156 | /// which will be identical to this. 157 | pub fn new(keyring_length: usize) -> Self { 158 | Self::with_key_types(keyring_length, KeyTypes::Sr25519) 159 | } 160 | 161 | // This should return client that is doing everything that full node 162 | // is doing. 163 | // 164 | // - This client should use best wasm execution method. 165 | // - This client should work with real database only. 166 | fn bench_client(dir: &std::path::Path, profile: Profile, keyring: &BenchKeyring) -> (Client, std::sync::Arc) { 167 | let db_config = sc_client_db::DatabaseSettings { 168 | state_cache_size: 16*1024*1024, 169 | state_cache_child_ratio: Some((0, 100)), 170 | pruning: PruningMode::ArchiveAll, 171 | source: sc_client_db::DatabaseSettingsSrc::Path { 172 | path: dir.into(), 173 | cache_size: None, 174 | }, 175 | }; 176 | 177 | let (client, backend) = sc_client_db::new_client( 178 | db_config, 179 | NativeExecutor::new(WasmExecutionMethod::Compiled, None, 8), 180 | &keyring.generate_genesis(), 181 | None, 182 | None, 183 | ExecutionExtensions::new(profile.into_execution_strategies(), None), 184 | sp_core::tasks::executor(), 185 | None, 186 | ).expect("Should not fail"); 187 | 188 | (client, backend) 189 | } 190 | 191 | /// Generate new block using this database. 
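///
/// Transfers are pushed until the requested count is reached or the block runs out of
/// resources, whichever comes first.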
192 | pub fn generate_block(&mut self, block_type: BlockType) -> Block { 193 | let (client, _backend) = Self::bench_client( 194 | self.directory_guard.path(), 195 | Profile::Wasm, 196 | &self.keyring, 197 | ); 198 | 199 | let version = client.runtime_version_at(&BlockId::number(0)) 200 | .expect("There should be runtime version at 0") 201 | .spec_version; 202 | 203 | let genesis_hash = client.block_hash(Zero::zero()) 204 | .expect("Database error?") 205 | .expect("Genesis block always exists; qed") 206 | .into(); 207 | 208 | let mut block = client 209 | .new_block(Default::default()) 210 | .expect("Block creation failed"); 211 | 212 | let timestamp = 1 * MinimumPeriod::get(); 213 | 214 | let mut inherent_data = InherentData::new(); 215 | inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) 216 | .expect("Put timestamp failed"); 217 | inherent_data.put_data(sp_finality_tracker::INHERENT_IDENTIFIER, &0) 218 | .expect("Put finality tracker failed"); 219 | 220 | for extrinsic in client.runtime_api() 221 | .inherent_extrinsics_with_context( 222 | &BlockId::number(0), 223 | ExecutionContext::BlockConstruction, 224 | inherent_data, 225 | ).expect("Get inherents failed") 226 | { 227 | block.push(extrinsic).expect("Push inherent failed"); 228 | } 229 | 230 | let mut iteration = 0; 231 | let start = std::time::Instant::now(); 232 | for _ in 0..block_type.transactions() { 233 | 234 | let sender = self.keyring.at(iteration); 235 | let receiver = get_account_id_from_seed::( 236 | &format!("random-user//{}", iteration) 237 | ); 238 | 239 | let signed = self.keyring.sign( 240 | CheckedExtrinsic { 241 | signed: Some((sender, signed_extra(0, akropolisos_runtime::ExistentialDeposit::get() + 1))), 242 | function: Call::Balances( 243 | BalancesCall::transfer( 244 | pallet_indices::address::Address::Id(receiver), 245 | match block_type { 246 | BlockType::RandomTransfers(_) => akropolisos_runtime::ExistentialDeposit::get() + 1, 247 | BlockType::RandomTransfersReaping(_) => 100*DOLLARS - akropolisos_runtime::ExistentialDeposit::get() - 1, 248 | } 249 | ) 250 | ), 251 | }, 252 | version, 253 | genesis_hash, 254 | ); 255 | 256 | let encoded = Encode::encode(&signed); 257 | 258 | let opaque = OpaqueExtrinsic::decode(&mut &encoded[..]) 259 | .expect("Failed to decode opaque"); 260 | 261 | match block.push(opaque) { 262 | Err(sp_blockchain::Error::ApplyExtrinsicFailed( 263 | sp_blockchain::ApplyExtrinsicFailed::Validity(e) 264 | )) if e.exhausted_resources() => { 265 | break; 266 | }, 267 | Err(err) => panic!("Error pushing transaction: {:?}", err), 268 | Ok(_) => {}, 269 | } 270 | iteration += 1; 271 | } 272 | let block = block.build().expect("Block build failed").block; 273 | 274 | log::info!( 275 | target: "bench-logistics", 276 | "Block construction: {:#?} ({} tx)", 277 | start.elapsed(), block.extrinsics.len() 278 | ); 279 | 280 | block 281 | } 282 | 283 | /// Database path. 284 | pub fn path(&self) -> &Path { 285 | self.directory_guard.path() 286 | } 287 | 288 | /// Clone this database and create context for testing/benchmarking. 
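///
/// The returned context owns its own copy of the seed database, so block imports in one
/// context do not affect the seed database or other contexts.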
289 | pub fn create_context(&self, profile: Profile) -> BenchContext { 290 | let BenchDb { directory_guard, keyring } = self.clone(); 291 | let (client, backend) = Self::bench_client(directory_guard.path(), profile, &keyring); 292 | 293 | BenchContext { 294 | client, backend, db_guard: directory_guard, 295 | } 296 | } 297 | } 298 | 299 | /// Key types to be used in benching keyring 300 | pub enum KeyTypes { 301 | /// sr25519 signing keys 302 | Sr25519, 303 | /// ed25519 signing keys 304 | Ed25519, 305 | } 306 | 307 | impl BenchKeyring { 308 | /// New keyring. 309 | /// 310 | /// `length` is the number of accounts generated. 311 | pub fn new(length: usize, key_types: KeyTypes) -> Self { 312 | let mut accounts = BTreeMap::new(); 313 | 314 | for n in 0..length { 315 | let seed = format!("//endowed-user/{}", n); 316 | let (account_id, pair) = match key_types { 317 | KeyTypes::Sr25519 => { 318 | let pair = sr25519::Pair::from_string(&seed, None).expect("failed to generate pair"); 319 | let account_id = AccountPublic::from(pair.public()).into_account(); 320 | (account_id, BenchPair::Sr25519(pair)) 321 | }, 322 | KeyTypes::Ed25519 => { 323 | let pair = ed25519::Pair::from_seed(&blake2_256(seed.as_bytes())); 324 | let account_id = AccountPublic::from(pair.public()).into_account(); 325 | (account_id, BenchPair::Ed25519(pair)) 326 | }, 327 | }; 328 | accounts.insert(account_id, pair); 329 | } 330 | 331 | Self { accounts } 332 | } 333 | 334 | /// Generated account id-s from keyring keypairs. 335 | pub fn collect_account_ids(&self) -> Vec { 336 | self.accounts.keys().cloned().collect() 337 | } 338 | 339 | /// Get account id at position `index` 340 | pub fn at(&self, index: usize) -> AccountId { 341 | self.accounts.keys().nth(index).expect("Failed to get account").clone() 342 | } 343 | 344 | /// Sign transaction with keypair from this keyring. 345 | pub fn sign(&self, xt: CheckedExtrinsic, version: u32, genesis_hash: [u8; 32]) -> UncheckedExtrinsic { 346 | match xt.signed { 347 | Some((signed, extra)) => { 348 | let payload = (xt.function, extra.clone(), version, genesis_hash, genesis_hash); 349 | let key = self.accounts.get(&signed).expect("Account id not found in keyring"); 350 | let signature = payload.using_encoded(|b| { 351 | if b.len() > 256 { 352 | key.sign(&sp_io::hashing::blake2_256(b)) 353 | } else { 354 | key.sign(b) 355 | } 356 | }).into(); 357 | UncheckedExtrinsic { 358 | signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), 359 | function: payload.0, 360 | } 361 | } 362 | None => UncheckedExtrinsic { 363 | signature: None, 364 | function: xt.function, 365 | }, 366 | } 367 | } 368 | 369 | /// Generate genesis with accounts from this keyring endowed with some balance. 370 | pub fn generate_genesis(&self) -> akropolisos_runtime::GenesisConfig { 371 | crate::genesis::config_endowed( 372 | false, 373 | Some(akropolisos_runtime::WASM_BINARY), 374 | self.collect_account_ids(), 375 | ) 376 | } 377 | } 378 | 379 | /// Profile for exetion strategies. 380 | #[derive(Clone, Copy, Debug)] 381 | pub enum Profile { 382 | /// As native as possible. 383 | Native, 384 | /// As wasm as possible. 
385 | Wasm, 386 | } 387 | 388 | impl Profile { 389 | fn into_execution_strategies(self) -> ExecutionStrategies { 390 | match self { 391 | Profile::Wasm => ExecutionStrategies { 392 | syncing: ExecutionStrategy::AlwaysWasm, 393 | importing: ExecutionStrategy::AlwaysWasm, 394 | block_construction: ExecutionStrategy::AlwaysWasm, 395 | offchain_worker: ExecutionStrategy::AlwaysWasm, 396 | other: ExecutionStrategy::AlwaysWasm, 397 | }, 398 | Profile::Native => ExecutionStrategies { 399 | syncing: ExecutionStrategy::NativeElseWasm, 400 | importing: ExecutionStrategy::NativeElseWasm, 401 | block_construction: ExecutionStrategy::NativeElseWasm, 402 | offchain_worker: ExecutionStrategy::NativeElseWasm, 403 | other: ExecutionStrategy::NativeElseWasm, 404 | } 405 | } 406 | } 407 | } 408 | 409 | struct Guard(tempfile::TempDir); 410 | 411 | impl Guard { 412 | fn path(&self) -> &Path { 413 | self.0.path() 414 | } 415 | } 416 | 417 | /// Benchmarking/test context holding instantiated client and backend references. 418 | pub struct BenchContext { 419 | /// Node client. 420 | pub client: Client, 421 | /// Node backend. 422 | pub backend: Arc, 423 | 424 | db_guard: Guard, 425 | } 426 | 427 | type AccountPublic = ::Signer; 428 | 429 | fn get_from_seed(seed: &str) -> ::Public { 430 | TPublic::Pair::from_string(&format!("//{}", seed), None) 431 | .expect("static values are valid; qed") 432 | .public() 433 | } 434 | 435 | fn get_account_id_from_seed(seed: &str) -> AccountId 436 | where 437 | AccountPublic: From<::Public> 438 | { 439 | AccountPublic::from(get_from_seed::(seed)).into_account() 440 | } 441 | 442 | impl BenchContext { 443 | /// Import some block. 444 | pub fn import_block(&mut self, block: Block) { 445 | let mut import_params = BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header.clone()); 446 | import_params.body = Some(block.extrinsics().to_vec()); 447 | import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); 448 | 449 | assert_eq!(self.client.chain_info().best_number, 0); 450 | 451 | assert_eq!( 452 | self.client.import_block(import_params, Default::default()) 453 | .expect("Failed to import block"), 454 | ImportResult::Imported( 455 | ImportedAux { 456 | header_only: false, 457 | clear_justification_requests: false, 458 | needs_justification: false, 459 | bad_justification: false, 460 | needs_finality_proof: false, 461 | is_new_best: true, 462 | } 463 | ) 464 | ); 465 | 466 | assert_eq!(self.client.chain_info().best_number, 1); 467 | } 468 | 469 | /// Database path for the current context. 470 | pub fn path(&self) -> &Path { 471 | self.db_guard.path() 472 | } 473 | } 474 | -------------------------------------------------------------------------------- /testing/src/client.rs: -------------------------------------------------------------------------------- 1 | //! Utilities to build a `TestClient` for `node-runtime`. 2 | 3 | use sp_runtime::BuildStorage; 4 | 5 | /// Re-export test-client utilities. 6 | pub use substrate_test_client::*; 7 | 8 | /// Call executor for `node-runtime` `TestClient`. 9 | pub type Executor = sc_executor::NativeExecutor; 10 | 11 | /// Default backend type. 12 | pub type Backend = sc_client_db::Backend; 13 | 14 | /// Test client type. 15 | pub type Client = sc_client::Client< 16 | Backend, 17 | sc_client::LocalCallExecutor, 18 | akropolisos_runtime::Block, 19 | akropolisos_runtime::RuntimeApi, 20 | >; 21 | 22 | /// Transaction for node-runtime. 
23 | pub type Transaction = sc_client_api::backend::TransactionFor; 24 | 25 | /// Genesis configuration parameters for `TestClient`. 26 | #[derive(Default)] 27 | pub struct GenesisParameters { 28 | support_changes_trie: bool, 29 | } 30 | 31 | impl substrate_test_client::GenesisInit for GenesisParameters { 32 | fn genesis_storage(&self) -> Storage { 33 | crate::genesis::config(self.support_changes_trie, None).build_storage().unwrap() 34 | } 35 | } 36 | 37 | /// A `test-runtime` extensions to `TestClientBuilder`. 38 | pub trait TestClientBuilderExt: Sized { 39 | /// Create test client builder. 40 | fn new() -> Self; 41 | 42 | /// Build the test client. 43 | fn build(self) -> Client; 44 | } 45 | 46 | impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< 47 | akropolisos_runtime::Block, 48 | sc_client::LocalCallExecutor, 49 | Backend, 50 | GenesisParameters, 51 | > { 52 | fn new() -> Self{ 53 | Self::default() 54 | } 55 | 56 | fn build(self) -> Client { 57 | self.build_with_native_executor(None).0 58 | } 59 | } 60 | 61 | 62 | -------------------------------------------------------------------------------- /testing/src/genesis.rs: -------------------------------------------------------------------------------- 1 | //! Genesis Configuration. 2 | 3 | use crate::keyring::*; 4 | use akropolisos_runtime::types::Token; 5 | use akropolisos_runtime::constants::currency::*; 6 | use akropolisos_runtime::{ 7 | AccountId, BalancesConfig, ContractsConfig, GenesisConfig, 8 | GrandpaConfig, IndicesConfig, SessionConfig, SocietyConfig, StakingConfig, SystemConfig, 9 | TokenConfig, WASM_BINARY, 10 | }; 11 | use sp_core::ChangesTrieConfiguration; 12 | use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; 13 | use sp_runtime::Perbill; 14 | 15 | /// Create genesis runtime configuration for tests. 16 | pub fn config(support_changes_trie: bool, code: Option<&[u8]>) -> GenesisConfig { 17 | config_endowed(support_changes_trie, code, Default::default()) 18 | } 19 | 20 | /// Create genesis runtime configuration for tests with some extra 21 | /// endowed accounts. 
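///
/// Every account in `extra_endowed` is endowed with 100 DOLLARS on top of the fixed keyring accounts.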
22 | pub fn config_endowed( 23 | support_changes_trie: bool, 24 | code: Option<&[u8]>, 25 | extra_endowed: Vec, 26 | ) -> GenesisConfig { 27 | let mut endowed = vec![ 28 | (alice(), 111 * DOLLARS), 29 | (bob(), 100 * DOLLARS), 30 | (charlie(), 100_000_000 * DOLLARS), 31 | (dave(), 111 * DOLLARS), 32 | (eve(), 101 * DOLLARS), 33 | (ferdie(), 100 * DOLLARS), 34 | ]; 35 | 36 | endowed.extend( 37 | extra_endowed 38 | .into_iter() 39 | .map(|endowed| (endowed, 100 * DOLLARS)), 40 | ); 41 | 42 | GenesisConfig { 43 | system: Some(SystemConfig { 44 | changes_trie_config: if support_changes_trie { 45 | Some(ChangesTrieConfiguration { 46 | digest_interval: 2, 47 | digest_levels: 2, 48 | }) 49 | } else { 50 | None 51 | }, 52 | code: code 53 | .map(|x| x.to_vec()) 54 | .unwrap_or_else(|| WASM_BINARY.to_vec()), 55 | }), 56 | pallet_indices: Some(IndicesConfig { indices: vec![] }), 57 | balances: Some(BalancesConfig { balances: endowed }), 58 | pallet_session: Some(SessionConfig { 59 | keys: vec![ 60 | ( 61 | dave(), 62 | alice(), 63 | to_session_keys(&Ed25519Keyring::Alice, &Sr25519Keyring::Alice), 64 | ), 65 | ( 66 | eve(), 67 | bob(), 68 | to_session_keys(&Ed25519Keyring::Bob, &Sr25519Keyring::Bob), 69 | ), 70 | ( 71 | ferdie(), 72 | charlie(), 73 | to_session_keys(&Ed25519Keyring::Charlie, &Sr25519Keyring::Charlie), 74 | ), 75 | ], 76 | }), 77 | pallet_staking: Some(StakingConfig { 78 | stakers: vec![ 79 | ( 80 | dave(), 81 | alice(), 82 | 111 * DOLLARS, 83 | pallet_staking::StakerStatus::Validator, 84 | ), 85 | ( 86 | eve(), 87 | bob(), 88 | 100 * DOLLARS, 89 | pallet_staking::StakerStatus::Validator, 90 | ), 91 | ( 92 | ferdie(), 93 | charlie(), 94 | 100 * DOLLARS, 95 | pallet_staking::StakerStatus::Validator, 96 | ), 97 | ], 98 | validator_count: 3, 99 | minimum_validator_count: 0, 100 | slash_reward_fraction: Perbill::from_percent(10), 101 | invulnerables: vec![alice(), bob(), charlie()], 102 | ..Default::default() 103 | }), 104 | pallet_contracts: Some(ContractsConfig { 105 | current_schedule: Default::default(), 106 | gas_price: 1 * MILLICENTS, 107 | }), 108 | pallet_babe: Some(Default::default()), 109 | grandpa: Some(GrandpaConfig { 110 | authorities: vec![], 111 | }), 112 | pallet_im_online: Some(Default::default()), 113 | pallet_authority_discovery: Some(Default::default()), 114 | pallet_democracy: Some(Default::default()), 115 | pallet_collective_Instance1: Some(Default::default()), 116 | pallet_collective_Instance2: Some(Default::default()), 117 | pallet_membership_Instance1: Some(Default::default()), 118 | sudo: Some(Default::default()), 119 | pallet_treasury: Some(Default::default()), 120 | pallet_society: Some(SocietyConfig { 121 | members: vec![alice(), bob()], 122 | pot: 0, 123 | max_members: 999, 124 | }), 125 | pallet_vesting: Some(Default::default()), 126 | bridge: None, 127 | dao: None, 128 | token: Some(TokenConfig { tokens: vec![Token { 129 | id: 0, 130 | decimals: 18, 131 | symbol: Vec::from("TOKEN"), 132 | }] }), 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /testing/src/keyring.rs: -------------------------------------------------------------------------------- 1 | //! Test accounts. 2 | 3 | use akropolisos_runtime::{ 4 | AccountId, Balance, CheckedExtrinsic, Index, SessionKeys, SignedExtra, UncheckedExtrinsic, 5 | }; 6 | use codec::Encode; 7 | use sp_keyring::{AccountKeyring, Ed25519Keyring, Sr25519Keyring}; 8 | use sp_runtime::generic::Era; 9 | 10 | /// Alice's account id. 
11 | pub fn alice() -> AccountId { 12 | AccountKeyring::Alice.into() 13 | } 14 | 15 | /// Bob's account id. 16 | pub fn bob() -> AccountId { 17 | AccountKeyring::Bob.into() 18 | } 19 | 20 | /// Charlie's account id. 21 | pub fn charlie() -> AccountId { 22 | AccountKeyring::Charlie.into() 23 | } 24 | 25 | /// Dave's account id. 26 | pub fn dave() -> AccountId { 27 | AccountKeyring::Dave.into() 28 | } 29 | 30 | /// Eve's account id. 31 | pub fn eve() -> AccountId { 32 | AccountKeyring::Eve.into() 33 | } 34 | 35 | /// Ferdie's account id. 36 | pub fn ferdie() -> AccountId { 37 | AccountKeyring::Ferdie.into() 38 | } 39 | 40 | /// Convert keyrings into `SessionKeys`. 41 | pub fn to_session_keys( 42 | ed25519_keyring: &Ed25519Keyring, 43 | sr25519_keyring: &Sr25519Keyring, 44 | ) -> SessionKeys { 45 | SessionKeys { 46 | grandpa: ed25519_keyring.to_owned().public().into(), 47 | babe: sr25519_keyring.to_owned().public().into(), 48 | im_online: sr25519_keyring.to_owned().public().into(), 49 | authority_discovery: sr25519_keyring.to_owned().public().into(), 50 | } 51 | } 52 | 53 | /// Returns transaction extra. 54 | pub fn signed_extra(nonce: Index, extra_fee: Balance) -> SignedExtra { 55 | ( 56 | frame_system::CheckVersion::new(), 57 | frame_system::CheckGenesis::new(), 58 | frame_system::CheckEra::from(Era::mortal(256, 0)), 59 | frame_system::CheckNonce::from(nonce), 60 | frame_system::CheckWeight::new(), 61 | pallet_transaction_payment::ChargeTransactionPayment::from(extra_fee), 62 | Default::default(), 63 | ) 64 | } 65 | 66 | /// Sign given `CheckedExtrinsic`. 67 | pub fn sign(xt: CheckedExtrinsic, version: u32, genesis_hash: [u8; 32]) -> UncheckedExtrinsic { 68 | match xt.signed { 69 | Some((signed, extra)) => { 70 | let payload = ( 71 | xt.function, 72 | extra.clone(), 73 | version, 74 | genesis_hash, 75 | genesis_hash, 76 | ); 77 | let key = AccountKeyring::from_account_id(&signed).unwrap(); 78 | let signature = payload 79 | .using_encoded(|b| { 80 | if b.len() > 256 { 81 | key.sign(&sp_io::hashing::blake2_256(b)) 82 | } else { 83 | key.sign(b) 84 | } 85 | }) 86 | .into(); 87 | UncheckedExtrinsic { 88 | signature: Some(( 89 | pallet_indices::address::Address::Id(signed), 90 | signature, 91 | extra, 92 | )), 93 | function: payload.0, 94 | } 95 | } 96 | None => UncheckedExtrinsic { 97 | signature: None, 98 | function: xt.function, 99 | }, 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /testing/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A set of testing utilities for Substrate Node. 
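//!
//! The `bench`, `client`, `genesis` and `keyring` modules provide database benchmarking helpers,
//! a `TestClient` builder, test genesis configuration and test accounts respectively.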
2 | 3 | #![warn(missing_docs)] 4 | 5 | pub mod client; 6 | pub mod genesis; 7 | pub mod keyring; 8 | pub mod bench; 9 | -------------------------------------------------------------------------------- /transaction-factory/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "node-transaction-factory" 3 | version = "0.3.0" 4 | authors = ['Akropolis '] 5 | edition = "2018" 6 | license = "MIT" 7 | 8 | [dependencies] 9 | log = "0.4.8" 10 | codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } 11 | 12 | sp-block-builder = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 13 | sc-cli = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 14 | sc-client-api = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 15 | sc-block-builder = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 16 | sc-client = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 17 | sp-consensus = { version = "0.8.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 18 | sp-core = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 19 | sp-api = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 20 | sp-runtime = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 21 | sc-service = { version = "0.8.0-alpha.5", default-features = false, git = 'https://github.com/paritytech/substrate.git' } 22 | sp-blockchain = { version = "2.0.0-alpha.5", git = 'https://github.com/paritytech/substrate.git' } 23 | -------------------------------------------------------------------------------- /transaction-factory/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::sync::Arc; 3 | use std::cmp::PartialOrd; 4 | use std::fmt::Display; 5 | 6 | use log::info; 7 | 8 | use sp_block_builder::BlockBuilder; 9 | use sc_block_builder::BlockBuilderProvider; 10 | use sp_api::{ProvideRuntimeApi, ApiExt, CallApiAt, TransactionFor}; 11 | use sp_consensus::{ 12 | BlockOrigin, BlockImportParams, InherentData, 13 | ForkChoiceStrategy, SelectChain 14 | }; 15 | use sp_consensus::block_import::BlockImport; 16 | use codec::{Decode, Encode}; 17 | use sp_runtime::generic::BlockId; 18 | use sp_runtime::traits::{ 19 | Block as BlockT, Header as HeaderT, AtLeast32Bit, One, Zero, 20 | }; 21 | use sp_blockchain::HeaderBackend; 22 | 23 | pub trait RuntimeAdapter { 24 | type AccountId: Display; 25 | type Balance: Display + AtLeast32Bit + From; 26 | type Block: BlockT; 27 | type Index: Copy; 28 | type Number: Display + PartialOrd + AtLeast32Bit + Zero + One; 29 | type Phase: Copy; 30 | type Secret; 31 | 32 | fn new(blocks: u32, transactions: u32) -> Self; 33 | 34 | fn blocks(&self) -> u32; 35 | fn transactions(&self) -> u32; 36 | 37 | fn block_number(&self) -> u32; 38 | fn set_block_number(&mut self, value: u32); 39 | 40 | fn transfer_extrinsic( 41 | &mut self, 42 | sender: &Self::AccountId, 43 | key: &Self::Secret, 44 | destination: &Self::AccountId, 45 | amount: &Self::Balance, 46 | version: u32, 47 | genesis_hash: &::Hash, 48 | prior_block_hash: &::Hash, 49 | ) -> ::Extrinsic; 50 | 51 | fn inherent_extrinsics(&self) -> InherentData; 52 | 53 | fn minimum_balance() -> Self::Balance; 54 | fn master_account_id() -> Self::AccountId; 55 
| fn master_account_secret() -> Self::Secret; 56 | 57 | fn gen_random_account_id(seed: u32) -> Self::AccountId; 58 | fn gen_random_account_secret(seed: u32) -> Self::Secret; 59 | } 60 | 61 | /// Manufactures transactions. The exact amount depends on `num` and `rounds`. 62 | pub fn factory( 63 | mut factory_state: RA, 64 | client: &Arc, 65 | select_chain: &Sc, 66 | ) -> sc_cli::Result<()> 67 | where 68 | Backend: sc_client_api::backend::Backend + Send, 69 | Block: BlockT, 70 | Client: BlockBuilderProvider + CallApiAt 71 | + ProvideRuntimeApi + HeaderBackend, 72 | Client::Api: BlockBuilder + ApiExt, 73 | Sc: SelectChain, 74 | RA: RuntimeAdapter, 75 | Block::Hash: From, 76 | for<'a> &'a Client: BlockImport>, 77 | { 78 | let best_header: Result<::Header, sc_cli::Error> = 79 | select_chain.best_chain().map_err(|e| format!("{:?}", e).into()); 80 | let mut best_hash = best_header?.hash(); 81 | let mut best_block_id = BlockId::::hash(best_hash); 82 | let version = client.runtime_version_at(&best_block_id)?.spec_version; 83 | let genesis_hash = client.hash(Zero::zero())? 84 | .expect("Genesis block always exists; qed").into(); 85 | 86 | while factory_state.block_number() < factory_state.blocks() { 87 | let from = (RA::master_account_id(), RA::master_account_secret()); 88 | let amount = RA::minimum_balance(); 89 | 90 | let inherents = RA::inherent_extrinsics(&factory_state); 91 | let inherents = client.runtime_api().inherent_extrinsics(&best_block_id, inherents) 92 | .expect("Failed to create inherent extrinsics"); 93 | 94 | let tx_per_block = factory_state.transactions(); 95 | 96 | let mut block = client.new_block(Default::default()).expect("Failed to create new block"); 97 | 98 | for tx_num in 0..tx_per_block { 99 | let seed = tx_num * (factory_state.block_number() + 1); 100 | let to = RA::gen_random_account_id(seed); 101 | 102 | let transfer = factory_state.transfer_extrinsic( 103 | &from.0, 104 | &from.1, 105 | &to, 106 | &amount, 107 | version, 108 | &genesis_hash, 109 | &best_hash, 110 | ); 111 | 112 | info!("Pushing transfer {}/{} to {} into block.", tx_num + 1, tx_per_block, to); 113 | 114 | block.push( 115 | Decode::decode(&mut &transfer.encode()[..]) 116 | .expect("Failed to decode transfer extrinsic") 117 | ).expect("Failed to push transfer extrinsic into block"); 118 | } 119 | 120 | for inherent in inherents { 121 | block.push(inherent).expect("Failed ..."); 122 | } 123 | 124 | let block = block.build().expect("Failed to bake block").block; 125 | 126 | factory_state.set_block_number(factory_state.block_number() + 1); 127 | 128 | info!( 129 | "Created block {} with hash {}.", 130 | factory_state.block_number(), 131 | best_hash, 132 | ); 133 | 134 | best_hash = block.header().hash(); 135 | best_block_id = BlockId::::hash(best_hash); 136 | 137 | let mut import = BlockImportParams::new(BlockOrigin::File, block.header().clone()); 138 | import.body = Some(block.extrinsics().to_vec()); 139 | import.fork_choice = Some(ForkChoiceStrategy::LongestChain); 140 | (&**client).import_block(import, HashMap::new()).expect("Failed to import block"); 141 | 142 | info!("Imported block at {}", factory_state.block_number()); 143 | } 144 | 145 | Ok(()) 146 | } 147 | --------------------------------------------------------------------------------
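For orientation, the control flow of `factory` above only relies on the counter methods of `RuntimeAdapter` (`blocks`, `transactions`, `block_number`, `set_block_number`); the remaining trait items are runtime-specific. A minimal, hypothetical sketch of that bookkeeping (not part of this repository) could look like:

// Hypothetical illustration only: the state struct backing the counter methods of `RuntimeAdapter`.
struct FactoryState {
    blocks: u32,        // total number of blocks to manufacture
    transactions: u32,  // number of transfers to push into each block
    block_number: u32,  // blocks manufactured so far
}

impl FactoryState {
    // Mirrors the `RuntimeAdapter::new(blocks, transactions)` constructor.
    fn new(blocks: u32, transactions: u32) -> Self {
        Self { blocks, transactions, block_number: 0 }
    }
    fn blocks(&self) -> u32 { self.blocks }
    fn transactions(&self) -> u32 { self.transactions }
    fn block_number(&self) -> u32 { self.block_number }
    fn set_block_number(&mut self, value: u32) { self.block_number = value; }
}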