├── .editorconfig ├── .git-crypt ├── .gitattributes └── keys │ └── default │ └── 0 │ ├── 7447F1AB5A5688074853F3E3ED778D7375EA72E5.gpg │ └── 7ACB049E2EC6236A37419D9A634DF68C8F462CBD.gpg ├── .gitattributes ├── .github └── workflows │ ├── build_test.yml │ └── update_pallets.yml ├── .gitignore ├── .gitmodules ├── .maintain └── frame-weight-template.hbs ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── node ├── Cargo.toml ├── build.rs └── src │ ├── chain_spec.rs │ ├── cli.rs │ ├── command.rs │ ├── lib.rs │ ├── main.rs │ ├── rpc.rs │ └── service.rs ├── runtime ├── Cargo.toml ├── build.rs └── src │ └── lib.rs ├── scripts ├── check-rust-env.sh ├── start-devnet.sh ├── start-testnet.sh └── testnet │ ├── benchmark.md │ ├── insert-key.sh │ ├── node01-aura.json │ ├── node01-gran.json │ ├── node02-aura.json │ ├── node02-gran.json │ └── testnet-accounts.md └── ts-tests ├── README.md ├── package-lock.json ├── package.json ├── scripts ├── export_env.sh └── smoke-test.ts ├── tests ├── test-eth-balance.ts └── utils.ts └── tsconfig.json /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | [*.rs] 3 | indent_style=tab 4 | indent_size=tab 5 | tab_width=4 6 | end_of_line=lf 7 | charset=utf-8 8 | trim_trailing_whitespace=true 9 | max_line_length=120 10 | insert_final_newline=true 11 | 12 | [*.yml] 13 | indent_style=space 14 | indent_size=2 15 | tab_width=8 16 | end_of_line=lf 17 | 18 | [*.sh] 19 | indent_style=space 20 | indent_size=2 21 | tab_width=8 22 | end_of_line=lf 23 | -------------------------------------------------------------------------------- /.git-crypt/.gitattributes: -------------------------------------------------------------------------------- 1 | # Do not edit this file. To specify the files to encrypt, create your own 2 | # .gitattributes file in the directory where your files are. 
3 | * !filter !diff 4 | *.gpg binary 5 | -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/7447F1AB5A5688074853F3E3ED778D7375EA72E5.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litentry/litentry-node/14953d5dca77312224ee045cef97bacd78381aa0/.git-crypt/keys/default/0/7447F1AB5A5688074853F3E3ED778D7375EA72E5.gpg -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/7ACB049E2EC6236A37419D9A634DF68C8F462CBD.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litentry/litentry-node/14953d5dca77312224ee045cef97bacd78381aa0/.git-crypt/keys/default/0/7ACB049E2EC6236A37419D9A634DF68C8F462CBD.gpg -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | .gitattributes !filter !diff 2 | .gitignore !filter !diff 3 | .git/* !filter !diff 4 | testnet/*.json filter=git-crypt diff=git-crypt 5 | testnet/testnet-accounts.md filter=git-crypt diff=git-crypt 6 | scripts/testnet/*.json filter=git-crypt diff=git-crypt 7 | scripts/testnet/testnet-accounts.md filter=git-crypt diff=git-crypt 8 | -------------------------------------------------------------------------------- /.github/workflows/build_test.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ develop ] 6 | pull_request: 7 | branches: [ develop ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build-test: 14 | 15 | runs-on: self-hosted 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | with: 20 | submodules: true 21 | - uses: actions/setup-node@v2.1.4 22 | - name: Add wasm toolchain 23 | uses: actions-rs/toolchain@v1 24 | with: 25 | toolchain: nightly 
26 | target: wasm32-unknown-unknown 27 | override: true 28 | - name: Build 29 | run: | 30 | cargo build 31 | - name: Run unit tests 32 | run: cargo test --verbose 33 | - name: Run integration test 34 | run: | 35 | cd ts-tests 36 | npm install 37 | npm test 38 | -------------------------------------------------------------------------------- /.github/workflows/update_pallets.yml: -------------------------------------------------------------------------------- 1 | name: Update Pallets 2 | 3 | on: 4 | repository_dispatch: 5 | 6 | env: 7 | CARGO_TERM_COLOR: always 8 | 9 | jobs: 10 | create-pr: 11 | 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v2 16 | with: 17 | submodules: true 18 | - name: Add wasm toolchain 19 | uses: actions-rs/toolchain@v1 20 | with: 21 | toolchain: nightly 22 | target: wasm32-unknown-unknown 23 | override: true 24 | - name: Update pallets and commit 25 | run: | 26 | cargo update -p pallet-account-linker 27 | cargo update -p pallet-offchain-worker 28 | 29 | - name: Create Pull Request 30 | id: cpr 31 | uses: peter-evans/create-pull-request@v3 32 | with: 33 | token: ${{ secrets.ACCESS_TOKEN }} 34 | commit-message: Update litentry-pallets dependencies 35 | committer: GitHub 36 | author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> 37 | signoff: false 38 | branch: feature/patch-${{ github.event.client_payload.sha }} 39 | delete-branch: true 40 | title: '[AutoPR] Dependency update triggered by litentry-pallets change' 41 | body: | 42 | This PR updates [litentry-pallets][1] dependencies to the latest HEAD of ${{ github.event.client_payload.ref }} 43 | 44 | It is generated automatically by [this commit][2] 45 | 46 | [1]: https://github.com/litentry/litentry-pallets 47 | [2]: https://github.com/litentry/litentry-pallets/commit/${{ github.event.client_payload.sha }} 48 | labels: auto-gen-pr 49 | #assignees: h4n0 50 | reviewers: | 51 | h4n0 52 | buildtrust 53 | chenzongxiong 54 | Satoshi-Kusumoto 55 | 
suinuj 56 | #team-reviewers: 57 | draft: false 58 | 59 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | # Cargo.lock 8 | 9 | # Cargo local config 10 | .cargo/ 11 | 12 | # These are backup files generated by rustfmt 13 | **/*.rs.bk 14 | 15 | # Node modules 16 | **/node_modules/ 17 | 18 | # vim temp files 19 | *.swp 20 | tags 21 | 22 | # vscode configs 23 | .vscode/ 24 | pallets/* 25 | .DS_Store 26 | 27 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "token-server"] 2 | path = token-server 3 | url = https://github.com/litentry/litentry-token-server 4 | -------------------------------------------------------------------------------- /.maintain/frame-weight-template.hbs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 2 | 3 | // Copyright (C) 2021 Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | //! Autogenerated weights for {{pallet}} 19 | //! 20 | //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} 21 | //! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}}, LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}} 22 | //! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} 23 | 24 | // Executed Command: 25 | {{#each args as |arg|~}} 26 | // {{arg}} 27 | {{/each}} 28 | 29 | #![allow(unused_parens)] 30 | #![allow(unused_imports)] 31 | 32 | use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; 33 | use sp_std::marker::PhantomData; 34 | 35 | /// Weight functions needed for {{pallet}}. 36 | pub trait WeightInfo { 37 | {{~#each benchmarks as |benchmark|}} 38 | fn {{benchmark.name~}} 39 | ( 40 | {{~#each benchmark.components as |c| ~}} 41 | {{c.name}}: u32, {{/each~}} 42 | ) -> Weight; 43 | {{~/each}} 44 | } 45 | 46 | /// Weights for {{pallet}} using the Substrate node and recommended hardware. 
47 | pub struct SubstrateWeight(PhantomData); 48 | impl WeightInfo for SubstrateWeight { 49 | {{~#each benchmarks as |benchmark|}} 50 | fn {{benchmark.name~}} 51 | ( 52 | {{~#each benchmark.components as |c| ~}} 53 | {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} 54 | ) -> Weight { 55 | ({{underscore benchmark.base_weight}} as Weight) 56 | {{~#each benchmark.component_weight as |cw|}} 57 | // Standard Error: {{underscore cw.error}} 58 | .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) 59 | {{~/each}} 60 | {{~#if (ne benchmark.base_reads "0")}} 61 | .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) 62 | {{~/if}} 63 | {{~#each benchmark.component_reads as |cr|}} 64 | .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) 65 | {{~/each}} 66 | {{~#if (ne benchmark.base_writes "0")}} 67 | .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) 68 | {{~/if}} 69 | {{~#each benchmark.component_writes as |cw|}} 70 | .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) 71 | {{~/each}} 72 | } 73 | {{~/each}} 74 | } 75 | 76 | // For backwards compatibility and tests 77 | impl WeightInfo for () { 78 | {{~#each benchmarks as |benchmark|}} 79 | fn {{benchmark.name~}} 80 | ( 81 | {{~#each benchmark.components as |c| ~}} 82 | {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} 83 | ) -> Weight { 84 | ({{underscore benchmark.base_weight}} as Weight) 85 | {{~#each benchmark.component_weight as |cw|}} 86 | // Standard Error: {{underscore cw.error}} 87 | .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) 88 | {{~/each}} 89 | {{~#if (ne benchmark.base_reads "0")}} 90 | .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) 91 | {{~/if}} 92 | {{~#each benchmark.component_reads as |cr|}} 93 | 
.saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) 94 | {{~/each}} 95 | {{~#if (ne benchmark.base_writes "0")}} 96 | .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) 97 | {{~/if}} 98 | {{~#each benchmark.component_writes as |cw|}} 99 | .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) 100 | {{~/each}} 101 | } 102 | {{~/each}} 103 | } 104 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | 'node', 4 | 'runtime', 5 | 'token-server', 6 | ] 7 | 8 | [profile.dev] 9 | opt-level = 0 10 | debug = true 11 | debug-assertions = true 12 | overflow-checks = true 13 | lto = false 14 | panic = 'unwind' 15 | incremental = true 16 | codegen-units = 256 17 | rpath = false 18 | 19 | [profile.test] 20 | opt-level = 0 21 | debug = 2 22 | debug-assertions = true 23 | overflow-checks = true 24 | lto = false 25 | panic = 'unwind' # This setting is always ignored. 26 | incremental = true 27 | codegen-units = 256 28 | rpath = false 29 | 30 | [profile.bench] 31 | opt-level = 3 32 | debug = false 33 | debug-assertions = false 34 | overflow-checks = false 35 | lto = false 36 | panic = 'unwind' # This setting is always ignored. 
37 | incremental = false 38 | codegen-units = 16 39 | rpath = false 40 | 41 | [profile.release] 42 | opt-level = 3 43 | debug = false 44 | debug-assertions = false 45 | overflow-checks = false 46 | lto = false 47 | panic = 'unwind' 48 | incremental = false 49 | codegen-units = 16 50 | rpath = false 51 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | @echo "Make All" 3 | 4 | build: 5 | cargo build 6 | node: 7 | cargo build --package $(call pkgid, litentry-node) 8 | runtime: 9 | cargo build --package $(call pkgid, litentry-runtime) 10 | offchain-worker: 11 | cargo build --package $(call pkgid, pallet-offchain-worker) 12 | account-linker: 13 | cargo build --package $(call pkgid, pallet-account-linker) 14 | litentry-token-server: 15 | cargo build --package $(call pkgid, litentry-token-server) 16 | 17 | test-node: 18 | cargo test --package $(call pkgid, litentry-node) 19 | test-runtime: 20 | cargo test --package $(call pkgid, litentry-runtime) 21 | test-account-linker: 22 | cargo test --package $(call pkgid, pallet-account-linker) 23 | test-offchain-worker: 24 | cargo test --package $(call pkgid, pallet-offchain-worker) 25 | test-litentry-token-server: 26 | cargo test --package $(call pkgid, litentry-token-server) 27 | 28 | test: 29 | cargo test 30 | 31 | # benchmark build 32 | 
build-benchmark: 33 | cd node; cargo build --features runtime-benchmarks --release 34 | 35 | benchmark-account-linker: 36 | target/release/litentry-node benchmark \ 37 | --chain=dev \ 38 | --execution=wasm \ 39 | --wasm-execution=compiled \ 40 | --pallet=pallet_account_linker \ 41 | --extrinsic=* \ 42 | --heap-pages=4096 \ 43 | --steps=20 \ 44 | --repeat=50 \ 45 | --output=./pallets/account-linker/src/weights.rs \ 46 | --template=./.maintain/frame-weight-template.hbs 47 | 48 | benchmark-offchain-worker: 49 | target/release/litentry-node benchmark \ 50 | --chain=dev \ 51 | --execution=wasm \ 52 | --wasm-execution=compiled \ 53 | --pallet=pallet_offchain_worker \ 54 | --extrinsic=* \ 55 | --heap-pages=4096 \ 56 | --steps=20 \ 57 | --repeat=50 \ 58 | --output=./pallets/offchain-worker/src/weights.rs \ 59 | --template=./.maintain/frame-weight-template.hbs 60 | 61 | fmt: 62 | cargo fmt 63 | define pkgid 64 | $(shell cargo pkgid $1) 65 | endef 66 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Litentry Node 2 | [![Actions Status](https://github.com/litentry/litentry-node/workflows/Rust/badge.svg)](https://github.com/litentry/litentry-node/actions) 3 | 4 | 5 | Litentry node built with Substrate. 6 | 7 | ## Setup rust enivornment 8 | Follow the [tutorial](https://substrate.dev/docs/en/knowledgebase/getting-started/) to setup **rust** environment. 
9 | 10 | ## Build from source code 11 | rustup default nightly 12 | cargo clean && cargo build 13 | 14 | 15 | ## Run nodes 16 | ./scripts/start-devnet 17 | # Or 18 | ./scripts/start-testnet 19 | 20 | 21 | ## License 22 | Apache-2.0 23 | -------------------------------------------------------------------------------- /node/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ['Litentry Dev'] 3 | build = 'build.rs' 4 | description = 'A fresh FRAME-based Substrate node, ready for hacking.' 5 | edition = '2018' 6 | homepage = 'https://substrate.dev' 7 | license = 'Unlicense' 8 | name = 'litentry-node' 9 | repository = 'https://github.com/litentry/litentry-node' 10 | version = '0.0.1' 11 | 12 | [[bin]] 13 | name = 'litentry-node' 14 | 15 | [package.metadata.docs.rs] 16 | targets = ['x86_64-unknown-linux-gnu'] 17 | 18 | [build-dependencies] 19 | substrate-build-script-utils = '3.0.0' 20 | 21 | [dependencies] 22 | jsonrpc-core = '15.0.0' 23 | structopt = '0.3.8' 24 | hex-literal = "0.3.1" 25 | # local dependencies 26 | litentry-runtime = { path = '../runtime', version = '0.0.1' } 27 | 28 | # Substrate dependencies 29 | frame-benchmarking = '3.0.0' 30 | frame-benchmarking-cli = '3.0.0' 31 | pallet-transaction-payment-rpc = '3.0.0' 32 | sc-basic-authorship = '0.9.0' 33 | sc-cli = { features = ['wasmtime'], version = '0.9.0' } 34 | sc-client-api = '3.0.0' 35 | sc-consensus = '0.9.0' 36 | sc-consensus-aura = '0.9.0' 37 | sc-executor = { features = ['wasmtime'], version = '0.9.0' } 38 | sc-finality-grandpa = '0.9.0' 39 | sc-rpc = '3.0.0' 40 | sc-rpc-api = '0.9.0' 41 | sc-keystore = '3.0.0' 42 | sc-service = { features = ['wasmtime'], version = '0.9.0' } 43 | sc-transaction-pool = '3.0.0' 44 | sp-api = '3.0.0' 45 | sp-block-builder = '3.0.0' 46 | sp-blockchain = '3.0.0' 47 | sp-consensus = '0.9.0' 48 | sp-consensus-aura = '0.9.0' 49 | sp-core = '3.0.0' 50 | sp-finality-grandpa = '3.0.0' 51 | sp-inherents = 
'3.0.0' 52 | sp-runtime = '3.0.0' 53 | sp-transaction-pool = '3.0.0' 54 | substrate-frame-rpc-system = '3.0.0' 55 | 56 | [features] 57 | default = [] 58 | runtime-benchmarks = ['litentry-runtime/runtime-benchmarks'] 59 | -------------------------------------------------------------------------------- /node/build.rs: -------------------------------------------------------------------------------- 1 | use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; 2 | 3 | fn main() { 4 | generate_cargo_keys(); 5 | 6 | rerun_if_git_head_changed(); 7 | } 8 | -------------------------------------------------------------------------------- /node/src/chain_spec.rs: -------------------------------------------------------------------------------- 1 | use sp_core::{Pair, Public, sr25519, crypto::UncheckedInto,}; 2 | // use hex_literal::hex; 3 | use litentry_runtime::{ 4 | AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, 5 | SudoConfig, SystemConfig, WASM_BINARY, Signature, 6 | CouncilConfig, TechnicalCommitteeConfig, DemocracyConfig, 7 | }; 8 | use sp_consensus_aura::sr25519::AuthorityId as AuraId; 9 | use sp_finality_grandpa::AuthorityId as GrandpaId; 10 | use sp_runtime::traits::{Verify, IdentifyAccount}; 11 | use sc_service::{ChainType, Properties}; 12 | use hex_literal::hex; 13 | 14 | // The URL for the telemetry server. 15 | // const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; 16 | 17 | /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. 18 | pub type ChainSpec = sc_service::GenericChainSpec; 19 | 20 | /// Generate a crypto pair from seed. 21 | pub fn get_from_seed(seed: &str) -> ::Public { 22 | TPublic::Pair::from_string(&format!("//{}", seed), None) 23 | .expect("static values are valid; qed") 24 | .public() 25 | } 26 | 27 | type AccountPublic = ::Signer; 28 | 29 | /// Generate an account ID from seed. 
30 | pub fn get_account_id_from_seed(seed: &str) -> AccountId where 31 | AccountPublic: From<::Public> 32 | { 33 | AccountPublic::from(get_from_seed::(seed)).into_account() 34 | } 35 | 36 | /// Generate an Aura authority key. 37 | pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { 38 | ( 39 | get_from_seed::(s), 40 | get_from_seed::(s), 41 | ) 42 | } 43 | 44 | pub fn development_config() -> Result { 45 | let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm binary not available".to_string())?; 46 | 47 | 48 | Ok(ChainSpec::from_genesis( 49 | // Name 50 | "Development", 51 | // ID 52 | "dev", 53 | ChainType::Development, 54 | move || testnet_genesis( 55 | wasm_binary, 56 | // Initial PoA authorities 57 | vec![ 58 | authority_keys_from_seed("Alice"), 59 | ], 60 | // Sudo account 61 | get_account_id_from_seed::("Alice"), 62 | // Pre-funded accounts 63 | vec![ 64 | get_account_id_from_seed::("Alice"), 65 | get_account_id_from_seed::("Bob"), 66 | get_account_id_from_seed::("Alice//stash"), 67 | get_account_id_from_seed::("Bob//stash"), 68 | ], 69 | true, 70 | ), 71 | // Bootnodes 72 | vec![], 73 | // Telemetry 74 | None, 75 | // Protocol ID 76 | None, 77 | // Properties 78 | Some(litentry_properties()), 79 | // Extensions 80 | None, 81 | )) 82 | } 83 | 84 | pub fn local_testnet_config() -> Result { 85 | let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm binary not available".to_string())?; 86 | 87 | Ok(ChainSpec::from_genesis( 88 | // Name 89 | "Local Testnet", 90 | // ID 91 | "local_testnet", 92 | ChainType::Local, 93 | move || testnet_genesis( 94 | wasm_binary, 95 | // Initial PoA authorities 96 | vec![ 97 | authority_keys_from_seed("Alice"), 98 | authority_keys_from_seed("Bob"), 99 | ], 100 | // Sudo account 101 | get_account_id_from_seed::("Alice"), 102 | // Pre-funded accounts 103 | vec![ 104 | get_account_id_from_seed::("Alice"), 105 | get_account_id_from_seed::("Bob"), 106 | get_account_id_from_seed::("Charlie"), 107 | 
get_account_id_from_seed::("Dave"), 108 | get_account_id_from_seed::("Eve"), 109 | get_account_id_from_seed::("Ferdie"), 110 | get_account_id_from_seed::("Alice//stash"), 111 | get_account_id_from_seed::("Bob//stash"), 112 | get_account_id_from_seed::("Charlie//stash"), 113 | get_account_id_from_seed::("Dave//stash"), 114 | get_account_id_from_seed::("Eve//stash"), 115 | get_account_id_from_seed::("Ferdie//stash"), 116 | ], 117 | true, 118 | ), 119 | // Bootnodes 120 | vec![], 121 | // Telemetry 122 | None, 123 | // Protocol ID 124 | None, 125 | // Properties 126 | None, 127 | // Extensions 128 | None, 129 | )) 130 | } 131 | 132 | /// Configure initial storage state for FRAME modules. 133 | fn testnet_genesis( 134 | wasm_binary: &[u8], 135 | initial_authorities: Vec<(AuraId, GrandpaId)>, 136 | root_key: AccountId, 137 | endowed_accounts: Vec, 138 | _enable_println: bool, 139 | ) -> GenesisConfig { 140 | let num_endowed_accounts = endowed_accounts.len(); 141 | GenesisConfig { 142 | frame_system: Some(SystemConfig { 143 | // Add Wasm runtime to storage. 144 | code: wasm_binary.to_vec(), 145 | changes_trie_config: Default::default(), 146 | }), 147 | pallet_balances: Some(BalancesConfig { 148 | // Configure endowed accounts with initial balance of 1 << 60. 149 | balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), 150 | }), 151 | pallet_aura: Some(AuraConfig { 152 | authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), 153 | }), 154 | pallet_grandpa: Some(GrandpaConfig { 155 | authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), 156 | }), 157 | pallet_sudo: Some(SudoConfig { 158 | // Assign network admin rights. 
159 | key: root_key, 160 | }), 161 | pallet_democracy: Some(DemocracyConfig::default()), 162 | pallet_collective_Instance1: Some(CouncilConfig::default()), 163 | pallet_collective_Instance2: Some(TechnicalCommitteeConfig { 164 | members: endowed_accounts.iter() 165 | .take((num_endowed_accounts + 1) / 2) 166 | .cloned() 167 | .collect(), 168 | phantom: Default::default(), 169 | }), 170 | pallet_treasury: Default::default(), 171 | } 172 | } 173 | 174 | /// Properties for Litentry. 175 | pub fn litentry_properties() -> Properties { 176 | let mut properties = Properties::new(); 177 | 178 | properties.insert("ss58Format".into(), 31.into()); 179 | properties.insert("tokenDecimals".into(), 12.into()); 180 | properties.insert("tokenSymbol".into(), "LIT".into()); 181 | 182 | properties 183 | } 184 | 185 | pub fn litentry_config() -> Result { 186 | let wasm_binary = WASM_BINARY.ok_or("Development wasm binary not available".to_string())?; 187 | 188 | Ok(ChainSpec::from_genesis( 189 | // Name 190 | "Litentry", 191 | // ID 192 | "Litentry", 193 | ChainType::Live, 194 | move || testnet_genesis( 195 | wasm_binary, 196 | // Initial PoA authorities 197 | vec![ 198 | // NOTE: sr25519 for aura, ed25519 for grandpa 199 | ( 200 | hex!["6ee3d433c282f2da08157874cb88002713bce7e34d88218734ebde9184adc62b"].unchecked_into(), 201 | hex!["1a5eb27b10e65006d1f11e95afef014fead6f76e1654433afdc850e42270b539"].unchecked_into(), 202 | ), 203 | ( 204 | hex!["208da5421e97eaa93426f99ae56efcbebe9fb44628dbf19ab31dd649e1290a04"].unchecked_into(), 205 | hex!["bd72634e87aa6fc31386449b73a2b5db3f7d3aacbbc6e8b46b225dc346ad43a8"].unchecked_into(), 206 | ), 207 | ], 208 | // Sudo account 209 | hex!["6ee3d433c282f2da08157874cb88002713bce7e34d88218734ebde9184adc62b"].into(), 210 | // Pre-funded accounts 211 | vec![ 212 | 213 | hex!["6ee3d433c282f2da08157874cb88002713bce7e34d88218734ebde9184adc62b"].into(), 214 | hex!["208da5421e97eaa93426f99ae56efcbebe9fb44628dbf19ab31dd649e1290a04"].into(), 215 | 
hex!["4a2cdd9649970dfaa3659d8078e129ace1a4dd420df6351d7b38cca96268f93a"].into(), 216 | hex!["b649b678664131da1576b90e81a2e3934d6cb82937cac9be62073f793f121e67"].into(), 217 | ], 218 | true, 219 | ), 220 | // Bootnodes 221 | vec![], 222 | // Telemetry 223 | None, 224 | // Protocol ID 225 | Some("Litentry"), 226 | // Properties 227 | Some(litentry_properties()), 228 | // Extensions 229 | None, 230 | )) 231 | } 232 | -------------------------------------------------------------------------------- /node/src/cli.rs: -------------------------------------------------------------------------------- 1 | use structopt::StructOpt; 2 | use sc_cli::RunCmd; 3 | 4 | #[derive(Debug, StructOpt)] 5 | pub struct Cli { 6 | #[structopt(subcommand)] 7 | pub subcommand: Option, 8 | 9 | #[structopt(flatten)] 10 | pub run: RunCmd, 11 | } 12 | 13 | #[derive(Debug, StructOpt)] 14 | pub enum Subcommand { 15 | /// Key management cli utilities 16 | Key(sc_cli::KeySubcommand), 17 | 18 | /// Build a chain specification. 19 | BuildSpec(sc_cli::BuildSpecCmd), 20 | 21 | /// Validate blocks. 22 | CheckBlock(sc_cli::CheckBlockCmd), 23 | 24 | /// Export blocks. 25 | ExportBlocks(sc_cli::ExportBlocksCmd), 26 | 27 | /// Export the state of a given block into a chain spec. 28 | ExportState(sc_cli::ExportStateCmd), 29 | 30 | /// Import blocks. 31 | ImportBlocks(sc_cli::ImportBlocksCmd), 32 | 33 | /// Remove the whole chain. 34 | PurgeChain(sc_cli::PurgeChainCmd), 35 | 36 | /// Revert the chain to a previous state. 37 | Revert(sc_cli::RevertCmd), 38 | 39 | /// The custom benchmark subcommmand benchmarking runtime pallets. 40 | #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] 41 | Benchmark(frame_benchmarking_cli::BenchmarkCmd), 42 | } 43 | -------------------------------------------------------------------------------- /node/src/command.rs: -------------------------------------------------------------------------------- 1 | // This file is part of Substrate. 
2 | 3 | // Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | // Licensed under the Apache License, Version 2.0 (the "License"); 7 | // you may not use this file except in compliance with the License. 8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | use crate::{chain_spec, service}; 19 | use crate::cli::{Cli, Subcommand}; 20 | use sc_cli::{SubstrateCli, RuntimeVersion, Role, ChainSpec}; 21 | use sc_service::PartialComponents; 22 | use litentry_runtime::Block; 23 | 24 | impl SubstrateCli for Cli { 25 | fn impl_name() -> String { 26 | "Substrate Node".into() 27 | } 28 | 29 | fn impl_version() -> String { 30 | env!("SUBSTRATE_CLI_IMPL_VERSION").into() 31 | } 32 | 33 | fn description() -> String { 34 | env!("CARGO_PKG_DESCRIPTION").into() 35 | } 36 | 37 | fn author() -> String { 38 | env!("CARGO_PKG_AUTHORS").into() 39 | } 40 | 41 | fn support_url() -> String { 42 | "support.anonymous.an".into() 43 | } 44 | 45 | fn copyright_start_year() -> i32 { 46 | 2017 47 | } 48 | 49 | fn load_spec(&self, id: &str) -> Result, String> { 50 | Ok(match id { 51 | "dev" => Box::new(chain_spec::development_config()?), 52 | "" | "local" => Box::new(chain_spec::local_testnet_config()?), 53 | "litentry" => Box::new(chain_spec::litentry_config()?), 54 | path => Box::new(chain_spec::ChainSpec::from_json_file( 55 | std::path::PathBuf::from(path), 56 | )?), 57 | }) 58 | } 59 | 60 | fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { 61 | &litentry_runtime::VERSION 62 | } 63 | } 64 | 65 | /// Parse and run command line 
arguments 66 | pub fn run() -> sc_cli::Result<()> { 67 | let cli = Cli::from_args(); 68 | 69 | match &cli.subcommand { 70 | Some(Subcommand::Key(cmd)) => cmd.run(&cli), 71 | Some(Subcommand::BuildSpec(cmd)) => { 72 | let runner = cli.create_runner(cmd)?; 73 | runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) 74 | }, 75 | Some(Subcommand::CheckBlock(cmd)) => { 76 | let runner = cli.create_runner(cmd)?; 77 | runner.async_run(|config| { 78 | let PartialComponents { client, task_manager, import_queue, ..} 79 | = service::new_partial(&config)?; 80 | Ok((cmd.run(client, import_queue), task_manager)) 81 | }) 82 | }, 83 | Some(Subcommand::ExportBlocks(cmd)) => { 84 | let runner = cli.create_runner(cmd)?; 85 | runner.async_run(|config| { 86 | let PartialComponents { client, task_manager, ..} 87 | = service::new_partial(&config)?; 88 | Ok((cmd.run(client, config.database), task_manager)) 89 | }) 90 | }, 91 | Some(Subcommand::ExportState(cmd)) => { 92 | let runner = cli.create_runner(cmd)?; 93 | runner.async_run(|config| { 94 | let PartialComponents { client, task_manager, ..} 95 | = service::new_partial(&config)?; 96 | Ok((cmd.run(client, config.chain_spec), task_manager)) 97 | }) 98 | }, 99 | Some(Subcommand::ImportBlocks(cmd)) => { 100 | let runner = cli.create_runner(cmd)?; 101 | runner.async_run(|config| { 102 | let PartialComponents { client, task_manager, import_queue, ..} 103 | = service::new_partial(&config)?; 104 | Ok((cmd.run(client, import_queue), task_manager)) 105 | }) 106 | }, 107 | Some(Subcommand::PurgeChain(cmd)) => { 108 | let runner = cli.create_runner(cmd)?; 109 | runner.sync_run(|config| cmd.run(config.database)) 110 | }, 111 | Some(Subcommand::Revert(cmd)) => { 112 | let runner = cli.create_runner(cmd)?; 113 | runner.async_run(|config| { 114 | let PartialComponents { client, task_manager, backend, ..} 115 | = service::new_partial(&config)?; 116 | Ok((cmd.run(client, backend), task_manager)) 117 | }) 118 | }, 119 | 
Some(Subcommand::Benchmark(cmd)) => { 120 | if cfg!(feature = "runtime-benchmarks") { 121 | let runner = cli.create_runner(cmd)?; 122 | 123 | runner.sync_run(|config| cmd.run::(config)) 124 | } else { 125 | Err("Benchmarking wasn't enabled when building the node. \ 126 | You can enable it with `--features runtime-benchmarks`.".into()) 127 | } 128 | }, 129 | None => { 130 | let runner = cli.create_runner(&cli.run)?; 131 | runner.run_node_until_exit(|config| async move { 132 | match config.role { 133 | Role::Light => service::new_light(config), 134 | _ => service::new_full(config), 135 | }.map_err(sc_cli::Error::Service) 136 | }) 137 | } 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /node/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod chain_spec; 2 | pub mod rpc; 3 | pub mod service; 4 | -------------------------------------------------------------------------------- /node/src/main.rs: -------------------------------------------------------------------------------- 1 | //! Substrate Node Template CLI library. 2 | #![warn(missing_docs)] 3 | 4 | mod chain_spec; 5 | #[macro_use] 6 | mod service; 7 | mod cli; 8 | mod command; 9 | mod rpc; 10 | 11 | fn main() -> sc_cli::Result<()> { 12 | command::run() 13 | } 14 | -------------------------------------------------------------------------------- /node/src/rpc.rs: -------------------------------------------------------------------------------- 1 | //! A collection of node-specific RPC methods. 2 | //! Substrate provides the `sc-rpc` crate, which defines the core RPC layer 3 | //! used by Substrate nodes. This file extends those RPC definitions with 4 | //! capabilities that are specific to this project's runtime configuration. 
5 | 6 | #![warn(missing_docs)] 7 | 8 | use std::sync::Arc; 9 | 10 | use litentry_runtime::{opaque::Block, AccountId, Balance, Index}; 11 | use sp_api::ProvideRuntimeApi; 12 | use sp_blockchain::{Error as BlockChainError, HeaderMetadata, HeaderBackend}; 13 | use sp_block_builder::BlockBuilder; 14 | pub use sc_rpc_api::DenyUnsafe; 15 | use sp_transaction_pool::TransactionPool; 16 | 17 | 18 | /// Full client dependencies. 19 | pub struct FullDeps { 20 | /// The client instance to use. 21 | pub client: Arc, 22 | /// Transaction pool instance. 23 | pub pool: Arc

, 24 | /// Whether to deny unsafe calls 25 | pub deny_unsafe: DenyUnsafe, 26 | } 27 | 28 | /// Instantiate all full RPC extensions. 29 | pub fn create_full( 30 | deps: FullDeps, 31 | ) -> jsonrpc_core::IoHandler where 32 | C: ProvideRuntimeApi, 33 | C: HeaderBackend + HeaderMetadata + 'static, 34 | C: Send + Sync + 'static, 35 | C::Api: substrate_frame_rpc_system::AccountNonceApi, 36 | C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, 37 | C::Api: BlockBuilder, 38 | P: TransactionPool + 'static, 39 | { 40 | use substrate_frame_rpc_system::{FullSystem, SystemApi}; 41 | use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; 42 | 43 | let mut io = jsonrpc_core::IoHandler::default(); 44 | let FullDeps { 45 | client, 46 | pool, 47 | deny_unsafe, 48 | } = deps; 49 | 50 | io.extend_with( 51 | SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)) 52 | ); 53 | 54 | io.extend_with( 55 | TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())) 56 | ); 57 | 58 | // Extend this RPC with a custom API by using the following syntax. 59 | // `YourRpcStruct` should have a reference to a client, which is needed 60 | // to call into the runtime. 61 | // `io.extend_with(YourRpcTrait::to_delegate(YourRpcStruct::new(ReferenceToClient, ...)));` 62 | 63 | io 64 | } 65 | -------------------------------------------------------------------------------- /node/src/service.rs: -------------------------------------------------------------------------------- 1 | //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
2 | 3 | use std::sync::Arc; 4 | use std::time::Duration; 5 | use sc_client_api::{ExecutorProvider, RemoteBackend}; 6 | use litentry_runtime::{self, opaque::Block, RuntimeApi}; 7 | use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; 8 | use sp_inherents::InherentDataProviders; 9 | use sc_executor::native_executor_instance; 10 | pub use sc_executor::NativeExecutor; 11 | use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; 12 | use sc_finality_grandpa::SharedVoterState; 13 | use sc_keystore::LocalKeystore; 14 | 15 | // Our native executor instance. 16 | native_executor_instance!( 17 | pub Executor, 18 | litentry_runtime::api::dispatch, 19 | litentry_runtime::native_version, 20 | frame_benchmarking::benchmarking::HostFunctions, 21 | ); 22 | 23 | type FullClient = sc_service::TFullClient; 24 | type FullBackend = sc_service::TFullBackend; 25 | type FullSelectChain = sc_consensus::LongestChain; 26 | 27 | pub fn new_partial(config: &Configuration) -> Result, 30 | sc_transaction_pool::FullPool, 31 | ( 32 | sc_consensus_aura::AuraBlockImport< 33 | Block, 34 | FullClient, 35 | sc_finality_grandpa::GrandpaBlockImport, 36 | AuraPair 37 | >, 38 | sc_finality_grandpa::LinkHalf, 39 | ) 40 | >, ServiceError> { 41 | if config.keystore_remote.is_some() { 42 | return Err(ServiceError::Other( 43 | format!("Remote Keystores are not supported."))) 44 | } 45 | let inherent_data_providers = sp_inherents::InherentDataProviders::new(); 46 | 47 | let (client, backend, keystore_container, task_manager) = 48 | sc_service::new_full_parts::(&config)?; 49 | let client = Arc::new(client); 50 | 51 | let select_chain = sc_consensus::LongestChain::new(backend.clone()); 52 | 53 | let transaction_pool = sc_transaction_pool::BasicPool::new_full( 54 | config.transaction_pool.clone(), 55 | config.role.is_authority().into(), 56 | config.prometheus_registry(), 57 | task_manager.spawn_handle(), 58 | client.clone(), 59 | ); 60 | 61 | let (grandpa_block_import, grandpa_link) = 
sc_finality_grandpa::block_import( 62 | client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), 63 | )?; 64 | 65 | let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( 66 | grandpa_block_import.clone(), client.clone(), 67 | ); 68 | 69 | let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( 70 | sc_consensus_aura::slot_duration(&*client)?, 71 | aura_block_import.clone(), 72 | Some(Box::new(grandpa_block_import.clone())), 73 | client.clone(), 74 | inherent_data_providers.clone(), 75 | &task_manager.spawn_handle(), 76 | config.prometheus_registry(), 77 | sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), 78 | )?; 79 | 80 | Ok(sc_service::PartialComponents { 81 | client, backend, task_manager, import_queue, keystore_container, select_chain, transaction_pool, 82 | inherent_data_providers, 83 | other: (aura_block_import, grandpa_link), 84 | }) 85 | } 86 | 87 | fn remote_keystore(_url: &String) -> Result, &'static str> { 88 | // FIXME: here would the concrete keystore be built, 89 | // must return a concrete type (NOT `LocalKeystore`) that 90 | // implements `CryptoStore` and `SyncCryptoStore` 91 | Err("Remote Keystore not supported.") 92 | } 93 | 94 | /// Builds a new service for a full client. 
95 | pub fn new_full(mut config: Configuration) -> Result { 96 | let sc_service::PartialComponents { 97 | client, backend, mut task_manager, import_queue, mut keystore_container, select_chain, transaction_pool, 98 | inherent_data_providers, 99 | other: (block_import, grandpa_link), 100 | } = new_partial(&config)?; 101 | 102 | if let Some(url) = &config.keystore_remote { 103 | match remote_keystore(url) { 104 | Ok(k) => keystore_container.set_remote_keystore(k), 105 | Err(e) => { 106 | return Err(ServiceError::Other( 107 | format!("Error hooking up remote keystore for {}: {}", url, e))) 108 | } 109 | }; 110 | } 111 | 112 | config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); 113 | 114 | let (network, network_status_sinks, system_rpc_tx, network_starter) = 115 | sc_service::build_network(sc_service::BuildNetworkParams { 116 | config: &config, 117 | client: client.clone(), 118 | transaction_pool: transaction_pool.clone(), 119 | spawn_handle: task_manager.spawn_handle(), 120 | import_queue, 121 | on_demand: None, 122 | block_announce_validator_builder: None, 123 | })?; 124 | 125 | if config.offchain_worker.enabled { 126 | sc_service::build_offchain_workers( 127 | &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), 128 | ); 129 | } 130 | 131 | let role = config.role.clone(); 132 | let force_authoring = config.force_authoring; 133 | let backoff_authoring_blocks: Option<()> = None; 134 | let name = config.network.node_name.clone(); 135 | let enable_grandpa = !config.disable_grandpa; 136 | let prometheus_registry = config.prometheus_registry().cloned(); 137 | 138 | let rpc_extensions_builder = { 139 | let client = client.clone(); 140 | let pool = transaction_pool.clone(); 141 | 142 | Box::new(move |deny_unsafe, _| { 143 | let deps = crate::rpc::FullDeps { 144 | client: client.clone(), 145 | pool: pool.clone(), 146 | deny_unsafe, 147 | }; 148 | 149 | crate::rpc::create_full(deps) 150 | }) 151 | }; 152 | 153 | 
let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks( 154 | sc_service::SpawnTasksParams { 155 | network: network.clone(), 156 | client: client.clone(), 157 | keystore: keystore_container.sync_keystore(), 158 | task_manager: &mut task_manager, 159 | transaction_pool: transaction_pool.clone(), 160 | rpc_extensions_builder, 161 | on_demand: None, 162 | remote_blockchain: None, 163 | backend, 164 | network_status_sinks, 165 | system_rpc_tx, 166 | config, 167 | }, 168 | )?; 169 | 170 | if role.is_authority() { 171 | let proposer = sc_basic_authorship::ProposerFactory::new( 172 | task_manager.spawn_handle(), 173 | client.clone(), 174 | transaction_pool, 175 | prometheus_registry.as_ref(), 176 | ); 177 | 178 | let can_author_with = 179 | sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); 180 | 181 | let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _, _>( 182 | sc_consensus_aura::slot_duration(&*client)?, 183 | client.clone(), 184 | select_chain, 185 | block_import, 186 | proposer, 187 | network.clone(), 188 | inherent_data_providers.clone(), 189 | force_authoring, 190 | backoff_authoring_blocks, 191 | keystore_container.sync_keystore(), 192 | can_author_with, 193 | )?; 194 | 195 | // the AURA authoring task is considered essential, i.e. if it 196 | // fails we take down the service with it. 197 | task_manager.spawn_essential_handle().spawn_blocking("aura", aura); 198 | } 199 | 200 | // if the node isn't actively participating in consensus then it doesn't 201 | // need a keystore, regardless of which protocol we use below. 
202 | let keystore = if role.is_authority() { 203 | Some(keystore_container.sync_keystore()) 204 | } else { 205 | None 206 | }; 207 | 208 | let grandpa_config = sc_finality_grandpa::Config { 209 | // FIXME #1578 make this available through chainspec 210 | gossip_duration: Duration::from_millis(333), 211 | justification_period: 512, 212 | name: Some(name), 213 | observer_enabled: false, 214 | keystore, 215 | is_authority: role.is_network_authority(), 216 | }; 217 | 218 | if enable_grandpa { 219 | // start the full GRANDPA voter 220 | // NOTE: non-authorities could run the GRANDPA observer protocol, but at 221 | // this point the full voter should provide better guarantees of block 222 | // and vote data availability than the observer. The observer has not 223 | // been tested extensively yet and having most nodes in a network run it 224 | // could lead to finality stalls. 225 | let grandpa_config = sc_finality_grandpa::GrandpaParams { 226 | config: grandpa_config, 227 | link: grandpa_link, 228 | network, 229 | telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), 230 | voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), 231 | prometheus_registry, 232 | shared_voter_state: SharedVoterState::empty(), 233 | }; 234 | 235 | // the GRANDPA voter task is considered infallible, i.e. 236 | // if it fails we take down the service with it. 237 | task_manager.spawn_essential_handle().spawn_blocking( 238 | "grandpa-voter", 239 | sc_finality_grandpa::run_grandpa_voter(grandpa_config)? 240 | ); 241 | } 242 | 243 | network_starter.start_network(); 244 | Ok(task_manager) 245 | } 246 | 247 | /// Builds a new service for a light client. 
248 | pub fn new_light(mut config: Configuration) -> Result { 249 | let (client, backend, keystore_container, mut task_manager, on_demand) = 250 | sc_service::new_light_parts::(&config)?; 251 | 252 | config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); 253 | 254 | let select_chain = sc_consensus::LongestChain::new(backend.clone()); 255 | 256 | let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( 257 | config.transaction_pool.clone(), 258 | config.prometheus_registry(), 259 | task_manager.spawn_handle(), 260 | client.clone(), 261 | on_demand.clone(), 262 | )); 263 | 264 | let (grandpa_block_import, _) = sc_finality_grandpa::block_import( 265 | client.clone(), 266 | &(client.clone() as Arc<_>), 267 | select_chain.clone(), 268 | )?; 269 | 270 | let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( 271 | grandpa_block_import.clone(), 272 | client.clone(), 273 | ); 274 | 275 | let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( 276 | sc_consensus_aura::slot_duration(&*client)?, 277 | aura_block_import, 278 | Some(Box::new(grandpa_block_import)), 279 | client.clone(), 280 | InherentDataProviders::new(), 281 | &task_manager.spawn_handle(), 282 | config.prometheus_registry(), 283 | sp_consensus::NeverCanAuthor, 284 | )?; 285 | 286 | let (network, network_status_sinks, system_rpc_tx, network_starter) = 287 | sc_service::build_network(sc_service::BuildNetworkParams { 288 | config: &config, 289 | client: client.clone(), 290 | transaction_pool: transaction_pool.clone(), 291 | spawn_handle: task_manager.spawn_handle(), 292 | import_queue, 293 | on_demand: Some(on_demand.clone()), 294 | block_announce_validator_builder: None, 295 | })?; 296 | 297 | if config.offchain_worker.enabled { 298 | sc_service::build_offchain_workers( 299 | &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), 300 | ); 301 | } 302 | 303 | 
sc_service::spawn_tasks(sc_service::SpawnTasksParams { 304 | remote_blockchain: Some(backend.remote_blockchain()), 305 | transaction_pool, 306 | task_manager: &mut task_manager, 307 | on_demand: Some(on_demand), 308 | rpc_extensions_builder: Box::new(|_, _| ()), 309 | config, 310 | client, 311 | keystore: keystore_container.sync_keystore(), 312 | backend, 313 | network, 314 | network_status_sinks, 315 | system_rpc_tx, 316 | })?; 317 | 318 | network_starter.start_network(); 319 | 320 | Ok(task_manager) 321 | } 322 | -------------------------------------------------------------------------------- /runtime/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ['Litentry Dev'] 3 | edition = '2018' 4 | homepage = 'https://litentry.com' 5 | license = 'Unlicense' 6 | name = 'litentry-runtime' 7 | repository = 'https://github.com/litentry/litentry-node/' 8 | version = '0.0.1' 9 | 10 | [package.metadata.docs.rs] 11 | targets = ['x86_64-unknown-linux-gnu'] 12 | 13 | [build-dependencies] 14 | substrate-wasm-builder = '4.0.0' 15 | 16 | # alias "parity-scale-code" to "codec" 17 | [dependencies.codec] 18 | default-features = false 19 | features = ['derive'] 20 | package = 'parity-scale-codec' 21 | version = '2.0.0' 22 | 23 | [dependencies] 24 | hex-literal = { optional = true, version = '0.3.1' } 25 | serde = { features = ['derive'], optional = true, version = '1.0.119' } 26 | 27 | # local dependencies 28 | pallet-account-linker = { git = "https://github.com/litentry/litentry-pallets", package = "pallet-account-linker", default-features = false, branch = "dev", version = '0.0.1' } 29 | pallet-offchain-worker = { git = "https://github.com/litentry/litentry-pallets", package = "pallet-offchain-worker", default-features = false, branch = "dev", version = '0.0.1' } 30 | pallet-identity = { git = 'https://github.com/litentry/litentry-pallets', package = 'pallet-identity', default-features = false, branch = 'dev', 
version = '3.0.0' } 31 | 32 | # Substrate dependencies 33 | frame-benchmarking = { default-features = false, optional = true, version = '3.0.0' } 34 | frame-executive = { default-features = false, version = '3.0.0' } 35 | frame-support = { default-features = false, version = '3.0.0' } 36 | frame-system = { default-features = false, version = '3.0.0' } 37 | frame-system-benchmarking = { default-features = false, optional = true, version = '3.0.0' } 38 | frame-system-rpc-runtime-api = { default-features = false, version = '3.0.0' } 39 | pallet-aura = { default-features = false, version = '3.0.0' } 40 | pallet-balances = { default-features = false, version = '3.0.0' } 41 | pallet-collective = { default-features = false, version = '3.0.0' } 42 | pallet-democracy = { default-features = false, version = "3.0.0" } 43 | pallet-grandpa = { default-features = false, version = '3.0.0' } 44 | pallet-randomness-collective-flip = { default-features = false, version = '3.0.0' } 45 | pallet-recovery = { default-features = false, version = '3.0.0' } 46 | pallet-scheduler = { default-features = false, version = '3.0.0' } 47 | pallet-sudo = { default-features = false, version = '3.0.0' } 48 | pallet-proxy = { default-features = false, version = '3.0.0' } 49 | pallet-timestamp = { default-features = false, version = '3.0.0' } 50 | pallet-transaction-payment = { default-features = false, version = '3.0.0' } 51 | pallet-transaction-payment-rpc-runtime-api = { default-features = false, version = '3.0.0' } 52 | pallet-treasury = { default-features = false, version = '3.0.0' } 53 | sp-api = { default-features = false, version = '3.0.0' } 54 | sp-block-builder = { default-features = false, version = '3.0.0' } 55 | sp-consensus-aura = { default-features = false, version = '0.9.0' } 56 | sp-core = { default-features = false, version = '3.0.0' } 57 | sp-inherents = { default-features = false, version = '3.0.0' } 58 | sp-offchain = { default-features = false, version = '3.0.0' } 59 | sp-runtime 
= { default-features = false, version = '3.0.0' } 60 | sp-session = { default-features = false, version = '3.0.0' } 61 | sp-std = { default-features = false, version = '3.0.0' } 62 | sp-transaction-pool = { default-features = false, version = '3.0.0' } 63 | sp-version = { default-features = false, version = '3.0.0' } 64 | 65 | [features] 66 | default = ['std'] 67 | runtime-benchmarks = [ 68 | 'hex-literal', 69 | 'frame-benchmarking', 70 | 'frame-support/runtime-benchmarks', 71 | 'frame-system-benchmarking', 72 | 'frame-system/runtime-benchmarks', 73 | 'pallet-balances/runtime-benchmarks', 74 | 'pallet-timestamp/runtime-benchmarks', 75 | 'pallet-account-linker/runtime-benchmarks', 76 | 'pallet-identity/runtime-benchmarks', 77 | 'pallet-offchain-worker/runtime-benchmarks', 78 | 'sp-runtime/runtime-benchmarks', 79 | ] 80 | std = [ 81 | 'codec/std', 82 | 'serde', 83 | 'frame-executive/std', 84 | 'frame-support/std', 85 | 'frame-system/std', 86 | 'frame-system-rpc-runtime-api/std', 87 | 'pallet-account-linker/std', 88 | 'pallet-aura/std', 89 | 'pallet-balances/std', 90 | 'pallet-collective/std', 91 | 'pallet-democracy/std', 92 | 'pallet-grandpa/std', 93 | 'pallet-identity/std', 94 | 'pallet-offchain-worker/std', 95 | 'pallet-randomness-collective-flip/std', 96 | 'pallet-proxy/std', 97 | 'pallet-recovery/std', 98 | "pallet-scheduler/std", 99 | 'pallet-sudo/std', 100 | 'pallet-timestamp/std', 101 | 'pallet-transaction-payment/std', 102 | 'pallet-transaction-payment-rpc-runtime-api/std', 103 | 'pallet-treasury/std', 104 | 'sp-api/std', 105 | 'sp-block-builder/std', 106 | 'sp-consensus-aura/std', 107 | 'sp-core/std', 108 | 'sp-inherents/std', 109 | 'sp-offchain/std', 110 | 'sp-runtime/std', 111 | 'sp-session/std', 112 | 'sp-std/std', 113 | 'sp-transaction-pool/std', 114 | 'sp-version/std', 115 | ] 116 | -------------------------------------------------------------------------------- /runtime/build.rs: 
-------------------------------------------------------------------------------- 1 | use substrate_wasm_builder::WasmBuilder; 2 | 3 | fn main() { 4 | WasmBuilder::new() 5 | .with_current_project() 6 | .import_memory() 7 | .export_heap_base() 8 | .build() 9 | } 10 | -------------------------------------------------------------------------------- /runtime/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "std"), no_std)] 2 | // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 3 | #![recursion_limit="256"] 4 | 5 | // Make the WASM binary available. 6 | #[cfg(feature = "std")] 7 | include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); 8 | 9 | use sp_std::prelude::*; 10 | use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; 11 | use sp_core::u32_trait::{_1, _2, _3, _4, _5}; 12 | use codec::{Encode, Decode}; 13 | use sp_runtime::{ 14 | ApplyExtrinsicResult, generic, create_runtime_str, impl_opaque_keys, MultiSignature, 15 | transaction_validity::{TransactionValidity, TransactionSource}, 16 | }; 17 | use sp_runtime::traits::{ 18 | BlakeTwo256, Block as BlockT, IdentityLookup, Verify, IdentifyAccount, NumberFor, 19 | SaturatedConversion, 20 | }; 21 | use sp_api::impl_runtime_apis; 22 | use sp_consensus_aura::sr25519::AuthorityId as AuraId; 23 | use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; 24 | use pallet_grandpa::fg_primitives; 25 | use sp_version::RuntimeVersion; 26 | #[cfg(feature = "std")] 27 | use sp_version::NativeVersion; 28 | 29 | // A few exports that help ease life for downstream crates. 
30 | #[cfg(any(feature = "std", test))] 31 | pub use sp_runtime::BuildStorage; 32 | pub use pallet_timestamp::Call as TimestampCall; 33 | pub use pallet_balances::Call as BalancesCall; 34 | pub use sp_runtime::{ModuleId, Percent, Permill, Perbill}; 35 | pub use frame_support::{ 36 | construct_runtime, parameter_types, StorageValue, debug, RuntimeDebug, 37 | traits::{KeyOwnerProofSystem, Randomness, InstanceFilter}, 38 | weights::{ 39 | constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, 40 | DispatchClass, IdentityFee, Weight, 41 | }, 42 | }; 43 | use frame_system::{ 44 | EnsureRoot, EnsureOneOf, 45 | }; 46 | use frame_system::limits::{BlockLength, BlockWeights}; 47 | 48 | /// Import the account-linker pallet. 49 | pub use pallet_account_linker; 50 | 51 | /// Import the offchain-worker pallet. 52 | pub use pallet_offchain_worker; 53 | 54 | /// An index to a block. 55 | pub type BlockNumber = u32; 56 | 57 | /// Alias to 512-bit hash when used in the context of a transaction signature on the chain. 58 | pub type Signature = MultiSignature; 59 | 60 | /// Some way of identifying an account on the chain. We intentionally make it equivalent 61 | /// to the public key of our transaction signing scheme. 62 | pub type AccountId = <::Signer as IdentifyAccount>::AccountId; 63 | 64 | /// The type for looking up accounts. We don't expect more than 4 billion of them, but you 65 | /// never know... 66 | pub type AccountIndex = u32; 67 | 68 | /// Balance of an account. 69 | pub type Balance = u128; 70 | 71 | /// Index of a transaction in the chain. 72 | pub type Index = u32; 73 | 74 | /// A hash of some data used by the chain. 75 | pub type Hash = sp_core::H256; 76 | 77 | /// Digest item type. 78 | pub type DigestItem = generic::DigestItem; 79 | 80 | /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know 81 | /// the specifics of the runtime. 
They can then be made to be agnostic over specific formats 82 | /// of data like extrinsics, allowing for them to continue syncing the network through upgrades 83 | /// to even the core data structures. 84 | pub mod opaque { 85 | use super::*; 86 | 87 | pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; 88 | 89 | /// Opaque block header type. 90 | pub type Header = generic::Header; 91 | /// Opaque block type. 92 | pub type Block = generic::Block; 93 | /// Opaque block identifier type. 94 | pub type BlockId = generic::BlockId; 95 | 96 | impl_opaque_keys! { 97 | pub struct SessionKeys { 98 | pub aura: Aura, 99 | pub grandpa: Grandpa, 100 | } 101 | } 102 | } 103 | 104 | pub const VERSION: RuntimeVersion = RuntimeVersion { 105 | spec_name: create_runtime_str!("litentry-node"), 106 | impl_name: create_runtime_str!("litentry-node"), 107 | authoring_version: 1, 108 | spec_version: 1, 109 | impl_version: 1, 110 | apis: RUNTIME_API_VERSIONS, 111 | transaction_version: 1, 112 | }; 113 | 114 | 115 | /// This determines the average expected block time that we are targeting. 116 | /// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. 117 | /// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked 118 | /// up by `pallet_aura` to implement `fn slot_duration()`. 119 | /// 120 | /// Change this to adjust the block time. 121 | pub const MILLISECS_PER_BLOCK: u64 = 6000; 122 | 123 | pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; 124 | 125 | // Time is measured by number of blocks. 
126 | pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); 127 | pub const HOURS: BlockNumber = MINUTES * 60; 128 | pub const DAYS: BlockNumber = HOURS * 24; 129 | pub const MILLICENTS: Balance = 1_000_000_000; 130 | pub const CENTS: Balance = 1_000 * MILLICENTS; 131 | pub const DOLLARS: Balance = 100 * CENTS; 132 | 133 | pub const fn deposit(items: u32, bytes: u32) -> Balance { 134 | items as Balance * 15 * CENTS + (bytes as Balance) * 6 * CENTS 135 | } 136 | 137 | /// The version information used to identify this runtime when compiled natively. 138 | #[cfg(feature = "std")] 139 | pub fn native_version() -> NativeVersion { 140 | NativeVersion { 141 | runtime_version: VERSION, 142 | can_author_with: Default::default(), 143 | } 144 | } 145 | 146 | /// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. 147 | /// This is used to limit the maximal weight of a single extrinsic. 148 | const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); 149 | /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used 150 | /// by Operational extrinsics. 151 | const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); 152 | /// We allow for 2 seconds of compute with a 6 second average block time. 153 | const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; 154 | 155 | parameter_types! 
{ 156 | pub const BlockHashCount: BlockNumber = 2400; 157 | pub const Version: RuntimeVersion = VERSION; 158 | pub RuntimeBlockLength: BlockLength = 159 | BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); 160 | pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() 161 | .base_block(BlockExecutionWeight::get()) 162 | .for_class(DispatchClass::all(), |weights| { 163 | weights.base_extrinsic = ExtrinsicBaseWeight::get(); 164 | }) 165 | .for_class(DispatchClass::Normal, |weights| { 166 | weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); 167 | }) 168 | .for_class(DispatchClass::Operational, |weights| { 169 | weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); 170 | // Operational transactions have some extra reserved space, so that they 171 | // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. 172 | weights.reserved = Some( 173 | MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT 174 | ); 175 | }) 176 | .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) 177 | .build_or_panic(); 178 | pub const SS58Prefix: u8 = 31; 179 | } 180 | 181 | // Configure FRAME pallets to include in runtime. 182 | impl frame_system::Config for Runtime { 183 | /// The basic call filter to use in dispatchable. 184 | type BaseCallFilter = (); 185 | /// The identifier used to distinguish between accounts. 186 | type AccountId = AccountId; 187 | /// The aggregated dispatch type that is available for extrinsics. 188 | type Call = Call; 189 | /// The lookup mechanism to get account ID from whatever is passed in dispatchers. 190 | type Lookup = IdentityLookup; 191 | /// The index type for storing how many extrinsics an account has signed. 192 | type Index = Index; 193 | /// The index type for blocks. 194 | type BlockNumber = BlockNumber; 195 | /// The type for hashing blocks and tries. 196 | type Hash = Hash; 197 | /// The hashing algorithm used. 198 | type Hashing = BlakeTwo256; 199 | /// The header type. 
200 | type Header = generic::Header; 201 | /// The ubiquitous event type. 202 | type Event = Event; 203 | /// The ubiquitous origin type. 204 | type Origin = Origin; 205 | /// Maximum number of block number to block hash mappings to keep (oldest pruned first). 206 | type BlockHashCount = BlockHashCount; 207 | /// The weight of database operations that the runtime can invoke. 208 | type DbWeight = RocksDbWeight; 209 | /// Version of the runtime. 210 | type Version = Version; 211 | /// Converts a module to the index of the module in `construct_runtime!`. 212 | /// This type is being generated by `construct_runtime!`. 213 | type PalletInfo = PalletInfo; 214 | /// What to do if a new account is created. 215 | type OnNewAccount = (); 216 | /// What to do if an account is fully reaped from the system. 217 | type OnKilledAccount = (); 218 | /// The data to be stored in an account. 219 | type AccountData = pallet_balances::AccountData; 220 | /// Weight information for the extrinsics of this pallet. 221 | type SystemWeightInfo = (); 222 | type BlockWeights = RuntimeBlockWeights; 223 | type BlockLength = RuntimeBlockLength; 224 | type SS58Prefix = SS58Prefix; 225 | } 226 | 227 | impl pallet_aura::Config for Runtime { 228 | type AuthorityId = AuraId; 229 | } 230 | 231 | impl pallet_grandpa::Config for Runtime { 232 | type Event = Event; 233 | type Call = Call; 234 | 235 | type KeyOwnerProofSystem = (); 236 | 237 | type KeyOwnerProof = 238 | >::Proof; 239 | 240 | type KeyOwnerIdentification = >::IdentificationTuple; 244 | 245 | type HandleEquivocation = (); 246 | 247 | type WeightInfo = (); 248 | } 249 | 250 | parameter_types! { 251 | // One storage item; key size 32, value size 8; . 252 | pub const ProxyDepositBase: Balance = deposit(1, 8); 253 | // Additional storage item size of 33 bytes. 
254 | pub const ProxyDepositFactor: Balance = deposit(0, 33); 255 | pub const MaxProxies: u16 = 32; 256 | pub const AnnouncementDepositBase: Balance = deposit(1, 8); 257 | pub const AnnouncementDepositFactor: Balance = deposit(0, 66); 258 | pub const MaxPending: u16 = 32; 259 | } 260 | 261 | /// The type used to represent the kinds of proxying allowed. 262 | #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug)] 263 | pub enum ProxyType { 264 | Any, 265 | NonTransfer, 266 | Governance, 267 | } 268 | impl Default for ProxyType { fn default() -> Self { Self::Any } } 269 | impl InstanceFilter for ProxyType { 270 | fn filter(&self, c: &Call) -> bool { 271 | match self { 272 | ProxyType::Any => true, 273 | ProxyType::NonTransfer => !matches!( 274 | c, 275 | Call::Balances(..) 276 | ), 277 | ProxyType::Governance => matches!( 278 | c, 279 | // Call::Democracy(..) | 280 | Call::Council(..) | 281 | Call::TechnicalCommittee(..) | 282 | Call::Treasury(..) 283 | ), 284 | } 285 | } 286 | fn is_superset(&self, o: &Self) -> bool { 287 | match (self, o) { 288 | (x, y) if x == y => true, 289 | (ProxyType::Any, _) => true, 290 | (_, ProxyType::Any) => false, 291 | (ProxyType::NonTransfer, _) => true, 292 | _ => false, 293 | } 294 | } 295 | } 296 | 297 | impl pallet_proxy::Config for Runtime { 298 | type Event = Event; 299 | type Call = Call; 300 | type Currency = Balances; 301 | type ProxyType = ProxyType; 302 | type ProxyDepositBase = ProxyDepositBase; 303 | type ProxyDepositFactor = ProxyDepositFactor; 304 | type MaxProxies = MaxProxies; 305 | type WeightInfo = pallet_proxy::weights::SubstrateWeight; 306 | type MaxPending = MaxPending; 307 | type CallHasher = BlakeTwo256; 308 | type AnnouncementDepositBase = AnnouncementDepositBase; 309 | type AnnouncementDepositFactor = AnnouncementDepositFactor; 310 | } 311 | 312 | 313 | parameter_types! 
{ 314 | pub const ConfigDepositBase: Balance = 5 * DOLLARS; 315 | pub const FriendDepositFactor: Balance = 50 * CENTS; 316 | pub const MaxFriends: u16 = 9; 317 | pub const RecoveryDeposit: Balance = 5 * DOLLARS; 318 | } 319 | 320 | impl pallet_recovery::Config for Runtime { 321 | type Event = Event; 322 | type Call = Call; 323 | type Currency = Balances; 324 | type ConfigDepositBase = ConfigDepositBase; 325 | type FriendDepositFactor = FriendDepositFactor; 326 | type MaxFriends = MaxFriends; 327 | type RecoveryDeposit = RecoveryDeposit; 328 | } 329 | 330 | parameter_types! { 331 | pub const MinimumPeriod: u64 = SLOT_DURATION / 2; 332 | } 333 | 334 | impl pallet_timestamp::Config for Runtime { 335 | /// A timestamp: milliseconds since the unix epoch. 336 | type Moment = u64; 337 | type OnTimestampSet = Aura; 338 | type MinimumPeriod = MinimumPeriod; 339 | type WeightInfo = (); 340 | } 341 | 342 | parameter_types! { 343 | pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * 344 | RuntimeBlockWeights::get().max_block; 345 | pub const MaxScheduledPerBlock: u32 = 50; 346 | } 347 | impl pallet_scheduler::Config for Runtime { 348 | type Event = Event; 349 | type Origin = Origin; 350 | type PalletsOrigin = OriginCaller; 351 | type Call = Call; 352 | type MaximumWeight = MaximumSchedulerWeight; 353 | type ScheduleOrigin = EnsureRoot; 354 | type MaxScheduledPerBlock = MaxScheduledPerBlock; 355 | type WeightInfo = pallet_scheduler::weights::SubstrateWeight; 356 | } 357 | 358 | parameter_types! { 359 | pub const ExistentialDeposit: u128 = 500; 360 | pub const MaxLocks: u32 = 50; 361 | } 362 | 363 | impl pallet_balances::Config for Runtime { 364 | type MaxLocks = MaxLocks; 365 | /// The type for recording an account's balance. 366 | type Balance = Balance; 367 | /// The ubiquitous event type. 
368 | type Event = Event; 369 | type DustRemoval = (); 370 | type ExistentialDeposit = ExistentialDeposit; 371 | type AccountStore = System; 372 | type WeightInfo = pallet_balances::weights::SubstrateWeight; 373 | } 374 | 375 | parameter_types! { 376 | pub const TransactionByteFee: Balance = 1; 377 | } 378 | 379 | impl pallet_transaction_payment::Config for Runtime { 380 | type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; 381 | type TransactionByteFee = TransactionByteFee; 382 | type WeightToFee = IdentityFee; 383 | type FeeMultiplierUpdate = (); 384 | } 385 | 386 | impl pallet_sudo::Config for Runtime { 387 | type Event = Event; 388 | type Call = Call; 389 | } 390 | 391 | parameter_types! { 392 | pub const LaunchPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; 393 | pub const VotingPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; 394 | pub const FastTrackVotingPeriod: BlockNumber = 3 * 24 * 60 * MINUTES; 395 | pub const InstantAllowed: bool = true; 396 | pub const MinimumDeposit: Balance = 100 * DOLLARS; 397 | pub const EnactmentPeriod: BlockNumber = 30 * 24 * 60 * MINUTES; 398 | pub const CooloffPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; 399 | // One cent: $10,000 / MB 400 | pub const PreimageByteDeposit: Balance = 1 * CENTS; 401 | pub const MaxVotes: u32 = 100; 402 | pub const MaxProposals: u32 = 100; 403 | } 404 | 405 | impl pallet_democracy::Config for Runtime { 406 | type Proposal = Call; 407 | type Event = Event; 408 | type Currency = Balances; 409 | type EnactmentPeriod = EnactmentPeriod; 410 | type LaunchPeriod = LaunchPeriod; 411 | type VotingPeriod = VotingPeriod; 412 | type MinimumDeposit = MinimumDeposit; 413 | /// A straight majority of the council can decide what their next motion is. 414 | type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; 415 | /// A super-majority can have the next scheduled referendum be a straight majority-carries vote. 
416 | type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; 417 | /// A unanimous council can have the next scheduled referendum be a straight default-carries 418 | /// (NTB) vote. 419 | type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; 420 | /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote 421 | /// be tabled immediately and with a shorter voting/enactment period. 422 | type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; 423 | type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; 424 | type InstantAllowed = InstantAllowed; 425 | type FastTrackVotingPeriod = FastTrackVotingPeriod; 426 | // To cancel a proposal which has been passed, 2/3 of the council must agree to it. 427 | type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; 428 | // To cancel a proposal before it has been passed, the technical committee must be unanimous or 429 | // Root must agree. 430 | type CancelProposalOrigin = EnsureOneOf< 431 | AccountId, 432 | EnsureRoot, 433 | pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>, 434 | >; 435 | type BlacklistOrigin = EnsureRoot; 436 | // Any single technical committee member may veto a coming council proposal, however they can 437 | // only do it once and it lasts only for the cool-off period. 
438 | type VetoOrigin = pallet_collective::EnsureMember; 439 | type CooloffPeriod = CooloffPeriod; 440 | type PreimageByteDeposit = PreimageByteDeposit; 441 | type OperationalPreimageOrigin = pallet_collective::EnsureMember; 442 | type Slash = Treasury; 443 | type Scheduler = Scheduler; 444 | type PalletsOrigin = OriginCaller; 445 | type MaxVotes = MaxVotes; 446 | type WeightInfo = pallet_democracy::weights::SubstrateWeight; 447 | type MaxProposals = MaxProposals; 448 | } 449 | 450 | parameter_types! { 451 | pub const CouncilMotionDuration: BlockNumber = 3 * DAYS; 452 | pub const CouncilMaxProposals: u32 = 100; 453 | pub const CouncilMaxMembers: u32 = 100; 454 | } 455 | 456 | type CouncilCollective = pallet_collective::Instance1; 457 | impl pallet_collective::Config for Runtime { 458 | type Origin = Origin; 459 | type Proposal = Call; 460 | type Event = Event; 461 | type MotionDuration = CouncilMotionDuration; 462 | type MaxProposals = CouncilMaxProposals; 463 | type MaxMembers = CouncilMaxMembers; 464 | type DefaultVote = pallet_collective::PrimeDefaultVote; 465 | type WeightInfo = pallet_collective::weights::SubstrateWeight; 466 | } 467 | 468 | parameter_types! { 469 | pub const TechnicalMotionDuration: BlockNumber = 3 * DAYS; 470 | pub const TechnicalMaxProposals: u32 = 100; 471 | pub const TechnicalMaxMembers: u32 = 100; 472 | } 473 | 474 | type TechnicalCollective = pallet_collective::Instance2; 475 | impl pallet_collective::Config for Runtime { 476 | type Origin = Origin; 477 | type Proposal = Call; 478 | type Event = Event; 479 | type MotionDuration = TechnicalMotionDuration; 480 | type MaxProposals = TechnicalMaxProposals; 481 | type MaxMembers = TechnicalMaxMembers; 482 | type DefaultVote = pallet_collective::PrimeDefaultVote; 483 | type WeightInfo = pallet_collective::weights::SubstrateWeight; 484 | } 485 | 486 | 487 | parameter_types! 
{ 488 | pub const ProposalBond: Permill = Permill::from_percent(5); 489 | pub const ProposalBondMinimum: Balance = 1 * DOLLARS; 490 | pub const SpendPeriod: BlockNumber = 1 * DAYS; 491 | pub const Burn: Permill = Permill::from_percent(50); 492 | pub const TipCountdown: BlockNumber = 1 * DAYS; 493 | pub const TipFindersFee: Percent = Percent::from_percent(20); 494 | pub const TipReportDepositBase: Balance = 1 * DOLLARS; 495 | pub const DataDepositPerByte: Balance = 1 * CENTS; 496 | pub const BountyDepositBase: Balance = 1 * DOLLARS; 497 | pub const BountyDepositPayoutDelay: BlockNumber = 1 * DAYS; 498 | pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); 499 | pub const BountyUpdatePeriod: BlockNumber = 14 * DAYS; 500 | pub const MaximumReasonLength: u32 = 16384; 501 | pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); 502 | pub const BountyValueMinimum: Balance = 5 * DOLLARS; 503 | } 504 | 505 | 506 | impl pallet_treasury::Config for Runtime { 507 | type ModuleId = TreasuryModuleId; 508 | type Currency = Balances; 509 | type ApproveOrigin = EnsureOneOf< 510 | AccountId, 511 | EnsureRoot, 512 | pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> 513 | >; 514 | type RejectOrigin = EnsureOneOf< 515 | AccountId, 516 | EnsureRoot, 517 | pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> 518 | >; 519 | type Event = Event; 520 | type OnSlash = (); 521 | type ProposalBond = ProposalBond; 522 | type ProposalBondMinimum = ProposalBondMinimum; 523 | type SpendPeriod = SpendPeriod; 524 | type Burn = Burn; 525 | type BurnDestination = (); 526 | type SpendFunds = (); 527 | type WeightInfo = pallet_treasury::weights::SubstrateWeight; 528 | } 529 | 530 | 531 | /// Configure the template pallet in pallets/template. 
532 | impl pallet_account_linker::Config for Runtime { 533 | type Event = Event; 534 | type WeightInfo = pallet_account_linker::weights::SubstrateWeight; 535 | } 536 | 537 | // We need to define the Transaction signer for that using the Key definition 538 | // type SubmitPFTransaction = frame_system::offchain::TransactionSubmitter< 539 | // pallet_offchain_worker::crypto::Public, 540 | // Runtime, 541 | // UncheckedExtrinsic 542 | // >; 543 | 544 | parameter_types! { 545 | pub const QueryTaskRedundancy: u32 = 3; 546 | pub const QuerySessionLength: u32 = 5; 547 | pub const OcwQueryReward: Balance = 1 * DOLLARS; 548 | } 549 | 550 | /// Configure the template pallet in pallets/template. 551 | impl pallet_offchain_worker::Config for Runtime { 552 | type Event = Event; 553 | type Call = Call; 554 | type Balance = Balance; 555 | type AuthorityId = pallet_offchain_worker::crypto::TestAuthId; 556 | type QueryTaskRedundancy = QueryTaskRedundancy; 557 | type QuerySessionLength = QuerySessionLength; 558 | type Currency = Balances; 559 | type Reward = (); 560 | type OcwQueryReward = OcwQueryReward; 561 | type WeightInfo = pallet_offchain_worker::weights::SubstrateWeight; 562 | } 563 | 564 | /// Configure the pallet-identity 565 | type MoreThanHalfCouncil = EnsureOneOf< 566 | AccountId, 567 | EnsureRoot, 568 | pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> 569 | >; 570 | parameter_types! 
{ 571 | // Minimum 4 CENTS/byte 572 | pub const BasicDeposit: Balance = deposit(1, 258); 573 | pub const FieldDeposit: Balance = deposit(0, 66); 574 | pub const SubAccountDeposit: Balance = deposit(1, 53); 575 | pub const MaxSubAccounts: u32 = 100; 576 | pub const MaxAdditionalFields: u32 = 100; 577 | pub const MaxRegistrars: u32 = 20; 578 | } 579 | 580 | impl pallet_identity::Config for Runtime { 581 | type Event = Event; 582 | type Currency = Balances; 583 | type BasicDeposit = BasicDeposit; 584 | type FieldDeposit = FieldDeposit; 585 | type SubAccountDeposit = SubAccountDeposit; 586 | type MaxSubAccounts = MaxSubAccounts; 587 | type MaxAdditionalFields = MaxAdditionalFields; 588 | type MaxRegistrars = MaxRegistrars; 589 | type Slashed = Treasury; 590 | type ForceOrigin = MoreThanHalfCouncil; 591 | type RegistrarOrigin = MoreThanHalfCouncil; 592 | type WeightInfo = pallet_identity::weights::SubstrateWeight; 593 | } 594 | 595 | pub type SignedPayload = generic::SignedPayload; 596 | 597 | impl frame_system::offchain::CreateSignedTransaction for Runtime 598 | where 599 | Call: From, 600 | { 601 | fn create_transaction>( 602 | call: Call, 603 | public: ::Signer, 604 | account: AccountId, 605 | index: Index, 606 | ) -> Option<( 607 | Call, 608 | ::SignaturePayload, 609 | )> { 610 | let period = BlockHashCount::get() as u64; 611 | let current_block = System::block_number() 612 | .saturated_into::() 613 | .saturating_sub(1); 614 | let tip = 0; 615 | let extra: SignedExtra = ( 616 | frame_system::CheckSpecVersion::::new(), 617 | frame_system::CheckTxVersion::::new(), 618 | frame_system::CheckGenesis::::new(), 619 | frame_system::CheckEra::::from(generic::Era::mortal(period, current_block)), 620 | frame_system::CheckNonce::::from(index), 621 | frame_system::CheckWeight::::new(), 622 | pallet_transaction_payment::ChargeTransactionPayment::::from(tip), 623 | ); 624 | 625 | #[cfg_attr(not(feature = "std"), allow(unused_variables))] 626 | let raw_payload = 
SignedPayload::new(call, extra) 627 | .map_err(|e| { 628 | debug::native::warn!("SignedPayload error: {:?}", e); 629 | }) 630 | .ok()?; 631 | 632 | let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; 633 | 634 | let address = account; 635 | let (call, extra, _) = raw_payload.deconstruct(); 636 | Some((call, (address, signature, extra))) 637 | } 638 | } 639 | 640 | impl frame_system::offchain::SigningTypes for Runtime { 641 | type Public = ::Signer; 642 | type Signature = Signature; 643 | } 644 | 645 | impl frame_system::offchain::SendTransactionTypes for Runtime 646 | where 647 | Call: From, 648 | { 649 | type OverarchingCall = Call; 650 | type Extrinsic = UncheckedExtrinsic; 651 | } 652 | 653 | // Create the runtime by composing the FRAME pallets that were previously configured. 654 | construct_runtime!( 655 | pub enum Runtime where 656 | Block = Block, 657 | NodeBlock = opaque::Block, 658 | UncheckedExtrinsic = UncheckedExtrinsic 659 | { 660 | System: frame_system::{Module, Call, Config, Storage, Event}, 661 | RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, 662 | Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, 663 | Aura: pallet_aura::{Module, Config}, 664 | Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event}, 665 | Balances: pallet_balances::{Module, Call, Storage, Config, Event}, 666 | TransactionPayment: pallet_transaction_payment::{Module, Storage}, 667 | Treasury: pallet_treasury::{Module, Call, Storage, Config, Event}, 668 | Scheduler: pallet_scheduler::{Module, Call, Storage, Event}, 669 | Sudo: pallet_sudo::{Module, Call, Config, Storage, Event}, 670 | Democracy: pallet_democracy::{Module, Call, Storage, Config, Event}, 671 | Council: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, 672 | TechnicalCommittee: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, 673 | Proxy: pallet_proxy::{Module, Call, Storage, Event}, 674 | 
Recovery: pallet_recovery::{Module, Call, Storage, Event}, 675 | Identity: pallet_identity::{Module, Call, Storage, Event}, 676 | // Include the custom logic from the template pallet in the runtime. 677 | AccountLinkerModule: pallet_account_linker::{Module, Call, Storage, Event}, 678 | OffchainWorkerModule: pallet_offchain_worker::{Module, Call, Storage, Event}, 679 | } 680 | ); 681 | 682 | /// The address format for describing accounts. 683 | pub type Address = AccountId; 684 | /// Block header type as expected by this runtime. 685 | pub type Header = generic::Header; 686 | /// Block type as expected by this runtime. 687 | pub type Block = generic::Block; 688 | /// A Block signed with a Justification 689 | pub type SignedBlock = generic::SignedBlock; 690 | /// BlockId type as expected by this runtime. 691 | pub type BlockId = generic::BlockId; 692 | /// The SignedExtension to the basic transaction logic. 693 | pub type SignedExtra = ( 694 | frame_system::CheckSpecVersion, 695 | frame_system::CheckTxVersion, 696 | frame_system::CheckGenesis, 697 | frame_system::CheckEra, 698 | frame_system::CheckNonce, 699 | frame_system::CheckWeight, 700 | pallet_transaction_payment::ChargeTransactionPayment 701 | ); 702 | /// Unchecked extrinsic type as expected by this runtime. 703 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; 704 | /// Extrinsic type that has already been checked. 705 | pub type CheckedExtrinsic = generic::CheckedExtrinsic; 706 | /// Executive: handles dispatch to the various modules. 707 | pub type Executive = frame_executive::Executive< 708 | Runtime, 709 | Block, 710 | frame_system::ChainContext, 711 | Runtime, 712 | AllModules, 713 | >; 714 | 715 | impl_runtime_apis! 
{ 716 | impl sp_api::Core for Runtime { 717 | fn version() -> RuntimeVersion { 718 | VERSION 719 | } 720 | 721 | fn execute_block(block: Block) { 722 | Executive::execute_block(block) 723 | } 724 | 725 | fn initialize_block(header: &::Header) { 726 | Executive::initialize_block(header) 727 | } 728 | } 729 | 730 | impl sp_api::Metadata for Runtime { 731 | fn metadata() -> OpaqueMetadata { 732 | Runtime::metadata().into() 733 | } 734 | } 735 | 736 | impl sp_block_builder::BlockBuilder for Runtime { 737 | fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { 738 | Executive::apply_extrinsic(extrinsic) 739 | } 740 | 741 | fn finalize_block() -> ::Header { 742 | Executive::finalize_block() 743 | } 744 | 745 | fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { 746 | data.create_extrinsics() 747 | } 748 | 749 | fn check_inherents( 750 | block: Block, 751 | data: sp_inherents::InherentData, 752 | ) -> sp_inherents::CheckInherentsResult { 753 | data.check_extrinsics(&block) 754 | } 755 | 756 | fn random_seed() -> ::Hash { 757 | RandomnessCollectiveFlip::random_seed() 758 | } 759 | } 760 | 761 | impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { 762 | fn validate_transaction( 763 | source: TransactionSource, 764 | tx: ::Extrinsic, 765 | ) -> TransactionValidity { 766 | Executive::validate_transaction(source, tx) 767 | } 768 | } 769 | 770 | impl sp_offchain::OffchainWorkerApi for Runtime { 771 | fn offchain_worker(header: &::Header) { 772 | Executive::offchain_worker(header) 773 | } 774 | } 775 | 776 | impl sp_consensus_aura::AuraApi for Runtime { 777 | fn slot_duration() -> u64 { 778 | Aura::slot_duration() 779 | } 780 | 781 | fn authorities() -> Vec { 782 | Aura::authorities() 783 | } 784 | } 785 | 786 | impl sp_session::SessionKeys for Runtime { 787 | fn generate_session_keys(seed: Option>) -> Vec { 788 | opaque::SessionKeys::generate(seed) 789 | } 790 | 791 | fn decode_session_keys( 792 | encoded: Vec, 
793 | ) -> Option, KeyTypeId)>> { 794 | opaque::SessionKeys::decode_into_raw_public_keys(&encoded) 795 | } 796 | } 797 | 798 | impl fg_primitives::GrandpaApi for Runtime { 799 | fn grandpa_authorities() -> GrandpaAuthorityList { 800 | Grandpa::grandpa_authorities() 801 | } 802 | 803 | fn submit_report_equivocation_unsigned_extrinsic( 804 | _equivocation_proof: fg_primitives::EquivocationProof< 805 | ::Hash, 806 | NumberFor, 807 | >, 808 | _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, 809 | ) -> Option<()> { 810 | None 811 | } 812 | 813 | fn generate_key_ownership_proof( 814 | _set_id: fg_primitives::SetId, 815 | _authority_id: GrandpaId, 816 | ) -> Option { 817 | // NOTE: this is the only implementation possible since we've 818 | // defined our key owner proof type as a bottom type (i.e. a type 819 | // with no values). 820 | None 821 | } 822 | } 823 | 824 | impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { 825 | fn account_nonce(account: AccountId) -> Index { 826 | System::account_nonce(account) 827 | } 828 | } 829 | 830 | impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { 831 | fn query_info( 832 | uxt: ::Extrinsic, 833 | len: u32, 834 | ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { 835 | TransactionPayment::query_info(uxt, len) 836 | } 837 | 838 | fn query_fee_details( 839 | uxt: ::Extrinsic, 840 | len: u32, 841 | ) -> pallet_transaction_payment::FeeDetails { 842 | TransactionPayment::query_fee_details(uxt, len) 843 | } 844 | } 845 | 846 | #[cfg(feature = "runtime-benchmarks")] 847 | impl frame_benchmarking::Benchmark for Runtime { 848 | fn dispatch_benchmark( 849 | config: frame_benchmarking::BenchmarkConfig 850 | ) -> Result, sp_runtime::RuntimeString> { 851 | use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; 852 | 853 | use frame_system_benchmarking::Module as SystemBench; 854 | impl frame_system_benchmarking::Config for Runtime {} 855 
| 856 | let whitelist: Vec = vec![ 857 | // Block Number 858 | hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), 859 | // Total Issuance 860 | hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), 861 | // Execution Phase 862 | hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), 863 | // Event Count 864 | hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), 865 | // System Events 866 | hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), 867 | ]; 868 | 869 | let mut batches = Vec::::new(); 870 | let params = (&config, &whitelist); 871 | 872 | add_benchmark!(params, batches, frame_system, SystemBench::); 873 | add_benchmark!(params, batches, pallet_balances, Balances); 874 | add_benchmark!(params, batches, pallet_timestamp, Timestamp); 875 | add_benchmark!(params, batches, pallet_offchain_worker, OffchainWorkerModule); 876 | add_benchmark!(params, batches, pallet_account_linker, AccountLinkerModule); 877 | 878 | 879 | if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } 880 | Ok(batches) 881 | } 882 | } 883 | } 884 | -------------------------------------------------------------------------------- /scripts/check-rust-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | prompt_message=true 4 | 5 | prompt() { 6 | if [[ "$prompt_message" -eq "true" ]]; then 7 | echo "Please following the following instruction to setup *rust* environment" 8 | echo "https://substrate.dev/docs/en/knowledgebase/getting-started" 9 | prompt_message=false 10 | fi 11 | } 12 | 13 | check_rust() { 14 | echo "Checking rustup..." 
15 | found=`which rustup | wc -l` 16 | if [ "$found" -ne "1" ]; then 17 | echo "Not found rustup" 18 | prompt 19 | return 1 20 | else 21 | rustup check 22 | fi 23 | } 24 | 25 | 26 | check_wasm() { 27 | echo "Checking wasm32-unknown-unknown ..." 28 | found=`rustup target list | grep wasm32-unknown-unknown | grep installed | wc -l` 29 | if [ "$found" -ne "1" ]; then 30 | echo "Not found wasm32-unknown-unknown" 31 | prompt 32 | return 1 33 | else 34 | echo "Found wasm32-unknown-unknown" 35 | fi 36 | } 37 | 38 | 39 | check_rust && check_wasm 40 | -------------------------------------------------------------------------------- /scripts/start-devnet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | EXECUTOR= 4 | BINARY=litentry-node 5 | 6 | # 1. Locate project workspace 7 | SCRIPT_DIR="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)" 8 | CWD=$(dirname $SCRIPT_DIR) 9 | 10 | # 2. Determine exector, prefer to execute release version 11 | if [[ -f $CWD/target/release/$BINARY ]] 12 | then 13 | EXECUTOR=$CWD/target/release/$BINARY 14 | elif [[ -f $CWD/target/debug/$BINARY ]] 15 | then 16 | EXECUTOR=$CWD/target/debug/$BINARY 17 | else 18 | echo "No available binary found. Exiting..." 19 | exit 1 20 | fi 21 | 22 | # 2.1 Check *rust* env 23 | . $SCRIPT_DIR/check-rust-env.sh || exit 1 24 | 25 | # 3. Execute 26 | echo "Exector: $EXECUTOR" 27 | 28 | stopNodes() { 29 | local numOfProcess=-1 30 | while [ "$numOfProcess" -ne "0" ]; do 31 | echo "Killing $BINARY ..." 32 | pkill $BINARY 33 | sleep 1 34 | numOfProcess=`ps aux | grep $BINARY | grep -v grep | wc -l` 35 | done 36 | } 37 | # stop all nodes 38 | stopNodes 39 | 40 | echo "Starting dev node ..." 
41 | $EXECUTOR --tmp --dev --rpc-external --ws-external --rpc-methods Unsafe --rpc-cors all --alice 42 | -------------------------------------------------------------------------------- /scripts/start-testnet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ECHO="echo" 4 | if [ `uname` = "Darwin" ]; then 5 | echo "MacOSX system" 6 | ECHO="echo" 7 | elif [ `uname` = "Linux" ]; then 8 | echo "Linux system" 9 | ECHO="echo -e" 10 | fi 11 | 12 | EXECUTOR= 13 | BINARY=litentry-node 14 | CHAIN_SPEC=litentry 15 | 16 | # 1. Locate project workspace 17 | SCRIPT_DIR="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)" 18 | CWD=$(dirname $SCRIPT_DIR) 19 | 20 | # 2. Determine the executor, preferring the release build over the debug build 21 | if [[ -f $CWD/target/release/$BINARY ]] 22 | then 23 | EXECUTOR=$CWD/target/release/$BINARY 24 | elif [[ -f $CWD/target/debug/$BINARY ]] 25 | then 26 | EXECUTOR=$CWD/target/debug/$BINARY 27 | else 28 | $ECHO "No available binary found. Exiting..." 29 | exit 1 30 | fi 31 | 32 | # 2.1 Check *rust* env 33 | . $SCRIPT_DIR/check-rust-env.sh || exit 1 34 | 35 | # 3. Execute 36 | $ECHO "Exector: $EXECUTOR" 37 | 38 | stopNodes() { 39 | local numOfProcess=-1 40 | while [ "$numOfProcess" -ne "0" ]; do 41 | echo "Killing $BINARY ..." 42 | pkill $BINARY 43 | sleep 1 44 | numOfProcess=`ps aux | grep $BINARY | grep -v grep | wc -l` 45 | done 46 | } 47 | # stop all nodes 48 | stopNodes 49 | 50 | getip() { 51 | local ip= 52 | interfaces=(en0 eth0) 53 | for interface in ${interfaces[@]} 54 | do 55 | 56 | valid_interface=`ifconfig $interface &> /dev/null` 57 | if [ "$?" == "0" ] 58 | then 59 | ip=`ifconfig $interface | grep "inet " | awk '{print $2}'` 60 | break 61 | fi 62 | done 63 | $ECHO $ip 64 | } 65 | 66 | ip=$(getip) 67 | 68 | colorText() { 69 | text=$1 70 | NC='\033[0m' 71 | color='\033[0;32m' 72 | $ECHO "${color}${text}${NC}" 73 | } 74 | 75 | colorText "Starting node 01 ..."
76 | $EXECUTOR --chain litentry --rpc-external --ws-external --rpc-methods Unsafe --rpc-cors all --ws-port 9900 --port 30334 --rpc-port 9901 --validator -d /tmp/1 &> /dev/null & 77 | 78 | sleep 3 79 | 80 | node_identity=`curl -s http://$ip:9901 -H "Content-Type:application/json;charset=utf-8" -d '{ "jsonrpc": "2.0", "id": 1, "method": "system_localPeerId", "params": [] }' | jq -r '.result'` 81 | colorText "Node identity of node 01: $node_identity" 82 | 83 | colorText "Starting node 02 ..." 84 | $EXECUTOR --chain litentry --rpc-external --ws-external --rpc-methods Unsafe --rpc-cors all --ws-port 9902 --port 30335 --rpc-port 9903 --validator -d /tmp/2 --bootnodes /ip4/$ip/tcp/30334/p2p/$node_identity &> /dev/null & 85 | 86 | sleep 3 87 | 88 | colorText "Running nodes:" 89 | ps aux|grep $BINARY | grep -v grep 90 | -------------------------------------------------------------------------------- /scripts/testnet/benchmark.md: -------------------------------------------------------------------------------- 1 | # Benchmark 2 | 3 | ## Hardware config 4 | CPU, Intel i7-7700k - 4c/8t @4.20GHz 5 | Memory: 64GB, DDR4, 2400 MHz 6 | OS: ubuntu 20.04 7 | Rust version: rustc 1.43.0 8 | 9 | Rise-2 10 | -------------------------------------------------------------------------------- /scripts/testnet/insert-key.sh: -------------------------------------------------------------------------------- 1 | SCRIPT_DIR="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)" 2 | 3 | HOST1=localhost 4 | RPC_PORT1=9901 5 | curl http://$HOST1:9901 -H "Content-Type:application/json;charset=utf-8" -d "@$SCRIPT_DIR/node01-aura.json" 6 | sleep 1 7 | curl http://$HOST1:9901 -H "Content-Type:application/json;charset=utf-8" -d "@$SCRIPT_DIR/node01-gran.json" 8 | sleep 1 9 | 10 | HOST2=localhost 11 | RPC_PORT2=9903 12 | curl http://$HOST2:9903 -H "Content-Type:application/json;charset=utf-8" -d "@$SCRIPT_DIR/node02-aura.json" 13 | sleep 1 14 | curl http://$HOST2:9903 -H 
"Content-Type:application/json;charset=utf-8" -d "@$SCRIPT_DIR/node02-gran.json" 15 | sleep 1 16 | -------------------------------------------------------------------------------- /scripts/testnet/node01-aura.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litentry/litentry-node/14953d5dca77312224ee045cef97bacd78381aa0/scripts/testnet/node01-aura.json -------------------------------------------------------------------------------- /scripts/testnet/node01-gran.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litentry/litentry-node/14953d5dca77312224ee045cef97bacd78381aa0/scripts/testnet/node01-gran.json -------------------------------------------------------------------------------- /scripts/testnet/node02-aura.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litentry/litentry-node/14953d5dca77312224ee045cef97bacd78381aa0/scripts/testnet/node02-aura.json -------------------------------------------------------------------------------- /scripts/testnet/node02-gran.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litentry/litentry-node/14953d5dca77312224ee045cef97bacd78381aa0/scripts/testnet/node02-gran.json -------------------------------------------------------------------------------- /scripts/testnet/testnet-accounts.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litentry/litentry-node/14953d5dca77312224ee045cef97bacd78381aa0/scripts/testnet/testnet-accounts.md -------------------------------------------------------------------------------- /ts-tests/README.md: -------------------------------------------------------------------------------- 1 | # Litentry Integration Test 2 | 3 | This node.js project aims to 
test and verify Litentry Runtime as a whole, including the interactions between user, AccountLink and OffChainWorker. 4 | 5 | ## Install 6 | 7 | `npm i` 8 | 9 | ## Run 10 | 11 | For now, you need to start the node manually first by the command 12 | 13 | `./target/debug/litentry-node --dev --tmp` 14 | 15 | And then run the command 16 | 17 | `ts-node ts-tests/tests/test-eth-balance.ts` 18 | 19 | Later, a better test suite will be built, with a better framework integrated and full test automation. 20 | 21 | ## Output 22 | 23 | The current test runs through the following steps: link eth account -> check account linking state -> asset claim -> check asset balances. 24 | 25 | There are test assertions in step 2 and step 4. Therefore, if an assertion fails while you are running the tests, something is probably broken. -------------------------------------------------------------------------------- /ts-tests/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "litentry-integration-test", 3 | "version": "1.0.0", 4 | "description": "This is a set of integration tests for litentry runtime, using TypeScript.", 5 | "main": "index.js", 6 | "directories": { 7 | "test": "tests" 8 | }, 9 | "scripts": { 10 | "test": "mocha --exit -r ts-node/register 'tests/**/*.ts'" 11 | }, 12 | "author": "Han Zhao", 13 | "license": "ISC", 14 | "devDependencies": { 15 | "@types/chai": "^4.2.15", 16 | "@types/mocha": "^8.2.1", 17 | "chai": "^4.3.0", 18 | "mocha": "^8.3.0", 19 | "mocha-steps": "^1.3.0", 20 | "typescript": "^4.1.5" 21 | }, 22 | "dependencies": { 23 | "@polkadot/api": "^2.10.1", 24 | "ts-node": "^9.1.1", 25 | "web3": "^1.3.4" 26 | }, 27 | "compilerOptions": { 28 | "allowSyntheticDefaultImports": true, 29 | "esModuleInterop": true 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /ts-tests/scripts/export_env.sh: -------------------------------------------------------------------------------- 1 |
#!/bin/sh 2 | export etherscan="RF71W4Z2RDA7XQD6EN19NGB66C2QD9UPHB" 3 | export infura="aa0a6af5f94549928307febe80612a2a" 4 | export blockchain="" 5 | -------------------------------------------------------------------------------- /ts-tests/scripts/smoke-test.ts: -------------------------------------------------------------------------------- 1 | import { ApiPromise, Keyring, WsProvider } from "@polkadot/api"; 2 | import { KeyringPair } from "@polkadot/keyring/types"; 3 | import { U8aFixed, UInt } from "@polkadot/types/codec"; 4 | import { TypeRegistry } from "@polkadot/types/create"; 5 | import { Text, U128 } from "@polkadot/types/primitive"; 6 | import * as crypto from "@polkadot/util-crypto"; 7 | // Import Web3 from 'web3'; 8 | import { testValidator } from "@polkadot/util-crypto/base32/is"; 9 | import { expect } from "chai"; 10 | 11 | // Import elliptic crypto 12 | // import { elliptic } from 'elliptic'; 13 | var elliptic = require("elliptic"); 14 | const ec = new elliptic.ec("secp256k1"); 15 | 16 | // Import eth lib (wrapper of elliptic lib) 17 | var Account = require("eth-lib/lib/account"); 18 | var Hash = require("eth-lib/lib/hash"); 19 | 20 | const privateKey = 21 | "0xe82c0c4259710bb0d6cf9f9e8d0ad73419c1278a14d375e5ca691e7618103011"; 22 | 23 | // Provider is set to localhost for development 24 | const wsProvider = new WsProvider("ws://localhost:9944"); 25 | 26 | // Keyring needed to sign using Alice account 27 | const keyring = new Keyring({ type: "sr25519" }); 28 | 29 | // Configs of test ropsten account 30 | const test_eth_address = "[0x4d88dc5d528a33e4b8be579e9476715f60060582]"; 31 | 32 | const msgPrefix: string = "Link Litentry: "; 33 | // const msgPrefix: string = "\x19Ethereum Signed Message:\n51Link Litentry: "; 34 | 35 | const keyringRopsten = new Keyring({ type: "ecdsa" }); 36 | 37 | // Setup the API and Alice Account 38 | async function init() { 39 | console.log( 40 | `Initiating the API (ignore message "Unable to resolve type B..." 
and "Unknown types found...")` 41 | ); 42 | 43 | // Initiate the polkadot API. 44 | const api = await ApiPromise.create({ 45 | provider: wsProvider, 46 | types: { 47 | // mapping the actual specified address format 48 | Address: "AccountId", 49 | // mapping the lookup 50 | LookupSource: "AccountId", 51 | BlockWeights: "BlockWeights", 52 | Account: { nonce: "U256", balance: "U256" }, 53 | Transaction: { 54 | nonce: "U256", 55 | action: "String", 56 | gas_price: "u64", 57 | gas_limit: "u64", 58 | value: "U256", 59 | input: "Vec", 60 | signature: "Signature", 61 | }, 62 | Signature: { v: "u64", r: "H256", s: "H256" }, 63 | }, 64 | }); 65 | 66 | console.log(`Initialization done`); 67 | console.log(`Genesis at block: ${api.genesisHash.toHex()}`); 68 | 69 | const alice = keyring.addFromUri("//Alice", { name: "Alice default" }); 70 | 71 | const { nonce, data: balance } = await api.query.system.account( 72 | alice.address 73 | ); 74 | console.log(`Alice Substrate Account: ${alice.address}`); 75 | console.log( 76 | `Alice Substrate Account (nonce: ${nonce}) balance, free: ${balance.free.toHex()}` 77 | ); 78 | 79 | return { api, alice }; 80 | } 81 | 82 | // Create Ethereum Link from ALICE 83 | async function eth_link(api: ApiPromise, alice: KeyringPair) { 84 | console.log(`\nStep 1: Link Ethereum account`); 85 | 86 | const registry = new TypeRegistry(); 87 | 88 | // Encode prefix with concatenated utf8, instead of SCALE codec to match the litentry node 89 | // implementation 90 | let encodedPrefix = Buffer.from(msgPrefix, "utf-8"); 91 | 92 | // let encodedAccId = registry.createType('AccountId', alice.address).toU8a(); 93 | // console.log(encodedAccId); 94 | // console.log(alice.addressRaw); 95 | 96 | let encodedExpiredBlock = new UInt(registry, 10000, 32).toU8a(); 97 | 98 | let encodedMsg = new Uint8Array( 99 | encodedPrefix.length + alice.addressRaw.length + encodedExpiredBlock.length 100 | ); 101 | encodedMsg.set(encodedPrefix); 102 | encodedMsg.set(alice.addressRaw, 
encodedPrefix.length); 103 | encodedMsg.set( 104 | encodedExpiredBlock, 105 | encodedPrefix.length + alice.addressRaw.length 106 | ); 107 | 108 | // To use manual hash and sign method, a prefix of \x19Ethereum ... is also needed to be prefixed 109 | // manually 110 | // let hash = Hash.keccak256s(encodedMsg); 111 | 112 | // console.log('hash is:'); 113 | // console.log(hash); 114 | 115 | // TODO ECDSA keyring from polkadot crypto still not working 116 | // const ropstenTestAcc = 117 | // keyringRopsten.addFromUri('0xe82c0c4259710bb0d6cf9f9e8d0ad73419c1278a14d375e5ca691e7618103011'); 118 | // console.log('ropsten pub key: '); 119 | // console.log(ropstenTestAcc.publicKey); 120 | // console.log(ropstenTestAcc.address); 121 | // console.log(crypto.keccakAsU8a(ropstenTestAcc.publicKey)); 122 | // let signedMsg = ropstenTestAcc.sign(new Buffer(hash2.slice(2), "hex")); 123 | // let r = signedMsg.slice(0, 32); 124 | // let s = signedMsg.slice(32, 64); 125 | // let v = signedMsg[64]; 126 | 127 | // var signature = Account.sign(hash, privateKey); 128 | // var vrs = Account.decodeSignature(signature); 129 | // console.log("signature is :"); 130 | // console.log(signature); 131 | // console.log(keyPair.sign(new Buffer(hash2.slice(2), "hex"), { canonical: true 132 | // }).r.toString(16)); 133 | 134 | // TODO Web3 could be used to replace eth-lib once ethereum prefix is implemented on 135 | // account-linker side 136 | const Web3 = require("web3"); 137 | const web3 = new Web3(); 138 | // Convert byte array to hex string 139 | let hexString = "0x" + Buffer.from(encodedMsg).toString("hex"); 140 | 141 | let signedMsg = web3.eth.accounts.sign(hexString, privateKey); 142 | 143 | // This is not needed as eth-lib already does the same job 144 | // let keyPair = ec.keyFromPrivate(new Buffer(privateKey.slice(2), "hex")); 145 | // let privKey = keyPair.getPrivate("hex"); 146 | // let pubKey = keyPair.getPublic(); 147 | // console.log(`Private key: ${privKey}`); 148 | // 
console.log("Public key :", pubKey.encode("hex").substr(2)); 149 | // console.log("Public key (compressed):", 150 | // pubKey.encodeCompressed("hex")); 151 | // let signature = ec.sign(hash, privKey, "hex", {canonical: true}); 152 | 153 | // Convert ethereum address to bytes array 154 | let ethAddressBytes = web3.utils.hexToBytes( 155 | web3.eth.accounts.privateKeyToAccount(privateKey).address 156 | ); 157 | 158 | // const transaction = api.tx.accountLinkerModule.link(alice.address, 0, 10000, vrs[1], vrs[2], 159 | // vrs[0]); 160 | const transaction = api.tx.accountLinkerModule.linkEth( 161 | alice.address, 162 | 0, 163 | ethAddressBytes, 164 | 10000, 165 | signedMsg.r, 166 | signedMsg.s, 167 | signedMsg.v 168 | ); 169 | 170 | const link = new Promise<{ block: string }>(async (resolve, reject) => { 171 | const unsub = await transaction.signAndSend(alice, (result) => { 172 | console.log(`Link creation is ${result.status}`); 173 | if (result.status.isInBlock) { 174 | console.log(`Link included at blockHash ${result.status.asInBlock}`); 175 | console.log(`Waiting for finalization... 
(can take a minute)`); 176 | } else if (result.status.isFinalized) { 177 | console.log( 178 | `Transfer finalized at blockHash ${result.status.asFinalized}` 179 | ); 180 | unsub(); 181 | resolve({ 182 | block: result.status.asFinalized.toString(), 183 | }); 184 | } 185 | }); 186 | }); 187 | return link; 188 | } 189 | 190 | // Retrieve Alice & Link Storage 191 | async function check_linking_state(api: ApiPromise, alice: KeyringPair) { 192 | console.log(`\nStep 2: Retrieving linking state of Alice `); 193 | 194 | // Retrieve Alice account with new nonce value 195 | const { nonce, data: balance } = await api.query.system.account( 196 | alice.address 197 | ); 198 | console.log( 199 | `Alice Substrate Account (nonce: ${nonce}) balance, free: ${balance.free}` 200 | ); 201 | 202 | const linkedEthAddress = await api.query.accountLinkerModule.ethereumLink( 203 | alice.address 204 | ); 205 | console.log( 206 | `Linked Ethereum addresses of Alice are: ${linkedEthAddress.toString()}` 207 | ); 208 | 209 | expect(linkedEthAddress.toString()).to.equal(test_eth_address); 210 | 211 | return; 212 | } 213 | 214 | // Claim Assets for Alice 215 | async function asset_claim(api: ApiPromise, alice: KeyringPair) { 216 | console.log(`\nStep 3: Claim assets for Alice`); 217 | 218 | const transaction = await api.tx.offchainWorkerModule.assetClaim(); 219 | 220 | const data = new Promise<{ block: string }>(async (resolve, reject) => { 221 | const unsub = await transaction.signAndSend(alice, (result) => { 222 | console.log(`Transfer is ${result.status}`); 223 | if (result.status.isInBlock) { 224 | console.log( 225 | `Transfer included at blockHash ${result.status.asInBlock}` 226 | ); 227 | console.log(`Waiting for finalization... 
(can take a minute)`); 228 | } else if (result.status.isFinalized) { 229 | console.log( 230 | `Transfer finalized at blockHash ${result.status.asFinalized}` 231 | ); 232 | unsub(); 233 | resolve({ 234 | block: result.status.asFinalized.toString(), 235 | }); 236 | } 237 | }); 238 | }); 239 | return data; 240 | } 241 | 242 | // Retrieve assets balances of Alice 243 | async function get_assets(api: ApiPromise, alice: KeyringPair) { 244 | console.log(`\nStep 4: Retrieving assets of Alice`); 245 | 246 | // Retrieve Alice account with new nonce value 247 | const { nonce, data: balance } = await api.query.system.account( 248 | alice.address 249 | ); 250 | console.log( 251 | `Alice Substrate Account (nonce: ${nonce}) balance, free: ${balance.free}` 252 | ); 253 | 254 | const assetsBalances = await api.query.offchainWorkerModule.accountBalance( 255 | alice.address 256 | ); 257 | console.log( 258 | `Linked Ethereum balances of Alice are: ${assetsBalances.toString()}` 259 | ); 260 | 261 | // TODO fetch real time balance and compare it here 262 | expect(assetsBalances.toString()).to.equal( 263 | `[0,"0x00000000000000004563918244f40000"]` 264 | ); 265 | 266 | return; 267 | } 268 | 269 | async function main() { 270 | const { api, alice } = await init(); 271 | 272 | // step 1: Creating the contract from ALICE 273 | const link = await eth_link(api, alice); 274 | 275 | // step 2: Retrieving Alice's linked Ethereum accounts 276 | await check_linking_state(api, alice); 277 | 278 | // step 3: Claim assets for Alice 279 | await asset_claim(api, alice); 280 | 281 | // step 4: Retrieving assets information of Alice 282 | await get_assets(api, alice); 283 | } 284 | 285 | main() 286 | .catch(console.error) 287 | .then(() => process.exit(0)); 288 | -------------------------------------------------------------------------------- /ts-tests/tests/test-eth-balance.ts: -------------------------------------------------------------------------------- 1 | import { ApiPromise, WsProvider, Keyring } 
from "@polkadot/api"; 2 | import { KeyringPair } from '@polkadot/keyring/types'; 3 | import { UInt } from '@polkadot/types/codec'; 4 | import { TypeRegistry } from "@polkadot/types/create"; 5 | // Import Web3 from 'web3'; 6 | import { expect } from "chai"; 7 | import { step } from "mocha-steps"; 8 | import { describeLitentry } from "./utils" 9 | 10 | const privateKey = '0xe82c0c4259710bb0d6cf9f9e8d0ad73419c1278a14d375e5ca691e7618103011'; 11 | 12 | // Provider is set to localhost for development 13 | const wsProvider = new WsProvider("ws://localhost:9944"); 14 | 15 | // Keyring needed to sign using Alice account 16 | const keyring = new Keyring({ type: 'sr25519' }); 17 | 18 | // Configs of test ropsten account 19 | const testEthAddress = "[0x4d88dc5d528a33e4b8be579e9476715f60060582]"; 20 | 21 | const msgPrefix: string = "Link Litentry: "; 22 | 23 | // Create Ethereum Link from ALICE 24 | async function eth_link(api: ApiPromise, alice: KeyringPair) { 25 | 26 | console.log(`\nStep 1: Link Ethereum account`); 27 | 28 | const registry = new TypeRegistry(); 29 | 30 | // Encode prefix with concatenated utf8, instead of SCALE codec to match the litentry node implementation 31 | let encodedPrefix = Buffer.from(msgPrefix, 'utf-8'); 32 | 33 | let encodedExpiredBlock = new UInt(registry, 10000, 32).toU8a(); 34 | 35 | let encodedMsg = new Uint8Array(encodedPrefix.length + alice.addressRaw.length + encodedExpiredBlock.length); 36 | encodedMsg.set(encodedPrefix); 37 | encodedMsg.set(alice.addressRaw, encodedPrefix.length); 38 | encodedMsg.set(encodedExpiredBlock, encodedPrefix.length + alice.addressRaw.length); 39 | 40 | // Web3 is used to sign the message with ethereum prefix ("\x19Ethereum ...") 41 | const Web3 = require("web3"); 42 | const web3 = new Web3(); 43 | // Convert byte array to hex string 44 | let hexString = "0x" + Buffer.from(encodedMsg).toString('hex'); 45 | 46 | let signedMsg = web3.eth.accounts.sign(hexString, privateKey); 47 | 48 | // Convert ethereum address 
to bytes array 49 | let ethAddressBytes = web3.utils.hexToBytes(web3.eth.accounts.privateKeyToAccount(privateKey).address); 50 | 51 | console.log(`r is ${signedMsg.r}`); 52 | console.log(`s is ${signedMsg.s}`); 53 | console.log(`v is ${signedMsg.v}`); 54 | 55 | const transaction = api.tx.accountLinkerModule.linkEth(alice.address, 0, ethAddressBytes, 10000, signedMsg.r, signedMsg.s, signedMsg.v); 56 | 57 | const link = new Promise<{ block: string }>(async (resolve, reject) => { 58 | const unsub = await transaction.signAndSend(alice, (result) => { 59 | console.log(`Link creation is ${result.status}`); 60 | if (result.status.isInBlock) { 61 | console.log(`Link included at blockHash ${result.status.asInBlock}`); 62 | console.log(`Waiting for finalization... (can take a minute)`); 63 | } else if (result.status.isFinalized) { 64 | console.log(`Transfer finalized at blockHash ${result.status.asFinalized}`); 65 | unsub(); 66 | resolve({ 67 | block: result.status.asFinalized.toString(), 68 | }); 69 | } 70 | }); 71 | }); 72 | return link; 73 | 74 | } 75 | 76 | // Retrieve Alice & Link Storage 77 | async function check_linking_state(api: ApiPromise, alice: KeyringPair) { 78 | 79 | console.log(`\nStep 2: Retrieving linking state of Alice `); 80 | 81 | // Retrieve Alice account with new nonce value 82 | const { nonce, data: balance } = await api.query.system.account(alice.address); 83 | console.log(`Alice Substrate Account (nonce: ${nonce}) balance, free: ${balance.free}`); 84 | 85 | const linkedEthAddress = (await api.query.accountLinkerModule.ethereumLink(alice.address)); 86 | console.log(`Linked Ethereum addresses of Alice are: ${linkedEthAddress.toString()}`); 87 | 88 | return linkedEthAddress; 89 | } 90 | 91 | 92 | // Claim Assets for Alice 93 | async function asset_claim(api: ApiPromise, alice: KeyringPair) { 94 | 95 | console.log(`\nStep 3: Claim assets for Alice`); 96 | 97 | const transaction = await api.tx.offchainWorkerModule.assetClaim(); 98 | 99 | const data = new 
Promise<{ block: string }>(async (resolve, reject) => { 100 | const unsub = await transaction.signAndSend(alice, (result) => { 101 | console.log(`Transfer is ${result.status}`); 102 | if (result.status.isInBlock) { 103 | console.log(`Transfer included at blockHash ${result.status.asInBlock}`); 104 | console.log(`Waiting for finalization... (can take a minute)`); 105 | } else if (result.status.isFinalized) { 106 | console.log(`Transfer finalized at blockHash ${result.status.asFinalized}`); 107 | unsub(); 108 | resolve({ 109 | block: result.status.asFinalized.toString(), 110 | }); 111 | } 112 | }); 113 | }); 114 | return data; 115 | } 116 | 117 | // Retrieve assets balances of Alice 118 | async function get_assets(api: ApiPromise, alice: KeyringPair) { 119 | 120 | console.log(`\nStep 4: Retrieving assets of Alice`); 121 | 122 | // Retrieve Alice account with new nonce value 123 | const { nonce, data: balance } = await api.query.system.account(alice.address); 124 | console.log(`Alice Substrate Account (nonce: ${nonce}) balance, free: ${balance.free}`); 125 | 126 | const assetsBalances = (await api.query.offchainWorkerModule.accountBalance(alice.address)); 127 | console.log(`Linked Ethereum balances of Alice are: ${assetsBalances.toString()}`); 128 | 129 | return assetsBalances; 130 | 131 | } 132 | 133 | describeLitentry("Test Ethereum Link and Balance Fetch", ``, (context) =>{ 134 | 135 | step("Create Ethereum Link", async function () { 136 | await eth_link(context.api, context.alice); 137 | }) 138 | 139 | step("Retrieving Alice's linked Ethereum accounts", async function () { 140 | const ethAddr = await check_linking_state(context.api, context.alice); 141 | 142 | expect(ethAddr.toString()).to.equal(testEthAddress); 143 | }) 144 | 145 | step("Claim assets for Alice", async function () { 146 | await asset_claim(context.api, context.alice); 147 | }) 148 | 149 | step("Retrieving assets information of Alice", async function () { 150 | // First wait for 36s ~ 6 blocks 151 
| await new Promise(r => setTimeout(r, 36000)); 152 | const balances = await get_assets(context.api, context.alice); 153 | // TODO fetch real time balance and compare it here 154 | expect(balances.toString()).to.equal(`[null,"0x00000000000000004563918244f40000"]`); 155 | }) 156 | 157 | }); 158 | -------------------------------------------------------------------------------- /ts-tests/tests/utils.ts: -------------------------------------------------------------------------------- 1 | import Web3 from "web3"; 2 | import { JsonRpcResponse } from "web3-core-helpers"; 3 | import { spawn, ChildProcess } from "child_process"; 4 | import 'mocha'; 5 | import { ApiPromise, Keyring, WsProvider } from "@polkadot/api"; 6 | import { KeyringPair } from '@polkadot/keyring/types'; 7 | //import '@polkadot/types/interfaces/system' 8 | 9 | export const BINARY_PATH = `../target/debug/litentry-node`; 10 | export const APIKEY_SERVER_PATH = `../target/debug/litentry-token-server`; 11 | export const SPAWNING_TIME = 30000; 12 | 13 | // Provider is set to localhost for development 14 | const wsProvider = new WsProvider("ws://localhost:9944"); 15 | 16 | // Keyring needed to sign using Alice account 17 | const keyring = new Keyring({ type: 'sr25519' }); 18 | 19 | export async function launchAPITokenServer(): Promise<{ apikey_server: ChildProcess }> { 20 | 21 | const apikey_server = spawn(APIKEY_SERVER_PATH, [], { 22 | env: { 23 | etherscan: "RF71W4Z2RDA7XQD6EN19NGB66C2QD9UPHB", 24 | infura: "aa0a6af5f94549928307febe80612a2a", 25 | blockchain: "" 26 | } 27 | }); 28 | 29 | apikey_server.on("error", (err) => { 30 | if ((err as any).errno == "ENOENT") { 31 | console.error( 32 | `\x1b[31mMissing litentry-token-server binary (${APIKEY_SERVER_PATH}).\nPlease compile the litentry project:\ncargo build\x1b[0m` 33 | ); 34 | } else { 35 | console.error(err); 36 | } 37 | process.exit(1); 38 | }); 39 | 40 | apikey_server.stdout.on('data', (data) => { 41 | console.log('Litentry Token Server Output: ' + 
data.toString()); 42 | }); 43 | 44 | apikey_server.stderr.on('data', (data) => { 45 | console.log('Litentry Token Server Output: ' + data.toString()); 46 | }); 47 | 48 | return { apikey_server }; 49 | } 50 | 51 | export async function launchLitentryNode(specFilename: string, provider?: string): Promise<{ binary: ChildProcess }> { 52 | 53 | const cmd = BINARY_PATH; 54 | const args = [ 55 | `--dev`, 56 | `--tmp`, 57 | ]; 58 | const binary = spawn(cmd, args); 59 | 60 | binary.on("error", (err) => { 61 | if ((err as any).errno == "ENOENT") { 62 | console.error( 63 | `\x1b[31mMissing litentry-node binary (${BINARY_PATH}).\nPlease compile the litentry project:\ncargo build\x1b[0m` 64 | ); 65 | } else { 66 | console.error(err); 67 | } 68 | process.exit(1); 69 | }); 70 | 71 | binary.stdout.on('data', (data) => { 72 | console.log('Litentry Node Output: ' + data.toString()); 73 | }); 74 | 75 | binary.stderr.on('data', (data) => { 76 | console.log('Litentry Node Output: ' + data.toString()); 77 | }); 78 | 79 | // await new Promise((resolve) => { 80 | // const timer = setTimeout(() => { 81 | // console.error(`\x1b[31m Failed to start Litentry Node.\x1b[0m`); 82 | // console.error(`Command: ${cmd} ${args.join(" ")}`); 83 | // process.exit(1); 84 | // }, SPAWNING_TIME - 2000); 85 | // 86 | // const onData = async (chunk) => { 87 | // if (chunk.toString().match("Listening for new connections on 127.0.0.1:9944.")) { 88 | // 89 | // clearTimeout(timer); 90 | // console.log(`Litentry Node Starts`); 91 | // resolve(); 92 | // } 93 | // }; 94 | // binary.stderr.on("data", onData); 95 | // binary.stdout.on("data", onData); 96 | // }); 97 | 98 | return { binary }; 99 | } 100 | 101 | export async function initApiPromise(wsProvider: WsProvider) { 102 | console.log(`Initiating the API (ignore message "Unable to resolve type B..." and "Unknown types found...")`); 103 | 104 | // Initiate the polkadot API. 
105 | const api = await ApiPromise.create({ 106 | provider: wsProvider, 107 | types: { 108 | // mapping the actual specified address format 109 | Address: "AccountId", 110 | // mapping the lookup 111 | LookupSource: "AccountId", 112 | Account: { 113 | nonce: "U256", 114 | balance: "U256" 115 | }, 116 | Transaction: { 117 | nonce: "U256", 118 | action: "String", 119 | gas_price: "u64", 120 | gas_limit: "u64", 121 | value: "U256", 122 | input: "Vec", 123 | signature: "Signature" 124 | }, 125 | Signature: { 126 | v: "u64", 127 | r: "H256", 128 | s: "H256" 129 | }, 130 | BlockWeights: "U256", 131 | BlockLength: "U256", 132 | } 133 | }); 134 | 135 | console.log(`Initialization done`); 136 | console.log(`Genesis at block: ${api.genesisHash.toHex()}`); 137 | 138 | // Get keyring of Alice 139 | const alice = keyring.addFromUri('//Alice', { name: 'Alice default' }); 140 | 141 | // Insert ocw session key 142 | const resInsertKey = api.rpc.author.insertKey( 143 | "ocw!", 144 | "loop high amazing chat tennis auto denial attend type quit liquid tonight", 145 | "0x8c35b97c56099cf3b5c631d1f296abbb11289857e74a8f60936290080d56da6d" 146 | ); 147 | 148 | const { nonce, data: balance } = await api.query.system.account(alice.address); 149 | console.log(`Alice Substrate Account: ${alice.address}`); 150 | console.log(`Alice Substrate Account (nonce: ${nonce}) balance, free: ${balance.free.toHex()}`); 151 | 152 | return { api, alice }; 153 | } 154 | 155 | async function sendTokenToOcw(api: ApiPromise, alice: KeyringPair) { 156 | // Transfer tokens from Alice to ocw account 157 | console.log(`Transfer tokens from Alice to ocw account`); 158 | return new Promise<{ block: string }>(async (resolve, reject) => { 159 | const unsub = await api.tx.balances 160 | .transfer("5FEYX9NES9mAJt1Xg4WebmHWywxyeGQK8G3oEBXtyfZrRePX", 1000000000000000) 161 | .signAndSend(alice, (result) => { 162 | console.log(`Current status is ${result.status}`); 163 | if (result.status.isInBlock) { 164 | console.log( 165 | 
`Transaction included at blockHash ${result.status.asInBlock}` 166 | ); 167 | } else if (result.status.isFinalized) { 168 | console.log( 169 | `Transaction finalized at blockHash ${result.status.asFinalized}` 170 | ); 171 | unsub(); 172 | resolve({ 173 | block: result.status.asFinalized.toString(), 174 | }); 175 | } 176 | }); 177 | }); 178 | } 179 | 180 | 181 | export function describeLitentry(title: string, specFilename: string, cb: (context: {api: ApiPromise, alice: KeyringPair}) => void, provider?: string) { 182 | describe(title, function() { 183 | // Set timeout to 120 seconds 184 | this.timeout(120000); 185 | 186 | let tokenServer: ChildProcess; 187 | let binary: ChildProcess; 188 | let context: {api: ApiPromise, alice: KeyringPair} = { api: {} as ApiPromise, alice: {} as KeyringPair}; 189 | // Making sure the Litentry node has started 190 | before("Starting Litentry Test Node", async function () { 191 | //this.timeout(SPAWNING_TIME); 192 | const initTokenServer = await launchAPITokenServer(); 193 | const initNode = await launchLitentryNode(specFilename, provider); 194 | tokenServer = initTokenServer.apikey_server; 195 | binary = initNode.binary; 196 | const initApi = await initApiPromise(wsProvider); 197 | context.api = initApi.api; 198 | context.alice = initApi.alice; 199 | return sendTokenToOcw(initApi.api, initApi.alice); 200 | }); 201 | 202 | after(async function () { 203 | //console.log(`\x1b[31m Killing RPC\x1b[0m`); 204 | tokenServer.kill() 205 | binary.kill(); 206 | context.api.disconnect(); 207 | }); 208 | 209 | cb(context); 210 | }); 211 | } -------------------------------------------------------------------------------- /ts-tests/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | /* Visit https://aka.ms/tsconfig.json to read more about this file */ 4 | 5 | /* Basic Options */ 6 | // "incremental": true, /* Enable incremental compilation */ 7 | "target": "es5", /* 
Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */ 8 | "module": "commonjs", /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */ 9 | // "lib": [], /* Specify library files to be included in the compilation. */ 10 | // "allowJs": true, /* Allow javascript files to be compiled. */ 11 | // "checkJs": true, /* Report errors in .js files. */ 12 | // "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */ 13 | // "declaration": true, /* Generates corresponding '.d.ts' file. */ 14 | // "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */ 15 | // "sourceMap": true, /* Generates corresponding '.map' file. */ 16 | // "outFile": "./", /* Concatenate and emit output to single file. */ 17 | // "outDir": "./", /* Redirect output structure to the directory. */ 18 | // "rootDir": "./", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */ 19 | // "composite": true, /* Enable project compilation */ 20 | // "tsBuildInfoFile": "./", /* Specify file to store incremental compilation information */ 21 | // "removeComments": true, /* Do not emit comments to output. */ 22 | // "noEmit": true, /* Do not emit outputs. */ 23 | // "importHelpers": true, /* Import emit helpers from 'tslib'. */ 24 | // "downlevelIteration": true, /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */ 25 | // "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */ 26 | 27 | /* Strict Type-Checking Options */ 28 | "strict": true, /* Enable all strict type-checking options. */ 29 | // "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. 
*/ 30 | // "strictNullChecks": true, /* Enable strict null checks. */ 31 | // "strictFunctionTypes": true, /* Enable strict checking of function types. */ 32 | // "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */ 33 | // "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */ 34 | // "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */ 35 | // "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */ 36 | 37 | /* Additional Checks */ 38 | // "noUnusedLocals": true, /* Report errors on unused locals. */ 39 | // "noUnusedParameters": true, /* Report errors on unused parameters. */ 40 | // "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */ 41 | // "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */ 42 | // "noUncheckedIndexedAccess": true, /* Include 'undefined' in index signature results */ 43 | 44 | /* Module Resolution Options */ 45 | // "moduleResolution": "node", /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */ 46 | // "baseUrl": "./", /* Base directory to resolve non-absolute module names. */ 47 | // "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */ 48 | // "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */ 49 | // "typeRoots": [], /* List of folders to include type definitions from. */ 50 | // "types": [], /* Type declaration files to be included in compilation. */ 51 | // "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. 
*/ 52 | "esModuleInterop": true, /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */ 53 | // "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */ 54 | // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ 55 | 56 | /* Source Map Options */ 57 | // "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */ 58 | // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ 59 | // "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. */ 60 | // "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */ 61 | 62 | /* Experimental Options */ 63 | // "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */ 64 | // "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. */ 65 | 66 | /* Advanced Options */ 67 | "skipLibCheck": true, /* Skip type checking of declaration files. */ 68 | "forceConsistentCasingInFileNames": true /* Disallow inconsistently-cased references to the same file. */ 69 | //"sourceMap": true 70 | } 71 | } 72 | --------------------------------------------------------------------------------