,
26 | /// Transaction pool instance.
27 | pub pool: Arc,
28 | /// Whether to deny unsafe calls
29 | pub deny_unsafe: DenyUnsafe,
30 | }
31 |
32 | /// Instantiate all RPC extensions.
33 | pub fn create_full(
34 | deps: FullDeps,
35 | ) -> Result>
36 | where
37 | C: ProvideRuntimeApi
38 | + HeaderBackend
39 | + AuxStore
40 | + HeaderMetadata
41 | + Send
42 | + Sync
43 | + 'static,
44 | C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi,
45 | C::Api: substrate_frame_rpc_system::AccountNonceApi,
46 | C::Api: BlockBuilder,
47 | P: TransactionPool + Sync + Send + 'static,
48 | {
49 | use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
50 | use substrate_frame_rpc_system::{System, SystemApiServer};
51 |
52 | let mut module = RpcExtension::new(());
53 | let FullDeps { client, pool, deny_unsafe } = deps;
54 |
55 | module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
56 | module.merge(TransactionPayment::new(client).into_rpc())?;
57 | Ok(module)
58 | }
59 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.8"
2 |
3 | services:
4 | md5:
5 | container_name: md5_container
6 | image: dappdever/hashed-substrate
7 | ports:
8 | - "40333:40333"
9 | - "30333:30333"
10 | - "9933:9933"
11 | - "9944:9944"
12 | - "9946:9946"
13 | # Declare environment variables in docker-compose.override.yml for safety purposes
14 | volumes:
15 | - .:/var/www/hashed
16 | - type: bind
17 | source: ./.local
18 | target: /root/.local
19 | command: bash "scripts/start_collator.sh"
20 |
--------------------------------------------------------------------------------
/docs/CANNABIS.md:
--------------------------------------------------------------------------------
1 |
2 | # Definitions
3 |
4 | - *Universally Unique Identifier (UUID)* - an assigned 64-bit random(ish) value that is unpredictable.
5 | - In consumer products, *serial number* is often used as a unique identifier for a unit. `Serial` implies in a sequence, which would be predictable, so we use `UUID` instead.
6 |
7 | - *Checkpoint* - a geo-coded timestamp on the history of a UUID. Each `checkpoint` may include additional references such as documents, images, videos, technical values like test results, etc.
8 |
9 | - *New UUID* - when a new UUID is created, it is given a parent UUID to represent the source or literal parent of a live plant.
10 | - The only exception to this is when creating a new vendor UUID.
11 | - Vendors -> Seeds -> Germinates -> Mother -> etc etc.
12 |
13 | 
14 |
15 | ```
16 | @startuml
17 | digraph G {
18 | rankdir=TB;
19 | notes [label="- Purple lines are checkpoints\n- Gold lines are new UUIDs\n- +n are new UUIDs\n- -n are scrapped UUIDs",shape=note];
20 | node [shape=record];
21 |
22 | seeds_from_cookies [label="Seeds\n|{UUIDs|Pieces}|{{1}|{100}}"];
23 | cubed [label="Cubed\n|{UUIDs|Pieces}|{{75}|{75}}"];
24 | did_not_germinate [label="Did not Germinate\n|{UUIDs|Pieces}|{{1}|{25}}"];
25 | discard [fillcolor=pink,style=filled,label="Discard\n|{UUIDs|Pieces}|{{1}|{var}}"];
26 | mother_plants [label="Mother Plants\n|{UUIDs|Pieces}|{{55}|{55}}"];
27 | clones [label="Clones\n|{UUIDs|Pieces}|{{375}|{375}}"];
28 | clone_buyers [fillcolor=darkseagreen1,style=filled,label="Clone Buyers\n|{UUIDs|Pieces}|{{75}|{75}}"];
29 | flower [label="Flower\n|{UUIDs|Pieces}|{{1,783}|{1,783}}"];
30 | dispensaries [label="Dispensaries\n|{UUIDs|Pieces}|{{1,505}|{1,505}}"];
31 | consumers [label="Consumer Scan\n|{UUIDs|Pieces}|{{?}|{?}}"];
32 | pos_scan [fillcolor=darkseagreen1,style=filled,label="PoS Scan\n|{UUIDs|Pieces}|{{1,273}|{1,273}}"];
33 |
34 | seeds_from_cookies -> cubed [label="+ 75 uuids",color=gold4];
35 | cubed -> mother_plants [label="",color=purple];
36 | cubed -> discard [label=" -20",color=purple];
37 |
38 | seeds_from_cookies -> did_not_germinate [label="",color=purple];
39 | did_not_germinate -> discard [label=" (never became a UUID)",color=purple];
40 |
41 | mother_plants -> clones [color=gold4,label=" + 320"];
42 | mother_plants -> discard [label="-7",color=purple];
43 |
44 | clones -> flower [label=" + 1,531",color=gold4];
45 | clones -> clone_buyers [color=purple];
46 | clones -> discard [label="-123",color=purple];
47 |
48 | flower -> dispensaries [label="",color=purple];
49 | flower -> discard [label="-278",color=purple];
50 |
51 | dispensaries -> consumers [label="",color=purple];
52 | dispensaries -> discard [label="-232",color=purple];
53 |
54 | consumers -> pos_scan [label="",color=purple];
55 | dispensaries -> pos_scan [label=" Some portion of UUIDs will be scanned by consumers\n and all should have point-of-sale scan",color=purple];
56 | }
57 | @enduml
58 | ```
--------------------------------------------------------------------------------
/docs/bitcoin-wallet.md:
--------------------------------------------------------------------------------
1 | # Bitcoin Treasury Wallet- powered by a pallet
2 | ## Bitcoin xpub as Identity attribute
3 | A user can set their xpub information on their profile, such as:
4 | ```bash
5 | $ polkadot-js-api --ws wss://n1.hashed.systems tx.identity.setIdentity '{
6 | "display": {
7 | "Raw": "Paul McCartney"
8 | },
9 | "additional": [[{
10 | "Raw": "xpub"
11 | },{
12 | "Raw": "[dca67f77/84/1/0/0]tpubDEQ2wZuuDfizT2aThADBimVaDwWb3aK6WtA3VRMoCDog2Gg3PtDa1gHWhZYEiGba5XA2D2opry9MxZVVjgAaGM8MCnvW6kt6v5AURRyLHPh/*"
13 | }
14 | ]
15 | ]
16 | }' --seed "bargain album current caught tragic slab identify squirrel embark black drip imitate"
17 | ```
18 | ## Bitcoin Developer Kit (BDK)
19 | [Bitcoin Dev Kit](https://bitcoindevkit.org) is a Rust-based library for working with Bitcoin wallets, with a special focus on output descriptors. There's a CLI (`bdk-cli`) to run the commands, and there's also a library/crate that I imagine can be used from within a pallet.
20 |
21 | Here are some helpful commands.
22 | ```bash
23 | $ bdk-cli key generate
24 | ```
25 | ```json
26 | {
27 | "fingerprint": "dca67f77",
28 | "mnemonic": "rose poet odor pole impose stamp boat cruel melt nut eight anchor jar obey tip mention accuse dry member stay pepper final alert live",
29 | "xprv": "tprv8ZgxMBicQKsPdRxBuQZegC2R3k9R1m4SB2Vy8wAaonownndjLrAdTsTiapvWNXQSN8N9XUvKAWukvm2evPS8yCqmvd1mmL8qAEnbe3PDNpD"
30 | }
31 | ```
32 |
33 | ```bash
34 | bdk-cli key derive --path m/84'/1'/0'/0 '--xprv tprv8ZgxMBicQKsPdRxBuQZegC2R3k9R1m4SB2Vy8wAaonownndjLrAdTsTiapvWNXQSN8N9XUvKAWukvm2evPS8yCqmvd1mmL8qAEnbe3PDNpD
35 | ```
36 | ```json
37 | {
38 | "xprv": "[dca67f77/84/1/0/0]tprv8hhzo9sf5J3KZZYfoWYbKMqTeuzetF8BwaZGCuKVmx1HBnRGmVPyqBfeXRWZPCBkSAbZabuDCZZ26J6eWeDk9qAQq8oYK97WpXmkQdpT6S8/*",
39 | "xpub": "[dca67f77/84/1/0/0]tpubDEQ2wZuuDfizT2aThADBimVaDwWb3aK6WtA3VRMoCDog2Gg3PtDa1gHWhZYEiGba5XA2D2opry9MxZVVjgAaGM8MCnvW6kt6v5AURRyLHPh/*"
40 | }
41 | ```
42 |
43 | ## Multisig wallet
44 | ### Receiving
45 | If 5 users all generated xpub keys on their own and attested them in the profile, the pallet would be able to generate an output descriptor for a 3 of 5 multisig wallet, such as below:
46 |
47 | ```
48 | wsh(multi(3,tpubDEQ2wZuuDfizYa8Vxo92Jz96nDhwwHTczsHTpSt4hnSRaWhQbj8Nrb46QitDpeEABLQSHPSyxdCn8gUDE6uZ2TWPLreLzvhFZLPPyrSizBz/1/0/*,tpubDEQ2wZuuDfizZR2aCmD5gpHJtsXET1zpYmR1JA9nMp4EWDcnnC957ekfaysjF4T8hSNJj98fEcUocnhds3Gwot8G145AZDsYjpwuJto4DFQ/0/0/*,tpubDEQ2wZuuDfizUWke1ZhreeVoybZiYiRept7ifSNSefbmPEM7yeNkbH1Kx4uMBnCtq2bB95oT1YX1ZAFuTfA1LetiTTrYuP6ShXsUUv6Bd8Q/0/0/*,tpubDEQ2wZuuDfizT2aThADBimVaDwWb3aK6WtA3VRMoCDog2Gg3PtDa1gHWhZYEiGba5XA2D2opry9MxZVVjgAaGM8MCnvW6kt6v5AURRyLHPh/0/0/*,tpubDEQ2wZuuDfizdnKYinDkouHHo7CeDdgScMfPYLMR8cnq3PYj85SccVnXa2Yt9HfVXq1riCkDLQG7R5YwcR8HY5z79M5b6zNsX4pZ12ngu1i/0/0/*))
49 | ```
50 |
51 | Once we have the descriptor for the full wallet, we can generate new receiving addresses.
52 |
53 | #### BENEFIT: Verifiable Receiving Addresses
54 | Contributors/investors of BTC to a multisig wallet can be highly confident that the intended signers have control over the sent BTC (UTXO).
55 |
56 | ```bash
57 | $ bdk-cli wallet --descriptor 'wsh(multi(3,tpubDEQ2wZuuDfizYa8Vxo92Jz96nDhwwHTczsHTpSt4hnSRaWhQbj8Nrb46QitDpeEABLQSHPSyxdCn8gUDE6uZ2TWPLreLzvhFZLPPyrSizBz/1/0/*,tpubDEQ2wZuuDfizZR2aCmD5gpHJtsXET1zpYmR1JA9nMp4EWDcnnC957ekfaysjF4T8hSNJj98fEcUocnhds3Gwot8G145AZDsYjpwuJto4DFQ/0/0/*,tpubDEQ2wZuuDfizUWke1ZhreeVoybZiYiRept7ifSNSefbmPEM7yeNkbH1Kx4uMBnCtq2bB95oT1YX1ZAFuTfA1LetiTTrYuP6ShXsUUv6Bd8Q/0/0/*,tpubDEQ2wZuuDfizT2aThADBimVaDwWb3aK6WtA3VRMoCDog2Gg3PtDa1gHWhZYEiGba5XA2D2opry9MxZVVjgAaGM8MCnvW6kt6v5AURRyLHPh/0/0/*,tpubDEQ2wZuuDfizdnKYinDkouHHo7CeDdgScMfPYLMR8cnq3PYj85SccVnXa2Yt9HfVXq1riCkDLQG7R5YwcR8HY5z79M5b6zNsX4pZ12ngu1i/0/0/*))' get_new_address
58 | ```
59 | ```json
60 | {
61 | "address": "tb1q433j97374mss5na5eu7f0ja29rx2fsretgs2h4f5p886x5mqg65q74fhzv"
62 | }
63 | ```
64 |
65 | ### Sending
66 | There are existing wallet UIs (Spectre Desktop, Caravan, Sparrow) that support output descriptors and facilitate the user experience, including signing via a variety of hot or cold wallets.
67 |
68 | To focus only on the pallet logic, we can use `bdk-cli` to simulate the signing steps.
69 |
70 | ```bash
71 | $ bdk-cli wallet sign -h
72 |
73 | bdk-cli-wallet-sign 0.3.1-dev
74 | Signs and tries to finalize a PSBT
75 |
76 | USAGE:
77 | bdk-cli wallet --descriptor sign [OPTIONS] --psbt
78 |
79 | FLAGS:
80 | -h, --help Prints help information
81 | -V, --version Prints version information
82 |
83 | OPTIONS:
84 | --psbt Sets the PSBT to sign
85 | --assume_height Assume the blockchain has reached a specific height. This affects the
86 | transaction finalization, if there are timelocks in the descriptor
87 | --trust_witness_utxo Whether the signer should trust the witness_utxo, if the non_witness_utxo
88 | hasn’t been provided
89 |
90 |
91 | ```
92 | - [ ] TODO: Add example signing step for PSBT
93 |
94 |
95 | The intermediate PSBT files (the output from above) are only needed temporarily and can be saved directly on chain or in IPFS. These files then are combined and broadcast.
96 | #### BENEFIT: User doesn't need to transport PSBT files
97 |
98 | ```bash
99 | $ bdk-cli wallet combine_psbt -h
100 |
101 | bdk-cli-wallet-combine_psbt 0.3.1-dev
102 | Combines multiple PSBTs into one
103 |
104 | USAGE:
105 | bdk-cli wallet --descriptor combine_psbt --psbt ...
106 |
107 | FLAGS:
108 | -h, --help Prints help information
109 | -V, --version Prints version information
110 |
111 | OPTIONS:
112 | --psbt ... Add one PSBT to combine. This option can be repeated multiple times, one for each
113 | PSBT
114 |
115 | ```
116 | - [ ] TODO: Add example combine-psbt and broadcast
117 |
118 | ## Appendix: How PSBTs work
119 | 
--------------------------------------------------------------------------------
/docs/crust-network-analysis.md:
--------------------------------------------------------------------------------
1 | # Crust network
2 | _Crust provides a Web3.0 decentralized storage network for the Metaverse._
3 |
4 | _"Crust implements the incentive layer protocol for decentralized storage with adapting to multiple storage layer protocols including IPFS, and provides support for the application layer. At the same time, Crust's architecture also can provide support for a decentralized computing layer to build a distributed cloud ecosystem."_
5 |
6 |
7 | ## Overview
8 | - Network token: CRU
9 | - Exchange Rate: `1 USDT=0.243693 CRU`
10 | - Real-time storage fee: $0.000005 GB/Year
11 | - Tested on rococo/westend, Polkadot/Kusama ParaChain Technical Readiness.
12 |
13 |
14 | ## Current status of the crust networks
15 |
16 | | Network | Token | Type | lease period |
17 | |---|---|---|---|
18 | | Crust network (Polkadot version) | CRU |Parathread | NA |
19 | | Crust shadow (Kusama version) | CSM |Parachain | January 10, 2022 to December 19, 2022. |
20 | | Crust Maxwell | Candy | Preview network | NA |
21 | | Crust Rocky | CRU | Testnet network | NA |
22 |
23 | ## Features/capabilities
24 |
25 | - Website hosting
26 | - Decentralized file storage
27 |
28 | ## How to use it
29 |
30 | 1. Upload file to IPFS
31 | 2. Place a storage order on crust
32 | 3. If the order is successful, the file can then be accessed via the standard IPFS interface and gateway from anywhere.
33 |
34 | Note: Storage user guidance is coming soon.
35 |
36 |
37 | ## Pros
38 | - Hosting for websites/dapps
39 | - Currently free for Substrate chains
40 | - Available using the Polkadot-JS frontend.
41 |
42 | ## Cons
43 | - Unclear documentation
44 | - Too much project fragmentation
45 | - Two requests for every file upload (ipfs upload and remote pinning request)
46 | - The pinned files have an expiration date (6 months from the upload date) and it is required to inject tokens for a renewal.
47 |
48 | ## Links and references
49 |
50 | - [Crust network official webpage](https://crust.network/)
51 | - [Crust Network parachain info](https://parachains.info/details/crust_network)
52 | - [Crust Shadow parachain info](https://parachains.info/details/crust_shadow)
53 | - [Crust crowdloan](https://medium.com/crustnetwork/join-the-crust-crowdloan-for-the-polkadot-parachain-slot-auction-5346e385485a)
54 | - [Crust participation in Polkadot's auction summary](https://medium.com/crustnetwork/summary-of-crusts-participation-in-polkadot-slot-auction-platform-cd3857acb325)
--------------------------------------------------------------------------------
/docs/fruniques-composability.iuml:
--------------------------------------------------------------------------------
1 | @startuml composability
2 | title "Fruniques Composability"
3 | node "Statemine NFT A" as statemine_nft_a
4 | node "Statemine NFT B" as statemine_nft_b
5 |
6 | node "Frunique NFT 1" as frq_1
7 | node "Frunique NFT 2" as frq_2
8 | node "Frunique NFT n" as frq_n
9 |
10 | node "Fungible Token" as fung_1
11 | node "Frunique NFT 2.1" as frq_2_1
12 | node "Frunique NFT 2.2" as frq_2_2
13 | node "Frunique NFT 2.3" as frq_2_3
14 |
15 | note left of frq_2
16 | Source NFT locked and 1..n new Fruniques
17 | can be minted.
18 | end note
19 |
20 | note bottom of frq_2_1
21 | Metadata values are inherited by
22 | default unless overridden.
23 | end note
24 |
25 | note bottom of fung_1
26 | Parent Frunique owner decides
27 | the token supply, symbol, and
28 | metadata and is minted the tokens
29 | end note
30 |
31 | statemine_nft_a --> frq_1
32 | statemine_nft_b --> frq_2
33 | statemine_nft_b --> frq_n
34 |
35 | frq_1 --> fung_1
36 |
37 | frq_2 --> frq_2_1
38 | frq_2 --> frq_2_2
39 | frq_2 --> frq_2_3
40 | @enduml
--------------------------------------------------------------------------------
/docs/fungible-basket-frunique.iuml:
--------------------------------------------------------------------------------
1 | @startuml basket
2 | title "Basket of Fungibles as Frunique"
3 | node "75 MYCOINS" as statemine_fungible_a
4 | node "29 DEFICOIN" as statemine_fungible_b
5 | node "9 FRUN" as statemine_fungible_c
6 | node "Statemine NFT C" as statemine_nft_c
7 |
8 | node "Frunique NFT" as frq_2
9 |
10 | note bottom of frq_2
11 | Can be further fractionalized via
12 | fungible or non-fungibles
13 | end note
14 |
15 | note bottom of statemine_fungible_a
16 | Any set of tokens and/or NFTs can be
17 | locked into a Frunique
18 | end note
19 |
20 | statemine_fungible_a --> frq_2
21 | statemine_fungible_b --> frq_2
22 | statemine_fungible_c --> frq_2
23 | statemine_nft_c --> frq_2
24 | @enduml
--------------------------------------------------------------------------------
/docs/hashed-chain-arch.iuml:
--------------------------------------------------------------------------------
1 | @startuml architecture
2 | node "Other Polkadot Ecosystem" as OPE {
3 | [Other Marketplaces]
4 | }
5 |
6 | package "Protocol Layer/Hashed Chain" {
7 | [Fruniques Pallet]
8 | [Marketplace Pallet] --> [Other Marketplaces]
9 | [Advanced Frunique UI/UX]
10 | [Services and Caching]
11 | }
12 |
13 | node "Statemint Chain" as SC {
14 | [Uniques Pallet]
15 | [Fruniques Pallet] --> [Uniques Pallet]
16 | [Fruniques Pallet] --> [Assets Pallet]
17 | [Assets Pallet]
18 | [Assets Pallet] --> [Other Marketplaces]
19 | }
20 |
21 | package "End User Experiences" {
22 | [Afloat/Tax Credits] --> [Fruniques Pallet]
23 | [Afloat/Tax Credits] --> [Services and Caching]
24 | [DAO LLC Registrations] --> [Fruniques Pallet]
25 | [DAO LLC Registrations] --> [Services and Caching]
26 | ['Get Gifted' NFTs] --> [Fruniques Pallet]
27 | [Marketplace UI/UX] --> [Fruniques Pallet]
28 | [Marketplace UI/UX] --> [Marketplace Pallet]
29 | ['Get Gifted' NFTs] --> [Marketplace UI/UX]
30 | }
31 | @enduml
--------------------------------------------------------------------------------
/docs/learning-from-scratch.md:
--------------------------------------------------------------------------------
1 | # Learning blockchain from scratch (focused on devops)
2 |
3 | Maybe it'll differ from Max's learning path.
4 |
5 | - [x] Git/Github (cloning repos, issues, projects, avoid commits and code reviews for now)
6 | - [x] Networking basic concepts (ports, firewall, p2p)
7 | - [x] Linux Terminal, deployment and server ops (ssh, systemd, reverse proxy with nginx)
8 | - [ ] Gcloud platform & terminal
9 | - [ ] Blockchain and crypto things in general (platzi courses(?) )
10 | - [ ] Brief history about blockchain
11 | - [ ] Blockchain as a concept, white papers, mining
12 | - [ ] 2.0 smart contracts (layers ?)
13 | - [ ] Consensus protocol basics
14 | - [ ] Substrate fundamentals (up to intermediate level):
15 | - [ ] Polkadot concepts (include types of nodes?)
16 | - [ ] What is substrate & concepts (aura, grandpa, types of keys, pallets)
17 | - [ ] First tutorials? (only about deployment)
18 | - [ ] Polkadot explorer interface (focus on metrics and/or querys)
19 | - [ ] Our projects ( hosted on hashed chain ->(afloat, leasex, weed seeds), hypha, liberland-soil)
20 | - [ ] Chainspec
21 | - [ ] Hashed chain deployment (?)
22 | - [ ] Github CI (github actions)
23 | - [ ] Kusama
24 | - [ ] Validator node (How to, our deployment)
25 | - [ ] Tokenomics & Staking (If nominator: claim rewards)
26 | - [ ] Medium/long term:
27 | - [ ] Grafana
28 | - [ ] Runtime updates (focus on parameter configurations on the runtime? )
29 | - [ ] Propose bounties to the team on hashed chain
30 | - [ ] On the long term:
31 | - [ ] Kubernetes
32 | - [ ] Akash
--------------------------------------------------------------------------------
/docs/learning-path.md:
--------------------------------------------------------------------------------
1 | ## Substrate Training Path
2 | ### Substrate Tutorial #1
3 | The full binary executable is compiled from source and includes a default setup with Alice, Bob and other keys preconfigured with funds and sudo rights. This tutorial is the simplest and most informative way to ensure you have your environment ready to work on Substrate projects.
4 | https://docs.substrate.io/tutorials/v3/create-your-first-substrate-chain/
5 |
6 | ### Build the `hypha-/hashed-substrate` project
7 | Hashed project
8 | ```bash
9 | git clone https://github.com/hashed-io/hashed-substrate
10 | cd hashed-substrate
11 | cargo build --release
12 | ```
13 | Hypha project
14 | ```bash
15 | git clone https://github.com/hypha-dao/hypha-substrate
16 | cd hypha-substrate
17 | cargo build --release
18 | ```
19 |
20 | Connect the Front End template from the tutorial step above to the running `hypha/hashed` node.
21 |
22 | The only difference between the tutorial and the `hypha/hashed` node, in terms of setup instructions, are that the executable name will be `hypha/hashed` instead of `node-template`. That'll make sense after the tutorial. The `hypha/hashed` node will have more functionality than the `node-template`.
23 |
24 | ### Set Identity
25 | Use the pallet explorer on either the Front End template or https://polkadot.js.org to explore your node.
26 |
27 | The instructions here should work on your local node using Alice and Bob: https://wiki.polkadot.network/docs/learn-identity
28 |
29 | ### Interact with Identity Pallet - CLI
30 | Pre-requisites: `polkadot-js-api`: https://github.com/polkadot-js/tools/
31 |
32 | You can read the notes and copy/paste the commands from:
33 | https://github.com/hashed-io/hashed-substrate/blob/main/docs/identity.md
34 |
35 | ### Interact with Uniques/NFT Pallet - CLI
36 | You can read the notes and copy/paste the commands from:
37 | https://github.com/hashed-io/hashed-substrate/blob/main/docs/uniques.md
38 |
39 | ### Rust Developer Deeper Training
40 | Now that you have an idea for the environment, dive deeper into both the Rust training and Substrate training. As opposed to do them consecutively, I recommend starting both of the trainings and switch back and forth between the two as you progress.
41 |
42 | 1. [Rustlings](https://github.com/rust-lang/rustlings)
43 | - Good for interactive learners
44 | - Use watch mode and just follow the instructions
45 | 2. [Parity Substrate Tutorials](https://docs.substrate.io/tutorials/v3/)
46 | - No particular order
47 | - Some may be out-dated; don't get stuck on a versioning issue, just skip ahead.
48 | ### Substrate UI Developer Deeper Training
49 | 1. Build a Custom UI for one of the pallets using one of the available UI templates/toolkits:
50 | - [polkadot{.js} Web Application](https://github.com/polkadot-js/apps)
51 | - [React Native Library from Parity](https://github.com/paritytech/react-native-substrate-sign)
52 | - [PolkaWallet Flutter SDK](https://github.com/polkawallet-io/sdk)
53 | - [Front End template](https://github.com/substrate-developer-hub/substrate-front-end-template) from Parity
54 |
55 | 2. Review tooling for data caching and query
56 | -[Useful API sidecar](https://github.com/paritytech/substrate-api-sidecar) from Parity
57 | -[Awesome Substrate tools section](https://substrate.io/ecosystem/resources/awesome-substrate/#tools)
58 |
59 | ### Tools and Tips
60 | - [polkadot{.js}](https://github.com/polkadot-js)
61 | - CLI tool: [`polkadot-js-api`](https://github.com/polkadot-js/tools/)
62 | - [Awesome Substrate](https://github.com/substrate-developer-hub/awesome-substrate)
63 | - Spend time learning about the [keys types and related commands](https://docs.substrate.io/v3/tools/subkey/)
64 |
--------------------------------------------------------------------------------
/docs/node-configuration.md:
--------------------------------------------------------------------------------
1 | # How to add a node to the permissioned hashed chain
2 |
3 |
4 | ## Prerequisites
5 | - [Install subkey](https://docs.substrate.io/v3/tools/subkey/#installation)
6 | - Have `sudo` access to the machine in which the node will run.
7 | - Download and compile the latest main branch of the project.
8 | - Have a mnemonic that is specified in the aura/grandpa authorities list (at the current time, it will be provided by us).
9 | - Have a directory route in mind where the node storage will be set; those directories will be created in later steps (we recommend a subdirectory within the hashed-substrate project: `./hashed-substrate/hashed-chaos-data/`). This route will be referred to as the `base path`.
10 |
11 | Additionally, most of the provided commands are executed on the project directory:
12 |
13 | ```bash
14 | cd hashed-substrate/
15 | ```
16 |
17 | ## Generate node key
18 | A node key must be generated in order to identify the nodes that are connected to the network.
19 | ```
20 | subkey generate-node-key
21 | ```
22 | It will output two rows of information looking something like this:
23 | ```bash
24 | 12D3KooWBmAwcd4PJNJvfV89HwE48nwkRmAgo8Vy3uQEyNNHBox2 # this is PeerId.
25 | c12b6d18942f5ee8528c8e2baf4e147b5c5c18710926ea492d09cbd9f6c9f82a # This is node-key.
26 | ```
27 | - The `PeerId` is what the other nodes see, while the `node-key` should remain private, it will be specified on the command that runs the node.
28 |
29 | - It is recommended to store both outputs on a safe place.
30 |
31 | - Please share the generated `PeerId` with a Hashed team member, as it will be used for creating a link between your node and the provided validator account.
32 |
33 |
34 | ## Insert the provided mnemonic to the node's local storage
35 |
36 | Next, the provided validator mnemonic has to be inserted in the node storage (aka. `base path`), replace the `--suri` option content with the provided mnemonic:
37 |
38 | ```bash
39 | sudo ./target/release/hashed key insert --base-path hashed-chaos-data --chain ./chaos2.json --scheme sr25519 --suri "your mnemonic goes here" --key-type aura
40 | ```
41 | - Please note that the `--base-path` option specifies the suggested directory where the node will store all the chain data, and it should be changed accordingly in case another one is desired.
42 | - The command is executed on the project's directory.
43 | - The latest chain spec is `chaos2.json`, as specified in the command.
44 |
45 | If the process is successful, the specified `base path` should be created and the key inserted:
46 |
47 | ```bash
48 | ls hashed-chaos-data/chains/chaos/keystore/
49 | # The command should output a file named something like "6175726...", the name might be different
50 | ```
51 |
52 | ## Starting the node
53 |
54 | Now all that is left is to boot up the node, replacing the `--node-key` content with the generated node-key:
55 | ```bash
56 | sudo ./target/release/hashed --base-path hashed-chaos-data --chain chaos2.json --node-key= --rpc-external --rpc-cors all --rpc-methods=unsafe --no-mdns --validator --bootnodes /ip4/206.221.189.10/tcp/30335/p2p/12D3KooWQxwQyQ3BaCs5tweoTmHNWHbpHePZt6P9SscBps1FWsUc --offchain-worker always
57 | ```
58 |
59 | ## References
60 | [Permissioned network tutorial](https://docs.substrate.io/tutorials/v3/permissioned-network/)
--------------------------------------------------------------------------------
/docs/pallets-review/fruniques.md:
--------------------------------------------------------------------------------
1 | # Fruniques pallet
2 |
3 | ## Spawn mechanism
4 |
5 | Taken from #1
6 |
7 | `Fruniques` is a stateful pallet. It needs to store additional data to maintain various relationships and state. We need to design/build the data structure for this additional state, as described below.
8 |
9 | There are a few NFT protocols in the Polkadot ecosystem: https://wiki.polkadot.network/docs/learn-nft
10 |
11 | Of these, we should build to the [`Uniques` ](https://wiki.polkadot.network/docs/learn-nft#uniques) patterns. It is the implementation from Parity and I believe the most recent. It is the only one compatible with Statemint/Statemine. We can build to multiple protocols if it makes sense, but let's start with `Uniques`.
12 |
13 | In addition to a regular `Unique`, a [`Frunique`](https://hashed.systems/hashed-chain) needs to store a reference to the parent, a different `Unique`. There also needs to be a heuristic for specifying if metadata is inherited from the parent or not. It seems like Metadata is a set of Key:Value pairs that can be assigned at the `class` level (a group or collection of NFTs) and at the `instance` level (a single NFT).
14 |
15 | Here's the function `set_attribute`:
16 | https://github.com/paritytech/substrate/blob/master/frame/uniques/src/lib.rs#L959
17 |
18 | Let's map the cannabis lifecycle.
19 | > NOTE: the cannabis use case may be able to be implemented with a lighter weight protocol, but it seems like it might be handy to use the same structure
20 | 1. Seeds come from a vendor as a package with a count, e.g. 100 seeds in a bag. This bag is an `InstanceId` even though it actually contains 100 seeds.
21 | 2. Seeds that germinate get cubed; others are scrapped.
22 | 3. When a seed is cubed, it receives its own `InstanceID` (I've been calling this a `spawn` function) for the first time. The count of seeds that did not germinate should be tracked, but not individually, and they are scrapped.
23 | 4. Successful cubed seeds become mother plants; perhaps through some iteration or trial/error to discover most productive mother(s).
24 | 5. Mother plants produce clones (and may produce flower directly).
25 | 7. The parent-->child relationship is well represented as a [Directed Acyclic Graph](https://hazelcast.com/glossary/directed-acyclic-graph), which is what we are building on chain.
26 | 8. Clones may be sold directly to clone buyers.
27 | 7. Clones produce flower, measured in weight. When flower is harvested, the weight values of the material are recorded as continuous value. So the `InstanceId` would map this specific `bag of weed`, and there would also be a data element for weight.
28 |
29 | The sum of this continuous value for all peers should always equal the continuous value of the parent. This is a critical feature that maintains the economic hierarchy of the NFTs. Tax credits can be subdivided based on this continuous value, but just like the weed, none can be lost or compromised along the way. This feature - the `NFT Rollup` enables many use cases.
30 |
31 | 9. Flower gets tested, and results are implied across that entire harvest/mother? The test results include a set of files and also a set of values. We need a structure to assign this data/metadata across the appropriate `InstanceIds`.
32 | 10. Flower is sold to dispensaries.
33 |
34 | - [ ] Research and prototype a pallet data storage mapping to hold the appropriate data to maintain the hierarchy and enforce the aggregation rules.
35 |
36 | 
37 |
--------------------------------------------------------------------------------
/docs/pallets-review/gated-marketplace.md:
--------------------------------------------------------------------------------
1 | # Gated Marketplace
2 |
3 | ## Description
4 |
5 | A simple description of the project
6 |
7 | ## TOC
8 |
9 | - [Gated Marketplace](#gated-marketplace)
10 | - [Description](#description)
11 | - [TOC](#toc)
12 | - [Setup](#setup)
13 | - [Order Part of an NFT](#order-part-of-an-nft)
14 | - [Complete/Confirm Order](#completeconfirm-order)
15 | - [Order Settlement](#order-settlement)
16 | - [Approve Redemption Specialists](#approve-redemption-specialists)
17 | - [Request Redemption](#request-redemption)
18 | - [Asset Manager](#asset-manager)
19 | - [Code Contributors](#code-contributors)
20 |
21 | ## Setup
22 |
23 | To initialize the project make sure that you are running a local node of the solo-chain,
24 | then you need to sign as Sudo the extrinsic `initial_setup()`.
25 |
26 | ## Order Part of an NFT
27 |
28 | To order a fraction of an NFT you need to specify the percentage of the NFT that will be sold; keep in mind that the percentage is measured as an integer from 1 to 100.
29 |
30 | To do that you need to call the `enlist_sell_offer` extrinsic.
31 |
32 | Keep in mind that you need to create a marketplace and enroll participants as well as create the fruniques.
33 |
34 | ## Complete/Confirm Order
35 |
36 | To complete the order, you need to call the extrinsic `take_sell_offer` with the user that will buy the NFT, the pallet in the background is in charge of distributing the price and dividing the NFT, as well as paying the fees for the marketplace.
37 |
38 | ## Order Settlement
39 |
40 | To check that the fees were distributed, we can look at the block transactions, which should show a transfer to the owner of the marketplace, while the creator of the order receives the price less the fees.
41 |
42 | In this example we sold an NFT for 10 currency units, so the admin of the market receives 1 unit and the rest of the price goes to the seller.
43 |
44 | 
45 |
46 | ## Approve Redemption Specialists
47 |
48 | The redemption specialists are users that are in charge of making all the IRL process related to the exchange of the tax credit, for the moment the admin of the marketplace takes this responsibility.
49 |
50 | ## Request Redemption
51 |
52 | When a customer buys a tax credit, the customer is allowed to make a request to redeem the NFT. This process takes place on the marketplace, as the redemption specialist should take the responsibility of making the transaction in real life. Also, redeeming an NFT causes it to lose the ability to spawn new items and freezes it, indicating that the NFT does not have any value left.
53 |
54 | ## Asset Manager
55 |
56 | The asset manager is responsible for actually making the transaction, at the moment, the marketplace takes the responsibility of making the transaction as it changes the storage maps in order to move the NFT information into a redeemed version of it.
57 |
58 | ## Code Contributors
59 |
60 | Made with ❤️ by:
61 |
62 |
63 |
64 |
65 |
76 |
77 |
78 |
79 |
80 |
81 |
--------------------------------------------------------------------------------
/docs/pallets-review/gatedmarketplace-fees.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashed-io/hashed-substrate/96d68391b8c0ce9c2e03f7cf245fe920cf329875/docs/pallets-review/gatedmarketplace-fees.png
--------------------------------------------------------------------------------
/docs/pallets-review/recovery.md:
--------------------------------------------------------------------------------
1 | # Recovery workflow on polkadotjs api (javascript)
2 | Below is a workflow for Recovery. You can run them as Steve and another account you have access to (you have the mnemonic) that will recover Steve's account; that account will be referred to as ``.
3 |
4 | Additionally, your contacts/friends must participate in the account retrieval; their public keys shall be referred to as `friend_1,friend_2,...,friend_n`
5 |
6 | ## Basic setup
7 |
8 | ```javascript
9 | // Required imports
10 | const { ApiPromise, WsProvider } = require("@polkadot/api");
11 | const { Keyring } = require("@polkadot/keyring");
12 |
13 | /* Steve's info
14 | set SEED="bargain album current caught tragic slab identify squirrel embark black drip imitate"
15 | set ADDR="5HGZfBpqUUqGY7uRCYA6aRwnRHJVhrikn8to31GcfNcifkym"
16 | */
17 | async function main() {
18 | // Initialize the provider to connect to the local node
19 | const provider = new WsProvider("wss://n4.hashed.systems");
20 |
21 | // Create the API and wait until ready
22 | const api = await ApiPromise.create({ provider });
23 |
24 | // Construct the keyring after the API (crypto has an async init)
25 | const keyring = new Keyring({ type: "sr25519" });
26 | // Add Steve to our keyring with a hard-derived path (empty phrase, so uses dev)
27 | const steve = keyring.addFromUri(
28 | "bargain album current caught tragic slab identify squirrel embark black drip imitate"
29 | );
30 | console.log("Steve's keyring: ", steve.toJson());
31 |
32 | /* Insert the extrinsics and queries here. don't run more than 1 extrinsic per script execution*/
33 |
34 | }
35 |
36 | main()
37 | .catch(console.error)
38 | .finally(() => process.exit());
39 |
40 | ```
41 |
42 | ```bash
43 | # You can run the script by typing the following command on your terminal
44 | node .json
45 | ```
46 |
47 | ## Create a recovery
48 | At least 1 additional public key is needed to create the recovery.
49 |
50 | ```javascript
51 | /*--- Create recovery---*/
52 | const createRecovery = await api.tx.recovery.createRecovery(
53 | [
54 | ,
55 |
56 | ], 2, 0)
57 | .signAndSend(steve);
58 | console.log( createRecovery.toHex() );
59 | ```
60 |
61 | ## Initiate recovery
62 |
63 | ```javascript
64 | const new_account = keyring.addFromUri();
65 | const initiateRecovery = await api.tx.recovery
66 | .initiateRecovery(steve.address).signAndSend(new_account);
67 | console.log(initiateRecovery.toHex());
68 | ```
69 |
70 | ## Vouch recovery
71 | Your friends will have to vouch for that recovery, in this case, it's specified that 2 out of 2 registered friends must sign a vouch transaction:
72 |
73 | ```javascript
74 | const friend_1 = keyring.addFromUri(
75 |
76 | );
77 | const f1_vouch = await api.tx.recovery
78 | .vouchRecovery(
79 | "5HGZfBpqUUqGY7uRCYA6aRwnRHJVhrikn8to31GcfNcifkym",
80 |
81 | )
82 | .signAndSend(friend_1);
83 | console.log(f1_vouch.toHex());
84 | ```
85 |
86 | You can check your recovery status with
87 |
88 | ```javascript
89 | const getActiveRecovery = await api.query.recovery.
90 | activeRecoveries.entries(steve.address);
91 | console.log(getActiveRecovery.map(
92 | ([k,v])=>{ return{key: k.toHuman(), val: v.toHuman()} })
93 | );
94 | ```
95 |
96 | ## Claim recovery
97 |
98 | ```javascript
99 | const claimRecovery = await api.tx.recovery.claimRecovery(steve.address).signAndSend(new_account);
100 | console.log(claimRecovery.toHex());
101 | ```
102 |
103 | ## Close recovery
104 |
105 | ```javascript
106 | const closeRecovery = await api.tx.recovery
107 | .asRecovered(
108 | steve.address,
109 | api.tx.recovery.closeRecovery(new_account.address)
110 | )
111 | .signAndSend(new_account);
112 | console.log(closeRecovery.toHex());
113 | ```
114 |
115 | ## Remove recovery config
116 |
117 | ```javascript
118 | const removeRecovery = await api.tx.recovery
119 | .asRecovered(
120 | steve.address,
121 | api.tx.recovery.removeRecovery()
122 | ).signAndSend(new_account);
123 | ```
124 |
125 | ## Recover all funds
126 |
127 | ```javascript
128 | const transferAll = await api.tx.recovery
129 | .asRecovered(
130 | steve.address,
131 | api.tx.balances.transferAll(new_account.address,false)
132 | )
133 | .signAndSend(new_account);
134 | console.log(transferAll.toHex());
135 | ```
--------------------------------------------------------------------------------
/docs/pallets-review/uniques.md:
--------------------------------------------------------------------------------
1 | Below is a workflow for Uniques. You can run them as Steve.
2 |
3 | ```
4 | # steve's info
5 | set SEED="bargain album current caught tragic slab identify squirrel embark black drip imitate"
6 | set ADDR="5HGZfBpqUUqGY7uRCYA6aRwnRHJVhrikn8to31GcfNcifkym"
7 | ```
8 | ### Install `polkadot-js-api`
9 | ```bash
10 | yarn add @polkadot/api
11 | ```
12 | ### Check Steve's Identity for awareness
13 | ```bash
14 | polkadot-js-api --ws wss://n1.hashed.systems query.identity.identityOf 5HGZfBpqUUqGY7uRCYA6aRwnRHJVhrikn8to31GcfNcifkym
15 | ```
16 | ### Create a new class of `Uniques`/(NFTs)
17 | ```bash
18 | polkadot-js-api --ws wss://n1.hashed.systems tx.uniques.create 1 5HGZfBpqUUqGY7uRCYA6aRwnRHJVhrikn8to31GcfNcifkym --seed "bargain album current caught tragic slab identify squirrel embark black drip imitate"
19 | ```
20 | ### Set a Class Attribute
21 | ```bash
22 | polkadot-js-api --ws wss://n1.hashed.systems tx.uniques.setAttribute 1 null "project" "cannabis" --seed "bargain album current caught tragic slab identify squirrel embark black drip imitate"
23 | ```
24 | ### Mint a New Unique, class=1, id=0, Steve as owner
25 | ```bash
26 | polkadot-js-api --ws wss://n1.hashed.systems tx.uniques.mint 1 0 5HGZfBpqUUqGY7uRCYA6aRwnRHJVhrikn8to31GcfNcifkym --seed "bargain album current caught tragic slab identify squirrel embark black drip imitate"
27 | ```
28 | ### Set an Instance Attribute, class=1, id=0, key=label
29 | ```bash
30 | polkadot-js-api --ws wss://n1.hashed.systems tx.uniques.setAttribute 1 0 "label" "100 seeds of Runtz strain" --seed "bargain album current caught tragic slab identify squirrel embark black drip imitate"
31 | ```
32 | ### Mint a second Unique, a germinated seed as it is cubed
33 | ```bash
34 | polkadot-js-api --ws wss://n1.hashed.systems tx.uniques.mint 1 1 5HGZfBpqUUqGY7uRCYA6aRwnRHJVhrikn8to31GcfNcifkym --seed "bargain album current caught tragic slab identify squirrel embark black drip imitate"
35 | ```
36 | ### Set a Label of this one
37 | ```bash
38 | polkadot-js-api --ws wss://n1.hashed.systems tx.uniques.setAttribute 1 1 "label" "Plumply germinated Runtz sprout" --seed "bargain album current caught tragic slab identify squirrel embark black drip imitate"
39 | ```
40 | ### Query the Label of the new sprout
41 | ```bash
42 | polkadot-js-api --ws wss://n1.hashed.systems query.uniques.attribute 1 1 label
43 | ```
44 | ### Set `parent` Attribute
45 | ```bash
46 | polkadot-js-api --ws wss://n1.hashed.systems tx.uniques.setAttribute 1 1 "parent" "0" --seed "bargain album current caught tragic slab identify squirrel embark black drip imitate"
47 | ```
48 | ### Check Steve's Balance
49 | ```bash
50 | polkadot-js-api --ws wss://n1.hashed.systems query.uniques.account 5HGZfBpqUUqGY7uRCYA6aRwnRHJVhrikn8to31GcfNcifkym 1
51 | ```
52 |
53 | ```
54 | # steve's info
55 | set SEED="bargain album current caught tragic slab identify squirrel embark black drip imitate"
56 | set ADDR="5HGZfBpqUUqGY7uRCYA6aRwnRHJVhrikn8to31GcfNcifkym"
57 | ```
--------------------------------------------------------------------------------
/docs/parachain/add-genesis-balance.md:
--------------------------------------------------------------------------------
1 | # Add your key to Genesis Spec for MD5
2 |
3 | 1. Install `subkey`
4 | https://docs.substrate.io/reference/command-line-tools/subkey/
5 |
6 |
7 | 2. Generate a key
8 | ```bash
9 | subkey generate
10 | ```
11 |
12 | Output looks like this:
13 | ```
14 | Secret phrase: pear afraid genre damage fury visa gentle divert vocal risk local boil
15 | Network ID: substrate
16 | Secret seed: 0xddb2eb2b38cf69a0db0397e9aaf47bb48d71437d037a225be92acf178db3810c
17 | Public key (hex): 0x70140a32dbd165c862b5d1a51b8cebb4ffd07a92ab72fc0beef7c220b8050a5a
18 | Account ID: 0x70140a32dbd165c862b5d1a51b8cebb4ffd07a92ab72fc0beef7c220b8050a5a
19 | Public key (SS58): 5EbfB1K1xes3uywAZ5MwXZc1vUZMYbGZuMiY5BojSWs2r7FD
20 | SS58 Address: 5EbfB1K1xes3uywAZ5MwXZc1vUZMYbGZuMiY5BojSWs2r7FD
21 | ```
22 |
23 | 3. Edit `node/src/chain_spec/md5.rs`
24 |
25 | In the genesis configuration, there is a vector of addresses:
26 |
27 | ```rust
28 | vec![
29 | // 5HgAxuAcEybo448w5BZdoceCuHMAbEW9AetBKsj9s5GEBZT3
30 | hex!["f83a0218e100ce3ede12c5d403116ef034124c62b181fff6935403cea9396d2f"].into(),
31 | // 5DkJvQp2gqHraWZU1BNCDxEKTQHezn2Qy7z5hLPksUdjtEG9
32 | hex!["4a70d789b0f0897e0880e8d3d532187ac77cbda04228cfadf8bededdd0b1005e"].into(),
33 | get_account_id_from_seed::("Alice"),
34 | get_account_id_from_seed::("Bob"),
35 | ```
36 |
37 | Add a new `hex!` line with the hex public key from `subkey`. Be sure to remove the `0x` prefix.
38 |
39 | 4. Recompile, test, push
40 |
--------------------------------------------------------------------------------
/docs/parachain/quick-start.md:
--------------------------------------------------------------------------------
1 | # Quick Start
2 | This is a quick guide on connecting the parachain to a local testnet relay chain.
3 |
4 | # Launch the Relay Chain
5 | ```bash
6 | cd ~/github.com/paritytech
7 |
8 | git clone https://github.com/paritytech/polkadot
9 | cd polkadot
10 |
11 | cargo build --release
12 |
13 | # Generate a raw chain spec
14 | ./target/release/polkadot build-spec --chain rococo-local --disable-default-bootnode --raw > ~/github.com/paritytech/polkadot/rococo-custom-2-raw.json
15 |
16 | # Alice
17 | ./target/release/polkadot --alice --validator --base-path /tmp/relay/alice --chain ~/github.com/paritytech/polkadot/rococo-custom-2-raw.json --port 30333 --ws-port 9944
18 |
19 | # Bob (In a separate terminal)
20 | ./target/release/polkadot --bob --validator --base-path /tmp/relay/bob --chain ~/github.com/paritytech/polkadot/rococo-custom-2-raw.json --port 30334 --ws-port 9945
21 | ```
22 |
23 | # Reserve the Para ID
24 | Go to https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A9944#/parachains/parathreads
25 |
26 | and Click `+ParaID`
27 |
28 | # Launch the Parachain
29 | ```bash
30 |
31 | cd ~/github.com/hashed-io
32 |
33 | git clone https://github.com/hashed-io/hashed-parachain
34 | cd hashed-parachain
35 |
36 | cargo build --release
37 |
38 | ./target/release/hashed-parachain build-spec --chain md5 --disable-default-bootnode > md5-local-parachain.json
39 | ```
40 |
41 | # Add the ParaID
42 | Update `md5-local-parachain.json` and change the parachain ID to 2000 in two places.
43 |
44 | ```json
45 | // --snip--
46 | "para_id": 2000,
47 | // --snip--
48 | "parachainInfo": {
49 | "parachainId": 2000
50 | },
51 | // --snip--
52 | ```
53 |
54 | # Build the Raw Spec File
55 | ```bash
56 | # build raw spec
57 | ./target/release/hashed-parachain build-spec --chain md5-local-parachain.json --raw --disable-default-bootnode > md5-local-parachain-raw.json
58 | ```
59 |
60 | # Building genesis state and wasm files
61 | ```bash
62 | ./target/release/hashed-parachain export-genesis-state --chain md5-local-parachain-raw.json > md5-genesis-head
63 |
64 | ./target/release/hashed-parachain export-genesis-wasm --chain md5-local-parachain-raw.json > md5-wasm
65 | ```
66 |
67 | # Start Collator
68 | ```bash
69 | ./target/release/hashed-parachain \
70 | --alice \
71 | --collator \
72 | --force-authoring \
73 | --chain md5-local-parachain-raw.json \
74 | --base-path /tmp/parachain/alice \
75 | --port 40333 \
76 | --ws-port 8844 \
77 | -- \
78 | --execution wasm \
79 | --chain ~/github.com/paritytech/polkadot/rococo-custom-2-raw.json \
80 | --port 30343 \
81 | --ws-port 9977
82 |
83 | ```
84 |
85 | ## Sudo Register the parachain
86 | 
87 |
88 |
89 | ### Purging the Chains
90 | ```bash
91 | # Purge a chain
92 | ./target/release/hashed-parachain \
93 | purge-chain \
94 | --base-path /tmp/parachain/alice \
95 | --chain ~/github.com/hashed-io/hashed-parachain/md5-local-parachain-raw.json
96 |
97 | # Purge relay chain
98 | ./target/release/polkadot purge-chain --base-path /tmp/relay/alice --chain ~/github.com/paritytech/polkadot/rococo-custom-2-raw.json
99 |
100 | # Sometimes I use this:
101 | rm -rf /tmp/relay && rm -rf /tmp/parachain
102 | ```
103 |
--------------------------------------------------------------------------------
/docs/parachain/rococo.md:
--------------------------------------------------------------------------------
1 | # Connecting MD5 to Rococo
2 |
3 | The MD5 Network is `ParaId = 4088` for now. We are using a temporary status. Once we have a Kusama parachain slot, we can apply for a Rococo permanent slot for testing. This is hard-coded in the `md5.rs` chain_spec currently.
4 |
5 | # Create the Rococo spec (could also download one)
6 | ```bash
7 | cd ~/github.com/paritytech
8 |
9 | git clone https://github.com/paritytech/polkadot
10 | cd polkadot
11 |
12 | cargo build --release
13 |
14 | # Generate a raw chain spec
15 | ./target/release/polkadot build-spec --chain rococo --disable-default-bootnode --raw > ~/github.com/paritytech/polkadot/rococo-raw.json
16 | ```
17 |
18 | # Build the Collator
19 | ```bash
20 |
21 | cd ~/github.com/hashed-io
22 |
23 | git clone https://github.com/hashed-io/hashed-parachain
24 | cd hashed-parachain
25 |
26 | cargo build --release
27 | ```
28 |
29 | # Build the `md5-spec-raw.json` file using defaults
30 |
31 | ```bash
32 | # build raw spec
33 | ./target/release/hashed-parachain build-spec --chain md5 --raw --disable-default-bootnode > md5-spec-raw.json
34 | ```
35 |
36 | # Building genesis state and wasm files
37 | ```bash
38 | ./target/release/hashed-parachain export-genesis-state --chain resources/md5-spec-raw.json > md5-genesis-head
39 | ./target/release/hashed-parachain export-genesis-wasm --chain resources/md5-spec-raw.json > md5-wasm
40 | ```
41 |
42 | # Start Collator
43 | ```bash
44 | ./target/release/hashed-parachain \
45 | --alice \
46 | --collator \
47 | --force-authoring \
48 | --chain resources/md5-spec-raw.json \
49 | --base-path ~/chain-data/md5 \
50 | --port 40333 \
51 | --ws-port 8844 \
52 | -- \
53 | --execution wasm \
54 | --chain ~/github.com/paritytech/polkadot/rococo-raw.json \
55 | --port 30343 \
56 | --ws-port 9977
57 | ```
58 |
59 | # Register the parachain
60 |
61 | ### In Process
--------------------------------------------------------------------------------
/docs/rmrk-commands.md:
--------------------------------------------------------------------------------
1 | Create a Collection
2 | ```
3 | polkadot-js-api --seed "//Alice" tx.rmrkCore.createCollection "my metadata" 45 AFLOAT
4 | ```
5 |
6 | Query a Collection
7 | ```
8 | polkadot-js-api --seed "//Alice" query.rmrkCore.collections 1
9 | ```
10 |
11 | Query an NFT
12 | ```
13 | polkadot-js-api --seed "//Alice" query.rmrkCore.nfts 0 0
14 | ```
15 |
16 | Mint an NFT
17 | ```
18 | polkadot-js-api --seed "//Alice" tx.rmrkCore.mintNft 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY 0 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY 10000 "Sams tax credit"
19 | ```
20 |
21 | Send an NFT
22 | ```
23 | polkadot-js-api --seed "//Alice" tx.rmrkCore.send 0 0 '{"AccountId":"5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty"}'
24 | ```
25 |
26 | Set a Property on an NFT
27 | ```
28 | polkadot-js-api --seed "//Alice" tx.rmrkCore.setProperty 0 1 "NFT Type" "Afloat Tax Credit"
29 | polkadot-js-api --seed "//Alice" tx.rmrkCore.setProperty 0 1 "Title" "My VA Land Prez Credit"
30 | polkadot-js-api --seed "//Alice" tx.rmrkCore.setProperty 0 1 "State" "Virginia"
31 | polkadot-js-api --seed "//Alice" tx.rmrkCore.setProperty 0 1 "Tax Credit Type" "Virginia Land Preservation"
32 | polkadot-js-api --seed "//Alice" tx.rmrkCore.setProperty 0 1 "Entity Type" "Individual"
33 | ```
34 |
35 | Set a Resource on an NFT
36 | ```
37 | polkadot-js-api --seed "//Alice" tx.rmrkCore.addBasicResource 0 1 "BasicResource"
38 | polkadot-js-api --seed "//Alice" tx.rmrkCore.setProperty 0 1 "Title" "My VA Land Prez Credit"
39 | polkadot-js-api --seed "//Alice" tx.rmrkCore.setProperty 0 1 "State" "Virginia"
40 | polkadot-js-api --seed "//Alice" tx.rmrkCore.setProperty 0 1 "Tax Credit Type" "Virginia Land Preservation"
41 | polkadot-js-api --seed "//Alice" tx.rmrkCore.setProperty 0 1 "Entity Type" "Individual"
42 | ```
43 |
44 |
--------------------------------------------------------------------------------
/docs/road-map-gantt.iuml:
--------------------------------------------------------------------------------
1 | @startgantt RoadMap
2 | printscale weekly
3 | Project starts the 1st of May 2022
4 | hide ressources footbox
5 | today is 12 days after start and is colored in #AAF
6 |
7 | -- Native Bitcoin Vaults (NBV) --
8 | [NBV MVP Development] as [nbv-dev] on {Max, Abel, Seb, Chema} lasts 45 days
9 | [nbv-dev] is colored in Yellow/Black
10 | [NBV on Hashed Network] happens after [nbv-dev]'s end
11 |
12 | -- Afloat --
13 | [Afloat MVP Migration] as [afloat-dev] lasts 90 days
14 | [afloat-dev] is colored in Yellow/Black
15 | [Privacy Implementation] as [privacy-dev] lasts 60 days
16 | [Establish Bank Relationship] as [banking] lasts 30 days
17 | [Banking Implementation] as [banking-dev] lasts 30 days
18 | [privacy-dev] ends at [afloat-dev]'s end
19 | [banking-dev] ends at [afloat-dev]'s end
20 | [banking] -> [banking-dev]
21 |
22 | -- Proxy --
23 | [Proxy Phase 1 Dev] as [pot] lasts 2 weeks
24 | [pot] is colored in Coral/Green
25 | [Proxy Live on Telos] as [p-live] happens after [pot]'s end
26 | [Proxy Migration] as [p-mig] starts at [afloat-dev]'s end
27 | [p-mig] lasts 90 days
28 | [Proxy Live on Hashed Network] happens after [p-mig]'s end
29 |
30 | -- LeaseX --
31 | [LeaseX Design] as [l-design] lasts 2 weeks
32 | [l-design] is colored in Coral/Green
33 | [LeaseX MVP Development] as [l-dev] lasts 90 days
34 | [l-dev] starts at [afloat-dev]'s end
35 | [l-design] -> [l-dev]
36 |
37 | -- Digitally Native Organizations (Creation and Liquidity) --
38 | [DNO Creator Migration] as [dno-dev] lasts 45 days
39 | [dno-dev] starts at [l-dev]'s end
40 | [DNOs on Hashed Network] happens at [dno-dev]'s end
41 |
42 | -- Triple Entry Accounting --
43 | [Accounting MVP Migration] as [a-mig] lasts 90 days
44 | [a-mig] starts at [dno-dev]'s end
45 | [Accounting on Hashed Network] happens at [a-mig]'s end
46 |
47 | -- Hypha --
48 | [Hypha Wallet] as [h-wallet] on {HH Partners} lasts 60 days
49 | [Wallet Release] happens at [h-wallet]'s end
50 | [Hypha Migration] as [h-mig] starts at [h-wallet]'s end
51 | [h-mig] lasts 90 days
52 | [Hypha MVP Live on Hashed Network] happens at [h-mig]'s end
53 |
54 | -- Parachain --
55 | [Parachain Build] as [para-dev] lasts 120 days
56 | [nbv-dev] -> [para-dev]
57 | [Kusama Launch] as [ksm] happens at [para-dev]'s end
58 | [Polkadot Launch] as [dot] happens on 2023-02-02
59 |
60 | @endgantt
--------------------------------------------------------------------------------
/docs/rust-setup.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Installation
3 | ---
4 |
5 | This page will guide you through the steps needed to prepare a computer for development with the
6 | Substrate Node Template. Since Substrate is built with
7 | [the Rust programming language](https://www.rust-lang.org/), the first thing you will need to do is
8 | prepare the computer for Rust development - these steps will vary based on the computer's operating
9 | system. Once Rust is configured, you will use its toolchains to interact with Rust projects; the
10 | commands for Rust's toolchains will be the same for all supported, Unix-based operating systems.
11 |
12 | ## Unix-Based Operating Systems
13 |
14 | Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples
15 | in the Substrate [Tutorials](https://docs.substrate.io/tutorials/v3) and
16 | [How-to Guides](https://docs.substrate.io/how-to-guides/v3) use Unix-style terminals to demonstrate
17 | how to interact with Substrate from the command line.
18 |
19 | ### macOS
20 |
21 | Open the Terminal application and execute the following commands:
22 |
23 | ```bash
24 | # Install Homebrew if necessary https://brew.sh/
25 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
26 |
27 | # Make sure Homebrew is up-to-date, install openssl and cmake
28 | brew update
29 | brew install openssl cmake
30 | ```
31 |
32 | ### Ubuntu/Debian
33 |
34 | Use a terminal shell to execute the following commands:
35 |
36 | ```bash
37 | sudo apt update && sudo apt install -y cmake pkg-config libssl-dev git build-essential clang libclang-dev curl
38 | ```
39 |
40 | ### Arch Linux
41 |
42 | Run these commands from a terminal:
43 |
44 | ```bash
45 | pacman -Syu --needed --noconfirm cmake gcc openssl-1.0 pkgconf git clang
46 | export OPENSSL_LIB_DIR="/usr/lib/openssl-1.0"
47 | export OPENSSL_INCLUDE_DIR="/usr/include/openssl-1.0"
48 | ```
49 |
50 | ### Fedora/RHEL/CentOS
51 |
52 | Use a terminal to run the following commands:
53 |
54 | ```bash
55 | # Update
56 | sudo dnf update
57 | # Install packages
58 | sudo dnf install cmake pkgconfig rocksdb rocksdb-devel llvm git libcurl libcurl-devel curl-devel clang
59 | ```
60 |
61 | ## Rust Developer Environment
62 |
63 | This project uses [`rustup`](https://rustup.rs/) to help manage the Rust toolchain. First install
64 | and configure `rustup`:
65 |
66 | ```bash
67 | # Install
68 | curl https://sh.rustup.rs -sSf | sh
69 | # Configure
70 | source ~/.cargo/env
71 | ```
72 |
73 | Finally, configure the Rust toolchain:
74 |
75 | ```bash
76 | rustup default stable
77 | rustup update nightly
78 | rustup update stable
79 | rustup target add wasm32-unknown-unknown --toolchain nightly
80 | ```
81 |
--------------------------------------------------------------------------------
/docs/traceability-tree.iuml:
--------------------------------------------------------------------------------
1 | @startuml tracability
2 | digraph G {
3 | rankdir=TB;
4 | notes [label="- Purple lines are checkpoints\n- Gold lines are new InstanceIds\n- +n are new InstanceIds\n- -n are scrapped InstanceIds",shape=note];
5 | node [shape=record];
6 |
7 | seeds_from_cookies [label="Seeds\n|{InstanceIds|Pieces}|{{1}|{100}}"];
8 | cubed [label="Cubed\n|{InstanceIds|Pieces}|{{75}|{75}}"];
9 | did_not_germinate [label="Did not Germinate\n|{InstanceIds|Pieces}|{{1}|{25}}"];
10 | discard [fillcolor=pink,style=filled,label="Discard\n|{InstanceIds|Pieces}|{{1}|{var}}"];
11 | mother_plants [label="Mother Plants\n|{InstanceIds|Pieces}|{{55}|{55}}"];
12 | clones [label="Clones\n|{InstanceIds|Pieces}|{{375}|{375}}"];
13 | clone_buyers [fillcolor=darkseagreen1,style=filled,label="Clone Buyers\n|{InstanceIds|Pieces}|{{75}|{75}}"];
14 | flower [label="Flower\n|{InstanceIds|Pieces}|{{1,783}|{1,783}}"];
15 | dispensaries [label="Dispensaries\n|{InstanceIds|Pieces}|{{1,505}|{1,505}}"];
16 | consumers [label="Consumer Scan\n|{InstanceIds|Pieces}|{{?}|{?}}"];
17 | pos_scan [fillcolor=darkseagreen1,style=filled,label="PoS Scan\n|{InstanceIds|Pieces}|{{1,273}|{1,273}}"];
18 |
19 | seeds_from_cookies -> cubed [label="+ 75 InstanceIds",color=gold4];
20 | cubed -> mother_plants [label="",color=purple];
21 | cubed -> discard [label=" -20",color=purple];
22 |
23 | seeds_from_cookies -> did_not_germinate [label="",color=purple];
24 | did_not_germinate -> discard [label=" (never became a InstanceIds)",color=purple];
25 |
26 | mother_plants -> clones [color=gold4,label=" + 320"];
27 | mother_plants -> discard [label="-7",color=purple];
28 |
29 | clones -> flower [label=" + 1,531",color=gold4];
30 | clones -> clone_buyers [color=purple];
31 | clones -> discard [label="-123",color=purple];
32 |
33 | flower -> dispensaries [label="",color=purple];
34 | flower -> discard [label="-278",color=purple];
35 |
36 | dispensaries -> consumers [label="",color=purple];
37 | dispensaries -> discard [label="-232",color=purple];
38 |
39 | consumers -> pos_scan [label="",color=purple];
40 | dispensaries -> pos_scan [label=" Some portion of InstanceIds will be scanned by consumers\n and all should have point-of-sale scan",color=purple];
41 | }
42 | @enduml
--------------------------------------------------------------------------------
/docs/uniques-commands.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | polkadot-js-api --seed "//Alice" tx.uniques.create 0 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY
5 |
6 |
7 | polkadot-js-api --seed "//Alice" tx.uniques.setAttribute 0 null "NFT Type" "Afloat Tax Credits"
8 |
9 | polkadot-js-api --seed "//Alice" tx.uniques.mint 0 0 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY
10 | polkadot-js-api --seed "//Alice" tx.uniques.setAttribute 0 0 "Title" "My VA Land Prez Credit"
11 | polkadot-js-api --seed "//Alice" tx.uniques.setAttribute 0 0 "Initial Amount USD" "50000"
12 | polkadot-js-api --seed "//Alice" tx.uniques.setAttribute 0 0 "Current Credit Balance USD" "50000"
13 | polkadot-js-api --seed "//Alice" tx.uniques.setAttribute 0 0 "Amount to be Transferred USD" "50000"
14 | polkadot-js-api --seed "//Alice" tx.uniques.setAttribute 0 0 "State" "Virginia"
15 | polkadot-js-api --seed "//Alice" tx.uniques.setAttribute 0 0 "Tax Credit Type" "Virginia Land Preservation"
16 | polkadot-js-api --seed "//Alice" tx.uniques.setAttribute 0 0 "Entity Type" "Individual"
17 | polkadot-js-api --seed "//Alice" tx.uniques.setAttribute 0 0 "Expiration Date" "2030"
--------------------------------------------------------------------------------
/docs/zombienet/before-completed-pod-err.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashed-io/hashed-substrate/96d68391b8c0ce9c2e03f7cf245fe920cf329875/docs/zombienet/before-completed-pod-err.png
--------------------------------------------------------------------------------
/docs/zombienet/completed-pod-err.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashed-io/hashed-substrate/96d68391b8c0ce9c2e03f7cf245fe920cf329875/docs/zombienet/completed-pod-err.png
--------------------------------------------------------------------------------
/docs/zombienet/testnet-running.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashed-io/hashed-substrate/96d68391b8c0ce9c2e03f7cf245fe920cf329875/docs/zombienet/testnet-running.png
--------------------------------------------------------------------------------
/docs/zombienet/timeout-err.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashed-io/hashed-substrate/96d68391b8c0ce9c2e03f7cf245fe920cf329875/docs/zombienet/timeout-err.png
--------------------------------------------------------------------------------
/docs/zombienet/zombienet-namespace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashed-io/hashed-substrate/96d68391b8c0ce9c2e03f7cf245fe920cf329875/docs/zombienet/zombienet-namespace.png
--------------------------------------------------------------------------------
/k8-manifests/collator-manifest.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: md5-collator
5 | namespace: hashed-network
6 | spec:
7 | selector:
8 | matchLabels:
9 | app: md5-collator
10 | template:
11 | metadata:
12 | labels:
13 | app: md5-collator
14 | spec:
15 | containers:
16 | - name: md5-collator
17 | imagePullPolicy: Always
18 | image: dappdever/hashed-substrate:latest
19 | volumeMounts:
20 | - name: md5-db
21 | mountPath: /var/www/hashed/md5
22 | # env:
23 | # - name: MNEMO
24 | # valueFrom:
25 | # secretKeyRef:
26 | # name: hashedsecrets
27 | # key: N4MNEMO
28 | # optional: false
29 | # - name: NODEKEY
30 | # valueFrom:
31 | # secretKeyRef:
32 | # name: hashedsecrets
33 | # key: N4NODEKEY
34 | # optional: false
35 | resources:
36 | limits:
37 | memory: "4G"
38 | cpu: "1.5"
39 | ports:
40 | - containerPort: 9933
41 | - containerPort: 9944
42 | - containerPort: 9946
43 | - containerPort: 30333
44 | - containerPort: 40333
45 | volumes:
46 | - name: md5-db
47 | persistentVolumeClaim:
48 | claimName: md5collator-persistentvolumeclaim
49 | ---
50 | apiVersion: v1
51 | kind: PersistentVolumeClaim
52 | metadata:
53 | name: md5collator-persistentvolumeclaim
54 | namespace: hashed-network
55 | spec:
56 | volumeMode: Filesystem
57 | accessModes: [ "ReadWriteOnce" ]
58 | resources:
59 | requests:
60 | storage: 50Gi
61 | storageClassName: standard
62 | ---
63 | apiVersion: v1
64 | kind: Service
65 | metadata:
66 | name: md5-collator
67 | namespace: hashed-network
68 | spec:
69 | selector:
70 | app: md5-collator
71 | type: NodePort
72 | ports:
73 | - name: collator-p2p-port
74 | port: 40333
75 | targetPort: 40333
76 | - name: collator-ws-port
77 | port: 9946
78 | targetPort: 9946
79 | - name: relay-p2p-port
80 | port: 30333
81 | targetPort: 30333
82 | - name: relay-ws-port
83 | port: 9944
84 | targetPort: 9944
85 | - name: rpc-port
86 | port: 9933
87 | targetPort: 9933
88 | ---
89 | # Uncomment if a persistent volume needs to be created manually
90 | # apiVersion: v1
91 | # kind: PersistentVolume
92 | # metadata:
93 | # name: md5-persistent-volume
94 | # namespace: hashed-network
95 | # spec:
96 | # capacity:
97 | # storage: 20Gi
98 | # volumeMode: Filesystem
99 | # accessModes:
100 | # - ReadWriteOnce
101 | # persistentVolumeReclaimPolicy: Retain
102 | # storageClassName: local-storage
103 | # local:
104 | # path: /var/www/hashed/
105 | # nodeAffinity:
106 | # required:
107 | # nodeSelectorTerms:
108 | # - matchExpressions:
109 | # - key: minikube.k8s.io/hostname
110 | # operator: In
111 | # values:
112 | # - minikube
--------------------------------------------------------------------------------
/k8-manifests/collator-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: md5-collator
5 | namespace: hashed-network
6 | spec:
7 | selector:
8 | app: md5-collator
9 | type: NodePort
10 | ports:
11 | - name: collator-p2p-port
12 | port: 40333
13 | targetPort: 40333
14 | - name: collator-ws-port
15 | port: 9946
16 | targetPort: 9946
17 | - name: relay-p2p-port
18 | port: 30333
19 | targetPort: 30333
20 | - name: relay-ws-port
21 | port: 9944
22 | targetPort: 9944
23 | - name: rpc-port
24 | port: 9933
25 | targetPort: 9933
--------------------------------------------------------------------------------
/k8-manifests/gcloud-ingress-example.yml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: md5-network
5 | namespace: hashed-network
6 | annotations:
7 | kubernetes.io/ingress.class: "nginx"
8 | cert-manager.io/cluster-issuer: "letsencrypt-prod"
9 | spec:
10 | tls:
11 | - hosts:
12 | - c1.md5.network
13 | secretName: md5-network
14 | rules:
15 | - host: c1.md5.network
16 | http:
17 | paths:
18 | - pathType: Prefix
19 | path: /p2p/
20 | backend:
21 | service:
22 | name: md5-collator
23 | port:
24 | number: 40333
25 | - pathType: Prefix
26 | path: /rp2p/
27 | backend:
28 | service:
29 | name: md5-collator
30 | port:
31 | number: 30333
32 | - pathType: Prefix
33 | path: /rws/
34 | backend:
35 | service:
36 | name: md5-collator
37 | port:
38 | number: 9944
39 | - pathType: Prefix
40 | path: /ws/
41 | backend:
42 | service:
43 | name: md5-collator
44 | port:
45 | number: 9946
46 | - pathType: Prefix
47 | path: /rpc/
48 | backend:
49 | service:
50 | name: md5-collator
51 | port:
52 | number: 9933
53 | - pathType: Exact
54 | path: /
55 | backend:
56 | service:
57 | name: md5-collator
58 | port:
59 | number: 9946
60 |
--------------------------------------------------------------------------------
/k8-manifests/hashed-manifest-example.yml:
--------------------------------------------------------------------------------
1 | # You can copy this file into hashed-manifest.yml so the changes are not tracked by git
2 | kind: Namespace
3 | apiVersion: v1
4 | metadata:
5 | name: hashed-network
6 | labels:
7 | name: hashed-network
8 | ---
9 | apiVersion: apps/v1
10 | kind: Deployment
11 | metadata:
12 | name: md5-network
13 | namespace: hashed-network
14 | spec:
15 | selector:
16 | matchLabels:
17 | app: md5-network
18 | template:
19 | metadata:
20 | labels:
21 | app: md5-network
22 | spec:
23 | containers:
24 | - name: md5-network
25 | imagePullPolicy: IfNotPresent
26 | image: abhashed/hashednet:latest
27 | command: ["bash"]
28 | args: ["scripts/start_node.sh"]
29 | volumeMounts:
30 | - name: md5-db
31 | mountPath: /var/www/hashed/hashed-chaos-data
32 | env:
33 | - name: MNEMO
34 | valueFrom:
35 | secretKeyRef:
36 | name: hashedsecrets
37 | key: N4MNEMO
38 | optional: false
39 | - name: NODEKEY
40 | valueFrom:
41 | secretKeyRef:
42 | name: hashedsecrets
43 | key: N4NODEKEY
44 | optional: false
45 | resources:
46 | limits:
47 | memory: "3G"
48 | cpu: "1.5"
49 | ports:
50 | - containerPort: 9933
51 | - containerPort: 9944
52 | - containerPort: 30333
53 | volumes:
54 | - name: md5-db
55 | persistentVolumeClaim:
56 | claimName: md5persistentvolumeclaim
57 | ---
58 | apiVersion: v1
59 | kind: PersistentVolumeClaim
60 | metadata:
61 | name: md5persistentvolumeclaim
62 | namespace: hashed-network
63 | spec:
64 | volumeMode: Filesystem
65 | accessModes: [ "ReadWriteOnce" ]
66 | resources:
67 | requests:
68 | storage: 20Gi
69 | storageClassName: standard
70 | ---
71 | apiVersion: v1
72 | kind: Service
73 | metadata:
74 | name: md5-network
75 | namespace: hashed-network
76 | spec:
77 | selector:
78 | app: md5-network
79 | type: NodePort
80 | ports:
81 | - name: p2p-port
82 | port: 30334
83 | targetPort: 30333
84 | - name: ws-port
85 | port: 9945
86 | targetPort: 9944
87 | - name: rpc-port
88 | port: 9934
89 | targetPort: 9933
90 | ---
91 | # Uncomment if a persistent volume needs to be created manually
92 | # apiVersion: v1
93 | # kind: PersistentVolume
94 | # metadata:
95 | # name: md5-persistent-volume
96 | # namespace: hashed-network
97 | # spec:
98 | # capacity:
99 | # storage: 20Gi
100 | # volumeMode: Filesystem
101 | # accessModes:
102 | # - ReadWriteOnce
103 | # persistentVolumeReclaimPolicy: Retain
104 | # storageClassName: local-storage
105 | # local:
106 | # path: /var/www/hashed/
107 | # nodeAffinity:
108 | # required:
109 | # nodeSelectorTerms:
110 | # - matchExpressions:
111 | # - key: minikube.k8s.io/hostname
112 | # operator: In
113 | # values:
114 | # - minikube
--------------------------------------------------------------------------------
/k8-manifests/hashed-secrets-manifest-example.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: hashedsecrets
5 | namespace: hashed-network
6 | type: Opaque
7 | data:
8 | N4MNEMO: aG9sYQ== # here goes the mnemonic and node key encoded in base64
9 | N4NODEKEY: aG9sYQ==
--------------------------------------------------------------------------------
/k8-manifests/zombienet-local.toml:
--------------------------------------------------------------------------------
1 | [relaychain]
2 | default_image = "docker.io/parity/polkadot:v0.9.28"
3 | default_command = "polkadot"
4 | default_args = [ "-lparachain=debug" ]
5 |
6 | chain = "rococo-local"
7 |
8 | [[relaychain.nodes]]
9 | name = "alice"
10 | validator = true
11 |
12 | [[relaychain.nodes]]
13 | name = "bob"
14 | validator = true
15 |
16 | [[parachains]]
17 | id = 2000
18 | cumulus_based = true
19 |
20 | [parachains.collator]
21 | name = "collator01"
22 | image = "docker.io/abhashed/test-collator"
23 | command = "hashed-parachain"
24 |
--------------------------------------------------------------------------------
/node/build.rs:
--------------------------------------------------------------------------------
1 | use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed};
2 |
3 | fn main() {
4 | generate_cargo_keys();
5 |
6 | rerun_if_git_head_changed();
7 | }
8 |
--------------------------------------------------------------------------------
/node/src/benchmarking.rs:
--------------------------------------------------------------------------------
1 | //! Setup code for [`super::command`] which would otherwise bloat that module.
2 | //!
3 | //! Should only be used for benchmarking as it may break in other contexts.
4 |
5 | use crate::service::FullClient;
6 |
7 | use hashed_runtime as runtime;
8 | use runtime::{AccountId, Balance, BalancesCall, SystemCall};
9 | use sc_cli::Result;
10 | use sc_client_api::BlockBackend;
11 | use sp_core::{Encode, Pair};
12 | use sp_inherents::{InherentData, InherentDataProvider};
13 | use sp_keyring::Sr25519Keyring;
14 | use sp_runtime::{OpaqueExtrinsic, SaturatedConversion};
15 |
16 | use std::{sync::Arc, time::Duration};
17 |
18 | /// Generates extrinsics for the `benchmark overhead` command.
19 | ///
20 | /// Note: Should only be used for benchmarking.
21 | pub struct RemarkBuilder {
22 | client: Arc,
23 | }
24 |
25 | impl RemarkBuilder {
26 | /// Creates a new [`Self`] from the given client.
27 | pub fn new(client: Arc) -> Self {
28 | Self { client }
29 | }
30 | }
31 |
32 | impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder {
33 | fn pallet(&self) -> &str {
34 | "system"
35 | }
36 |
37 | fn extrinsic(&self) -> &str {
38 | "remark"
39 | }
40 |
41 | fn build(&self, nonce: u32) -> std::result::Result {
42 | let acc = Sr25519Keyring::Bob.pair();
43 | let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic(
44 | self.client.as_ref(),
45 | acc,
46 | SystemCall::remark { remark: vec![] }.into(),
47 | nonce,
48 | )
49 | .into();
50 |
51 | Ok(extrinsic)
52 | }
53 | }
54 |
55 | /// Generates `Balances::TransferKeepAlive` extrinsics for the benchmarks.
56 | ///
57 | /// Note: Should only be used for benchmarking.
58 | pub struct TransferKeepAliveBuilder {
59 | client: Arc,
60 | dest: AccountId,
61 | value: Balance,
62 | }
63 |
64 | impl TransferKeepAliveBuilder {
65 | /// Creates a new [`Self`] from the given client.
66 | pub fn new(client: Arc, dest: AccountId, value: Balance) -> Self {
67 | Self { client, dest, value }
68 | }
69 | }
70 |
71 | impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder {
72 | fn pallet(&self) -> &str {
73 | "balances"
74 | }
75 |
76 | fn extrinsic(&self) -> &str {
77 | "transfer_keep_alive"
78 | }
79 |
80 | fn build(&self, nonce: u32) -> std::result::Result {
81 | let acc = Sr25519Keyring::Bob.pair();
82 | let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic(
83 | self.client.as_ref(),
84 | acc,
85 | BalancesCall::transfer_keep_alive {
86 | dest: self.dest.clone().into(),
87 | value: self.value.into(),
88 | }
89 | .into(),
90 | nonce,
91 | )
92 | .into();
93 |
94 | Ok(extrinsic)
95 | }
96 | }
97 |
98 | /// Create a transaction using the given `call`.
99 | ///
100 | /// Note: Should only be used for benchmarking.
101 | pub fn create_benchmark_extrinsic(
102 | client: &FullClient,
103 | sender: sp_core::sr25519::Pair,
104 | call: runtime::RuntimeCall,
105 | nonce: u32,
106 | ) -> runtime::UncheckedExtrinsic {
107 | let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
108 | let best_hash = client.chain_info().best_hash;
109 | let best_block = client.chain_info().best_number;
110 |
111 | let period = runtime::BlockHashCount::get()
112 | .checked_next_power_of_two()
113 | .map(|c| c / 2)
114 | .unwrap_or(2) as u64;
115 | let extra: runtime::SignedExtra = (
116 | frame_system::CheckNonZeroSender::::new(),
117 | frame_system::CheckSpecVersion::::new(),
118 | frame_system::CheckTxVersion::::new(),
119 | frame_system::CheckGenesis::::new(),
120 | frame_system::CheckEra::::from(sp_runtime::generic::Era::mortal(
121 | period,
122 | best_block.saturated_into(),
123 | )),
124 | frame_system::CheckNonce::::from(nonce),
125 | frame_system::CheckWeight::::new(),
126 | pallet_transaction_payment::ChargeTransactionPayment::::from(0),
127 | );
128 |
129 | let raw_payload = runtime::SignedPayload::from_raw(
130 | call.clone(),
131 | extra.clone(),
132 | (
133 | (),
134 | runtime::VERSION.spec_version,
135 | runtime::VERSION.transaction_version,
136 | genesis_hash,
137 | best_hash,
138 | (),
139 | (),
140 | (),
141 | ),
142 | );
143 | let signature = raw_payload.using_encoded(|e| sender.sign(e));
144 |
145 | runtime::UncheckedExtrinsic::new_signed(
146 | call.clone(),
147 | sp_runtime::AccountId32::from(sender.public()).into(),
148 | runtime::Signature::Sr25519(signature.clone()),
149 | extra.clone(),
150 | )
151 | }
152 |
153 | /// Generates inherent data for the `benchmark overhead` command.
154 | ///
155 | /// Note: Should only be used for benchmarking.
156 | pub fn inherent_benchmark_data() -> Result {
157 | let mut inherent_data = InherentData::new();
158 | let d = Duration::from_millis(0);
159 | let timestamp = sp_timestamp::InherentDataProvider::new(d.into());
160 |
161 | futures::executor::block_on(timestamp.provide_inherent_data(&mut inherent_data))
162 | .map_err(|e| format!("creating inherent data: {:?}", e))?;
163 | Ok(inherent_data)
164 | }
165 |
--------------------------------------------------------------------------------
/node/src/cli.rs:
--------------------------------------------------------------------------------
1 | use sc_cli::RunCmd;
2 |
3 | #[derive(Debug, clap::Parser)]
4 | pub struct Cli {
5 | #[clap(subcommand)]
6 | pub subcommand: Option,
7 |
8 | #[clap(flatten)]
9 | pub run: RunCmd,
10 | }
11 |
12 | #[derive(Debug, clap::Subcommand)]
13 | pub enum Subcommand {
14 | /// Key management cli utilities
15 | #[clap(subcommand)]
16 | Key(sc_cli::KeySubcommand),
17 |
18 | /// Build a chain specification.
19 | BuildSpec(sc_cli::BuildSpecCmd),
20 |
21 | /// Validate blocks.
22 | CheckBlock(sc_cli::CheckBlockCmd),
23 |
24 | /// Export blocks.
25 | ExportBlocks(sc_cli::ExportBlocksCmd),
26 |
27 | /// Export the state of a given block into a chain spec.
28 | ExportState(sc_cli::ExportStateCmd),
29 |
30 | /// Import blocks.
31 | ImportBlocks(sc_cli::ImportBlocksCmd),
32 |
33 | /// Remove the whole chain.
34 | PurgeChain(sc_cli::PurgeChainCmd),
35 |
36 | /// Revert the chain to a previous state.
37 | Revert(sc_cli::RevertCmd),
38 |
39 | /// Sub-commands concerned with benchmarking.
40 | #[clap(subcommand)]
41 | Benchmark(frame_benchmarking_cli::BenchmarkCmd),
42 |
43 | /// Try some command against runtime state.
44 | #[cfg(feature = "try-runtime")]
45 | TryRuntime(try_runtime_cli::TryRuntimeCmd),
46 |
47 | /// Try some command against runtime state. Note: `try-runtime` feature must be enabled.
48 | #[cfg(not(feature = "try-runtime"))]
49 | TryRuntime,
50 |
51 | /// Db meta columns information.
52 | ChainInfo(sc_cli::ChainInfoCmd),
53 | }
54 |
--------------------------------------------------------------------------------
/node/src/command.rs:173:6:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashed-io/hashed-substrate/96d68391b8c0ce9c2e03f7cf245fe920cf329875/node/src/command.rs:173:6
--------------------------------------------------------------------------------
/node/src/command_helper.rs:
--------------------------------------------------------------------------------
1 | // This file is part of Substrate.
2 |
3 | // Copyright (C) 2022 Parity Technologies (UK) Ltd.
4 | // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5 |
6 | // This program is free software: you can redistribute it and/or modify
7 | // it under the terms of the GNU General Public License as published by
8 | // the Free Software Foundation, either version 3 of the License, or
9 | // (at your option) any later version.
10 |
11 | // This program is distributed in the hope that it will be useful,
12 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | // GNU General Public License for more details.
15 |
16 | // You should have received a copy of the GNU General Public License
17 | // along with this program. If not, see <https://www.gnu.org/licenses/>.
18 |
19 | //! Contains code to setup the command invocations in [`super::command`] which would
20 | //! otherwise bloat that module.
21 |
22 | use crate::service::FullClient;
23 |
24 | use node_template_runtime as runtime;
25 | use runtime::SystemCall;
26 | use sc_cli::Result;
27 | use sc_client_api::BlockBackend;
28 | use sp_core::{Encode, Pair};
29 | use sp_inherents::{InherentData, InherentDataProvider};
30 | use sp_keyring::Sr25519Keyring;
31 | use sp_runtime::{OpaqueExtrinsic, SaturatedConversion};
32 |
33 | use std::{sync::Arc, time::Duration};
34 |
35 | /// Generates extrinsics for the `benchmark overhead` command.
36 | ///
37 | /// Note: Should only be used for benchmarking.
38 | pub struct BenchmarkExtrinsicBuilder {
39 | client: Arc,
40 | }
41 |
42 | impl BenchmarkExtrinsicBuilder {
43 | /// Creates a new [`Self`] from the given client.
44 | pub fn new(client: Arc) -> Self {
45 | Self { client }
46 | }
47 | }
48 |
49 | impl frame_benchmarking_cli::ExtrinsicBuilder for BenchmarkExtrinsicBuilder {
50 | fn remark(&self, nonce: u32) -> std::result::Result {
51 | let acc = Sr25519Keyring::Bob.pair();
52 | let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic(
53 | self.client.as_ref(),
54 | acc,
55 | SystemCall::remark { remark: vec![] }.into(),
56 | nonce,
57 | )
58 | .into();
59 |
60 | Ok(extrinsic)
61 | }
62 | }
63 |
64 | /// Create a transaction using the given `call`.
65 | ///
66 | /// Note: Should only be used for benchmarking.
67 | pub fn create_benchmark_extrinsic(
68 | client: &FullClient,
69 | sender: sp_core::sr25519::Pair,
70 | call: runtime::Call,
71 | nonce: u32,
72 | ) -> runtime::UncheckedExtrinsic {
73 | let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
74 | let best_hash = client.chain_info().best_hash;
75 | let best_block = client.chain_info().best_number;
76 |
77 | let period = runtime::BlockHashCount::get()
78 | .checked_next_power_of_two()
79 | .map(|c| c / 2)
80 | .unwrap_or(2) as u64;
81 | let extra: runtime::SignedExtra = (
82 | frame_system::CheckNonZeroSender::::new(),
83 | frame_system::CheckSpecVersion::::new(),
84 | frame_system::CheckTxVersion::::new(),
85 | frame_system::CheckGenesis::::new(),
86 | frame_system::CheckEra::::from(sp_runtime::generic::Era::mortal(
87 | period,
88 | best_block.saturated_into(),
89 | )),
90 | frame_system::CheckNonce::::from(nonce),
91 | frame_system::CheckWeight::::new(),
92 | pallet_transaction_payment::ChargeTransactionPayment::::from(0),
93 | );
94 |
95 | let raw_payload = runtime::SignedPayload::from_raw(
96 | call.clone(),
97 | extra.clone(),
98 | (
99 | (),
100 | runtime::VERSION.spec_version,
101 | runtime::VERSION.transaction_version,
102 | genesis_hash,
103 | best_hash,
104 | (),
105 | (),
106 | (),
107 | ),
108 | );
109 | let signature = raw_payload.using_encoded(|e| sender.sign(e));
110 |
111 | runtime::UncheckedExtrinsic::new_signed(
112 | call.clone(),
113 | sp_runtime::AccountId32::from(sender.public()).into(),
114 | runtime::Signature::Sr25519(signature.clone()),
115 | extra.clone(),
116 | )
117 | }
118 |
119 | /// Generates inherent data for the `benchmark overhead` command.
120 | ///
121 | /// Note: Should only be used for benchmarking.
122 | pub fn inherent_benchmark_data() -> Result {
123 | let mut inherent_data = InherentData::new();
124 | let d = Duration::from_millis(0);
125 | let timestamp = sp_timestamp::InherentDataProvider::new(d.into());
126 |
127 | timestamp
128 | .provide_inherent_data(&mut inherent_data)
129 | .map_err(|e| format!("creating inherent data: {:?}", e))?;
130 | Ok(inherent_data)
131 | }
132 |
--------------------------------------------------------------------------------
/node/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod chain_spec;
2 | pub mod rpc;
3 | pub mod service;
4 |
--------------------------------------------------------------------------------
/node/src/main.rs:
--------------------------------------------------------------------------------
1 | //! Substrate Node Template CLI library.
2 | #![warn(missing_docs)]
3 |
4 | mod chain_spec;
5 | #[macro_use]
6 | mod service;
7 | mod benchmarking;
8 | mod cli;
9 | mod command;
10 | mod rpc;
11 |
12 | fn main() -> sc_cli::Result<()> {
13 | command::run()
14 | }
15 |
--------------------------------------------------------------------------------
/node/src/rpc.rs:
--------------------------------------------------------------------------------
1 | //! A collection of node-specific RPC methods.
2 | //! Substrate provides the `sc-rpc` crate, which defines the core RPC layer
3 | //! used by Substrate nodes. This file extends those RPC definitions with
4 | //! capabilities that are specific to this project's runtime configuration.
5 |
6 | #![warn(missing_docs)]
7 |
8 | use std::sync::Arc;
9 |
10 | use hashed_runtime::{opaque::Block, AccountId, Balance, Index};
11 | use jsonrpsee::RpcModule;
12 | use sc_transaction_pool_api::TransactionPool;
13 | use sp_api::ProvideRuntimeApi;
14 | use sp_block_builder::BlockBuilder;
15 | use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
16 |
17 | pub use sc_rpc_api::DenyUnsafe;
18 |
19 | /// Full client dependencies.
20 | pub struct FullDeps {
21 | /// The client instance to use.
22 | pub client: Arc,
23 | /// Transaction pool instance.
24 | pub pool: Arc,
25 | /// Whether to deny unsafe calls
26 | pub deny_unsafe: DenyUnsafe,
27 | }
28 |
29 | /// Instantiate all full RPC extensions.
30 | pub fn create_full(
31 | deps: FullDeps,
32 | ) -> Result, Box>
33 | where
34 | C: ProvideRuntimeApi,
35 | C: HeaderBackend + HeaderMetadata + 'static,
36 | C: Send + Sync + 'static,
37 | C::Api: substrate_frame_rpc_system::AccountNonceApi,
38 | C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi,
39 | C::Api: BlockBuilder,
40 | P: TransactionPool + 'static,
41 | {
42 | use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
43 | use substrate_frame_rpc_system::{System, SystemApiServer};
44 |
45 | let mut module = RpcModule::new(());
46 | let FullDeps { client, pool, deny_unsafe } = deps;
47 |
48 | module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?;
49 | module.merge(TransactionPayment::new(client).into_rpc())?;
50 |
51 | // Extend this RPC with a custom API by using the following syntax.
52 | // `YourRpcStruct` should have a reference to a client, which is needed
53 | // to call into the runtime.
54 | // `module.merge(YourRpcTrait::into_rpc(YourRpcStruct::new(ReferenceToClient, ...)))?;`
55 |
56 | Ok(module)
57 | }
58 |
--------------------------------------------------------------------------------
/pallets/afloat/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-afloat"
3 | version = "4.0.0-dev"
4 | description = ""
5 | authors = ["Hashed
--------------------------------------------------------------------------------
/pallets/afloat/src/benchmarking.rs:
--------------------------------------------------------------------------------
1 | //! Benchmarking setup for pallet-afloat
2 |
3 | use super::*;
4 |
5 | #[allow(unused)]
6 | use crate::Pallet as Template;
7 | use frame_benchmarking::{benchmarks, whitelisted_caller};
8 | use frame_system::RawOrigin;
9 |
10 | benchmarks! {
11 | do_something {
12 | let s in 0 .. 100;
13 | let caller: T::AccountId = whitelisted_caller();
14 | }: _(RawOrigin::Signed(caller), s)
15 | verify {
16 | assert_eq!(Something::::get(), Some(s));
17 | }
18 |
19 | impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
20 | }
21 |
--------------------------------------------------------------------------------
/pallets/bitcoin-vaults/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-bitcoin-vaults"
3 | version = "4.0.0-dev"
4 | description = ""
5 | authors = ["Hashed ::get(), Some(s));
17 | }
18 |
19 | impl_benchmark_test_suite!(NBVStorage, crate::mock::new_test_ext(), crate::mock::Test);
20 | }
21 |
--------------------------------------------------------------------------------
/pallets/bitcoin-vaults/src/mock.rs:
--------------------------------------------------------------------------------
1 | use crate as pallet_bitcoin_vaults;
2 | use frame_support::{
3 | parameter_types,
4 | traits::{ConstU32, ConstU64},
5 | };
6 | use frame_system::EnsureRoot;
7 | //use frame_system as system;
8 | use pallet_balances;
9 | use sp_core::H256;
10 | //use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStore};
11 | use sp_runtime::{
12 | testing::{Header, TestXt},
13 | traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify},
14 | //RuntimeAppPublic,
15 | };
16 | type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic;
17 | type Block = frame_system::mocking::MockBlock;
18 | //use sp_runtime::generic::SignedPayload;
19 | use sp_core::sr25519::Signature;
20 |
21 | // Configure a mock runtime to test the pallet.
22 | frame_support::construct_runtime!(
23 | pub enum Test where
24 | Block = Block,
25 | NodeBlock = Block,
26 | UncheckedExtrinsic = UncheckedExtrinsic,
27 | {
28 | System: frame_system::{Pallet, Call, Config, Storage, Event},
29 | BitcoinVaults: pallet_bitcoin_vaults::{Pallet, Call, Storage, Event, ValidateUnsigned},
30 | Balances: pallet_balances::{Pallet, Call, Storage, Event},
31 | }
32 | );
33 |
34 | impl pallet_balances::Config for Test {
35 | type MaxLocks = ();
36 | type MaxReserves = ();
37 | type ReserveIdentifier = [u8; 8];
38 | type Balance = u64;
39 | type RuntimeEvent = RuntimeEvent;
40 | type DustRemoval = ();
41 | type ExistentialDeposit = ConstU64<1>;
42 | type AccountStore = System;
43 | type WeightInfo = ();
44 | }
45 |
46 | parameter_types! {
47 | pub const XPubLen: u32 = 166;
48 | pub const PSBTMaxLen: u32 = 2048;
49 | pub const MaxVaultsPerUser: u32 = 2;
50 | pub const MaxCosignersPerVault: u32 =3;
51 | pub const VaultDescriptionMaxLen: u32 = 200;
52 | pub const OutputDescriptorMaxLen: u32 = 2048;
53 | pub const MaxProposalsPerVault : u32 = 2;
54 | }
55 |
56 | impl pallet_bitcoin_vaults::Config for Test {
57 | type AuthorityId = pallet_bitcoin_vaults::types::crypto::TestAuthId;
58 | type RuntimeEvent = RuntimeEvent;
59 | type ChangeBDKOrigin = EnsureRoot;
60 | type XPubLen = XPubLen;
61 | type PSBTMaxLen = PSBTMaxLen;
62 | type MaxVaultsPerUser = MaxVaultsPerUser;
63 | type MaxCosignersPerVault = MaxCosignersPerVault;
64 | type VaultDescriptionMaxLen = VaultDescriptionMaxLen;
65 | type OutputDescriptorMaxLen = OutputDescriptorMaxLen;
66 | type MaxProposalsPerVault = MaxProposalsPerVault;
67 | }
68 |
69 | type Extrinsic = TestXt;
70 | type AccountId = <::Signer as IdentifyAccount>::AccountId;
71 |
72 | impl frame_system::offchain::SigningTypes for Test {
73 | type Public = ::Signer;
74 | type Signature = Signature;
75 | }
76 |
77 | impl frame_system::offchain::SendTransactionTypes for Test
78 | where
79 | RuntimeCall: From,
80 | {
81 | type OverarchingCall = RuntimeCall;
82 | type Extrinsic = Extrinsic;
83 | }
84 |
85 | impl frame_system::offchain::CreateSignedTransaction for Test
86 | where
87 | RuntimeCall: From,
88 | {
89 | fn create_transaction>(
90 | call: RuntimeCall,
91 | _public: ::Signer,
92 | _account: AccountId,
93 | nonce: u64,
94 | ) -> Option<(RuntimeCall, ::SignaturePayload)> {
95 | Some((call, (nonce, ())))
96 | }
97 | }
98 |
99 | parameter_types! {
100 | pub const BlockHashCount: u64 = 250;
101 | pub const SS58Prefix: u8 = 42;
102 | }
103 |
104 | impl frame_system::Config for Test {
105 | type BaseCallFilter = frame_support::traits::Everything;
106 | type BlockWeights = ();
107 | type BlockLength = ();
108 | type RuntimeOrigin = RuntimeOrigin;
109 | type Index = u64;
110 | type BlockNumber = u64;
111 | type Hash = H256;
112 | type RuntimeCall = RuntimeCall;
113 | type Hashing = BlakeTwo256;
114 | type AccountId = sp_core::sr25519::Public;
115 | type Lookup = IdentityLookup;
116 | type Header = Header;
117 | type RuntimeEvent = RuntimeEvent;
118 | type BlockHashCount = ConstU64<250>;
119 | type DbWeight = ();
120 | type Version = ();
121 | type PalletInfo = PalletInfo;
122 | type AccountData = pallet_balances::AccountData;
123 | type OnNewAccount = ();
124 | type OnKilledAccount = ();
125 | type SystemWeightInfo = ();
126 | type SS58Prefix = ();
127 | type OnSetCode = ();
128 | type MaxConsumers = ConstU32<16>;
129 | }
130 |
131 | pub fn test_pub(n: u8) -> sp_core::sr25519::Public {
132 | sp_core::sr25519::Public::from_raw([n; 32])
133 | }
134 |
135 | // Build genesis storage according to the mock runtime.
136 | pub fn new_test_ext() -> sp_io::TestExternalities {
137 | let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap();
138 | pallet_balances::GenesisConfig:: {
139 | balances: vec![(test_pub(1), 10000), (test_pub(2), 1000), (test_pub(3), 1000)],
140 | }
141 | .assimilate_storage(&mut t)
142 | .unwrap();
143 | t.into()
144 | }
145 |
--------------------------------------------------------------------------------
/pallets/confidential-docs/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-confidential-docs"
3 | version = "4.0.0-dev"
4 | description = "Provides backend services for the confidential docs solution"
5 | authors = ["Hashed ;
12 | type Block = frame_system::mocking::MockBlock;
13 |
14 | // Configure a mock runtime to test the pallet.
15 | frame_support::construct_runtime!(
16 | pub enum Test where
17 | Block = Block,
18 | NodeBlock = Block,
19 | UncheckedExtrinsic = UncheckedExtrinsic,
20 | {
21 | System: frame_system::{Pallet, Call, Config, Storage, Event},
22 | ConfidentialDocs: pallet_confidential_docs::{Pallet, Call, Storage, Event},
23 | }
24 | );
25 |
26 | parameter_types! {
27 | pub const BlockHashCount: u64 = 250;
28 | pub const SS58Prefix: u8 = 42;
29 | }
30 |
31 | impl system::Config for Test {
32 | type BaseCallFilter = frame_support::traits::Everything;
33 | type BlockWeights = ();
34 | type BlockLength = ();
35 | type DbWeight = ();
36 | type RuntimeOrigin = RuntimeOrigin;
37 | type RuntimeCall = RuntimeCall;
38 | type Index = u64;
39 | type BlockNumber = u64;
40 | type Hash = H256;
41 | type Hashing = BlakeTwo256;
42 | type AccountId = u64;
43 | type Lookup = IdentityLookup;
44 | type Header = Header;
45 | type RuntimeEvent = RuntimeEvent;
46 | type BlockHashCount = BlockHashCount;
47 | type Version = ();
48 | type PalletInfo = PalletInfo;
49 | type AccountData = ();
50 | type OnNewAccount = ();
51 | type OnKilledAccount = ();
52 | type SystemWeightInfo = ();
53 | type SS58Prefix = SS58Prefix;
54 | type OnSetCode = ();
55 | type MaxConsumers = frame_support::traits::ConstU32<16>;
56 | }
57 |
58 | parameter_types! {
59 | pub const MaxOwnedDocs: u32 = 100;
60 | pub const MaxSharedToDocs: u32 = 100;
61 | pub const MaxSharedFromDocs: u32 = 100;
62 | pub const DocNameMinLen: u32 = 4;
63 | pub const DocNameMaxLen: u32 = 30;
64 | pub const DocDescMinLen: u32 = 5;
65 | pub const DocDescMaxLen: u32 = 100;
66 | pub const GroupNameMinLen: u32 = 3;
67 | pub const GroupNameMaxLen: u32 = 30;
68 | pub const MaxMemberGroups: u32 = 100;
69 | }
70 |
71 | impl pallet_confidential_docs::Config for Test {
72 | type RuntimeEvent = RuntimeEvent;
73 | type RemoveOrigin = EnsureRoot;
74 | type MaxOwnedDocs = MaxOwnedDocs;
75 | type MaxSharedToDocs = MaxSharedToDocs;
76 | type MaxSharedFromDocs = MaxSharedFromDocs;
77 | type DocNameMinLen = DocNameMinLen;
78 | type DocNameMaxLen = DocNameMaxLen;
79 | type DocDescMinLen = DocDescMinLen;
80 | type DocDescMaxLen = DocDescMaxLen;
81 | type GroupNameMinLen = GroupNameMinLen;
82 | type GroupNameMaxLen = GroupNameMaxLen;
83 | type MaxMemberGroups = MaxMemberGroups;
84 | }
85 |
86 | // Build genesis storage according to the mock runtime.
87 | pub fn new_test_ext() -> sp_io::TestExternalities {
88 | let storage = frame_system::GenesisConfig::default().build_storage::().unwrap();
89 | let mut ext: sp_io::TestExternalities = storage.into();
90 | ext.execute_with(|| System::set_block_number(1));
91 | ext
92 | }
93 |
--------------------------------------------------------------------------------
/pallets/confidential-docs/src/types.rs:
--------------------------------------------------------------------------------
1 | //! Defines the types required by the confidential docs pallet
2 | use super::*;
3 | use frame_support::pallet_prelude::*;
4 |
5 | /// Defines the type used by fields that store an IPFS CID
6 | pub type CID = BoundedVec>;
7 | /// Defines the type used by fields that store a public key
8 | pub type PublicKey = [u8; 32];
9 | /// Defines the type used by fields that store a UserId
10 | pub type UserId = [u8; 32];
11 | /// Defines the type used by fields that store a document name
12 | pub type DocName = BoundedVec::DocNameMaxLen>;
13 | /// Defines the type used by fields that store a document description
14 | pub type DocDesc = BoundedVec::DocDescMaxLen>;
15 | /// Defines the type used by fields that store a group name
16 | pub type GroupName = BoundedVec::GroupNameMaxLen>;
17 |
18 | /// User vault, the vault stores the cipher private key used to cipher the user documents.
19 | /// The way the user vault is ciphered depends on the login method used by the user
20 | #[derive(
21 | CloneNoBound, Encode, Decode, RuntimeDebugNoBound, Default, TypeInfo, MaxEncodedLen, PartialEq,
22 | )]
23 | #[scale_info(skip_type_params(T))]
24 | #[codec(mel_bound())]
25 | pub struct Vault {
26 | /// IPFS CID where the vault data is stored
27 | pub cid: CID,
28 | /// Owner of the vault
29 | pub owner: T::AccountId,
30 | }
31 |
32 | /// Owned confidential document
33 | #[derive(
34 | CloneNoBound, Encode, Decode, RuntimeDebugNoBound, Default, TypeInfo, MaxEncodedLen, PartialEq,
35 | )]
36 | #[scale_info(skip_type_params(T))]
37 | #[codec(mel_bound())]
38 | pub struct OwnedDoc {
39 | /// IPFS CID where the document data is stored
40 | pub cid: CID,
41 | /// User provided name for the document
42 | pub name: DocName,
43 | /// User provided description for the document
44 | pub description: DocDesc,
45 | /// Owner of the document
46 | pub owner: T::AccountId,
47 | }
48 |
/// Shared confidential document.
/// Record of a document shared by account `from` with account `to`;
/// the payload lives off-chain in IPFS (referenced by `cid`).
#[derive(
	CloneNoBound, Encode, Decode, RuntimeDebugNoBound, Default, TypeInfo, MaxEncodedLen, PartialEq,
)]
#[scale_info(skip_type_params(T))]
#[codec(mel_bound())]
pub struct SharedDoc {
	/// IPFS CID where the document data is stored
	pub cid: CID,
	/// User provided name for the document
	pub name: DocName,
	/// User provided description for the document
	pub description: DocDesc,
	/// User that shared the document
	pub from: T::AccountId,
	/// User to which the document was shared
	pub to: T::AccountId,
}
67 |
/// Group member role, ordered by increasing privilege.
/// See `GroupMember::can_add_group_member` / `can_remove_group_member`
/// for the exact rules each role grants.
#[derive(
	CloneNoBound, Encode, Decode, RuntimeDebugNoBound, Default, TypeInfo, MaxEncodedLen, PartialEq,
)]
#[scale_info(skip_type_params(T))]
#[codec(mel_bound())]
pub enum GroupRole {
	/// May add members, and may remove non-owner members they themselves authorized
	Admin,
	/// Default role: plain group member with no management rights
	#[default]
	Member,
	/// May add members and remove any non-owner member; an owner is never removable
	Owner,
}
80 |
/// Group definition: a group has its own account id, a creator and a name.
#[derive(
	CloneNoBound, Encode, Decode, RuntimeDebugNoBound, Default, TypeInfo, MaxEncodedLen, PartialEq,
)]
#[scale_info(skip_type_params(T))]
#[codec(mel_bound())]
pub struct Group {
	/// Account id of the group
	pub group: T::AccountId,
	/// User that created the group
	pub creator: T::AccountId,
	/// Group Name
	pub name: GroupName,
}
94 |
/// Group member: links an account to a group together with its role and
/// the CID of the (per-member) group key stored in IPFS.
#[derive(
	CloneNoBound, Encode, Decode, RuntimeDebugNoBound, Default, TypeInfo, MaxEncodedLen, PartialEq,
)]
#[scale_info(skip_type_params(T))]
#[codec(mel_bound())]
pub struct GroupMember {
	/// IPFS CID where the group key is stored
	pub cid: CID,
	/// Account id of the group this membership belongs to
	pub group: T::AccountId,
	/// User that is part of the group
	pub member: T::AccountId,
	/// User that authorized the member to the group
	pub authorizer: T::AccountId,
	/// Role of the member within the group
	pub role: GroupRole,
}
113 |
114 | impl GroupMember {
115 | pub fn can_add_group_member(&self) -> bool {
116 | self.role == GroupRole::Admin || self.role == GroupRole::Owner
117 | }
118 |
119 | pub fn can_remove_group_member(&self, group_member: &GroupMember) -> bool {
120 | group_member.role != GroupRole::Owner
121 | && (self.role == GroupRole::Owner
122 | || (self.role == GroupRole::Admin && group_member.authorizer == self.member))
123 | }
124 | }
125 |
--------------------------------------------------------------------------------
/pallets/fruniques/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-fruniques"
3 | version = "0.1.0-dev"
4 | description = "Fractionalized NFTs compatible with the Statemine parachain"
5 | authors = ["Hashed ::get(), Some(s));
17 | }
18 |
19 | impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
20 | }
21 |
--------------------------------------------------------------------------------
/pallets/fruniques/src/mock.rs:
--------------------------------------------------------------------------------
1 | use crate as pallet_fruniques;
2 | use frame_support::{construct_runtime, parameter_types, traits::AsEnsureOriginWithArg};
3 | use frame_system::{EnsureRoot, EnsureSigned};
4 | use pallet_balances;
5 | use sp_core::H256;
6 | use sp_runtime::{
7 | testing::Header,
8 | traits::{BlakeTwo256, IdentityLookup},
9 | };
10 |
// NOTE(review): generic parameters look truncated in this file (e.g.
// `MockUncheckedExtrinsic` / `MockBlock` are normally parameterized by the
// runtime) — likely an extraction artifact; confirm against the original source.
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic;
type Block = frame_system::mocking::MockBlock;

// Assemble the mock runtime used by the fruniques unit tests: frame_system,
// uniques (NFT backend), balances, the pallet under test, and RBAC.
construct_runtime!(
	pub enum Test where
		Block = Block,
		NodeBlock = Block,
		UncheckedExtrinsic = UncheckedExtrinsic,
	{
		System: frame_system::{Pallet, Call, Config, Storage, Event},
		Uniques: pallet_uniques::{Pallet, Call, Storage, Event},
		Balances: pallet_balances::{Pallet, Call, Storage, Config, Event},
		Fruniques: pallet_fruniques::{Pallet, Call, Storage, Event},
		RBAC: pallet_rbac::{Pallet, Call, Storage, Event},
	}
);
// Constants consumed by the frame_system and pallet_fruniques configs below.
parameter_types! {
	pub const BlockHashCount: u64 = 250;
	pub const ChildMaxLen: u32 = 10;
	pub const MaxParentsInCollection: u32 = 100;
}
32 |
// Minimal frame_system configuration for the mock runtime: u64 account ids,
// identity lookup, and unit defaults wherever the tests do not depend on the
// value. Account data is backed by pallet_balances.
impl frame_system::Config for Test {
	type BaseCallFilter = frame_support::traits::Everything;
	type BlockWeights = ();
	type BlockLength = ();
	type RuntimeOrigin = RuntimeOrigin;
	type RuntimeCall = RuntimeCall;
	type Index = u64;
	type BlockNumber = u64;
	type Hash = H256;
	type Hashing = BlakeTwo256;
	type AccountId = u64;
	type Lookup = IdentityLookup;
	type Header = Header;
	type RuntimeEvent = RuntimeEvent;
	type BlockHashCount = BlockHashCount;
	type DbWeight = ();
	type Version = ();
	type PalletInfo = PalletInfo;
	type AccountData = pallet_balances::AccountData;
	type OnNewAccount = ();
	type OnKilledAccount = ();
	type SystemWeightInfo = ();
	type SS58Prefix = ();
	type OnSetCode = ();
	type MaxConsumers = frame_support::traits::ConstU32<16>;
}
59 |
// Configuration of the pallet under test: removals require root, and RBAC is
// served by the pallet_rbac instance assembled in this mock runtime.
impl pallet_fruniques::Config for Test {
	type RuntimeEvent = RuntimeEvent;
	type RemoveOrigin = EnsureRoot;
	type ChildMaxLen = ChildMaxLen;
	type MaxParentsInCollection = MaxParentsInCollection;
	type Rbac = RBAC;
}
67 |
// Deposit and size-limit constants for the pallet_uniques configuration below.
parameter_types! {
	pub const ClassDeposit: u64 = 2;
	pub const InstanceDeposit: u64 = 1;
	pub const KeyLimit: u32 = 50;
	pub const ValueLimit: u32 = 50;
	pub const StringLimit: u32 = 50;
	pub const MetadataDepositBase: u64 = 1;
	// NOTE(review): this constant is never referenced — the pallet_uniques impl
	// wires `type AttributeDepositBase = MetadataDepositBase;` instead. The two
	// values happen to match today; confirm which was intended.
	pub const AttributeDepositBase: u64 = 1;
	pub const MetadataDepositPerByte: u64 = 1;
}
78 |
// pallet_uniques (NFT backend) configuration used by fruniques in tests.
impl pallet_uniques::Config for Test {
	type RuntimeEvent = RuntimeEvent;
	type CollectionId = u32;
	type ItemId = u32;
	type Currency = Balances;
	type ForceOrigin = frame_system::EnsureRoot;
	type CollectionDeposit = ClassDeposit;
	type ItemDeposit = InstanceDeposit;
	type MetadataDepositBase = MetadataDepositBase;
	// NOTE(review): reuses MetadataDepositBase although a distinct
	// `AttributeDepositBase` constant is declared above — confirm intent.
	type AttributeDepositBase = MetadataDepositBase;
	type DepositPerByte = MetadataDepositPerByte;
	type StringLimit = StringLimit;
	type KeyLimit = KeyLimit;
	type ValueLimit = ValueLimit;
	type WeightInfo = ();
	// Benchmark helper is only needed when building with runtime-benchmarks.
	#[cfg(feature = "runtime-benchmarks")]
	type Helper = ();
	type CreateOrigin = AsEnsureOriginWithArg>;
	type Locker = ();
}
99 |
// Constants for the pallet_balances configuration below.
parameter_types! {
	pub const ExistentialDeposit: u64 = 1;
	pub const MaxReserves: u32 = 50;
}
104 |
// pallet_balances configuration; account data is stored in frame_system
// (`type AccountStore = System`).
impl pallet_balances::Config for Test {
	type Balance = u64;
	type DustRemoval = ();
	type RuntimeEvent = RuntimeEvent;
	type ExistentialDeposit = ExistentialDeposit;
	type AccountStore = System;
	type WeightInfo = ();
	type MaxLocks = ();
	type MaxReserves = MaxReserves;
	type ReserveIdentifier = [u8; 8];
}
116 |
// Capacity limits for the RBAC pallet in this test runtime.
parameter_types! {
	pub const MaxScopesPerPallet: u32 = 2;
	pub const MaxRolesPerPallet: u32 = 6;
	pub const RoleMaxLen: u32 = 25;
	pub const PermissionMaxLen: u32 = 25;
	pub const MaxPermissionsPerRole: u32 = 11;
	pub const MaxRolesPerUser: u32 = 2;
	pub const MaxUsersPerRole: u32 = 2;
}
// RBAC pallet configuration; storage removal requires root.
impl pallet_rbac::Config for Test {
	type RuntimeEvent = RuntimeEvent;
	type MaxScopesPerPallet = MaxScopesPerPallet;
	type MaxRolesPerPallet = MaxRolesPerPallet;
	type RoleMaxLen = RoleMaxLen;
	type PermissionMaxLen = PermissionMaxLen;
	type MaxPermissionsPerRole = MaxPermissionsPerRole;
	type MaxRolesPerUser = MaxRolesPerUser;
	type MaxUsersPerRole = MaxUsersPerRole;
	type RemoveOrigin = EnsureRoot;
}
137 | // Build genesis storage according to the mock runtime.
138 | // pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
139 | // frame_system::GenesisConfig::default().build_storage::().unwrap().into()
140 | // }
141 | // Build genesis storage according to the mock runtime.
142 |
// Builds the test externalities: endows accounts 1-3 with 1_000_000 each,
// then runs `Fruniques::do_initial_setup()` inside the externalities so every
// test starts from a configured pallet.
pub fn new_test_ext() -> sp_io::TestExternalities {
	let balance_amount = 1_000_000 as u64;
	// NOTE(review): turbofish generics look truncated here
	// (`build_storage::()`, `GenesisConfig::`) — confirm against the original.
	let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap();
	pallet_balances::GenesisConfig:: {
		balances: vec![(1, balance_amount), (2, balance_amount), (3, balance_amount)],
	}
	.assimilate_storage(&mut t)
	.expect("assimilate_storage failed");
	// `mut` is required: `execute_with` takes `&mut self`.
	let mut t: sp_io::TestExternalities = t.into();
	t.execute_with(|| Fruniques::do_initial_setup().expect("Error on configuring initial setup"));
	t
}
155 |
--------------------------------------------------------------------------------
/pallets/fund-admin/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-fund-admin"
3 | version = "4.0.0-dev"
4 | description = "Proxy Financial Pallet"
5 | authors = ["Hashed ::get(), Some(s));
17 | }
18 |
19 | impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
20 | }
21 |
--------------------------------------------------------------------------------
/pallets/gated-marketplace/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-gated-marketplace"
3 | version = "4.0.0-dev"
4 | description = "Pallet to create marketplaces"
5 | authors = ["Hashed "]
5 | edition = "2021"
6 | license = "Apache-2.0"
7 | homepage = "https://substrate.io"
8 | repository = "https://github.com/paritytech/substrate/"
9 | description = "FRAME asset management pallet"
10 | readme = "README.md"
11 |
12 | [package.metadata.docs.rs]
13 | targets = ["x86_64-unknown-linux-gnu"]
14 |
15 | [dependencies]
16 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false }
17 | scale-info = { version = "2.1.1", default-features = false, features = ["derive"] }
18 | sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
19 | # Needed for various traits. In our case, `OnFinalize`.
20 | sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
21 | # Needed for type-safe access to storage DB.
22 | frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
23 | # `system` module provides us with all sorts of useful stuff and macros depend on it being around.
24 | frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
25 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false, optional = true }
26 |
27 | [dev-dependencies]
28 | sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
29 | sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
30 | sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
31 | pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" }
32 |
33 | [features]
34 | default = ["std"]
35 | std = [
36 | "codec/std",
37 | "scale-info/std",
38 | "sp-std/std",
39 | "sp-runtime/std",
40 | "frame-support/std",
41 | "frame-system/std",
42 | "frame-benchmarking?/std",
43 | ]
44 | runtime-benchmarks = [
45 | "frame-benchmarking/runtime-benchmarks",
46 | "sp-runtime/runtime-benchmarks",
47 | "frame-system/runtime-benchmarks",
48 | ]
49 | try-runtime = ["frame-support/try-runtime"]
50 |
--------------------------------------------------------------------------------
/pallets/mapped-assets/README.md:
--------------------------------------------------------------------------------
1 | # Assets Module
2 |
3 | A simple, secure module for dealing with fungible assets.
4 |
5 | ## Overview
6 |
7 | The Assets module provides functionality for asset management of fungible asset classes
8 | with a fixed supply, including:
9 |
10 | * Asset Issuance
11 | * Asset Transfer
12 | * Asset Destruction
13 |
14 | To use it in your runtime, you need to implement the assets [`assets::Config`](https://docs.rs/pallet-assets/latest/pallet_assets/pallet/trait.Config.html).
15 |
16 | The supported dispatchable functions are documented in the [`assets::Call`](https://docs.rs/pallet-assets/latest/pallet_assets/pallet/enum.Call.html) enum.
17 |
18 | ### Terminology
19 |
20 | * **Asset issuance:** The creation of a new asset, whose total supply will belong to the
21 | account that issues the asset.
22 | * **Asset transfer:** The action of transferring assets from one account to another.
23 | * **Asset destruction:** The process of an account removing its entire holding of an asset.
24 | * **Fungible asset:** An asset whose units are interchangeable.
25 | * **Non-fungible asset:** An asset for which each unit has unique characteristics.
26 |
27 | ### Goals
28 |
29 | The assets system in Substrate is designed to make the following possible:
30 |
31 | * Issue a unique asset to its creator's account.
32 | * Move assets between accounts.
33 | * Remove an account's balance of an asset when requested by that account's owner and update
34 | the asset's total supply.
35 |
36 | ## Interface
37 |
38 | ### Dispatchable Functions
39 |
40 | * `issue` - Issues the total supply of a new fungible asset to the account of the caller of the function.
41 | * `transfer` - Transfers an `amount` of units of fungible asset `id` from the balance of
42 | the function caller's account (`origin`) to a `target` account.
43 | * `destroy` - Destroys the entire holding of a fungible asset `id` associated with the account
44 | that called the function.
45 |
46 | Please refer to the [`Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum and its associated variants for documentation on each function.
47 |
48 | ### Public Functions
49 |
50 |
51 | * `balance` - Get the asset `id` balance of `who`.
52 | * `total_supply` - Get the total supply of an asset `id`.
53 |
54 | Please refer to the [`Pallet`](https://docs.rs/pallet-assets/latest/pallet_assets/pallet/struct.Pallet.html) struct for details on publicly available functions.
55 |
56 | ## Usage
57 |
58 | The following example shows how to use the Assets module in your runtime by exposing public functions to:
59 |
60 | * Issue a new fungible asset for a token distribution event (airdrop).
61 | * Query the fungible asset holding balance of an account.
62 | * Query the total supply of a fungible asset that has been issued.
63 |
64 | ### Prerequisites
65 |
66 | Import the Assets module and types and derive your runtime's configuration traits from the Assets module trait.
67 |
68 | ### Simple Code Snippet
69 |
70 | ```rust
71 | use pallet_assets as assets;
72 | use sp_runtime::ArithmeticError;
73 |
74 | #[frame_support::pallet]
75 | pub mod pallet {
76 | use super::*;
77 | use frame_support::pallet_prelude::*;
78 | use frame_system::pallet_prelude::*;
79 |
80 | #[pallet::pallet]
81 | pub struct Pallet(_);
82 |
83 | #[pallet::config]
84 | pub trait Config: frame_system::Config + assets::Config {}
85 |
86 | #[pallet::call]
87 | impl Pallet {
88 | pub fn issue_token_airdrop(origin: OriginFor) -> DispatchResult {
89 | let sender = ensure_signed(origin)?;
90 |
91 | const ACCOUNT_ALICE: u64 = 1;
92 | const ACCOUNT_BOB: u64 = 2;
93 | const COUNT_AIRDROP_RECIPIENTS: u64 = 2;
94 | const TOKENS_FIXED_SUPPLY: u64 = 100;
95 |
96 | ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), ArithmeticError::DivisionByZero);
97 |
98 | let asset_id = Self::next_asset_id();
99 |
100 | >::mutate(|asset_id| *asset_id += 1);
101 | >::insert((asset_id, &ACCOUNT_ALICE), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS);
102 | >::insert((asset_id, &ACCOUNT_BOB), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS);
103 | >::insert(asset_id, TOKENS_FIXED_SUPPLY);
104 |
105 | Self::deposit_event(Event::Issued(asset_id, sender, TOKENS_FIXED_SUPPLY));
106 | Ok(())
107 | }
108 | }
109 | }
110 | ```
111 |
112 | ## Assumptions
113 |
114 | Below are assumptions that must be held when using this module. If any of
115 | them are violated, the behavior of this module is undefined.
116 |
117 | * The total count of assets should be less than
118 | `Config::AssetId::max_value()`.
119 |
120 | ## Related Modules
121 |
122 | * [`System`](https://docs.rs/frame-system/latest/frame_system/)
123 | * [`Support`](https://docs.rs/frame-support/latest/frame_support/)
124 |
125 | License: Apache-2.0
126 |
--------------------------------------------------------------------------------
/pallets/mapped-assets/src/extra_mutator.rs:
--------------------------------------------------------------------------------
1 | // This file is part of Substrate.
2 |
3 | // Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
4 | // SPDX-License-Identifier: Apache-2.0
5 |
6 | // Licensed under the Apache License, Version 2.0 (the "License");
7 | // you may not use this file except in compliance with the License.
8 | // You may obtain a copy of the License at
9 | //
10 | // http://www.apache.org/licenses/LICENSE-2.0
11 | //
12 | // Unless required by applicable law or agreed to in writing, software
13 | // distributed under the License is distributed on an "AS IS" BASIS,
14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | // See the License for the specific language governing permissions and
16 | // limitations under the License.
17 |
18 | //! Datatype for easy mutation of the extra "sidecar" data.
19 |
20 | use super::*;
21 |
22 | /// A mutator type allowing inspection and possible modification of the extra "sidecar" data.
23 | ///
24 | /// This may be used as a `Deref` for the pallet's extra data. If mutated (using `DerefMut`), then
25 | /// any uncommitted changes (see `commit` function) will be automatically committed to storage when
26 | /// dropped. Changes, even after committed, may be reverted to their original values with the
27 | /// `revert` function.
pub struct ExtraMutator, I: 'static = ()> {
	/// Asset whose account entry is being mutated.
	id: T::AssetId,
	/// Account whose extra data is being mutated.
	who: T::AccountId,
	/// Value loaded from storage at construction; `revert` restores this.
	original: T::Extra,
	/// Uncommitted replacement value; `None` until the first mutable access.
	pending: Option,
}
34 |
35 | impl, I: 'static> Drop for ExtraMutator {
36 | fn drop(&mut self) {
37 | debug_assert!(self.commit().is_ok(), "attempt to write to non-existent asset account");
38 | }
39 | }
40 |
41 | impl, I: 'static> sp_std::ops::Deref for ExtraMutator {
42 | type Target = T::Extra;
43 | fn deref(&self) -> &T::Extra {
44 | match self.pending {
45 | Some(ref value) => value,
46 | None => &self.original,
47 | }
48 | }
49 | }
50 |
51 | impl, I: 'static> sp_std::ops::DerefMut for ExtraMutator {
52 | fn deref_mut(&mut self) -> &mut T::Extra {
53 | if self.pending.is_none() {
54 | self.pending = Some(self.original.clone());
55 | }
56 | self.pending.as_mut().unwrap()
57 | }
58 | }
59 |
impl, I: 'static> ExtraMutator {
	/// Returns a mutator for the (`id`, `who`) asset account, or `None` when
	/// no such account exists in storage.
	pub(super) fn maybe_new(
		id: T::AssetId,
		who: impl sp_std::borrow::Borrow,
	) -> Option> {
		if let Some(a) = Account::::get(id, who.borrow()) {
			Some(ExtraMutator:: { id, who: who.borrow().clone(), original: a.extra, pending: None })
		} else {
			None
		}
	}

	/// Commit any changes to storage.
	/// Errs with `()` when the asset account no longer exists.
	pub fn commit(&mut self) -> Result<(), ()> {
		// `take()` clears `pending`, so the drop-time commit becomes a no-op
		// after an explicit commit.
		if let Some(extra) = self.pending.take() {
			Account::::try_mutate(self.id, self.who.borrow(), |maybe_account| {
				maybe_account.as_mut().ok_or(()).map(|account| account.extra = extra)
			})
		} else {
			Ok(())
		}
	}

	/// Revert any changes, even those already committed by `self` and drop self.
	pub fn revert(mut self) -> Result<(), ()> {
		// Clearing `pending` prevents the `Drop` impl from re-committing the
		// discarded change; storage is then reset to the original value.
		self.pending = None;
		Account::::try_mutate(self.id, self.who.borrow(), |maybe_account| {
			maybe_account
				.as_mut()
				.ok_or(())
				.map(|account| account.extra = self.original.clone())
		})
	}
}
94 |
--------------------------------------------------------------------------------
/pallets/mapped-assets/src/impl_stored_map.rs:
--------------------------------------------------------------------------------
1 | // This file is part of Substrate.
2 |
3 | // Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
4 | // SPDX-License-Identifier: Apache-2.0
5 |
6 | // Licensed under the Apache License, Version 2.0 (the "License");
7 | // you may not use this file except in compliance with the License.
8 | // You may obtain a copy of the License at
9 | //
10 | // http://www.apache.org/licenses/LICENSE-2.0
11 | //
12 | // Unless required by applicable law or agreed to in writing, software
13 | // distributed under the License is distributed on an "AS IS" BASIS,
14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | // See the License for the specific language governing permissions and
16 | // limitations under the License.
17 |
18 | //! Assets pallet's `StoredMap` implementation.
19 |
20 | use super::*;
21 |
impl, I: 'static> StoredMap<(T::AssetId, T::AccountId), T::Extra> for Pallet {
	/// Returns the extra data for the (asset, account) pair, or the default
	/// value when no such account record exists.
	fn get(id_who: &(T::AssetId, T::AccountId)) -> T::Extra {
		let &(id, ref who) = id_who;
		Account::::get(id, who).map(|a| a.extra).unwrap_or_default()
	}

	/// Runs `f` over the (possibly absent) extra data and writes the result
	/// back, enforcing that a value can only be written to an existing account
	/// and only a non-existent entry can be "deleted" as a no-op.
	fn try_mutate_exists>(
		id_who: &(T::AssetId, T::AccountId),
		f: impl FnOnce(&mut Option) -> Result,
	) -> Result {
		let &(id, ref who) = id_who;
		let mut maybe_extra = Account::::get(id, who).map(|a| a.extra);
		let r = f(&mut maybe_extra)?;
		// They want to write some value or delete it.
		// If the account existed and they want to write a value, then we write.
		// If the account didn't exist and they want to delete it, then we let it pass.
		// Otherwise, we fail.
		Account::::try_mutate(id, who, |maybe_account| {
			if let Some(extra) = maybe_extra {
				// They want to write a value. Let this happen only if the account actually exists.
				if let Some(ref mut account) = maybe_account {
					account.extra = extra;
				} else {
					return Err(DispatchError::NoProviders.into());
				}
			} else {
				// They want to delete it. Let this pass if the item never existed anyway.
				ensure!(maybe_account.is_none(), DispatchError::ConsumerRemaining);
			}
			Ok(r)
		})
	}
}
55 |
--------------------------------------------------------------------------------
/pallets/rbac/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-rbac"
3 | version = "4.0.0-dev"
4 | description = "FRAME pallet template for defining custom runtime logic."
5 | authors = ["Substrate DevHub "]
6 | homepage = "https://substrate.io/"
7 | edition = "2021"
8 | license = "MIT"
9 | publish = false
10 | repository = "https://github.com/hashed-io/hashed-substrate"
11 |
12 | [package.metadata.docs.rs]
13 | targets = ["x86_64-unknown-linux-gnu"]
14 |
15 | [dependencies]
16 | log = "0.4"
17 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [
18 | "derive",
19 | ] }
20 | scale-info = { version = "2.1.1", default-features = false, features = [
21 | "derive"
22 | ] }
23 | frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
24 | frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
25 | frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false, optional = true }
26 | sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
27 |
28 | [dev-dependencies]
29 | sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
30 | sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", default-features = false }
31 |
[features]
default = ["std"]
std = [
	"codec/std",
	"scale-info/std",
	"frame-support/std",
	"frame-system/std",
	"frame-benchmarking?/std",
	"sp-runtime/std",
]
41 | runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
42 | try-runtime = ["frame-support/try-runtime"]
43 |
--------------------------------------------------------------------------------
/pallets/rbac/src/benchmarking.rs:
--------------------------------------------------------------------------------
1 | //! Benchmarking setup for pallet-template
2 |
3 | use super::*;
4 |
5 | #[allow(unused)]
6 | use crate::Pallet as Template;
7 | use frame_benchmarking::{benchmarks, whitelisted_caller};
8 | use frame_system::RawOrigin;
9 |
// NOTE(review): this is the unmodified template benchmark — `Something` and
// `do_something` belong to pallet-template, not pallet-rbac. Confirm whether
// this was intentionally left as a placeholder or real benchmarks are missing.
benchmarks! {
	do_something {
		let s in 0 .. 100;
		let caller: T::AccountId = whitelisted_caller();
	}: _(RawOrigin::Signed(caller), s)
	verify {
		assert_eq!(Something::::get(), Some(s));
	}

	impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
}
21 |
--------------------------------------------------------------------------------
/pallets/rbac/src/mock.rs:
--------------------------------------------------------------------------------
1 | use crate as pallet_rbac;
2 | use frame_support::parameter_types;
3 | use frame_system as system;
4 | use frame_system::EnsureRoot;
5 | use sp_core::H256;
6 | use sp_runtime::{
7 | testing::Header,
8 | traits::{BlakeTwo256, IdentityLookup},
9 | };
// NOTE(review): generic parameters look truncated in this file (mock types are
// normally parameterized by the runtime) — confirm against the original source.
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic;
type Block = frame_system::mocking::MockBlock;

// Configure a mock runtime to test the pallet: just frame_system plus RBAC.
frame_support::construct_runtime!(
	pub enum Test where
		Block = Block,
		NodeBlock = Block,
		UncheckedExtrinsic = UncheckedExtrinsic,
	{
		System: frame_system::{Pallet, Call, Config, Storage, Event},
		RBAC: pallet_rbac::{Pallet, Call, Storage, Event},
	}
);
24 |
// Constants consumed by the frame_system config below.
parameter_types! {
	pub const BlockHashCount: u64 = 250;
	pub const SS58Prefix: u8 = 42;
}
29 |
// Minimal frame_system configuration for the RBAC mock runtime: u64 account
// ids, identity lookup, no account data, unit defaults elsewhere.
impl system::Config for Test {
	type BaseCallFilter = frame_support::traits::Everything;
	type BlockWeights = ();
	type BlockLength = ();
	type DbWeight = ();
	type RuntimeOrigin = RuntimeOrigin;
	type RuntimeCall = RuntimeCall;
	type Index = u64;
	type BlockNumber = u64;
	type Hash = H256;
	type Hashing = BlakeTwo256;
	type AccountId = u64;
	type Lookup = IdentityLookup;
	type Header = Header;
	type RuntimeEvent = RuntimeEvent;
	type BlockHashCount = BlockHashCount;
	type Version = ();
	type PalletInfo = PalletInfo;
	type AccountData = ();
	type OnNewAccount = ();
	type OnKilledAccount = ();
	type SystemWeightInfo = ();
	type SS58Prefix = SS58Prefix;
	type OnSetCode = ();
	type MaxConsumers = frame_support::traits::ConstU32<16>;
}
56 |
// Deliberately small capacity limits so overflow paths are easy to hit in tests.
parameter_types! {
	pub const MaxScopesPerPallet: u32 = 2;
	pub const MaxRolesPerPallet: u32 = 3;
	pub const RoleMaxLen: u32 = 10;
	pub const PermissionMaxLen: u32 = 15;
	pub const MaxPermissionsPerRole: u32 = 3;
	pub const MaxRolesPerUser: u32 = 2;
	pub const MaxUsersPerRole: u32 = 2;
}
// Configuration of the pallet under test; storage removal requires root.
impl pallet_rbac::Config for Test {
	type RuntimeEvent = RuntimeEvent;
	type MaxScopesPerPallet = MaxScopesPerPallet;
	type MaxRolesPerPallet = MaxRolesPerPallet;
	type RoleMaxLen = RoleMaxLen;
	type PermissionMaxLen = PermissionMaxLen;
	type MaxPermissionsPerRole = MaxPermissionsPerRole;
	type MaxRolesPerUser = MaxRolesPerUser;
	type MaxUsersPerRole = MaxUsersPerRole;
	type RemoveOrigin = EnsureRoot;
}
// Build genesis storage according to the mock runtime.
pub fn new_test_ext() -> sp_io::TestExternalities {
	// NOTE(review): the turbofish looks truncated (`build_storage::()`) —
	// presumably `build_storage::<Test>()` in the original; confirm.
	system::GenesisConfig::default().build_storage::().unwrap().into()
}
81 |
--------------------------------------------------------------------------------
/pallets/rbac/src/types.rs:
--------------------------------------------------------------------------------
1 | //use super::*;
2 | use frame_support::{pallet_prelude::*, sp_io::hashing::blake2_256};
3 | use sp_runtime::sp_std::vec::Vec;
4 |
// 32-byte identifiers used throughout the RBAC pallet. Identifiers for
// human-readable byte-vector names are derived via `IdOrVec::to_id`
// (blake2_256 of the SCALE encoding).
pub type PalletId = [u8; 32];
pub type RoleId = [u8; 32];
pub type ScopeId = [u8; 32];
pub type PermissionId = [u8; 32];
9 |
/// Either a ready-made 32-byte identifier or a raw byte vector that still
/// needs to be hashed into one (see `to_id` / `to_id_enum`).
#[derive(Encode, Decode, Debug, Clone, Eq, PartialEq, TypeInfo)]
pub enum IdOrVec {
	/// Already-derived 32-byte identifier.
	Id([u8; 32]),
	/// Raw bytes; hashed with blake2_256 when an id is required.
	Vec(Vec),
}
15 |
16 | impl IdOrVec {
17 | pub fn to_id_enum(&self) -> Self {
18 | match self {
19 | Self::Id(_) => self.clone(),
20 | Self::Vec(_) => Self::Id(Self::to_id(self)),
21 | }
22 | }
23 |
24 | pub fn to_id(&self) -> [u8; 32] {
25 | match self {
26 | Self::Id(id) => *id,
27 | Self::Vec(v) => v.clone().using_encoded(blake2_256),
28 | }
29 | }
30 | }
31 |
/// Interface through which other pallets consume the RBAC pallet: scope, role
/// and permission management plus authorization checks. Ids are 32-byte values
/// (see `IdOrVec::to_id`).
// NOTE(review): generic parameters look truncated here (e.g. the trait is
// normally generic over `AccountId`, and `Get`/`Vec`/`Result` have lost their
// type arguments) — confirm against the original source.
pub trait RoleBasedAccessControl {
	type MaxRolesPerPallet: Get;
	type MaxPermissionsPerRole: Get;
	type RoleMaxLen: Get;
	type PermissionMaxLen: Get;
	// scopes
	fn create_scope(pallet: IdOrVec, scope_id: ScopeId) -> DispatchResult;
	// scope removal
	fn remove_scope(pallet: IdOrVec, scope_id: ScopeId) -> DispatchResult;
	// removes all from one pallet/application
	fn remove_pallet_storage(pallet: IdOrVec) -> DispatchResult;
	// roles creation and setting
	fn create_and_set_roles(
		pallet: IdOrVec,
		roles: Vec>,
	) -> Result, DispatchError>;
	fn create_role(role: Vec) -> Result;
	fn set_role_to_pallet(pallet: IdOrVec, role_id: RoleId) -> DispatchResult;
	fn set_multiple_pallet_roles(pallet: IdOrVec, roles: Vec) -> DispatchResult;
	fn assign_role_to_user(
		user: AccountId,
		pallet: IdOrVec,
		scope_id: &ScopeId,
		role_id: RoleId,
	) -> DispatchResult;
	// role removal
	fn remove_role_from_user(
		user: AccountId,
		pallet: IdOrVec,
		scope_id: &ScopeId,
		role_id: RoleId,
	) -> DispatchResult;
	// permissions: creation, linking to roles, revocation
	fn create_and_set_permissions(
		pallet: IdOrVec,
		role: RoleId,
		permissions: Vec>,
	) -> Result, DispatchError>;
	fn create_permission(
		pallet: IdOrVec,
		permissions: Vec,
	) -> Result;
	fn set_permission_to_role(
		pallet: IdOrVec,
		role: RoleId,
		permission: PermissionId,
	) -> DispatchResult;
	fn set_multiple_permissions_to_role(
		pallet: IdOrVec,
		role: RoleId,
		permission: Vec,
	) -> DispatchResult;
	fn do_revoke_permission_from_role(
		pallet: IdOrVec,
		role: RoleId,
		permission: PermissionId,
	) -> DispatchResult;
	fn do_remove_permission_from_pallet(pallet: IdOrVec, permission: PermissionId) -> DispatchResult;
	// helpers: existence and authorization queries (Ok(()) = authorized/exists)
	fn is_authorized(
		user: AccountId,
		pallet: IdOrVec,
		scope_id: &ScopeId,
		permission_id: &PermissionId,
	) -> DispatchResult;
	fn has_role(
		user: AccountId,
		pallet: IdOrVec,
		scope_id: &ScopeId,
		role_ids: Vec,
	) -> DispatchResult;
	fn scope_exists(pallet: IdOrVec, scope_id: &ScopeId) -> DispatchResult;
	fn permission_exists(pallet: IdOrVec, permission_id: &PermissionId) -> DispatchResult;
	fn is_role_linked_to_pallet(pallet: IdOrVec, role_id: &RoleId) -> DispatchResult;
	fn is_permission_linked_to_role(
		pallet: IdOrVec,
		role_id: &RoleId,
		permission_id: &PermissionId,
	) -> DispatchResult;
	fn get_role_users_len(pallet: IdOrVec, scope_id: &ScopeId, role_id: &RoleId) -> usize;
	fn to_id(v: Vec) -> [u8; 32];
	fn does_user_have_any_role_in_scope(user: AccountId, pallet: IdOrVec, scope_id: &ScopeId)
		-> bool;
	fn get_roles_by_user(user: AccountId, pallet: IdOrVec, scope_id: &ScopeId) -> Vec;
	fn get_roles_that_have_permission(pallet: PalletId, permission_id: &PermissionId) -> Vec;
}
118 |
--------------------------------------------------------------------------------
/pallets/template/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pallet-template"
3 | version = "4.0.0-dev"
4 | description = "FRAME pallet template for defining custom runtime logic."
5 | authors = ["Substrate DevHub <https://github.com/substrate-developer-hub>"]
6 | homepage = "https://substrate.io/"
7 | edition = "2021"
8 | license = "Unlicense"
9 | publish = false
10 | repository = "https://github.com/substrate-developer-hub/substrate-node-template/"
11 |
12 | [package.metadata.docs.rs]
13 | targets = ["x86_64-unknown-linux-gnu"]
14 |
15 | [dependencies]
16 | codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [
17 | "derive",
18 | ] }
19 | log = { version = "0.4.14", default-features = false }
20 | scale-info = { version = "2.1.1", default-features = false, features = [
21 | "derive"
22 | ] }
23 | frame-support = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" }
24 | frame-system = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" }
25 | frame-benchmarking = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38", optional = true }
26 | sp-std = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" }
27 |
28 | [dev-dependencies]
29 | sp-core = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" }
30 | sp-io = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" }
31 | sp-runtime = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" }
32 | sp-std = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" }
33 |
34 | [features]
35 | default = ["std"]
36 | std = [
37 | "codec/std",
38 | "scale-info/std",
39 | "frame-support/std",
40 | "frame-system/std",
41 | "frame-benchmarking/std",
42 | "sp-std/std",
43 | ]
44 | runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
45 | try-runtime = ["frame-support/try-runtime"]
46 |
--------------------------------------------------------------------------------
/pallets/template/README.md:
--------------------------------------------------------------------------------
1 | License: Unlicense
--------------------------------------------------------------------------------
/pallets/template/src/benchmarking.rs:
--------------------------------------------------------------------------------
1 | //! Benchmarking setup for pallet-template
2 |
3 | use super::*;
4 |
5 | #[allow(unused)]
6 | use crate::Pallet as Template;
7 | use frame_benchmarking::{benchmarks, whitelisted_caller};
8 | use frame_system::RawOrigin;
9 |
10 | benchmarks! {
11 | do_something {
12 | let s in 0 .. 100;
13 | let caller: T::AccountId = whitelisted_caller();
14 | }: _(RawOrigin::Signed(caller), s)
15 | verify {
16 | assert_eq!(Something::<T>::get(), Some(s));
17 | }
18 |
19 | impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
20 | }
21 |
--------------------------------------------------------------------------------
/pallets/template/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![cfg_attr(not(feature = "std"), no_std)]
2 |
3 | /// Edit this file to define custom logic or remove it if it is not needed.
4 | /// Learn more about FRAME and the core library of Substrate FRAME pallets:
5 | /// <https://docs.substrate.io/reference/frame-pallets/>
6 | pub use pallet::*;
7 |
8 | #[cfg(test)]
9 | mod mock;
10 |
11 | #[cfg(test)]
12 | mod tests;
13 |
14 | #[cfg(feature = "runtime-benchmarks")]
15 | mod benchmarking;
16 |
17 | /// All migrations.
18 | pub mod migrations;
19 |
20 | #[frame_support::pallet]
21 | pub mod pallet {
22 |
23 | use frame_support::pallet_prelude::*;
24 | use frame_system::pallet_prelude::*;
25 |
26 | const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
27 |
28 | /// Configure the pallet by specifying the parameters and types on which it depends.
29 | #[pallet::config]
30 | pub trait Config: frame_system::Config {
31 | /// Because this pallet emits events, it depends on the runtime's definition of an event.
32 | type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
33 | }
34 |
35 | #[pallet::pallet]
36 | #[pallet::storage_version(STORAGE_VERSION)]
37 | #[pallet::generate_store(pub(super) trait Store)]
38 | pub struct Pallet<T>(_);
39 |
40 | // The pallet's runtime storage items.
41 | // https://docs.substrate.io/v3/runtime/storage
42 | #[pallet::storage]
43 | #[pallet::getter(fn something)]
44 | // Learn more about declaring storage items:
45 | // https://docs.substrate.io/v3/runtime/storage#declaring-storage-items
46 | pub type Something<T> = StorageValue<_, u32>;
47 |
48 | #[pallet::storage]
49 | #[pallet::getter(fn my_bytes_val)]
50 | pub type MyBytesVal<T> = StorageValue<_, MyBytes, ValueQuery>;
51 |
52 | // Pallets use events to inform users when important changes are made.
53 | // https://docs.substrate.io/v3/runtime/events-and-errors
54 | #[pallet::event]
55 | #[pallet::generate_deposit(pub(super) fn deposit_event)]
56 | pub enum Event<T: Config> {
57 | /// Event documentation should end with an array that provides descriptive names for event
58 | /// parameters. [something, who]
59 | SomethingStored(u32, T::AccountId),
60 | }
61 |
62 | pub type MyBytes = BoundedVec<u8, ConstU32<1024>>;
63 |
64 | // Errors inform users that something went wrong.
65 | #[pallet::error]
66 | pub enum Error<T> {
67 | /// Error names should be descriptive.
68 | NoneValue,
69 | /// Errors should have helpful documentation associated with them.
70 | StorageOverflow,
71 | }
72 |
73 | // Dispatchable functions allows users to interact with the pallet and invoke state changes.
74 | // These functions materialize as "extrinsics", which are often compared to transactions.
75 | // Dispatchable functions must be annotated with a weight and must return a DispatchResult.
76 | #[pallet::call]
77 | impl<T: Config> Pallet<T> {
78 | /// An example dispatchable that takes a singles value as a parameter, writes the value to
79 | /// storage and emits an event. This function must be dispatched by a signed extrinsic.
80 | #[pallet::call_index(0)]
81 | #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().writes(1))]
82 | pub fn do_something(origin: OriginFor<T>, something: u32) -> DispatchResult {
83 | // Check that the extrinsic was signed and get the signer.
84 | // This function will return an error if the extrinsic is not signed.
85 | // https://docs.substrate.io/v3/runtime/origins
86 | let who = ensure_signed(origin)?;
87 |
88 | // Update storage.
89 | <Something<T>>::put(something);
90 |
91 | // Emit an event.
92 | Self::deposit_event(Event::SomethingStored(something, who));
93 | // Return a successful DispatchResultWithPostInfo
94 | Ok(())
95 | }
96 |
97 | /// An example dispatchable that may throw a custom error.
98 | #[pallet::call_index(1)]
99 | #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().reads_writes(1,1))]
100 | pub fn cause_error(origin: OriginFor<T>) -> DispatchResult {
101 | let _who = ensure_signed(origin)?;
102 |
103 | // Read a value from storage.
104 | match <Something<T>>::get() {
105 | // Return an error if the value has not been set.
106 | None => Err(Error::::NoneValue)?,
107 | Some(old) => {
108 | // Increment the value read from storage; will error in the event of overflow.
109 | let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?;
110 | // Update the value in storage with the incremented result.
111 | <Something<T>>::put(new);
112 | Ok(())
113 | },
114 | }
115 | }
116 |
117 | #[pallet::call_index(2)]
118 | #[pallet::weight(Weight::from_ref_time(10_000) + T::DbWeight::get().writes(1))]
119 | pub fn insert_my_bytes(
120 | origin: OriginFor<T>,
121 | optional_bytes: Option<MyBytes>,
122 | ) -> DispatchResult {
123 | // Check that the extrinsic was signed and get the signer.
124 | // This function will return an error if the extrinsic is not signed.
125 | // https://docs.substrate.io/v3/runtime/origins
126 | let _ = ensure_signed(origin)?;
127 |
128 | // Update storage.
129 | let s = optional_bytes.unwrap_or_default();
130 |