├── .gitignore ├── README.md ├── docs ├── en │ ├── resources │ │ ├── epoch.png │ │ ├── arch_overlord.png │ │ ├── block_process.png │ │ ├── mempool_process.png │ │ └── state_transition.png │ ├── index.md │ ├── getting_started.md │ ├── transaction_pool.md │ └── overlord.md └── zh │ ├── resources │ ├── epoch.png │ ├── arch_overlord.png │ ├── block_process.png │ ├── mempool_process.png │ └── state_transition.png │ ├── index.md │ ├── metadata_service.md │ ├── vm_lang.md │ ├── overview.md │ ├── overlord_data_structure.md │ ├── riscv_service.md │ ├── network.md │ ├── transaction_pool.md │ ├── toolchain_minits_on_ckbvm.md │ ├── getting_started.md │ ├── node_manager_service.md │ ├── asset-service.md │ └── overlord.md └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | _book 2 | .vscode 3 | *.pdf 4 | *.patch 5 | *.sh -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # muta-docs 2 | This repository contains documentations related to Muta 3 | -------------------------------------------------------------------------------- /docs/en/resources/epoch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lite/muta-docs/master/docs/en/resources/epoch.png -------------------------------------------------------------------------------- /docs/zh/resources/epoch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lite/muta-docs/master/docs/zh/resources/epoch.png -------------------------------------------------------------------------------- /docs/en/resources/arch_overlord.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lite/muta-docs/master/docs/en/resources/arch_overlord.png 
-------------------------------------------------------------------------------- /docs/en/resources/block_process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lite/muta-docs/master/docs/en/resources/block_process.png -------------------------------------------------------------------------------- /docs/zh/resources/arch_overlord.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lite/muta-docs/master/docs/zh/resources/arch_overlord.png -------------------------------------------------------------------------------- /docs/zh/resources/block_process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lite/muta-docs/master/docs/zh/resources/block_process.png -------------------------------------------------------------------------------- /docs/en/resources/mempool_process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lite/muta-docs/master/docs/en/resources/mempool_process.png -------------------------------------------------------------------------------- /docs/en/resources/state_transition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lite/muta-docs/master/docs/en/resources/state_transition.png -------------------------------------------------------------------------------- /docs/zh/resources/mempool_process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lite/muta-docs/master/docs/zh/resources/mempool_process.png -------------------------------------------------------------------------------- /docs/zh/resources/state_transition.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/lite/muta-docs/master/docs/zh/resources/state_transition.png -------------------------------------------------------------------------------- /docs/en/index.md: -------------------------------------------------------------------------------- 1 | # Muta documentation 2 | 3 | - [Getting Started](./getting_started.md) 4 | - Module Design 5 | - [Transaction Pool](./transaction_pool.md) 6 | - [Overlord Consensus](./overlord.md) 7 | -------------------------------------------------------------------------------- /docs/zh/index.md: -------------------------------------------------------------------------------- 1 | # Muta 文档 2 | 3 | - [概览](./overview.md) 4 | - [快速开始](./getting_started.md) 5 | - 模块设计 6 | - [交易池](./transaction_pool.md) 7 | - [Overlord 共识](./overlord.md) 8 | - [合约语言](./vm_lang.md) 9 | - [网络](./network.md) 10 | - 合约开发 11 | - [在 CKB-VM 上运行 TypeScript](./toolchain_minits_on_ckbvm.md) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Nervos Network 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /docs/zh/metadata_service.md: -------------------------------------------------------------------------------- 1 | # Metadata Service 2 | Metadata Service 负责存储链的元数据信息,包括: 3 | 4 | ```rust 5 | pub struct Metadata { 6 | pub chain_id: Hash, 7 | pub common_ref: String, // BLS 签名算法的公共参数 8 | pub timeout_gap: u64, // (交易有效期 - 当前区块高度)的最大值 9 | pub cycles_limit: u64, // 区块全部交易消耗的 cycles 上限 10 | pub cycles_price: u64, // 节点设置的交易打包进区块的最小 cycles_price 11 | pub interval: u64, // 区块产生间隔 12 | pub verifier_list: Vec, // 共识验证人列表 13 | pub propose_ratio: u64, // 共识 propose 阶段的超时时间与 interval 的比值 14 | pub prevote_ratio: u64, // 共识 prevote 阶段的超时时间与 interval 的比值 15 | pub precommit_ratio: u64, // 共识 precommit 阶段的超时时间与 interval 的比值 16 | } 17 | 18 | pub struct Validator { 19 | pub address: Address, 20 | pub propose_weight: u32, //出块权重 21 | pub vote_weight: u32, // 投票权重 22 | } 23 | ``` 24 | 通过 Metadata Service 可以读取这些信息,接口如下: 25 | 26 | ## 接口 27 | ### 读取链元数据信息 28 | 29 | ```rust 30 | fn get_metadata(&self, ctx: ServiceContext) -> ProtocolResult; 31 | 32 | // Example: graphiql send tx 33 | query get_admin{ 34 | queryService( 35 | caller: "016cbd9ee47a255a6f68882918dcdd9e14e6bee1" 36 | serviceName: "metadata" 37 | method: "get_metadata" 38 | payload: "" 39 | ){ 40 | ret, 41 | isError 42 | } 43 | } 44 | ``` 45 | -------------------------------------------------------------------------------- /docs/zh/vm_lang.md: -------------------------------------------------------------------------------- 1 | # 合约编程 2 | 3 | 由于合约虚拟机使用了 RISCV 指令集, 因此任何可以编译到 RISCV 的编程语言都可以作为合约的编程语言. 
虚拟机使用的是 rv64imc 架构, 它基于 RV64I ISA 核心, 具有 M 标准扩展用于整数乘法和除法, 以及 C 标准扩展用于 RCV(RISC-V压缩指令). 注意的是, 虚拟机不支持浮点指令. 4 | 5 | 合约开发者可以根据自己的爱好来选择合约开发语言, 或者使用我们专门为 RISCV 虚拟机编写的编程语言: minits. minits 是一个以 LLVM 为后端的 TypeScript 静态编译器, 它可以将 TypeScript 代码编译为 RISCV 指令, 它拥有 TypeScript 的对开发者友好的语法, 同时拥有不亚于 C 语言的执行性能. 可访问 [https://github.com/cryptape/minits](https://github.com/cryptape/minits) 来获取关于 minits 更多的信息. 6 | 7 | # Minits 最小运行模型 8 | 9 | 任何合约都已一个 main 函数作为其入口函数, 同时返回一个退出码. 如果退出码非 0, 则意味着合约调用失败. 10 | 11 | ```ts 12 | function main(argc: number, argv: string[]): number { 13 | return 0 14 | } 15 | ``` 16 | 17 | # Example 18 | 19 | 下面的代码是一个 SimpleStorage 合约的例子. 该合约允许使用者存储或读取一对 K/V 值. 我们未来会将 syscall, set_storage, get_storage 这些函数以一个 SDK 形式对外提供, 但目前直接写在合约代码中更有利于开发者理解. syscall 是一个特殊的函数, 它允许合约与链上数据进行交互, 比如查询当前链高度, 获取某个账号的余额等. 20 | 21 | ```ts 22 | // A simplestorage contract for blockchain. 23 | 24 | const STORAGE_SET = 2180; 25 | const STORAGE_GET = 2181; 26 | const RET = 2182; 27 | 28 | function syscall(n: number, a: any, b: any, c: any, d: any, e: any, f: any): number { 29 | return 0; 30 | } 31 | 32 | function set_storage(k: string, v: string): number { 33 | return syscall(STORAGE_SET, k, v, 0, 0, 0, 0); 34 | } 35 | 36 | function get_storage(k: string): string { 37 | let v = ""; 38 | syscall(STORAGE_GET, k, v, 0, 0, 0, 0); 39 | return v 40 | } 41 | 42 | function ret(d: string): number { 43 | return syscall(RET, d, 0, 0, 0, 0, 0); 44 | } 45 | 46 | function main(argc: number, argv: string[]): number { 47 | if (argc == 1) { 48 | return 1; 49 | } 50 | switch (argv[1]) { 51 | case "get": 52 | const v = get_storage(argv[2]); 53 | ret(v); 54 | return 0; 55 | case "set": 56 | set_storage(argv[2], argv[3]); 57 | return 0; 58 | default: 59 | return 1; 60 | } 61 | } 62 | ``` -------------------------------------------------------------------------------- /docs/zh/overview.md: -------------------------------------------------------------------------------- 1 | # 概览 2 | 3 | - [概览](#%e6%a6%82%e8%a7%88) 4 | 
- [介绍](#%e4%bb%8b%e7%bb%8d) 5 | - [什么是区块链框架](#%e4%bb%80%e4%b9%88%e6%98%af%e5%8c%ba%e5%9d%97%e9%93%be%e6%a1%86%e6%9e%b6) 6 | - [Muta 的特点](#muta-%e7%9a%84%e7%89%b9%e7%82%b9) 7 | - [高性能](#%e9%ab%98%e6%80%a7%e8%83%bd) 8 | - [高吞吐量的共识算法](#%e9%ab%98%e5%90%9e%e5%90%90%e9%87%8f%e7%9a%84%e5%85%b1%e8%af%86%e7%ae%97%e6%b3%95) 9 | - [CKB-VM 上的 Account 编程模型](#ckb-vm-%e4%b8%8a%e7%9a%84-account-%e7%bc%96%e7%a8%8b%e6%a8%a1%e5%9e%8b) 10 | - [First-class Asset](#first-class-asset) 11 | 12 | ## 介绍 13 | 14 | Muta 是一个由 Rust 编写的具备可扩展性的高性能区块链框架。它允许你使用 Rust 或 Typescript 编写你的业务逻辑,构建你的专有区块链。 15 | 16 | 同时,Muta 还是 Nervos layer2 解决方案 Axon 的底层基础设施,Muta 将内置一套跨链方案联通整个 Nervos 网络。 17 | 18 | ## 什么是区块链框架 19 | 20 | 有过互联网开发经验的或多或少都会使用一些 Web 框架(eg. Express, Ruby on Rails),这些框架融合了 HTTP 协议解析、URL 解析、请求路由、中间件等必要功能,使用者只需要专注于每个 API 对应的业务逻辑即可。 21 | 22 | 区块链框架也一样,在区块链中 **智能合约+链上治理** 即为区块链中的业务逻辑,框架在底层提供了区块链运行的必要模块(eg. 共识算法、虚拟机、P2P)并且提供了稳定性、高性能的保证。 23 | 24 | ## Muta 的特点 25 | 26 | ### 高性能 27 | 28 | Muta 的目标是达到每秒处理数千个事务(TPS)。在目前的 benchmark 环境中,Muta 的每秒处理事务(TPS)数大约在 2000 左右,出块间隔在 2.5s。 29 | 30 | ### 高吞吐量的共识算法 31 | 32 | [Overlord][overlord] 是由 Nervos 研究团队设计研发的 BFT 类共识算法,其设计目标是成为能够支持上百个共识节点,满足数千笔每秒的交易处理能力,且交易延迟不超过数秒。Overlord 的核心思想是解耦交易定序与状态共识,从而实现共识和执行完全并行,极大提高整条链的交易吞吐量。 33 | 34 | ### CKB-VM 上的 Account 编程模型 35 | 36 | [CKB-VM][ckb-vm] 是一个实现了 RISCV 指令集的区块链虚拟机,具有高性能,可扩展性,灵活性等优点。 37 | 38 | 在 Muta 中,智能合约的编程模型采用的是 Account 模型,相比 UTXO,Account 模型更便于开发复杂逻辑的智能合约。 Account 模型最早是由以太坊所采用的一种智能合约编程模型,并且许多关于操作 Account 的指令都内嵌到了以太坊虚拟机 EVM 中, 39 | 40 | 得益于 [CKB-VM][ckb-vm] 的灵活性和可扩展性,在不侵入指令集修改的前提下,我们在 [CKB-VM][ckb-vm] 之上实现了一套 Account SDK 以实现 Muta 中的 Account 模型, 不仅如此,我们还提供了合约编程语言 [Minits][minits], [Minits][minits] 是一个专为区块链智能合约开发设计的 Typescript 的子集,它使用 LLVM 最终把代码编译成 RISCV 在 [CKB-VM][ckb-vm] 中运行。 41 | 42 | ### First-class Asset 43 | 44 | 在以太坊等智能合约平台中,用户自定义代币(User Defined Token, UDT)通常以标准智能合约形式出现。平台对某合约记录的是资产还是普通数据没有区分。这样带来了安全性、通用性和复杂性等多重风险。 45 | 46 | Libra 使用的 Move 以及 Nervos CKB 提出了区块链中一等公民(first-class 
citizen)的概念。在这个概念中资产,或者所属权成为了系统可直接识别的数据,而不是和其他数据糅合在一起对系统保持透明。 47 | 48 | Muta 对原生代币和 UDT 设置了一等公民的地位,我们称之为 first-class asset(简称 FCA)。所有代币的基础行为均由系统提供的原生合约实现,用户只需要给出代币的名称、发行量、管理方式等定义即可创建一等资产。 49 | 50 | 这样做的优势除了在于大幅降低实现复杂度、统一资产标准、提高安全性之外,更重要的是系统对代币行为可感知,从而便于实现更底层的经济激励、手续费计算逻辑以及原生跨链等业务。 51 | 52 | [关于 First-class Asset 的更多讨论][first-class asset] 53 | 54 | [overlord]: https://github.com/cryptape/overlord 55 | [ckb-vm]: https://github.com/nervosnetwork/ckb-vm 56 | [minits]: https://github.com/cryptape/minits 57 | [first-class asset]: https://talk.nervos.org/t/first-class-asset/405 58 | -------------------------------------------------------------------------------- /docs/zh/overlord_data_structure.md: -------------------------------------------------------------------------------- 1 | # Overlord 数据结构 2 | 3 | ## 类型 4 | 5 | ```rust 6 | type Address = Vec; 7 | 8 | type Signature = Vec; 9 | 10 | type Hash = Vec; 11 | ``` 12 | 13 | ## 枚举 14 | 15 | ```rust 16 | pub enum Role { 17 | Leader = 0, 18 | Replica = 1, 19 | } 20 | 21 | pub enum VoteType { 22 | Prevote = 0, 23 | Precommit = 1, 24 | } 25 | 26 | pub enum OutputMsg { 27 | SignedProposal(SignedProposal), 28 | SignedVote(SignedVote), 29 | AggregatedVote(AggregatedVote), 30 | } 31 | ``` 32 | 33 | ## Proposal 34 | 35 | ```rust 36 | pub struct SignedProposal { 37 | pub signature: Signature, 38 | pub proposal: Proposal, 39 | } 40 | 41 | pub struct Proposal { 42 | pub epoch: u64, 43 | pub round: u64, 44 | pub content: T, 45 | pub lock_round: Option, 46 | pub lock_votes: Vec>, 47 | pub proposer: Address, 48 | } 49 | ``` 50 | 51 | ## Vote 52 | 53 | ```rust 54 | pub struct SignedVote { 55 | pub signature: Signature, 56 | pub vote: Vote, 57 | } 58 | 59 | pub struct AggregatedVote { 60 | pub signature: AggregatedSignature, 61 | pub type: VoteType, 62 | pub epoch: u64, 63 | pub round: u64, 64 | pub proposal: Hash, 65 | } 66 | 67 | pub struct Vote { 68 | pub type: VoteType, 69 | pub epoch: u64, 70 | pub round: u64, 71 | pub proposal: Hash, 
72 | pub voter: Address, 73 | } 74 | ``` 75 | 76 | ## Commit 77 | 78 | ```rust 79 | pub struct Commit { 80 | pub epoch: u64, 81 | pub proposal: T, 82 | pub proof: Proof, 83 | } 84 | ``` 85 | 86 | ## AggregatedSignature 87 | 88 | ```rust 89 | pub struct AggregatedSignature { 90 | pub signature: Signature, 91 | pub address_bitmap: Vec, 92 | } 93 | ``` 94 | 95 | ## Proof 96 | 97 | ```rust 98 | pub struct Proof { 99 | pub epoch: u64, 100 | pub round: u64, 101 | pub proposal_hash: Hash, 102 | pub signature: AggregatedSignature, 103 | } 104 | ``` 105 | 106 | ## Node 107 | 108 | ```rust 109 | pub struct Node { 110 | pub address: Address, 111 | pub proposal_weight: usize, 112 | pub vote_weight: usize, 113 | } 114 | ``` 115 | 116 | ## Status 117 | 118 | ```rust 119 | pub Status { 120 | pub epoch: u64, 121 | pub interval: u64, 122 | pub authority_list: Vec, 123 | } 124 | ``` 125 | 126 | ## VerifyResp 127 | 128 | ```rust 129 | pub(crate) struct VerifyResp { 130 | pub(crate) proposal_hash: Hash, 131 | pub(crate) is_pass: bool, 132 | } 133 | ``` 134 | 135 | ## Feed 136 | 137 | ```rust 138 | pub(crate) struct Feed { 139 | pub(crate) epoch: u64, 140 | pub(crate) proposal: T, 141 | pub(crate) hash: Hash, 142 | } 143 | ``` 144 | -------------------------------------------------------------------------------- /docs/zh/riscv_service.md: -------------------------------------------------------------------------------- 1 | # RISC-V Service 2 | 3 | ## 概述 4 | 5 | RISC-V service 是一个基于 [`CKB-VM`](https://github.com/nervosnetwork/ckb-vm) 开发的虚拟机服务组件。 6 | 7 | 该组件内置了一个 [RISC-V](https://riscv.org/) 指令集解释器作为虚拟机。通过该组件,用户可以自由的部署和调用合约,实现强大的自定义功能。 8 | 任何支持 [RV64I]((https://riscv.org/specifications/)) 的编译器 (如 [riscv-gcc](https://github.com/riscv/riscv-gcc), [riscv-llvm](https://github.com/lowRISC/riscv-llvm), [Rust](https://github.com/rust-embedded/wg/issues/218)) 生成的可执行文件均可以作为合约使用。 9 | 10 | 想要了解跟多 CKB-VM 的信息,可以参考 [CKB 
RFC](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0003-ckb-vm/0003-ckb-vm.zh.md)。 11 | 12 | ## RISC-V 运行模型 13 | 14 | RISC-V service 使用 64 位的 RISC-V 虚拟机作为 VM 来执行合约。合约直接使用 Linux 的 ELF 可执行文件格式,合约的运行等同于 Linux 环境下一个可执行文件在单核 CPU 下的运行。 15 | 16 | ```c 17 | #include 18 | 19 | int main() { 20 | char args[100] = {0}; 21 | uint64_t args_len = 0; 22 | pvm_load_args(args, &args_len); 23 | 24 | // your contract logics here 25 | 26 | char result[] = "contract_execute_result"; 27 | pvm_ret(result, strlen(result)); 28 | 29 | return 0; 30 | } 31 | ``` 32 | 33 | 合约运行从合约 ELF 文件中的 main 函数开始执行。当 main 函数返回值为 0 时,认为合约执行成功,否则合约执行失败。 34 | 35 | `pvm.h` 中提供了一些合约执行的辅助函数,包括获取函数执行参数、获取交易上下文、操作合约数据等。 36 | - `pvm_load_args` 从交易中获取执行参数,`pvm_ret` 返回执行结果 37 | - 部分辅助函数可以获取交易上下文。例如 `pvm_block_height` 可以获取当前块高度。 38 | - 每个合约有自己独立的状态空间,相当于一个 kv 数据库,可以存储任意的 bytes。用户可以通过 `pvm_get_storage` 和 `pvm_set_storage` 来操作合约的状态。 39 | 40 | CKB-VM 仅为单线程模型,合约文件可以自行提供 coroutine 实现,但是在 VM 层不提供 threading。 41 | 42 | ## 开发语言 43 | 44 | 理论上任何提供了 RISC-V 后端的语言均可以用来开发合约: 45 | 46 | - 可以直接使用标准的 riscv-gcc 以及 riscv-llvm 以 C/C++ 语言来进行开发,编译后的可执行文件直接作为合约来使用。这是目前最成熟的方案,也是我们推荐使用的方案。文档后续的内容和示例均会用这种方法进行合约开发。 47 | - 其他的高级语言 VM 如 duktape 及 mruby 在编译后,也可以用来相应的运行 JavaScript 或者 Ruby 编写的合约。我们在 dev 和 test 环境提供了 duktape 的内置支持,用户可以用 JavaScript 快速编写合约,进行原型开发和 PoC 验证。此方案虚拟机执行开销较大,不建议在生产环境使用。 48 | - 相应的也可以使用 Rust 作为实现语言来编写合约 49 | 50 | ## 示例 51 | 52 | 在 service 源代码的 example 和 test 文件夹中有大量的参考示例。本文档也提供了一个合约开发的教程,请读者自行参阅相关章节。 53 | 54 | ## 接口 55 | 56 | ### 部署合约 57 | 58 | ```rust 59 | pub enum InterpreterType { 60 | Binary = 1, 61 | #[cfg(debug_assertions)] 62 | Duktape = 2, 63 | } 64 | 65 | pub struct DeployPayload { 66 | pub code: String, 67 | pub intp_type: InterpreterType, 68 | pub init_args: String, 69 | } 70 | 71 | pub struct DeployResp { 72 | pub address: Address, 73 | pub init_ret: String, 74 | } 75 | ``` 76 | 77 | - 参数 78 | - code:合约代码,使用 hex 编码 79 | - intp_type:生产环境目前仅支持 `Binary`,即 ELF 二进制文件格式,dev 和 test 环境可以使用 `Duktape`,即使用 js 代码作为合约代码 80 
| - init_args:初始化参数 81 | - 返回值 82 | - address:合约地址 83 | - init_ret:初始化函数调用返回值 84 | 85 | ### 调用合约 86 | 87 | ```rust 88 | pub struct ExecPayload { 89 | pub address: Address, 90 | pub args: String, 91 | } 92 | ``` 93 | 94 | - 参数 95 | - address:调用的合约地址 96 | - args:合约调用参数 97 | - 返回值:为合约返回的字符串 -------------------------------------------------------------------------------- /docs/zh/network.md: -------------------------------------------------------------------------------- 1 | ## 网络设计 2 | 3 | - [网络设计](#%e7%bd%91%e7%bb%9c%e8%ae%be%e8%ae%a1) 4 | - [当前目标](#%e5%bd%93%e5%89%8d%e7%9b%ae%e6%a0%87) 5 | - [消息收发](#%e6%b6%88%e6%81%af%e6%94%b6%e5%8f%91) 6 | - [节点消息端 (Endpoint)](#%e8%8a%82%e7%82%b9%e6%b6%88%e6%81%af%e7%ab%af-endpoint) 7 | - [Gossip](#gossip) 8 | - [RPC Call](#rpc-call) 9 | - [RPC Response](#rpc-response) 10 | - [消息序列化](#%e6%b6%88%e6%81%af%e5%ba%8f%e5%88%97%e5%8c%96) 11 | - [消息处理](#%e6%b6%88%e6%81%af%e5%a4%84%e7%90%86) 12 | - [消息处理逻辑注册](#%e6%b6%88%e6%81%af%e5%a4%84%e7%90%86%e9%80%bb%e8%be%91%e6%b3%a8%e5%86%8c) 13 | - [消息的发送](#%e6%b6%88%e6%81%af%e7%9a%84%e5%8f%91%e9%80%81) 14 | 15 | ### 当前目标 16 | 17 | 基于 [tentacle crate](https://github.com/nervosnetwork/p2p) 实现一个简单的可工作的 P2P 网络,主要功能如下: 18 | 19 | - 节点身份 20 | 21 | - PeerID: secp256k1 的公钥派生出的 ID,[tentacle-secio](https://crates.io/crates/tentacle-secio) 22 | - Address: MultiAddress [REF](https://multiformats.io/multiaddr),只支持 TCP 23 | 24 | - 节点发现 25 | 26 | - bootstrap,[tentacle-discovery](https://crates.io/crates/tentacle-discovery) 27 | 28 | - 节点质量维护 29 | 30 | - ping,[tentacle-ping](https://crates.io/crates/tentacle-ping),超时断开 31 | 32 | - 节点持久化 33 | 34 | - 基于文件的简易持久化,服务退出时,将保存节点信息,默认关闭 35 | 36 | - 消息广播以及单播 37 | 38 | - 基础的广播服务,以及基于 secp256k1 公钥地址的单播 39 | 40 | - 消息加密传输 41 | 42 | - 基于 [tentacle-secio](https://crates.io/crates/tentacle-secio) 43 | 44 | - 其他 45 | 46 | - 消息优先级: 使用 tentacle 自带的消息发送优先级,目前只有两种,High 和 Normal 47 | - 消息压缩: 使用 snappy 48 | - 消息处理: 基于 handler 注册形式,由各个模块自定义接受消息处理逻辑 49 | 50 | ### 消息收发 51 | 52 | #### 节点消息端 
(Endpoint) 53 | 54 | 节点通过注册消息端地址对外暴露服务,实现消息接受及处理。目前提供三种类型的地址: 55 | 56 | ##### Gossip 57 | 58 | ```text 59 | /gossip/[service_name]/[message_name] 60 | ``` 61 | 62 | 消息单向广播以及单播 63 | 64 | ##### RPC Call 65 | 66 | ```text 67 | /rpc_call/[service_name]/[message_name] 68 | ``` 69 | 70 | ##### RPC Response 71 | 72 | ```text 73 | /rpc_resp/[service_name]/[message_name] 74 | ``` 75 | 76 | RPC 用于节点之间的消息交互通信,RPC Call 发送请求,RPC Response 返回。 77 | 78 | #### 消息序列化 79 | 80 | 序列化采用 protobuf ,消息需要实现 MessageCodec trait 。 81 | 82 | ```rust 83 | #[async_trait] 84 | pub trait MessageCodec: Sized + Send + Debug + 'static { 85 | async fn encode(&mut self) -> ProtocolResult; 86 | 87 | async fn decode(bytes: Bytes) -> ProtocolResult; 88 | } 89 | ``` 90 | 91 | 目前针对实现了 serde Serialize 和 Deserialize trait 的消息自动实现了 MessageCodec , 92 | 采用 bincode 作为中间序列化过渡。 93 | 94 | #### 消息处理 95 | 96 | 消息处理需要实现 MessageHandler trait 97 | 98 | ```rust 99 | #[async_trait] 100 | pub trait MessageHandler: Sync + Send + 'static { 101 | type Message: MessageCodec; 102 | 103 | async fn process(&self, ctx: Context, msg: Self::Message) -> ProtocolResult<()>; 104 | } 105 | ``` 106 | 107 | #### 消息处理逻辑注册 108 | 109 | 完成上述实现之后,可通过如下接口,完成消息逻辑处理的注册。 110 | 111 | ```rust 112 | pub fn register_endpoint_handler( 113 | &mut self, 114 | end: &str, 115 | handler: Box>, 116 | ) -> ProtocolResult<()> 117 | where 118 | M: MessageCodec; 119 | 120 | pub fn register_rpc_response(&mut self, end: &str) -> ProtocolResult<()> 121 | where 122 | M: MessageCodec; 123 | ``` 124 | 125 | `Gossip` 和 `RPC Call` 都需要通过 `register_endpoint_handler` 完成注册, 126 | 而 `RPC Response` 需要通过 `register_rpc_response` 完成注册。 127 | 128 | 未来计划将 `RPC Response` 注册去掉。 129 | 130 | `end` 即签名提到的节点消息端 `Endpoint` 缩写。 131 | 132 | #### 消息的发送 133 | 134 | ```rust 135 | #[async_trait] 136 | pub trait Gossip: Send + Sync { 137 | async fn broadcast(&self, cx: Context, end: &str, msg: M, p: Priority) -> ProtocolResult<()> 138 | where 139 | M: MessageCodec; 140 | 141 | async fn 
users_cast( 142 | &self, 143 | cx: Context, 144 | end: &str, 145 | users: Vec, 146 | msg: M, 147 | p: Priority, 148 | ) -> ProtocolResult<()> 149 | where 150 | M: MessageCodec; 151 | } 152 | 153 | #[async_trait] 154 | pub trait Rpc: Send + Sync { 155 | async fn call(&self, ctx: Context, end: &str, msg: M, pri: Priority) -> ProtocolResult 156 | where 157 | M: MessageCodec, 158 | R: MessageCodec; 159 | 160 | async fn response(&self, cx: Context, end: &str, msg: M, p: Priority) -> ProtocolResult<()> 161 | where 162 | M: MessageCodec; 163 | } 164 | ``` 165 | 166 | 如上述定义,网路服务实例化后,可通过调用 `handle()` 获取一个网络服务引用,该 167 | `handle` 实现了上述的接口,同时实现了 `Clone`。各模块可以通过它来完成消息的 168 | 发送。 169 | 170 | 注意:`UserAddress` 目前同 `tentacle-secio` 提供的 secp256k1 公钥绑定。 171 | -------------------------------------------------------------------------------- /docs/zh/transaction_pool.md: -------------------------------------------------------------------------------- 1 | # Mempool 2 | 3 | - [Mempool](#mempool) 4 | - [设计要求](#%e8%ae%be%e8%ae%a1%e8%a6%81%e6%b1%82) 5 | - [解决方案](#%e8%a7%a3%e5%86%b3%e6%96%b9%e6%a1%88) 6 | - [要求1](#%e8%a6%81%e6%b1%821) 7 | - [要求2](#%e8%a6%81%e6%b1%822) 8 | - [要求3](#%e8%a6%81%e6%b1%823) 9 | - [具体设计](#%e5%85%b7%e4%bd%93%e8%ae%be%e8%ae%a1) 10 | 11 | ## 设计要求 12 | 13 | Mempool 是节点负责收集新交易以及打包新交易给共识模块进行共识的功能模块。很自然地,我们对 Mempool 会提出一些要求: 14 | 15 | 1. 性能优秀,在普通计算设备中运行即可达到每秒插入 10000+ 笔交易的性能要求。 16 | 2. 公平性,按照收到交易的顺序打包交易。 17 | 18 | 此外,为了配合共识过程与交易同步过程并发的设计,还有第三个要求: 19 | 20 | 3. 打包给共识的交易包含两部分:用于共识的 order 交易以及用于同步的 propose 交易。 21 | 22 | ## 解决方案 23 | 24 | ### 要求1 25 | 26 | 要获得优秀的性能,首先要分析交易插入的过程,找到性能瓶颈之处对症解决。一笔交易插入 Mempool 的过程包括: 27 | 1. 检查交易池是否已满,以避免内存溢出。 28 | 2. 检查交易是否已经被交易池包含,以避免重复插入。 29 | 3. 检查交易的签名是否正确,格式是否合规,以避免插入明显错误的交易。 30 | 4. 
检查交易是否已经上链,以避免共识已上链的交易。 31 | 32 | 步骤 1, 2 的检查非常快,不是性能瓶颈。 33 | 34 | 步骤 3 涉及验签,是耗时操作,好在验签是一个独立的计算密集型操作,很适合用高并发来充分挖掘 CPU 性能,以达到提高性能的要求。 35 | 36 | 随着区块链的不断增长,历史交易数据日益庞大,步骤 4 的查询将会成为性能黑洞。我们通过在交易中设置一个必填的 timeout 字段,以及一个全局约束参数 g 来解决该问题。 37 | 38 | 具体来说,当某笔交易的 timeout 为 t,若该交易在高度 t 仍未被打包,则会被视为失效而被节点抛弃。为了避免用户设置过高的 timeout,若最新高度为 h 的节点收到的交易的 timeout t > h + g,同样会被节点视为非法而抛弃。在这种机制的约束下,最新高度为 h 的节点仅需保存高度区间为 [h - g, h] 的历史交易用于查重,查重的计算复杂度和存储复杂度均降到了O(g),与历史交易总量无关。 39 | 40 | ### 要求2 41 | 42 | 在交易优先级相同的情况下,如果节点后收到的交易却先被打包,这显然有违公平性。因此,交易池的交易必须按照先入先出的原则进行打包。 43 | 44 | 然而,如果按照以太坊的 nonce 单调递增的设计(交易中的 nonce 字段的设计是为了确保交易的唯一性),若交易池同时包含多笔同一用户发出的交易,则这些交易之间还需要满足偏序关系,这会给打包机制带来非常大的复杂性。因此,我们采用随机 nonce 的方式生成唯一交易,这种设计还会带来其他一些额外的好处,比如获得了更好的并发执行能力(例如同一个用户发出的多笔交易被同一个区块打包,在以太坊中这些交易必须顺序执行,而采用随机 nonce 后,我们就可以并发执行这些交易),简化钱包的设计(在以太坊中,钱包需要同步最新的 nonce,以避免发出重复的交易,而在我们的设计中就没有这样的要求)。 45 | 46 | 总之,强制要求一个用户的所有交易保持偏序是没有必要且低效的,如果某些交易之间存在某种依赖关系,我们可以使用 ref 字段来表示这种关系,以此获得比以太坊更通用的依赖表达,比如用于表示不同用户之间交易的依赖关系。并且我们的顺序打包方案可以很容易地扩展到满足这种依赖需求。 47 | 48 | ### 要求3 49 | 50 | 由于区块链是一个分布式系统,不同节点的交易池所包含的交易集合不会完全相同。共识过程与交易同步打包的核心思想是,在交易池中包含的交易很多,无法被一次共识完成的情况下(受限于 cycle_limit,类似以太坊的 gas_limit),未参与共识的交易的同步过程可以与共识过程并发进行。通过这样的设计,在下一个高度的共识开始的时候,参与共识的交易的同步过程已经提前一个高度开始了,共识效率因此得到了提升。 51 | 52 | 具体来说,就是交易池打包的时候,在 order 交易满了之后,继续打包交易作为 propose 交易。在共识的时候,leader 节点发出的提案中包含了 order 交易 和 propose 交易(提案中包含的实际上都是交易哈希,在共识过程中,我们采用的是 compact block 的设计)。order 交易参与共识,而 propose 交易开始同步。 53 | 54 | 注:compact block 设计,leader 发送的 proposal 中仅包含交易哈希,收到 proposal 的节点检查交易哈希是否在本地 Mempool 中,如果没有则向 leader 请求缺失的完整交易。通过 compact block 的设计,可以减少交易传输量,提高带宽利用率。 55 | 56 | ## 具体设计 57 | 58 | 根据以上分析,我们需要的是一个可以支持高并发插入交易,遵循先入先出原则打包交易,打包有不同用途的两类交易的 Mempool。 59 | 60 | 为了满足以上要求,我们用 Map 和 Queue 结构共享存储交易数据,Map 可快速查询和删除,而 Queue 满足先入先出的打包要求。事实上,我们用了两个 queue,就像两个杯子交替倒牛奶。Mempool 的核心数据结构如下: 61 | 62 | ```rust 63 | struct TxCache { 64 | /// 用 queue 实现先入先出的打包功能. 65 | /// 用两个 queue 轮流存储交易. 一个 queue 当前轮值, 另一个则作为替补. 66 | /// 打包时从当前轮值的 queue 中顺序打包. 
67 | queue_0: Queue, 68 | queue_1: Queue, 69 | /// 用 map 完成高效的随机查询和删除交易. 70 | map: Map, 71 | /// 指示当前轮值的 queue, true 为 queue_0, false 为 queue_1. 72 | is_zero: AtomicBool, 73 | /// 用于原子操作,以妥善处理打包与插入的并发问题. 74 | concurrent_count: AtomicUsize, 75 | } 76 | 77 | /// 用于 map 和 queue 中共享的交易结构 78 | type SharedTx = Arc; 79 | 80 | struct TxWrapper { 81 | tx: SignedTransaction, 82 | /// 该交易是否被 map 删除,有该标识的交易在打包交易时会被跳过,并且从 queue 中删除 83 | removed: AtomicBool, 84 | /// 避免重复同步的标识,有该标识的交易在打包 propose 交易时会被跳过 85 | proposed: AtomicBool, 86 | } 87 | 88 | /// 用于存储共识同步返回的交易 89 | type CallbackCache = Map; 90 | 91 | /// Mempool 打包返回给共识模块的数据结构 92 | struct MixedTxHashes { 93 | order_tx_hashes: Vec, 94 | propose_tx_hashes: Vec, 95 | } 96 | ``` 97 | 98 | 通过所有检查的新交易在插入 Mempool 时,首先包装为 `TxWrapper`(`removed` 和 `proposed` 均设置为 `false`)。然后转换为 `SharedTx` 并插入 `TxCache` 中(插入当前轮值的 `queue` 的尾部,以及 `map` 中)。 99 | 100 | Mempool 收到共识的打包请求时,返回 `MixedTxHashes`,其中包含用于共识的 `order_tx_hashes` 和用于提前同步的 `propose_tx_hashes`。 101 | 102 | 打包算法如下,从当前轮值的 `queue` 的头部开始弹出交易,跳过 `removed = true` 的 `TxWrapper`,直到达到 `cycle_limit `上限为止,将这些交易哈希插入 `order_tx_hashes` 中。继续弹出交易,跳过 `proposed = true` 的 `TxWrapper`,直到达到 `cycle_limit` 上限为止,将这些交易哈希插入 `propose_tx_hashes` 中。以上弹出的交易除了 `removed = true` 的交易外都按照弹出顺序插入到当前替补的 `queue` 中。当轮值 `queue` 全部弹出后,交换两个 `queue` 的身份。 103 | 104 | 当节点收到来自 leader 的 proposal 时,会请求 Mempool 检查 `order_tx_hashes` 和 `propose_tx_hashes`。Mempool 通过查询 `TxCache.map` 确定交易是否存在,对于缺失的交易发起同步请求。对于同步返回的 order 交易插入到 `CallbackCache` 中,而对于同步返回的 propose 交易则插入到 `TxCache` 中,并将 `proposed` 设置为 `true`。 105 | 106 | Mempool 收到共识的删除指定 `tx_hashes` 集合的请求时,先清空 `CallbackCache`,然后查询 `TxCache.map`,将对应的 `TxWrapper` 中的 `removed` 设置为 `true`,然后删除该 `SharedTx`。 107 | 108 | Mempool 的插入和打包过程如下图所示。 109 | 110 | ![image](./resources/mempool_process.png) 111 | -------------------------------------------------------------------------------- /docs/zh/toolchain_minits_on_ckbvm.md: -------------------------------------------------------------------------------- 1 
| # 在 CKB-VM 上运行 TypeScript 2 | 3 | - [在 CKB-VM 上运行 TypeScript](#在-ckb-vm-上运行-typescript) 4 | - [太长不看](#太长不看) 5 | - [构建 ckb-vm](#构建-ckb-vm) 6 | - [构建 minits](#构建-minits) 7 | - [构建 LLVM](#构建-llvm) 8 | - [构建 riscv-gnu-toolchain](#构建-riscv-gnu-toolchain) 9 | - [测试代码](#测试代码) 10 | - [LLVM RISC-V 后端支持情况](#llvm-risc-v-后端支持情况) 11 | - [坑](#坑) 12 | - [**Bug 24389** **- can't link soft-float modules with double-float modules**](#bug-24389---cant-link-soft-float-modules-with-double-float-modules) 13 | - [构建失败](#构建失败) 14 | - [一些有用的链接](#一些有用的链接) 15 | 16 | [CKB-VM](https://github.com/nervosnetwork/ckb-vm) 是一个能够运行 RISC-V 指令集的虚拟机。要是它支持 TypeScript ,我们可以将 TypeScript 编译到 RISC-V 指令集。这里我们可以借助 [minits](https://github.com/cryptape/minits) 与 [LLVM](https://llvm.org/) 来实现,也就是`TypeScript -- minits --> LLVM IR -- LLVM-RISCV --> RISC-V` 这样一条编译路径最终获得 RISC-V 指令集。 17 | 18 | ## 太长不看 19 | 20 | ### 构建 ckb-vm 21 | 22 | ```shell 23 | git clone https://github.com/nervosnetwork/ckb-vm 24 | cd ckb-vm 25 | cargo build 26 | ``` 27 | 28 | ### 构建 minits 29 | 30 | ```shell 31 | git clone https://github.com/cryptape/minits 32 | cd minits 33 | npm install 34 | npm config set cmake_LLVM_DIR $(path-to-llvm/bin/llvm-config --cmakedir) 35 | npm run build 36 | ``` 37 | 38 | ### 构建 LLVM 39 | 40 | ```shell 41 | git clone https://github.com/llvm/llvm-project.git 42 | cd llvm-project 43 | mkdir build 44 | cd build 45 | cmake -DLLVM_TARGETS_TO_BUILD="X86;RISCV;PowerPC" -DLLVM_ENABLE_PROJECTS=clang -G "Unix Makefiles" ../llvm 46 | cmake --build . 
47 | ``` 48 | 49 | ### 构建 riscv-gnu-toolchain 50 | 51 | ```shell 52 | git clone --recursive https://github.com/riscv/riscv-gnu-toolchain 53 | sudo apt-get install autoconf automake autotools-dev curl libmpc-dev libmpfr-dev libgmp-dev gawk build-essential bison flex texinfo gperf libtool patchutils bc zlib1g-dev libexpat-dev 54 | ./configure --prefix=/opt/riscv 55 | make 56 | ``` 57 | 58 | ### 测试代码 59 | 60 | ```typescript 61 | // fib.ts 62 | function fibo(n: number): number { 63 | if (n < 2) { 64 | return n; 65 | } 66 | return fibo(n - 1) + fibo(n - 2); 67 | } 68 | 69 | function main(argc: number, argv: string[]): number { 70 | return fibo(13); 71 | } 72 | ``` 73 | 74 | ```rust 75 | // demo.rs 76 | use bytes::Bytes; 77 | use std::io::Read; 78 | 79 | fn main() { 80 | let args: Vec = std::env::args().map(|a| a.into()).collect(); 81 | 82 | let mut file = std::fs::File::open("examples/main").unwrap(); 83 | let mut buffer = Vec::new(); 84 | file.read_to_end(&mut buffer).unwrap(); 85 | let buffer = Bytes::from(buffer); 86 | 87 | let r = ckb_vm::run::>(&buffer, &args[..]).unwrap(); 88 | println!("result is {:?}", r); 89 | } 90 | ``` 91 | 92 | ```shell 93 | # minits 94 | node ./build/main/index.js build main.ts -o main.ll 95 | clang -O -c --target=riscv64 main.ll 96 | rriscv64-unknown-elf-gcc -o main main.o 97 | 98 | # copy main to ckb-vm/examples 99 | cargo run --example demo 100 | ``` 101 | 102 | ## LLVM RISC-V 后端支持情况 103 | 104 | 目前 *LLVM 8.x* RISC-V 后端还处于实验状态,但已经出现在 [Target列表](https://llvm.org/svn/llvm-project/llvm/trunk/lib/Target/RISCV/) 中。[在 *LLVM 9.0* 中 *RISC-V* 支持将有望从实验性功能变更为官方功能](https://lists.llvm.org/pipermail/llvm-dev/2019-July/133724.html),并已经加入 [RC 版本的 Release Note](http://prereleases.llvm.org/9.0.0/rc6/docs/ReleaseNotes.html#changes-to-the-riscv-target) 。[要开启实验性的功能只能从源码构建](https://stackoverflow.com/questions/46905464/how-to-enable-a-llvm-backend),目前需要构建需要加上 `-DLLVM_TARGETS_TO_BUILD="RISCV"`参数才能构建出有RISC-V的backend。 105 | 106 | 107 | 108 | ## 坑 109 | 110 | ### 
[**Bug 24389**](https://sourceware.org/bugzilla/show_bug.cgi?id=24389) **- can't link soft-float modules with double-float modules** 111 | 112 | 在 Mac 上直接使用 brew 安装的版本可能还未修复这个bug,只能在 **Ubuntu 18** 下从源码构建 113 | 114 | ### 构建失败 115 | 116 | ``` 117 | cmake -DLLVM_TARGETS_TO_BUILD="RISCV" -DLLVM_ENABLE_PROJECTS=clang -G "Unix 118 | Makefiles" ../llvm 119 | 120 | collect2: fatal error: ld terminated with signal 9 [Killed] 121 | compilation terminated. 122 | tools/lto/CMakeFiles/LTO.dir/build.make:167: recipe for target 'lib/libLTO.so.10svn' failed 123 | make[2]: *** [lib/libLTO.so.10svn] Error 1 124 | make[2]: *** Deleting file 'lib/libLTO.so.10svn' 125 | CMakeFiles/Makefile2:10139: recipe for target 'tools/lto/CMakeFiles/LTO.dir/all' failed 126 | make[1]: *** [tools/lto/CMakeFiles/LTO.dir/all] Error 2 127 | Makefile:151: recipe for target 'all' failed 128 | make: *** [all] Error 2 129 | ``` 130 | 131 | ## 一些有用的链接 132 | 133 | - [LLVM Target Triple](https://llvm.org/doxygen/classllvm_1_1Triple.html) 134 | - [使用Clang 交叉编译](https://clang.llvm.org/docs/CrossCompilation.html) -------------------------------------------------------------------------------- /docs/zh/getting_started.md: -------------------------------------------------------------------------------- 1 | # Muta 入门 2 | 3 | - [Muta 入门](#muta-%e5%85%a5%e9%97%a8) 4 | - [安装和运行](#%e5%ae%89%e8%a3%85%e5%92%8c%e8%bf%90%e8%a1%8c) 5 | - [安装依赖](#%e5%ae%89%e8%a3%85%e4%be%9d%e8%b5%96) 6 | - [MacOS](#macos) 7 | - [ubuntu](#ubuntu) 8 | - [centos7](#centos7) 9 | - [archlinux](#archlinux) 10 | - [直接下载预编译的二进制文件](#%e7%9b%b4%e6%8e%a5%e4%b8%8b%e8%bd%bd%e9%a2%84%e7%bc%96%e8%af%91%e7%9a%84%e4%ba%8c%e8%bf%9b%e5%88%b6%e6%96%87%e4%bb%b6) 11 | - [从源码编译](#%e4%bb%8e%e6%ba%90%e7%a0%81%e7%bc%96%e8%af%91) 12 | - [获取源码](#%e8%8e%b7%e5%8f%96%e6%ba%90%e7%a0%81) 13 | - [安装 rust](#%e5%ae%89%e8%a3%85-rust) 14 | - [编译](#%e7%bc%96%e8%af%91) 15 | - [运行单节点](#%e8%bf%90%e8%a1%8c%e5%8d%95%e8%8a%82%e7%82%b9) 16 | - 
[运行多节点](#%e8%bf%90%e8%a1%8c%e5%a4%9a%e8%8a%82%e7%82%b9) 17 | - [使用 docker](#%e4%bd%bf%e7%94%a8-docker) 18 | - [配置说明](#%e9%85%8d%e7%bd%ae%e8%af%b4%e6%98%8e) 19 | 20 | ## 安装和运行 21 | 22 | 此处讲解在你的操作系统直接安装 muta 的方法,如果想要通过 docker 快速尝试 muta,可以参考 [使用 docker](#%e4%bd%bf%e7%94%a8-docker)。 23 | 24 | ### 安装依赖 25 | 26 | #### MacOS 27 | 28 | ``` 29 | brew install autoconf libtool 30 | ``` 31 | 32 | #### ubuntu 33 | 34 | ``` 35 | apt update 36 | apt install -y git curl openssl cmake pkg-config libssl-dev gcc build-essential clang libclang-dev 37 | ``` 38 | 39 | #### centos7 40 | 41 | ``` 42 | yum install -y centos-release-scl 43 | yum install -y git make gcc-c++ openssl-devel llvm-toolset-7 44 | 45 | # 打开 llvm 支持 46 | scl enable llvm-toolset-7 bash 47 | ``` 48 | 49 | #### archlinux 50 | 51 | ``` 52 | pacman -Sy --noconfirm git gcc pkgconf clang make 53 | ``` 54 | 55 | ### 直接下载预编译的二进制文件 56 | 57 | 我们会通过 [github releases](https://github.com/nervosnetwork/muta/releases) 发布一些常用操作系统的预编译二进制文件。如果其中包含你的操作系统,可以直接下载对应的文件。 58 | 59 | ### 从源码编译 60 | 61 | #### 获取源码 62 | 63 | 通过 git 下载源码: 64 | 65 | ``` 66 | git clone https://github.com/nervosnetwork/muta.git 67 | ``` 68 | 69 | 或者在 [github releases](https://github.com/nervosnetwork/muta/releases) 下载源码压缩包解压。 70 | 71 | #### 安装 rust 72 | 73 | 参考: 74 | 75 | ``` 76 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 77 | ``` 78 | 79 | #### 编译 80 | 81 | ``` 82 | cd /path/to/muta 83 | make prod 84 | ``` 85 | 86 | 编译完成后的二进制文件在 `target/release/muta-chain`。 87 | 88 | ### 运行单节点 89 | 90 | ``` 91 | cd /path/to/muta 92 | 93 | # 使用默认配置运行 muta 94 | # 如果是直接下载的 binary,请自行替换下面的命令为对应的路径 95 | ./target/release/muta-chain 96 | 97 | # 查看帮助 98 | $ ./target/release/muta-chain -h 99 | Muta v0.1.0 100 | Muta Dev 101 | 102 | USAGE: 103 | muta-chain [OPTIONS] 104 | 105 | FLAGS: 106 | -h, --help Prints help information 107 | -V, --version Prints version information 108 | 109 | OPTIONS: 110 | -c, --config a required file for the configuration [default: 
./devtools/chain/config.toml] 111 | -g, --genesis a required file for the genesis json [default: ./devtools/chain/genesis.json] 112 | ``` 113 | 114 | ### 运行多节点 115 | 116 | 1. 根据节点拓扑,修改配置文件 config.toml,主要注意其中的 privkey、network 和 verifier_list 部分,可以参考下面的 docker-compose 配置,或者详细阅读下文的配置说明; 117 | 2. 将 muta binary 文件、muta 配置 config.toml 和创世块文件 genesis.json 分发到待部署的节点机器; 118 | 3. 启动 bootstrap 节点; 119 | 4. 启动其它节点; 120 | 121 | ## 使用 docker 122 | 123 | ``` 124 | docker run -it --init -p 8000:8000 nervos/muta 125 | 126 | # 如果想要保留链的数据,可以数据目录挂载到 host 机器 127 | docker run -it --init -p 8000:8000 -v `pwd`/data:/app/devtools/chain/data nervos/muta 128 | ``` 129 | 130 | 可以访问 页面与链进行交互。 131 | 132 | 133 | 使用 docker compose 运行多节点: 134 | 135 | ``` 136 | docker-compose -f devtools/docker/dockercompose/docker-compose-bft.yaml up 137 | ``` 138 | 139 | 此处默认启动 4 节点,数据在 `target/data/bft1` ~ `target/data/bft4` 文件夹下, 可以查看 `docker-compose-bft.yaml` 获取更详细的配置信息。 140 | 141 | ## 配置说明 142 | 143 | 默认的配置样例在 `./devtools/chain/config.toml`,此处对其中的一些字段进行说明。 144 | 145 | ```toml 146 | # chain id,链的唯一标识,同一个链的所有节点该项配置必须相同 147 | chain_id = "b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036" # by sha256(Muta) 148 | 149 | # 节点私钥,节点的唯一标识,在作为 bootstraps 节点时,需要给出地址和该私钥对应的公钥让其他节点连接;如果是出块节点,该私钥对应的地址需要在 consensus verifier_list 中 150 | privkey = "45c56be699dca666191ad3446897e0f480da234da896270202514a0e1a587c3f" 151 | 152 | # db config,链数据所在目录 153 | data_path = "./devtools/chain/data" 154 | 155 | [graphql] 156 | # graphql 监听地址 157 | listening_address = "0.0.0.0:8000" 158 | # graphql 访问路径 159 | graphql_uri = "/graphql" 160 | # graphiql 路径 161 | graphiql_uri = "/graphiql" 162 | 163 | [network] 164 | # p2p 监听地址 165 | listening_address = "0.0.0.0:1337" 166 | 167 | [[network.bootstraps]] 168 | # 初始启动时访问的节点信息 169 | pubkey = "031288a6788678c25952eba8693b2f278f66e2187004b64ac09416d07f83f96d5b" 170 | address = "0.0.0.0:1888" 171 | 172 | # 交易池相关配置 173 | [mempool] 174 | # 最大超时间隔,如果 当前区块数 + timeout_gap > tx 中的 timeout 
字段,则交易池会拒绝接收该交易 175 | timeout_gap = 20 176 | # 交易池大小 177 | pool_size = 20000 178 | # 为了增加性能,每积累到这么多个交易才对外广播一次 179 | broadcast_txs_size = 200 180 | # 交易池广播交易间隔,单位为 毫秒(ms) 181 | broadcast_txs_interval = 200 182 | 183 | [consensus] 184 | # 最大 cycles 限制 185 | cycles_limit = 99999999 186 | # cycle 价格 187 | cycles_price = 1 188 | # 出块间隔,单位为 毫秒(ms) 189 | interval = 3000 190 | # 出块节点的地址合集 191 | verifier_list = [ "10f8389d774afdad8755ef8e629e5a154fddc6325a" ] 192 | 193 | # 共识相关配置 194 | [consensus.duration] 195 | # 下面两项标识 propose 阶段的超时时间占共识间隔的比例的分子和分母。 196 | # 按照上述配置为 3000ms,则 propose 共识阶段的超时时间为 3000ms * 24 / 30 = 2400ms。 197 | # 下面类似的有 prevote 和 precommit 阶段的超时设置。 198 | propose_numerator = 24 199 | propose_denominator = 30 200 | prevote_numerator = 6 201 | prevote_denominator = 30 202 | precommit_numerator = 6 203 | precommit_denominator = 30 204 | 205 | [executor] 206 | # 设为 true 时,节点将只保存最新高度的 state 207 | light = false 208 | ``` -------------------------------------------------------------------------------- /docs/zh/node_manager_service.md: -------------------------------------------------------------------------------- 1 | # Node Manager Service 2 | Node Manager Service 负责变更节点的共识配置,并对变更权限进行管理。这些信息存储在 Metadata Service 中,在 `Metadata` 中可以动态变更的字段有 `interval`、`verifier_list`、 `propose_ratio`、 `prevote_ratio`、`precommit_ratio` 。只有 admin 账户有权限进行变更操作,admin 账户的初始值写在 `config/genesis.toml` 配置文件中,起链后可以发交易给 Node Manager Service 进行修改。 3 | 4 | ## 接口 5 | 6 | 1. 读取 admin 地址 7 | 8 | ```rust 9 | fn get_admin(&self, ctx: ServiceContext) -> ProtocolResult
; 10 | 11 | // Example: graphiql send tx 12 | query get_admin{ 13 | queryService( 14 | caller: "016cbd9ee47a255a6f68882918dcdd9e14e6bee1" 15 | serviceName: "node_manager" 16 | method: "get_admin" 17 | payload: "" 18 | ){ 19 | ret, 20 | isError 21 | } 22 | } 23 | ``` 24 | 25 | 2. 设置 admin 地址 26 | 27 | ```rust 28 | // 需要 admin 权限 29 | fn set_admin(&mut self, ctx: ServiceContext, payload: SetAdminPayload) -> ProtocolResult<()>; 30 | 31 | // 参数 32 | pub struct SetAdminPayload { 33 | pub admin: Address, 34 | } 35 | 36 | // Example: graphiql send tx 37 | mutation set_admin{ 38 | unsafeSendTransaction(inputRaw: { 39 | serviceName:"node_manager", 40 | method:"set_admin", 41 | payload:"{\"admin\": \"016cbd9ee47a255a6f68882918dcdd9e14e6bee1\"}", 42 | timeout:"0x14", 43 | nonce:"0x9db2d7efe2b61a28827e4836e2775d913a442ed2f9096ca1233e479607c27cf7", 44 | chainId:"b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", 45 | cyclesPrice:"0x9999", 46 | cyclesLimit:"0x9999", 47 | }, inputPrivkey: "0x45c56be699dca666191ad3446897e0f480da234da896270202514a0e1a587c3f" 48 | ) 49 | } 50 | ``` 51 | 52 | 3. 
更新元数据 53 | 54 | ```rust 55 | // 需要 admin 权限 56 | fn update_metadata(&mut self, ctx: ServiceContext, payload: UpdateMetadataPayload) -> ProtocolResult<()>; 57 | 58 | // 参数 59 | pub struct UpdateMetadataPayload { 60 | pub verifier_list: Vec, 61 | pub interval: u64, 62 | pub propose_ratio: u64, 63 | pub prevote_ratio: u64, 64 | pub precommit_ratio: u64, 65 | } 66 | 67 | // Example: graphiql send tx 68 | mutation update_metadata{ 69 | unsafeSendTransaction(inputRaw: { 70 | serviceName:"node_manager", 71 | method:"update_metadata", 72 | payload:"{\"verifier_list\": [{\"address\": \"016cbd9ee47a255a6f68882918dcdd9e14e6bee1\", \"propose_weight\": 5, \"vote_weight\": 5}], \"interval\": 5000, \"propose_ratio\": 5, \"prevote_ratio\": 5, \"precommit_ratio\": 5}", 73 | timeout:"0xbe", 74 | nonce:"0x9db2d7efe2b61a28827e4836e2775d913a442ed2f9096ca1233e479607c27cf7", 75 | chainId:"b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", 76 | cyclesPrice:"0x9999", 77 | cyclesLimit:"0x9999", 78 | }, inputPrivkey: "0x30269d47fcf602b889243722b666881bf953f1213228363d34cf04ddcd51dfd2" 79 | ) 80 | } 81 | ``` 82 | 83 | 5. 更新区块间隔 84 | 85 | ```rust 86 | // 需要 admin 权限 87 | fn update_interval(&mut self, ctx: ServiceContext, payload: UpdateIntervalPayload) -> ProtocolResult<()>; 88 | 89 | // 参数 90 | pub struct UpdateIntervalPayload { 91 | pub interval: u64, 92 | } 93 | 94 | // Example: graphiql send tx 95 | mutation update_interval{ 96 | unsafeSendTransaction(inputRaw: { 97 | serviceName:"node_manager", 98 | method:"update_interval", 99 | payload:"{\"interval\": 666}", 100 | timeout:"0x20", 101 | nonce:"0x9db2d7efe2b61a28827e4836e2775d913a442ed2f9096ca1233e479607c27cf7", 102 | chainId:"b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", 103 | cyclesPrice:"0x9999", 104 | cyclesLimit:"0x9999", 105 | }, inputPrivkey: "0x45c56be699dca666191ad3446897e0f480da234da896270202514a0e1a587c3f" 106 | ) 107 | } 108 | ``` 109 | 110 | 6. 
更新验证人集合 111 | 112 | ```rust 113 | // 需要 admin 权限 114 | fn update_validators(&mut self, ctx: ServiceContext, payload: UpdateValidatorsPayload) -> ProtocolResult<()>; 115 | 116 | // 参数 117 | pub struct UpdateValidatorsPayload { 118 | pub verifier_list: Vec, 119 | } 120 | 121 | pub struct Validator { 122 | pub address: Address, 123 | pub propose_weight: u32, 124 | pub vote_weight: u32, 125 | } 126 | 127 | // Example: graphiql send tx 128 | mutation update_validators{ 129 | unsafeSendTransaction(inputRaw: { 130 | serviceName:"node_manager", 131 | method:"update_validators", 132 | payload:"{\"verifier_list\": [{\"address\": \"016cbd9ee47a255a6f68882918dcdd9e14e6bee1\", \"propose_weight\": 5, \"vote_weight\": 5}]}", 133 | timeout:"0xbe", 134 | nonce:"0x9db2d7efe2b61a28827e4836e2775d913a442ed2f9096ca1233e479607c27cf7", 135 | chainId:"b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", 136 | cyclesPrice:"0x9999", 137 | cyclesLimit:"0x9999", 138 | }, inputPrivkey: "0x30269d47fcf602b889243722b666881bf953f1213228363d34cf04ddcd51dfd2" 139 | ) 140 | } 141 | ``` 142 | 143 | 7. 
更新共识 round 超时时间 144 | 145 | ```rust 146 | // 需要 admin 权限 147 | fn update_ratio(&mut self, ctx: ServiceContext, payload: UpdateRatioPayload) -> ProtocolResult<()>; 148 | 149 | // 参数 150 | pub struct UpdateRatioPayload { 151 | pub propose_ratio: u64, 152 | pub prevote_ratio: u64, 153 | pub precommit_ratio: u64, 154 | } 155 | 156 | // Example: graphiql send tx 157 | mutation update_ratio{ 158 | unsafeSendTransaction(inputRaw: { 159 | serviceName:"node_manager", 160 | method:"update_ratio", 161 | payload:"{\"propose_ratio\": 5, \"prevote_ratio\": 5, \"precommit_ratio\": 5}", 162 | timeout:"0xbe", 163 | nonce:"0x9db2d7efe2b61a28827e4836e2775d913a442ed2f9096ca1233e479607c27cf7", 164 | chainId:"b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", 165 | cyclesPrice:"0x9999", 166 | cyclesLimit:"0x9999", 167 | }, inputPrivkey: "0x30269d47fcf602b889243722b666881bf953f1213228363d34cf04ddcd51dfd2" 168 | ) 169 | } 170 | ``` 171 | -------------------------------------------------------------------------------- /docs/zh/asset-service.md: -------------------------------------------------------------------------------- 1 | # Asset Service 2 | 3 | Asset service 是 huobi-chain 的内置资产模块,负责管理链原生资产以及第三方发行资产。 4 | 5 | ## 特点 6 | 7 | - 资产成为一等公民:加密资产作为区块链的核心,理应成为一等公民。Asset 模块利用 muta 框架提供的 service 能力,为所有资产提供链级别的支持,为面向资产编程提供支持。 8 | 9 | - 第三方发行资产: 用户可以使用 Asset 模块发行资产,自定义资产属性和总量等 10 | 11 | - 资产与合约交互: 未来可以打通虚拟机和资产模块,为资产的广泛使用提供支持 12 | 13 | ## 接口 14 | 15 | Asset 模块采用类似以太坊 ERC-20 的接口设计,主要包含: 16 | 17 | 1. 
发行资产 18 | 19 | ```rust 20 | // 资产数据结构 21 | pub struct Asset { 22 | pub id: Hash, 23 | pub name: String, 24 | pub symbol: String, 25 | pub supply: u64, 26 | pub issuer: Address, 27 | } 28 | 29 | // 发行资产接口 30 | // 资产 ID 自动生成,确保唯一 31 | fn create_asset(&mut self, ctx: ServiceContext, payload: CreateAssetPayload) -> ProtocolResult; 32 | 33 | // 发行资产参数 34 | pub struct CreateAssetPayload { 35 | pub name: String, 36 | pub symbol: String, 37 | pub supply: u64, 38 | } 39 | 40 | // Example: graphiql send tx 41 | mutation create_asset{ 42 | unsafeSendTransaction(inputRaw: { 43 | serviceName:"asset", 44 | method:"create_asset", 45 | payload:"{\"name\":\"Test Coin\",\"symbol\":\"TC\",\"supply\":100000000}", 46 | timeout:"0x172", 47 | nonce:"0x9db2d7efe2b61a88827e4836e2775d913a442ed2f9096ca1233e479607c27cf7", 48 | chainId:"b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", 49 | cyclesPrice:"0x9999", 50 | cyclesLimit:"0x9999" 51 | }, inputPrivkey: "0x30269d47fcf602b889243722b666881bf953f1213228363d34cf04ddcd51dfd2" 52 | ) 53 | } 54 | ``` 55 | 56 | 2. 查询资产信息 57 | 58 | ```rust 59 | // 查询接口 60 | fn get_asset(&self, ctx: ServiceContext, payload: GetAssetPayload) -> ProtocolResult; 61 | 62 | // 查询参数 63 | pub struct GetAssetPayload { 64 | pub id: Hash, // 资产 ID 65 | } 66 | 67 | // Example: graphiql send tx 68 | query get_asset{ 69 | queryService( 70 | caller: "016cbd9ee47a255a6f68882918dcdd9e14e6bee1" 71 | serviceName: "asset" 72 | method: "get_asset" 73 | payload: "{\"id\": \"5f1364a8e6230f68ccc18bc9d1000cedd522d6d63cef06d0062f832bdbe1a78a\"}" 74 | ){ 75 | ret, 76 | isError 77 | } 78 | } 79 | ``` 80 | 81 | 3. 
转账 82 | 83 | ```rust 84 | // 转账接口 85 | fn transfer(&mut self, ctx: ServiceContext, payload: TransferPayload) -> ProtocolResult<()>; 86 | 87 | // 转账参数 88 | pub struct TransferPayload { 89 | pub asset_id: Hash, 90 | pub to: Address, 91 | pub value: u64, 92 | } 93 | 94 | // Example: graphiql send tx 95 | mutation transfer{ 96 | unsafeSendTransaction(inputRaw: { 97 | serviceName:"asset", 98 | method:"transfer", 99 | payload:"{\"asset_id\":\"5f1364a8e6230f68ccc18bc9d1000cedd522d6d63cef06d0062f832bdbe1a78a\",\"to\":\"f8389d774afdad8755ef8e629e5a154fddc6325a\", \"value\":10000}", 100 | timeout:"0x289", 101 | nonce:"0x9db2d7efe2b61a28827e4836e2775d913a442ed2f9096ca1233e479607c27cf7", 102 | chainId:"b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", 103 | cyclesPrice:"0x9999", 104 | cyclesLimit:"0x9999", 105 | }, inputPrivkey: "0x30269d47fcf602b889243722b666881bf953f1213228363d34cf04ddcd51dfd2" 106 | ) 107 | } 108 | ``` 109 | 110 | 4. 查询余额 111 | 112 | ```rust 113 | // 查询接口 114 | fn get_balance(&self, ctx: ServiceContext, payload: GetBalancePayload) -> ProtocolResult 115 | 116 | // 查询参数 117 | pub struct GetBalancePayload { 118 | pub asset_id: Hash, 119 | pub user: Address, 120 | } 121 | 122 | // 返回值 123 | pub struct GetBalanceResponse { 124 | pub asset_id: Hash, 125 | pub user: Address, 126 | pub balance: u64, 127 | } 128 | 129 | // Example: graphiql send tx 130 | query get_balance{ 131 | queryService( 132 | caller: "016cbd9ee47a255a6f68882918dcdd9e14e6bee1" 133 | serviceName: "asset" 134 | method: "get_balance" 135 | payload: "{\"asset_id\": \"5f1364a8e6230f68ccc18bc9d1000cedd522d6d63cef06d0062f832bdbe1a78a\", \"user\": \"016cbd9ee47a255a6f68882918dcdd9e14e6bee1\"}" 136 | ){ 137 | ret, 138 | isError 139 | } 140 | } 141 | ``` 142 | 143 | 5. 
批准额度 144 | 145 | ```rust 146 | // 批准接口 147 | fn approve(&mut self, ctx: ServiceContext, payload: ApprovePayload) -> ProtocolResult<()>; 148 | 149 | // 批准参数 150 | pub struct ApprovePayload { 151 | pub asset_id: Hash, 152 | pub to: Address, 153 | pub value: u64, 154 | } 155 | 156 | // Example: graphiql send tx 157 | unsafeSendTransaction(inputRaw: { 158 | serviceName:"asset", 159 | method:"approve", 160 | payload:"{\"asset_id\":\"5f1364a8e6230f68ccc18bc9d1000cedd522d6d63cef06d0062f832bdbe1a78a\",\"to\":\"f8389d774afdad8755ef8e629e5a154fddc6325a\", \"value\":10000}", 161 | timeout:"0x378", 162 | nonce:"0x9db2d7efe2b61a28827e4836e2775d913a442ed2f9096ca1233e479607c27cf7", 163 | chainId:"b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", 164 | cyclesPrice:"0x9999", 165 | cyclesLimit:"0x9999", 166 | }, inputPrivkey: "0x30269d47fcf602b889243722b666881bf953f1213228363d34cf04ddcd51dfd2" 167 | ) 168 | } 169 | ``` 170 | 171 | 6. 授权转账 172 | 173 | ```rust 174 | // 接口 175 | fn transfer_from(&mut self, ctx: ServiceContext, payload: TransferFromPayload) -> ProtocolResult<()>; 176 | 177 | // 参数 178 | pub struct TransferFromPayload { 179 | pub asset_id: Hash, 180 | pub sender: Address, 181 | pub recipient: Address, 182 | pub value: u64, 183 | } 184 | 185 | // Example: graphiql send tx 186 | mutation transfer_from{ 187 | unsafeSendTransaction(inputRaw: { 188 | serviceName:"asset", 189 | method:"transfer_from", 190 | payload:"{\"asset_id\":\"5f1364a8e6230f68ccc18bc9d1000cedd522d6d63cef06d0062f832bdbe1a78a\",\"sender\":\"016cbd9ee47a255a6f68882918dcdd9e14e6bee1\", \"recipient\":\"fffffd774afdad8755ef8e629e5a154fddc6325a\", \"value\":5000}", 191 | timeout:"0x12c", 192 | nonce:"0x9db2d7efe2b61a28827e4836e2775d913a442ed2f9096ca1233e479607c27cf7", 193 | chainId:"b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", 194 | cyclesPrice:"0x9999", 195 | cyclesLimit:"0x9999", 196 | }, inputPrivkey: "0x45c56be699dca666191ad3446897e0f480da234da896270202514a0e1a587c3f" 
197 | ) 198 | } 199 | ``` 200 | 201 | 7. 查询限额 202 | 203 | ```rust 204 | // 查询接口 205 | fn get_allowance(&self, ctx: ServiceContext, payload: GetAllowancePayload) -> ProtocolResult; 206 | 207 | // 查询参数 208 | pub struct GetAllowancePayload { 209 | pub asset_id: Hash, 210 | pub grantor: Address, 211 | pub grantee: Address, 212 | } 213 | 214 | // 返回值 215 | pub struct GetAllowanceResponse { 216 | pub asset_id: Hash, 217 | pub grantor: Address, 218 | pub grantee: Address, 219 | pub value: u64, 220 | } 221 | 222 | // Example: graphiql send tx 223 | query get_allowance{ 224 | queryService( 225 | caller: "016cbd9ee47a255a6f68882918dcdd9e14e6bee1" 226 | serviceName: "asset" 227 | method: "get_allowance" 228 | payload: "{\"asset_id\": \"5f1364a8e6230f68ccc18bc9d1000cedd522d6d63cef06d0062f832bdbe1a78a\", \"grantor\": \"016cbd9ee47a255a6f68882918dcdd9e14e6bee1\", \"grantee\": \"f8389d774afdad8755ef8e629e5a154fddc6325a\"}" 229 | ){ 230 | ret, 231 | isError 232 | } 233 | } 234 | ``` -------------------------------------------------------------------------------- /docs/en/getting_started.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | - [Getting Started](#getting-started) 4 | - [Install and Run](#install-and-run) 5 | - [Install Dependencies](#install-dependencies) 6 | - [MacOS](#macos) 7 | - [ubuntu](#ubuntu) 8 | - [centos7](#centos7) 9 | - [archlinux](#archlinux) 10 | - [Download Pre-compiled Binary](#download-pre-compiled-binary) 11 | - [Compile From Source Code](#compile-from-source-code) 12 | - [Get Source Code](#get-source-code) 13 | - [Install Rust](#install-rust) 14 | - [Compile](#compile) 15 | - [Run Single Node Chain](#run-single-node-chain) 16 | - [Run Multiple Nodes Chain](#run-multiple-nodes-chain) 17 | - [Use Docker](#use-docker) 18 | - [Config Reference](#config-reference) 19 | 20 | ## Install and Run 21 | 22 | This is how you can install muta on your own operating system. 
If you want to try muta via docker quickly, you can check [use docker](#use-docker) first. 23 | 24 | ### Install Dependencies 25 | 26 | #### MacOS 27 | 28 | ``` 29 | brew install autoconf libtool 30 | ``` 31 | 32 | #### ubuntu 33 | 34 | ``` 35 | apt update 36 | apt install -y git curl openssl cmake pkg-config libssl-dev gcc build-essential clang libclang-dev 37 | ``` 38 | 39 | #### centos7 40 | 41 | ``` 42 | yum install -y centos-release-scl 43 | yum install -y git make gcc-c++ openssl-devel llvm-toolset-7 44 | 45 | # enable llvm 46 | scl enable llvm-toolset-7 bash 47 | ``` 48 | 49 | #### archlinux 50 | 51 | ``` 52 | pacman -Sy --noconfirm git gcc pkgconf clang make 53 | ``` 54 | 55 | ### Download Pre-compiled Binary 56 | 57 | We will publish the binary files for some common operating system on [github releases](https://github.com/nervosnetwork/muta/releases). 58 | If your system is in the list, you can download the file directly. 59 | 60 | ### Compile From Source Code 61 | 62 | #### Get Source Code 63 | 64 | Get source code via git: 65 | 66 | ``` 67 | git clone https://github.com/nervosnetwork/muta.git 68 | ``` 69 | 70 | Or download the source package on [github releases](https://github.com/nervosnetwork/muta/releases). 71 | 72 | #### Install Rust 73 | 74 | reference: 75 | 76 | ``` 77 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 78 | ``` 79 | 80 | #### Compile 81 | 82 | ``` 83 | cd /path/to/muta 84 | make prod 85 | ``` 86 | 87 | The compiled binary path is `target/release/muta-chain`. 
88 | 89 | ### Run Single Node Chain 90 | 91 | ``` 92 | cd /path/to/muta 93 | 94 | # run muta with default config 95 | # if you downloaded the binary instead of compiling it yourself, you should change the path to your binary path 96 | ./target/release/muta-chain 97 | 98 | # print help information 99 | $ ./target/release/muta-chain -h 100 | Muta v0.1.0 101 | Muta Dev 102 | 103 | USAGE: 104 | muta-chain [OPTIONS] 105 | 106 | FLAGS: 107 | -h, --help Prints help information 108 | -V, --version Prints version information 109 | 110 | OPTIONS: 111 | -c, --config a required file for the configuration [default: ./devtools/chain/config.toml] 112 | -g, --genesis a required file for the genesis json [default: ./devtools/chain/genesis.json] 113 | ``` 114 | 115 | ### Run Multiple Nodes Chain 116 | 117 | 1. Modify the `config.toml` according to your network topology. Pay attention to the `privkey`, `network` and `verifier_list` part. You can refer to the docker-compose config or read the config reference below. 118 | 2. Release your binary file, config.toml and genesis.json to your node machines. 119 | 3. Start the bootstrap nodes. 120 | 4. Start the other nodes. 121 | 122 | ## Use Docker 123 | 124 | ``` 125 | docker run -it --init -p 8000:8000 nervos/muta 126 | 127 | # if you want to keep the chain data, you can mount the data directory to the host machine 128 | docker run -it --init -p 8000:8000 -v `pwd`/data:/app/devtools/chain/data nervos/muta 129 | ``` 130 | 131 | Visit [graphiql](http://localhost:8000/graphiql) to interact with your chain! 132 | 133 | 134 | Use docker compose to run multiple nodes chain: 135 | 136 | ``` 137 | docker-compose -f devtools/docker/dockercompose/docker-compose-bft.yaml up 138 | ``` 139 | 140 | This config starts 4 nodes, and the chain data path is `target/data/bft1` ~ `target/data/bft4`. 141 | You can check `docker-compose-bft.yaml` for more details. 
142 | 143 | ## Config Reference 144 | 145 | The default config sample is located at `./devtools/chain/config.toml`. 146 | There are some comments below. 147 | 148 | ```toml 149 | # Chain id, the unique identifier of the chain. This field of all nodes of a chain should be the same. 150 | chain_id = "b6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036" # by sha256(Muta) 151 | 152 | # The private key of the node. 153 | # When connected as the bootstrap node, the public key which the connecting node uses should match this private key. 154 | # When used as validator, the address of this private key should be in the verifier_list. 155 | privkey = "45c56be699dca666191ad3446897e0f480da234da896270202514a0e1a587c3f" 156 | 157 | # Where the chain data locates. 158 | data_path = "./devtools/chain/data" 159 | 160 | [graphql] 161 | # graphql listen address 162 | listening_address = "0.0.0.0:8000" 163 | # graphql path 164 | graphql_uri = "/graphql" 165 | # graphiql path 166 | graphiql_uri = "/graphiql" 167 | 168 | [network] 169 | # p2p listen address 170 | listening_address = "0.0.0.0:1337" 171 | 172 | [[network.bootstraps]] 173 | pubkey = "031288a6788678c25952eba8693b2f278f66e2187004b64ac09416d07f83f96d5b" 174 | address = "0.0.0.0:1888" 175 | 176 | [mempool] 177 | # The max timeout gap. If current_epoch_id + timeout_gap > timeout field in transaction, the memory pool will reject this transaction. 178 | timeout_gap = 20 179 | # Memory pool size. When the memory pool is full, new transactions will be rejected. 180 | pool_size = 20000 181 | # To increase the performance of memory pool, we broadcast transactions in batches of this number. 182 | broadcast_txs_size = 200 183 | # Max transaction broadcast interval. Even if there are no transactions more than broadcast_txs_size, they will be broadcasted after this interval. 184 | # The unit is ms. 
185 | broadcast_txs_interval = 200 186 | 187 | [consensus] 188 | cycles_limit = 99999999 189 | cycles_price = 1 190 | # block interval, the unit is ms. 191 | interval = 3000 192 | # verifier address list 193 | verifier_list = [ "10f8389d774afdad8755ef8e629e5a154fddc6325a" ] 194 | 195 | [consensus.duration] 196 | # The numerator of the proportion of propose timeout to the epoch interval. 197 | propose_numerator = 24 198 | # The denominator of the proportion of propose timeout to the epoch interval. 199 | propose_denominator = 30 200 | # The numerator of the proportion of prevote timeout to the epoch interval. 201 | prevote_numerator = 6 202 | # The denominator of the proportion of prevote timeout to the epoch interval. 203 | prevote_denominator = 30 204 | # The numerator of the proportion of precommit timeout to the epoch interval. 205 | precommit_numerator = 6 206 | # The denominator of the proportion of precommit timeout to the epoch interval. 207 | precommit_denominator = 30 208 | 209 | [executor] 210 | # When set to true, the node will only keep the latest world state. 211 | light = false 212 | ``` -------------------------------------------------------------------------------- /docs/en/transaction_pool.md: -------------------------------------------------------------------------------- 1 | # Mempool 2 | 3 | ## Design Requirements 4 | 5 | Mempool is responsible for collecting and packaging new transactions to consensus module. 6 | Naturally, we have some requirements for mempool. 7 | 8 | 1. Excellent performance. 9 | It is required to achieve the performance of inserting 10,000+ tps in an ordinary computer. 10 | 11 | 2. Fairness. Transactions should been packaged in the order in which they are received. 12 | 13 | 3. In addition, in order to match the design of PPCT (parallel process of consensus 14 | and transaction-synchronization), there is a third requirement. 
15 | The data structure returned by package should consists of two parts: 16 | order-transactions for consensus and propose-transactions for synchronization. 17 | 18 | ## Solution 19 | 20 | ### Requirement 1. 21 | 22 | To achieve excellent performance, we should first analyze the process of insertion and 23 | propose solutions to the performance bottlenecks. 24 | The process includes: 25 | 1. Check if the mempool is full. 26 | 2. Check if the transaction has been included in the mempool. 27 | 3. Check that the signature of the transaction is correct and the format is compliant. 28 | 4. Check if the transaction is already on the chain. 29 | 30 | Steps 1, 2 are very fast, not a performance bottleneck. 31 | 32 | Step 3 involves the verification of signature, which is a time-consuming operation. 33 | Fortunately, the verification is an independent computationally intensive operation. 34 | It is suitable for high-concurrency to fully exploit CPU performance to improve performance. 35 | 36 | As the blockchain continues to grow, historical transaction data is growing, 37 | and the query in step 4 will become a performance black hole. 38 | We solve this problem by setting a timeout field in the transaction 39 | and a global constraint parameter `g`. 40 | 41 | Specifically, when the timeout of a transaction is `t`, if this transaction is still unpacked at the 42 | height of `t`, it will be considered invalid and discarded by the mempool. 43 | In order to avoid users setting timeout too high, if the timeout `t` > `h` + `g`, 44 | mempool with latest height `h` will also discard such transaction as illegal. 45 | Under this constraint, mempool with latest height `h` only needs 46 | to keep historical transactions with height between [h - g, h] for checking, 47 | and do the computational and storage complexity of the check are both reduced to `O(g)`, 48 | regardless of the total amount of historical transactions. 49 | 50 | ### Requirement 2. 
51 | 52 | In the case of the same transaction priority, if transactions received later are packaged first, 53 | this is obviously against fairness. 54 | Therefore, transactions in the mempool must be packaged on a FIFO basis. 55 | 56 | However, according to Ethereum's nonce monotonous design, 57 | if the transaction pool contains multiple transactions issued by the same user, 58 | then the transaction needs to satisfy the partial order relationship, 59 | which brings great complexity to the packaging design. 60 | Therefore, we take random nonce instead. 61 | This design also brings additional benefits, 62 | such as better concurrent execution, simplified wallet design, and more. 63 | 64 | In short, it is unnecessary and inefficient to force all transactions of a user to remain partial order. 65 | If there is some dependency between certain transactions, 66 | we can use a `ref` field to represent this relationship. 67 | This is a generic dependency expression compared to Ethereum. 68 | Our package algorithm can be easily extended to meet this dependency requirement. 69 | 70 | ### Requirement 3. 71 | 72 | Since blockchain is a distributed system, 73 | the transaction set of different nodes' mempools will not be identical. 74 | The core idea of PPCT is that if there are so many transactions in the mempool that 75 | they cannot all reach consensus at one time, 76 | the remaining transactions can be synchronized in parallel with the consensus process. 77 | With this design, the synchronization process of the order-transactions 78 | can be started one epoch earlier, such that the consensus efficiency has been improved. 79 | 80 | Specifically, at the time of packaging, after the order-transactions are full, 81 | mempool continues to package transactions as propose-transaction. 82 | The proposal issued by the leader contains order-transactions and propose-transactions. 83 | The order-transaction participates in the consensus, and the propose-transaction begins to synchronize. 
84 | 85 | ## Specific Design 86 | 87 | According to the above analysis, what we need is a mempool that can support high concurrent insertion, 88 | package transactions on a FIFO basis, and package two types of transactions with different purposes. 89 | 90 | In order to meet the above requirements, we use the Map and Queue structures to share transactions received, 91 | Map can quickly query and delete, and Queue meets the FIFO packaging requirements. 92 | In fact, we used two queues, just like pouring milk in two cups. 93 | The core data structure of mempool is as follows. 94 | 95 | ```rust 96 | struct TxCache { 97 | /// Use two queues to store transactions in turn. 98 | /// One queue for incumbent, and the other is candidate. 99 | /// Insertion and Packaging are working on incumbent queue. 100 | queue_0: Queue, 101 | queue_1: Queue, 102 | /// Use map to complete efficient random queries and deletion. 103 | map: Map, 104 | /// Indicate which queue is incumbent. 105 | is_zero: AtomicBool, 106 | /// Used for atomic operations to properly handle concurrency issues. 107 | concurrent_count: AtomicUsize, 108 | } 109 | 110 | /// A structure for sharing transactions in map and queues. 111 | type SharedTx = Arc; 112 | 113 | struct TxWrapper { 114 | tx: SignedTransaction, 115 | /// Indicate whether this transaction is deleted by map. 116 | removed: AtomicBool, 117 | /// Indicate whether this transaction is from propose-transaction synchronization. 118 | proposed: AtomicBool, 119 | } 120 | 121 | /// Store transactions that from order-transaction synchronization. 122 | type CallbackCache = Map; 123 | 124 | /// Data structure returned by package. 125 | struct MixedTxHashes { 126 | order_tx_hashes: Vec, 127 | propose_tx_hashes: Vec, 128 | } 129 | ``` 130 | 131 | New transactions that pass all checks are wrapped as `TxWrapper` with `removed` and `proposed` 132 | set to `false`, and then convert to `SharedTx` and insert into `TxCache`. 
133 | 134 | `MixedTxHashes` is the data structure returned by package, which contains `order_tx_hashes` for consensus 135 | and `propose_tx_hashes` for early synchronization. 136 | 137 | The package algorithm is as follows, popping transactions from the incumbent queue, 138 | skipping the `TxWrapper` of `removed = true` until reach `cycle_limit`, 139 | and these transaction hashes are as `order_tx_hashes`. 140 | Continue to pop transactions, skipping the `TxWrapper` of `proposed = true` until reach `cycle_limit`, 141 | and these transaction hashes are as `propose_tx_hashes`. 142 | The above pop-up transactions are pushed into the candidate queue except for transactions of `removed = true`. 143 | When the incumbent queue is popped up, the role of two queues are exchanged. 144 | 145 | When a node receives a proposal from the leader, 146 | it will query mempool checking `order_tx_hashes` and `propose_tx_hashes`. 147 | Mempool determines if a transaction exists by querying `TxCache.map` 148 | and initiates a synchronization request for the missing transaction. 149 | The order-transaction returned synchronously is inserted into `CallbackCache`, 150 | and the propose-transaction returned for synchronization is inserted into `TxCache` 151 | with `proposed` setting `true`. 152 | 153 | When mempool receives a request to delete a set of `tx_hashes`, first clear `CallbackCache`, 154 | then query `TxCache.map`, set `removed` in the corresponding `TxWrapper` to `true`, 155 | and then delete the `SharedTx`. 156 | 157 | The insertion and packaging process of mempool is shown in the figure below. 
158 | 159 | ![image](./resources/mempool_process.png) 160 | -------------------------------------------------------------------------------- /docs/zh/overlord.md: -------------------------------------------------------------------------------- 1 | # Overlord 架构设计 2 | 3 | - [目标](#目标) 4 | - [设计背景](#设计背景) 5 | - [Overlord 协议](#Overlord协议) 6 | - [总体设计](#总体设计) 7 | - [协议描述](#协议描述) 8 | - [Overlord 架构](#Overlord架构) 9 | - [共识状态机](#共识状态机) 10 | - [状态存储](#状态存储) 11 | - [定时器](#定时器) 12 | - [Wal](#Wal) 13 | - [Overlord 接口](#Overlord接口) 14 | - [共识接口](#共识接口) 15 | - [密码学接口](#密码学接口) 16 | 17 | ## 目标 18 | 19 | Overlord 的目标是成为能够支持上百个共识节点,满足数千笔每秒的交易处理能力,且交易延迟不超过数秒的 BFT 共识算法。简单来讲,就是能够满足大部分现实业务需求的高性能共识算法。 20 | 21 | ## 设计背景 22 | 23 | 在区块链中,一次共识至少包含两层语义: 24 | 25 | 1. 完成交易定序 26 | 2. 对最新状态达成共识 27 | 28 | 对于 UTXO 模型的区块链来说,新状态隐含在交易输出中,因此 1 和 2 是一体不可分割的。而对于 Account 模型的区块链来说,交易中并没有包含状态,只有执行完交易才能生成最新状态,状态用单独的一颗 MPT 树保存。 29 | 30 | 在 Account 模型中,为了实现第二层语义,常用的办法是,共识节点在打包新区块之前执行完区块中的所有交易,以计算出最新状态保存到块头中。包含了最新状态的区块达成共识后,区块中的交易完成了定序,同时最新状态亦完成了共识,任何节点可以重放区块中的交易验证状态的正确性。然而,这种处理方法制约了 BFT 类共识算法的交易处理能力。如下图所示,当高度为 h 的区块 B(h) 达成共识后,高度为 h+1 的新 leader 打包并执行 B(h+1) 后才能广播 B(h+1),其他共识节点收到 B(h+1) 后必须再执行 B(h+1) 以验证其正确性。在共识过程中,这两次串行的区块执行过程拖慢了共识效率。 31 | 32 |
33 | 34 | 一种改进的办法是,Leader 在打包新区块时并不立即执行该块,待区块达成共识后,共识节点才执行该块生成新的状态,下一个高度的 Leader 将新状态与下一个区块一起参与共识。这种办法省掉了一次区块执行过程。 35 | 36 | 当从更微观的角度来审察这种改进方案时,我们发现其仍然存在很大的改进空间。这是因为,任何一个共识节点的共识模块和执行模块在整个共识过程中始终是串行的,如上图所示,当共识模块在对区块共识时,执行模块始终是空闲的,反之亦然。如果能够将执行模块和共识模块并行,那么共识的交易处理能力理论上能够达到执行模块的最大处理极限。 37 | 38 | ## Overlord 协议 39 | 40 | ### 总体设计 41 | 42 | Overlord 的核心思想是解耦交易定序与状态共识。 43 | 44 | 我们用 B(h, S, T) 表示高度为 h 的区块,其包含的状态是 S,定序的交易集合是 T。在共识的第二层语义中,人们对 S 的解读往往是执行完 T 后的状态,正是这种思维定势使得执行模块和共识模块无法并行。如果将 S 理解为是共识模块在开始对 B(h, S, T) 共识时,执行模块执行达到的最新状态,那么共识模块将无需等待执行模块执行新的区块,而执行模块只需要沿着已定序的交易向前执行。这样,共识模块可以连续向前推进,不断将新交易定序,同时完成执行模块的最新状态共识; 执行模块也可以连续执行已定序的交易集合,直到将所有已定序的交易执行完毕。 45 | 46 | ### 协议描述 47 | 48 | 在 Overlord 中,一次共识过程称为一个 *epoch*,我们将达成共识的区块称为 *epoch*。*epoch* 包含 Header 和 Body 两部分(如下图所示)。*epoch* 的核心结构如下图所示,`epoch_id` 是单调递增的数值,相当于高度;`prev_hash` 是上一个 *epoch* 的哈希;`order_root` 是包含在 Body 中的所有待定序的交易的 merkle root;`state_root` 表示最新的世界状态的 MPT Root;`confirm_roots` 表示从上一个 *epoch* 的 `state_root` 到当前 *epoch* 的 `state_root` 之间执行模块向前推进的 `order_root` 集合;`receipt_roots` 记录被执行的每一个 `order_root` 所对应的 `receipt_root`;`proof` 是对上一个 *epoch* 的证明。 49 | 50 |
51 | 52 | 在具体的方案中,共识模块批量打包交易进行共识, 达成共识后, 将已定序的交易集合添加到待执行的队列中, 执行模块以交易集合为单位依次执行, 每执行完一个交易集合, 就将被执行的交易集合的 order_root, 以及执行后的 stateRoot 发给共识模块。在 Leader 打包交易拼装 *epoch* 时, 取最新收到的 state_root 作为最新状态参与共识. 53 | 54 | Overlord 是在具体共识算法之上的解释层, 通过重新诠释共识的语义, 使得交易定序与状态共识解耦, 从而在实际运行中获得更高的交易处理能力。理论上, Overlord 能够基于几乎任何 BFT 类共识算法, 具体在我们的项目中则是基于改进的 Tendermint。 55 | 56 | 我们对 Tendermint 主要做了三点改进: 57 | 58 | 1. 将聚合签名应用到 Tendermint 中, 使共识的消息复杂度从 降到 , 从而能够支持更多的共识节点 59 | 2. 在 *proposal* 中增加了 propose 交易区, 使新交易的同步与共识过程可并行 60 | 3. 共识节点收到 *proposal* 后, 无需等 *epoch* 校验通过即可投 *prevote* 票, 而在投 *precommit* 票之前必须得到 *epoch* 校验结果, 从而使得区块校验与 *prevote* 投票过程并行 61 | 62 | #### 聚合签名 63 | 64 | 在 Tendermint 共识协议中,节点在收到 *proposal* 之后对其投出 *prevote*,*prevote* 投票是全网广播给其他节点的。这时的通信复杂度是 。使用聚合签名优化是所有的节点将 *prevote* 投票发给一个指定的 *Relayer* 节点,Relayer 节点可以是任何一个共识节点。Relayer 节点将收到的签名通过算法计算聚合签名,再用一个位图 (bit-vec) 表示是哪些节点的投票。将聚合签名和位图发送给其他节点,对于 *precommit* 投票同理。这样就将通信复杂度降到了 。 65 | 66 | 如果 *Relayer* 出现故障,没有发送聚合签名给共识节点,或者 *Relayer* 作恶,只给小部分共识节点发送聚合签名,那么共识将会失活。我们采用超时投空票的方式解决这个问题。当节点在投出 *prevote* 投票之后,立即设置一个定时器,如果的超时时间内没有收到 *prevoteQC* 直接进入预提交状态,投出 *nil precommit* 投票。之后进入到下一个 round。如果预投票阶段正常,投出 *precommit* 之后同样设置一个定时器,如果超时没有收到 *precommitQC* 则直接进入下一个 round。 67 | 68 | #### 同步并行 69 | 70 | Overlord 采用压缩区块(compact block)的方式广播 *CompactEpoch*,即其 Body 中仅包含交易哈希,而非完整交易。共识节点收到 *CompactEpoch* 后,需要同步获得其 Body 中包含的全部完整交易后才能构造出完整的 *epoch*。 71 | 72 | 我们在 proposal 里除了包含 *CompactEpoch* 外,还额外增加了一个 *propose* *交易区,propose* 交易区中包含待同步的新交易的哈希。需要注意的是,这些交易与 *CompactEpoch* 里包含的待定序的交易哈希并不重叠,当 *CompactEpoch* 不足以包含交易池中所有的新交易时,剩余的新交易可以包含到 *propose* 交易区中提前同步。这在系统交易量很大的时候,可以提高交易同步与共识的并发程度,进一步提高交易处理能力. 
73 | 74 | #### 校验并行 75 | 76 | 共识节点收到 *proposal* 后,将 *CompactEpoch* 的校验(获得完整交易,校验交易的正确性) 与 *prevote* 投票并行,只有当收到 *prevote* 聚合签名和 *CompactEpoch* 的检验结果后,才会投 *precommit* 票。 77 | 78 | ## Overlord 架构 79 | 80 | Overlord 共识由以下几个组件组成的: 81 | 82 | * 状态机(SMR):根据输入消息的进行状态转换 83 | 84 | * 状态存储(State):用于存储提议,投票等状态 85 | 86 | * 定时器(Timer):设定超时时间触发状态机操作 87 | 88 | * Wal:用于读写 Wal 日志 89 | 90 | 在 Overlord 共识架构中,当收到消息时,状态存储模块先对消息做基本检查。通过后,根据接收到的消息更新状态,并将消息传输给状态机。此外,为了保持活性还需要一个定时器,当超时时定时器调用接口触发状态机。状态机在做状态变更之后会抛出一个当前状态的事件,状态存储模块和定时器模块监听状态机抛出的事件,根据监听到的事件做相应的处理,例如写 Wal,发送投票,设置定时器等。在重启时状态存储模块先从 Wal 中读取数据,再发送给状态机。整体的架构如下图所示: 91 | 92 |
93 | 94 | ### 共识状态机(SMR) 95 | 96 | 状态机模块是整个共识的逻辑核心,它主要的功能是状态变更和 **lock** 的控制。当收到消息触发时,根据收到的消息做状态变更,并将变更后的状态作为事件抛出。在我们的实现中,Overlord 使用一个应用 BLS 聚合签名优化的 Tendermint 状态机进行共识,整体的工作过程如下. 97 | 98 | #### 提议阶段 99 | 100 | 节点使用确定性随机算法确定本轮的 *Leader*。 101 | 102 | **Leader**: 广播一个 *proposal* 103 | 104 | **Others**: 设置一个定时器 T1,当收到 *proposal* 之后向 *Relayer* 发送 *prevote* 投票 105 | 106 | #### 预投票阶段 107 | 108 | **Relayer**: 设置一个定时器 T2,对收到的 *prevote* 投票进行聚合并生成位图,将聚合后的投票和位图广播给其他节点 109 | 110 | **Others**: 设置一个定时器 T2,检查聚合的 *prevote* 投票的合法性,生成 **PoLC** 发送 *precommit* 投票 111 | 112 | #### 校验等待阶段 113 | 114 | 所有节点设置一个定时器 T3,当收到对 *proposal* 的校验结果之后,进入预提交阶段 115 | 116 | #### 预提交阶段 117 | 118 | **Relayer**: 设置一个定时器 T4,对收到的 *precommit* 投票进行聚合并生成位图,将聚合后的投票和位图广播给其他节点 119 | 120 | **Others**: 设置一个定时器 T4,检查聚合的 *precommit* 投票的合法性 121 | 122 | #### 提交阶段 123 | 124 | 所有节点将 *proposal* 提交 125 | 126 | 共识状态机的状态转换图如下图所示: 127 | 128 |
129 | 130 | 在工程中,我们将预投票阶段和校验等待阶段合并为一个阶段,共用一个超时时间。当状态机收到聚合后的投票和校验结果之后,进入到预提交阶段。 131 | 132 | #### 状态机状态 133 | 134 | 状态机模块需要存储的状态有: 135 | 136 | * *epoch_id*: 当前共识的 epoch 137 | 138 | * *round*: 当前共识的轮次 139 | 140 | * *step*: 当前所在的阶段 141 | 142 | * *proposal_hash*: 可选,当前正在共识的哈希 143 | 144 | * *lock*: 可选,当前是否已经达成 **PoLC** 145 | 146 | #### 数据结构 147 | 148 | 状态机的触发结构如下: 149 | 150 | ```rust 151 | pub struct SMRTrigger { 152 | pub hash: Hash, 153 | pub round: Option, 154 | pub trigger_type: TriggerType, 155 | } 156 | ``` 157 | 158 | 状态机的输出结构如下: 159 | 160 | ```rust 161 | pub enum SMREvent { 162 | /// New round event 163 | /// for state: update round, 164 | /// for timer: set a propose step timer. 165 | NewRoundInfo { 166 | round: u64, 167 | lock_round: Option, 168 | lock_proposal: Option, 169 | }, 170 | /// Prevote event, 171 | /// for state: transmit a prevote vote, 172 | /// for timer: set a prevote step timer. 173 | PrevoteVote(Hash), 174 | /// Precommit event, 175 | /// for state: transmit a precommit vote, 176 | /// for timer: set a precommit step timer. 177 | PrecommitVote(Hash), 178 | /// Commit event 179 | /// for state: do commit, 180 | /// for timer: do nothing. 181 | Commit(Hash), 182 | } 183 | ``` 184 | 185 | #### 状态机接口 186 | 187 | ```rust 188 | /// Create a new SMR service. 189 | pub fn new() -> Self 190 | /// Trigger a SMR action. 191 | pub fn trigger(&self, gate: SMRTrigger) -> Result<(), Error> 192 | /// Goto a new consensus epoch. 
193 | pub fn new_epoch(&self, epoch_id: u64) -> Result<(), Error> 194 | ``` 195 | 196 | ### 状态存储(State) 197 | 198 | 状态存储模块是整个共识的功能核心,主要的功能为存储状态,消息分发,出块和密码学相关操作。在工作过程中,对于网络层传输来的消息,首先进行验签,校验消息的合法性。对通过的消息判断是否需要写入 Wal。之后将消息发送给状态机。状态存储模块时刻监听状态机抛出的事件,并根据事件作出相应的处理。 199 | 200 | #### 存储状态 201 | 202 | 状态存储模块需要存储的状态有: 203 | 204 | * *epoch_id*: 当前共识的 epoch 205 | 206 | * *round*: 当前共识的轮次 207 | 208 | * *proposals*: 缓存当前 epoch 所有的提议 209 | 210 | * *votes*: 缓存当前 epoch 所有的投票 211 | 212 | * *QCs*: 缓存当前 epoch 所有的 *QC* 213 | 214 | * *authority_manage*: 共识列表管理 215 | 216 | * *is_leader*: 节点是不是 *leader* 217 | 218 | * *proof*: 可选,上一个 epoch 的证明 219 | 220 | * *last_commit_round*: 可选,上一次提交的轮次 221 | 222 | * *last_commit_proposal*: 可选,上一次提交的提议 223 | 224 | #### 消息分发 225 | 226 | 发送消息时,根据消息及参数选择发送消息的方式(广播给其他节点或发送给 *Relayer*)。 227 | 228 | #### 出块 229 | 230 | 当状态存储模块监听到状态机抛出的 `NewRound` 事件时,通过一个确定性随机数算法判断自己是不是出块节点。如果是出块节点则提出一个 proposal。 231 | 232 | *确定性随机数算法*:因为 Overlord 共识协议允许设置不同的出块权重和投票权重,在判断出块时,节点将出块权重进行归一化,并投射到整个 `u64` 的范围中,使用当前 `epoch_id` 与 `round` 之和作为随机数种子,判断生成的随机数落入到`u64` 范围中的哪一个区间中,该权重对应的节点即为出块节点。 233 | 234 | #### 密码学操作 235 | 236 | 密码学操作包括如下方法: 237 | 238 | * 收到消息时,对消息进行验签 239 | 240 | * 收到聚合投票时,验签并校验权重是否超过阈值 241 | 242 | * 发出提议或投票时,对消息进行签名 243 | 244 | * 自己是 *Relayer* 时,对收到的投票进行聚合 245 | 246 | #### 状态存储接口 247 | 248 | ### 定时器 249 | 250 | 当状态机运行到某些状态的时候,需要设定定时器以便超时重发等操作。定时器模块会监听状态机抛出的事件,根据事件设置定时器。当达到超时时间,调用状态机模块的接口触发超时。定时器与状态存储复用 `SMREvent` 和接口。 251 | 252 | ### Wal 253 | 254 | 在共识过程中,需要将一些消息写入到 Wal 中。当重启时,状态存储模块首先从 Wal 中读取消息,回复重启前的状态。Wal 模块只与状态存储模块交互。 255 | 256 | #### Wal 接口 257 | 258 | ```rust 259 | /// Create a new Wal struct. 260 | pub fn new(path: &str) -> Self 261 | /// Set a new epoch of Wal, while go to new epoch. 262 | pub fn set_epoch(&self, epoch_id: u64) -> Result<(), Error> 263 | /// Save message to Wal. 264 | pub async fn save(&self, msg_type: WalMsgType, msg: Vec) -> Result<(), Error>; 265 | /// Load message from Wal. 
266 | pub fn load(&self) -> Vec<(WalMsgType, Vec)> 267 | ``` 268 | 269 | ## Overlord 接口 270 | 271 | ### 共识接口 272 | 273 | ```rust 274 | #[async_trait] 275 | pub trait Consensus { 276 | /// Consensus error 277 | type Error: ::std::error::Error; 278 | /// Get an epoch of an epoch_id and return the epoch with its hash. 279 | async fn get_epoch( 280 | &self, 281 | ctx: Context, 282 | epoch_id: u64 283 | ) -> Result<(T, Hash)), Self::Error>; 284 | /// Check the correctness of an epoch. 285 | async fn check_epoch( 286 | &self, 287 | ctx: Context, 288 | hash: Hash 289 | ) -> Result<(), Self::Error>; 290 | /// Commit an epoch. 291 | async fn commit( 292 | &self, ctx: Context, 293 | epoch_id: u64, 294 | commit: Commit 295 | ) -> Result; 296 | /// Transmit a message to the Relayer. 297 | async fn transmit_to_relayer( 298 | &self, 299 | ctx: Context, 300 | msg: OutputMsg, 301 | addr: Address 302 | ) -> Result<(), Self::Error>; 303 | /// Broadcast a message to other replicas. 304 | async fn broadcast_to_other( 305 | &self, 306 | ctx: Context, 307 | msg: OutputMsg 308 | ) -> Result<(), Self::Error>; 309 | } 310 | ``` 311 | 312 | ### 密码学接口 313 | 314 | ```rust 315 | pub trait Crypto { 316 | /// Crypto error. 317 | type Error: ::std::error::Error; 318 | /// Hash a message. 319 | fn hash(&self, msg: &[u8]) -> Hash; 320 | /// Sign to the given hash by private key. 321 | fn sign(&self, hash: Hash) -> Result; 322 | /// Aggregate signatures into an aggregated signature. 323 | fn aggregate_signatures(&self, signatures: Vec) -> Result; 324 | /// Verify a signature. 325 | fn verify_signature(&self, signature: Signature, hash: Hash) -> Result; 326 | /// Verify an aggregated signature. 
327 | fn verify_aggregated_signature(&self, aggregate_signature: Signature) -> Result<(), Self::Error>; 328 | } 329 | ``` 330 | -------------------------------------------------------------------------------- /docs/en/overlord.md: -------------------------------------------------------------------------------- 1 | # Overlord 2 | 3 | ## Goal 4 | 5 | Overlord is a Byzantine fault tolerance (BFT) consensus algorithm aiming to support thousands of transactions per second under hundreds of consensus nodes, with transaction delays of no more than a few seconds. Simply put, it is a high-performance consensus algorithm able to meets most of the real business needs. 6 | 7 | ## Background 8 | 9 | Usually, a consensus process consists of at least two layers of semantics: 10 | 11 | 1. Complete the transaction sequencing 12 | 2. Achieve consensus on the latest state 13 | 14 | For the blockchain of the UTXO model, the new state is implicit in the transaction output, so 1 and 2 are integral and inseparable. For the blockchain of the Account model, the transaction does not contain the state, and only after the transaction is executed can the latest state be generated and the state is saved in an independent MPT tree. 15 | 16 | For the Account model, in order to implement the second semantic, the common method is to let the consensus nodes execute all the transactions firstly. In this step, the latest state could be calculated and be saved to the block header. And then, consensus node will broadcast the block in which the transaction is sequenced. Once the consensus is achieved, the state and the transaction sequence is determined among all the nodes. 17 | However, this method restricts the transaction processing capability of the BFT-like consensus algorithm. As shown in the figure below, Only after the block B(h)(h means the block height) reaches a consensus, the new leader can pack and execute B(h+1),and then broadcast B(h+1). 
After receiving B(h+1), other consensus nodes need to execute B(h+1) again to verify its correctness. So, in this process, these two serial block execution processes slow down the consensus efficiency. 18 | 19 |
20 | 21 | An improved method is that the Leader does not execute the block immediately after packing the new block. After the block reaches a consensus, the consensus node executes the block to generate a new state, and the next height leader will pack this state and the next height blocks together to participate in the next consensus process. This method saves one block execution process time. 22 | 23 | When examining this improvement from a more microscopic perspective, we found that there is still much room for improvement. This is because the consensus module and the execution module of any consensus node are always serial throughout the consensus process. As shown in the figure above, when the consensus module runs, the execution module is always idle, and vice versa. If the execution module and the consensus module can be paralleled, the consensus transaction processing capability can theoretically reach the maximum processing limit of the execution module. 24 | 25 | 26 | 27 | ## Overlord Protocol 28 | 29 | ### Overview of the protocol 30 | 31 | The core of Overlord is to decouple transaction sequence and state consensus. 32 | 33 | We use B(h, S, T) to represent a block of height h, which contains a state of S, and the ordered transaction set is T. In the second semantic of consensus, people's interpretation of S is often the state after the execution of T, and this makes the execution module and consensus module impossible to parallel. If S is understood as the latest state in execution module execution in this moment, the consensus module will not have to wait for the execution module to execute the block. And the execution module only needs to execute forward. In this way, the consensus module can continuously advance, continuously ordering new transactions, and complete consensus on the latest state of the execution module; the execution module can also continuously execute the ordered transaction set until all the ordered transactions are executed. 
34 | 35 | 36 | ### Protocol description 37 | 38 | 39 | In Overlord, a consensus process is called an epoch. The epoch contains two parts, Header and Body (as shown below). The core structure of epoch is shown below: `epoch_id` is a monotonically increasing value, equivalent to height; `prev_hash` is the hash of the previous epoch; `order_root` is the merkle root of all pending transactions contained in the Body; `state_root` represents the MPT root of the latest world state; `confirm_roots` represents the `order_root` collection from the `state_root` of the previous epoch to the `state_root` of the current epoch; the `receipt_roots` records the `receipt_root` corresponding to each `order_root` being executed; `proof` is the proof of the previous epoch. 40 | 41 |
42 | 43 | In this method, the consensus module batches the transactions to make a consensus. After the consensus is reached, the ordered transaction set is added to the queue to be executed, and the execution module executes in order of the transaction set, and each execution of the transaction set is performed. The ordered root of the transaction set to be executed, and the executed stateRoot are sent to the consensus module. When packing the transactions to assemble the epoch, the leader take the latest state_root as the latest state to participate in the consensus. 44 | 45 | Overlord is an interpretation layer above the specific consensus algorithm. By reinterpreting the semantics of consensus, the transaction sequence is decoupled from the state consensus, so that higher transaction processing capability can be obtained in actual operation. In theory, Overlord can be based on almost any BFT-like consensus algorithm, specifically in our project based on the improved Tendermint. 46 | 47 | We have made three major improvements compared to Tendermint: 48 | 49 | 1. Apply the aggregate signature to Tendermint to make the consensus message complexity from falls to , thus being able to support more consensus nodes 50 | 2. The propose transaction area is added to the proposal, so that the synchronization of the new transaction can be paralleled with the consensus process. 51 | 3. After receiving the proposal, the consensus node can vote for the prevote without waiting for the epoch check, and must obtain the epoch check result before voting the precommit vote, so that the block check is parallel with the prevote process. 52 | 53 | #### Aggregate signature 54 | 55 | In the Tendermint consensus protocol, the node casts a prevote on the proposal after it receives the proposal, and the prevote vote is broadcast to other nodes throughout the network. 
The communication complexity at this time is O(n²). The aggregated signature optimization is the process by which all nodes send prevote votes to a specified Relayer node, which can be any consensus node. The Relayer node calculates the aggregated signature by the algorithm, and then uses a bitmap (bit-vec) to indicate which nodes vote. Send aggregated signatures and bitmaps to other nodes, for the same reason as precommit voting. This reduces the communication complexity to O(n). 56 | 57 | If Relayer fails, no aggregated signature is sent to the consensus node, or Relayer does evil, and only a small number of consensus nodes send aggregated signatures, the consensus will be inactivated. We use a time-out vote to solve this problem. When the node sends a prevote vote, it immediately sets a timer. If the prevoteQC is not received within the timeout period, it directly enters the pre-commit status, and the nil precommit vote is thrown. Then go to the next round. If the pre-voting phase is normal, a timer is also set after the precommit is sent. If the precommitQC is not received after the timeout, the next round is entered. 58 | 59 | #### Synchronous parallelism 60 | 61 | Overlord broadcasts CompactEpoch in a compact block, meaning that its body contains only transaction hashes, not full transactions. After receiving the CompactEpoch, the consensus node needs to synchronize all the complete transactions contained in its Body to construct a complete epoch. 62 | 63 | In addition to the CompactEpoch, we also added a propose transaction area in the proposal. The propose transaction area contains the hashes of the new transactions to be synchronized. It should be noted that these transactions do not overlap with the pending transaction hashes contained in CompactEpoch. When CompactEpoch is not sufficient to contain all new transactions in the transaction pool, the remaining new transactions can be included in the propose transaction area for early synchronization. 
This can increase the degree of concurrency of transaction synchronization and consensus when the system transaction volume is large, and further improve transaction processing capability. 64 | 65 | #### Verify parallelism 66 | 67 | After receiving the *proposal*, the consensus node will verify the *CompactEpoch* (to obtain the complete transaction and verify the correctness of the transaction) in parallel with the *prevote* vote. Only after receiving the *prevote* aggregate signature and the *CompactEpoch* test result will the *precommit* be cast. 68 | 69 | ## Overlord architecture 70 | 71 | The Overlord consensus consists of the following components: 72 | 73 | * State Machine (SMR): State transition based on input messages 74 | * State storage (State): used to store the status of the proposal, voting, etc. 75 | * Timer: Set the timeout period to trigger the state machine operation 76 | * Wal: used to read and write Wal logs 77 | 78 | In the Overlord consensus architecture, when a message is received, the state storage module performs a basic check on the message. After passing, the status is updated according to the received message and the message is transmitted to the state machine. In addition, a timer is required to maintain activity, and when the timer expires, the timer calls the interface to trigger the state machine. The state machine will throw a current state event after making the state change. The state storage module and the timer module listen to the event thrown by the state machine, and perform corresponding processing according to the monitored event, such as writing Wal, sending a vote, setting the timing. And so on. At the time of the restart, the state storage module reads the data from the Wal and sends it to the state machine. The overall architecture is shown below: 79 | 80 |
81 | 82 | ### Consensus State Machine (SMR) 83 | 84 | The state machine module is the logical core of the entire consensus, and its main functions are state changes and *lock* control. When the received message is triggered, the status change is made according to the received message, and the changed status is thrown as an event. In our implementation, Overlord uses a Tendermint state machine that applies BLS aggregate signature optimization for consensus. The overall working process is as follows. 85 | 86 | #### Prepare phase 87 | 88 | The node uses a deterministic random algorithm to determine the leader of the current round. 89 | 90 | *Leader*: Broadcast a proposal 91 | 92 | *Others*: Set a timer T1 to send a *prevote* vote to *Relayer* when the proposal is received 93 | 94 | #### Prevote step 95 | 96 | *Relayer*: Set a timer T2 to aggregate the received *prevote* votes and generate a bitmap to broadcast the aggregated votes and bitmaps to other nodes. 97 | 98 | *Others*: Set a timer T2, check the validity of the aggregated prevote vote, generate *PoLC* and send precommit vote 99 | 100 | #### Verification step 101 | 102 | All nodes set a timer T3. After receiving the verification result of the *proposal*, they enter the *pre-commit* stage. 103 | 104 | #### Precommit step 105 | 106 | *Relayer*: Set a timer T4 to aggregate the received precommit votes and generate a bitmap to broadcast the aggregated votes and bitmaps to other nodes. 107 | 108 | *Others*: Set a timer T4 to check the legitimacy of the aggregated precommit vote 109 | 110 | #### Commit step 111 | 112 | All nodes commit the proposal 113 | 114 | The state transition diagram of the consensus state machine is shown below: 115 | 116 |
117 | 118 | In the project, we combine the pre-voting phase and the verification phase into one phase, sharing a timeout. When the state machine receives the aggregated voting and verification results, it enters the pre-commit phase. 119 | 120 | #### State Machine State 121 | 122 | The states that the state machine module needs to store are: 123 | 124 | * epoch_id: current consensus epoch 125 | * round: round of current consensus 126 | * step: the current stage 127 | * proposal_hash: optional, current hash of consensus 128 | * lock: optional, whether it has been reached *PoLC* 129 | 130 | #### data structure 131 | 132 | The trigger structure of the state machine is as follows: 133 | 134 | ``` 135 | pub struct SMRTrigger { 136 | pub hash: Hash, 137 | pub round: Option, 138 | pub trigger_type: TriggerType, 139 | } 140 | ``` 141 | 142 | The output structure of the state machine is as follows: 143 | 144 | ``` 145 | pub enum SMREvent { 146 | /// New round event 147 | /// for state: update round, 148 | /// for timer: set a propose step timer. 149 | NewRoundInfo { 150 | round: u64, 151 | lock_round: Option, 152 | lock_proposal: Option, 153 | }, 154 | /// Prevote event, 155 | /// for state: transmit a prevote vote, 156 | /// for timer: set a prevote step timer. 157 | PrevoteVote(Hash), 158 | /// Precommit event, 159 | /// for state: transmit a precommit vote, 160 | /// for timer: set a precommit step timer. 161 | PrecommitVote(Hash), 162 | /// Commit event 163 | /// for state: do commit, 164 | /// for timer: do nothing. 165 | Commit(Hash), 166 | /// Stop event, 167 | /// for state: stop process, 168 | /// for timer: stop process. 169 | Stop, 170 | } 171 | ``` 172 | 173 | #### State Machine Interface 174 | 175 | ``` 176 | /// Create a new SMR service. 177 | pub fn new() -> Self 178 | /// Trigger a SMR action. 179 | pub fn trigger(&self, gate: SMRTrigger) -> Result<(), Error> 180 | /// Goto a new consensus epoch. 
181 | pub fn new_epoch(&self, epoch_id: u64) -> Result<(), Error> 182 | ``` 183 | 184 | ### State storage (State) 185 | 186 | The state storage module is the functional core of the entire consensus. The main functions are storage state, message distribution, block, and cryptography related operations. In the working process, for the message transmitted by the network layer, the first check is performed to verify the validity of the message. Determine whether the written message needs to be written to Wal. The message is then sent to the state machine. The state storage module constantly listens for events thrown by the state machine and processes them accordingly. 187 | 188 | #### Storage Status 189 | 190 | The state that the state storage module needs to store include: 191 | 192 | * epoch_id: current consensus epoch 193 | * round: round of current consensus 194 | * proposals: cache current epoch all offers 195 | * votes: cache current epoch all votes 196 | * QCs: Cache current epoch all QC 197 | * authority_manage: consensus list management 198 | * is_leader: whether the node is a leader 199 | * proof: optional, proof of the last epoch 200 | * last_commit_round: optional, the last round of submissions 201 | * last_commit_proposal: Optional, last submitted proposal 202 | 203 | #### Message Distribution 204 | 205 | When sending a message, choose how to send the message based on the message and parameters (broadcast to other nodes or sent to Relayer). 206 | 207 | #### Block 208 | 209 | When the state storage module listens to the NewRound event thrown by the state machine, it determines whether it is a block node by a deterministic random number algorithm. If it is a block node, a proposal is made. 
210 | 211 | Deterministic random number algorithm: Because the Overlord consensus protocol allows different out-of-block weights and voting weights to be set, when determining the block, the node normalizes the block weights and projects them into the entire u64 range, using the current epoch_id and The sum of round is used as a random number seed to determine which of the u64 ranges the generated random number falls into, and the node corresponding to the weight is the outbound node. 212 | 213 | #### Cryptography 214 | 215 | Cryptographic operations include the following methods: 216 | 217 | * When the message is received, the signature of message need to be verified 218 | * When receiving the aggregate vote, verify the signature and check whether the weight exceeds the threshold 219 | * Sign the message when making a proposal or voting 220 | * When you are a Relayer, aggregate the votes you receive. 221 | 222 | #### Status Storage Interface 223 | 224 | ### Timer 225 | 226 | When the state machine runs to certain states, it needs to set a timer to perform operations such as timeout retransmission. The timer module listens for events thrown by the state machine and sets the timer based on the event. When the timeout period is reached, the interface of the calling state machine module triggers a timeout. The timer is multiplexed with the state store SMREvent and interface. 227 | 228 | ### Wal 229 | 230 | In the consensus process, some messages need to be written to Wal. When restarting, the state storage module first reads the message from Wal and replies to the state before the restart. The Wal module only interacts with the state storage module. 231 | 232 | #### Wal Interface 233 | 234 | ``` 235 | /// Create a new Wal struct. 236 | pub fn new(path: &str) -> Self 237 | /// Set a new epoch of Wal, while go to new epoch. 238 | pub fn set_epoch(&self, epoch_id: u64) -> Result<(), Error> 239 | /// Save message to Wal. 
240 | pub async fn save(&self, msg_type: WalMsgType, msg: Vec) -> Result<(), Error>; 241 | /// Load message from Wal. 242 | pub fn load(&self) -> Vec<(WalMsgType, Vec)> 243 | ``` 244 | 245 | ## Overlord Interface 246 | 247 | ### Consensus Interface 248 | 249 | ``` 250 | #[async_trait] 251 | pub trait Consensus: Send + Sync { 252 | /// Get an epoch of an epoch_id and return the epoch with its hash. 253 | async fn get_epoch( 254 | &self, 255 | _ctx: Vec, 256 | epoch_id: u64, 257 | ) -> Result<(T, Hash), Box>; 258 | 259 | /// Check the correctness of an epoch. If is passed, return the integrated transcations to do 260 | /// data persistence. 261 | async fn check_epoch( 262 | &self, 263 | _ctx: Vec, 264 | epoch_id: u64, 265 | hash: Hash, 266 | ) -> Result<(), Box>; 267 | 268 | /// Commit a given epoch to execute and return the rich status. 269 | async fn commit( 270 | &self, 271 | _ctx: Vec, 272 | epoch_id: u64, 273 | commit: Commit, 274 | ) -> Result>; 275 | 276 | /// Get an authority list of the given epoch ID. 277 | async fn get_authority_list( 278 | &self, 279 | _ctx: Vec, 280 | epoch_id: u64 281 | ) -> Result, Box>; 282 | 283 | /// Broadcast a message to other replicas. 284 | async fn broadcast_to_other( 285 | &self, 286 | _ctx: Vec, 287 | msg: OutputMsg, 288 | ) -> Result<(), Box>; 289 | 290 | /// Transmit a message to the Relayer, the third argument is the relayer's address. 291 | async fn transmit_to_relayer( 292 | &self, 293 | _ctx: Vec, 294 | addr: Address, 295 | msg: OutputMsg, 296 | ) -> Result<(), Box>; 297 | } 298 | ``` 299 | 300 | ### Cryptography Interface 301 | 302 | ``` 303 | pub trait Crypto { 304 | /// Hash a message. 305 | fn hash(&self, msg: &[u8]) -> Hash; 306 | 307 | /// Sign to the given hash by private key. 308 | fn sign(&self, hash: Hash) -> Result>; 309 | 310 | /// Aggregate signatures into an aggregated signature. 311 | fn aggregate_signatures( 312 | &self, 313 | signatures: Vec, 314 | ) -> Result>; 315 | 316 | /// Verify a signature. 
317 | fn verify_signature( 318 | &self, 319 | signature: Signature, 320 | hash: Hash, 321 | ) -> Result>; 322 | 323 | /// Verify an aggregated signature. 324 | fn verify_aggregated_signature( 325 | &self, 326 | aggregate_signature: AggregatedSignature, 327 | ) -> Result<(), Box>; 328 | } 329 | ``` --------------------------------------------------------------------------------