├── README.md ├── chaincode └── demo │ ├── callback │ ├── delete.js │ ├── open.js │ ├── query.js │ └── transfer.js │ └── demo.go ├── multi-channel-network ├── base │ ├── docker-compose-base.yaml │ └── peer-base.yaml ├── benchmarks │ └── config.yaml ├── configtx.yaml ├── crypto-config.yaml ├── docker-compose-cli.yaml └── scripts │ ├── env.sh │ ├── gen.sh │ └── utils.sh ├── pbft-network ├── base │ ├── docker-compose-base.yaml │ └── peer-base.yaml ├── benchmarks │ ├── config.yaml │ └── network.yaml ├── configtx.yaml ├── crypto-config.yaml ├── docker-compose-cli.yaml └── scripts │ ├── env.sh │ ├── gen.sh │ └── utils.sh ├── pbft ├── chain.go ├── cmd │ └── cmd.go ├── consensus.go ├── consenter.go ├── doc.md ├── message │ ├── buffer.go │ ├── crypto.go │ ├── lastreply.go │ └── message.go ├── node │ ├── boradcast.go │ ├── checkpoint.go │ ├── commit.go │ ├── execute.go │ ├── node.go │ ├── prepare.go │ ├── preprepare.go │ ├── reply.go │ ├── request.go │ ├── sequence.go │ └── utils.go └── server │ ├── handle.go │ └── server.go ├── rbft-network ├── base │ ├── docker-compose-base.yaml │ └── peer-base.yaml ├── benchmarks │ ├── config.yaml │ └── network.yaml ├── configtx.yaml ├── crypto-config.yaml ├── docker-compose-cli.yaml └── scripts │ ├── env.sh │ ├── gen.sh │ └── utils.sh ├── rbft ├── algorithm │ ├── queue.go │ └── queue_test.go ├── chain.go ├── cmd │ ├── cmd.go │ └── config.go ├── consensus.go ├── consenter.go ├── crypto │ ├── crypto.go │ └── crypto_test.go ├── doc.md ├── message │ ├── block.go │ ├── buffer.go │ ├── com.go │ ├── commit.go │ ├── lastblock.go │ ├── message.go │ ├── prepare.go │ └── proposal.go ├── node │ ├── block.go │ ├── boradcast.go │ ├── com.go │ ├── commit.go │ ├── node.go │ ├── prepare.go │ ├── proposal.go │ ├── sequence.go │ └── state.go └── server │ ├── handle.go │ └── server.go └── solo-network ├── base ├── docker-compose-base.yaml └── peer-base.yaml ├── benchmarks ├── config.yaml └── network.yaml ├── configtx.yaml ├── crypto-config.yaml ├── 
docker-compose-cli.yaml └── scripts ├── env.sh ├── gen.sh └── utils.sh /README.md: -------------------------------------------------------------------------------- 1 | # 说明 2 | 3 | * [PBFT 共识实现与网络搭建方法](https://www.yezhem.com/index.php/archives/52/) 4 | * [SOLO 共识网络搭建方法](https://www.yezhem.com/index.php/archives/39/) 5 | 6 | # 环境 7 | * Hyperleger/fabric v1.4.4 8 | * Hyperleger/caliepr-cli v0.3.0 9 | * node v8.10.0 10 | * npm v5.6.0 11 | * docker 19.03.5 12 | * docker-compose 1.25.4 13 | 14 | # 文件 15 | 16 | ``` 17 | chaincode/demo: 测试 chaincode 18 | chaincode/callback: hyperleger/caliper测试用例 19 | 20 | pbft: 可插拔 PBFT 共识算法简单实现 21 | rbft: 可插拔 RBFT 共识算法简单实现 22 | 23 | solo-network: solo共识配置 24 | pbft-network: pbft共识配置 25 | rbft-network: rbft共识配置 26 | multi-channel-network: solo多链配置 27 | ``` 28 | 29 | # 链码 30 | 31 | | 函数 | 功能 | 参数 | 32 | | :-------: | :--------------: | :--------------------: | 33 | | open | 开户 | 账户名, 金额 | 34 | | query | 查询 | 账户名 | 35 | | invoke | 转账 | 账户名, 账户名, 金额 | 36 | | delete | 销户 | 账户名 | 37 | 38 | # 编译 39 | 40 | [编译 pbft 说明文件](https://github.com/yezhem/fabric-sample/blob/master/pbft/doc.md):`./pbft/doc.md` 41 | 42 | [编译 rbft 说明文件](https://github.com/yezhem/fabric-sample/blob/master/rbft/doc.md):`./rbft/doc.md` 43 | 44 | # 测试 45 | 46 | ``` 47 | $ npx caliper launch master --caliper-workspace --caliper-benchconfig benchmarks/config.yaml --caliper-networkconfig benchmarks/network.yaml 48 | ``` 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /chaincode/demo/callback/delete.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const G = require("./open.js"); 4 | 5 | const logger = require("@hyperledger/caliper-core").CaliperUtils.getLogger("Test"); 6 | 7 | let bc, contx; 8 | let index = 0; 9 | 10 | module.exports.init = async (blockchain, context, args) => { 11 | bc = blockchain; 12 | contx = context; 13 | index = 0; 14 | }; 15 | 16 | 
'use strict';

// Caliper workload module: opens one account per transaction with a fixed
// starting balance. The generated account names and contract coordinates are
// re-exported below so the query/transfer/delete workload modules can reuse
// the same accounts within this worker process.
const logger = require("@hyperledger/caliper-core").CaliperUtils.getLogger("Test");

const contractID = "money_demo";
const contractVer = "1.0";

let bc, contx;
let initmoney = "100";
let counts = [];

// Called once per round by Caliper; caches the adapter and its context.
module.exports.init = async (blockchain, context, args) => {
    bc = blockchain;
    contx = context;
};

// One benchmark transaction: open a freshly named account.
module.exports.run = async () => {
    // Short pseudo-random base-36 suffix keeps names unique enough for a run.
    const name = "count_" + Math.random().toString(36).substr(7);
    counts.push(name);

    const txArgs = {
        chaincodeFunction: "open",
        chaincodeArguments: [name, initmoney]
    };
    return bc.invokeSmartContract(contx, contractID, contractVer, txArgs, 10000);
};

// Nothing to clean up after the round.
module.exports.end = async () => {
};

module.exports.initmoney = initmoney;
module.exports.contractID = contractID;
module.exports.contractVer = contractVer;
module.exports.counts = counts;
'use strict';

// Caliper workload module: transfers money between the accounts opened by
// open.js (source = counts[i], destination = counts[total - i - 1]).
const G = require("./open.js");

const logger = require("@hyperledger/caliper-core").CaliperUtils.getLogger("Test");

let bc, contx;
let total;      // number of accounts available when this round started
let index = 0;  // running transaction counter within the round

module.exports.init = async (blockchain, context, args) => {
    bc = blockchain;
    contx = context;
    total = G.counts.length;
    index = 0;
};

module.exports.run = async () => {
    // FIX: previously index could run past G.counts (when the round's
    // txNumber exceeds the number of opened accounts, or when this worker
    // opened none), so srccount/dstcount became undefined and the chaincode
    // received invalid arguments. Fail fast when empty, wrap around otherwise.
    if (total === 0) {
        throw new Error("transfer.js: no accounts available; run the open round first");
    }
    const i = index % total;

    const srccount = G.counts[i];
    const dstcount = G.counts[total - i - 1];

    // First half of the account range moves 1 unit, second half moves 20
    // (same split as before; `i` equals the old raw index while in range).
    const money = i < total / 2 ? 1 : 20;

    index++;

    const txArgs = {
        chaincodeFunction: "invoke",
        chaincodeArguments: [srccount, dstcount, money.toString()]
    };
    return bc.invokeSmartContract(contx, G.contractID, G.contractVer, txArgs, 10000);
};

module.exports.end = async () => {
};
17 | _, args := stub.GetFunctionAndParameters() 18 | 19 | if len(args) != 0 { 20 | return shim.Error("Incorrect number of arguments. Expecting 0") 21 | } 22 | 23 | return shim.Success(nil) 24 | } 25 | 26 | // Invoke方法 27 | func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { 28 | fmt.Println("ex02 Invoke") 29 | function, args := stub.GetFunctionAndParameters() 30 | 31 | if function == "invoke" { 32 | return t.invoke(stub, args) 33 | } else if function == "delete" { 34 | return t.delete(stub, args) 35 | } else if function == "query" { 36 | return t.query(stub, args) 37 | } else if function == "open" { 38 | return t.open(stub, args) 39 | } 40 | 41 | return shim.Error("Invalid invoke function name. Expecting \"invoke\" \"delete\" \"query\"") 42 | } 43 | 44 | // 开户 45 | func (t *SimpleChaincode) open(stub shim.ChaincodeStubInterface, args []string) pb.Response { 46 | if len(args) != 2 { 47 | return shim.Error("Incorrent number of arguments. Expecting 2") 48 | } 49 | 50 | count := args[0] 51 | val, err := strconv.Atoi(args[1]) 52 | if err != nil { 53 | return shim.Error("Expecting integer value " + args[0] + ":" + args[1]) 54 | } 55 | 56 | fmt.Printf("count = %s val = %d\n", count, val) 57 | 58 | // 验证账户存在 59 | valBytes, err := stub.GetState(count) 60 | if err != nil { 61 | return shim.Error("Failed to get state") 62 | } 63 | if valBytes != nil { 64 | return shim.Error("Entity already exist") 65 | } 66 | 67 | err = stub.PutState(count, []byte(strconv.Itoa(val))) 68 | 69 | return shim.Success(nil) 70 | } 71 | 72 | // 转账 73 | func (t *SimpleChaincode) invoke(stub shim.ChaincodeStubInterface, args []string) pb.Response { 74 | var A, B string 75 | var Aval, Bval int 76 | var X int 77 | var err error 78 | 79 | if len(args) != 3 { 80 | return shim.Error("Incorrect number of arguments. 
Expecting 3") 81 | } 82 | 83 | A = args[0] 84 | B = args[1] 85 | 86 | Avalbytes, err := stub.GetState(A) 87 | if err != nil { 88 | return shim.Error("Failed to get state") 89 | } 90 | if Avalbytes == nil { 91 | return shim.Error("Entity not found") 92 | } 93 | Aval, _ = strconv.Atoi(string(Avalbytes)) 94 | 95 | Bvalbytes, err := stub.GetState(B) 96 | if err != nil { 97 | return shim.Error("Failed to get state") 98 | } 99 | if Bvalbytes == nil { 100 | return shim.Error("Entity not found") 101 | } 102 | Bval, _ = strconv.Atoi(string(Bvalbytes)) 103 | 104 | X, err = strconv.Atoi(args[2]) 105 | if err != nil { 106 | return shim.Error("Invalid transaction amount, expecting a integer value") 107 | } 108 | Aval = Aval - X 109 | Bval = Bval + X 110 | fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval) 111 | 112 | // Write the state back to the ledger 113 | err = stub.PutState(A, []byte(strconv.Itoa(Aval))) 114 | if err != nil { 115 | return shim.Error(err.Error()) 116 | } 117 | 118 | err = stub.PutState(B, []byte(strconv.Itoa(Bval))) 119 | if err != nil { 120 | return shim.Error(err.Error()) 121 | } 122 | 123 | return shim.Success(nil) 124 | } 125 | 126 | // 删除 127 | func (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) pb.Response { 128 | if len(args) != 1 { 129 | return shim.Error("Incorrect number of arguments. Expecting 1") 130 | } 131 | 132 | A := args[0] 133 | 134 | // Delete the key from the state in ledger 135 | err := stub.DelState(A) 136 | if err != nil { 137 | return shim.Error("Failed to delete state") 138 | } 139 | 140 | return shim.Success(nil) 141 | } 142 | 143 | // 查询 144 | func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response { 145 | var A string // Entities 146 | var err error 147 | 148 | if len(args) != 1 { 149 | return shim.Error("Incorrect number of arguments. 
Expecting name of the person to query") 150 | } 151 | 152 | A = args[0] 153 | 154 | // Get the state from the ledger 155 | Avalbytes, err := stub.GetState(A) 156 | if err != nil { 157 | jsonResp := "{\"Error\":\"Failed to get state for " + A + "\"}" 158 | return shim.Error(jsonResp) 159 | } 160 | 161 | if Avalbytes == nil { 162 | jsonResp := "{\"Error\":\"Nil amount for " + A + "\"}" 163 | return shim.Error(jsonResp) 164 | } 165 | 166 | jsonResp := "{\"Name\":\"" + A + "\",\"Amount\":\"" + string(Avalbytes) + "\"}" 167 | fmt.Printf("Query Response:%s\n", jsonResp) 168 | return shim.Success(Avalbytes) 169 | } 170 | 171 | func main() { 172 | err := shim.Start(new(SimpleChaincode)) 173 | if err != nil { 174 | fmt.Printf("Error starting Simple chaincode: %s", err) 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /multi-channel-network/base/peer-base.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | peer-base: 5 | image: hyperledger/fabric-peer:$IMAGETAG 6 | environment: 7 | - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock 8 | - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=multi-channel-network_mc 9 | - FABRIC_LOGGING_SPEC=INFO 10 | - CORE_PEER_TLS_ENABLED=false 11 | - CORE_PEER_GOSSIP_USELEADERELECTION=true 12 | - CORE_PEER_GOSSIP_ORGLEADER=false 13 | - CORE_PEER_PROFILE_ENABLED=true 14 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer 15 | command: peer node start 16 | 17 | orderer-base: 18 | image: hyperledger/fabric-orderer:$IMAGETAG 19 | environment: 20 | - FABRIC_LOGGING_SPEC=INFO 21 | - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0 22 | - ORDERER_GENERAL_GENESISMETHOD=file 23 | - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block 24 | - ORDERER_GENERAL_LOCALMSPID=OrdererMSP 25 | - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp 26 | - ORDERER_GENERAL_TLS_ENABLED=false 27 | - 
ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1 28 | - ORDERER_KAFKA_VERBOSE=true 29 | - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt 30 | - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key 31 | - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt] 32 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric 33 | command: orderer 34 | 35 | -------------------------------------------------------------------------------- /multi-channel-network/benchmarks/config.yaml: -------------------------------------------------------------------------------- 1 | test: 2 | name: multi-channel-network 3 | description: multi-channel-network 4 | workers: 5 | type: local 6 | number: 4 7 | 8 | rounds: 9 | - label: open 10 | description: open 11 | txNumber: 1000 12 | rateControl: 13 | type: fixed-rate 14 | opts: 15 | tps: 100 16 | callback: ../chaincode/demo/callback/open.js 17 | 18 | - label: transfer 19 | description: transfer 20 | txNumber: 1000 21 | rateControl: 22 | type: fixed-rate 23 | opts: 24 | tps: 40 25 | callback: ../chaincode/demo/callback/transfer.js 26 | 27 | - label: query 28 | description: query 29 | txNumber: 1000 30 | rateControl: 31 | type: fixed-rate 32 | opts: 33 | tps: 100 34 | callback: ../chaincode/demo/callback/query.js 35 | 36 | - label: delete 37 | description: delete 38 | txNumber: 1000 39 | rateControl: 40 | type: fixed-rate 41 | opts: 42 | tps: 100 43 | callback: ../chaincode/demo/callback/delete.js 44 | 45 | monitor: 46 | interval: 1 47 | type: 48 | - docker 49 | docker: 50 | containers: 51 | - peer0.orgA.example.com 52 | - peer0.orgB.example.com 53 | - peer1.orgB.example.com 54 | - peer2.orgB.example.com 55 | - peer3.orgB.example.com 56 | - peer4.orgB.example.com 57 | - peer0.orgC.example.com 58 | - peer1.orgC.example.com 59 | - peer2.orgC.example.com 60 | - peer3.orgC.example.com 61 | - peer4.orgC.example.com 62 | - peer0.orgD.example.com 63 | - peer1.orgD.example.com 64 
| - peer2.orgD.example.com 65 | - peer3.orgD.example.com 66 | - peer4.orgD.example.com 67 | - peer5.orgD.example.com 68 | - orderer.example.com 69 | -------------------------------------------------------------------------------- /multi-channel-network/configtx.yaml: -------------------------------------------------------------------------------- 1 | Organizations: 2 | - &OrdererOrg 3 | Name: OrdererOrg 4 | ID: OrdererMSP 5 | MSPDir: crypto-config/ordererOrganizations/example.com/msp 6 | Policies: 7 | Readers: 8 | Type: Signature 9 | Rule: "OR('OrdererMSP.member')" 10 | Writers: 11 | Type: Signature 12 | Rule: "OR('OrdererMSP.member')" 13 | Admins: 14 | Type: Signature 15 | Rule: "OR('OrdererMSP.admin')" 16 | 17 | - &OrgA 18 | Name: OrgAMSP 19 | ID: OrgAMSP 20 | MSPDir: crypto-config/peerOrganizations/orgA.example.com/msp 21 | Policies: 22 | Readers: 23 | Type: Signature 24 | Rule: "OR('OrgAMSP.admin', 'OrgAMSP.peer', 'OrgAMSP.client')" 25 | Writers: 26 | Type: Signature 27 | Rule: "OR('OrgAMSP.admin', 'OrgAMSP.client')" 28 | Admins: 29 | Type: Signature 30 | Rule: "OR('OrgAMSP.admin')" 31 | AnchorPeers: 32 | - Host: peer0.orgA.example.com 33 | Port: 7051 34 | 35 | - &OrgB 36 | Name: OrgBMSP 37 | ID: OrgBMSP 38 | MSPDir: crypto-config/peerOrganizations/orgB.example.com/msp 39 | Policies: 40 | Readers: 41 | Type: Signature 42 | Rule: "OR('OrgBMSP.admin', 'OrgBMSP.peer', 'OrgBMSP.client')" 43 | Writers: 44 | Type: Signature 45 | Rule: "OR('OrgBMSP.admin', 'OrgBMSP.client')" 46 | Admins: 47 | Type: Signature 48 | Rule: "OR('OrgBMSP.admin')" 49 | AnchorPeers: 50 | - Host: peer0.orgB.example.com 51 | Port: 8051 52 | 53 | - &OrgC 54 | Name: OrgCMSP 55 | ID: OrgCMSP 56 | MSPDir: crypto-config/peerOrganizations/orgC.example.com/msp 57 | Policies: 58 | Readers: 59 | Type: Signature 60 | Rule: "OR('OrgCMSP.admin', 'OrgCMSP.peer', 'OrgCMSP.client')" 61 | Writers: 62 | Type: Signature 63 | Rule: "OR('OrgCMSP.admin', 'OrgCMSP.client')" 64 | Admins: 65 | Type: Signature 66 | 
Rule: "OR('OrgCMSP.admin')" 67 | AnchorPeers: 68 | - Host: peer0.orgC.example.com 69 | Port: 9051 70 | 71 | - &OrgD 72 | Name: OrgDMSP 73 | ID: OrgDMSP 74 | MSPDir: crypto-config/peerOrganizations/orgD.example.com/msp 75 | Policies: 76 | Readers: 77 | Type: Signature 78 | Rule: "OR('OrgDMSP.admin', 'OrgDMSP.peer', 'OrgDMSP.client')" 79 | Writers: 80 | Type: Signature 81 | Rule: "OR('OrgDMSP.admin', 'OrgDMSP.client')" 82 | Admins: 83 | Type: Signature 84 | Rule: "OR('OrgDMSP.admin')" 85 | AnchorPeers: 86 | - Host: peer0.orgD.example.com 87 | Port: 10051 88 | 89 | Capabilities: 90 | Channel: &ChannelCapabilities 91 | V1_4_3: true 92 | V1_3: false 93 | V1_1: false 94 | 95 | Orderer: &OrdererCapabilities 96 | V1_4_2: true 97 | V1_1: false 98 | 99 | Application: &ApplicationCapabilities 100 | V1_4_2: true 101 | V1_3: false 102 | V1_2: false 103 | V1_1: false 104 | 105 | Application: &ApplicationDefaults 106 | Organizations: 107 | Policies: 108 | Readers: 109 | Type: ImplicitMeta 110 | Rule: "ANY Readers" 111 | Writers: 112 | Type: ImplicitMeta 113 | Rule: "ANY Writers" 114 | Admins: 115 | Type: ImplicitMeta 116 | Rule: "MAJORITY Admins" 117 | Capabilities: 118 | <<: *ApplicationCapabilities 119 | 120 | Orderer: &OrdererDefaults 121 | OrdererType: solo 122 | Addresses: 123 | - orderer.example.com:7050 124 | BatchTimeout: 2s 125 | BatchSize: 126 | MaxMessageCount: 10 127 | AbsoluteMaxBytes: 99 MB 128 | PreferredMaxBytes: 512 KB 129 | Organizations: 130 | Policies: 131 | Readers: 132 | Type: ImplicitMeta 133 | Rule: "ANY Readers" 134 | Writers: 135 | Type: ImplicitMeta 136 | Rule: "ANY Writers" 137 | Admins: 138 | Type: ImplicitMeta 139 | Rule: "MAJORITY Admins" 140 | BlockValidation: 141 | Type: ImplicitMeta 142 | Rule: "ANY Writers" 143 | 144 | 145 | Channel: &ChannelDefaults 146 | Policies: 147 | Readers: 148 | Type: ImplicitMeta 149 | Rule: "ANY Readers" 150 | Writers: 151 | Type: ImplicitMeta 152 | Rule: "ANY Writers" 153 | Admins: 154 | Type: ImplicitMeta 155 | Rule: 
"MAJORITY Admins" 156 | Capabilities: 157 | <<: *ChannelCapabilities 158 | 159 | 160 | Profiles: 161 | Genesis: 162 | <<: *ChannelDefaults 163 | Orderer: 164 | <<: *OrdererDefaults 165 | Organizations: 166 | - *OrdererOrg 167 | Capabilities: 168 | <<: *OrdererCapabilities 169 | Consortiums: 170 | SampleConsortium: 171 | Organizations: 172 | - *OrgA 173 | - *OrgB 174 | - *OrgC 175 | - *OrgD 176 | 177 | ChannelABCD: 178 | Consortium: SampleConsortium 179 | <<: *ChannelDefaults 180 | Application: 181 | <<: *ApplicationDefaults 182 | Organizations: 183 | - *OrgA 184 | - *OrgB 185 | - *OrgC 186 | - *OrgD 187 | Capabilities: 188 | <<: *ApplicationCapabilities 189 | 190 | ChannelBC: 191 | Consortium: SampleConsortium 192 | <<: *ChannelDefaults 193 | Application: 194 | <<: *ApplicationDefaults 195 | Organizations: 196 | - *OrgB 197 | - *OrgC 198 | Capabilities: 199 | <<: *ApplicationCapabilities 200 | 201 | ChannelBCD: 202 | Consortium: SampleConsortium 203 | <<: *ChannelDefaults 204 | Application: 205 | <<: *ApplicationDefaults 206 | Organizations: 207 | - *OrgB 208 | - *OrgC 209 | - *OrgD 210 | Capabilities: 211 | <<: *ApplicationCapabilities 212 | 213 | ChannelCD: 214 | Consortium: SampleConsortium 215 | <<: *ChannelDefaults 216 | Application: 217 | <<: *ApplicationDefaults 218 | Organizations: 219 | - *OrgC 220 | - *OrgD 221 | Capabilities: 222 | <<: *ApplicationCapabilities 223 | -------------------------------------------------------------------------------- /multi-channel-network/crypto-config.yaml: -------------------------------------------------------------------------------- 1 | OrdererOrgs: 2 | - Name: Orderer 3 | Domain: example.com 4 | EnableNodeOUs: true 5 | Specs: 6 | - Hostname: orderer 7 | 8 | PeerOrgs: 9 | - Name: OrgA 10 | Domain: orgA.example.com 11 | EnableNodeOUs: true 12 | Template: 13 | Count: 1 14 | Users: 15 | Count: 0 16 | 17 | - Name: OrgB 18 | Domain: orgB.example.com 19 | EnableNodeOUs: true 20 | Template: 21 | Count: 5 22 | Users: 23 | 
Count: 4 24 | 25 | - Name: OrgC 26 | Domain: orgC.example.com 27 | EnableNodeOUs: true 28 | Template: 29 | Count: 5 30 | Users: 31 | Count: 4 32 | 33 | - Name: OrgD 34 | Domain: orgD.example.com 35 | EnableNodeOUs: true 36 | Template: 37 | Count: 6 38 | Users: 39 | Count: 5 -------------------------------------------------------------------------------- /multi-channel-network/docker-compose-cli.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | networks: 4 | mc: 5 | ipam: 6 | config: 7 | - subnet: 172.22.0.0/16 8 | gateway: 172.22.0.1 9 | 10 | services: 11 | orderer.example.com: 12 | extends: 13 | file: base/docker-compose-base.yaml 14 | service: orderer.example.com 15 | container_name: orderer.example.com 16 | networks: 17 | mc: 18 | ipv4_address: 172.22.0.2 19 | 20 | peer0.orgA.example.com: 21 | container_name: peer0.orgA.example.com 22 | extends: 23 | file: base/docker-compose-base.yaml 24 | service: peer0.orgA.example.com 25 | networks: 26 | mc: 27 | ipv4_address: 172.22.0.3 28 | 29 | peer0.orgB.example.com: 30 | container_name: peer0.orgB.example.com 31 | extends: 32 | file: base/docker-compose-base.yaml 33 | service: peer0.orgB.example.com 34 | networks: 35 | mc: 36 | ipv4_address: 172.22.0.4 37 | 38 | peer1.orgB.example.com: 39 | container_name: peer1.orgB.example.com 40 | extends: 41 | file: base/docker-compose-base.yaml 42 | service: peer1.orgB.example.com 43 | networks: 44 | mc: 45 | ipv4_address: 172.22.0.5 46 | 47 | peer2.orgB.example.com: 48 | container_name: peer2.orgB.example.com 49 | extends: 50 | file: base/docker-compose-base.yaml 51 | service: peer2.orgB.example.com 52 | networks: 53 | mc: 54 | ipv4_address: 172.22.0.6 55 | 56 | peer3.orgB.example.com: 57 | container_name: peer3.orgB.example.com 58 | extends: 59 | file: base/docker-compose-base.yaml 60 | service: peer3.orgB.example.com 61 | networks: 62 | mc: 63 | ipv4_address: 172.22.0.7 64 | 65 | peer4.orgB.example.com: 66 | 
container_name: peer4.orgB.example.com 67 | extends: 68 | file: base/docker-compose-base.yaml 69 | service: peer4.orgB.example.com 70 | networks: 71 | mc: 72 | ipv4_address: 172.22.0.8 73 | 74 | peer0.orgC.example.com: 75 | container_name: peer0.orgC.example.com 76 | extends: 77 | file: base/docker-compose-base.yaml 78 | service: peer0.orgC.example.com 79 | networks: 80 | mc: 81 | ipv4_address: 172.22.0.9 82 | 83 | peer1.orgC.example.com: 84 | container_name: peer1.orgC.example.com 85 | extends: 86 | file: base/docker-compose-base.yaml 87 | service: peer1.orgC.example.com 88 | networks: 89 | mc: 90 | ipv4_address: 172.22.0.10 91 | 92 | peer2.orgC.example.com: 93 | container_name: peer2.orgC.example.com 94 | extends: 95 | file: base/docker-compose-base.yaml 96 | service: peer2.orgC.example.com 97 | networks: 98 | mc: 99 | ipv4_address: 172.22.0.11 100 | 101 | peer3.orgC.example.com: 102 | container_name: peer3.orgC.example.com 103 | extends: 104 | file: base/docker-compose-base.yaml 105 | service: peer3.orgC.example.com 106 | networks: 107 | mc: 108 | ipv4_address: 172.22.0.12 109 | 110 | peer4.orgC.example.com: 111 | container_name: peer4.orgC.example.com 112 | extends: 113 | file: base/docker-compose-base.yaml 114 | service: peer4.orgC.example.com 115 | networks: 116 | mc: 117 | ipv4_address: 172.22.0.13 118 | 119 | peer0.orgD.example.com: 120 | container_name: peer0.orgD.example.com 121 | extends: 122 | file: base/docker-compose-base.yaml 123 | service: peer0.orgD.example.com 124 | networks: 125 | mc: 126 | ipv4_address: 172.22.0.14 127 | 128 | peer1.orgD.example.com: 129 | container_name: peer1.orgD.example.com 130 | extends: 131 | file: base/docker-compose-base.yaml 132 | service: peer1.orgD.example.com 133 | networks: 134 | mc: 135 | ipv4_address: 172.22.0.15 136 | 137 | peer2.orgD.example.com: 138 | container_name: peer2.orgD.example.com 139 | extends: 140 | file: base/docker-compose-base.yaml 141 | service: peer2.orgD.example.com 142 | networks: 143 | mc: 
144 | ipv4_address: 172.22.0.16 145 | 146 | peer3.orgD.example.com: 147 | container_name: peer3.orgD.example.com 148 | extends: 149 | file: base/docker-compose-base.yaml 150 | service: peer3.orgD.example.com 151 | networks: 152 | mc: 153 | ipv4_address: 172.22.0.17 154 | 155 | peer4.orgD.example.com: 156 | container_name: peer4.orgD.example.com 157 | extends: 158 | file: base/docker-compose-base.yaml 159 | service: peer4.orgD.example.com 160 | networks: 161 | mc: 162 | ipv4_address: 172.22.0.18 163 | 164 | peer5.orgD.example.com: 165 | container_name: peer5.orgD.example.com 166 | extends: 167 | file: base/docker-compose-base.yaml 168 | service: peer5.orgD.example.com 169 | networks: 170 | mc: 171 | ipv4_address: 172.22.0.19 172 | 173 | cli: 174 | container_name: cli 175 | image: hyperledger/fabric-tools:$IMAGETAG 176 | tty: true 177 | stdin_open: true 178 | environment: 179 | - SYS_CHANNEL=sys_channel 180 | - GOPATH=/opt/gopath 181 | - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock 182 | - FABRIC_LOGGING_SPEC=INFO 183 | - CORE_PEER_ID=cli 184 | - CORE_PEER_ADDRESS=peer0.orgA.example.com:7051 185 | - CORE_PEER_LOCALMSPID=OrgAMSP 186 | - CORE_PEER_TLS_ENABLED=false 187 | - CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/orgA.example.com/users/Admin@orgA.example.com/msp 188 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer 189 | command: /bin/bash 190 | volumes: 191 | - /var/run/:/host/var/run/ 192 | - ./../chaincode/:/opt/gopath/src/github.com/chaincode 193 | - ./crypto-config:/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ 194 | - ./scripts:/opt/gopath/src/github.com/hyperledger/fabric/peer/scripts/ 195 | - ./channel-artifacts:/opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts 196 | depends_on: 197 | - orderer.example.com 198 | - peer0.orgA.example.com 199 | - peer0.orgB.example.com 200 | - peer1.orgB.example.com 201 | - peer2.orgB.example.com 202 | - 
peer3.orgB.example.com 203 | - peer4.orgB.example.com 204 | - peer0.orgC.example.com 205 | - peer1.orgC.example.com 206 | - peer2.orgC.example.com 207 | - peer3.orgC.example.com 208 | - peer4.orgC.example.com 209 | - peer0.orgD.example.com 210 | - peer1.orgD.example.com 211 | - peer2.orgD.example.com 212 | - peer3.orgD.example.com 213 | - peer4.orgD.example.com 214 | - peer5.orgD.example.com 215 | networks: 216 | mc: 217 | ipv4_address: 172.22.0.20 218 | -------------------------------------------------------------------------------- /multi-channel-network/scripts/env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 证书文件夹 4 | PEERROOT=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations 5 | ORDEROOT=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ordererOrganizations 6 | 7 | # 节点设置 8 | ORDERERNODE=orderer.example.com:7050 9 | PEER0ORGANODE=peer0.orgA.example.com:7051 10 | PEER0ORGBNODE=peer0.orgB.example.com:8051 11 | PEER1ORGBNODE=peer1.orgB.example.com:8061 12 | PEER2ORGBNODE=peer2.orgB.example.com:8071 13 | PEER3ORGBNODE=peer3.orgB.example.com:8081 14 | PEER4ORGBNODE=peer4.orgB.example.com:8091 15 | PEER0ORGCNODE=peer0.orgC.example.com:9051 16 | PEER1ORGCNODE=peer1.orgC.example.com:9061 17 | PEER2ORGCNODE=peer2.orgC.example.com:9071 18 | PEER3ORGCNODE=peer3.orgC.example.com:9081 19 | PEER4ORGCNODE=peer4.orgC.example.com:9091 20 | PEER0ORGDNODE=peer0.orgD.example.com:10051 21 | PEER1ORGDNODE=peer1.orgD.example.com:10061 22 | PEER2ORGDNODE=peer2.orgD.example.com:10071 23 | PEER3ORGDNODE=peer3.orgD.example.com:10081 24 | PEER4ORGDNODE=peer4.orgD.example.com:10091 25 | PEER5ORGDNODE=peer5.orgD.example.com:10101 26 | 27 | CHANNEL_NAME=(channelabcd channelbc channelbcd channelcd) 28 | CHANNEL_INSTANTIATE=( 29 | "AND ('OrgAMSP.peer','OrgBMSP.peer','OrgCMSP.peer','OrgDMSP.peer')" 30 | "AND ('OrgBMSP.peer','OrgCMSP.peer')" 31 | "AND ('OrgCMSP.peer','OrgDMSP.peer')" 32 | "AND 
('OrgCMSP.peer','OrgDMSP.peer')" 33 | ) 34 | CHANNELABCD=channelabcd 35 | CHANNELBC=channelbc 36 | CHANNELBCD=channelbcd 37 | CHANNELCD=channelcd 38 | 39 | 40 | CHANNEL_A=($CHANNELABCD) 41 | CHANNEL_B=($CHANNELABCD $CHANNELBC $CHANNELBCD) 42 | CHANNEL_C=($CHANNELABCD $CHANNELBC $CHANNELBCD $CHANNELCD) 43 | CHANNEL_D=($CHANNELABCD $CHANNELBCD $CHANNELCD) 44 | 45 | OrgA_PEERS=(${PEER0ORGANODE}) 46 | OrgB_PEERS=(${PEER0ORGBNODE} ${PEER1ORGBNODE} ${PEER2ORGBNODE} ${PEER3ORGBNODE} ${PEER4ORGBNODE}) 47 | OrgC_PEERS=(${PEER0ORGCNODE} ${PEER1ORGCNODE} ${PEER2ORGCNODE} ${PEER3ORGCNODE} ${PEER4ORGCNODE}) 48 | OrgD_PEERS=(${PEER0ORGDNODE} ${PEER1ORGDNODE} ${PEER2ORGDNODE} ${PEER3ORGDNODE} ${PEER4ORGDNODE} ${PEER5ORGDNODE}) 49 | 50 | NAME=money_demo 51 | VERSION=1.0 52 | 53 | # 切换peer0 orgA 54 | OrgA(){ 55 | CORE_PEER_MSPCONFIGPATH=${PEERROOT}/orgA.example.com/users/Admin@orgA.example.com/msp 56 | CORE_PEER_ADDRESS=${PEER0ORGANODE} 57 | CORE_PEER_LOCALMSPID="OrgAMSP" 58 | echo "org now: orga; node now:peer0" 59 | } 60 | 61 | # 切换peer0 orgB 62 | OrgB(){ 63 | CORE_PEER_MSPCONFIGPATH=${PEERROOT}/orgB.example.com/users/Admin@orgB.example.com/msp 64 | CORE_PEER_ADDRESS=${PEER0ORGBNODE} 65 | CORE_PEER_LOCALMSPID="OrgBMSP" 66 | echo "org now: orgb; node now:peer0" 67 | } 68 | 69 | # 切换peer0 orgC 70 | OrgC(){ 71 | CORE_PEER_MSPCONFIGPATH=${PEERROOT}/orgC.example.com/users/Admin@orgC.example.com/msp 72 | CORE_PEER_ADDRESS=${PEER0ORGCNODE} 73 | CORE_PEER_LOCALMSPID="OrgCMSP" 74 | echo "org now: orgc; node now:peer0" 75 | } 76 | 77 | # 切换peer0 orgD 78 | OrgD(){ 79 | CORE_PEER_MSPCONFIGPATH=${PEERROOT}/orgD.example.com/users/Admin@orgD.example.com/msp 80 | CORE_PEER_ADDRESS=${PEER0ORGDNODE} 81 | CORE_PEER_LOCALMSPID="OrgDMSP" 82 | echo "org now: orgd; node now:peer0" 83 | } 84 | 85 | # 安装channel 86 | InstallChannel() { 87 | # 所有channel包含OrgC - 使用OrgC创建channel 88 | OrgC 89 | for i in ${CHANNEL_NAME[@]}; do 90 | peer channel create \ 91 | -o ${ORDERERNODE} \ 92 | -c ${i} \ 93 | -f 
./channel-artifacts/${i}.tx 94 | echo "install channel " ${i} " done !" 95 | sleep 1 96 | done 97 | } 98 | 99 | # OrgA加入的channel 100 | JoinChannelA() { 101 | peer channel join -b ${CHANNELABCD}.block 102 | } 103 | 104 | # OrgB加入的channel 105 | JoinChannelB() { 106 | peer channel join -b ${CHANNELABCD}.block 107 | peer channel join -b ${CHANNELBC}.block 108 | peer channel join -b ${CHANNELBCD}.block 109 | } 110 | 111 | # OrgC加入的channel 112 | JoinChannelC() { 113 | peer channel join -b ${CHANNELABCD}.block 114 | peer channel join -b ${CHANNELBC}.block 115 | peer channel join -b ${CHANNELBCD}.block 116 | peer channel join -b ${CHANNELCD}.block 117 | } 118 | 119 | # OrgD加入的channel 120 | JoinChannelD() { 121 | peer channel join -b ${CHANNELABCD}.block 122 | peer channel join -b ${CHANNELBCD}.block 123 | peer channel join -b ${CHANNELCD}.block 124 | } 125 | 126 | # 加入channel 127 | JoinChannel() { 128 | OrgA 129 | for i in ${OrgA_PEERS[@]}; do 130 | CORE_PEER_ADDRESS=${i} 131 | JoinChannelA 132 | echo ${i}" join channel" 133 | done 134 | OrgB 135 | for i in ${OrgB_PEERS[@]}; do 136 | CORE_PEER_ADDRESS=${i} 137 | JoinChannelB 138 | echo ${i}" join channel" 139 | done 140 | OrgC 141 | for i in ${OrgC_PEERS[@]}; do 142 | CORE_PEER_ADDRESS=${i} 143 | JoinChannelC 144 | echo ${i}" join channel" 145 | done 146 | OrgD 147 | for i in ${OrgD_PEERS[@]}; do 148 | CORE_PEER_ADDRESS=${i} 149 | JoinChannelD 150 | echo ${i}" join channel" 151 | done 152 | } 153 | 154 | AnchorUpdateA() { 155 | for i in ${CHANNEL_A[@]}; do 156 | peer channel update \ 157 | -o ${ORDERERNODE} \ 158 | -c ${i} \ 159 | -f ./channel-artifacts/OrgAMSPanchor_${i}.tx 160 | done 161 | } 162 | 163 | AnchorUpdateB() { 164 | for i in ${CHANNEL_B[@]}; do 165 | peer channel update \ 166 | -o ${ORDERERNODE} \ 167 | -c ${i} \ 168 | -f ./channel-artifacts/OrgBMSPanchor_${i}.tx 169 | done 170 | } 171 | 172 | AnchorUpdateC() { 173 | for i in ${CHANNEL_C[@]}; do 174 | peer channel update \ 175 | -o ${ORDERERNODE} \ 176 | -c 
${i} \ 177 | -f ./channel-artifacts/OrgCMSPanchor_${i}.tx 178 | done 179 | } 180 | 181 | AnchorUpdateD() { 182 | for i in ${CHANNEL_D[@]}; do 183 | peer channel update \ 184 | -o ${ORDERERNODE} \ 185 | -c ${i} \ 186 | -f ./channel-artifacts/OrgDMSPanchor_${i}.tx 187 | done 188 | } 189 | 190 | # 更新锚节点 191 | AnchorUpdate() { 192 | OrgA 193 | AnchorUpdateA 194 | OrgB 195 | AnchorUpdateB 196 | OrgC 197 | AnchorUpdateC 198 | OrgD 199 | AnchorUpdateD 200 | } 201 | 202 | InstallChainCodeFunc() { 203 | peer chaincode install \ 204 | -n ${NAME} \ 205 | -v ${VERSION} \ 206 | -p github.com/chaincode/demo/ 207 | } 208 | 209 | # 安装链码 210 | InstallChainCode() { 211 | OrgA 212 | for i in ${OrgA_PEERS[@]}; do 213 | CORE_PEER_ADDRESS=${i} 214 | InstallChainCodeFunc 215 | echo ${i} 216 | done 217 | OrgB 218 | for i in ${OrgB_PEERS[@]}; do 219 | CORE_PEER_ADDRESS=${i} 220 | InstallChainCodeFunc 221 | echo ${i} 222 | done 223 | OrgC 224 | for i in ${OrgC_PEERS[@]}; do 225 | CORE_PEER_ADDRESS=${i} 226 | InstallChainCodeFunc 227 | echo ${i} 228 | done 229 | OrgD 230 | for i in ${OrgD_PEERS[@]}; do 231 | CORE_PEER_ADDRESS=${i} 232 | InstallChainCodeFunc 233 | echo ${i} 234 | done 235 | } 236 | 237 | # 实例链码 238 | InstantiateChainCode() { 239 | OrgC 240 | for i in ${!CHANNEL_NAME[@]}; do 241 | peer chaincode instantiate \ 242 | -o ${ORDERERNODE} \ 243 | -C ${CHANNEL_NAME[i]} \ 244 | -n ${NAME} \ 245 | -v ${VERSION} \ 246 | -c '{"Args":["Init"]}' \ 247 | -P "${CHANNEL_INSTANTIATE[i]}" 248 | sleep 1 249 | done 250 | for i in ${CHANNEL_NAME[@]}; do 251 | peer chaincode list --instantiated -C ${i} 252 | done 253 | } 254 | 255 | # 链码测试 256 | TestDemo() { 257 | OrgC 258 | # 创建账户 259 | peer chaincode invoke \ 260 | -C ${CHANNELABCD} \ 261 | -o ${ORDERERNODE} \ 262 | -n ${NAME} \ 263 | --peerAddresses ${PEER0ORGANODE} \ 264 | --peerAddresses ${PEER0ORGBNODE} \ 265 | --peerAddresses ${PEER0ORGCNODE} \ 266 | --peerAddresses ${PEER0ORGDNODE} \ 267 | -c '{"Args":["open","count_a", "100"]}' 268 | peer 
chaincode invoke \ 269 | -C ${CHANNELABCD} \ 270 | -o ${ORDERERNODE} \ 271 | -n ${NAME} \ 272 | --peerAddresses ${PEER0ORGANODE} \ 273 | --peerAddresses ${PEER0ORGBNODE} \ 274 | --peerAddresses ${PEER0ORGCNODE} \ 275 | --peerAddresses ${PEER0ORGDNODE} \ 276 | -c '{"Args":["open","count_b", "100"]}' 277 | peer chaincode invoke \ 278 | -C ${CHANNELABCD} \ 279 | -o ${ORDERERNODE} \ 280 | -n ${NAME} \ 281 | --peerAddresses ${PEER0ORGANODE} \ 282 | --peerAddresses ${PEER0ORGBNODE} \ 283 | --peerAddresses ${PEER0ORGCNODE} \ 284 | --peerAddresses ${PEER0ORGDNODE} \ 285 | -c '{"Args":["invoke","count_a", "count_b","1"]}' 286 | peer chaincode query \ 287 | -C ${CHANNELABCD} \ 288 | -n ${NAME} \ 289 | -c '{"Args":["query","count_a"]}' 290 | peer chaincode query \ 291 | -C ${CHANNELABCD} \ 292 | -n ${NAME} \ 293 | -c '{"Args":["query","count_b"]}' 294 | } 295 | 296 | case $1 in 297 | installchannel) 298 | InstallChannel 299 | ;; 300 | joinchannel) 301 | JoinChannel 302 | ;; 303 | anchorupdate) 304 | AnchorUpdate 305 | ;; 306 | installchaincode) 307 | InstallChainCode 308 | ;; 309 | instantiatechaincode) 310 | InstantiateChainCode 311 | ;; 312 | testdemo) 313 | TestDemo 314 | ;; 315 | all) 316 | InstallChannel 317 | JoinChannel 318 | AnchorUpdate 319 | InstallChainCode 320 | InstantiateChainCode 321 | TestDemo 322 | ;; 323 | esac 324 | -------------------------------------------------------------------------------- /multi-channel-network/scripts/gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | GENESIS_PROFILE=Genesis 4 | CHANNEL_PROFILE=(ChannelABCD ChannelBC ChannelBCD ChannelCD) 5 | CHANNEL_ID=(channelabcd channelbc channelbcd channelcd) 6 | CHANNEL_ANCHOR=('OrgAMSP OrgBMSP OrgCMSP OrgDMSP' 'OrgBMSP OrgCMSP' 'OrgBMSP OrgCMSP OrgDMSP' 'OrgCMSP OrgDMSP') 7 | 8 | SYS_CHANNEL=sys-channel 9 | VERSION=1.4.4 10 | 11 | FABRIC_CFG_PATH=$PWD 12 | 13 | # 检测cryptogen和版本 14 | if ! 
[ -x "$(command -v cryptogen)" ] ; then 15 | echo -e "\033[31m no cryptogen\033[0m" 16 | exit 1 17 | fi 18 | if [ ${VERSION} != "$(cryptogen version | grep Version | awk -F ': ' '{print $2}')" ] ; then 19 | echo -e "\033[31m cryptogen need version \033[0m"${VERSION} 20 | exit 1 21 | fi 22 | # 检测configtxgen和版本 23 | if ! [ -x "$(command -v configtxgen)" ] ; then 24 | echo -e "\033[31m no configtxgen\033[0m" 25 | exit 1 26 | fi 27 | if [ ${VERSION} != "$(configtxgen --version | grep Version | awk -F ': ' '{print $2}')" ] ; then 28 | echo -e "\033[31m configtxgen need version \033[0m"${VERSION} 29 | exit 1 30 | fi 31 | # 生成证书文件 32 | echo -e "\033[31m clear crypto files\033[0m" 33 | rm -rf crypto-config 34 | echo -e "\033[31m generate crypto files\033[0m" 35 | cryptogen generate --config ./crypto-config.yaml 36 | # 清理多余文件 37 | echo -e "\033[31m clear block files\033[0m" 38 | rm -rf ./channel-artifacts 39 | mkdir ./channel-artifacts 40 | # 生成创世块 41 | echo -e "\033[31m generate genesis block\033[0m" 42 | configtxgen \ 43 | -profile ${GENESIS_PROFILE} \ 44 | -channelID ${SYS_CHANNEL} \ 45 | -outputBlock ./channel-artifacts/genesis.block \ 46 | # 生成通道交易 47 | echo -e "\033[31m generate channel transcation\033[0m" 48 | for i in ${!CHANNEL_PROFILE[@]}; do 49 | configtxgen \ 50 | -profile ${CHANNEL_PROFILE[$i]} \ 51 | -channelID ${CHANNEL_ID[$i]} \ 52 | -outputCreateChannelTx ./channel-artifacts/${CHANNEL_ID[$i]}.tx 53 | done 54 | # 生成铆节点配置 55 | for chi in ${!CHANNEL_ANCHOR[@]}; do 56 | echo -e "\033[31m generate anchor transcation for \033[0m" ${CHANNEL_ID[$chi]} 57 | ORGS=${CHANNEL_ANCHOR[$chi]} 58 | for i in ${ORGS[@]}; do 59 | configtxgen \ 60 | -profile ${CHANNEL_PROFILE[$chi]}\ 61 | -channelID ${CHANNEL_ID[$chi]} \ 62 | -outputAnchorPeersUpdate ./channel-artifacts/${i}anchor_${CHANNEL_ID[$chi]}.tx \ 63 | -asOrg ${i} 64 | echo $i 65 | done 66 | done 67 | -------------------------------------------------------------------------------- 
/multi-channel-network/scripts/utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ORGA=orgA.example.com 4 | ORGB=orgB.example.com 5 | ORGC=orgC.example.com 6 | ORGD=orgD.example.com 7 | ORGAUSERS=(Admin) 8 | ORGBUSERS=(Admin User1 User2 User3 User4) 9 | ORGCUSERS=(Admin User1 User2 User3 User4) 10 | ORGDUSERS=(Admin User1 User2 User3 User4 User5) 11 | VERSION=1.4.4 12 | 13 | # 复制keystore 14 | CPFile() { 15 | files=$(ls $1) 16 | echo ${files[0]} 17 | cd $1 18 | cp ${files[0]} ./key.pem 19 | cd - 20 | } 21 | 22 | # 复制所有文件keystore 23 | CPAllFiles() { 24 | PREFIX=crypto-config/peerOrganizations 25 | SUFFIX=msp/keystore 26 | for u in ${ORGAUSERS[@]}; do 27 | CPFile ${PREFIX}/${ORGA}/users/${u}@${ORGA}/${SUFFIX} 28 | done 29 | for u in ${ORGBUSERS[@]}; do 30 | CPFile ${PREFIX}/${ORGB}/users/${u}@${ORGB}/${SUFFIX} 31 | done 32 | for u in ${ORGCUSERS[@]}; do 33 | CPFile ${PREFIX}/${ORGC}/users/${u}@${ORGC}/${SUFFIX} 34 | done 35 | for u in ${ORGDUSERS[@]}; do 36 | CPFile ${PREFIX}/${ORGD}/users/${u}@${ORGD}/${SUFFIX} 37 | done 38 | } 39 | 40 | # 清理缓存文件 41 | Clean() { 42 | rm -rf ./channel-artifacts 43 | rm -rf ./crypto-config 44 | rm -rf ./production 45 | rm -rf /tmp/crypto 46 | } 47 | 48 | case $1 in 49 | # 压力测试启动/关闭 50 | up) 51 | CPAllFiles 52 | env IMAGETAG=${VERSION} docker-compose -f ./docker-compose-cli.yaml up -d 53 | docker exec cli /bin/bash -c "scripts/env.sh all" 54 | ;; 55 | down) 56 | docker kill $(docker ps -qa) 57 | echo y | docker system prune 58 | Clean 59 | ;; 60 | esac 61 | -------------------------------------------------------------------------------- /pbft-network/base/docker-compose-base.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | orderer0.yzm.com: 4 | container_name: orderer0.yzm.com 5 | extends: 6 | file: peer-base.yaml 7 | service: orderer-base 8 | environment: 9 | - ORDERER_GENERAL_LISTENPORT=6050 10 | - 
PBFT_LISTEN_PORT=6070 11 | - PBFT_NODE_ID=0 12 | - PBFT_NODE_TABLE=http://orderer0.yzm.com:6070;http://orderer1.yzm.com:6071;http://orderer2.yzm.com:6072;http://orderer3.yzm.com:6073 13 | volumes: 14 | - ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block 15 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer0.yzm.com/msp:/var/hyperledger/orderer/msp 16 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer0.yzm.com/tls:/var/hyperledger/orderer/tls 17 | - ../production/orderer:/var/hyperledger/production/orderer0 18 | ports: 19 | - 6050:6050 20 | - 6070:6070 21 | 22 | orderer1.yzm.com: 23 | container_name: orderer1.yzm.com 24 | extends: 25 | file: peer-base.yaml 26 | service: orderer-base 27 | environment: 28 | - ORDERER_GENERAL_LISTENPORT=6051 29 | - PBFT_LISTEN_PORT=6071 30 | - PBFT_NODE_ID=1 31 | - PBFT_NODE_TABLE=http://orderer0.yzm.com:6070;http://orderer1.yzm.com:6071;http://orderer2.yzm.com:6072;http://orderer3.yzm.com:6073 32 | volumes: 33 | - ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block 34 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer1.yzm.com/msp:/var/hyperledger/orderer/msp 35 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer1.yzm.com/tls:/var/hyperledger/orderer/tls 36 | - ../production/orderer:/var/hyperledger/production/orderer1 37 | ports: 38 | - 6051:6051 39 | - 6071:6071 40 | 41 | orderer2.yzm.com: 42 | container_name: orderer2.yzm.com 43 | extends: 44 | file: peer-base.yaml 45 | service: orderer-base 46 | environment: 47 | - ORDERER_GENERAL_LISTENPORT=6052 48 | - PBFT_LISTEN_PORT=6072 49 | - PBFT_NODE_ID=2 50 | - PBFT_NODE_TABLE=http://orderer0.yzm.com:6070;http://orderer1.yzm.com:6071;http://orderer2.yzm.com:6072;http://orderer3.yzm.com:6073 51 | volumes: 52 | - ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block 53 | - 
../crypto-config/ordererOrganizations/yzm.com/orderers/orderer2.yzm.com/msp:/var/hyperledger/orderer/msp 54 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer2.yzm.com/tls:/var/hyperledger/orderer/tls 55 | - ../production/orderer:/var/hyperledger/production/orderer2 56 | ports: 57 | - 6052:6052 58 | - 6072:6072 59 | 60 | orderer3.yzm.com: 61 | container_name: orderer3.yzm.com 62 | extends: 63 | file: peer-base.yaml 64 | service: orderer-base 65 | environment: 66 | - ORDERER_GENERAL_LISTENPORT=6053 67 | - PBFT_LISTEN_PORT=6073 68 | - PBFT_NODE_ID=3 69 | - PBFT_NODE_TABLE=http://orderer0.yzm.com:6070;http://orderer1.yzm.com:6071;http://orderer2.yzm.com:6072;http://orderer3.yzm.com:6073 70 | volumes: 71 | - ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block 72 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer3.yzm.com/msp:/var/hyperledger/orderer/msp 73 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer3.yzm.com/tls:/var/hyperledger/orderer/tls 74 | - ../production/orderer:/var/hyperledger/production/orderer3 75 | ports: 76 | - 6053:6053 77 | - 6073:6073 78 | 79 | peer0.orga.com: 80 | container_name: peer0.orga.com 81 | extends: 82 | file: peer-base.yaml 83 | service: peer-base 84 | environment: 85 | - CORE_PEER_ID=peer0.orga.com 86 | - CORE_PEER_ADDRESS=peer0.orga.com:7051 87 | - CORE_PEER_LISTENADDRESS=0.0.0.0:7051 88 | - CORE_PEER_CHAINCODEADDRESS=peer0.orga.com:7052 89 | - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052 90 | - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.orga.com:7051 91 | - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.orga.com:7051 92 | - CORE_PEER_LOCALMSPID=OrgAMSP 93 | volumes: 94 | - /var/run/:/host/var/run/ 95 | - ../crypto-config/peerOrganizations/orga.com/peers/peer0.orga.com/msp:/etc/hyperledger/fabric/msp 96 | - ../crypto-config/peerOrganizations/orga.com/peers/peer0.orga.com/tls:/etc/hyperledger/fabric/tls 97 | - ../production/orga:/var/hyperledger/production 98 | ports: 99 | 
- 7051:7051 100 | -------------------------------------------------------------------------------- /pbft-network/base/peer-base.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | peer-base: 4 | image: hyperledger/fabric-peer:$IMAGETAG 5 | environment: 6 | - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock 7 | - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=pbft-network_solonet 8 | - FABRIC_LOGGING_SPEC=INFO 9 | - CORE_PEER_TLS_ENABLED=false 10 | - CORE_PEER_GOSSIP_USELEADERELECTION=true 11 | - CORE_PEER_GOSSIP_ORGLEADER=false 12 | - CORE_PEER_PROFILE_ENABLED=true 13 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer 14 | command: peer node start 15 | 16 | orderer-base: 17 | image: hyperledger/fabric-orderer:$IMAGETAG 18 | environment: 19 | # - FABRIC_LOGGING_SPEC=INFO 20 | - FABRIC_LOGGING_SPEC=FATAL 21 | - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0 22 | - ORDERER_GENERAL_GENESISMETHOD=file 23 | - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block 24 | - ORDERER_GENERAL_LOCALMSPID=OrdererMSP 25 | - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp 26 | - ORDERER_GENERAL_TLS_ENABLED=false 27 | - ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1 28 | - ORDERER_KAFKA_VERBOSE=true 29 | - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt 30 | - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key 31 | - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt] 32 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric 33 | command: orderer 34 | 35 | -------------------------------------------------------------------------------- /pbft-network/benchmarks/config.yaml: -------------------------------------------------------------------------------- 1 | test: 2 | name: pbft-network 3 | description: pbft-network 4 | workers: 5 | type: local 6 | number: 5 7 | 8 | rounds: 9 | - label: open 10 | description: 
open 11 | txNumber: 2000 12 | rateControl: 13 | type: fixed-rate 14 | opts: 15 | tps: 100 16 | callback: ../chaincode/demo/callback/open.js 17 | 18 | - label: transfer 19 | description: transfer 20 | txNumber: 2000 21 | rateControl: 22 | type: fixed-rate 23 | opts: 24 | tps: 100 25 | callback: ../chaincode/demo/callback/transfer.js 26 | 27 | - label: query 28 | description: query 29 | txNumber: 2000 30 | rateControl: 31 | type: fixed-rate 32 | opts: 33 | tps: 100 34 | callback: ../chaincode/demo/callback/query.js 35 | 36 | - label: delete 37 | description: delete 38 | txNumber: 2000 39 | rateControl: 40 | type: fixed-rate 41 | opts: 42 | tps: 100 43 | callback: ../chaincode/demo/callback/delete.js 44 | 45 | monitor: 46 | interval: 1 47 | type: 48 | - docker 49 | docker: 50 | containers: 51 | - peer0.orga.com 52 | - orderer0.yzm.com 53 | - orderer1.yzm.com 54 | - orderer2.yzm.com 55 | - orderer3.yzm.com 56 | -------------------------------------------------------------------------------- /pbft-network/benchmarks/network.yaml: -------------------------------------------------------------------------------- 1 | name: Fabric 2 | version: "1.0" 3 | 4 | mutual-tls: false 5 | 6 | caliper: 7 | blockchain: fabric 8 | command: 9 | start: scripts/gen.sh;scripts/utils.sh up 10 | end: scripts/utils.sh 11 | 12 | info: 13 | Version: 1.4.4 14 | Size: 4 Orgs with 2 Peer 15 | Orderer: Pbft 16 | Distribution: Single Host 17 | StateDB: GoLevelDB 18 | 19 | clients: 20 | peer0.orga.com: 21 | client: 22 | organization: OrgA 23 | credentialStore: 24 | path: /tmp/crypto/orga 25 | cryptoStore: 26 | path: /tmp/crypto/orga 27 | clientPrivateKey: 28 | path: crypto-config/peerOrganizations/orga.com/users/User1@orga.com/msp/keystore/key.pem 29 | clientSignedCert: 30 | path: crypto-config/peerOrganizations/orga.com/users/User1@orga.com/msp/signcerts/User1@orga.com-cert.pem 31 | 32 | channels: 33 | mychannel: 34 | configBinary: ./channel-artifacts/channel.tx 35 | created: true 36 | orderers: 37 | 
- orderer0.yzm.com 38 | - orderer1.yzm.com 39 | - orderer2.yzm.com 40 | - orderer3.yzm.com 41 | peers: 42 | peer0.orga.com: 43 | endorsingPeer: true 44 | chaincodeQuery: true 45 | ledgerQuery: true 46 | eventSource: true 47 | 48 | chaincodes: 49 | - id: money_demo 50 | version: "1.0" 51 | contractID: money_demo 52 | language: golang 53 | path: ../chaincode/demo 54 | targetPeers: 55 | - peer0.orga.com 56 | 57 | organizations: 58 | OrgA: 59 | mspid: OrgAMSP 60 | peers: 61 | - peer0.orga.com 62 | adminPrivateKey: 63 | path: crypto-config/peerOrganizations/orga.com/users/Admin@orga.com/msp/keystore/key.pem 64 | signedCert: 65 | path: crypto-config/peerOrganizations/orga.com/users/Admin@orga.com/msp/signcerts/Admin@orga.com-cert.pem 66 | 67 | orderers: 68 | orderer0.yzm.com: 69 | url: grpc://localhost:6050 70 | grpcOptions: 71 | grpc.keepalive_time_ms: 600000 72 | orderer1.yzm.com: 73 | url: grpc://localhost:6051 74 | grpcOptions: 75 | grpc.keepalive_time_ms: 600000 76 | orderer2.yzm.com: 77 | url: grpc://localhost:6052 78 | grpcOptions: 79 | grpc.keepalive_time_ms: 600000 80 | orderer3.yzm.com: 81 | url: grpc://localhost:6053 82 | grpcOptions: 83 | grpc.keepalive_time_ms: 600000 84 | 85 | peers: 86 | peer0.orga.com: 87 | url: grpc://localhost:7051 88 | grpcOptions: 89 | grpc.keepalive_time_ms: 600000 90 | -------------------------------------------------------------------------------- /pbft-network/configtx.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | Organizations: 3 | - &OrdererOrg 4 | Name: OrdererOrg 5 | ID: OrdererMSP 6 | MSPDir: crypto-config/ordererOrganizations/yzm.com/msp 7 | Policies: 8 | Readers: 9 | Type: Signature 10 | Rule: "OR('OrdererMSP.member')" 11 | Writers: 12 | Type: Signature 13 | Rule: "OR('OrdererMSP.member')" 14 | Admins: 15 | Type: Signature 16 | Rule: "OR('OrdererMSP.admin')" 17 | - &OrgA 18 | Name: OrgAMSP 19 | ID: OrgAMSP 20 | MSPDir: crypto-config/peerOrganizations/orga.com/msp 21 | 
Policies: 22 | Readers: 23 | Type: Signature 24 | Rule: "OR('OrgAMSP.admin', 'OrgAMSP.peer', 'OrgAMSP.client')" 25 | Writers: 26 | Type: Signature 27 | Rule: "OR('OrgAMSP.admin', 'OrgAMSP.client')" 28 | Admins: 29 | Type: Signature 30 | Rule: "OR('OrgAMSP.admin')" 31 | AnchorPeers: 32 | - Host: peer0.orga.com 33 | Port: 7051 34 | 35 | Capabilities: 36 | Channel: &ChannelCapabilities 37 | V1_4_3: true 38 | V1_3: false 39 | V1_1: false 40 | 41 | Orderer: &OrdererCapabilities 42 | V1_4_2: true 43 | V1_1: false 44 | 45 | Application: &ApplicationCapabilities 46 | V1_4_2: true 47 | V1_3: false 48 | V1_2: false 49 | V1_1: false 50 | 51 | Application: &ApplicationDefaults 52 | Organizations: 53 | 54 | Policies: 55 | Readers: 56 | Type: ImplicitMeta 57 | Rule: "ANY Readers" 58 | Writers: 59 | Type: ImplicitMeta 60 | Rule: "ANY Writers" 61 | Admins: 62 | Type: ImplicitMeta 63 | Rule: "MAJORITY Admins" 64 | 65 | Capabilities: 66 | <<: *ApplicationCapabilities 67 | 68 | Orderer: &OrdererDefaults 69 | OrdererType: pbft 70 | Addresses: 71 | - orderer0.yzm.com:6050 72 | - orderer1.yzm.com:6051 73 | - orderer2.yzm.com:6052 74 | - orderer3.yzm.com:6053 75 | 76 | BatchTimeout: 2s 77 | BatchSize: 78 | MaxMessageCount: 1000 79 | AbsoluteMaxBytes: 256 MB 80 | PreferredMaxBytes: 512 KB 81 | 82 | Organizations: 83 | Policies: 84 | Readers: 85 | Type: ImplicitMeta 86 | Rule: "ANY Readers" 87 | Writers: 88 | Type: ImplicitMeta 89 | Rule: "ANY Writers" 90 | Admins: 91 | Type: ImplicitMeta 92 | Rule: "MAJORITY Admins" 93 | BlockValidation: 94 | Type: ImplicitMeta 95 | Rule: "ANY Writers" 96 | 97 | Channel: &ChannelDefaults 98 | Policies: 99 | Readers: 100 | Type: ImplicitMeta 101 | Rule: "ANY Readers" 102 | Writers: 103 | Type: ImplicitMeta 104 | Rule: "ANY Writers" 105 | Admins: 106 | Type: ImplicitMeta 107 | Rule: "MAJORITY Admins" 108 | 109 | Capabilities: 110 | <<: *ChannelCapabilities 111 | 112 | Profiles: 113 | Genesis: 114 | <<: *ChannelDefaults 115 | Orderer: 116 | <<: 
*OrdererDefaults 117 | Organizations: 118 | - *OrdererOrg 119 | Capabilities: 120 | <<: *OrdererCapabilities 121 | Consortiums: 122 | SampleConsortium: 123 | Organizations: 124 | - *OrgA 125 | Channel: 126 | Consortium: SampleConsortium 127 | <<: *ChannelDefaults 128 | Application: 129 | <<: *ApplicationDefaults 130 | Organizations: 131 | - *OrgA 132 | Capabilities: 133 | <<: *ApplicationCapabilities 134 | -------------------------------------------------------------------------------- /pbft-network/crypto-config.yaml: -------------------------------------------------------------------------------- 1 | OrdererOrgs: 2 | - Name: Orderer 3 | Domain: yzm.com 4 | EnableNodeOUs: true # 控制节点目录中是否生成配置文件 5 | Specs: 6 | - Hostname: orderer0 7 | - Hostname: orderer1 8 | - Hostname: orderer2 9 | - Hostname: orderer3 10 | 11 | PeerOrgs: 12 | - Name: OrgA 13 | Domain: orga.com 14 | EnableNodeOUs: true 15 | Template: 16 | Count: 1 17 | Users: 18 | Count: 1 19 | -------------------------------------------------------------------------------- /pbft-network/docker-compose-cli.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | networks: 4 | solonet: 5 | ipam: 6 | config: 7 | - subnet: 172.22.0.0/24 8 | gateway: 172.22.0.1 9 | 10 | services: 11 | 12 | orderer0.yzm.com: 13 | extends: 14 | file: base/docker-compose-base.yaml 15 | service: orderer0.yzm.com 16 | container_name: orderer0.yzm.com 17 | networks: 18 | solonet: 19 | ipv4_address: 172.22.0.100 20 | 21 | orderer1.yzm.com: 22 | extends: 23 | file: base/docker-compose-base.yaml 24 | service: orderer1.yzm.com 25 | container_name: orderer1.yzm.com 26 | networks: 27 | solonet: 28 | ipv4_address: 172.22.0.101 29 | 30 | orderer2.yzm.com: 31 | extends: 32 | file: base/docker-compose-base.yaml 33 | service: orderer2.yzm.com 34 | container_name: orderer2.yzm.com 35 | networks: 36 | solonet: 37 | ipv4_address: 172.22.0.102 38 | 39 | orderer3.yzm.com: 40 | extends: 41 | file: 
base/docker-compose-base.yaml 42 | service: orderer3.yzm.com 43 | container_name: orderer3.yzm.com 44 | networks: 45 | solonet: 46 | ipv4_address: 172.22.0.103 47 | 48 | peer0.orga.com: 49 | container_name: peer0.orga.com 50 | extends: 51 | file: base/docker-compose-base.yaml 52 | service: peer0.orga.com 53 | networks: 54 | solonet: 55 | ipv4_address: 172.22.0.2 56 | 57 | cli: 58 | container_name: cli 59 | image: hyperledger/fabric-tools:$IMAGETAG 60 | tty: true 61 | stdin_open: true 62 | environment: 63 | - SYS_CHANNEL=sys_channel 64 | - GOPATH=/opt/gopath 65 | - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock 66 | - FABRIC_LOGGING_SPEC=INFO 67 | - CORE_PEER_ID=cli 68 | - CORE_PEER_ADDRESS=peer0.orga.com:7051 69 | - CORE_PEER_LOCALMSPID=OrgAMSP 70 | - CORE_PEER_TLS_ENABLED=false 71 | - CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/orga.com/users/Admin@orga.com/msp 72 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer 73 | command: /bin/bash 74 | volumes: 75 | - /var/run/:/host/var/run/ 76 | - ./../chaincode/:/opt/gopath/src/github.com/chaincode 77 | - ./crypto-config:/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ 78 | - ./scripts:/opt/gopath/src/github.com/hyperledger/fabric/peer/scripts/ 79 | - ./channel-artifacts:/opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts 80 | depends_on: 81 | - orderer0.yzm.com 82 | - orderer1.yzm.com 83 | - orderer2.yzm.com 84 | - orderer3.yzm.com 85 | - peer0.orga.com 86 | networks: 87 | solonet: 88 | ipv4_address: 172.22.0.200 89 | -------------------------------------------------------------------------------- /pbft-network/scripts/env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 证书文件夹 4 | PEERROOT=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations 5 | ORDEROOT=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ordererOrganizations 6 | 7 | 
# 节点设置 8 | ORDERER0NODE=orderer0.yzm.com:6050 9 | ORDERER1NODE=orderer1.yzm.com:6051 10 | ORDERER2NODE=orderer2.yzm.com:6052 11 | ORDERER3NODE=orderer3.yzm.com:6053 12 | 13 | ORDERERNODE=${ORDERER1NODE} 14 | 15 | PEERORGANODE=peer0.orga.com:7051 16 | CHANNEL_NAME=mychannel 17 | 18 | NAME=money_demo 19 | VERSION=1.0 20 | 21 | # 切换peer0 orgA 22 | OrgA(){ 23 | CORE_PEER_MSPCONFIGPATH=${PEERROOT}/orga.com/users/Admin@orga.com/msp 24 | CORE_PEER_ADDRESS=${PEERORGANODE} 25 | CORE_PEER_LOCALMSPID="OrgAMSP" 26 | echo "node now:peer0.orga.com" 27 | } 28 | 29 | # 安装channel 30 | InstallChannel() { 31 | peer channel create \ 32 | -o ${ORDERERNODE} \ 33 | -c ${CHANNEL_NAME} \ 34 | -f ./channel-artifacts/channel.tx \ 35 | echo "install channel" 36 | } 37 | 38 | # 加入channel 39 | JoinChannel() { 40 | OrgA 41 | peer channel join -b ${CHANNEL_NAME}.block 42 | echo "peer0.orga.com join channel" 43 | } 44 | 45 | # 更新锚节点 46 | AnchorUpdate() { 47 | OrgA 48 | peer channel update \ 49 | -o ${ORDERERNODE} \ 50 | -c ${CHANNEL_NAME} \ 51 | -f ./channel-artifacts/OrgAMSPanchor.tx \ 52 | echo "orga update anchor peer0.orga.com" 53 | } 54 | 55 | # 安装链码 56 | InstallChainCode() { 57 | OrgA 58 | peer chaincode install \ 59 | -n ${NAME} \ 60 | -v ${VERSION} \ 61 | -p github.com/chaincode/demo/ 62 | echo "peer0.orga.com install chaincode - demo" 63 | } 64 | 65 | # 实例链码 66 | InstantiateChainCode() { 67 | peer chaincode instantiate \ 68 | -o ${ORDERERNODE} \ 69 | -C ${CHANNEL_NAME} \ 70 | -n ${NAME} \ 71 | -v ${VERSION} \ 72 | -c '{"Args":["Init"]}' \ 73 | -P "AND ('OrgAMSP.peer')" 74 | echo "instantiate chaincode" 75 | sleep 10 76 | } 77 | 78 | # 链码测试 79 | TestDemo() { 80 | # 创建账户 81 | peer chaincode invoke \ 82 | -C ${CHANNEL_NAME} \ 83 | -o ${ORDERERNODE} \ 84 | -n ${NAME} \ 85 | --peerAddresses ${PEERORGANODE} \ 86 | -c '{"Args":["open","count_a", "100"]}' 87 | peer chaincode invoke \ 88 | -C ${CHANNEL_NAME} \ 89 | -o ${ORDERERNODE} \ 90 | -n ${NAME} \ 91 | --peerAddresses ${PEERORGANODE} \ 92 | 
-c '{"Args":["open","count_b", "100"]}' 93 | peer chaincode query \ 94 | -C ${CHANNEL_NAME} \ 95 | -n ${NAME} \ 96 | -c '{"Args":["query","count_a"]}' 97 | peer chaincode query \ 98 | -C ${CHANNEL_NAME} \ 99 | -n ${NAME} \ 100 | -c '{"Args":["query","count_b"]}' 101 | peer chaincode invoke \ 102 | -C ${CHANNEL_NAME} \ 103 | -o ${ORDERERNODE} \ 104 | -n ${NAME} \ 105 | --peerAddresses ${PEERORGANODE} \ 106 | -c '{"Args":["invoke","count_a","count_b","50"]}' 107 | peer chaincode invoke \ 108 | -C ${CHANNEL_NAME} \ 109 | -o ${ORDERERNODE} \ 110 | -n ${NAME} \ 111 | --peerAddresses ${PEERORGANODE} \ 112 | -c '{"Args":["open","count_c", "100"]}' 113 | peer chaincode invoke \ 114 | -C ${CHANNEL_NAME} \ 115 | -o ${ORDERER3NODE} \ 116 | -n ${NAME} \ 117 | --peerAddresses ${PEERORGANODE} \ 118 | -c '{"Args":["invoke","count_a","count_c","10"]}' 119 | peer chaincode query \ 120 | -C ${CHANNEL_NAME} \ 121 | -n ${NAME} \ 122 | -c '{"Args":["query","count_a"]}' 123 | peer chaincode query \ 124 | -C ${CHANNEL_NAME} \ 125 | -n ${NAME} \ 126 | -c '{"Args":["query","count_b"]}' 127 | peer chaincode query \ 128 | -C ${CHANNEL_NAME} \ 129 | -n ${NAME} \ 130 | -c '{"Args":["query","count_c"]}' 131 | } 132 | 133 | case $1 in 134 | installchannel) 135 | InstallChannel 136 | ;; 137 | joinchannel) 138 | JoinChannel 139 | ;; 140 | anchorupdate) 141 | AnchorUpdate 142 | ;; 143 | installchaincode) 144 | InstallChainCode 145 | ;; 146 | instantiatechaincode) 147 | InstantiateChainCode 148 | ;; 149 | testdemo) 150 | OrgA 151 | TestDemo 152 | ;; 153 | all) 154 | OrgA 155 | InstallChannel 156 | JoinChannel 157 | AnchorUpdate 158 | InstallChainCode 159 | InstantiateChainCode 160 | TestDemo 161 | ;; 162 | esac 163 | -------------------------------------------------------------------------------- /pbft-network/scripts/gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | GENESIS_PROFILE=Genesis 4 | CHANNEL_PROFILE=Channel 5 | 
SYS_CHANNEL=sys-channel 6 | CHANNEL_NAME=mychannel 7 | VERSION=1.4.4 8 | 9 | FABRIC_CFG_PATH=$PWD 10 | 11 | ORG_NAMES=(OrgAMSP) 12 | 13 | # 检测cryptogen和版本 14 | if ! [ -x "$(command -v cryptogen)" ] ; then 15 | echo -e "\033[31m no cryptogen\033[0m" 16 | exit 1 17 | fi 18 | if [ ${VERSION} != "$(cryptogen version | grep Version | awk -F ': ' '{print $2}')" ] ; then 19 | echo -e "\033[31m cryptogen need version \033[0m"${VERSION} 20 | exit 1 21 | fi 22 | # 检测configtxgen和版本 23 | if ! [ -x "$(command -v configtxgen)" ] ; then 24 | echo -e "\033[31m no configtxgen\033[0m" 25 | exit 1 26 | fi 27 | if [ ${VERSION} != "$(configtxgen --version | grep Version | awk -F ': ' '{print $2}')" ] ; then 28 | echo -e "\033[31m configtxgen need version \033[0m"${VERSION} 29 | exit 1 30 | fi 31 | # 生成证书文件 32 | echo -e "\033[31m clear crypto files\033[0m" 33 | rm -rf crypto-config 34 | echo -e "\033[31m generate crypto files\033[0m" 35 | cryptogen generate --config ./crypto-config.yaml 36 | # 清理多余文件 37 | echo -e "\033[31m clear block files\033[0m" 38 | rm -rf ./channel-artifacts 39 | mkdir ./channel-artifacts 40 | # 生成创世块 41 | echo -e "\033[31m generate genesis block\033[0m" 42 | configtxgen \ 43 | -profile ${GENESIS_PROFILE} \ 44 | -channelID ${SYS_CHANNEL} \ 45 | -outputBlock ./channel-artifacts/genesis.block \ 46 | # 生成通道交易 47 | echo -e "\033[31m generate channel transcation\033[0m" 48 | configtxgen \ 49 | -profile ${CHANNEL_PROFILE} \ 50 | -channelID ${CHANNEL_NAME} \ 51 | -outputCreateChannelTx ./channel-artifacts/channel.tx 52 | # 生成铆节点配置 53 | echo -e "\033[31m generate anchor transcation\033[0m" 54 | for i in ${ORG_NAMES[@]}; do 55 | configtxgen \ 56 | -profile ${CHANNEL_PROFILE} \ 57 | -channelID ${CHANNEL_NAME} \ 58 | -outputAnchorPeersUpdate ./channel-artifacts/${i}anchor.tx \ 59 | -asOrg ${i} 60 | done 61 | -------------------------------------------------------------------------------- /pbft-network/scripts/utils.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ORGA=orga 4 | ORGAUSERS=(Admin User1) 5 | VERSION=pbft 6 | 7 | # 复制keystore 8 | CPFile() { 9 | files=$(ls $1) 10 | echo ${files[0]} 11 | cd $1 12 | cp ${files[0]} ./key.pem 13 | cd - 14 | } 15 | 16 | # 复制所有文件keystore 17 | CPAllFiles() { 18 | PREFIX=crypto-config/peerOrganizations 19 | SUFFIX=msp/keystore 20 | for u in ${ORGAUSERS[@]}; do 21 | CPFile ${PREFIX}/${ORGA}.com/users/${u}@${ORGA}.com/${SUFFIX} 22 | done 23 | } 24 | 25 | # 清理缓存文件 26 | Clean() { 27 | rm -rf ./channel-artifacts 28 | rm -rf ./crypto-config 29 | rm -rf ./production 30 | rm -rf /tmp/crypto 31 | } 32 | 33 | case $1 in 34 | # 压力测试启动/关闭 35 | cli) 36 | env IMAGETAG=${VERSION} docker-compose -f ./docker-compose-cli.yaml up -d 37 | docker exec -ti cli /bin/bash 38 | ;; 39 | up) 40 | CPAllFiles 41 | env IMAGETAG=${VERSION} docker-compose -f ./docker-compose-cli.yaml up -d 42 | sleep 3 43 | docker exec cli /bin/bash -c "scripts/env.sh all" 44 | ;; 45 | down) 46 | docker kill $(docker ps -qa) 47 | echo y | docker system prune 48 | docker rmi $(docker images | grep 'dev-*' | awk '{print $3}') 49 | echo y | docker system prune 50 | Clean 51 | ;; 52 | esac 53 | -------------------------------------------------------------------------------- /pbft/chain.go: -------------------------------------------------------------------------------- 1 | package pbft 2 | 3 | import ( 4 | "fmt" 5 | "github.com/hyperledger/fabric/orderer/consensus" 6 | "github.com/hyperledger/fabric/orderer/consensus/pbft/cmd" 7 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 8 | "github.com/hyperledger/fabric/orderer/consensus/pbft/node" 9 | cb "github.com/hyperledger/fabric/protos/common" 10 | "time" 11 | ) 12 | 13 | type Chain struct { 14 | exitChan chan struct{} 15 | support consensus.ConsenterSupport 16 | pbftNode *node.Node 17 | } 18 | 19 | func NewChain(support consensus.ConsenterSupport) *Chain { 20 | // 
创建PBFT服务器 21 | logger.Info("NewChain - ", support.ChainID()) 22 | if node.GNode == nil { 23 | node.GNode = node.NewNode(cmd.ReadConfig(), support) 24 | node.GNode.Run() 25 | } else { 26 | node.GNode.RegisterChain(support) 27 | } 28 | 29 | c := &Chain{ 30 | exitChan: make(chan struct{}), 31 | support: support, 32 | pbftNode: node.GNode, 33 | } 34 | return c 35 | } 36 | 37 | // 启动 38 | func (ch *Chain) Start() { 39 | logger.Info("start") 40 | } 41 | 42 | // 发送错误 43 | func (ch *Chain) Errored() <-chan struct{} { 44 | return ch.exitChan 45 | } 46 | 47 | // 清理资源 48 | func (ch *Chain) Halt() { 49 | logger.Info("halt") 50 | select { 51 | case <- ch.exitChan: 52 | default: 53 | close(ch.exitChan) 54 | } 55 | } 56 | 57 | // Order Configure 前 58 | func (ch *Chain) WaitReady() error { 59 | logger.Info("wait ready") 60 | return nil 61 | } 62 | 63 | // 接受交易 64 | func (ch *Chain) Order(env *cb.Envelope, configSeq uint64) error { 65 | logger.Info("Normal") 66 | select { 67 | case <-ch.exitChan: 68 | logger.Info("[CHAIN error exit normal]") 69 | return fmt.Errorf("Exiting") 70 | default: 71 | 72 | } 73 | req := &message.Request{ 74 | Op: message.Operation{ 75 | Envelope: env, 76 | ChannelID: ch.support.ChainID(), 77 | ConfigSeq: configSeq, 78 | Type: message.TYPENORMAL, 79 | }, 80 | TimeStamp: message.TimeStamp(time.Now().UnixNano()), 81 | ID: 0, 82 | } 83 | ch.pbftNode.SendPrimary(req) 84 | return nil 85 | } 86 | 87 | // 接收配置 88 | func (ch *Chain) Configure(config *cb.Envelope, configSeq uint64) error { 89 | logger.Info("Config") 90 | select { 91 | case <-ch.exitChan: 92 | logger.Info("[CHAIN error exit config]") 93 | return fmt.Errorf("Exiting") 94 | default: 95 | } 96 | req := &message.Request{ 97 | Op: message.Operation{ 98 | Envelope: config, 99 | ChannelID: ch.support.ChainID(), 100 | ConfigSeq: configSeq, 101 | Type: message.TYPECONFIG, 102 | }, 103 | TimeStamp: message.TimeStamp(time.Now().UnixNano()), 104 | ID: 0, 105 | } 106 | ch.pbftNode.SendPrimary(req) 107 | return 
nil 108 | } -------------------------------------------------------------------------------- /pbft/cmd/cmd.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "errors" 5 | "flag" 6 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 7 | "log" 8 | "os" 9 | "strconv" 10 | "strings" 11 | ) 12 | 13 | type SharedConfig struct { 14 | ClientServer bool 15 | Port int 16 | Id message.Identify 17 | View message.View 18 | Table map[message.Identify]string 19 | FaultNum uint 20 | ExecuteMaxNum int 21 | CheckPointNum message.Sequence 22 | WaterL message.Sequence 23 | WaterH message.Sequence 24 | } 25 | 26 | func ReadConfig() *SharedConfig { 27 | port, _ := GetConfigurePort() 28 | id, _ := GetConfigureID() 29 | view, _ := GetConfigureView() 30 | table, _ := GetConfigureTable() 31 | 32 | t := make(map[message.Identify]string) 33 | for k, v := range table { 34 | t[message.Identify(k)] = v 35 | } 36 | // calc the fault num 37 | if len(t) % 3 != 1 { 38 | log.Fatalf("[Config Error] the incorrent node num : %d, need 3f + 1", len(t)) 39 | return nil 40 | } 41 | 42 | flag.Parse() 43 | return &SharedConfig{ 44 | Port: port, 45 | Id: message.Identify(id), 46 | View: message.View(view), 47 | Table: t, 48 | FaultNum: uint(len(t)/3), 49 | ExecuteMaxNum: 1, 50 | CheckPointNum: 200, 51 | WaterL: 0, 52 | WaterH: 400, 53 | } 54 | } 55 | 56 | // 获取配置 57 | func GetConfigureID() (id int, err error){ 58 | rawID := os.Getenv("PBFT_NODE_ID") 59 | if id, err = strconv.Atoi(rawID); err != nil { 60 | return 61 | } 62 | return 63 | } 64 | 65 | func GetConfigureTable() (map[int]string, error){ 66 | rawTable := os.Getenv("PBFT_NODE_TABLE") 67 | nodeTable := make(map[int]string, 0) 68 | 69 | tables := strings.Split(rawTable, ";") 70 | for index, t := range tables { 71 | nodeTable[index] = t 72 | } 73 | // 节点不满足 3f + 1 74 | if len(tables) < 3 || len(tables) % 3 != 1 { 75 | return nil, errors.New("") 76 | } 77 | return 
nodeTable, nil 78 | } 79 | 80 | func GetConfigurePort() (port int, err error){ 81 | rawPort := os.Getenv("PBFT_LISTEN_PORT") 82 | if port, err = strconv.Atoi(rawPort); err != nil { 83 | return 84 | } 85 | return 86 | } 87 | 88 | func GetConfigureView() (int, error) { 89 | const ViewID = 100000 90 | return ViewID, nil 91 | } 92 | -------------------------------------------------------------------------------- /pbft/consensus.go: -------------------------------------------------------------------------------- 1 | package pbft 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/common/flogging" 5 | ) 6 | 7 | var logger = flogging.MustGetLogger("orderer.consensus.pbft") -------------------------------------------------------------------------------- /pbft/consenter.go: -------------------------------------------------------------------------------- 1 | package pbft 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus" 5 | cb "github.com/hyperledger/fabric/protos/common" 6 | ) 7 | 8 | type consenter struct{ 9 | } 10 | 11 | func New() consensus.Consenter { 12 | return &consenter{} 13 | } 14 | 15 | func (pbft *consenter) HandleChain(support consensus.ConsenterSupport, metadata *cb.Metadata) (consensus.Chain, error) { 16 | logger.Info("Handle Chain For PBFT") 17 | return NewChain(support), nil 18 | } -------------------------------------------------------------------------------- /pbft/doc.md: -------------------------------------------------------------------------------- 1 | ## 一、Fabric 可插拔共识算法 PBFT 开发流程 2 | 3 | * `configtxgen`工具源码修改,使其识别`pbft`共识配置。 4 | 5 | ```go 6 | // common/tools/configtxgen/localconfig/config.go:388 7 | switch ord.OrdererType { 8 | case 'pbft': 9 | } 10 | // commom/tools/configtxgen/encoder/encoder.go:38 11 | const ConsensusTypePbft = "pbft" 12 | // commom/tools/configtxgen/encoder/encoder.go:215 13 | switch conf.OrdererType { 14 | case ConsensusTypePbft: 15 | } 16 | ``` 17 | 18 | * 添加共识算法实例 19 | 20 | ```go 21 | // 
orderer/common/server/main.go:664 22 | consenters["pbft"] = pbft.New() 23 | ``` 24 | 25 | * 实现共识接口`/orderer/consensus/consensus.go` 26 | 27 | ```go 28 | // 接口说明 - Consneter 29 | // 返回 Chain 用于实现处理区块接口 30 | type Consenter interface { 31 | HandleChain(support ConsenterSupport, metadata *cb.Metadata) (Chain, error) 32 | } 33 | // Chain 处理区块接口 34 | type Chain interface { 35 | // 处理 Normal 交易 36 | Order(env *cb.Envelope, configSeq uint64) error 37 | // 处理配置交易 38 | Configure(config *cb.Envelope, configSeq uint64) error 39 | // 等待接收交易,处理函数交易前 40 | WaitReady() error 41 | // 发送错误 chan 42 | Errored() <-chan struct{} 43 | // 初始化 Chain 中资源 44 | Start() 45 | // 资源释放 46 | Halt() 47 | } 48 | ``` 49 | 50 | * 编译产生 orderer 镜像(修改`orderer\peer\tools` tag 为 `pbft`) 51 | 52 | ``` 53 | $ make orderer-docker 54 | ``` 55 | 56 | * 编译产生 configtxgen 工具(输出目录:`.build/bin/configtxgen`) 57 | 58 | ``` 59 | $ make configtxgen 60 | ``` 61 | 62 | ## 二、网络拓扑 63 | 64 | | 类型/组织 | 域名 | IP/端口/PBFT端口 | 组织名 | 65 | | :-------: | :--------------: | :--------------------: | :--------: | 66 | | Orderer | orderer0.yzm.com | 172.22.0.100:6050/6070 | OrdererOrg | 67 | | Orderer | orderer1.yzm.com | 172.22.0.101:6051/6071 | OrdererOrg | 68 | | Orderer | orderer2.yzm.com | 172.22.0.101:6052/6072 | OrdererOrg | 69 | | Orderer | orderer3.yzm.com | 172.22.0.101:6053/6073 | OrdererOrg | 70 | | Peer/OrgA | peer0.orga.com | 172.22.0.2:7051 | OrgAMSP | 71 | 72 | ## 三、配置说明 73 | 74 | 采用环境变量: 75 | 76 | * `PBFT_LISTEN_PORT`:PBFT 节点监听端口 77 | * `PBFT_NODE_ID`:PBFT 节点 ID 78 | * `PBFT_NODE_TABLE`:PBFT 网络列表 79 | -------------------------------------------------------------------------------- /pbft/message/crypto.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | "encoding/json" 7 | "log" 8 | ) 9 | 10 | func Hash(content []byte) string { 11 | h := sha256.New() 12 | h.Write(content) 13 | return hex.EncodeToString(content) 
14 | } 15 | 16 | func Digest(obj interface{}) (string, error) { 17 | content, err := json.Marshal(obj) 18 | if err != nil { 19 | log.Printf("[Crypto] marshl the object error: %s", err) 20 | return "", err 21 | } 22 | return Hash(content), nil 23 | } 24 | -------------------------------------------------------------------------------- /pbft/message/lastreply.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "sync" 4 | 5 | // node last reply 6 | type LastReply struct { 7 | reply *Reply 8 | locker *sync.RWMutex 9 | } 10 | 11 | func NewLastReply() *LastReply { 12 | return &LastReply{ 13 | reply: nil, 14 | locker: new(sync.RWMutex), 15 | } 16 | } 17 | 18 | // only read 19 | func (r *LastReply) Equal(msg *Request) bool { 20 | r.locker.RLock() 21 | ret := true 22 | if r.reply == nil || r.reply.TimeStamp != msg.TimeStamp { 23 | ret = false 24 | } 25 | r.locker.RUnlock() 26 | return ret 27 | } 28 | 29 | func (r *LastReply) Set(msg *Reply) { 30 | r.locker.Lock() 31 | r.reply = msg 32 | r.locker.Unlock() 33 | } 34 | 35 | -------------------------------------------------------------------------------- /pbft/message/message.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/json" 5 | cb "github.com/hyperledger/fabric/protos/common" 6 | "strconv" 7 | ) 8 | 9 | type TimeStamp uint64 // 时间戳格式 10 | type Identify uint64 // 客户端标识格式 11 | type View Identify // 视图 12 | type Sequence int64 // 序号 13 | 14 | const TYPENORMAL = "normal" 15 | const TYPECONFIG = "config" 16 | 17 | // Operation 18 | type Operation struct { 19 | Envelope *cb.Envelope 20 | ChannelID string 21 | ConfigSeq uint64 22 | Type string 23 | } 24 | 25 | // Result 26 | type Result struct { 27 | } 28 | 29 | // Request 30 | type Request struct { 31 | Op Operation `json:"operation"` 32 | TimeStamp TimeStamp `json:"timestamp"` 33 | ID Identify `json:"clientID"` 34 | } 35 | 
36 | // Message 37 | type Message struct { 38 | Requests []*Request `json:"requests"` 39 | } 40 | 41 | // Pre-Prepare 42 | type PrePrepare struct { 43 | View View `json:"view"` 44 | Sequence Sequence `json:"sequence"` 45 | Digest string `json:"digest"` 46 | Message Message `json:"message"` 47 | } 48 | 49 | // Prepare 50 | type Prepare struct { 51 | View View `json:"view"` 52 | Sequence Sequence `json:"sequence"` 53 | Digest string `json:"digest"` 54 | Identify Identify `json:"id"` 55 | } 56 | 57 | // Commit 58 | type Commit struct { 59 | View View `json:"view"` 60 | Sequence Sequence `json:"sequence"` 61 | Digest string `json:"digest"` 62 | Identify Identify `json:"id"` 63 | } 64 | 65 | // Reply 66 | type Reply struct { 67 | View View `json:"view"` 68 | TimeStamp TimeStamp `json:"timestamp"` 69 | Id Identify `json:"nodeID"` 70 | Result Result `json:"result"` 71 | } 72 | 73 | // CheckPoint 74 | type CheckPoint struct { 75 | Sequence Sequence `json:"sequence"` 76 | Digest string `json:"digest"` 77 | Id Identify `json:"nodeID"` 78 | } 79 | 80 | // return byte, msg, digest, error 81 | func NewPreprepareMsg(view View, seq Sequence, batch []*Request) ([]byte, *PrePrepare, string, error) { 82 | message := Message{Requests: batch} 83 | d, err := Digest(message) 84 | if err != nil { 85 | return []byte{}, nil, "", nil 86 | } 87 | prePrepare := &PrePrepare{ 88 | View: view, 89 | Sequence: seq, 90 | Digest: d, 91 | Message: message, 92 | } 93 | ret, err := json.Marshal(prePrepare) 94 | if err != nil { 95 | return []byte{}, nil, "", nil 96 | } 97 | return ret, prePrepare, d, nil 98 | } 99 | 100 | // return byte, prepare, error 101 | func NewPrepareMsg(id Identify, msg *PrePrepare) ([]byte, *Prepare, error) { 102 | prepare := &Prepare{ 103 | View: msg.View, 104 | Sequence: msg.Sequence, 105 | Digest: msg.Digest, 106 | Identify: id, 107 | } 108 | content, err := json.Marshal(prepare) 109 | if err != nil { 110 | return []byte{}, nil, err 111 | } 112 | return content, prepare, nil 
113 | } 114 | 115 | // return byte, commit, error 116 | func NewCommitMsg(id Identify, msg *Prepare) ([]byte, *Commit, error) { 117 | commit := &Commit{ 118 | View: msg.View, 119 | Sequence: msg.Sequence, 120 | Digest: msg.Digest, 121 | Identify: id, 122 | } 123 | content, err := json.Marshal(commit) 124 | if err != nil { 125 | return []byte{}, nil, err 126 | } 127 | return content, commit, nil 128 | } 129 | 130 | func ViewSequenceString(view View, seq Sequence) string { 131 | // TODO need better method 132 | seqStr := strconv.Itoa(int(seq)) 133 | viewStr := strconv.Itoa(int(view)) 134 | seqLen := 4 - len(seqStr) 135 | viewLen := 28 - len(viewStr) 136 | // high 4 for viewStr 137 | for i := 0; i < seqLen; i++ { 138 | viewStr = "0" + viewStr 139 | } 140 | // low 28 for seqStr 141 | for i := 0; i < viewLen; i++ { 142 | seqStr = "0" + seqStr 143 | } 144 | return viewStr + seqStr 145 | } 146 | -------------------------------------------------------------------------------- /pbft/node/boradcast.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 7 | "github.com/hyperledger/fabric/orderer/consensus/pbft/server" 8 | "log" 9 | "net/http" 10 | ) 11 | 12 | func (n *Node) SendPrimary(msg *message.Request) { 13 | content, err := json.Marshal(msg) 14 | if err != nil { 15 | log.Printf("error to marshal json") 16 | return 17 | } 18 | go SendPost(content, n.table[n.GetPrimary()] + server.RequestEntry) 19 | } 20 | 21 | func (n *Node) BroadCast(content []byte, handle string) { 22 | for k, v := range n.table { 23 | // do not send to my self 24 | if k == n.id { 25 | continue 26 | } 27 | go SendPost(content, v + handle) 28 | } 29 | } 30 | 31 | func SendPost(content []byte, url string) { 32 | buff := bytes.NewBuffer(content) 33 | if _, err := http.Post(url, "application/json", buff); err != nil { 34 | log.Printf("[Send] send 
to %s error: %s", url, err) 35 | } 36 | } 37 | 38 | -------------------------------------------------------------------------------- /pbft/node/checkpoint.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import "log" 4 | 5 | func (n *Node) checkPointRecvThread() { 6 | for { 7 | select { 8 | case msg := <-n.checkPointRecv: 9 | n.buffer.BufferCheckPointMsg(msg, msg.Id) 10 | if n.buffer.IsTrueOfCheckPointMsg(msg.Digest, n.cfg.FaultNum) { 11 | n.buffer.Show() 12 | log.Printf("[CheckPoint] vote checkpoint(%s) success to clear buffer", msg.Digest[0:9]) 13 | n.buffer.ClearBuffer(msg) 14 | n.sequence.CheckPoint() 15 | log.Printf("[CheckPoint] reset the water mark (%d) - (%d)", n.sequence.waterL, n.sequence.waterH) 16 | n.buffer.Show() 17 | } 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /pbft/node/commit.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 5 | "log" 6 | ) 7 | 8 | func (n *Node) commitRecvThread() { 9 | for { 10 | select { 11 | case msg := <-n.commitRecv: 12 | if !n.checkCommitMsg(msg) { 13 | continue 14 | } 15 | // buffer the commit msg 16 | n.buffer.BufferCommitMsg(msg) 17 | log.Printf("[Commit] node(%d) vote to the msg(%d)", msg.Identify, msg.Sequence) 18 | if n.buffer.IsReadyToExecute(msg.Digest, n.cfg.FaultNum, msg.View, msg.Sequence) { 19 | n.readytoExecute(msg.Digest) 20 | } 21 | } 22 | } 23 | } 24 | 25 | func (n *Node) checkCommitMsg(msg *message.Commit) bool { 26 | if n.view != msg.View { 27 | return false 28 | } 29 | if !n.sequence.CheckBound(msg.Sequence) { 30 | return false 31 | } 32 | return true 33 | } -------------------------------------------------------------------------------- /pbft/node/execute.go: -------------------------------------------------------------------------------- 1 | 
package node 2 | 3 | // ready to execute the msg(digest) send to execute queue 4 | func (n *Node) readytoExecute(digest string) { 5 | // buffer to ExcuteQueue 6 | n.buffer.AppendToExecuteQueue(n.buffer.FetchPreprepareMsg(digest)) 7 | // notify ExcuteThread 8 | n.executeNum.Dec() 9 | n.executeNotify<-true 10 | } 11 | -------------------------------------------------------------------------------- /pbft/node/node.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus" 5 | "github.com/hyperledger/fabric/orderer/consensus/pbft/cmd" 6 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 7 | "github.com/hyperledger/fabric/orderer/consensus/pbft/server" 8 | "log" 9 | ) 10 | 11 | var GNode *Node = nil 12 | 13 | type Node struct { 14 | cfg *cmd.SharedConfig 15 | server *server.HttpServer 16 | 17 | id message.Identify 18 | view message.View 19 | table map[message.Identify]string 20 | faultNum uint 21 | 22 | lastReply *message.LastReply 23 | sequence *Sequence 24 | executeNum *ExecuteOpNum 25 | 26 | buffer *message.Buffer 27 | 28 | requestRecv chan *message.Request 29 | prePrepareRecv chan *message.PrePrepare 30 | prepareRecv chan *message.Prepare 31 | commitRecv chan *message.Commit 32 | checkPointRecv chan *message.CheckPoint 33 | 34 | prePrepareSendNotify chan bool 35 | executeNotify chan bool 36 | 37 | supports map[string]consensus.ConsenterSupport 38 | } 39 | 40 | func NewNode(cfg *cmd.SharedConfig, support consensus.ConsenterSupport) *Node { 41 | node := &Node{ 42 | // config 43 | cfg: cfg, 44 | // http server 45 | server: server.NewServer(cfg), 46 | // information about node 47 | id: cfg.Id, 48 | view: cfg.View, 49 | table: cfg.Table, 50 | faultNum: cfg.FaultNum, 51 | // lastReply state 52 | lastReply: message.NewLastReply(), 53 | sequence: NewSequence(cfg), 54 | executeNum: NewExecuteOpNum(), 55 | // the message buffer to store msg 56 | 
buffer: message.NewBuffer(), 57 | // chan for server and recv thread 58 | requestRecv: make(chan *message.Request), 59 | prePrepareRecv: make(chan *message.PrePrepare), 60 | prepareRecv: make(chan *message.Prepare), 61 | commitRecv: make(chan *message.Commit), 62 | checkPointRecv: make(chan *message.CheckPoint), 63 | // chan for notify pre-prepare send thread 64 | prePrepareSendNotify: make(chan bool), 65 | // chan for notify execute op and reply thread 66 | executeNotify: make(chan bool, 100), 67 | supports: make(map[string]consensus.ConsenterSupport), 68 | } 69 | log.Printf("[Node] the node id:%d, view:%d, fault number:%d\n", node.id, node.view, node.faultNum) 70 | node.RegisterChain(support) 71 | return node 72 | } 73 | 74 | func (n *Node) RegisterChain(support consensus.ConsenterSupport) { 75 | if _, ok := n.supports[support.ChainID()]; ok { 76 | return 77 | } 78 | log.Printf("[Node] Register the chain(%s)", support.ChainID()) 79 | n.supports[support.ChainID()] = support 80 | } 81 | 82 | func (n *Node) Run() { 83 | // first register chan for server 84 | n.server.RegisterChan(n.requestRecv, n.prePrepareRecv, n.prepareRecv, n.commitRecv, n.checkPointRecv) 85 | go n.server.Run() 86 | go n.requestRecvThread() 87 | go n.prePrepareSendThread() 88 | go n.prePrepareRecvAndPrepareSendThread() 89 | go n.prepareRecvAndCommitSendThread() 90 | go n.commitRecvThread() 91 | go n.executeAndReplyThread() 92 | go n.checkPointRecvThread() 93 | } 94 | -------------------------------------------------------------------------------- /pbft/node/prepare.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 5 | "github.com/hyperledger/fabric/orderer/consensus/pbft/server" 6 | "log" 7 | ) 8 | 9 | func (n *Node) prepareRecvAndCommitSendThread() { 10 | for { 11 | select { 12 | case msg := <-n.prepareRecv: 13 | if !n.checkPrepareMsg(msg) { 14 | continue 15 | } 
16 | // buffer the prepare msg 17 | n.buffer.BufferPrepareMsg(msg) 18 | // verify send commit msg 19 | if n.buffer.IsTrueOfPrepareMsg(msg.Digest, n.cfg.FaultNum) { 20 | log.Printf("[Prepare] prepare msg(%d) vote success and to send commit", msg.Sequence) 21 | content, msg, err := message.NewCommitMsg(n.id, msg) 22 | if err != nil { 23 | continue 24 | } 25 | // buffer commit msg 26 | n.buffer.BufferCommitMsg(msg) 27 | // TODO broadcast error when buffer the commit msg 28 | n.BroadCast(content, server.CommitEntry) 29 | } 30 | if n.buffer.IsReadyToExecute(msg.Digest, n.cfg.FaultNum, msg.View, msg.Sequence) { 31 | n.readytoExecute(msg.Digest) 32 | } 33 | } 34 | } 35 | } 36 | 37 | func (n *Node) checkPrepareMsg(msg *message.Prepare) bool { 38 | if n.view != msg.View { 39 | return false 40 | } 41 | if !n.sequence.CheckBound(msg.Sequence) { 42 | return false 43 | } 44 | return true 45 | } -------------------------------------------------------------------------------- /pbft/node/preprepare.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 5 | "github.com/hyperledger/fabric/orderer/consensus/pbft/server" 6 | "log" 7 | "time" 8 | ) 9 | 10 | // send pre-prepare thread by request notify or timer 11 | func (n *Node) prePrepareSendThread() { 12 | // TODO change timer duration from config 13 | duration := time.Second 14 | timer := time.After(duration) 15 | for { 16 | select { 17 | // recv request or time out 18 | case <-n.prePrepareSendNotify: 19 | n.prePrepareSendHandleFunc() 20 | case <-timer: 21 | timer = nil 22 | n.prePrepareSendHandleFunc() 23 | timer = time.After(duration) 24 | } 25 | } 26 | } 27 | 28 | func (n *Node) prePrepareSendHandleFunc() { 29 | // buffer is empty or execute op num max 30 | n.executeNum.Lock() 31 | defer n.executeNum.UnLock() 32 | if n.executeNum.Get() >= n.cfg.ExecuteMaxNum { 33 | return 34 | } 35 | if 
n.buffer.SizeofRequestQueue() < 1 { 36 | return 37 | } 38 | // batch request to discard network traffic 39 | batch := n.buffer.BatchRequest() 40 | if len(batch) < 1 { 41 | return 42 | } 43 | seq := n.sequence.Get() 44 | n.executeNum.Inc() 45 | content, msg, digest, err := message.NewPreprepareMsg(n.view, seq, batch) 46 | if err != nil { 47 | log.Printf("[PrePrepare] generate pre-prepare message error") 48 | return 49 | } 50 | log.Printf("[PrePrepare] generate sequence(%d) for msg(%s) request batch size(%d)", seq, digest[0:9], len(batch)) 51 | // buffer the pre-prepare msg 52 | n.buffer.BufferPreprepareMsg(msg) 53 | // boradcast 54 | n.BroadCast(content, server.PrePrepareEntry) 55 | // TODO send error but buffer the request 56 | } 57 | 58 | // recv pre-prepare and send prepare thread 59 | func (n *Node) prePrepareRecvAndPrepareSendThread() { 60 | for { 61 | select { 62 | case msg := <-n.prePrepareRecv: 63 | if !n.checkPrePrepareMsg(msg) { 64 | continue 65 | } 66 | // buffer pre-prepare 67 | n.buffer.BufferPreprepareMsg(msg) 68 | // generate prepare message 69 | content, prepare, err := message.NewPrepareMsg(n.id, msg) 70 | log.Printf("[Pre-Prepare] recv pre-prepare(%d) and send the prepare", msg.Sequence) 71 | if err != nil { 72 | continue 73 | } 74 | // buffer the prepare msg, verify 2f backup 75 | n.buffer.BufferPrepareMsg(prepare) 76 | // boradcast prepare message 77 | n.BroadCast(content, server.PrepareEntry) 78 | // when commit and prepare vote success but not recv pre-prepare 79 | if n.buffer.IsReadyToExecute(msg.Digest, n.cfg.FaultNum, msg.View, msg.Sequence) { 80 | n.readytoExecute(msg.Digest) 81 | } 82 | } 83 | } 84 | } 85 | 86 | func (n *Node) checkPrePrepareMsg(msg *message.PrePrepare) bool { 87 | // check the same view 88 | if n.view != msg.View { 89 | return false 90 | } 91 | // check the same v and n exist diffrent digest 92 | if n.buffer.IsExistPreprepareMsg(msg.View, msg.Sequence) { 93 | return false 94 | } 95 | // check the digest 96 | d, err := 
message.Digest(msg.Message) 97 | if err != nil { 98 | return false 99 | } 100 | if d != msg.Digest { 101 | return false 102 | } 103 | // check the n bound 104 | if !n.sequence.CheckBound(msg.Sequence) { 105 | return false 106 | } 107 | return true 108 | } 109 | -------------------------------------------------------------------------------- /pbft/node/reply.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 5 | "github.com/hyperledger/fabric/orderer/consensus/pbft/server" 6 | cb "github.com/hyperledger/fabric/protos/common" 7 | "log" 8 | ) 9 | 10 | var test_reqeust_num uint64 = 0 11 | 12 | func (n *Node) executeAndReplyThread() { 13 | for { 14 | select { 15 | case <-n.executeNotify: 16 | // execute batch 17 | batchs, lastSeq := n.buffer.BatchExecute(n.sequence.GetLastSequence()) 18 | if len(batchs) == 0 { 19 | log.Printf("[Reply] lost sequence now(%d)", n.sequence.GetLastSequence()) 20 | continue 21 | } 22 | n.sequence.SetLastSequence(lastSeq) 23 | // check point 24 | if n.sequence.ReadyToCheckPoint() { 25 | checkSeq := n.sequence.GetCheckPoint() 26 | content, checkPoint := n.buffer.CheckPoint(checkSeq, n.id) 27 | // buffer checkpoint 28 | n.buffer.BufferCheckPointMsg(checkPoint, n.id) 29 | log.Printf("[Reply] ready to create check point to sequence(%d) msg(%s)", checkSeq, checkPoint.Digest[0:9]) 30 | n.BroadCast(content, server.CheckPointEntry) 31 | } 32 | // map the digest to request 33 | requestBatchs := make([]*message.Request, 0) 34 | for _, b := range batchs { 35 | requestBatchs = append(requestBatchs, b.Message.Requests...) 
36 | } 37 | test_reqeust_num = test_reqeust_num + uint64(len(requestBatchs)) 38 | log.Printf("[Reply] set last sequence(%d) already execute request(%d)", lastSeq, test_reqeust_num) 39 | // pending state 40 | pending := make(map[string]bool) 41 | for _, r := range requestBatchs { 42 | msg := r.Op.Envelope 43 | channel := r.Op.ChannelID 44 | configSeq := r.Op.ConfigSeq 45 | switch r.Op.Type { 46 | case message.TYPECONFIG: 47 | var err error 48 | seq := n.supports[channel].Sequence() 49 | if configSeq < seq { 50 | if msg, _, err = n.supports[r.Op.ChannelID].ProcessConfigMsg(r.Op.Envelope); err != nil { 51 | log.Println(err) 52 | } 53 | } 54 | batch := n.supports[channel].BlockCutter().Cut() 55 | if batch != nil { 56 | block := n.supports[channel].CreateNextBlock(batch) 57 | n.supports[channel].WriteBlock(block, nil) 58 | } 59 | pending[channel] = false 60 | // write config block 61 | block := n.supports[channel].CreateNextBlock([]*cb.Envelope{msg}) 62 | n.supports[channel].WriteConfigBlock(block, nil) 63 | case message.TYPENORMAL: 64 | seq := n.supports[channel].Sequence() 65 | if configSeq < seq { 66 | if _, err := n.supports[channel].ProcessNormalMsg(msg); err != nil { 67 | } 68 | } 69 | batches, p := n.supports[channel].BlockCutter().Ordered(msg) 70 | for _, batch := range batches { 71 | block := n.supports[channel].CreateNextBlock(batch) 72 | n.supports[channel].WriteBlock(block, nil) 73 | } 74 | pending[channel] = p 75 | } 76 | } 77 | for k, v := range pending { 78 | if v { 79 | batch := n.supports[k].BlockCutter().Cut() 80 | if batch != nil { 81 | block := n.supports[k].CreateNextBlock(batch) 82 | n.supports[k].WriteBlock(block, nil) 83 | } 84 | } 85 | } 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /pbft/node/request.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "log" 5 | ) 6 | 7 | func (n *Node) requestRecvThread() { 8 | 
log.Printf("[Node] start recv the request thread") 9 | for { 10 | msg := <- n.requestRecv 11 | // check is primary 12 | if !n.IsPrimary() { 13 | if n.lastReply.Equal(msg) { 14 | // TODO just reply 15 | }else { 16 | // TODO just send it to primary 17 | } 18 | } 19 | n.buffer.AppendToRequestQueue(msg) 20 | n.prePrepareSendNotify <- true 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /pbft/node/sequence.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/pbft/cmd" 5 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 6 | "sync" 7 | ) 8 | 9 | type Sequence struct { 10 | lastSequence message.Sequence 11 | checkSequence message.Sequence 12 | stepSequence message.Sequence 13 | sequence message.Sequence 14 | waterL message.Sequence 15 | waterH message.Sequence 16 | checkLocker bool 17 | locker *sync.RWMutex 18 | } 19 | 20 | func NewSequence(cfg *cmd.SharedConfig) *Sequence { 21 | // default sequence start by 0 22 | // waterl start by 0 23 | // waterh start by 100 24 | return &Sequence{ 25 | lastSequence: 0, 26 | checkSequence: cfg.CheckPointNum, 27 | stepSequence: cfg.CheckPointNum, 28 | sequence: 0, 29 | waterL: message.Sequence(cfg.WaterL), 30 | waterH: message.Sequence(cfg.WaterH), 31 | checkLocker: false, 32 | locker: new(sync.RWMutex), 33 | } 34 | } 35 | 36 | // generate new sequence number 37 | func (s *Sequence) Get() message.Sequence { 38 | s.locker.Lock() 39 | s.sequence = s.sequence + 1 40 | s.locker.Unlock() 41 | return s.sequence 42 | } 43 | 44 | func (s *Sequence) CheckBound(seq message.Sequence) bool { 45 | s.locker.RLock() 46 | defer s.locker.RUnlock() 47 | if seq < s.lastSequence { 48 | return false 49 | } 50 | if seq < s.waterL || seq > s.waterH { 51 | return false 52 | } 53 | return true 54 | } 55 | 56 | func (s *Sequence) SetLastSequence(sequence message.Sequence) { 57 | 
s.locker.Lock() 58 | s.lastSequence = sequence 59 | s.locker.Unlock() 60 | } 61 | 62 | func (s *Sequence) GetLastSequence() (ret message.Sequence) { 63 | s.locker.RLock() 64 | ret = s.lastSequence 65 | s.locker.RUnlock() 66 | return 67 | } 68 | 69 | func (s *Sequence) GetCheckPoint() (ret message.Sequence) { 70 | s.locker.RLock() 71 | ret = s.checkSequence 72 | s.locker.RUnlock() 73 | return 74 | } 75 | 76 | func (s *Sequence) CheckPoint() { 77 | s.locker.Lock() 78 | s.waterL = s.checkSequence + 1 79 | s.checkSequence = s.checkSequence + s.stepSequence 80 | s.waterH = s.checkSequence + s.stepSequence * 2 81 | s.checkLocker = false 82 | s.locker.Unlock() 83 | return 84 | } 85 | 86 | func (s *Sequence) ReadyToCheckPoint() (ret bool){ 87 | ret = false 88 | s.locker.RLock() 89 | if s.lastSequence >= s.checkSequence && !s.checkLocker { 90 | s.checkLocker = true 91 | ret = true 92 | } 93 | s.locker.RUnlock() 94 | return 95 | } 96 | -------------------------------------------------------------------------------- /pbft/node/utils.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 5 | "sync" 6 | ) 7 | 8 | // the execute op num now in state 9 | type ExecuteOpNum struct { 10 | num int 11 | locker *sync.RWMutex 12 | } 13 | 14 | func NewExecuteOpNum() *ExecuteOpNum { 15 | return &ExecuteOpNum{ 16 | num: 0, 17 | locker: new(sync.RWMutex), 18 | } 19 | } 20 | 21 | func (n *ExecuteOpNum) Get() int { 22 | return n.num 23 | } 24 | 25 | func (n *ExecuteOpNum) Inc() { 26 | n.num = n.num + 1 27 | } 28 | 29 | func (n *ExecuteOpNum) Dec() { 30 | n.Lock() 31 | n.num = n.num - 1 32 | n.UnLock() 33 | } 34 | 35 | func (n *ExecuteOpNum) Lock() { 36 | n.locker.Lock() 37 | } 38 | 39 | func (n *ExecuteOpNum) UnLock() { 40 | n.locker.Unlock() 41 | } 42 | 43 | func (n *Node) GetPrimary() message.Identify { 44 | all := len(n.table) 45 | return 
message.Identify(int(n.view)%all) 46 | } 47 | 48 | func (n *Node) IsPrimary() bool { 49 | p := n.GetPrimary() 50 | if p == message.Identify(n.view) { 51 | return true 52 | } 53 | return false 54 | } 55 | 56 | func StringCalc(a string, b string) string { 57 | return "" 58 | } 59 | -------------------------------------------------------------------------------- /pbft/server/handle.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 6 | "log" 7 | "net/http" 8 | ) 9 | 10 | func (s *HttpServer) HttpRequest(w http.ResponseWriter, r *http.Request) { 11 | var msg message.Request 12 | if err := json.NewDecoder(r.Body).Decode(&msg); err != nil { 13 | log.Printf("[Http Error] %s", err) 14 | return 15 | } 16 | s.requestRecv <- &msg 17 | } 18 | 19 | func (s *HttpServer) HttpPrePrepare(w http.ResponseWriter, r *http.Request) { 20 | var msg message.PrePrepare 21 | if err := json.NewDecoder(r.Body).Decode(&msg); err != nil { 22 | log.Printf("[Http Error] %s", err) 23 | return 24 | } 25 | s.prePrepareRecv <- &msg 26 | } 27 | 28 | func (s *HttpServer) HttpPrepare(w http.ResponseWriter, r *http.Request) { 29 | var msg message.Prepare 30 | if err := json.NewDecoder(r.Body).Decode(&msg); err != nil { 31 | log.Printf("[Http Error] %s", err) 32 | return 33 | } 34 | s.prepareRecv <- &msg 35 | } 36 | 37 | func (s *HttpServer) HttpCommit(w http.ResponseWriter, r *http.Request) { 38 | var msg message.Commit 39 | if err := json.NewDecoder(r.Body).Decode(&msg); err != nil { 40 | log.Printf("[Http Error] %s", err) 41 | return 42 | } 43 | s.commitRecv <- &msg 44 | } 45 | 46 | func (s *HttpServer) HttpCheckPoint(w http.ResponseWriter, r *http.Request) { 47 | var msg message.CheckPoint 48 | if err := json.NewDecoder(r.Body).Decode(&msg); err != nil { 49 | log.Printf("[Http Error] %s", err) 50 | return 51 | } 52 | s.checkPointRecv <- &msg 53 | } 
54 | -------------------------------------------------------------------------------- /pbft/server/server.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/pbft/cmd" 5 | "github.com/hyperledger/fabric/orderer/consensus/pbft/message" 6 | "log" 7 | "net/http" 8 | "strconv" 9 | ) 10 | 11 | const ( 12 | RequestEntry = "/request" 13 | PrePrepareEntry = "/preprepare" 14 | PrepareEntry = "/prepare" 15 | CommitEntry = "/commit" 16 | CheckPointEntry = "/checkpoint" 17 | ) 18 | 19 | // http 监听请求 20 | type HttpServer struct { 21 | port int 22 | server *http.Server 23 | 24 | requestRecv chan *message.Request 25 | prePrepareRecv chan *message.PrePrepare 26 | prepareRecv chan *message.Prepare 27 | commitRecv chan *message.Commit 28 | checkPointRecv chan *message.CheckPoint 29 | } 30 | 31 | func NewServer(cfg *cmd.SharedConfig) *HttpServer { 32 | httpServer := &HttpServer{ 33 | port: cfg.Port, 34 | server: nil, 35 | } 36 | // set server 37 | return httpServer 38 | } 39 | 40 | // config server: to register the handle chan 41 | func (s *HttpServer) RegisterChan(r chan *message.Request, pre chan *message.PrePrepare, 42 | p chan *message.Prepare, c chan *message.Commit, cp chan *message.CheckPoint) { 43 | log.Printf("[Server] register the chan for listen func") 44 | s.requestRecv = r 45 | s.prePrepareRecv = pre 46 | s.prepareRecv = p 47 | s.commitRecv = c 48 | s.checkPointRecv = cp 49 | } 50 | 51 | func (s *HttpServer) Run() { 52 | // register server service and run 53 | log.Printf("[Node] start the listen server") 54 | s.registerServer() 55 | } 56 | 57 | func (s *HttpServer) registerServer() { 58 | log.Printf("[Server] set listen port:%d\n", s.port) 59 | 60 | httpRegister := map[string]func(http.ResponseWriter, *http.Request){ 61 | RequestEntry: s.HttpRequest, 62 | PrePrepareEntry: s.HttpPrePrepare, 63 | PrepareEntry: s.HttpPrepare, 64 | CommitEntry: 
s.HttpCommit, 65 | CheckPointEntry: s.HttpCheckPoint, 66 | } 67 | 68 | mux := http.NewServeMux() 69 | for k, v := range httpRegister { 70 | log.Printf("[Server] register the func for %s", k) 71 | mux.HandleFunc(k, v) 72 | } 73 | 74 | s.server = &http.Server{ 75 | Addr: ":" + strconv.Itoa(s.port), 76 | Handler: mux, 77 | } 78 | 79 | if err := s.server.ListenAndServe(); err != nil { 80 | log.Printf("[Server Error] %s", err) 81 | return 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /rbft-network/base/docker-compose-base.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | orderer0.yzm.com: 4 | container_name: orderer0.yzm.com 5 | extends: 6 | file: peer-base.yaml 7 | service: orderer-base 8 | environment: 9 | - ORDERER_GENERAL_LISTENPORT=6050 10 | - PBFT_LISTEN_PORT=6070 11 | - PBFT_NODE_ID=0 12 | - PBFT_NODE_TABLE=http://orderer0.yzm.com:6070;http://orderer1.yzm.com:6071;http://orderer2.yzm.com:6072;http://orderer3.yzm.com:6073 13 | - PBFT_PUBLIC_KEY=909eeae6502d12b12a712551caeccc08ccab604619a7f370cd9dfef1f08fd6d5;c9a870e99461fd1668de345d2e156957562631cdefcfe1bd53cc26ea4747ee80;cbef40aee08f2e11f2b370a55e6aa28a932a7d3985f95ea5149cfcd8fcb6068f;fe2c145b38152588132a2374cc11b17a1e81ea1c2625374cefaca43fb54882bc 14 | - PBFT_PRIVATE_KEY=e0797b4a9279f0987a3d7f4bb565ce501b2e8f88b2182c6567ce721f3fb28608 15 | - 
PBFT_TBLS_PUBLIC_KEY=1475dc210a35dfd8fdd9a41c98d4ab07a4c2353a4f12d97c0a01ddddd347732514b82764c070e3162566cb512662ad5fd6c7deac75da71a8c871f5b0de97543f3d04366d98c744e7d2569043a941bcb8bac1654b776e4f25e9864c0a17c7b90669960affe8238c3dc00ef00b56807f2bab84a6fe9c32ab5553332e78fe21db00,66cb901e46e4700fcd90265204a56a836e5274de4c9c22bbb0adc2e013bfd54f484fba89acb26a9ae06cd64a5471c00ca2ef926851cdeb66d0f62dcac8f41f32762dcdbe3c7236162d4ffa742937979c681e4da5441bb9e986be0dc5528dc7bf401b573f3795170fe5ddf0283c41c0e46f02511a65c45f12b967c9713f922796,1a6cd6c30cfb41b0a597051d846a01a59798c3d02a8be9572ca9b9195acb5e172382addfdaae516569d52390bbcb2811e69a606219ceb33d28790f529cf48a2e8a2d53d9c27119faf9b8771675a28a2bf98fd8a4be8d0ccccab9b593926f009683e59b5483977ce5d661d1f05a89ffa7fa8a59b0b2ea31b77fa5dd32de42860a 16 | - PBFT_TBLS_PRIVATE_KEY=10b6bcf6e928a5b6636a4a80a29bc5ff22b281012a24f10c5b85af758a00602f 17 | volumes: 18 | - ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block 19 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer0.yzm.com/msp:/var/hyperledger/orderer/msp 20 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer0.yzm.com/tls:/var/hyperledger/orderer/tls 21 | - ../production/orderer:/var/hyperledger/production/orderer0 22 | ports: 23 | - 6050:6050 24 | - 6070:6070 25 | 26 | orderer1.yzm.com: 27 | container_name: orderer1.yzm.com 28 | extends: 29 | file: peer-base.yaml 30 | service: orderer-base 31 | environment: 32 | - ORDERER_GENERAL_LISTENPORT=6051 33 | - PBFT_LISTEN_PORT=6071 34 | - PBFT_NODE_ID=1 35 | - PBFT_NODE_TABLE=http://orderer0.yzm.com:6070;http://orderer1.yzm.com:6071;http://orderer2.yzm.com:6072;http://orderer3.yzm.com:6073 36 | - PBFT_PUBLIC_KEY=909eeae6502d12b12a712551caeccc08ccab604619a7f370cd9dfef1f08fd6d5;c9a870e99461fd1668de345d2e156957562631cdefcfe1bd53cc26ea4747ee80;cbef40aee08f2e11f2b370a55e6aa28a932a7d3985f95ea5149cfcd8fcb6068f;fe2c145b38152588132a2374cc11b17a1e81ea1c2625374cefaca43fb54882bc 37 | - 
PBFT_PRIVATE_KEY=bd3e50a519b2eb00f2d59a47c13c9e02b6488fefa276d76d3ac9125ad9cc0100 38 | - PBFT_TBLS_PUBLIC_KEY=1475dc210a35dfd8fdd9a41c98d4ab07a4c2353a4f12d97c0a01ddddd347732514b82764c070e3162566cb512662ad5fd6c7deac75da71a8c871f5b0de97543f3d04366d98c744e7d2569043a941bcb8bac1654b776e4f25e9864c0a17c7b90669960affe8238c3dc00ef00b56807f2bab84a6fe9c32ab5553332e78fe21db00,66cb901e46e4700fcd90265204a56a836e5274de4c9c22bbb0adc2e013bfd54f484fba89acb26a9ae06cd64a5471c00ca2ef926851cdeb66d0f62dcac8f41f32762dcdbe3c7236162d4ffa742937979c681e4da5441bb9e986be0dc5528dc7bf401b573f3795170fe5ddf0283c41c0e46f02511a65c45f12b967c9713f922796,1a6cd6c30cfb41b0a597051d846a01a59798c3d02a8be9572ca9b9195acb5e172382addfdaae516569d52390bbcb2811e69a606219ceb33d28790f529cf48a2e8a2d53d9c27119faf9b8771675a28a2bf98fd8a4be8d0ccccab9b593926f009683e59b5483977ce5d661d1f05a89ffa7fa8a59b0b2ea31b77fa5dd32de42860a 39 | - PBFT_TBLS_PRIVATE_KEY=570010f7fdde6a6807a63e6fbc5c50f15d1c32c34028dafe6a57a67d0501b1cc 40 | volumes: 41 | - ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block 42 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer1.yzm.com/msp:/var/hyperledger/orderer/msp 43 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer1.yzm.com/tls:/var/hyperledger/orderer/tls 44 | - ../production/orderer:/var/hyperledger/production/orderer1 45 | ports: 46 | - 6051:6051 47 | - 6071:6071 48 | 49 | orderer2.yzm.com: 50 | container_name: orderer2.yzm.com 51 | extends: 52 | file: peer-base.yaml 53 | service: orderer-base 54 | environment: 55 | - ORDERER_GENERAL_LISTENPORT=6052 56 | - PBFT_LISTEN_PORT=6072 57 | - PBFT_NODE_ID=2 58 | - PBFT_NODE_TABLE=http://orderer0.yzm.com:6070;http://orderer1.yzm.com:6071;http://orderer2.yzm.com:6072;http://orderer3.yzm.com:6073 59 | - 
PBFT_PUBLIC_KEY=909eeae6502d12b12a712551caeccc08ccab604619a7f370cd9dfef1f08fd6d5;c9a870e99461fd1668de345d2e156957562631cdefcfe1bd53cc26ea4747ee80;cbef40aee08f2e11f2b370a55e6aa28a932a7d3985f95ea5149cfcd8fcb6068f;fe2c145b38152588132a2374cc11b17a1e81ea1c2625374cefaca43fb54882bc 60 | - PBFT_PRIVATE_KEY=002bbbe74ed66812e07095ecb16543b2f3943b09d3ebca74351553744c538207 61 | - PBFT_TBLS_PUBLIC_KEY=1475dc210a35dfd8fdd9a41c98d4ab07a4c2353a4f12d97c0a01ddddd347732514b82764c070e3162566cb512662ad5fd6c7deac75da71a8c871f5b0de97543f3d04366d98c744e7d2569043a941bcb8bac1654b776e4f25e9864c0a17c7b90669960affe8238c3dc00ef00b56807f2bab84a6fe9c32ab5553332e78fe21db00,66cb901e46e4700fcd90265204a56a836e5274de4c9c22bbb0adc2e013bfd54f484fba89acb26a9ae06cd64a5471c00ca2ef926851cdeb66d0f62dcac8f41f32762dcdbe3c7236162d4ffa742937979c681e4da5441bb9e986be0dc5528dc7bf401b573f3795170fe5ddf0283c41c0e46f02511a65c45f12b967c9713f922796,1a6cd6c30cfb41b0a597051d846a01a59798c3d02a8be9572ca9b9195acb5e172382addfdaae516569d52390bbcb2811e69a606219ceb33d28790f529cf48a2e8a2d53d9c27119faf9b8771675a28a2bf98fd8a4be8d0ccccab9b593926f009683e59b5483977ce5d661d1f05a89ffa7fa8a59b0b2ea31b77fa5dd32de42860a 62 | - PBFT_TBLS_PRIVATE_KEY=4374dd9878b78d02f42aefd58fff7d4f29acf164911a34de9bb789b63ad0713c 63 | volumes: 64 | - ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block 65 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer2.yzm.com/msp:/var/hyperledger/orderer/msp 66 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer2.yzm.com/tls:/var/hyperledger/orderer/tls 67 | - ../production/orderer:/var/hyperledger/production/orderer2 68 | ports: 69 | - 6052:6052 70 | - 6072:6072 71 | 72 | orderer3.yzm.com: 73 | container_name: orderer3.yzm.com 74 | extends: 75 | file: peer-base.yaml 76 | service: orderer-base 77 | environment: 78 | - ORDERER_GENERAL_LISTENPORT=6053 79 | - PBFT_LISTEN_PORT=6073 80 | - PBFT_NODE_ID=3 81 | - 
PBFT_NODE_TABLE=http://orderer0.yzm.com:6070;http://orderer1.yzm.com:6071;http://orderer2.yzm.com:6072;http://orderer3.yzm.com:6073 82 | - PBFT_PUBLIC_KEY=909eeae6502d12b12a712551caeccc08ccab604619a7f370cd9dfef1f08fd6d5;c9a870e99461fd1668de345d2e156957562631cdefcfe1bd53cc26ea4747ee80;cbef40aee08f2e11f2b370a55e6aa28a932a7d3985f95ea5149cfcd8fcb6068f;fe2c145b38152588132a2374cc11b17a1e81ea1c2625374cefaca43fb54882bc 83 | - PBFT_PRIVATE_KEY=7b3aba267bd6fc195da3aad06e8611fade48ea7fcef243ee502a1fbc390dd108 84 | - PBFT_TBLS_PUBLIC_KEY=1475dc210a35dfd8fdd9a41c98d4ab07a4c2353a4f12d97c0a01ddddd347732514b82764c070e3162566cb512662ad5fd6c7deac75da71a8c871f5b0de97543f3d04366d98c744e7d2569043a941bcb8bac1654b776e4f25e9864c0a17c7b90669960affe8238c3dc00ef00b56807f2bab84a6fe9c32ab5553332e78fe21db00,66cb901e46e4700fcd90265204a56a836e5274de4c9c22bbb0adc2e013bfd54f484fba89acb26a9ae06cd64a5471c00ca2ef926851cdeb66d0f62dcac8f41f32762dcdbe3c7236162d4ffa742937979c681e4da5441bb9e986be0dc5528dc7bf401b573f3795170fe5ddf0283c41c0e46f02511a65c45f12b967c9713f922796,1a6cd6c30cfb41b0a597051d846a01a59798c3d02a8be9572ca9b9195acb5e172382addfdaae516569d52390bbcb2811e69a606219ceb33d28790f529cf48a2e8a2d53d9c27119faf9b8771675a28a2bf98fd8a4be8d0ccccab9b593926f009683e59b5483977ce5d661d1f05a89ffa7fa8a59b0b2ea31b77fa5dd32de42860a 85 | - PBFT_TBLS_PRIVATE_KEY=65ca24bba4579580d3684b6a7f0a2739b6f24af8152437d109d44d7c831910e0 86 | volumes: 87 | - ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block 88 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer3.yzm.com/msp:/var/hyperledger/orderer/msp 89 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer3.yzm.com/tls:/var/hyperledger/orderer/tls 90 | - ../production/orderer:/var/hyperledger/production/orderer3 91 | ports: 92 | - 6053:6053 93 | - 6073:6073 94 | 95 | peer0.orga.com: 96 | container_name: peer0.orga.com 97 | extends: 98 | file: peer-base.yaml 99 | service: peer-base 100 | environment: 101 | - 
CORE_PEER_ID=peer0.orga.com 102 | - CORE_PEER_ADDRESS=peer0.orga.com:7051 103 | - CORE_PEER_LISTENADDRESS=0.0.0.0:7051 104 | - CORE_PEER_CHAINCODEADDRESS=peer0.orga.com:7052 105 | - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052 106 | - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.orga.com:7051 107 | - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.orga.com:7051 108 | - CORE_PEER_LOCALMSPID=OrgAMSP 109 | volumes: 110 | - /var/run/:/host/var/run/ 111 | - ../crypto-config/peerOrganizations/orga.com/peers/peer0.orga.com/msp:/etc/hyperledger/fabric/msp 112 | - ../crypto-config/peerOrganizations/orga.com/peers/peer0.orga.com/tls:/etc/hyperledger/fabric/tls 113 | - ../production/orga:/var/hyperledger/production 114 | ports: 115 | - 7051:7051 116 | -------------------------------------------------------------------------------- /rbft-network/base/peer-base.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | peer-base: 4 | image: hyperledger/fabric-peer:$IMAGETAG 5 | environment: 6 | - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock 7 | - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=rbft-network_solonet 8 | - FABRIC_LOGGING_SPEC=INFO 9 | - CORE_PEER_TLS_ENABLED=false 10 | - CORE_PEER_GOSSIP_USELEADERELECTION=true 11 | - CORE_PEER_GOSSIP_ORGLEADER=false 12 | - CORE_PEER_PROFILE_ENABLED=true 13 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer 14 | command: peer node start 15 | 16 | orderer-base: 17 | image: hyperledger/fabric-orderer:$IMAGETAG 18 | environment: 19 | # - FABRIC_LOGGING_SPEC=INFO 20 | - FABRIC_LOGGING_SPEC=FATAL 21 | - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0 22 | - ORDERER_GENERAL_GENESISMETHOD=file 23 | - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block 24 | - ORDERER_GENERAL_LOCALMSPID=OrdererMSP 25 | - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp 26 | - ORDERER_GENERAL_TLS_ENABLED=false 27 | - ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1 28 | - 
ORDERER_KAFKA_VERBOSE=true 29 | - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt 30 | - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key 31 | - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt] 32 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric 33 | command: orderer 34 | 35 | -------------------------------------------------------------------------------- /rbft-network/benchmarks/config.yaml: -------------------------------------------------------------------------------- 1 | test: 2 | name: rbft-network 3 | description: rbft-network 4 | workers: 5 | type: local 6 | number: 5 7 | 8 | rounds: 9 | - label: open 10 | description: open 11 | txNumber: 2000 12 | rateControl: 13 | type: fixed-rate 14 | opts: 15 | tps: 100 16 | callback: ../chaincode/demo/callback/open.js 17 | 18 | - label: transfer 19 | description: transfer 20 | txNumber: 2000 21 | rateControl: 22 | type: fixed-rate 23 | opts: 24 | tps: 100 25 | callback: ../chaincode/demo/callback/transfer.js 26 | 27 | - label: query 28 | description: query 29 | txNumber: 2000 30 | rateControl: 31 | type: fixed-rate 32 | opts: 33 | tps: 100 34 | callback: ../chaincode/demo/callback/query.js 35 | 36 | - label: delete 37 | description: delete 38 | txNumber: 2000 39 | rateControl: 40 | type: fixed-rate 41 | opts: 42 | tps: 100 43 | callback: ../chaincode/demo/callback/delete.js 44 | 45 | monitor: 46 | interval: 1 47 | type: 48 | - docker 49 | docker: 50 | containers: 51 | - peer0.orga.com 52 | - orderer0.yzm.com 53 | - orderer1.yzm.com 54 | - orderer2.yzm.com 55 | - orderer3.yzm.com 56 | -------------------------------------------------------------------------------- /rbft-network/benchmarks/network.yaml: -------------------------------------------------------------------------------- 1 | name: Fabric 2 | version: "1.0" 3 | 4 | mutual-tls: false 5 | 6 | caliper: 7 | blockchain: fabric 8 | command: 9 | start: 
scripts/gen.sh;scripts/utils.sh up 10 | end: scripts/utils.sh down 11 | 12 | info: 13 | Version: 1.4.4 14 | Size: 4 Orgs with 2 Peer 15 | Orderer: RBFT 16 | Distribution: Single Host 17 | StateDB: GoLevelDB 18 | 19 | clients: 20 | peer0.orga.com: 21 | client: 22 | organization: OrgA 23 | credentialStore: 24 | path: /tmp/crypto/orga 25 | cryptoStore: 26 | path: /tmp/crypto/orga 27 | clientPrivateKey: 28 | path: crypto-config/peerOrganizations/orga.com/users/User1@orga.com/msp/keystore/key.pem 29 | clientSignedCert: 30 | path: crypto-config/peerOrganizations/orga.com/users/User1@orga.com/msp/signcerts/User1@orga.com-cert.pem 31 | 32 | channels: 33 | mychannel: 34 | configBinary: ./channel-artifacts/channel.tx 35 | created: true 36 | orderers: 37 | - orderer0.yzm.com 38 | - orderer1.yzm.com 39 | - orderer2.yzm.com 40 | - orderer3.yzm.com 41 | peers: 42 | peer0.orga.com: 43 | endorsingPeer: true 44 | chaincodeQuery: true 45 | ledgerQuery: true 46 | eventSource: true 47 | 48 | chaincodes: 49 | - id: money_demo 50 | version: "1.0" 51 | contractID: money_demo 52 | language: golang 53 | path: ../chaincode/demo 54 | targetPeers: 55 | - peer0.orga.com 56 | 57 | organizations: 58 | OrgA: 59 | mspid: OrgAMSP 60 | peers: 61 | - peer0.orga.com 62 | adminPrivateKey: 63 | path: crypto-config/peerOrganizations/orga.com/users/Admin@orga.com/msp/keystore/key.pem 64 | signedCert: 65 | path: crypto-config/peerOrganizations/orga.com/users/Admin@orga.com/msp/signcerts/Admin@orga.com-cert.pem 66 | 67 | orderers: 68 | orderer0.yzm.com: 69 | url: grpc://localhost:6050 70 | grpcOptions: 71 | grpc.keepalive_time_ms: 600000 72 | orderer1.yzm.com: 73 | url: grpc://localhost:6051 74 | grpcOptions: 75 | grpc.keepalive_time_ms: 600000 76 | orderer2.yzm.com: 77 | url: grpc://localhost:6052 78 | grpcOptions: 79 | grpc.keepalive_time_ms: 600000 80 | orderer3.yzm.com: 81 | url: grpc://localhost:6053 82 | grpcOptions: 83 | grpc.keepalive_time_ms: 600000 84 | 85 | peers: 86 | peer0.orga.com: 87 | url: 
grpc://localhost:7051 88 | grpcOptions: 89 | grpc.keepalive_time_ms: 600000 90 | -------------------------------------------------------------------------------- /rbft-network/configtx.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | Organizations: 3 | - &OrdererOrg 4 | Name: OrdererOrg 5 | ID: OrdererMSP 6 | MSPDir: crypto-config/ordererOrganizations/yzm.com/msp 7 | Policies: 8 | Readers: 9 | Type: Signature 10 | Rule: "OR('OrdererMSP.member')" 11 | Writers: 12 | Type: Signature 13 | Rule: "OR('OrdererMSP.member')" 14 | Admins: 15 | Type: Signature 16 | Rule: "OR('OrdererMSP.admin')" 17 | - &OrgA 18 | Name: OrgAMSP 19 | ID: OrgAMSP 20 | MSPDir: crypto-config/peerOrganizations/orga.com/msp 21 | Policies: 22 | Readers: 23 | Type: Signature 24 | Rule: "OR('OrgAMSP.admin', 'OrgAMSP.peer', 'OrgAMSP.client')" 25 | Writers: 26 | Type: Signature 27 | Rule: "OR('OrgAMSP.admin', 'OrgAMSP.client')" 28 | Admins: 29 | Type: Signature 30 | Rule: "OR('OrgAMSP.admin')" 31 | AnchorPeers: 32 | - Host: peer0.orga.com 33 | Port: 7051 34 | 35 | Capabilities: 36 | Channel: &ChannelCapabilities 37 | V1_4_3: true 38 | V1_3: false 39 | V1_1: false 40 | 41 | Orderer: &OrdererCapabilities 42 | V1_4_2: true 43 | V1_1: false 44 | 45 | Application: &ApplicationCapabilities 46 | V1_4_2: true 47 | V1_3: false 48 | V1_2: false 49 | V1_1: false 50 | 51 | Application: &ApplicationDefaults 52 | Organizations: 53 | 54 | Policies: 55 | Readers: 56 | Type: ImplicitMeta 57 | Rule: "ANY Readers" 58 | Writers: 59 | Type: ImplicitMeta 60 | Rule: "ANY Writers" 61 | Admins: 62 | Type: ImplicitMeta 63 | Rule: "MAJORITY Admins" 64 | 65 | Capabilities: 66 | <<: *ApplicationCapabilities 67 | 68 | Orderer: &OrdererDefaults 69 | OrdererType: rbft 70 | Addresses: 71 | - orderer0.yzm.com:6050 72 | - orderer1.yzm.com:6051 73 | - orderer2.yzm.com:6052 74 | - orderer3.yzm.com:6053 75 | 76 | BatchTimeout: 2s 77 | BatchSize: 78 | MaxMessageCount: 1000 79 | 
AbsoluteMaxBytes: 256 MB 80 | PreferredMaxBytes: 512 KB 81 | 82 | Organizations: 83 | Policies: 84 | Readers: 85 | Type: ImplicitMeta 86 | Rule: "ANY Readers" 87 | Writers: 88 | Type: ImplicitMeta 89 | Rule: "ANY Writers" 90 | Admins: 91 | Type: ImplicitMeta 92 | Rule: "MAJORITY Admins" 93 | BlockValidation: 94 | Type: ImplicitMeta 95 | Rule: "ANY Writers" 96 | 97 | Channel: &ChannelDefaults 98 | Policies: 99 | Readers: 100 | Type: ImplicitMeta 101 | Rule: "ANY Readers" 102 | Writers: 103 | Type: ImplicitMeta 104 | Rule: "ANY Writers" 105 | Admins: 106 | Type: ImplicitMeta 107 | Rule: "MAJORITY Admins" 108 | 109 | Capabilities: 110 | <<: *ChannelCapabilities 111 | 112 | Profiles: 113 | Genesis: 114 | <<: *ChannelDefaults 115 | Orderer: 116 | <<: *OrdererDefaults 117 | Organizations: 118 | - *OrdererOrg 119 | Capabilities: 120 | <<: *OrdererCapabilities 121 | Consortiums: 122 | SampleConsortium: 123 | Organizations: 124 | - *OrgA 125 | Channel: 126 | Consortium: SampleConsortium 127 | <<: *ChannelDefaults 128 | Application: 129 | <<: *ApplicationDefaults 130 | Organizations: 131 | - *OrgA 132 | Capabilities: 133 | <<: *ApplicationCapabilities 134 | -------------------------------------------------------------------------------- /rbft-network/crypto-config.yaml: -------------------------------------------------------------------------------- 1 | OrdererOrgs: 2 | - Name: Orderer 3 | Domain: yzm.com 4 | EnableNodeOUs: true # 控制节点目录中是否生成配置文件 5 | Specs: 6 | - Hostname: orderer0 7 | - Hostname: orderer1 8 | - Hostname: orderer2 9 | - Hostname: orderer3 10 | 11 | PeerOrgs: 12 | - Name: OrgA 13 | Domain: orga.com 14 | EnableNodeOUs: true 15 | Template: 16 | Count: 1 17 | Users: 18 | Count: 1 19 | -------------------------------------------------------------------------------- /rbft-network/docker-compose-cli.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | networks: 4 | solonet: 5 | ipam: 6 | config: 7 | - 
subnet: 172.22.0.0/24 8 | gateway: 172.22.0.1 9 | 10 | services: 11 | 12 | orderer0.yzm.com: 13 | extends: 14 | file: base/docker-compose-base.yaml 15 | service: orderer0.yzm.com 16 | container_name: orderer0.yzm.com 17 | networks: 18 | solonet: 19 | ipv4_address: 172.22.0.100 20 | 21 | orderer1.yzm.com: 22 | extends: 23 | file: base/docker-compose-base.yaml 24 | service: orderer1.yzm.com 25 | container_name: orderer1.yzm.com 26 | networks: 27 | solonet: 28 | ipv4_address: 172.22.0.101 29 | 30 | orderer2.yzm.com: 31 | extends: 32 | file: base/docker-compose-base.yaml 33 | service: orderer2.yzm.com 34 | container_name: orderer2.yzm.com 35 | networks: 36 | solonet: 37 | ipv4_address: 172.22.0.102 38 | 39 | orderer3.yzm.com: 40 | extends: 41 | file: base/docker-compose-base.yaml 42 | service: orderer3.yzm.com 43 | container_name: orderer3.yzm.com 44 | networks: 45 | solonet: 46 | ipv4_address: 172.22.0.103 47 | 48 | peer0.orga.com: 49 | container_name: peer0.orga.com 50 | extends: 51 | file: base/docker-compose-base.yaml 52 | service: peer0.orga.com 53 | networks: 54 | solonet: 55 | ipv4_address: 172.22.0.2 56 | 57 | cli: 58 | container_name: cli 59 | image: hyperledger/fabric-tools:$IMAGETAG 60 | tty: true 61 | stdin_open: true 62 | environment: 63 | - SYS_CHANNEL=sys_channel 64 | - GOPATH=/opt/gopath 65 | - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock 66 | - FABRIC_LOGGING_SPEC=INFO 67 | - CORE_PEER_ID=cli 68 | - CORE_PEER_ADDRESS=peer0.orga.com:7051 69 | - CORE_PEER_LOCALMSPID=OrgAMSP 70 | - CORE_PEER_TLS_ENABLED=false 71 | - CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/orga.com/users/Admin@orga.com/msp 72 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer 73 | command: /bin/bash 74 | volumes: 75 | - /var/run/:/host/var/run/ 76 | - ./../chaincode/:/opt/gopath/src/github.com/chaincode 77 | - ./crypto-config:/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ 78 | - 
./scripts:/opt/gopath/src/github.com/hyperledger/fabric/peer/scripts/ 79 | - ./channel-artifacts:/opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts 80 | depends_on: 81 | - orderer0.yzm.com 82 | - orderer1.yzm.com 83 | - orderer2.yzm.com 84 | - orderer3.yzm.com 85 | - peer0.orga.com 86 | networks: 87 | solonet: 88 | ipv4_address: 172.22.0.200 89 | -------------------------------------------------------------------------------- /rbft-network/scripts/env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 证书文件夹 4 | PEERROOT=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations 5 | ORDEROOT=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ordererOrganizations 6 | 7 | # 节点设置 8 | ORDERER0NODE=orderer0.yzm.com:6050 9 | ORDERER1NODE=orderer1.yzm.com:6051 10 | ORDERER2NODE=orderer2.yzm.com:6052 11 | ORDERER3NODE=orderer3.yzm.com:6053 12 | 13 | ORDERERNODE=${ORDERER1NODE} 14 | 15 | PEERORGANODE=peer0.orga.com:7051 16 | CHANNEL_NAME=mychannel 17 | 18 | NAME=money_demo 19 | VERSION=1.0 20 | 21 | # 切换peer0 orgA 22 | OrgA(){ 23 | CORE_PEER_MSPCONFIGPATH=${PEERROOT}/orga.com/users/Admin@orga.com/msp 24 | CORE_PEER_ADDRESS=${PEERORGANODE} 25 | CORE_PEER_LOCALMSPID="OrgAMSP" 26 | echo "node now:peer0.orga.com" 27 | } 28 | 29 | # 安装channel 30 | InstallChannel() { 31 | peer channel create \ 32 | -o ${ORDERERNODE} \ 33 | -c ${CHANNEL_NAME} \ 34 | -f ./channel-artifacts/channel.tx \ 35 | echo "install channel" 36 | } 37 | 38 | # 加入channel 39 | JoinChannel() { 40 | OrgA 41 | peer channel join -b ${CHANNEL_NAME}.block 42 | echo "peer0.orga.com join channel" 43 | } 44 | 45 | # 更新锚节点 46 | AnchorUpdate() { 47 | OrgA 48 | peer channel update \ 49 | -o ${ORDERERNODE} \ 50 | -c ${CHANNEL_NAME} \ 51 | -f ./channel-artifacts/OrgAMSPanchor.tx \ 52 | echo "orga update anchor peer0.orga.com" 53 | } 54 | 55 | # 安装链码 56 | InstallChainCode() { 57 | OrgA 58 | peer chaincode install \ 59 | -n 
${NAME} \ 60 | -v ${VERSION} \ 61 | -p github.com/chaincode/demo/ 62 | echo "peer0.orga.com install chaincode - demo" 63 | } 64 | 65 | # 实例链码 66 | InstantiateChainCode() { 67 | peer chaincode instantiate \ 68 | -o ${ORDERERNODE} \ 69 | -C ${CHANNEL_NAME} \ 70 | -n ${NAME} \ 71 | -v ${VERSION} \ 72 | -c '{"Args":["Init"]}' \ 73 | -P "AND ('OrgAMSP.peer')" 74 | echo "instantiate chaincode" 75 | sleep 10 76 | } 77 | 78 | # 链码测试 79 | TestDemo() { 80 | # 创建账户 81 | peer chaincode invoke \ 82 | -C ${CHANNEL_NAME} \ 83 | -o ${ORDERERNODE} \ 84 | -n ${NAME} \ 85 | --peerAddresses ${PEERORGANODE} \ 86 | -c '{"Args":["open","count_a", "100"]}' 87 | sleep 5 88 | peer chaincode invoke \ 89 | -C ${CHANNEL_NAME} \ 90 | -o ${ORDERERNODE} \ 91 | -n ${NAME} \ 92 | --peerAddresses ${PEERORGANODE} \ 93 | -c '{"Args":["open","count_b", "100"]}' 94 | sleep 5 95 | peer chaincode query \ 96 | -C ${CHANNEL_NAME} \ 97 | -n ${NAME} \ 98 | -c '{"Args":["query","count_a"]}' 99 | peer chaincode query \ 100 | -C ${CHANNEL_NAME} \ 101 | -n ${NAME} \ 102 | -c '{"Args":["query","count_b"]}' 103 | peer chaincode invoke \ 104 | -C ${CHANNEL_NAME} \ 105 | -o ${ORDERERNODE} \ 106 | -n ${NAME} \ 107 | --peerAddresses ${PEERORGANODE} \ 108 | -c '{"Args":["invoke","count_a","count_b","50"]}' 109 | sleep 5 110 | peer chaincode invoke \ 111 | -C ${CHANNEL_NAME} \ 112 | -o ${ORDERERNODE} \ 113 | -n ${NAME} \ 114 | --peerAddresses ${PEERORGANODE} \ 115 | -c '{"Args":["open","count_c", "100"]}' 116 | sleep 5 117 | peer chaincode invoke \ 118 | -C ${CHANNEL_NAME} \ 119 | -o ${ORDERER3NODE} \ 120 | -n ${NAME} \ 121 | --peerAddresses ${PEERORGANODE} \ 122 | -c '{"Args":["invoke","count_a","count_c","10"]}' 123 | sleep 5 124 | peer chaincode query \ 125 | -C ${CHANNEL_NAME} \ 126 | -n ${NAME} \ 127 | -c '{"Args":["query","count_a"]}' 128 | peer chaincode query \ 129 | -C ${CHANNEL_NAME} \ 130 | -n ${NAME} \ 131 | -c '{"Args":["query","count_b"]}' 132 | peer chaincode query \ 133 | -C ${CHANNEL_NAME} \ 134 | -n 
${NAME} \ 135 | -c '{"Args":["query","count_c"]}' 136 | } 137 | 138 | case $1 in 139 | installchannel) 140 | InstallChannel 141 | ;; 142 | joinchannel) 143 | JoinChannel 144 | ;; 145 | anchorupdate) 146 | AnchorUpdate 147 | ;; 148 | installchaincode) 149 | InstallChainCode 150 | ;; 151 | instantiatechaincode) 152 | InstantiateChainCode 153 | ;; 154 | testdemo) 155 | OrgA 156 | TestDemo 157 | ;; 158 | all) 159 | OrgA 160 | InstallChannel 161 | JoinChannel 162 | AnchorUpdate 163 | InstallChainCode 164 | InstantiateChainCode 165 | TestDemo 166 | ;; 167 | esac 168 | -------------------------------------------------------------------------------- /rbft-network/scripts/gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | GENESIS_PROFILE=Genesis 4 | CHANNEL_PROFILE=Channel 5 | SYS_CHANNEL=sys-channel 6 | CHANNEL_NAME=mychannel 7 | VERSION=1.4.4 8 | 9 | FABRIC_CFG_PATH=$PWD 10 | 11 | ORG_NAMES=(OrgAMSP) 12 | 13 | # 检测cryptogen和版本 14 | if ! [ -x "$(command -v cryptogen)" ] ; then 15 | echo -e "\033[31m no cryptogen\033[0m" 16 | exit 1 17 | fi 18 | if [ ${VERSION} != "$(cryptogen version | grep Version | awk -F ': ' '{print $2}')" ] ; then 19 | echo -e "\033[31m cryptogen need version \033[0m"${VERSION} 20 | exit 1 21 | fi 22 | # 检测configtxgen和版本 23 | if ! 
[ -x "$(command -v configtxgen)" ] ; then 24 | echo -e "\033[31m no configtxgen\033[0m" 25 | exit 1 26 | fi 27 | if [ ${VERSION} != "$(configtxgen --version | grep Version | awk -F ': ' '{print $2}')" ] ; then 28 | echo -e "\033[31m configtxgen need version \033[0m"${VERSION} 29 | exit 1 30 | fi 31 | # 生成证书文件 32 | echo -e "\033[31m clear crypto files\033[0m" 33 | rm -rf crypto-config 34 | echo -e "\033[31m generate crypto files\033[0m" 35 | cryptogen generate --config ./crypto-config.yaml 36 | # 清理多余文件 37 | echo -e "\033[31m clear block files\033[0m" 38 | rm -rf ./channel-artifacts 39 | mkdir ./channel-artifacts 40 | # 生成创世块 41 | echo -e "\033[31m generate genesis block\033[0m" 42 | configtxgen \ 43 | -profile ${GENESIS_PROFILE} \ 44 | -channelID ${SYS_CHANNEL} \ 45 | -outputBlock ./channel-artifacts/genesis.block \ 46 | # 生成通道交易 47 | echo -e "\033[31m generate channel transcation\033[0m" 48 | configtxgen \ 49 | -profile ${CHANNEL_PROFILE} \ 50 | -channelID ${CHANNEL_NAME} \ 51 | -outputCreateChannelTx ./channel-artifacts/channel.tx 52 | # 生成铆节点配置 53 | echo -e "\033[31m generate anchor transcation\033[0m" 54 | for i in ${ORG_NAMES[@]}; do 55 | configtxgen \ 56 | -profile ${CHANNEL_PROFILE} \ 57 | -channelID ${CHANNEL_NAME} \ 58 | -outputAnchorPeersUpdate ./channel-artifacts/${i}anchor.tx \ 59 | -asOrg ${i} 60 | done 61 | -------------------------------------------------------------------------------- /rbft-network/scripts/utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ORGA=orga 4 | ORGAUSERS=(Admin User1) 5 | VERSION=rbft 6 | 7 | # 复制keystore 8 | CPFile() { 9 | files=$(ls $1) 10 | echo ${files[0]} 11 | cd $1 12 | cp ${files[0]} ./key.pem 13 | cd - 14 | } 15 | 16 | # 复制所有文件keystore 17 | CPAllFiles() { 18 | PREFIX=crypto-config/peerOrganizations 19 | SUFFIX=msp/keystore 20 | for u in ${ORGAUSERS[@]}; do 21 | CPFile ${PREFIX}/${ORGA}.com/users/${u}@${ORGA}.com/${SUFFIX} 22 | done 23 | } 24 | 25 | # 
清理缓存文件 26 | Clean() { 27 | rm -rf ./channel-artifacts 28 | rm -rf ./crypto-config 29 | rm -rf ./production 30 | rm -rf /tmp/crypto 31 | } 32 | 33 | case $1 in 34 | # 压力测试启动/关闭 35 | cli) 36 | env IMAGETAG=${VERSION} docker-compose -f ./docker-compose-cli.yaml up -d 37 | docker exec -ti cli /bin/bash 38 | ;; 39 | up) 40 | CPAllFiles 41 | env IMAGETAG=${VERSION} docker-compose -f ./docker-compose-cli.yaml up -d 42 | sleep 3 43 | docker exec cli /bin/bash -c "scripts/env.sh all" 44 | ;; 45 | down) 46 | docker kill $(docker ps -qa) 47 | echo y | docker system prune 48 | docker rmi $(docker images | grep 'dev-*' | awk '{print $3}') 49 | echo y | docker system prune 50 | Clean 51 | ;; 52 | esac 53 | -------------------------------------------------------------------------------- /rbft/algorithm/queue.go: -------------------------------------------------------------------------------- 1 | package algorithm 2 | 3 | import ( 4 | "container/list" 5 | "sync" 6 | ) 7 | 8 | type QueueBuffer struct { 9 | fifo *list.List 10 | locker *sync.RWMutex 11 | } 12 | 13 | func NewQueueBuffer() *QueueBuffer { 14 | return &QueueBuffer{ 15 | fifo: list.New(), 16 | locker: new(sync.RWMutex), 17 | } 18 | } 19 | 20 | func (b *QueueBuffer) Pop() { 21 | b.fifo.Remove(b.fifo.Front()) 22 | } 23 | 24 | func (b *QueueBuffer) Top() interface{} { 25 | if b.fifo.Len() == 0 { 26 | return nil 27 | } 28 | return b.fifo.Front().Value 29 | } 30 | 31 | func (b *QueueBuffer) PushHandle(v interface{}, less func(i, j interface{}) bool) { 32 | if b.fifo.Front() == nil { 33 | b.fifo.PushFront(v) 34 | return 35 | } 36 | 37 | if less(v, b.fifo.Front().Value) && !less(b.fifo.Front().Value, v) { 38 | b.fifo.PushFront(v) 39 | return 40 | } 41 | 42 | if less(b.fifo.Back().Value, v) && !less(v, b.fifo.Back().Value){ 43 | b.fifo.PushBack(v) 44 | return 45 | } 46 | 47 | loop := b.fifo.Front() 48 | 49 | for { 50 | if !less(v, loop.Value) && !less(loop.Value, v) { 51 | return 52 | } 53 | if less(v, loop.Value) { 54 | break 55 
| } 56 | loop = loop.Next() 57 | } 58 | 59 | b.fifo.InsertBefore(v, loop) 60 | } 61 | 62 | func (b *QueueBuffer) Push(v interface{}) { 63 | b.fifo.PushBack(v) 64 | } 65 | 66 | func (b *QueueBuffer) Empty() bool { 67 | if b.fifo.Len() == 0 { 68 | return true 69 | } 70 | return false 71 | } 72 | 73 | func (b *QueueBuffer) Len() int { 74 | return b.fifo.Len() 75 | } 76 | 77 | func (b *QueueBuffer) LenHandle(e func(i, j interface{}) bool) int { 78 | if b.fifo.Front() == nil { 79 | return 0 80 | } 81 | 82 | cnt := 1 83 | v := b.fifo.Front().Value 84 | loop := b.fifo.Front().Next() 85 | 86 | for loop != nil { 87 | if e(v, loop.Value) { 88 | cnt++ 89 | }else { 90 | break 91 | } 92 | loop = loop.Next() 93 | } 94 | 95 | return cnt 96 | } 97 | 98 | func (b *QueueBuffer) BatchHandle(e func(i, j interface{}) bool) []interface{} { 99 | ret := make([]interface{}, 0) 100 | ret = append(ret, b.fifo.Front().Value) 101 | b.fifo.Remove(b.fifo.Front()) 102 | 103 | for b.fifo.Front() != nil && e(ret[0], b.fifo.Front().Value) { 104 | ret = append(ret, b.fifo.Front().Value) 105 | b.fifo.Remove(b.fifo.Front()) 106 | } 107 | 108 | return ret 109 | } 110 | 111 | func (b *QueueBuffer) Batch() []interface{}{ 112 | ret := make([]interface{}, b.fifo.Len()) 113 | index := 0 114 | for b.fifo.Len() != 0 { 115 | ret[index] = b.fifo.Front().Value 116 | b.fifo.Remove(b.fifo.Front()) 117 | index = index + 1 118 | } 119 | return ret 120 | } 121 | 122 | func (b *QueueBuffer) Lock() { 123 | b.locker.Lock() 124 | } 125 | 126 | func (b *QueueBuffer) ULock() { 127 | b.locker.Unlock() 128 | } 129 | 130 | func (b *QueueBuffer) RLock() { 131 | b.locker.RLock() 132 | } 133 | 134 | func (b *QueueBuffer) RULock() { 135 | b.locker.RUnlock() 136 | } -------------------------------------------------------------------------------- /rbft/algorithm/queue_test.go: -------------------------------------------------------------------------------- 1 | package algorithm 2 | 3 | import ( 4 | 
"github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func TestQueueBuffer(t *testing.T) { 9 | queue := NewQueueBuffer() 10 | 11 | for i := 0; i < 10; i++ { 12 | queue.Push(i) 13 | } 14 | kase := 0 15 | for !queue.Empty() { 16 | assert.Equal(t, queue.Top(), kase) 17 | kase = kase + 1 18 | queue.Pop() 19 | } 20 | } 21 | 22 | func TestQueueBuffer_Batch(t *testing.T) { 23 | queue := NewQueueBuffer() 24 | for i := 0; i < 100; i++ { 25 | queue.Push(i) 26 | } 27 | batch := queue.Batch() 28 | assert.Equal(t, len(batch), 100) 29 | assert.Equal(t, queue.Empty(), true) 30 | for i := 0; i < 100; i++ { 31 | assert.Equal(t, i, batch[i]) 32 | } 33 | } 34 | 35 | func CmpIntLess(i, j interface{}) bool { 36 | vi := i.(int) 37 | vj := j.(int) 38 | return vi < vj 39 | } 40 | 41 | func Equal(i, j interface{}) bool { 42 | return true; 43 | } 44 | 45 | func TestQueueBufferOrder(t *testing.T) { 46 | queue := NewQueueBuffer() 47 | 48 | for i := 0; i <= 100; i = i + 2 { 49 | queue.PushHandle(i, CmpIntLess) 50 | } 51 | 52 | for i := 99; i >=1; i = i - 2 { 53 | queue.PushHandle(i, CmpIntLess) 54 | } 55 | 56 | for i := 1; i <= 100; i++ { 57 | queue.PushHandle(i, CmpIntLess) 58 | } 59 | 60 | cnt := 0 61 | all := 101 62 | 63 | assert.Equal(t, queue.Len(), all) 64 | 65 | for !queue.Empty() { 66 | assert.Equal(t, all, queue.LenHandle(Equal)) 67 | top := queue.Top() 68 | queue.Pop() 69 | 70 | assert.Equal(t, top, cnt) 71 | cnt = cnt + 1 72 | all = all - 1 73 | } 74 | } 75 | 76 | -------------------------------------------------------------------------------- /rbft/chain.go: -------------------------------------------------------------------------------- 1 | package rbft 2 | 3 | import ( 4 | "fmt" 5 | "github.com/hyperledger/fabric/orderer/consensus" 6 | "github.com/hyperledger/fabric/orderer/consensus/rbft/cmd" 7 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 8 | "github.com/hyperledger/fabric/orderer/consensus/rbft/node" 9 | cb 
"github.com/hyperledger/fabric/protos/common" 10 | "time" 11 | ) 12 | 13 | type Chain struct { 14 | exitChan chan struct{} 15 | support consensus.ConsenterSupport 16 | pbftNode *node.Node 17 | } 18 | 19 | func NewChain(support consensus.ConsenterSupport) *Chain { 20 | // 创建PBFT服务器 21 | logger.Info("NewChain - ", support.ChainID()) 22 | if node.GNode == nil { 23 | node.GNode = node.NewNode(cmd.ReadConfig(), support) 24 | node.GNode.Run() 25 | } else { 26 | node.GNode.RegisterChain(support) 27 | } 28 | 29 | c := &Chain{ 30 | exitChan: make(chan struct{}), 31 | support: support, 32 | pbftNode: node.GNode, 33 | } 34 | return c 35 | } 36 | 37 | // 启动 38 | func (ch *Chain) Start() { 39 | logger.Info("start") 40 | } 41 | 42 | // 发送错误 43 | func (ch *Chain) Errored() <-chan struct{} { 44 | return ch.exitChan 45 | } 46 | 47 | // 清理资源 48 | func (ch *Chain) Halt() { 49 | logger.Info("halt") 50 | select { 51 | case <-ch.exitChan: 52 | default: 53 | close(ch.exitChan) 54 | } 55 | } 56 | 57 | // Order Configure 前 58 | func (ch *Chain) WaitReady() error { 59 | logger.Info("wait ready") 60 | return nil 61 | } 62 | 63 | // 接受交易 64 | func (ch *Chain) Order(env *cb.Envelope, configSeq uint64) error { 65 | logger.Info("Normal") 66 | select { 67 | case <-ch.exitChan: 68 | logger.Info("[CHAIN error exit normal]") 69 | return fmt.Errorf("Exiting") 70 | default: 71 | 72 | } 73 | op := message.Operation{ 74 | Envelope: env, 75 | ChannelID: ch.support.ChainID(), 76 | ConfigSeq: configSeq, 77 | Type: message.TYPENORMAL, 78 | } 79 | // 广播 80 | _, msg := message.NewMessage(op, message.TimeStamp(time.Now().UnixNano()), ch.pbftNode.GetId()) 81 | ch.pbftNode.MsgRecv <- msg 82 | return nil 83 | } 84 | 85 | // 接收配置 86 | func (ch *Chain) Configure(config *cb.Envelope, configSeq uint64) error { 87 | logger.Info("Config") 88 | select { 89 | case <-ch.exitChan: 90 | logger.Info("[CHAIN error exit config]") 91 | return fmt.Errorf("Exiting") 92 | default: 93 | } 94 | op := message.Operation{ 95 | 
Envelope: config, 96 | ChannelID: ch.support.ChainID(), 97 | ConfigSeq: configSeq, 98 | Type: message.TYPECONFIG, 99 | } 100 | _, msg := message.NewMessage(op, message.TimeStamp(time.Now().UnixNano()), ch.pbftNode.GetId()) 101 | ch.pbftNode.MsgRecv <- msg 102 | // 广播 103 | return nil 104 | } 105 | -------------------------------------------------------------------------------- /rbft/cmd/cmd.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "errors" 5 | "flag" 6 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 7 | "go.dedis.ch/kyber" 8 | "go.dedis.ch/kyber/group/edwards25519" 9 | "go.dedis.ch/kyber/pairing/bn256" 10 | "go.dedis.ch/kyber/share" 11 | "go.dedis.ch/kyber/util/encoding" 12 | "log" 13 | "os" 14 | "strconv" 15 | "strings" 16 | "time" 17 | ) 18 | 19 | func ReadConfig() *SharedConfig { 20 | port, _ := GetConfigurePort() 21 | id, _ := GetConfigureID() 22 | view, _ := GetConfigureView() 23 | table, _ := GetConfigureTable() 24 | 25 | t := make(map[message.Identify]string) 26 | for k, v := range table { 27 | t[message.Identify(k)] = v 28 | } 29 | // calc the fault num 30 | if len(t)%3 != 1 { 31 | log.Fatalf("[Config Error] the incorrent node num : %d, need 3f + 1", len(t)) 32 | return nil 33 | } 34 | 35 | flag.Parse() 36 | return &SharedConfig{ 37 | Port: port, 38 | Id: message.Identify(id), 39 | View: message.View(view), 40 | Table: t, 41 | Fault: uint(len(t) / 3), 42 | ExecuteMaxNum: 1, 43 | CheckPointNum: 200, 44 | WaterL: 0, 45 | WaterH: 400, 46 | PrivateScalar: GetPrivateScalar(), 47 | PublicSet: GetPublicSet(), 48 | TblsPubPoly: GetTblsPubPoly(), 49 | TblsPrivateScalar: GetTblsPrivateScalar(), 50 | ClientBatchSize: 100, 51 | ClientBatchTime: time.Second, 52 | } 53 | } 54 | 55 | // 获取配置 56 | func GetConfigureID() (id int, err error) { 57 | rawID := os.Getenv("PBFT_NODE_ID") 58 | if id, err = strconv.Atoi(rawID); err != nil { 59 | return 60 | } 61 | return 62 | } 63 | 64 
| func GetConfigureTable() (map[int]string, error) { 65 | rawTable := os.Getenv("PBFT_NODE_TABLE") 66 | nodeTable := make(map[int]string, 0) 67 | 68 | tables := strings.Split(rawTable, ";") 69 | for index, t := range tables { 70 | nodeTable[index] = t 71 | } 72 | // 节点不满足 3f + 1 73 | if len(tables) < 3 || len(tables)%3 != 1 { 74 | return nil, errors.New("") 75 | } 76 | return nodeTable, nil 77 | } 78 | 79 | func GetConfigurePort() (port int, err error) { 80 | rawPort := os.Getenv("PBFT_LISTEN_PORT") 81 | if port, err = strconv.Atoi(rawPort); err != nil { 82 | return 83 | } 84 | return 85 | } 86 | 87 | func GetConfigureView() (int, error) { 88 | const ViewID = 0 89 | return ViewID, nil 90 | } 91 | 92 | func GetPrivateScalar() kyber.Scalar { 93 | raw := os.Getenv("PBFT_PRIVATE_KEY") 94 | ret, err := encoding.StringHexToScalar(edwards25519.NewBlakeSHA256Ed25519(), raw) 95 | if err != nil { 96 | log.Fatalf("[Config] read private key error") 97 | } 98 | return ret 99 | } 100 | 101 | func GetPublicSet() []kyber.Point { 102 | ret := make([]kyber.Point, 0) 103 | raw := os.Getenv("PBFT_PUBLIC_KEY") 104 | tables := strings.Split(raw, ";") 105 | for _, k := range tables { 106 | point, err := encoding.StringHexToPoint(edwards25519.NewBlakeSHA256Ed25519(), k) 107 | if err != nil { 108 | log.Fatalf("[Config] read public key error") 109 | } 110 | ret = append(ret, point) 111 | } 112 | return ret 113 | } 114 | 115 | func GetTblsPubPoly() *share.PubPoly { 116 | pubPoint := make([]kyber.Point, 0) 117 | raw := os.Getenv("PBFT_TBLS_PUBLIC_KEY") 118 | tables := strings.Split(raw, ",") 119 | for _, k := range tables { 120 | p, err := encoding.StringHexToPoint(bn256.NewSuite().G2(), k) 121 | if err != nil { 122 | log.Fatalf("[Config] read tbls public key error") 123 | } 124 | pubPoint = append(pubPoint, p) 125 | } 126 | return share.NewPubPoly(bn256.NewSuite().G2(), bn256.NewSuite().G2().Point().Base(), pubPoint) 127 | } 128 | 129 | func GetTblsPrivateScalar() kyber.Scalar { 130 | raw := 
os.Getenv("PBFT_TBLS_PRIVATE_KEY") 131 | p, err := encoding.StringHexToScalar(bn256.NewSuite().G2(), raw) 132 | if err != nil { 133 | log.Fatalf("[Config] read tbls private key error") 134 | } 135 | return p 136 | } 137 | -------------------------------------------------------------------------------- /rbft/cmd/config.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 5 | "go.dedis.ch/kyber" 6 | "go.dedis.ch/kyber/share" 7 | "time" 8 | ) 9 | 10 | type SharedConfig struct { 11 | ClientServer bool 12 | Port int 13 | Id message.Identify 14 | View message.View 15 | Table map[message.Identify]string 16 | Fault uint 17 | ExecuteMaxNum int 18 | CheckPointNum message.Sequence 19 | WaterL message.Sequence 20 | WaterH message.Sequence 21 | PrivateScalar kyber.Scalar 22 | PublicSet []kyber.Point 23 | TblsPubPoly *share.PubPoly 24 | TblsPrivateScalar kyber.Scalar 25 | ClientBatchSize int 26 | ClientBatchTime time.Duration 27 | } 28 | -------------------------------------------------------------------------------- /rbft/consensus.go: -------------------------------------------------------------------------------- 1 | package rbft 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/common/flogging" 5 | ) 6 | 7 | var logger = flogging.MustGetLogger("orderer.consensus.rbft") -------------------------------------------------------------------------------- /rbft/consenter.go: -------------------------------------------------------------------------------- 1 | package rbft 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus" 5 | cb "github.com/hyperledger/fabric/protos/common" 6 | ) 7 | 8 | type consenter struct{ 9 | } 10 | 11 | func New() consensus.Consenter { 12 | return &consenter{} 13 | } 14 | 15 | func (pbft *consenter) HandleChain(support consensus.ConsenterSupport, metadata *cb.Metadata) (consensus.Chain, error) { 16 | 
logger.Info("Handle Chain For PBFT") 17 | return NewChain(support), nil 18 | } -------------------------------------------------------------------------------- /rbft/crypto/crypto.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "go.dedis.ch/kyber" 5 | "go.dedis.ch/kyber/group/edwards25519" 6 | "go.dedis.ch/kyber/pairing/bn256" 7 | "go.dedis.ch/kyber/share" 8 | "go.dedis.ch/kyber/sign/anon" 9 | "go.dedis.ch/kyber/sign/bls" 10 | "go.dedis.ch/kyber/sign/eddsa" 11 | "go.dedis.ch/kyber/sign/tbls" 12 | "log" 13 | ) 14 | 15 | // ring signature and verify 16 | func RingSign(msg []byte, id int, pri kyber.Scalar, pubSet []kyber.Point) []byte { 17 | suite := edwards25519.NewBlakeSHA256Ed25519() 18 | // pubset to point 19 | content := anon.Sign(suite, msg, pubSet, nil, id, pri) 20 | return content 21 | } 22 | 23 | func RingVerify(msg, sign []byte, pubSet []kyber.Point) bool { 24 | suite := edwards25519.NewBlakeSHA256Ed25519() 25 | tag, err := anon.Verify(suite, msg, pubSet, nil, sign) 26 | if err != nil || tag == nil || len(tag) != 0 { 27 | return false 28 | } 29 | return true 30 | } 31 | 32 | // tbls 33 | func TblsSign(msg []byte, id int, pri kyber.Scalar) []byte { 34 | suite := bn256.NewSuite() 35 | 36 | p := &share.PriShare{ 37 | I: id, 38 | V: pri, 39 | } 40 | 41 | content, err := tbls.Sign(suite, p, msg) 42 | if err != nil { 43 | return nil 44 | } 45 | return content 46 | } 47 | 48 | func TblsRecover(msg []byte, parts [][]byte, t, n int, pub *share.PubPoly) []byte { 49 | sig, err := tbls.Recover(bn256.NewSuite(), pub, msg, parts, t, n) 50 | if err != nil { 51 | return nil 52 | } 53 | return sig 54 | } 55 | 56 | func TblsVerify(msg, sig []byte, pub *share.PubPoly) bool { 57 | err := bls.Verify(bn256.NewSuite(), pub.Commit(), msg, sig) 58 | if err != nil { 59 | return false 60 | } 61 | return true 62 | } 63 | 64 | // signature and verify 65 | func Sign(msg []byte, pub kyber.Point, pri kyber.Scalar) 
[]byte { 66 | ed := eddsa.EdDSA{ 67 | Secret: pri, 68 | Public: pub, 69 | } 70 | sign, err := ed.Sign(msg) 71 | if err != nil { 72 | log.Printf("[Crypto] error to sign the message") 73 | return nil 74 | } 75 | return sign 76 | } 77 | 78 | func Verify(msg, sig []byte, pub kyber.Point) bool { 79 | err := eddsa.Verify(pub, msg, sig) 80 | if err != nil { 81 | return false 82 | } 83 | return true 84 | } 85 | -------------------------------------------------------------------------------- /rbft/crypto/crypto_test.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "go.dedis.ch/kyber" 6 | "go.dedis.ch/kyber/group/edwards25519" 7 | "go.dedis.ch/kyber/pairing/bn256" 8 | "go.dedis.ch/kyber/share" 9 | "go.dedis.ch/kyber/util/encoding" 10 | "testing" 11 | ) 12 | 13 | func TestRingSign(t *testing.T) { 14 | n := 4 15 | signer := 2 16 | msg := []byte("helloworld") 17 | suite := edwards25519.NewBlakeSHA256Ed25519() 18 | 19 | priSet := make([]kyber.Scalar, n) 20 | pubSet := make([]kyber.Point, n) 21 | 22 | for i := 0; i < n; i++ { 23 | stream := suite.RandomStream() 24 | priSet[i] = suite.Scalar().Pick(stream) 25 | pubSet[i] = suite.Point().Mul(priSet[i], nil) 26 | } 27 | 28 | pri := priSet[signer] 29 | s := RingSign(msg, signer, pri, pubSet) 30 | // verify 31 | b := RingVerify(msg, s, pubSet) 32 | assert.Equal(t, true, b) 33 | s[0]++ 34 | b = RingVerify(msg, s, pubSet) 35 | assert.Equal(t, false, b) 36 | } 37 | 38 | func TestTblsSign(t *testing.T) { 39 | n := 4 40 | v := 3 41 | 42 | msg := []byte("HelloWorld") 43 | 44 | suite := bn256.NewSuite() 45 | 46 | secret := suite.G1().Scalar().Pick(suite.RandomStream()) 47 | 48 | priPoly := share.NewPriPoly(suite.G2(), v, secret, suite.RandomStream()) 49 | pubPoly := priPoly.Commit(suite.G2().Point().Base()) 50 | _, commits := pubPoly.Info() 51 | 52 | priKey := make([]string, 0) 53 | pubKey := make([]string, 0) 54 | 55 | for _, 
x := range commits { 56 | pub, _ := encoding.PointToStringHex(suite.G2(), x) 57 | pubKey = append(pubKey, pub) 58 | } 59 | 60 | for _, x := range priPoly.Shares(n) { 61 | pri, _ := encoding.ScalarToStringHex(suite.G2(), x.V) 62 | priKey = append(priKey, pri) 63 | } 64 | 65 | // 复原 key 和 pubPoly 66 | priScalr := make([]kyber.Scalar, 0) 67 | pubPoint := make([]kyber.Point, 0) 68 | 69 | for _, k := range pubKey { 70 | p, _ := encoding.StringHexToPoint(suite.G2(), k) 71 | pubPoint = append(pubPoint, p) 72 | } 73 | for _, k := range priKey { 74 | p, _ := encoding.StringHexToScalar(suite.G2(), k) 75 | priScalr = append(priScalr, p) 76 | } 77 | 78 | pubPoly = share.NewPubPoly(suite.G2(), suite.G2().Point().Base(), pubPoint) 79 | 80 | // pubPoly - 共享公匙 81 | // proScalr - 私钥 82 | 83 | // 0 1 2 签 84 | sigPart := make([][]byte, 0) 85 | for i := 0; i < v; i++ { 86 | sig := TblsSign(msg, i, priScalr[i]) 87 | sigPart = append(sigPart, sig) 88 | } 89 | content := TblsRecover(msg, sigPart, v, n, pubPoly) 90 | assert.NotEqual(t, len(content), 0) 91 | verify := TblsVerify(msg, content, pubPoly) 92 | assert.Equal(t, verify, true) 93 | // 1 2 3 签 94 | sigPart = make([][]byte, 0) 95 | for i := 1; i <= 3; i++ { 96 | sigPart = append(sigPart, TblsSign(msg, i, priScalr[i])) 97 | } 98 | content = TblsRecover(msg, sigPart, v, n, pubPoly) 99 | assert.NotEqual(t, len(content), 0) 100 | verify = TblsVerify(msg, content, pubPoly) 101 | assert.Equal(t, verify, true) 102 | // 0 1 签字 103 | sigPart = make([][]byte, 0) 104 | for i := 0; i <= 1; i++ { 105 | sigPart = append(sigPart, TblsSign(msg, i, priScalr[i])) 106 | } 107 | content = TblsRecover(msg, sigPart, v, n, pubPoly) 108 | assert.Equal(t, len(content), 0) 109 | verify = TblsVerify(msg, content, pubPoly) 110 | assert.Equal(t, verify, false) 111 | // 0 1 2 3 签字 112 | sigPart = make([][]byte, 0) 113 | for i := 0; i <= 3; i++ { 114 | sigPart = append(sigPart, TblsSign(msg, i, priScalr[i])) 115 | } 116 | content = TblsRecover(msg, sigPart, v, n, 
pubPoly) 117 | assert.NotEqual(t, len(content), 0) 118 | verify = TblsVerify(msg, content, pubPoly) 119 | assert.Equal(t, verify, true) 120 | // 0 1 1 1 签字 121 | sigPart = make([][]byte, 0) 122 | sigPart = append(sigPart, TblsSign(msg, 0, priScalr[0])) 123 | sigPart = append(sigPart, TblsSign(msg, 1, priScalr[1])) 124 | sigPart = append(sigPart, TblsSign(msg, 1, priScalr[1])) 125 | sigPart = append(sigPart, TblsSign(msg, 1, priScalr[1])) 126 | content = TblsRecover(msg, sigPart, v, n, pubPoly) 127 | assert.Equal(t, len(content), 0) 128 | verify = TblsVerify(msg, content, pubPoly) 129 | assert.Equal(t, verify, false) 130 | // 0 1 1 1 2 签字 131 | sigPart = make([][]byte, 0) 132 | sigPart = append(sigPart, TblsSign(msg, 0, priScalr[0])) 133 | sigPart = append(sigPart, TblsSign(msg, 1, priScalr[1])) 134 | sigPart = append(sigPart, TblsSign(msg, 1, priScalr[1])) 135 | sigPart = append(sigPart, TblsSign(msg, 1, priScalr[1])) 136 | sigPart = append(sigPart, TblsSign(msg, 2, priScalr[2])) 137 | content = TblsRecover(msg, sigPart, v, n, pubPoly) 138 | assert.Equal(t, len(content), 0) 139 | verify = TblsVerify(msg, content, pubPoly) 140 | assert.Equal(t, verify, false) 141 | } 142 | 143 | func TestSign(t *testing.T) { 144 | msg := []byte("helloworld") 145 | suite := edwards25519.NewBlakeSHA256Ed25519() 146 | 147 | pri := suite.Scalar().Pick(suite.RandomStream()) 148 | pub := suite.Point().Mul(pri, nil) 149 | 150 | sign := Sign(msg, pub, pri) 151 | v := Verify(msg, sign, pub) 152 | assert.Equal(t, v, true) 153 | pub2 := suite.Point().Mul(suite.Scalar().Pick(suite.RandomStream()), nil) 154 | assert.Equal(t, Verify(msg, sign, pub2), false) 155 | } 156 | 157 | -------------------------------------------------------------------------------- /rbft/doc.md: -------------------------------------------------------------------------------- 1 | ## 一、Fabric 可插拔共识算法 RBFT 开发流程 2 | 3 | * `configtxgen`工具源码修改,使其识别`rbft`共识配置。 4 | 5 | ```go 6 | // 
common/tools/configtxgen/localconfig/config.go:388 7 | switch ord.OrdererType { 8 | case 'rbft': 9 | } 10 | // commom/tools/configtxgen/encoder/encoder.go:38 11 | const ConsensusTypeRbft = "rbft" 12 | // commom/tools/configtxgen/encoder/encoder.go:215 13 | switch conf.OrdererType { 14 | case ConsensusTypeRbft: 15 | } 16 | ``` 17 | 18 | * 添加共识算法实例 19 | 20 | ```go 21 | // orderer/common/server/main.go:664 22 | consenters["rbft"] = rbft.New() 23 | ``` 24 | 25 | * 实现共识接口`/orderer/consensus/consensus.go` 26 | 27 | ```go 28 | // 接口说明 - Consneter 29 | // 返回 Chain 用于实现处理区块接口 30 | type Consenter interface { 31 | HandleChain(support ConsenterSupport, metadata *cb.Metadata) (Chain, error) 32 | } 33 | // Chain 处理区块接口 34 | type Chain interface { 35 | // 处理 Normal 交易 36 | Order(env *cb.Envelope, configSeq uint64) error 37 | // 处理配置交易 38 | Configure(config *cb.Envelope, configSeq uint64) error 39 | // 等待接收交易,处理函数交易前 40 | WaitReady() error 41 | // 发送错误 chan 42 | Errored() <-chan struct{} 43 | // 初始化 Chain 中资源 44 | Start() 45 | // 资源释放 46 | Halt() 47 | } 48 | ``` 49 | 50 | * 编译产生 orderer 镜像(修改`orderer\peer\tools` tag 为 `pbft`) 51 | 52 | ``` 53 | $ make orderer-docker 54 | ``` 55 | 56 | * 编译产生 configtxgen 工具(输出目录:`.build/bin/configtxgen`) 57 | 58 | ``` 59 | $ make configtxgen 60 | ``` 61 | 62 | ## 二、网络拓扑 63 | 64 | | 类型/组织 | 域名 | IP/端口/RBFT端口 | 组织名 | 65 | | :-------: | :--------------: | :--------------------: | :--------: | 66 | | Orderer | orderer0.yzm.com | 172.22.0.100:6050/6070 | OrdererOrg | 67 | | Orderer | orderer1.yzm.com | 172.22.0.101:6051/6071 | OrdererOrg | 68 | | Orderer | orderer2.yzm.com | 172.22.0.101:6052/6072 | OrdererOrg | 69 | | Orderer | orderer3.yzm.com | 172.22.0.101:6053/6073 | OrdererOrg | 70 | | Peer/OrgA | peer0.orga.com | 172.22.0.2:7051 | OrgAMSP | 71 | 72 | ## 三、配置说明 73 | 74 | 采用环境变量: 75 | 76 | * `PBFT_LISTEN_PORT`:PBFT 节点监听端口 77 | * `PBFT_NODE_ID`:PBFT 节点 ID 78 | * `PBFT_NODE_TABLE`:PBFT 网络列表 79 | 80 | * `PBFT_PUBLIC_KEY`:签名公钥 81 | * 
`PBFT_PRIVATE_KEY`:签名私钥 82 | * `PBFT_TBLS_PUBLIC_KEY`:门限签名公钥 83 | * `PBFT_TBLS_PRIVATE_KEY`:门限签名私钥 -------------------------------------------------------------------------------- /rbft/message/block.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/json" 5 | cb "github.com/hyperledger/fabric/protos/common" 6 | ) 7 | 8 | const TYPENORMAL = "normal" 9 | const TYPECONFIG = "config" 10 | 11 | // Operation 12 | type Operation struct { 13 | Envelope *cb.Envelope `json:"payload"` 14 | ChannelID string `json:"channel"` 15 | ConfigSeq uint64 `json:"configSeq"` 16 | Type string `json:"type"` 17 | } 18 | 19 | // Message 20 | type Message struct { 21 | Op Operation `json:"operation"` 22 | ID Identify `json:"clientID"` 23 | TimeStamp TimeStamp `json:"timeStamp"` 24 | } 25 | 26 | // Request 27 | type Block struct { 28 | Requests []*Message `json:"requests"` 29 | TimeStamp TimeStamp `json:"timeStamp"` 30 | } 31 | 32 | func (b *Block) Content() []byte { 33 | content, err := json.Marshal(b) 34 | if err != nil { 35 | return nil 36 | } 37 | return content 38 | } 39 | 40 | func (b *Block) Digest() string { 41 | return Hash(b.Content()) 42 | } 43 | 44 | func NewBlockByContent(payload []byte) *Block { 45 | ret := new(Block) 46 | err := json.Unmarshal(payload, ret) 47 | if err != nil { 48 | return nil 49 | } 50 | return ret 51 | } 52 | 53 | func NewMessage(op Operation, t TimeStamp, id Identify) ([]byte, *Message) { 54 | msg := &Message{ 55 | Op: op, 56 | TimeStamp: t, 57 | ID: id, 58 | } 59 | content, err := json.Marshal(msg) 60 | if err != nil { 61 | return nil, nil 62 | } 63 | return content, msg 64 | } 65 | 66 | func LessBlock(i, j interface{}) bool { 67 | vi := *i.(*Block) 68 | vj := *j.(*Block) 69 | return vi.TimeStamp < vj.TimeStamp 70 | } 71 | -------------------------------------------------------------------------------- /rbft/message/buffer.go: 
-------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "github.com/hyperledger/fabric/orderer/consensus/rbft/algorithm" 4 | 5 | type Buffer struct { 6 | ComBuffer *algorithm.QueueBuffer 7 | ProposalBuffer *algorithm.QueueBuffer 8 | PrepareBuffer *algorithm.QueueBuffer 9 | CommitBuffer *algorithm.QueueBuffer 10 | BlockBuffer *algorithm.QueueBuffer 11 | } 12 | 13 | func NewBuffer() *Buffer { 14 | return &Buffer{ 15 | ComBuffer: algorithm.NewQueueBuffer(), 16 | ProposalBuffer: algorithm.NewQueueBuffer(), 17 | PrepareBuffer: algorithm.NewQueueBuffer(), 18 | CommitBuffer: algorithm.NewQueueBuffer(), 19 | BlockBuffer: algorithm.NewQueueBuffer(), 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /rbft/message/com.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/crypto" 6 | "go.dedis.ch/kyber" 7 | ) 8 | 9 | // com 消息封装 10 | type ComMsg struct { 11 | View View `json:"view"` 12 | Id Identify `json:"id"` 13 | Com []byte `json:"com"` 14 | ACom []byte `json:"acom"` 15 | } 16 | 17 | // 比较函数,用于处理 buffer 18 | // view -> id 19 | func LessComMsg(i, j interface{}) bool { 20 | vi := *i.(*ComMsg) 21 | vj := *j.(*ComMsg) 22 | 23 | if vi.View < vj.View { 24 | return true 25 | }else if vi.View > vj.View { 26 | return false 27 | } 28 | 29 | // vi.View == vj.View 30 | if vi.Id < vj.Id { 31 | return true 32 | } 33 | 34 | return false 35 | } 36 | 37 | // 比较函数,用于处理 buffer 38 | func EqualComMsg(i, j interface{}) bool { 39 | vi := *i.(*ComMsg) 40 | vj := *j.(*ComMsg) 41 | 42 | return vi.View == vj.View 43 | } 44 | 45 | // 生成 com 消息 46 | func NewComMsg(view View, id Identify, pri kyber.Scalar, pub []kyber.Point, msg []byte) ([]byte, *ComMsg) { 47 | cp := crypto.Sign(msg, pub[int(id)], pri) 48 | com := HashByte(cp) 49 | acom := 
crypto.RingSign(com, int(id), pri, pub) 50 | 51 | comMsg := &ComMsg{ 52 | View: view, 53 | Id: id, 54 | Com: com, 55 | ACom: acom, 56 | } 57 | 58 | content, err := json.Marshal(comMsg) 59 | if err != nil { 60 | return nil, nil 61 | } 62 | 63 | return content, comMsg 64 | } 65 | -------------------------------------------------------------------------------- /rbft/message/commit.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "encoding/json" 4 | 5 | type CommitMsg struct { 6 | View View `json:"view"` 7 | Sequence Sequence `json:"sequence"` 8 | Digest string `json:"degest"` 9 | Threshold []byte `json:"threshold"` 10 | } 11 | 12 | func LessCommitMsg(i, j interface{}) bool { 13 | vi := *i.(*CommitMsg) 14 | vj := *j.(*CommitMsg) 15 | 16 | if vi.View < vj.View { 17 | return true 18 | }else if vi.View > vj.View { 19 | return false 20 | } 21 | 22 | if vi.Sequence < vj.Sequence { 23 | return true 24 | }else if vi.Sequence > vj.Sequence { 25 | return false 26 | } 27 | 28 | for i, _ := range vi.Threshold { 29 | if vi.Threshold[i] < vj.Threshold[i] { 30 | return true 31 | } 32 | } 33 | 34 | return false 35 | } 36 | 37 | func NewCommitMsg(view View, seq Sequence, dig string, threshold []byte) ([]byte, *CommitMsg) { 38 | msg := &CommitMsg{ 39 | View: view, 40 | Sequence: seq, 41 | Digest: dig, 42 | Threshold: threshold, 43 | } 44 | content, err := json.Marshal(msg) 45 | if err != nil { 46 | return nil, nil 47 | } 48 | return content, msg 49 | } 50 | -------------------------------------------------------------------------------- /rbft/message/lastblock.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/hex" 5 | "encoding/json" 6 | "log" 7 | "sort" 8 | ) 9 | 10 | type LastBlock struct { 11 | Coms map[string]Identify `json:"coms"` 12 | AComs map[string]Identify `json:"acoms"` 13 | } 14 | 15 | 16 | func NewLastBlock() 
*LastBlock { 17 | return &LastBlock{ 18 | Coms: nil, 19 | AComs: nil, 20 | } 21 | } 22 | 23 | func NewLastBlockByComs(coms []*ComMsg) *LastBlock { 24 | l := NewLastBlock() 25 | 26 | l.Coms = make(map[string]Identify) 27 | l.AComs = make(map[string]Identify) 28 | 29 | for _, i := range coms { 30 | l.Coms[hex.EncodeToString(i.Com)] = i.Id 31 | l.AComs[hex.EncodeToString(i.ACom)] = i.Id 32 | } 33 | 34 | return l 35 | } 36 | 37 | func NewLastBlockByContent(payLoad []byte) *LastBlock { 38 | ret := new(LastBlock) 39 | err := json.Unmarshal(payLoad, ret) 40 | if err != nil { 41 | log.Printf("[LastBlock] payload to lastblock error") 42 | } 43 | return ret 44 | } 45 | 46 | // 从 lastblock 查主 47 | func (l *LastBlock) GetPrimaryIdentify(view View) Identify { 48 | list := make([]string, 0) 49 | for k := range l.Coms { 50 | list = append(list, k) 51 | } 52 | index := int(view) % len(list) 53 | sort.Slice(list, func(i, j int) bool { 54 | return list[i] < list[j] 55 | }) 56 | log.Printf("[LastBlock] index from com(%d) and primary(%d)", index, l.Coms[list[index]]) 57 | return l.Coms[list[index]] 58 | } 59 | 60 | // lastBlock 数据 61 | func (l *LastBlock) Content() []byte { 62 | content, err := json.Marshal(*l) 63 | if err != nil { 64 | return nil 65 | } 66 | return content 67 | } 68 | 69 | // lastBlock 摘要 70 | func (l *LastBlock) Digest() string { 71 | return Hash(l.Content()) 72 | } 73 | -------------------------------------------------------------------------------- /rbft/message/message.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | ) 7 | 8 | type TimeStamp uint64 // 时间戳格式 9 | type Identify uint64 // 客户端标识格式 10 | type View Identify // 视图 11 | type Sequence int64 // 序号 12 | 13 | func Hash(content []byte) string { 14 | return hex.EncodeToString(HashByte(content)) 15 | } 16 | 17 | func HashByte(content []byte) []byte { 18 | h := sha256.New() 19 | h.Write(content) 20 | 
return h.Sum(nil) 21 | } 22 | -------------------------------------------------------------------------------- /rbft/message/prepare.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "encoding/json" 4 | 5 | type PrepareMsg struct { 6 | View View `json:"view"` 7 | Sequence Sequence `json:"sequence"` 8 | Digest string `json:"digest"` 9 | PartSig []byte `json:"parSig"` 10 | } 11 | 12 | func NewPrepareMsg(view View, seq Sequence, dig string, partSig []byte) ([]byte, *PrepareMsg) { 13 | msg := &PrepareMsg{ 14 | View: view, 15 | Sequence: seq, 16 | Digest: dig, 17 | PartSig: partSig, 18 | } 19 | content, err := json.Marshal(msg) 20 | if err != nil { 21 | return nil, nil 22 | } 23 | return content, msg 24 | } 25 | 26 | func EqualPrepareMsg(i, j interface{}) bool { 27 | vi := *i.(*PrepareMsg) 28 | vj := *j.(*PrepareMsg) 29 | return vi.Digest == vj.Digest 30 | } 31 | 32 | func LessPrepareMsg(i, j interface{}) bool { 33 | vi := *i.(*PrepareMsg) 34 | vj := *j.(*PrepareMsg) 35 | // first view 36 | if vi.View < vj.View { 37 | return true 38 | }else if vi.View > vj.View { 39 | return false 40 | } 41 | 42 | // second sequence 43 | if vi.Sequence < vj.Sequence { 44 | return true 45 | }else if vi.Sequence > vj.Sequence { 46 | return false 47 | } 48 | 49 | // digest 排序 50 | if vi.Digest < vj.Digest { 51 | return true 52 | }else if vi.Digest > vj.Digest { 53 | return false 54 | } 55 | 56 | for i, _ := range vi.PartSig { 57 | if vi.PartSig[i] < vj.PartSig[i] { 58 | return true 59 | } 60 | } 61 | 62 | return false 63 | } 64 | -------------------------------------------------------------------------------- /rbft/message/proposal.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | type Proposal struct { 8 | CP []byte `json:"cp"` 9 | View View `json:"view"` 10 | Sequence Sequence `json:"sequence"` 11 | Digest string 
`json:"digest"` 12 | BlockType bool `json:"blockType"` 13 | PayLoad []byte `json:"payLoad"` 14 | } 15 | 16 | func LessproposalMsg(i, j interface{}) bool { 17 | vi := *i.(*Proposal) 18 | vj := *j.(*Proposal) 19 | if vi.View < vj.View { 20 | return true 21 | } 22 | return false 23 | } 24 | 25 | func NewProposalByLastBlock(view View, sequence Sequence, cp []byte, lastBlock *LastBlock) ([]byte, *Proposal){ 26 | msg := &Proposal{ 27 | CP: cp, 28 | View: view, 29 | Sequence: sequence, 30 | Digest: lastBlock.Digest(), 31 | BlockType: true, 32 | PayLoad: lastBlock.Content(), 33 | } 34 | content, err := json.Marshal(msg) 35 | if err != nil { 36 | return nil, nil 37 | } 38 | return content, msg 39 | } 40 | 41 | func NewProposalByBlock(view View, seq Sequence, cp []byte, block *Block) ([]byte, *Proposal) { 42 | msg := &Proposal{ 43 | CP: cp, 44 | View: view, 45 | Sequence: seq, 46 | Digest: block.Digest(), 47 | BlockType: false, 48 | PayLoad: block.Content(), 49 | } 50 | content, err := json.Marshal(msg) 51 | if err != nil { 52 | return nil, nil 53 | } 54 | return content, msg 55 | } -------------------------------------------------------------------------------- /rbft/node/block.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 5 | "log" 6 | ) 7 | 8 | func (n *Node) blockHandle(cp []byte) ([]byte, *message.Proposal) { 9 | n.buffer.BlockBuffer.Lock() 10 | defer n.buffer.BlockBuffer.ULock() 11 | 12 | if n.buffer.BlockBuffer.Top().(*message.Block).TimeStamp <= n.lastTimeStamp { 13 | n.buffer.BlockBuffer.Pop() 14 | return nil, nil 15 | } else if n.buffer.BlockBuffer.Empty() { 16 | return nil, nil 17 | } 18 | 19 | block := n.buffer.BlockBuffer.Top().(*message.Block) 20 | n.buffer.BlockBuffer.Pop() 21 | 22 | log.Printf("[Block] the request len(%d)", len(block.Requests)) 23 | return message.NewProposalByBlock(n.view, n.sequence.PrepareSequence(), 
cp, block) 24 | } 25 | 26 | func (n *Node) blockRecvThread() { 27 | for { 28 | select { 29 | case msg := <-n.blockRecv: 30 | if msg.TimeStamp <= n.lastTimeStamp { 31 | // 过期请求 32 | log.Printf("[Request] the block request(%d) is expire, last time(%d)", msg.TimeStamp, n.lastTimeStamp) 33 | } else { 34 | log.Printf("[Request] recv the block request(%s) at time(%d)", msg.Digest(), msg.TimeStamp) 35 | n.buffer.BlockBuffer.Lock() 36 | n.buffer.BlockBuffer.PushHandle(msg, message.LessBlock) 37 | n.buffer.BlockBuffer.ULock() 38 | } 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /rbft/node/boradcast.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | "net/http" 7 | ) 8 | 9 | func (n *Node) BroadCastAll(content []byte, handle string) { 10 | for _, v := range n.table { 11 | go SendPost(content, v + handle) 12 | } 13 | } 14 | 15 | func (n *Node) BroadCast(content []byte, handle string) { 16 | for k, v := range n.table { 17 | // do not send to my self 18 | if k == n.id { 19 | continue 20 | } 21 | go SendPost(content, v + handle) 22 | } 23 | } 24 | 25 | func SendPost(content []byte, url string) { 26 | buff := bytes.NewBuffer(content) 27 | if _, err := http.Post(url, "application/json", buff); err != nil { 28 | log.Printf("[Send] send to %s error: %s", url, err) 29 | } 30 | } 31 | 32 | -------------------------------------------------------------------------------- /rbft/node/com.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/rbft/crypto" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 6 | "github.com/hyperledger/fabric/orderer/consensus/rbft/server" 7 | "log" 8 | ) 9 | 10 | // 广播 com 消息 11 | func (n *Node) broadCastComMessage() { 12 | content, comMsg := message.NewComMsg( 13 | n.view, 
n.id, 14 | n.privateScalar, n.publicSet, n.lastBlock.Content()) 15 | 16 | n.buffer.ComBuffer.Lock() 17 | n.buffer.ComBuffer.PushHandle(comMsg, message.LessComMsg) 18 | n.buffer.ComBuffer.ULock() 19 | 20 | n.BroadCast(content, server.ComEntry) 21 | } 22 | 23 | // 接收 com 消息 24 | func (n *Node) comRecvThread() { 25 | for { 26 | select { 27 | case msg := <-n.comRecv: 28 | if crypto.RingVerify(msg.Com, msg.ACom, n.publicSet) { 29 | // 合法消息 30 | log.Printf("[Com] recv com message from(%d) view(%d)", msg.Id, msg.View) 31 | n.buffer.ComBuffer.Lock() 32 | n.buffer.ComBuffer.PushHandle(msg, message.LessComMsg) 33 | n.buffer.ComBuffer.ULock() 34 | }else { 35 | log.Printf("[Com] recv error com message from(%d) view(%d)", msg.Id, msg.View) 36 | } 37 | } 38 | } 39 | } 40 | 41 | // 处理 com 消息 42 | func (n *Node) comHandle() []*message.ComMsg { 43 | n.buffer.ComBuffer.Lock() 44 | defer n.buffer.ComBuffer.ULock() 45 | 46 | if n.buffer.ComBuffer.Empty() { 47 | // 空等待 48 | return nil 49 | } else if n.buffer.ComBuffer.Top().(*message.ComMsg).View < n.view { 50 | // 过期凭证 51 | n.buffer.ComBuffer.Pop() 52 | return nil 53 | } else if n.buffer.ComBuffer.Top().(*message.ComMsg).View > n.view { 54 | // 超前凭证,发送错误 55 | return nil 56 | } else if n.buffer.ComBuffer.LenHandle(message.EqualComMsg) == int(n.fault*3+1) { 57 | // 打包当前 view 的所有凭证 58 | coms := n.buffer.ComBuffer.BatchHandle(message.EqualComMsg) 59 | ret := make([]*message.ComMsg, 0) 60 | for _, c := range coms { 61 | ret = append(ret, c.(*message.ComMsg)) 62 | } 63 | return ret 64 | } 65 | return nil 66 | } 67 | -------------------------------------------------------------------------------- /rbft/node/commit.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/rbft/crypto" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 6 | cb "github.com/hyperledger/fabric/protos/common" 7 | "log" 8 | ) 9 | 10 | 
func (n *Node) checkCommitMsg(msg *message.CommitMsg) bool { 11 | return crypto.TblsVerify([]byte(msg.Digest), msg.Threshold, n.tblsPublicPoly) 12 | } 13 | 14 | func (n *Node) commitHandle() bool { 15 | n.buffer.CommitBuffer.Lock() 16 | defer n.buffer.CommitBuffer.ULock() 17 | 18 | if n.buffer.CommitBuffer.Empty() { 19 | return false 20 | } else if n.buffer.CommitBuffer.Top().(*message.CommitMsg).View < n.view { 21 | n.buffer.CommitBuffer.Pop() 22 | return false 23 | } else if n.buffer.CommitBuffer.Top().(*message.CommitMsg).Sequence < n.sequence.PrepareSequence() { 24 | n.buffer.CommitBuffer.Pop() 25 | return false 26 | } else if n.buffer.CommitBuffer.Top().(*message.CommitMsg).View > n.view { 27 | return false 28 | } else if n.buffer.CommitBuffer.Top().(*message.CommitMsg).Sequence > n.sequence.PrepareSequence() { 29 | return false 30 | } else if n.buffer.CommitBuffer.Top().(*message.CommitMsg).Digest != n.nowProposal.Digest { 31 | n.buffer.CommitBuffer.Pop() 32 | return false 33 | } 34 | 35 | n.buffer.CommitBuffer.Pop() 36 | if n.nowProposal.BlockType { 37 | // 直接写特殊区块 38 | n.prevBlock = n.lastBlock 39 | n.lastBlock = message.NewLastBlockByContent(n.nowProposal.PayLoad) 40 | n.sequence.NextSequence() 41 | n.view = n.view + 1 42 | log.Printf("[Commit] com block, change view(%d) sequecen(%d)", n.view, n.sequence.lastSequence) 43 | } else { 44 | // 执行 fabric 区块 45 | block := message.NewBlockByContent(n.nowProposal.PayLoad) 46 | // pending state 47 | pending := make(map[string]bool) 48 | for _, r := range block.Requests { 49 | op := r.Op 50 | channel := op.ChannelID 51 | configSeq := op.ConfigSeq 52 | msg := r.Op.Envelope 53 | switch op.Type { 54 | case message.TYPECONFIG: 55 | var err error 56 | seq := n.supports[channel].Sequence() 57 | if configSeq < seq { 58 | if msg, _, err = n.supports[r.Op.ChannelID].ProcessConfigMsg(r.Op.Envelope); err != nil { 59 | log.Println(err) 60 | } 61 | } 62 | batch := n.supports[channel].BlockCutter().Cut() 63 | if batch != nil { 64 
| block := n.supports[channel].CreateNextBlock(batch) 65 | n.supports[channel].WriteBlock(block, nil) 66 | } 67 | pending[channel] = false 68 | // write config block 69 | block := n.supports[channel].CreateNextBlock([]*cb.Envelope{msg}) 70 | n.supports[channel].WriteConfigBlock(block, nil) 71 | case message.TYPENORMAL: 72 | seq := n.supports[channel].Sequence() 73 | if configSeq < seq { 74 | if _, err := n.supports[channel].ProcessNormalMsg(msg); err != nil { 75 | } 76 | } 77 | batches, p := n.supports[channel].BlockCutter().Ordered(msg) 78 | for _, batch := range batches { 79 | block := n.supports[channel].CreateNextBlock(batch) 80 | n.supports[channel].WriteBlock(block, nil) 81 | } 82 | pending[channel] = p 83 | } 84 | } 85 | // pending state 86 | for k, v := range pending { 87 | if v { 88 | batch := n.supports[k].BlockCutter().Cut() 89 | if batch != nil { 90 | block := n.supports[k].CreateNextBlock(batch) 91 | n.supports[k].WriteBlock(block, nil) 92 | } 93 | } 94 | } 95 | n.sequence.NextSequence() 96 | n.view = n.view + 1 97 | n.lastTimeStamp = block.TimeStamp 98 | log.Printf("[Commit] request block(%d), change view(%d) sequecen(%d)", n.lastTimeStamp, n.view, n.sequence.lastSequence) 99 | } 100 | return true 101 | } 102 | 103 | func (n *Node) commitRecvThread() { 104 | for { 105 | select { 106 | case msg := <-n.commitRecv: 107 | if n.checkCommitMsg(msg) { 108 | log.Printf("[Commit] verify commit message for proposal(%s) success", msg.Digest[:9]) 109 | n.buffer.CommitBuffer.Lock() 110 | n.buffer.CommitBuffer.PushHandle(msg, message.LessCommitMsg) 111 | n.buffer.CommitBuffer.ULock() 112 | } else { 113 | log.Printf("[Commit] verify commit message for proposal(%s) failed", msg.Digest[:9]) 114 | } 115 | } 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /rbft/node/node.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | 
"github.com/hyperledger/fabric/orderer/consensus" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/cmd" 6 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 7 | "github.com/hyperledger/fabric/orderer/consensus/rbft/server" 8 | "go.dedis.ch/kyber" 9 | "go.dedis.ch/kyber/share" 10 | "log" 11 | "time" 12 | ) 13 | 14 | var GNode *Node = nil 15 | 16 | type Node struct { 17 | cfg *cmd.SharedConfig 18 | server *server.HttpServer 19 | id message.Identify 20 | view message.View 21 | table map[message.Identify]string 22 | fault uint 23 | 24 | privateScalar kyber.Scalar 25 | publicSet []kyber.Point 26 | tblsPublicPoly *share.PubPoly 27 | tblsPrivateScalar kyber.Scalar 28 | 29 | state State 30 | nowProposal *message.Proposal 31 | lastBlock *message.LastBlock 32 | prevBlock *message.LastBlock 33 | lastTimeStamp message.TimeStamp 34 | 35 | sequence *Sequence 36 | buffer *message.Buffer 37 | 38 | MsgRecv chan *message.Message 39 | blockRecv chan *message.Block 40 | comRecv chan *message.ComMsg 41 | proposalRecv chan *message.Proposal 42 | prepareRecv chan *message.PrepareMsg 43 | commitRecv chan *message.CommitMsg 44 | 45 | supports map[string]consensus.ConsenterSupport 46 | } 47 | 48 | func NewNode(cfg *cmd.SharedConfig, support consensus.ConsenterSupport) *Node { 49 | node := &Node{ 50 | // config 51 | cfg: cfg, 52 | // http server 53 | server: server.NewServer(cfg), 54 | // information about node 55 | id: cfg.Id, 56 | view: cfg.View, 57 | table: cfg.Table, 58 | fault: cfg.Fault, 59 | // crypto 60 | privateScalar: cfg.PrivateScalar, 61 | publicSet: cfg.PublicSet, 62 | tblsPublicPoly: cfg.TblsPubPoly, 63 | tblsPrivateScalar: cfg.TblsPrivateScalar, 64 | // lastblock 65 | lastBlock: message.NewLastBlock(), 66 | prevBlock: message.NewLastBlock(), 67 | lastTimeStamp: 0, 68 | nowProposal: nil, 69 | // lastReply state 70 | sequence: NewSequence(cfg), 71 | // the message buffer to store msg 72 | buffer: message.NewBuffer(), 73 | state: STATESENDORDER, 74 | // 
chan for message 75 | MsgRecv: make(chan *message.Message, 100), 76 | blockRecv: make(chan *message.Block), 77 | comRecv: make(chan *message.ComMsg), 78 | proposalRecv: make(chan *message.Proposal), 79 | prepareRecv: make(chan *message.PrepareMsg), 80 | commitRecv: make(chan *message.CommitMsg), 81 | // chan for notify pre-prepare send thread 82 | supports: make(map[string]consensus.ConsenterSupport), 83 | } 84 | log.Printf("[Node] the node id:%d, view:%d, fault number:%d, sequence: %d, lastblock:%s\n", 85 | node.id, node.view, node.fault, node.sequence.PrepareSequence(), message.Hash(node.lastBlock.Content())[0:9]) 86 | 87 | node.RegisterChain(support) 88 | 89 | return node 90 | } 91 | 92 | func (n *Node) RegisterChain(support consensus.ConsenterSupport) { 93 | if _, ok := n.supports[support.ChainID()]; ok { 94 | return 95 | } 96 | log.Printf("[Node] Register the chain(%s)", support.ChainID()) 97 | n.supports[support.ChainID()] = support 98 | } 99 | 100 | func (n *Node) Run() { 101 | // register chan for client and server 102 | n.server.RegisterBlockChan(n.blockRecv) 103 | n.server.RegisterComChan(n.comRecv) 104 | n.server.RegisterProposalChan(n.proposalRecv) 105 | n.server.RegisterPrepareChan(n.prepareRecv) 106 | n.server.RegisterCommitChan(n.commitRecv) 107 | 108 | go n.clientThread() 109 | go n.server.Run() 110 | 111 | timer := time.After(time.Second * 3) 112 | <-timer 113 | 114 | go n.stateThread() 115 | 116 | go n.blockRecvThread() 117 | go n.proposalRecvThread() 118 | go n.comRecvThread() 119 | go n.prepareRecvThread() 120 | go n.commitRecvThread() 121 | } 122 | 123 | func (n *Node) GetId() message.Identify { 124 | return n.id 125 | } 126 | 127 | func (n *Node) clientThread() { 128 | log.Printf("[Client] run the client thread") 129 | requestBuffer := make([]*message.Message, 0) 130 | var timer <-chan time.Time 131 | for { 132 | select { 133 | case msg := <-n.MsgRecv: 134 | timer = nil 135 | requestBuffer = append(requestBuffer, msg) 136 | if msg.Op.Type == 
message.TYPECONFIG { 137 | // 有特殊配置区块需要立即写入 138 | block := message.Block{ 139 | Requests: requestBuffer, 140 | TimeStamp: message.TimeStamp(time.Now().UnixNano()), 141 | } 142 | n.BroadCastAll(block.Content(), server.BlockEntry) 143 | log.Printf("[Client] send request(%d) due to config", len(requestBuffer)) 144 | requestBuffer = make([]*message.Message, 0) 145 | }else if len(requestBuffer) >= 512 { 146 | // 达到区块配置交易最大数量 147 | block := message.Block{ 148 | Requests: requestBuffer, 149 | TimeStamp: message.TimeStamp(time.Now().UnixNano()), 150 | } 151 | n.BroadCastAll(block.Content(), server.BlockEntry) 152 | log.Printf("[Client] send request(%d) due to oversize", len(requestBuffer)) 153 | requestBuffer = make([]*message.Message, 0) 154 | } 155 | timer = time.After(time.Second) 156 | case <-timer: 157 | timer = nil 158 | if len(requestBuffer) > 0 { 159 | // 超时打包 160 | block := message.Block{ 161 | Requests: requestBuffer, 162 | TimeStamp: message.TimeStamp(time.Now().UnixNano()), 163 | } 164 | n.BroadCastAll(block.Content(), server.BlockEntry) 165 | log.Printf("[Client] send request(%d) due to overtime", len(requestBuffer)) 166 | requestBuffer = make([]*message.Message, 0) 167 | } 168 | timer = time.After(time.Second) 169 | } 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /rbft/node/prepare.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/rbft/crypto" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 6 | "github.com/hyperledger/fabric/orderer/consensus/rbft/server" 7 | "log" 8 | ) 9 | 10 | func (n *Node) boradCastPrepareMsg(proposal *message.Proposal) { 11 | sigPart := crypto.TblsSign([]byte(proposal.Digest), int(n.id), n.tblsPrivateScalar) 12 | content, msg := message.NewPrepareMsg(n.view, proposal.Sequence, proposal.Digest, sigPart) 13 | 14 | n.buffer.PrepareBuffer.Lock() 
15 | n.buffer.PrepareBuffer.PushHandle(msg, message.LessPrepareMsg) 16 | n.buffer.PrepareBuffer.ULock() 17 | 18 | n.BroadCast(content, server.PrepareEntry) 19 | } 20 | 21 | func (n *Node) checkPrepareMsg(prepare *message.PrepareMsg) bool { 22 | ret := crypto.TblsRecover([]byte(prepare.Digest), [][]byte{prepare.PartSig}, 23 | 1, int(n.fault * 3 + 1), n.tblsPublicPoly) 24 | 25 | if len(ret) == 0 { 26 | log.Printf("[Prepare] message check error, part sig error") 27 | return false 28 | } 29 | 30 | return true 31 | } 32 | 33 | func (n *Node) prepareRecvThread() { 34 | for { 35 | select { 36 | // 按序号缓存,稍后 check 37 | case msg := <-n.prepareRecv: 38 | if n.checkPrepareMsg(msg) { 39 | log.Printf("[Prepare] success verify prepare message for proposal(%s)", msg.Digest[:9]) 40 | n.buffer.PrepareBuffer.Lock() 41 | n.buffer.PrepareBuffer.PushHandle(msg, message.LessPrepareMsg) 42 | n.buffer.PrepareBuffer.ULock() 43 | } 44 | 45 | } 46 | } 47 | } 48 | 49 | func (n *Node) prepareHandle() []byte { 50 | n.buffer.PrepareBuffer.Lock() 51 | defer n.buffer.PrepareBuffer.ULock() 52 | 53 | if n.buffer.PrepareBuffer.Empty() { 54 | // 没有缓存 55 | return nil 56 | } else if n.buffer.PrepareBuffer.Top().(*message.PrepareMsg).View < n.view { 57 | // view 过期 58 | n.buffer.PrepareBuffer.Pop() 59 | return nil 60 | }else if n.buffer.PrepareBuffer.Top().(*message.PrepareMsg).Sequence < n.sequence.PrepareSequence() { 61 | // sequence 过期 62 | n.buffer.PrepareBuffer.Pop() 63 | return nil 64 | }else if n.buffer.PrepareBuffer.Top().(*message.PrepareMsg).Sequence == n.sequence.PrepareSequence() && 65 | n.buffer.PrepareBuffer.Top().(*message.PrepareMsg).View == n.view && 66 | n.buffer.PrepareBuffer.Top().(*message.PrepareMsg).Digest != n.nowProposal.Digest { 67 | // view 和 sequence 正确 处理的 proposal 不正确 68 | n.buffer.PrepareBuffer.Pop() 69 | return nil 70 | }else if n.buffer.PrepareBuffer.Top().(*message.PrepareMsg).Sequence > n.sequence.PrepareSequence() { 71 | return nil 72 | }else if 
n.buffer.PrepareBuffer.Top().(*message.PrepareMsg).View > n.view { 73 | return nil 74 | } 75 | 76 | // view 和 sequence 和 proposal 正确 取出相同 77 | if n.buffer.PrepareBuffer.LenHandle(message.EqualPrepareMsg) >= int(2 * n.fault + 1) { 78 | parSig := n.buffer.PrepareBuffer.BatchHandle(message.EqualPrepareMsg) 79 | toRecover := make([][]byte, 0) 80 | for _, p := range parSig { 81 | toRecover = append(toRecover, p.(*message.PrepareMsg).PartSig) 82 | } 83 | t := crypto.TblsRecover([]byte(n.nowProposal.Digest), toRecover, 84 | int(n.fault) * 2 + 1, int(n.fault) * 3 + 1, n.tblsPublicPoly) 85 | if len(t) == 0 { 86 | log.Printf("[Prepare] can not recover the part sig\n") 87 | return nil 88 | } 89 | return t 90 | } 91 | 92 | return nil 93 | } 94 | 95 | -------------------------------------------------------------------------------- /rbft/node/proposal.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "encoding/hex" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/crypto" 6 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 7 | "log" 8 | ) 9 | 10 | // 检查 proposal 消息是否正确 11 | func (n *Node) CheckProposalMessage(msg *message.Proposal) bool { 12 | if msg.Sequence < n.sequence.PrepareSequence() { 13 | return false 14 | } 15 | if msg.View < n.view { 16 | return false 17 | } 18 | if msg.Digest != message.Hash(msg.PayLoad){ 19 | return false 20 | } 21 | // 开始时提案 22 | if n.view == 0 { 23 | return crypto.Verify(n.prevBlock.Content(), msg.CP, n.publicSet[0]) 24 | } 25 | 26 | return true 27 | } 28 | 29 | func (n *Node) VerifyProposalMessage(msg *message.Proposal) bool { 30 | r := hex.EncodeToString(message.HashByte(msg.CP)) 31 | if _, ok := n.lastBlock.Coms[r]; !ok && n.view != 0 { 32 | return false 33 | } 34 | return crypto.Verify(n.prevBlock.Content(), msg.CP, n.publicSet[n.lastBlock.Coms[r]]) 35 | } 36 | 37 | func (n *Node) proposalRecvThread() { 38 | for { 39 | select { 40 | case msg := <- 
n.proposalRecv: 41 | if n.CheckProposalMessage(msg) { 42 | log.Printf("[Proposal] recv proposal message view(%d) sequence(%d)", msg.View, msg.Sequence) 43 | n.buffer.ProposalBuffer.Lock() 44 | n.buffer.ProposalBuffer.PushHandle(msg, message.LessproposalMsg) 45 | n.buffer.ProposalBuffer.ULock() 46 | }else { 47 | log.Printf("[Proposal] the proposal block error") 48 | } 49 | } 50 | } 51 | } 52 | 53 | func (n *Node) proposalHandle() bool { 54 | n.buffer.ProposalBuffer.Lock() 55 | defer n.buffer.ProposalBuffer.ULock() 56 | 57 | if n.buffer.ProposalBuffer.Empty() { 58 | return false 59 | }else if n.buffer.ProposalBuffer.Top().(*message.Proposal).View < n.view { 60 | n.buffer.ProposalBuffer.Pop() 61 | return false 62 | }else if n.buffer.ProposalBuffer.Top().(*message.Proposal).View > n.view { 63 | return false 64 | } 65 | 66 | t := n.buffer.ProposalBuffer.Top().(*message.Proposal) 67 | n.buffer.ProposalBuffer.Pop() 68 | 69 | if n.VerifyProposalMessage(t) == false { 70 | log.Printf("[Proposal] verify the proposal(%s) cp(%s) failed", t.Digest[:9], hex.EncodeToString(t.CP)[:9]) 71 | log.Println(n.lastBlock.Coms) 72 | return false 73 | } 74 | 75 | n.nowProposal = t 76 | return true 77 | } 78 | 79 | -------------------------------------------------------------------------------- /rbft/node/sequence.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/rbft/cmd" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 6 | "sync" 7 | ) 8 | 9 | type Sequence struct { 10 | lastSequence message.Sequence 11 | locker *sync.RWMutex 12 | } 13 | 14 | func NewSequence(cfg *cmd.SharedConfig) *Sequence { 15 | return &Sequence{ 16 | lastSequence: 0, 17 | locker: new(sync.RWMutex), 18 | } 19 | } 20 | 21 | 22 | func (s *Sequence) NextSequence() { 23 | s.locker.Lock() 24 | defer s.locker.Unlock() 25 | 26 | s.lastSequence = s.lastSequence + 1 27 | } 28 | 29 | func (s 
*Sequence) PrepareSequence() (ret message.Sequence) { 30 | s.locker.RLock() 31 | defer s.locker.RUnlock() 32 | 33 | ret = s.lastSequence + 1 34 | return 35 | } 36 | 37 | -------------------------------------------------------------------------------- /rbft/node/state.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/rbft/crypto" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 6 | "github.com/hyperledger/fabric/orderer/consensus/rbft/server" 7 | "log" 8 | "time" 9 | ) 10 | 11 | type State int 12 | 13 | const ( 14 | // 搜集 15 | STATESENDORDER State = iota 16 | STATERECVORDER 17 | // 提案 18 | STATESENDPROPOSAL 19 | STATERECVPROPOSAL 20 | // 准备 21 | STATESENDPREPARE 22 | STATERECVPREPARE 23 | // 提交 24 | STATESENDCOMMIT 25 | STATERECVCOMMIT 26 | // 空状态 27 | STATENONE 28 | ) 29 | 30 | func (n *Node) stateThread() { 31 | log.Printf("[State] start the state thread") 32 | 33 | var nowLastBlock *message.LastBlock = nil 34 | var buffer []byte 35 | 36 | for { 37 | switch n.state { 38 | case STATENONE: 39 | 40 | case STATESENDORDER: 41 | n.broadCastComMessage() 42 | // 广播凭证 43 | // 开始状态 view = 0 44 | // id = 0 主节点,搜集凭证 45 | // id != 0 备节点,接收特殊区块 46 | // 非开始状态 view != 0 同上 47 | if n.view == 0 { 48 | if n.id == 0 { 49 | n.state = STATERECVORDER 50 | log.Printf("[State] send com message and state to recv com message, view(%d)", n.view) 51 | } else { 52 | n.state = STATERECVPROPOSAL 53 | log.Printf("[State] send com message and state to recv porposal message, view(%d)", n.view) 54 | } 55 | } else { 56 | if n.lastBlock.GetPrimaryIdentify(n.view) == n.id { 57 | n.state = STATERECVORDER 58 | log.Printf("[State] send com message and state to recv com, view(%d)", n.view) 59 | } else { 60 | n.state = STATERECVPROPOSAL 61 | log.Printf("[State] send com message and state to recv proposal, view(%d)", n.view) 62 | } 63 | } 64 | 65 | case STATERECVORDER: 
66 | // 仅主节点搜集凭证序列 67 | coms := n.comHandle() 68 | if coms == nil { 69 | log.Printf("[State] recv com message not enough") 70 | time.Sleep(time.Millisecond * 500) 71 | } else { 72 | // 打包提案区块 73 | log.Printf("[State] recv com message enough to create last block") 74 | nowLastBlock = message.NewLastBlockByComs(coms) 75 | n.state = STATESENDPROPOSAL 76 | } 77 | 78 | case STATESENDPROPOSAL: 79 | // 主节点发起特殊区块提案 80 | cp := crypto.Sign(n.prevBlock.Content(), n.publicSet[n.id], n.privateScalar) 81 | if n.view == 0 || (int(n.view+1)%len(n.table)) == 0 { 82 | content, msg := message.NewProposalByLastBlock( 83 | n.view, n.sequence.PrepareSequence(), 84 | cp, nowLastBlock) 85 | n.nowProposal = msg 86 | log.Printf("[State] broadcast last block, proposal(%s)", n.nowProposal.Digest[:9]) 87 | n.BroadCast(content, server.ProposalEntry) 88 | n.state = STATESENDPREPARE 89 | }else { 90 | // 主节点发起区块提案 91 | n.buffer.BlockBuffer.Lock() 92 | if n.buffer.BlockBuffer.Empty() { 93 | // 无区块处理 94 | n.buffer.BlockBuffer.ULock() 95 | time.Sleep(time.Millisecond * 500) 96 | }else if n.buffer.BlockBuffer.Top().(*message.Block).TimeStamp <= n.lastTimeStamp { 97 | // 过期区块 98 | n.buffer.BlockBuffer.Pop() 99 | n.buffer.BlockBuffer.ULock() 100 | }else { 101 | // 打包请求 102 | n.buffer.BlockBuffer.ULock() 103 | content, m := n.blockHandle(cp) 104 | if content != nil { 105 | n.nowProposal = m 106 | log.Printf("[State] broadcast request block, now proposal(%s)", n.nowProposal.Digest[:9]) 107 | n.BroadCast(content, server.ProposalEntry) 108 | n.state = STATESENDPREPARE 109 | } 110 | } 111 | } 112 | 113 | case STATERECVPROPOSAL: 114 | // 备节点接收提案 115 | if n.proposalHandle() { 116 | log.Printf("[State] recv proposal(%s) to handle", n.nowProposal.Digest[:9]) 117 | n.state = STATESENDPREPARE 118 | } else { 119 | time.Sleep(time.Millisecond * 500) 120 | } 121 | 122 | case STATESENDPREPARE: 123 | log.Printf("[State] send prepare message for proposal(%s)", n.nowProposal.Digest[:9]) 124 | // 发送准备消息 125 | 
n.boradCastPrepareMsg(n.nowProposal) 126 | n.state = STATERECVPREPARE 127 | 128 | case STATERECVPREPARE: 129 | // 接收准备消息并合成 130 | threshold := n.prepareHandle() 131 | if threshold != nil { 132 | log.Printf("[State] prepare message enough to commit the proposal(%s)", n.nowProposal.Digest[:9]) 133 | buffer, _ = message.NewCommitMsg(n.view, n.sequence.PrepareSequence(), 134 | n.nowProposal.Digest, threshold) 135 | n.state = STATESENDCOMMIT 136 | }else { 137 | log.Printf("[State] wait prepare message for proposal(%s)", n.nowProposal.Digest[:9]) 138 | time.Sleep(time.Millisecond * 500) 139 | } 140 | 141 | case STATESENDCOMMIT: 142 | log.Printf("[State] send commit message for proposal(%s)", n.nowProposal.Digest[:9]) 143 | n.BroadCast(buffer, server.CommitEntry) 144 | n.state = STATERECVCOMMIT 145 | 146 | case STATERECVCOMMIT: 147 | log.Printf("[State] wait commit message for proposal(%s)", n.nowProposal.Digest[:9]) 148 | if n.commitHandle() { 149 | log.Printf("[State] commit enough to handle the proposal(%s)", n.nowProposal.Digest[:9]) 150 | if (int(n.view + 1) % len(n.table)) == 0{ 151 | log.Printf("[State] the last view to send order") 152 | n.state = STATESENDORDER 153 | }else if n.lastBlock.GetPrimaryIdentify(n.view) == n.id { 154 | log.Printf("[State] new primary to send proposal") 155 | n.state = STATESENDPROPOSAL 156 | }else { 157 | log.Printf("[State] not primary to recv proposal") 158 | n.state = STATERECVPROPOSAL 159 | } 160 | }else { 161 | time.Sleep(time.Millisecond * 500) 162 | } 163 | 164 | 165 | } 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /rbft/server/handle.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 6 | "log" 7 | "net/http" 8 | ) 9 | 10 | func CheckJsonDecode(err error) bool { 11 | if err != nil { 12 | log.Printf("[Http] error to 
decode json") 13 | return false 14 | } 15 | return true 16 | } 17 | 18 | func (s *HttpServer) HttpCom(w http.ResponseWriter, r *http.Request) { 19 | var msg message.ComMsg 20 | if !CheckJsonDecode(json.NewDecoder(r.Body).Decode(&msg)) { 21 | return 22 | } 23 | s.comRecv <- &msg 24 | } 25 | 26 | func (s *HttpServer) HttpProposal(w http.ResponseWriter, r *http.Request) { 27 | var msg message.Proposal 28 | if !CheckJsonDecode(json.NewDecoder(r.Body).Decode(&msg)) { 29 | return 30 | } 31 | s.proposalRecv <- &msg 32 | } 33 | 34 | func (s *HttpServer) HttpPrepare(w http.ResponseWriter, r *http.Request) { 35 | var msg message.PrepareMsg 36 | if !CheckJsonDecode(json.NewDecoder(r.Body).Decode(&msg)) { 37 | return 38 | } 39 | s.prepareRecv <- &msg 40 | } 41 | 42 | func (s *HttpServer) HttpCommit(w http.ResponseWriter, r *http.Request) { 43 | var msg message.CommitMsg 44 | if !CheckJsonDecode(json.NewDecoder(r.Body).Decode(&msg)) { 45 | return 46 | } 47 | s.commitRecv <- &msg 48 | } 49 | 50 | func (s *HttpServer) HttpBlock(w http.ResponseWriter, r *http.Request) { 51 | var msg message.Block 52 | if !CheckJsonDecode(json.NewDecoder(r.Body).Decode(&msg)) { 53 | return 54 | } 55 | s.blockRecv <- &msg 56 | } -------------------------------------------------------------------------------- /rbft/server/server.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/hyperledger/fabric/orderer/consensus/rbft/cmd" 5 | "github.com/hyperledger/fabric/orderer/consensus/rbft/message" 6 | "log" 7 | "net/http" 8 | "strconv" 9 | ) 10 | 11 | const ( 12 | BlockEntry = "/block" 13 | ComEntry = "/com" 14 | ProposalEntry = "/proposal" 15 | PrepareEntry = "/prepare" 16 | CommitEntry = "/commit" 17 | ) 18 | 19 | // http 监听请求 20 | type HttpServer struct { 21 | port int 22 | server *http.Server 23 | 24 | blockRecv chan *message.Block 25 | comRecv chan *message.ComMsg 26 | proposalRecv chan *message.Proposal 27 | 
prepareRecv chan *message.PrepareMsg 28 | commitRecv chan *message.CommitMsg 29 | } 30 | 31 | func NewServer(cfg *cmd.SharedConfig) *HttpServer { 32 | httpServer := &HttpServer{ 33 | port: cfg.Port, 34 | server: nil, 35 | } 36 | // set server 37 | return httpServer 38 | } 39 | 40 | // register server service and run 41 | func (s *HttpServer) Run() { 42 | log.Printf("[Node] start the listen server thread") 43 | s.registerServer() 44 | } 45 | 46 | // config server: to register the handle chan 47 | func (s *HttpServer) RegisterBlockChan(c chan *message.Block) { 48 | log.Printf("[Server] register the chan for recv block msg") 49 | s.blockRecv = c 50 | } 51 | 52 | func (s *HttpServer) RegisterComChan(com chan *message.ComMsg) { 53 | log.Printf("[Server] register the chan for recv com msg") 54 | s.comRecv = com 55 | } 56 | 57 | func (s *HttpServer) RegisterProposalChan(c chan *message.Proposal) { 58 | log.Printf("[Server] register the chan for recv proposal msg") 59 | s.proposalRecv = c 60 | } 61 | 62 | func (s *HttpServer) RegisterPrepareChan(c chan *message.PrepareMsg) { 63 | log.Printf("[Server] register the chan for recv prepare msg") 64 | s.prepareRecv = c 65 | } 66 | 67 | func (s *HttpServer) RegisterCommitChan(c chan *message.CommitMsg) { 68 | log.Printf("[Server] register the chan for recv commit msg") 69 | s.commitRecv = c 70 | } 71 | 72 | func (s *HttpServer) registerServer() { 73 | log.Printf("[Server] set listen port:%d\n", s.port) 74 | 75 | httpRegister := map[string]func(http.ResponseWriter, *http.Request){ 76 | BlockEntry: s.HttpBlock, 77 | ComEntry: s.HttpCom, 78 | ProposalEntry: s.HttpProposal, 79 | PrepareEntry: s.HttpPrepare, 80 | CommitEntry: s.HttpCommit, 81 | } 82 | 83 | mux := http.NewServeMux() 84 | for k, v := range httpRegister { 85 | log.Printf("[Server] register the func for %s", k) 86 | mux.HandleFunc(k, v) 87 | } 88 | 89 | s.server = &http.Server{ 90 | Addr: ":" + strconv.Itoa(s.port), 91 | Handler: mux, 92 | } 93 | 94 | if err := 
s.server.ListenAndServe(); err != nil { 95 | log.Printf("[Server Error] %s", err) 96 | return 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /solo-network/base/docker-compose-base.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | orderer.yzm.com: 4 | container_name: orderer.yzm.com 5 | extends: 6 | file: peer-base.yaml 7 | service: orderer-base 8 | volumes: 9 | - ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block 10 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer.yzm.com/msp:/var/hyperledger/orderer/msp 11 | - ../crypto-config/ordererOrganizations/yzm.com/orderers/orderer.yzm.com/tls:/var/hyperledger/orderer/tls 12 | - ../production/orderer:/var/hyperledger/production/orderer 13 | ports: 14 | - 7050:7050 15 | 16 | peer0.orga.com: 17 | container_name: peer0.orga.com 18 | extends: 19 | file: peer-base.yaml 20 | service: peer-base 21 | environment: 22 | - CORE_PEER_ID=peer0.orga.com 23 | - CORE_PEER_ADDRESS=peer0.orga.com:7051 24 | - CORE_PEER_LISTENADDRESS=0.0.0.0:7051 25 | - CORE_PEER_CHAINCODEADDRESS=peer0.orga.com:7052 26 | - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052 27 | - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.orga.com:7051 28 | - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.orga.com:7051 29 | - CORE_PEER_LOCALMSPID=OrgAMSP 30 | volumes: 31 | - /var/run/:/host/var/run/ 32 | - ../crypto-config/peerOrganizations/orga.com/peers/peer0.orga.com/msp:/etc/hyperledger/fabric/msp 33 | - ../crypto-config/peerOrganizations/orga.com/peers/peer0.orga.com/tls:/etc/hyperledger/fabric/tls 34 | - ../production/orga:/var/hyperledger/production 35 | ports: 36 | - 7051:7051 37 | 38 | peer0.orgb.com: 39 | container_name: peer0.orgb.com 40 | extends: 41 | file: peer-base.yaml 42 | service: peer-base 43 | environment: 44 | - CORE_PEER_ID=peer0.orgb.com 45 | - CORE_PEER_ADDRESS=peer0.orgb.com:8051 46 | - 
CORE_PEER_LISTENADDRESS=0.0.0.0:8051 47 | - CORE_PEER_CHAINCODEADDRESS=peer0.orgb.com:8052 48 | - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:8052 49 | - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.orgb.com:8051 50 | - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.orgb.com:8051 51 | - CORE_PEER_LOCALMSPID=OrgBMSP 52 | volumes: 53 | - /var/run/:/host/var/run/ 54 | - ../crypto-config/peerOrganizations/orgb.com/peers/peer0.orgb.com/msp:/etc/hyperledger/fabric/msp 55 | - ../crypto-config/peerOrganizations/orgb.com/peers/peer0.orgb.com/tls:/etc/hyperledger/fabric/tls 56 | - ../production/orgb:/var/hyperledger/production 57 | ports: 58 | - 8051:8051 59 | -------------------------------------------------------------------------------- /solo-network/base/peer-base.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | peer-base: 4 | image: hyperledger/fabric-peer:$IMAGETAG 5 | environment: 6 | - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock 7 | - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=solo-network_solonet 8 | - FABRIC_LOGGING_SPEC=INFO 9 | - CORE_PEER_TLS_ENABLED=false 10 | - CORE_PEER_GOSSIP_USELEADERELECTION=true 11 | - CORE_PEER_GOSSIP_ORGLEADER=false 12 | - CORE_PEER_PROFILE_ENABLED=true 13 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer 14 | command: peer node start 15 | 16 | orderer-base: 17 | image: hyperledger/fabric-orderer:$IMAGETAG 18 | environment: 19 | - FABRIC_LOGGING_SPEC=INFO 20 | - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0 21 | - ORDERER_GENERAL_GENESISMETHOD=file 22 | - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block 23 | - ORDERER_GENERAL_LOCALMSPID=OrdererMSP 24 | - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp 25 | - ORDERER_GENERAL_TLS_ENABLED=false 26 | - ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1 27 | - ORDERER_KAFKA_VERBOSE=true 28 | - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt 29 | - 
ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key 30 | - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt] 31 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric 32 | command: orderer 33 | 34 | -------------------------------------------------------------------------------- /solo-network/benchmarks/config.yaml: -------------------------------------------------------------------------------- 1 | test: 2 | name: solo-network 3 | description: solo-network 4 | workers: 5 | type: local 6 | number: 4 7 | 8 | rounds: 9 | - label: open 10 | description: open 11 | txNumber: 1000 12 | rateControl: 13 | type: fixed-rate 14 | opts: 15 | tps: 100 16 | callback: ../chaincode/demo/callback/open.js 17 | 18 | - label: transfer 19 | description: transfer 20 | txNumber: 1000 21 | rateControl: 22 | type: fixed-rate 23 | opts: 24 | tps: 40 25 | callback: ../chaincode/demo/callback/transfer.js 26 | 27 | - label: query 28 | description: query 29 | txNumber: 1000 30 | rateControl: 31 | type: fixed-rate 32 | opts: 33 | tps: 100 34 | callback: ../chaincode/demo/callback/query.js 35 | 36 | - label: delete 37 | description: delete 38 | txNumber: 1000 39 | rateControl: 40 | type: fixed-rate 41 | opts: 42 | tps: 100 43 | callback: ../chaincode/demo/callback/delete.js 44 | 45 | monitor: 46 | interval: 1 47 | type: 48 | - docker 49 | docker: 50 | containers: 51 | - peer0.orga.com 52 | - peer0.orgb.com 53 | - orderer.yzm.com 54 | -------------------------------------------------------------------------------- /solo-network/benchmarks/network.yaml: -------------------------------------------------------------------------------- 1 | name: Fabric 2 | version: "1.0" 3 | 4 | mutual-tls: false 5 | 6 | caliper: 7 | blockchain: fabric 8 | command: 9 | start: scripts/gen.sh;scripts/utils.sh up 10 | end: scripts/utils.sh down 11 | 12 | info: 13 | Version: 1.4.4 14 | Size: 2 Orgs with 1 Peer 15 | Orderer: Solo 16 | Distribution: Single Host 17 | 
StateDB: GoLevelDB 18 | 19 | clients: 20 | peer0.orga.com: 21 | client: 22 | organization: OrgA 23 | credentialStore: 24 | path: /tmp/crypto/orga 25 | cryptoStore: 26 | path: /tmp/crypto/orga 27 | clientPrivateKey: 28 | path: crypto-config/peerOrganizations/orga.com/users/User1@orga.com/msp/keystore/key.pem 29 | clientSignedCert: 30 | path: crypto-config/peerOrganizations/orga.com/users/User1@orga.com/msp/signcerts/User1@orga.com-cert.pem 31 | 32 | peer0.orgb.com: 33 | client: 34 | organization: OrgB 35 | credentialStore: 36 | path: /tmp/crypto/orgb 37 | cryptoStore: 38 | path: /tmp/crypto/orgb 39 | clientPrivateKey: 40 | path: crypto-config/peerOrganizations/orgb.com/users/User1@orgb.com/msp/keystore/key.pem 41 | clientSignedCert: 42 | path: crypto-config/peerOrganizations/orgb.com/users/User1@orgb.com/msp/signcerts/User1@orgb.com-cert.pem 43 | 44 | channels: 45 | mychannel: 46 | configBinary: ./channel-artifacts/channel.tx 47 | created: true 48 | orderers: 49 | - orderer.yzm.com 50 | peers: 51 | peer0.orga.com: 52 | endorsingPeer: true 53 | chaincodeQuery: true 54 | ledgerQuery: true 55 | eventSource: true 56 | peer0.orgb.com: 57 | eventSource: true 58 | 59 | chaincodes: 60 | - id: money_demo 61 | version: "1.0" 62 | contractID: money_demo 63 | language: golang 64 | path: ../chaincode/demo 65 | targetPeers: 66 | - peer0.orga.com 67 | - peer0.orgb.com 68 | 69 | organizations: 70 | OrgA: 71 | mspid: OrgAMSP 72 | peers: 73 | - peer0.orga.com 74 | adminPrivateKey: 75 | path: crypto-config/peerOrganizations/orga.com/users/Admin@orga.com/msp/keystore/key.pem 76 | signedCert: 77 | path: crypto-config/peerOrganizations/orga.com/users/Admin@orga.com/msp/signcerts/Admin@orga.com-cert.pem 78 | 79 | OrgB: 80 | mspid: OrgBMSP 81 | peers: 82 | - peer0.orgb.com 83 | adminPrivateKey: 84 | path: crypto-config/peerOrganizations/orgb.com/users/Admin@orgb.com/msp/keystore/key.pem 85 | signedCert: 86 | path: 
crypto-config/peerOrganizations/orgb.com/users/Admin@orgb.com/msp/signcerts/Admin@orgb.com-cert.pem 87 | 88 | orderers: 89 | orderer.yzm.com: 90 | url: grpc://localhost:7050 91 | grpcOptions: 92 | grpc.keepalive_time_ms: 600000 93 | 94 | peers: 95 | peer0.orga.com: 96 | url: grpc://localhost:7051 97 | grpcOptions: 98 | grpc.keepalive_time_ms: 600000 99 | 100 | peer0.orgb.com: 101 | url: grpc://localhost:8051 102 | grpcOptions: 103 | grpc.keepalive_time_ms: 600000 104 | -------------------------------------------------------------------------------- /solo-network/configtx.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | Organizations: 3 | - &OrdererOrg 4 | Name: OrdererOrg 5 | ID: OrdererMSP 6 | MSPDir: crypto-config/ordererOrganizations/yzm.com/msp 7 | Policies: 8 | Readers: 9 | Type: Signature 10 | Rule: "OR('OrdererMSP.member')" 11 | Writers: 12 | Type: Signature 13 | Rule: "OR('OrdererMSP.member')" 14 | Admins: 15 | Type: Signature 16 | Rule: "OR('OrdererMSP.admin')" 17 | - &OrgA 18 | Name: OrgAMSP 19 | ID: OrgAMSP 20 | MSPDir: crypto-config/peerOrganizations/orga.com/msp 21 | Policies: 22 | Readers: 23 | Type: Signature 24 | Rule: "OR('OrgAMSP.admin', 'OrgAMSP.peer', 'OrgAMSP.client')" 25 | Writers: 26 | Type: Signature 27 | Rule: "OR('OrgAMSP.admin', 'OrgAMSP.client')" 28 | Admins: 29 | Type: Signature 30 | Rule: "OR('OrgAMSP.admin')" 31 | AnchorPeers: 32 | - Host: peer0.orga.com 33 | Port: 7051 34 | 35 | - &OrgB 36 | Name: OrgBMSP 37 | ID: OrgBMSP 38 | MSPDir: crypto-config/peerOrganizations/orgb.com/msp 39 | Policies: 40 | Readers: 41 | Type: Signature 42 | Rule: "OR('OrgBMSP.admin', 'OrgBMSP.peer', 'OrgBMSP.client')" 43 | Writers: 44 | Type: Signature 45 | Rule: "OR('OrgBMSP.admin', 'OrgBMSP.client')" 46 | Admins: 47 | Type: Signature 48 | Rule: "OR('OrgBMSP.admin')" 49 | AnchorPeers: 50 | - Host: peer0.orgb.com 51 | Port: 8051 52 | 53 | Capabilities: 54 | Channel: &ChannelCapabilities 55 | V1_4_3: true 56 | 
V1_3: false 57 | V1_1: false 58 | 59 | Orderer: &OrdererCapabilities 60 | V1_4_2: true 61 | V1_1: false 62 | 63 | Application: &ApplicationCapabilities 64 | V1_4_2: true 65 | V1_3: false 66 | V1_2: false 67 | V1_1: false 68 | 69 | Application: &ApplicationDefaults 70 | Organizations: 71 | 72 | Policies: 73 | Readers: 74 | Type: ImplicitMeta 75 | Rule: "ANY Readers" 76 | Writers: 77 | Type: ImplicitMeta 78 | Rule: "ANY Writers" 79 | Admins: 80 | Type: ImplicitMeta 81 | Rule: "MAJORITY Admins" 82 | 83 | Capabilities: 84 | <<: *ApplicationCapabilities 85 | 86 | Orderer: &OrdererDefaults 87 | OrdererType: solo 88 | Addresses: 89 | - orderer.yzm.com:7050 90 | 91 | BatchTimeout: 2s 92 | BatchSize: 93 | MaxMessageCount: 10 94 | AbsoluteMaxBytes: 99 MB 95 | PreferredMaxBytes: 512 KB 96 | 97 | Organizations: 98 | Policies: 99 | Readers: 100 | Type: ImplicitMeta 101 | Rule: "ANY Readers" 102 | Writers: 103 | Type: ImplicitMeta 104 | Rule: "ANY Writers" 105 | Admins: 106 | Type: ImplicitMeta 107 | Rule: "MAJORITY Admins" 108 | BlockValidation: 109 | Type: ImplicitMeta 110 | Rule: "ANY Writers" 111 | 112 | Channel: &ChannelDefaults 113 | Policies: 114 | Readers: 115 | Type: ImplicitMeta 116 | Rule: "ANY Readers" 117 | Writers: 118 | Type: ImplicitMeta 119 | Rule: "ANY Writers" 120 | Admins: 121 | Type: ImplicitMeta 122 | Rule: "MAJORITY Admins" 123 | 124 | Capabilities: 125 | <<: *ChannelCapabilities 126 | 127 | Profiles: 128 | Genesis: 129 | <<: *ChannelDefaults 130 | Orderer: 131 | <<: *OrdererDefaults 132 | Organizations: 133 | - *OrdererOrg 134 | Capabilities: 135 | <<: *OrdererCapabilities 136 | Consortiums: 137 | SampleConsortium: 138 | Organizations: 139 | - *OrgA 140 | - *OrgB 141 | Channel: 142 | Consortium: SampleConsortium 143 | <<: *ChannelDefaults 144 | Application: 145 | <<: *ApplicationDefaults 146 | Organizations: 147 | - *OrgA 148 | - *OrgB 149 | Capabilities: 150 | <<: *ApplicationCapabilities 151 | 
-------------------------------------------------------------------------------- /solo-network/crypto-config.yaml: -------------------------------------------------------------------------------- 1 | OrdererOrgs: 2 | - Name: Orderer 3 | Domain: yzm.com 4 | EnableNodeOUs: true # 控制节点目录中是否生成配置文件 5 | Specs: 6 | - Hostname: orderer 7 | 8 | PeerOrgs: 9 | - Name: OrgA 10 | Domain: orga.com 11 | EnableNodeOUs: true 12 | Template: 13 | Count: 1 14 | Users: 15 | Count: 1 16 | 17 | - Name: OrgB 18 | Domain: orgb.com 19 | EnableNodeOUs: true 20 | Template: 21 | Count: 1 22 | Users: 23 | Count: 1 24 | -------------------------------------------------------------------------------- /solo-network/docker-compose-cli.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | networks: 4 | solonet: 5 | ipam: 6 | config: 7 | - subnet: 172.22.0.0/26 8 | gateway: 172.22.0.1 9 | 10 | services: 11 | 12 | orderer.yzm.com: 13 | extends: 14 | file: base/docker-compose-base.yaml 15 | service: orderer.yzm.com 16 | container_name: orderer.yzm.com 17 | networks: 18 | solonet: 19 | ipv4_address: 172.22.0.2 20 | 21 | peer0.orga.com: 22 | container_name: peer0.orga.com 23 | extends: 24 | file: base/docker-compose-base.yaml 25 | service: peer0.orga.com 26 | networks: 27 | solonet: 28 | ipv4_address: 172.22.0.3 29 | 30 | peer0.orgb.com: 31 | container_name: peer0.orgb.com 32 | extends: 33 | file: base/docker-compose-base.yaml 34 | service: peer0.orgb.com 35 | networks: 36 | solonet: 37 | ipv4_address: 172.22.0.4 38 | 39 | cli: 40 | container_name: cli 41 | image: hyperledger/fabric-tools:$IMAGETAG 42 | tty: true 43 | stdin_open: true 44 | environment: 45 | - SYS_CHANNEL=sys_channel 46 | - GOPATH=/opt/gopath 47 | - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock 48 | - FABRIC_LOGGING_SPEC=INFO 49 | - CORE_PEER_ID=cli 50 | - CORE_PEER_ADDRESS=peer0.orga.com:7051 51 | - CORE_PEER_LOCALMSPID=OrgAMSP 52 | - CORE_PEER_TLS_ENABLED=false 53 | - 
CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/orga.com/users/Admin@orga.com/msp 54 | working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer 55 | command: /bin/bash 56 | volumes: 57 | - /var/run/:/host/var/run/ 58 | - ./../chaincode/:/opt/gopath/src/github.com/chaincode 59 | - ./crypto-config:/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ 60 | - ./scripts:/opt/gopath/src/github.com/hyperledger/fabric/peer/scripts/ 61 | - ./channel-artifacts:/opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts 62 | depends_on: 63 | - orderer.yzm.com 64 | - peer0.orga.com 65 | - peer0.orgb.com 66 | networks: 67 | solonet: 68 | ipv4_address: 172.22.0.5 69 | -------------------------------------------------------------------------------- /solo-network/scripts/env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Crypto material directories 4 | PEERROOT=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations 5 | ORDEROOT=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ordererOrganizations 6 | 7 | # Node addresses 8 | ORDERERNODE=orderer.yzm.com:7050 9 | PEERORGANODE=peer0.orga.com:7051 10 | PEERORGBNODE=peer0.orgb.com:8051 11 | CHANNEL_NAME=mychannel 12 | 13 | NAME=money_demo 14 | VERSION=1.0 15 | 16 | # Switch CLI env vars to peer0 of OrgA 17 | OrgA(){ 18 | CORE_PEER_MSPCONFIGPATH=${PEERROOT}/orga.com/users/Admin@orga.com/msp 19 | CORE_PEER_ADDRESS=${PEERORGANODE} 20 | CORE_PEER_LOCALMSPID="OrgAMSP" 21 | echo "node now:peer0.orga.com" 22 | } 23 | 24 | # Switch CLI env vars to peer0 of OrgB 25 | OrgB(){ 26 | CORE_PEER_MSPCONFIGPATH=${PEERROOT}/orgb.com/users/Admin@orgb.com/msp 27 | CORE_PEER_ADDRESS=${PEERORGBNODE} 28 | CORE_PEER_LOCALMSPID="OrgBMSP" 29 | echo "node now:peer0.orgb.com" 30 | } 31 | 32 | # Create the application channel. Fix: the former trailing backslash after channel.tx line-continued into the echo, passing "echo" and its string as stray args to peer channel create 33 | InstallChannel() { 34 | peer channel create \ 35 | -o ${ORDERERNODE} \ 36 | -c ${CHANNEL_NAME} \ 37 | -f ./channel-artifacts/channel.tx 38 | echo "install channel" 39 | }
40 | 41 | # Join both peers to the channel 42 | JoinChannel() { 43 | OrgA 44 | peer channel join -b ${CHANNEL_NAME}.block 45 | echo "peer0.orga.com join channel" 46 | OrgB 47 | peer channel join -b ${CHANNEL_NAME}.block 48 | echo "peer0.orgb.com join channel" 49 | } 50 | 51 | # Update anchor peers. Fix: the former trailing backslashes after the .tx files line-continued into the echo, passing "echo" and its string as stray args to peer channel update 52 | AnchorUpdate() { 53 | OrgA 54 | peer channel update \ 55 | -o ${ORDERERNODE} \ 56 | -c ${CHANNEL_NAME} \ 57 | -f ./channel-artifacts/OrgAMSPanchor.tx 58 | echo "orga update anchor peer0.orga.com" 59 | OrgB 60 | peer channel update \ 61 | -o ${ORDERERNODE} \ 62 | -c ${CHANNEL_NAME} \ 63 | -f ./channel-artifacts/OrgBMSPanchor.tx 64 | echo "orgb update anchor peer0.orgb.com" 65 | } 66 | 67 | # Install chaincode on both peers 68 | InstallChainCode() { 69 | OrgA 70 | peer chaincode install \ 71 | -n ${NAME} \ 72 | -v ${VERSION} \ 73 | -p github.com/chaincode/demo/ 74 | echo "peer0.orga.com install chaincode - demo" 75 | 76 | OrgB 77 | peer chaincode install \ 78 | -n ${NAME} \ 79 | -v ${VERSION} \ 80 | -p github.com/chaincode/demo/ 81 | echo "peer0.orgb.com install chaincode - demo" 82 | } 83 | 84 | # Instantiate chaincode (endorsement: both orgs' peers must sign) 85 | InstantiateChainCode() { 86 | peer chaincode instantiate \ 87 | -o ${ORDERERNODE} \ 88 | -C ${CHANNEL_NAME} \ 89 | -n ${NAME} \ 90 | -v ${VERSION} \ 91 | -c '{"Args":["Init"]}' \ 92 | -P "AND ('OrgAMSP.peer','OrgBMSP.peer')" 93 | sleep 10 94 | echo "instantiate chaincode" 95 | } 96 | 97 | # Chaincode smoke test: open/transfer/query through the CLI 98 | TestDemo() { 99 | # Open accounts 100 | peer chaincode invoke \ 101 | -C ${CHANNEL_NAME} \ 102 | -o ${ORDERERNODE} \ 103 | -n ${NAME} \ 104 | --peerAddresses ${PEERORGANODE} \ 105 | --peerAddresses ${PEERORGBNODE} \ 106 | -c '{"Args":["open","count_a", "100"]}' 107 | sleep 3 108 | peer chaincode invoke \ 109 | -C ${CHANNEL_NAME} \ 110 | -o ${ORDERERNODE} \ 111 | -n ${NAME} \ 112 | --peerAddresses ${PEERORGANODE} \ 113 | --peerAddresses ${PEERORGBNODE} \ 114 | -c '{"Args":["open","count_b", "100"]}' 115 | sleep 3 116 | peer chaincode query \ 117 | -C ${CHANNEL_NAME} \ 118 | -n ${NAME} \ 119 | -c '{"Args":["query","count_a"]}' 120 |
peer chaincode query \ 121 | -C ${CHANNEL_NAME} \ 122 | -n ${NAME} \ 123 | -c '{"Args":["query","count_b"]}' 124 | peer chaincode invoke \ 125 | -C ${CHANNEL_NAME} \ 126 | -o ${ORDERERNODE} \ 127 | -n ${NAME} \ 128 | --peerAddresses ${PEERORGANODE} \ 129 | --peerAddresses ${PEERORGBNODE} \ 130 | -c '{"Args":["invoke","count_a","count_b","50"]}' 131 | sleep 3 132 | peer chaincode invoke \ 133 | -C ${CHANNEL_NAME} \ 134 | -o ${ORDERERNODE} \ 135 | -n ${NAME} \ 136 | --peerAddresses ${PEERORGANODE} \ 137 | --peerAddresses ${PEERORGBNODE} \ 138 | -c '{"Args":["open","count_c", "100"]}' 139 | sleep 3 140 | peer chaincode query \ 141 | -C ${CHANNEL_NAME} \ 142 | -n ${NAME} \ 143 | -c '{"Args":["query","count_a"]}' 144 | peer chaincode query \ 145 | -C ${CHANNEL_NAME} \ 146 | -n ${NAME} \ 147 | -c '{"Args":["query","count_b"]}' 148 | peer chaincode query \ 149 | -C ${CHANNEL_NAME} \ 150 | -n ${NAME} \ 151 | -c '{"Args":["query","count_c"]}' 152 | } 153 | 154 | case $1 in 155 | installchannel) 156 | InstallChannel 157 | ;; 158 | joinchannel) 159 | JoinChannel 160 | ;; 161 | anchorupdate) 162 | AnchorUpdate 163 | ;; 164 | installchaincode) 165 | InstallChainCode 166 | ;; 167 | instantiatechaincode) 168 | InstantiateChainCode 169 | ;; 170 | testdemo) 171 | OrgA 172 | TestDemo 173 | ;; 174 | all) 175 | InstallChannel 176 | JoinChannel 177 | AnchorUpdate 178 | InstallChainCode 179 | InstantiateChainCode 180 | OrgA 181 | TestDemo 182 | ;; 183 | esac 184 | -------------------------------------------------------------------------------- /solo-network/scripts/gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | GENESIS_PROFILE=Genesis 4 | CHANNEL_PROFILE=Channel 5 | SYS_CHANNEL=sys-channel 6 | CHANNEL_NAME=mychannel 7 | VERSION=1.4.4 8 | 9 | FABRIC_CFG_PATH=$PWD 10 | 11 | ORG_NAMES=(OrgAMSP OrgBMSP) 12 | 13 | # Check cryptogen exists and matches the expected version 14 | if !
[ -x "$(command -v cryptogen)" ] ; then 15 | echo -e "\033[31m no cryptogen\033[0m" 16 | exit 1 17 | fi 18 | if [ ${VERSION} != "$(cryptogen version | grep Version | awk -F ': ' '{print $2}')" ] ; then 19 | echo -e "\033[31m cryptogen need version \033[0m"${VERSION} 20 | exit 1 21 | fi 22 | # Check configtxgen exists and matches the expected version 23 | if ! [ -x "$(command -v configtxgen)" ] ; then 24 | echo -e "\033[31m no configtxgen\033[0m" 25 | exit 1 26 | fi 27 | if [ ${VERSION} != "$(configtxgen --version | grep Version | awk -F ': ' '{print $2}')" ] ; then 28 | echo -e "\033[31m configtxgen need version \033[0m"${VERSION} 29 | exit 1 30 | fi 31 | # Generate crypto material 32 | echo -e "\033[31m clear crypto files\033[0m" 33 | rm -rf crypto-config 34 | echo -e "\033[31m generate crypto files\033[0m" 35 | cryptogen generate --config ./crypto-config.yaml 36 | # Clean stale artifacts 37 | echo -e "\033[31m clear block files\033[0m" 38 | rm -rf ./channel-artifacts 39 | mkdir ./channel-artifacts 40 | # Generate the genesis block. Fix: no trailing backslash after -outputBlock, which line-continued the command into the next comment line 41 | echo -e "\033[31m generate genesis block\033[0m" 42 | configtxgen \ 43 | -profile ${GENESIS_PROFILE} \ 44 | -channelID ${SYS_CHANNEL} \ 45 | -outputBlock ./channel-artifacts/genesis.block 46 | # Generate the channel creation transaction 47 | echo -e "\033[31m generate channel transaction\033[0m" 48 | configtxgen \ 49 | -profile ${CHANNEL_PROFILE} \ 50 | -channelID ${CHANNEL_NAME} \ 51 | -outputCreateChannelTx ./channel-artifacts/channel.tx 52 | # Generate anchor peer updates for each org 53 | echo -e "\033[31m generate anchor transaction\033[0m" 54 | for i in ${ORG_NAMES[@]}; do 55 | configtxgen \ 56 | -profile ${CHANNEL_PROFILE} \ 57 | -channelID ${CHANNEL_NAME} \ 58 | -outputAnchorPeersUpdate ./channel-artifacts/${i}anchor.tx \ 59 | -asOrg ${i} 60 | done 61 | -------------------------------------------------------------------------------- /solo-network/scripts/utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ORGA=orga 4 | ORGB=orgb 5 | ORGAUSERS=(Admin User1) 6 | ORGBUSERS=(Admin User1) 7 | VERSION=1.4.4 8 | 9 | #
复制keystore 10 | CPFile() { 11 | files=$(ls $1) 12 | echo ${files[0]} 13 | cd $1 14 | cp ${files[0]} ./key.pem 15 | cd - 16 | } 17 | 18 | # 复制所有文件keystore 19 | CPAllFiles() { 20 | PREFIX=crypto-config/peerOrganizations 21 | SUFFIX=msp/keystore 22 | for u in ${ORGAUSERS[@]}; do 23 | CPFile ${PREFIX}/${ORGA}.com/users/${u}@${ORGA}.com/${SUFFIX} 24 | done 25 | for u in ${ORGBUSERS[@]}; do 26 | CPFile ${PREFIX}/${ORGB}.com/users/${u}@${ORGB}.com/${SUFFIX} 27 | done 28 | } 29 | 30 | # 清理缓存文件 31 | Clean() { 32 | rm -rf ./channel-artifacts 33 | rm -rf ./crypto-config 34 | rm -rf ./production 35 | rm -rf /tmp/crypto 36 | } 37 | 38 | case $1 in 39 | # 压力测试启动/关闭 40 | up) 41 | CPAllFiles 42 | env IMAGETAG=${VERSION} docker-compose -f ./docker-compose-cli.yaml up -d 43 | docker exec cli /bin/bash -c "scripts/env.sh all" 44 | ;; 45 | down) 46 | docker kill $(docker ps -qa) 47 | docker rmi $(docker images | grep 'dev-*' | awk '{print $3}') 48 | echo y | docker system prune 49 | Clean 50 | ;; 51 | esac 52 | --------------------------------------------------------------------------------