├── README.md
├── configtx.yaml
├── core.yaml
├── crypto-config.yaml
├── fabricservice.js
├── mainorigin.js
├── orderer.yaml
├── origin_dairyfarm.go
├── origin_machining.go
└── origin_salesterminal.go

/README.md:
--------------------------------------------------------------------------------
# fabric_milk_tracer
Source code for the milk-traceability example project from 区块链开发实战 (Blockchain Development in Practice).
--------------------------------------------------------------------------------
/configtx.yaml:
--------------------------------------------------------------------------------

Organizations:

    - &OrderOrg
        # Name is the key by which this org will be referenced in channel
        # configuration transactions.
        # Name can include alphanumeric characters as well as dots and dashes.
        Name: OrderOrg

        # SkipAsForeign can be set to true for org definitions which are to be
        # inherited from the orderer system channel during channel creation. This
        # is especially useful when an admin of a single org without access to the
        # MSP directories of the other orgs wishes to create a channel. Note
        # this property must always be set to false for orgs included in block
        # creation.
        SkipAsForeign: false

        # ID is the key by which this org's MSP definition will be referenced.
        # ID can include alphanumeric characters as well as dots and dashes.
        ID: OrdererMSP

        # MSPDir is the filesystem path which contains the MSP configuration.
        MSPDir: /home/gopath/src/github.com/hyperledger/cow/crypto-config/ordererOrganizations/cow.com/msp

        Policies:
            Readers:
                Type: Signature
                Rule: "OR('OrdererMSP.member')"
                # If your MSP is configured with the new NodeOUs, you might
                # want to use a more specific rule like the following:
                # Rule: "OR('OrdererMSP.admin', 'OrdererMSP.peer', 'OrdererMSP.client')"
            Writers:
                Type: Signature
                Rule: "OR('OrdererMSP.member')"
                # If your MSP is configured with the new NodeOUs, you might
                # want to use a more specific rule like the following:
                # Rule: "OR('OrdererMSP.admin', 'OrdererMSP.client')"
            Admins:
                Type: Signature
                Rule: "OR('OrdererMSP.admin')"
            Endorsement:
                Type: Signature
                Rule: "OR('OrdererMSP.member')"

        # OrdererEndpoints is a list of all orderers this org runs, which clients
        # and peers may connect to in order to push transactions and receive blocks,
        # respectively.
        OrdererEndpoints:
            - "127.0.0.1:7050"

    - &Org1

        Name: Org1MSP
        ID: Org1MSP
        MSPDir: /home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/msp
        AdminPrincipal: Role.ADMIN
        AnchorPeers:
            - Host: peer0.org1.cow.com
              Port: 7051
        Policies:
            Readers:
                Type: Signature
                Rule: "OR('Org1MSP.member')"
            Writers:
                Type: Signature
                Rule: "OR('Org1MSP.member')"
            Admins:
                Type: Signature
                Rule: "OR('Org1MSP.admin')"
            Endorsement:
                Type: Signature
                Rule: "OR('Org1MSP.member')"

    - &Org2
        Name: Org2MSP
        ID: Org2MSP
        MSPDir: /home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org2.cow.com/msp
        AdminPrincipal: Role.ADMIN
        AnchorPeers:
            - Host: peer0.org2.cow.com
              Port: 8051
        Policies:
            Readers:
                Type: Signature
                Rule: "OR('Org2MSP.member')"
            Writers:
                Type: Signature
                Rule: "OR('Org2MSP.member')"
            Admins:
                Type: Signature
                Rule: "OR('Org2MSP.admin')"
            Endorsement:
                Type: Signature
                Rule: "OR('Org2MSP.member')"

    - &Org3
        Name: Org3MSP
        ID: Org3MSP
        MSPDir: /home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org3.cow.com/msp
        AdminPrincipal: Role.ADMIN
        AnchorPeers:
            - Host: peer0.org3.cow.com
              Port: 9051
        Policies:
            Readers:
                Type: Signature
                Rule: "OR('Org3MSP.member')"
            Writers:
                Type: Signature
                Rule: "OR('Org3MSP.member')"
            Admins:
                Type: Signature
                Rule: "OR('Org3MSP.admin')"
            Endorsement:
                Type: Signature
                Rule: "OR('Org3MSP.member')"

Capabilities:
    # Channel capabilities apply to both the orderers and the peers and must be
    # supported by both.
    # Set the value of the capability to true to require it.
    Channel: &ChannelCapabilities
        # V2.0 for Channel is a catchall flag for behavior which has been
        # determined to be desired for all orderers and peers running at the v2.0.0
        # level, but which would be incompatible with orderers and peers from
        # prior releases.
        # Prior to enabling V2.0 channel capabilities, ensure that all
        # orderers and peers on a channel are at v2.0.0 or later.
        V2_0: true

    # Orderer capabilities apply only to the orderers, and may be safely
    # used with prior release peers.
    # Set the value of the capability to true to require it.
    Orderer: &OrdererCapabilities
        # V2.0 for Orderer is a catchall flag for behavior which has been
        # determined to be desired for all orderers running at the v2.0.0
        # level, but which would be incompatible with orderers from prior releases.
        # Prior to enabling V2.0 orderer capabilities, ensure that all
        # orderers on a channel are at v2.0.0 or later.
        V2_0: true

    # Application capabilities apply only to the peer network, and may be safely
    # used with prior release orderers.
    # Set the value of the capability to true to require it.
    Application: &ApplicationCapabilities
        # V2.0 for Application enables the new non-backwards compatible
        # features and fixes of fabric v2.0.
        # Prior to enabling V2.0 application capabilities, ensure that all
        # peers on a channel are at v2.0.0 or later.
148 | V2_0: true 149 | 150 | ################################################################################ 151 | # 152 | # APPLICATION 153 | # 154 | # This section defines the values to encode into a config transaction or 155 | # genesis block for application-related parameters. 156 | # 157 | ################################################################################ 158 | Application: &ApplicationDefaults 159 | ACLs: &ACLsDefault 160 | # This section provides defaults for policies for various resources 161 | # in the system. These "resources" could be functions on system chaincodes 162 | # (e.g., "GetBlockByNumber" on the "qscc" system chaincode) or other resources 163 | # (e.g.,who can receive Block events). This section does NOT specify the resource's 164 | # definition or API, but just the ACL policy for it. 165 | # 166 | # Users can override these defaults with their own policy mapping by defining the 167 | # mapping under ACLs in their channel definition 168 | 169 | #---New Lifecycle System Chaincode (_lifecycle) function to policy mapping for access control--# 170 | 171 | # ACL policy for _lifecycle's "CheckCommitReadiness" function 172 | _lifecycle/CheckCommitReadiness: /Channel/Application/Writers 173 | 174 | # ACL policy for _lifecycle's "CommitChaincodeDefinition" function 175 | _lifecycle/CommitChaincodeDefinition: /Channel/Application/Writers 176 | 177 | # ACL policy for _lifecycle's "QueryChaincodeDefinition" function 178 | _lifecycle/QueryChaincodeDefinition: /Channel/Application/Readers 179 | 180 | # ACL policy for _lifecycle's "QueryChaincodeDefinitions" function 181 | _lifecycle/QueryChaincodeDefinitions: /Channel/Application/Readers 182 | 183 | #---Lifecycle System Chaincode (lscc) function to policy mapping for access control---# 184 | 185 | # ACL policy for lscc's "getid" function 186 | lscc/ChaincodeExists: /Channel/Application/Readers 187 | 188 | # ACL policy for lscc's "getdepspec" function 189 | lscc/GetDeploymentSpec: /Channel/Application/Readers 190 | 191 | # ACL policy for lscc's "getccdata" function 192 | lscc/GetChaincodeData: /Channel/Application/Readers 193 | 194 | # ACL Policy for lscc's "getchaincodes" function 195 | lscc/GetInstantiatedChaincodes: /Channel/Application/Readers 196 | 197 | #---Query System Chaincode (qscc) function to policy mapping for access control---# 198 | 199 | # ACL policy for qscc's "GetChainInfo" function 200 | qscc/GetChainInfo: /Channel/Application/Readers 201 | 202 | # ACL policy for qscc's "GetBlockByNumber" function 203 | qscc/GetBlockByNumber: /Channel/Application/Readers 204 | 205 | # ACL policy for qscc's "GetBlockByHash" function 206 | qscc/GetBlockByHash: /Channel/Application/Readers 207 | 208 | # ACL policy for qscc's "GetTransactionByID" function 209 | qscc/GetTransactionByID: /Channel/Application/Readers 210 | 211 | # ACL policy for qscc's "GetBlockByTxID" function 212 | qscc/GetBlockByTxID: /Channel/Application/Readers 213 | 214 | #---Configuration System Chaincode (cscc) function to policy mapping for access control---# 215 | 216 | # ACL policy for cscc's "GetConfigBlock" function 217 | cscc/GetConfigBlock: /Channel/Application/Readers 218 | 219 | # ACL policy for cscc's "GetConfigTree" function 220 | cscc/GetConfigTree: /Channel/Application/Readers 221 | 222 | # ACL policy for cscc's "SimulateConfigTreeUpdate" function 223 | cscc/SimulateConfigTreeUpdate: /Channel/Application/Readers 224 | 225 | #---Miscellanesous peer function to policy mapping for access control---# 226 | 227 | # ACL policy for invoking 
chaincodes on peer 228 | peer/Propose: /Channel/Application/Writers 229 | 230 | # ACL policy for chaincode to chaincode invocation 231 | peer/ChaincodeToChaincode: /Channel/Application/Readers 232 | 233 | #---Events resource to policy mapping for access control###---# 234 | 235 | # ACL policy for sending block events 236 | event/Block: /Channel/Application/Readers 237 | 238 | # ACL policy for sending filtered block events 239 | event/FilteredBlock: /Channel/Application/Readers 240 | 241 | # Organizations lists the orgs participating on the application side of the 242 | # network. 243 | Organizations: 244 | 245 | # Policies defines the set of policies at this level of the config tree 246 | # For Application policies, their canonical path is 247 | # /Channel/Application/ 248 | Policies: &ApplicationDefaultPolicies 249 | LifecycleEndorsement: 250 | Type: ImplicitMeta 251 | Rule: "MAJORITY Endorsement" 252 | Endorsement: 253 | Type: ImplicitMeta 254 | Rule: "MAJORITY Endorsement" 255 | Readers: 256 | Type: ImplicitMeta 257 | Rule: "ANY Readers" 258 | Writers: 259 | Type: ImplicitMeta 260 | Rule: "ANY Writers" 261 | Admins: 262 | Type: ImplicitMeta 263 | Rule: "MAJORITY Admins" 264 | 265 | # Capabilities describes the application level capabilities, see the 266 | # dedicated Capabilities section elsewhere in this file for a full 267 | # description 268 | Capabilities: 269 | <<: *ApplicationCapabilities 270 | 271 | ################################################################################ 272 | # 273 | # ORDERER 274 | # 275 | # This section defines the values to encode into a config transaction or 276 | # genesis block for orderer related parameters. 277 | # 278 | ################################################################################ 279 | Orderer: &OrdererDefaults 280 | 281 | # Orderer Type: The orderer implementation to start. 282 | # Available types are "solo", "kafka" and "etcdraft". 283 | OrdererType: solo 284 | 285 | # Addresses used to be the list of orderer addresses that clients and peers 286 | # could connect to. However, this does not allow clients to associate orderer 287 | # addresses and orderer organizations which can be useful for things such 288 | # as TLS validation. The preferred way to specify orderer addresses is now 289 | # to include the OrdererEndpoints item in your org definition 290 | Addresses: 291 | - orderer.cow.com:7050 292 | 293 | # Batch Timeout: The amount of time to wait before creating a batch. 294 | BatchTimeout: 2s 295 | 296 | # Batch Size: Controls the number of messages batched into a block. 297 | # The orderer views messages opaquely, but typically, messages may 298 | # be considered to be Fabric transactions. The 'batch' is the group 299 | # of messages in the 'data' field of the block. Blocks will be a few kb 300 | # larger than the batch size, when signatures, hashes, and other metadata 301 | # is applied. 302 | BatchSize: 303 | 304 | # Max Message Count: The maximum number of messages to permit in a 305 | # batch. No block will contain more than this number of messages. 306 | MaxMessageCount: 500 307 | 308 | # Absolute Max Bytes: The absolute maximum number of bytes allowed for 309 | # the serialized messages in a batch. The maximum block size is this value 310 | # plus the size of the associated metadata (usually a few KB depending 311 | # upon the size of the signing identities). Any transaction larger than 312 | # this value will be rejected by ordering. 
If the "kafka" OrdererType is 313 | # selected, set 'message.max.bytes' and 'replica.fetch.max.bytes' on 314 | # the Kafka brokers to a value that is larger than this one. 315 | AbsoluteMaxBytes: 10 MB 316 | 317 | # Preferred Max Bytes: The preferred maximum number of bytes allowed 318 | # for the serialized messages in a batch. Roughly, this field may be considered 319 | # the best effort maximum size of a batch. A batch will fill with messages 320 | # until this size is reached (or the max message count, or batch timeout is 321 | # exceeded). If adding a new message to the batch would cause the batch to 322 | # exceed the preferred max bytes, then the current batch is closed and written 323 | # to a block, and a new batch containing the new message is created. If a 324 | # message larger than the preferred max bytes is received, then its batch 325 | # will contain only that message. Because messages may be larger than 326 | # preferred max bytes (up to AbsoluteMaxBytes), some batches may exceed 327 | # the preferred max bytes, but will always contain exactly one transaction. 328 | PreferredMaxBytes: 2 MB 329 | 330 | # Max Channels is the maximum number of channels to allow on the ordering 331 | # network. When set to 0, this implies no maximum number of channels. 332 | MaxChannels: 0 333 | 334 | Kafka: 335 | # Brokers: A list of Kafka brokers to which the orderer connects. Edit 336 | # this list to identify the brokers of the ordering service. 337 | # NOTE: Use IP:port notation. 338 | Brokers: 339 | - 127.0.0.1:9092 340 | - kafka0:9092 341 | - kafka1:9092 342 | - kafka2:9092 343 | 344 | # EtcdRaft defines configuration which must be set when the "etcdraft" 345 | # orderertype is chosen. 346 | EtcdRaft: 347 | # The set of Raft replicas for this network. For the etcd/raft-based 348 | # implementation, we expect every replica to also be an OSN. Therefore, 349 | # a subset of the host:port items enumerated in this list should be 350 | # replicated under the Orderer.Addresses key above. 351 | Consenters: 352 | - Host: raft0.example.com 353 | Port: 7050 354 | ClientTLSCert: path/to/ClientTLSCert0 355 | ServerTLSCert: path/to/ServerTLSCert0 356 | - Host: raft1.example.com 357 | Port: 7050 358 | ClientTLSCert: path/to/ClientTLSCert1 359 | ServerTLSCert: path/to/ServerTLSCert1 360 | - Host: raft2.example.com 361 | Port: 7050 362 | ClientTLSCert: path/to/ClientTLSCert2 363 | ServerTLSCert: path/to/ServerTLSCert2 364 | 365 | # Options to be specified for all the etcd/raft nodes. The values here 366 | # are the defaults for all new channels and can be modified on a 367 | # per-channel basis via configuration updates. 368 | Options: 369 | # TickInterval is the time interval between two Node.Tick invocations. 370 | TickInterval: 500ms 371 | 372 | # ElectionTick is the number of Node.Tick invocations that must pass 373 | # between elections. That is, if a follower does not receive any 374 | # message from the leader of current term before ElectionTick has 375 | # elapsed, it will become candidate and start an election. 376 | # ElectionTick must be greater than HeartbeatTick. 377 | ElectionTick: 10 378 | 379 | # HeartbeatTick is the number of Node.Tick invocations that must 380 | # pass between heartbeats. That is, a leader sends heartbeat 381 | # messages to maintain its leadership every HeartbeatTick ticks. 382 | HeartbeatTick: 1 383 | 384 | # MaxInflightBlocks limits the max number of in-flight append messages 385 | # during optimistic replication phase. 
386 | MaxInflightBlocks: 5 387 | 388 | # SnapshotIntervalSize defines number of bytes per which a snapshot is taken 389 | SnapshotIntervalSize: 16 MB 390 | 391 | # Organizations lists the orgs participating on the orderer side of the 392 | # network. 393 | Organizations: 394 | 395 | # Policies defines the set of policies at this level of the config tree 396 | # For Orderer policies, their canonical path is 397 | # /Channel/Orderer/ 398 | Policies: 399 | Readers: 400 | Type: ImplicitMeta 401 | Rule: "ANY Readers" 402 | Writers: 403 | Type: ImplicitMeta 404 | Rule: "ANY Writers" 405 | Admins: 406 | Type: ImplicitMeta 407 | Rule: "MAJORITY Admins" 408 | # BlockValidation specifies what signatures must be included in the block 409 | # from the orderer for the peer to validate it. 410 | BlockValidation: 411 | Type: ImplicitMeta 412 | Rule: "ANY Writers" 413 | 414 | # Capabilities describes the orderer level capabilities, see the 415 | # dedicated Capabilities section elsewhere in this file for a full 416 | # description 417 | Capabilities: 418 | <<: *OrdererCapabilities 419 | 420 | ################################################################################ 421 | # 422 | # CHANNEL 423 | # 424 | # This section defines the values to encode into a config transaction or 425 | # genesis block for channel related parameters. 426 | # 427 | ################################################################################ 428 | Channel: &ChannelDefaults 429 | # Policies defines the set of policies at this level of the config tree 430 | # For Channel policies, their canonical path is 431 | # /Channel/ 432 | Policies: 433 | # Who may invoke the 'Deliver' API 434 | Readers: 435 | Type: ImplicitMeta 436 | Rule: "ANY Readers" 437 | # Who may invoke the 'Broadcast' API 438 | Writers: 439 | Type: ImplicitMeta 440 | Rule: "ANY Writers" 441 | # By default, who may modify elements at this config level 442 | Admins: 443 | Type: ImplicitMeta 444 | Rule: "MAJORITY Admins" 445 | 446 | 447 | # Capabilities describes the channel level capabilities, see the 448 | # dedicated Capabilities section elsewhere in this file for a full 449 | # description 450 | Capabilities: 451 | <<: *ChannelCapabilities 452 | 453 | ################################################################################ 454 | # 455 | # PROFILES 456 | # 457 | # Different configuration profiles may be encoded here to be specified as 458 | # parameters to the configtxgen tool. The profiles which specify consortiums 459 | # are to be used for generating the orderer genesis block. With the correct 460 | # consortium members defined in the orderer genesis block, channel creation 461 | # requests may be generated with only the org member names and a consortium 462 | # name. 
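# As a rough illustration (not part of this repository's scripts), the two
# profiles defined below are typically passed to the configtxgen tool roughly
# as follows; the output file names, the system-channel ID, and the "milkgen"
# application channel ID (the channel name used by fabricservice.js) are
# assumptions, not values taken from this repo:
#
#   configtxgen -profile OrgsOrdererGenesis -channelID syschannel -outputBlock ./genesis.block
#   configtxgen -profile OrgsChannel -channelID milkgen -outputCreateChannelTx ./milkgen.tx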
463 | # 464 | ################################################################################ 465 | Profiles: 466 | 467 | OrgsOrdererGenesis: 468 | <<: *ChannelDefaults 469 | Orderer: 470 | 471 | <<: *OrdererDefaults 472 | 473 | Organizations: 474 | 475 | - *OrderOrg 476 | 477 | Consortiums: 478 | 479 | SampleConsortium: 480 | 481 | Organizations: 482 | 483 | - *Org1 484 | 485 | - *Org2 486 | 487 | - *Org3 488 | 489 | OrgsChannel: 490 | 491 | <<: *ChannelDefaults 492 | 493 | Consortium: SampleConsortium 494 | 495 | Application: 496 | 497 | <<: *ApplicationDefaults 498 | 499 | Organizations: 500 | 501 | - *Org1 502 | 503 | - *Org2 504 | 505 | - *Org3 506 | 507 | 508 | -------------------------------------------------------------------------------- /core.yaml: -------------------------------------------------------------------------------- 1 | # Copyright IBM Corp. All Rights Reserved. 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | # 5 | 6 | ############################################################################### 7 | # 8 | # Peer section 9 | # 10 | ############################################################################### 11 | peer: 12 | 13 | # The peer id provides a name for this peer instance and is used when 14 | # naming docker resources. 15 | id: peer0.org1.cow.com 16 | 17 | # The networkId allows for logical separation of networks and is used when 18 | # naming docker resources. 19 | networkId: dev 20 | 21 | # The Address at local network interface this Peer will listen on. 22 | # By default, it will listen on all network interfaces 23 | listenAddress: 0.0.0.0:7051 24 | 25 | # The endpoint this peer uses to listen for inbound chaincode connections. 26 | # If this is commented-out, the listen address is selected to be 27 | # the peer's address (see below) with port 7052 28 | chaincodeListenAddress: 0.0.0.0:7052 29 | 30 | # The endpoint the chaincode for this peer uses to connect to the peer. 31 | # If this is not specified, the chaincodeListenAddress address is selected. 32 | # And if chaincodeListenAddress is not specified, address is selected from 33 | # peer listenAddress. 34 | # chaincodeAddress: 0.0.0.0:7052 35 | 36 | # When used as peer config, this represents the endpoint to other peers 37 | # in the same organization. For peers in other organization, see 38 | # gossip.externalEndpoint for more info. 39 | # When used as CLI config, this means the peer's endpoint to interact with 40 | address: peer0.org1.mynetwork.com:7051 41 | 42 | # Whether the Peer should programmatically determine its address 43 | # This case is useful for docker containers. 44 | addressAutoDetect: false 45 | 46 | # Keepalive settings for peer server and clients 47 | keepalive: 48 | # Interval is the duration after which if the server does not see 49 | # any activity from the client it pings the client to see if it's alive 50 | interval: 7200s 51 | # Timeout is the duration the server waits for a response 52 | # from the client after sending a ping before closing the connection 53 | timeout: 20s 54 | # MinInterval is the minimum permitted time between client pings. 55 | # If clients send pings more frequently, the peer server will 56 | # disconnect them 57 | minInterval: 60s 58 | # Client keepalive settings for communicating with other peer nodes 59 | client: 60 | # Interval is the time between pings to peer nodes. 
This must 61 | # greater than or equal to the minInterval specified by peer 62 | # nodes 63 | interval: 60s 64 | # Timeout is the duration the client waits for a response from 65 | # peer nodes before closing the connection 66 | timeout: 20s 67 | # DeliveryClient keepalive settings for communication with ordering 68 | # nodes. 69 | deliveryClient: 70 | # Interval is the time between pings to ordering nodes. This must 71 | # greater than or equal to the minInterval specified by ordering 72 | # nodes. 73 | interval: 60s 74 | # Timeout is the duration the client waits for a response from 75 | # ordering nodes before closing the connection 76 | timeout: 20s 77 | 78 | 79 | # Gossip related configuration 80 | gossip: 81 | # Bootstrap set to initialize gossip with. 82 | # This is a list of other peers that this peer reaches out to at startup. 83 | # Important: The endpoints here have to be endpoints of peers in the same 84 | # organization, because the peer would refuse connecting to these endpoints 85 | # unless they are in the same organization as the peer. 86 | bootstrap: 127.0.0.1:7051 87 | 88 | # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. 89 | # Setting both to true would result in the termination of the peer 90 | # since this is undefined state. If the peers are configured with 91 | # useLeaderElection=false, make sure there is at least 1 peer in the 92 | # organization that its orgLeader is set to true. 93 | 94 | # Defines whenever peer will initialize dynamic algorithm for 95 | # "leader" selection, where leader is the peer to establish 96 | # connection with ordering service and use delivery protocol 97 | # to pull ledger blocks from ordering service. It is recommended to 98 | # use leader election for large networks of peers. 99 | useLeaderElection: true 100 | # Statically defines peer to be an organization "leader", 101 | # where this means that current peer will maintain connection 102 | # with ordering service and disseminate block across peers in 103 | # its own organization 104 | orgLeader: false 105 | 106 | # Interval for membershipTracker polling 107 | membershipTrackerInterval: 5s 108 | 109 | # Overrides the endpoint that the peer publishes to peers 110 | # in its organization. 
For peers in foreign organizations 111 | # see 'externalEndpoint' 112 | endpoint: 113 | # Maximum count of blocks stored in memory 114 | maxBlockCountToStore: 100 115 | # Max time between consecutive message pushes(unit: millisecond) 116 | maxPropagationBurstLatency: 10ms 117 | # Max number of messages stored until a push is triggered to remote peers 118 | maxPropagationBurstSize: 10 119 | # Number of times a message is pushed to remote peers 120 | propagateIterations: 1 121 | # Number of peers selected to push messages to 122 | propagatePeerNum: 3 123 | # Determines frequency of pull phases(unit: second) 124 | # Must be greater than digestWaitTime + responseWaitTime 125 | pullInterval: 4s 126 | # Number of peers to pull from 127 | pullPeerNum: 3 128 | # Determines frequency of pulling state info messages from peers(unit: second) 129 | requestStateInfoInterval: 4s 130 | # Determines frequency of pushing state info messages to peers(unit: second) 131 | publishStateInfoInterval: 4s 132 | # Maximum time a stateInfo message is kept until expired 133 | stateInfoRetentionInterval: 134 | # Time from startup certificates are included in Alive messages(unit: second) 135 | publishCertPeriod: 10s 136 | # Should we skip verifying block messages or not (currently not in use) 137 | skipBlockVerification: false 138 | # Dial timeout(unit: second) 139 | dialTimeout: 3s 140 | # Connection timeout(unit: second) 141 | connTimeout: 2s 142 | # Buffer size of received messages 143 | recvBuffSize: 20 144 | # Buffer size of sending messages 145 | sendBuffSize: 200 146 | # Time to wait before pull engine processes incoming digests (unit: second) 147 | # Should be slightly smaller than requestWaitTime 148 | digestWaitTime: 1s 149 | # Time to wait before pull engine removes incoming nonce (unit: milliseconds) 150 | # Should be slightly bigger than digestWaitTime 151 | requestWaitTime: 1500ms 152 | # Time to wait before pull engine ends pull (unit: second) 153 | responseWaitTime: 2s 154 | # Alive check interval(unit: second) 155 | aliveTimeInterval: 5s 156 | # Alive expiration timeout(unit: second) 157 | aliveExpirationTimeout: 25s 158 | # Reconnect interval(unit: second) 159 | reconnectInterval: 25s 160 | # This is an endpoint that is published to peers outside of the organization. 161 | # If this isn't set, the peer will not be known to other organizations. 162 | externalEndpoint: peer0.org1.mynetwork.com:7051 163 | # Leader election service configuration 164 | election: 165 | # Longest time peer waits for stable membership during leader election startup (unit: second) 166 | startupGracePeriod: 15s 167 | # Interval gossip membership samples to check its stability (unit: second) 168 | membershipSampleInterval: 1s 169 | # Time passes since last declaration message before peer decides to perform leader election (unit: second) 170 | leaderAliveThreshold: 10s 171 | # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) 172 | leaderElectionDuration: 5s 173 | 174 | pvtData: 175 | # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block 176 | # would be attempted to be pulled from peers until the block would be committed without the private data 177 | pullRetryThreshold: 60s 178 | # As private data enters the transient store, it is associated with the peer's ledger's height at that time. 
179 | # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, 180 | # and the private data residing inside the transient store that is guaranteed not to be purged. 181 | # Private data is purged from the transient store when blocks with sequences that are multiples 182 | # of transientstoreMaxBlockRetention are committed. 183 | transientstoreMaxBlockRetention: 1000 184 | # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer 185 | # at private data push at endorsement time. 186 | pushAckTimeout: 3s 187 | # Block to live pulling margin, used as a buffer 188 | # to prevent peer from trying to pull private data 189 | # from peers that is soon to be purged in next N blocks. 190 | # This helps a newly joined peer catch up to current 191 | # blockchain height quicker. 192 | btlPullMargin: 10 193 | # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to 194 | # pull from the other peers the most recent missing blocks with a maximum batch size limitation. 195 | # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a 196 | # single iteration. 197 | reconcileBatchSize: 10 198 | # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning 199 | # of the next reconciliation iteration. 200 | reconcileSleepInterval: 1m 201 | # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. 202 | reconciliationEnabled: true 203 | # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid 204 | # transaction's private data from other peers need to be skipped during the commit time and pulled 205 | # only through reconciler. 206 | skipPullingInvalidTransactionsDuringCommit: false 207 | # implicitCollectionDisseminationPolicy specifies the dissemination policy for the peer's own implicit collection. 208 | # When a peer endorses a proposal that writes to its own implicit collection, below values override the default values 209 | # for disseminating private data. 210 | # Note that it is applicable to all channels the peer has joined. The implication is that requiredPeerCount has to 211 | # be smaller than the number of peers in a channel that has the lowest numbers of peers from the organization. 212 | implicitCollectionDisseminationPolicy: 213 | # requiredPeerCount defines the minimum number of eligible peers to which the peer must successfully 214 | # disseminate private data for its own implicit collection during endorsement. Default value is 0. 215 | requiredPeerCount: 0 216 | # maxPeerCount defines the maximum number of eligible peers to which the peer will attempt to 217 | # disseminate private data for its own implicit collection during endorsement. Default value is 1. 218 | maxPeerCount: 1 219 | 220 | # Gossip state transfer related configuration 221 | state: 222 | # indicates whenever state transfer is enabled or not 223 | # default value is true, i.e. state transfer is active 224 | # and takes care to sync up missing blocks allowing 225 | # lagging peer to catch up to speed with rest network 226 | enabled: true 227 | # checkInterval interval to check whether peer is lagging behind enough to 228 | # request blocks via state transfer from another peer. 
229 | checkInterval: 10s 230 | # responseTimeout amount of time to wait for state transfer response from 231 | # other peers 232 | responseTimeout: 3s 233 | # batchSize the number of blocks to request via state transfer from another peer 234 | batchSize: 10 235 | # blockBufferSize reflects the size of the re-ordering buffer 236 | # which captures blocks and takes care to deliver them in order 237 | # down to the ledger layer. The actually buffer size is bounded between 238 | # 0 and 2*blockBufferSize, each channel maintains its own buffer 239 | blockBufferSize: 100 240 | # maxRetries maximum number of re-tries to ask 241 | # for single state transfer request 242 | maxRetries: 3 243 | 244 | # TLS Settings 245 | tls: 246 | # Require server-side TLS 247 | enabled: false 248 | # Require client certificates / mutual TLS. 249 | # Note that clients that are not configured to use a certificate will 250 | # fail to connect to the peer. 251 | clientAuthRequired: false 252 | # X.509 certificate used for TLS server 253 | cert: 254 | file: /home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/peers/peer0.org1.cow.com/tls/server.crt 255 | # Private key used for TLS server (and client if clientAuthEnabled 256 | # is set to true 257 | key: 258 | file: /home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/peers/peer0.org1.cow.com/tls/server.key 259 | # Trusted root certificate chain for tls.cert 260 | rootcert: 261 | file: /home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/peers/peer0.org1.cow.com/tls/ca.crt 262 | # Set of root certificate authorities used to verify client certificates 263 | clientRootCAs: 264 | files: 265 | - /home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/peers/peer0.org1.cow.com/tls/ca.crt 266 | # Private key used for TLS when making client connections. If 267 | # not set, peer.tls.key.file will be used instead 268 | clientKey: 269 | file: 270 | # X.509 certificate used for TLS when making client connections. 271 | # If not set, peer.tls.cert.file will be used instead 272 | clientCert: 273 | file: 274 | 275 | # Authentication contains configuration parameters related to authenticating 276 | # client messages 277 | authentication: 278 | # the acceptable difference between the current server time and the 279 | # client's time as specified in a client request message 280 | timewindow: 15m 281 | 282 | # Path on the file system where peer will store data (eg ledger). This 283 | # location must be access control protected to prevent unintended 284 | # modification that might corrupt the peer operations. 285 | fileSystemPath: /home/gopath/src/github.com/hyperledger/cow/org1/peer0 286 | 287 | # BCCSP (Blockchain crypto provider): Select which crypto implementation or 288 | # library to use 289 | BCCSP: 290 | Default: SW 291 | # Settings for the SW crypto provider (i.e. when DEFAULT: SW) 292 | SW: 293 | # TODO: The default Hash and Security level needs refactoring to be 294 | # fully configurable. Changing these defaults requires coordination 295 | # SHA2 is hardcoded in several places, not only BCCSP 296 | Hash: SHA2 297 | Security: 256 298 | # Location of Key Store 299 | FileKeyStore: 300 | # If "", defaults to 'mspConfigPath'/keystore 301 | KeyStore: 302 | # Settings for the PKCS#11 crypto provider (i.e. 
when DEFAULT: PKCS11) 303 | PKCS11: 304 | # Location of the PKCS11 module library 305 | Library: 306 | # Token Label 307 | Label: 308 | # User PIN 309 | Pin: 310 | Hash: 311 | Security: 312 | 313 | # Path on the file system where peer will find MSP local configurations 314 | mspConfigPath: /home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/peers/peer0.org1.cow.com/msp 315 | 316 | # Identifier of the local MSP 317 | # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- 318 | # Deployers need to change the value of the localMspId string. 319 | # In particular, the name of the local MSP ID of a peer needs 320 | # to match the name of one of the MSPs in each of the channel 321 | # that this peer is a member of. Otherwise this peer's messages 322 | # will not be identified as valid by other nodes. 323 | localMspId: Org1MSP 324 | 325 | # CLI common client config options 326 | client: 327 | # connection timeout 328 | connTimeout: 3s 329 | 330 | # Delivery service related config 331 | deliveryclient: 332 | # It sets the total time the delivery service may spend in reconnection 333 | # attempts until its retry logic gives up and returns an error 334 | reconnectTotalTimeThreshold: 3600s 335 | 336 | # It sets the delivery service <-> ordering service node connection timeout 337 | connTimeout: 3s 338 | 339 | # It sets the delivery service maximal delay between consecutive retries 340 | reConnectBackoffThreshold: 3600s 341 | 342 | # A list of orderer endpoint addresses which should be overridden 343 | # when found in channel configurations. 344 | addressOverrides: 345 | # - from: 346 | # to: 347 | # caCertsFile: 348 | # - from: 349 | # to: 350 | # caCertsFile: 351 | 352 | # Type for the local MSP - by default it's of type bccsp 353 | localMspType: bccsp 354 | 355 | # Used with Go profiling tools only in none production environment. In 356 | # production, it should be disabled (eg enabled: false) 357 | profile: 358 | enabled: false 359 | listenAddress: 0.0.0.0:6060 360 | 361 | # Handlers defines custom handlers that can filter and mutate 362 | # objects passing within the peer, such as: 363 | # Auth filter - reject or forward proposals from clients 364 | # Decorators - append or mutate the chaincode input passed to the chaincode 365 | # Endorsers - Custom signing over proposal response payload and its mutation 366 | # Valid handler definition contains: 367 | # - A name which is a factory method name defined in 368 | # core/handlers/library/library.go for statically compiled handlers 369 | # - library path to shared object binary for pluggable filters 370 | # Auth filters and decorators are chained and executed in the order that 371 | # they are defined. For example: 372 | # authFilters: 373 | # - 374 | # name: FilterOne 375 | # library: /opt/lib/filter.so 376 | # - 377 | # name: FilterTwo 378 | # decorators: 379 | # - 380 | # name: DecoratorOne 381 | # - 382 | # name: DecoratorTwo 383 | # library: /opt/lib/decorator.so 384 | # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. 385 | # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality 386 | # as the default ESCC. 387 | # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar 388 | # to auth filters and decorators. 
389 | # endorsers: 390 | # escc: 391 | # name: DefaultESCC 392 | # library: /etc/hyperledger/fabric/plugin/escc.so 393 | handlers: 394 | authFilters: 395 | - 396 | name: DefaultAuth 397 | - 398 | name: ExpirationCheck # This filter checks identity x509 certificate expiration 399 | decorators: 400 | - 401 | name: DefaultDecorator 402 | endorsers: 403 | escc: 404 | name: DefaultEndorsement 405 | library: 406 | validators: 407 | vscc: 408 | name: DefaultValidation 409 | library: 410 | 411 | # library: /etc/hyperledger/fabric/plugin/escc.so 412 | # Number of goroutines that will execute transaction validation in parallel. 413 | # By default, the peer chooses the number of CPUs on the machine. Set this 414 | # variable to override that choice. 415 | # NOTE: overriding this value might negatively influence the performance of 416 | # the peer so please change this value only if you know what you're doing 417 | validatorPoolSize: 418 | 419 | # The discovery service is used by clients to query information about peers, 420 | # such as - which peers have joined a certain channel, what is the latest 421 | # channel config, and most importantly - given a chaincode and a channel, 422 | # what possible sets of peers satisfy the endorsement policy. 423 | discovery: 424 | enabled: true 425 | # Whether the authentication cache is enabled or not. 426 | authCacheEnabled: true 427 | # The maximum size of the cache, after which a purge takes place 428 | authCacheMaxSize: 1000 429 | # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation 430 | authCachePurgeRetentionRatio: 0.75 431 | # Whether to allow non-admins to perform non channel scoped queries. 432 | # When this is false, it means that only peer admins can perform non channel scoped queries. 433 | orgMembersAllowedAccess: false 434 | 435 | # Limits is used to configure some internal resource limits. 436 | limits: 437 | # Concurrency limits the number of concurrently running requests to a service on each peer. 438 | # Currently this option is only applied to endorser service and deliver service. 439 | # When the property is missing or the value is 0, the concurrency limit is disabled for the service. 440 | concurrency: 441 | # endorserService limits concurrent requests to endorser service that handles chaincode deployment, query and invocation, 442 | # including both user chaincodes and system chaincodes. 443 | endorserService: 2500 444 | # deliverService limits concurrent event listeners registered to deliver service for blocks and transaction events. 445 | deliverService: 2500 446 | 447 | ############################################################################### 448 | # 449 | # VM section 450 | # 451 | ############################################################################### 452 | vm: 453 | 454 | # Endpoint of the vm management system. For docker can be one of the following in general 455 | # unix:///var/run/docker.sock 456 | # http://localhost:2375 457 | # https://localhost:2376 458 | endpoint: unix:///var/run/docker.sock 459 | 460 | # settings for docker vms 461 | docker: 462 | tls: 463 | enabled: false 464 | ca: 465 | file: docker/ca.crt 466 | cert: 467 | file: docker/tls.crt 468 | key: 469 | file: docker/tls.key 470 | 471 | # Enables/disables the standard out/err from chaincode containers for 472 | # debugging purposes 473 | attachStdout: false 474 | 475 | # Parameters on creating docker container. 
476 | # Container may be efficiently created using ipam & dns-server for cluster 477 | # NetworkMode - sets the networking mode for the container. Supported 478 | # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. 479 | # Dns - a list of DNS servers for the container to use. 480 | # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of 481 | # Docker Host Config are not supported and will not be used if set. 482 | # LogConfig - sets the logging driver (Type) and related options 483 | # (Config) for Docker. For more info, 484 | # https://docs.docker.com/engine/admin/logging/overview/ 485 | # Note: Set LogConfig using Environment Variables is not supported. 486 | hostConfig: 487 | NetworkMode: host 488 | Dns: 489 | - 172.17.0.1 490 | LogConfig: 491 | Type: json-file 492 | Config: 493 | max-size: "50m" 494 | max-file: "5" 495 | Memory: 2147483648 496 | 497 | ############################################################################### 498 | # 499 | # Chaincode section 500 | # 501 | ############################################################################### 502 | chaincode: 503 | 504 | # The id is used by the Chaincode stub to register the executing Chaincode 505 | # ID with the Peer and is generally supplied through ENV variables 506 | # the `path` form of ID is provided when installing the chaincode. 507 | # The `name` is used for all other requests and can be any string. 508 | id: 509 | path: 510 | name: 511 | 512 | # Generic builder environment, suitable for most chaincode types 513 | builder: $(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION) 514 | 515 | # Enables/disables force pulling of the base docker images (listed below) 516 | # during user chaincode instantiation. 517 | # Useful when using moving image tags (such as :latest) 518 | pull: false 519 | 520 | golang: 521 | # golang will never need more than baseos 522 | runtime: $(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION) 523 | 524 | # whether or not golang chaincode should be linked dynamically 525 | dynamicLink: false 526 | 527 | java: 528 | # This is an image based on java:openjdk-8 with addition compiler 529 | # tools added for java shim layer packaging. 530 | # This image is packed with shim layer libraries that are necessary 531 | # for Java chaincode runtime. 532 | runtime: $(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION) 533 | 534 | node: 535 | # This is an image based on node:$(NODE_VER)-alpine 536 | runtime: $(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION) 537 | 538 | # List of directories to treat as external builders and launchers for 539 | # chaincode. The external builder detection processing will iterate over the 540 | # builders in the order specified below. 541 | externalBuilders: [] 542 | # - path: /path/to/directory 543 | # name: descriptive-builder-name 544 | # environmentWhitelist: 545 | # - ENVVAR_NAME_TO_PROPAGATE_FROM_PEER 546 | # - GOPROXY 547 | 548 | # The maximum duration to wait for the chaincode build and install process 549 | # to complete. 550 | installTimeout: 300s 551 | 552 | # Timeout duration for starting up a container and waiting for Register 553 | # to come through. 554 | startuptimeout: 300s 555 | 556 | # Timeout duration for Invoke and Init calls to prevent runaway. 557 | # This timeout is used by all chaincodes in all the channels, including 558 | # system chaincodes. 559 | # Note that during Invoke, if the image is not available (e.g. 
being 560 | # cleaned up when in development environment), the peer will automatically 561 | # build the image, which might take more time. In production environment, 562 | # the chaincode image is unlikely to be deleted, so the timeout could be 563 | # reduced accordingly. 564 | executetimeout: 30s 565 | 566 | # There are 2 modes: "dev" and "net". 567 | # In dev mode, user runs the chaincode after starting peer from 568 | # command line on local machine. 569 | # In net mode, peer will run chaincode in a docker container. 570 | mode: net 571 | 572 | # keepalive in seconds. In situations where the communication goes through a 573 | # proxy that does not support keep-alive, this parameter will maintain connection 574 | # between peer and chaincode. 575 | # A value <= 0 turns keepalive off 576 | keepalive: 0 577 | 578 | # system chaincodes whitelist. To add system chaincode "myscc" to the 579 | # whitelist, add "myscc: enable" to the list below, and register in 580 | # chaincode/importsysccs.go 581 | system: 582 | _lifecycle: enable 583 | cscc: enable 584 | lscc: enable 585 | escc: enable 586 | vscc: enable 587 | qscc: enable 588 | 589 | # Logging section for the chaincode container 590 | logging: 591 | # Default level for all loggers within the chaincode container 592 | level: info 593 | # Override default level for the 'shim' logger 594 | shim: warning 595 | # Format for the chaincode container logs 596 | format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' 597 | 598 | ############################################################################### 599 | # 600 | # Ledger section - ledger configuration encompasses both the blockchain 601 | # and the state 602 | # 603 | ############################################################################### 604 | ledger: 605 | 606 | blockchain: 607 | 608 | state: 609 | # stateDatabase - options are "goleveldb", "CouchDB" 610 | # goleveldb - default state database stored in goleveldb. 611 | # CouchDB - store state database in CouchDB 612 | stateDatabase: goleveldb 613 | # Limit on the number of records to return per query 614 | totalQueryLimit: 100000 615 | couchDBConfig: 616 | # It is recommended to run CouchDB on the same server as the peer, and 617 | # not map the CouchDB container port to a server port in docker-compose. 618 | # Otherwise proper security must be provided on the connection between 619 | # CouchDB client (on the peer) and server. 620 | couchDBAddress: 127.0.0.1:5984 621 | # This username must have read and write authority on CouchDB 622 | username: 623 | # The password is recommended to pass as an environment variable 624 | # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). 625 | # If it is stored here, the file must be access control protected 626 | # to prevent unintended users from discovering the password. 627 | password: 628 | # Number of retries for CouchDB errors 629 | maxRetries: 3 630 | # Number of retries for CouchDB errors during peer startup 631 | maxRetriesOnStartup: 12 632 | # CouchDB request timeout (unit: duration, e.g. 20s) 633 | requestTimeout: 35s 634 | # Limit on the number of records per each CouchDB query 635 | # Note that chaincode queries are only bound by totalQueryLimit. 636 | # Internally the chaincode may execute multiple CouchDB queries, 637 | # each of size internalQueryLimit. 
638 | internalQueryLimit: 1000 639 | # Limit on the number of records per CouchDB bulk update batch 640 | maxBatchUpdateSize: 1000 641 | # Warm indexes after every N blocks. 642 | # This option warms any indexes that have been 643 | # deployed to CouchDB after every N blocks. 644 | # A value of 1 will warm indexes after every block commit, 645 | # to ensure fast selector queries. 646 | # Increasing the value may improve write efficiency of peer and CouchDB, 647 | # but may degrade query response time. 648 | warmIndexesAfterNBlocks: 1 649 | # Create the _global_changes system database 650 | # This is optional. Creating the global changes database will require 651 | # additional system resources to track changes and maintain the database 652 | createGlobalChangesDB: false 653 | # CacheSize denotes the maximum mega bytes (MB) to be allocated for the in-memory state 654 | # cache. Note that CacheSize needs to be a multiple of 32 MB. If it is not a multiple 655 | # of 32 MB, the peer would round the size to the next multiple of 32 MB. 656 | # To disable the cache, 0 MB needs to be assigned to the cacheSize. 657 | cacheSize: 64 658 | 659 | history: 660 | # enableHistoryDatabase - options are true or false 661 | # Indicates if the history of key updates should be stored. 662 | # All history 'index' will be stored in goleveldb, regardless if using 663 | # CouchDB or alternate database for the state. 664 | enableHistoryDatabase: true 665 | 666 | pvtdataStore: 667 | # the maximum db batch size for converting 668 | # the ineligible missing data entries to eligible missing data entries 669 | collElgProcMaxDbBatchSize: 5000 670 | # the minimum duration (in milliseconds) between writing 671 | # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries 672 | collElgProcDbBatchesInterval: 1000 673 | 674 | ############################################################################### 675 | # 676 | # Operations section 677 | # 678 | ############################################################################### 679 | operations: 680 | # host and port for the operations server 681 | listenAddress: 127.0.0.1:9443 682 | 683 | # TLS configuration for the operations endpoint 684 | tls: 685 | # TLS enabled 686 | enabled: false 687 | 688 | # path to PEM encoded server certificate for the operations server 689 | cert: 690 | file: 691 | 692 | # path to PEM encoded server key for the operations server 693 | key: 694 | file: 695 | 696 | # most operations service endpoints require client authentication when TLS 697 | # is enabled. clientAuthRequired requires client certificate authentication 698 | # at the TLS layer to access all resources. 
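        # As a quick sanity check (an illustration, not part of this repository),
        # the standard Fabric operations endpoints can be probed once the peer is
        # up, e.g. with TLS disabled as configured above:
        #
        #   curl http://127.0.0.1:9443/healthz
        #
        # (/metrics would additionally require the metrics provider below to be
        # set to "prometheus".)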
        clientAuthRequired: false

        # paths to PEM encoded ca certificates to trust for client authentication
        clientRootCAs:
            files: []

###############################################################################
#
#    Metrics section
#
###############################################################################
metrics:
    # metrics provider is one of statsd, prometheus, or disabled
    provider: disabled

    # statsd configuration
    statsd:
        # network type: tcp or udp
        network: udp

        # statsd server address
        address: 127.0.0.1:8125

        # the interval at which locally cached counters and gauges are pushed
        # to statsd; timings are pushed immediately
        writeInterval: 10s

        # prefix is prepended to all emitted statsd metrics
        prefix:
--------------------------------------------------------------------------------
/crypto-config.yaml:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------------
#
# "OrdererOrgs" - Definition of organizations managing orderer nodes
# ---------------------------------------------------------------------------
OrdererOrgs:
  # ---------------------------------------------------------------------------
  # Orderer
  # ---------------------------------------------------------------------------
  - Name: Orderer
    Domain: cow.com
    EnableNodeOUs: false

    # ---------------------------------------------------------------------------
    # "Specs" - See PeerOrgs below for complete description
    # ---------------------------------------------------------------------------
    Specs:
      - Hostname: orderer

# ---------------------------------------------------------------------------
# "PeerOrgs" - Definition of organizations managing peer nodes
# ---------------------------------------------------------------------------
PeerOrgs:
  # ---------------------------------------------------------------------------
  # Org1
  # ---------------------------------------------------------------------------
  - Name: Org1
    Domain: org1.cow.com
    EnableNodeOUs: false

    Template:
      Count: 3

    Users:
      Count: 4

  # ---------------------------------------------------------------------------
  # Org2: See "Org1" for full specification
  # ---------------------------------------------------------------------------
  - Name: Org2
    Domain: org2.cow.com
    EnableNodeOUs: false
    Template:
      Count: 3
    Users:
      Count: 4

  # ---------------------------------------------------------------------------
  # Org3: See "Org1" for full specification
  # ---------------------------------------------------------------------------
  - Name: Org3
    Domain: org3.cow.com
    EnableNodeOUs: false
    Template:
      Count: 3
    Users:
      Count: 4

--------------------------------------------------------------------------------
/fabricservice.js:
--------------------------------------------------------------------------------
var path = require('path');
var fs = require('fs');
var util = require('util');
var hfc = require('fabric-client');
var Peer = require('fabric-client/lib/Peer.js');
var EventHub = require('fabric-client/lib/ChannelEventHub.js');
var User = require('fabric-client/lib/User.js');
var crypto = require('crypto');
var FabricCAService = require('fabric-ca-client');

// logging
var log4js = require('log4js');
var logger = log4js.getLogger();
logger.level = "debug";

var channelid = "milkgen";

var tempdir = "/home/gopath/src/github.com/hyperledger/cow/nodejs/fabric-sdk-node-master/fabric-client-kvs";

let client = new hfc();
var channel = client.newChannel(channelid);
var order = client.newOrderer('grpc://127.0.0.1:7050');
channel.addOrderer(order);

var peer1 = client.newPeer('grpc://127.0.0.1:7051')
var peer2 = client.newPeer('grpc://127.0.0.1:8051')
var peer3 = client.newPeer('grpc://127.0.0.1:9051')
channel.addPeer(peer1);
channel.addPeer(peer2);
channel.addPeer(peer3);

// Query a chaincode on peer1 and return the proposal response payloads.
var queryCc = function (chaincodeid,func,chaincode_args) {
    return getOrgUser4Local().then((user)=>{

        var tx_id = client.newTransactionID();

        var request = {
            chaincodeId: chaincodeid,
            fcn: func,
            args: chaincode_args,
            txId: tx_id
        };
        console.log(request);

        return channel.queryByChaincode(request,peer1);

    },(err)=>{
        console.log('error',err);

    }).then((sendtransresult)=>{
        console.log('query result:');
        console.log(sendtransresult);
        return sendtransresult;

    },(err)=>{
        console.log('error',err);
    });
}

// Send a transaction proposal to the endorsing peers and, if every
// endorsement succeeded, submit the transaction to the orderer.
var sendTransaction = function (chaincodeid,func,chaincode_args){

    var tx_id = null;

    return getOrgUser4Local().then((user)=>{
        tx_id = client.newTransactionID();
        var request = {
            chaincodeId: chaincodeid,
            fcn: func,
            args: chaincode_args,
            chainId: channelid,
            txId: tx_id
        };
        console.log(request);
        return channel.sendTransactionProposal(request);
    },(err)=>{
        console.log('error',err);
    }).then((chaincodeinvokresult)=>{
        var proposalResponses = chaincodeinvokresult[0];
        var proposal = chaincodeinvokresult[1];
        var header = chaincodeinvokresult[2];
        var all_good = true;

        for(var i in proposalResponses) {
            let one_good = false;
            if(proposalResponses && proposalResponses[i].response && proposalResponses[i].response.status === 200){
                one_good = true;
                console.info('transaction proposal was good');
            }else{
                console.error('transaction proposal was bad');
            }
            all_good = all_good && one_good;
        }
        if(all_good){
            console.info(util.format(
                'Successfully sent Proposal and received ProposalResponse: Status - %s,message - "%s",metadata - "%s",endorsement signature:%s',
                proposalResponses[0].response.status,proposalResponses[0].response.message,
                proposalResponses[0].response.payload,
                proposalResponses[0].endorsement.signature));

            var request = {
                proposalResponses: proposalResponses,
                proposal: proposal,
                header: header
            };

            var transactionID = tx_id.getTransactionID();
            return channel.sendTransaction(request);
        }
    },(err)=>{
        console.log('error',err);
    }).then((sendtransresult)=>{
        return sendtransresult;
    },(err)=>{
        console.log('error',err);
    });
}

// Build an Admin@org1 user context from locally stored key material.
function getOrgUser4Local(){

    var keyPath = "/home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/users/Admin@org1.cow.com/msp/keystore";

Buffer.from(readAllFiles(keyPath)[0]).toString(); 126 | //var keyPEM = "/home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/users/Admin@org1.cow.com/msp/keystore/priv_sk"; 127 | var keyPEM = "-----BEGIN PRIVATE KEY-----\nMIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgwrZTJ9uoPcU1Gvr3\nteSyPOB0Z5p6xBbnhBTsIUMmcVChRANCAARCTpqoZjAZljiiWOUdxKjbSdulu1I0\nC0Ru83vED+ZVrqnjbrvo1XM5vbf6QtIboCcJXTt1Q0uMmbB/YGtLvZbY\n-----END PRIVATE KEY-----" 128 | var certPath = "/home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/users/Admin@org1.cow.com/msp/signcerts"; 129 | //var certPEM = readAllFiles(certPath)[0].toString(); 130 | //var certPEM = "/home/gopath/src/github.com/hyperledger/cow/crypto-config/peerOrganizations/org1.cow.com/users/Admin@org1.cow.com/msp/signcerts/Admin@org1.cow.com-cert.pem"; 131 | var certPEM = "-----BEGIN CERTIFICATE-----\nMIICDTCCAbSgAwIBAgIRAPzM6zCN0fPlmgf2NeMgnjYwCgYIKoZIzj0EAwIwazEL\nMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG\ncmFuY2lzY28xFTATBgNVBAoTDG9yZzEuY293LmNvbTEYMBYGA1UEAxMPY2Eub3Jn\nMS5jb3cuY29tMB4XDTIwMDUxOTEzNDEwMFoXDTMwMDUxNzEzNDEwMFowVzELMAkG\nA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBGcmFu\nY2lzY28xGzAZBgNVBAMMEkFkbWluQG9yZzEuY293LmNvbTBZMBMGByqGSM49AgEG\nCCqGSM49AwEHA0IABEJOmqhmMBmWOKJY5R3EqNtJ26W7UjQLRG7ze8QP5lWuqeNu\nu+jVczm9t/pC0hugJwldO3VDS4yZsH9ga0u9ltijTTBLMA4GA1UdDwEB/wQEAwIH\ngDAMBgNVHRMBAf8EAjAAMCsGA1UdIwQkMCKAICEjqFB5hX+pmGwqJYztB63LI9nx\nk/OPaa71jt9lZz3QMAoGCCqGSM49BAMCA0cAMEQCIAV7w7kNvemcTVQyKeYX4823\nE2BqClcweY29sWtKekS4AiAsXnpA0dfOebXLv1j6/nD6SxQzd0g77tbtBOu9x2PH\nBA==\n-----END CERTIFICATE-----" 132 | 133 | var useropt = { 134 | username: 'user87', 135 | mspid: 'Org1MSP', 136 | cryptoContent: { 137 | privateKeyPEM: keyPEM, 138 | signedCertPEM: certPEM 139 | } 140 | } 141 | 142 | return hfc.newDefaultKeyValueStore({ 143 | path:tempdir 144 | }).then((store)=>{ 145 | client.setStateStore(store); 146 | //console.log("1111111"); 147 | //client.createUser(useropt); 148 | //console.log("11111"); 149 | return client.createUser(useropt); 150 | 151 | }); 152 | }; 153 | 154 | function readAllFiles(dir) { 155 | console.info(dir); 156 | var files = fs.readdirSync(dir); 157 | var certs = []; 158 | files.forEach((file_name)=>{ 159 | let file_path = path.join(dir, file_name); 160 | let data = fs.readFileSync(file_path); 161 | certs.push(data); 162 | }); 163 | return certs; 164 | } 165 | 166 | exports.sendTransaction = sendTransaction; 167 | exports.queryCc = queryCc; 168 | -------------------------------------------------------------------------------- /mainorigin.js: -------------------------------------------------------------------------------- 1 | var co = require('co'); 2 | var fabricservice = require('./fabricservice.js'); 3 | var express = require('express'); 4 | 5 | var app = express(); 6 | 7 | var cowid = "cow_001"; 8 | var machiningid = "machining_002"; 9 | var milk_bottle = "milk_bottle_002"; 10 | 11 | var cow_cc_name = "origin_dairyfarm"; 12 | var machining_cc_name = "origin_machining"; 13 | var milkbottle_cc_name = "origin_salesterminal"; 14 | 15 | //var channelid = "milkgen"; 16 | //for test 17 | app.get('/test',function(req,res){ 18 | 19 | co(function *(){ 20 | 21 | //logger.debug("daasdasdasdas"); 22 | console.info("FOR TEST"); 23 | res.send("test res!!!") 24 | }).catch((err)=>{ 25 | res.send(err); 26 | }) 27 | }); 28 | 29 | app.get('/init',function(req,res){ 30 | 31 | co(function *(){ 32 | //logger.debug("daasdasdasdas"); 33 |
console.info("FOR TEST"); 34 | var diaryfarminitresult = yield fabricservice.sendTransaction(cow_cc_name,"invoke",["putvalue",cowid,"food"]); 35 | var machininginitresult = yield fabricservice.sendTransaction(machining_cc_name,"invoke",["putvalue",machiningid,cowid]); 36 | var salesterminalinitresult = yield fabricservice.sendTransaction(milkbottle_cc_name,"invoke",["putvalue",milk_bottle,machiningid]); 37 | 38 | for(let i = 0;i < diaryfarminitresult.length;i++){ 39 | console.info(diaryfarminitresult[i]); 40 | } 41 | 42 | res.send("init success"); 43 | 44 | }).catch((err)=>{ 45 | res.send(err); 46 | }) 47 | }); 48 | 49 | //Dairy farm operations 50 | 51 | app.get('/dairyfarm',function(req,res){ 52 | 53 | co(function* () { 54 | var parm = req.query.parms; 55 | console.info("FOR TEST"); 56 | var chaincodequeryresult = yield fabricservice.sendTransaction(cow_cc_name,"invoke",["putvalue",cowid,parm]); 57 | //console.info(chaincodequeryresult.length); 58 | /*for (let i=0;i < chaincodequeryresult.length;i++){ 59 | console.info(chaincodequeryresult[i]); 60 | }*/ 61 | 62 | res.send(chaincodequeryresult); 63 | 64 | 65 | }).catch((err)=>{ 66 | res.send(err); 67 | }) 68 | }); 69 | 70 | 71 | //Machining workshop operations 72 | 73 | app.get('/machining',function(req,res){ 74 | co(function*(){ 75 | 76 | var parm = req.query.parms; 77 | 78 | var chaincodequeryresult = yield fabricservice.sendTransaction(machining_cc_name,"invoke",["putvalue",machiningid,parm]); 79 | 80 | for(let i=0;i < chaincodequeryresult.length;i++){ 81 | console.info(chaincodequeryresult[i]); 82 | } 83 | 84 | res.send(chaincodequeryresult); 85 | 86 | }).catch((err)=>{ 87 | res.send(err); 88 | }) 89 | }); 90 | 91 | //Sales terminal operations 92 | 93 | app.get('/salesterminal',function(req,res){ 94 | co(function*(){ 95 | 96 | var parm = req.query.parms; 97 | 98 | var chaincodequeryresult = yield fabricservice.sendTransaction(milkbottle_cc_name,"invoke",["putvalue",milk_bottle,parm]); 99 | 100 | for (let i = 0;i < chaincodequeryresult.length;i++){ 101 | console.info(chaincodequeryresult[i]); 102 | } 103 | 104 | res.send(chaincodequeryresult); 105 | 106 | }).catch((err)=>{ 107 | res.send(err); 108 | }) 109 | }); 110 | 111 | 112 | //Client queries the milk history 113 | 114 | 115 | app.get('/getmilkhistory',function(req,res){ 116 | co(function*(){ 117 | 118 | var chaincodequeryresult = yield fabricservice.queryCc(milkbottle_cc_name,"invoke",["getmilkhistory",milk_bottle,"a"]); 119 | 120 | for (let i = 0;i < chaincodequeryresult.length;i++){ 121 | console.info(chaincodequeryresult[i].toString()); 122 | } 123 | 124 | res.send(chaincodequeryresult[0].toString()); 125 | }).catch((err)=>{ 126 | res.send(err); 127 | }) 128 | }); 129 | 130 | //Start the HTTP service 131 | var server = app.listen(3000,function(){ 132 | var host = server.address().address; 133 | var port = server.address().port; 134 | 135 | 136 | console.log('cow app listening at http://%s:%s',host,port); 137 | }); 138 | 139 | 140 | //Register exception handlers 141 | 142 | process.on('unhandledRejection',function(err){ 143 | console.error(err.stack); 144 | }); 145 | 146 | process.on('uncaughtException',console.error); 147 | -------------------------------------------------------------------------------- /orderer.yaml: -------------------------------------------------------------------------------- 1 | # Copyright IBM Corp. All Rights Reserved. 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | # 5 | 6 | --- 7 | ################################################################################ 8 | # 9 | # Orderer Configuration 10 | # 11 | # - This controls the type and configuration of the orderer. 12 | # 13 | ################################################################################ 14 | General: 15 | # Listen address: The IP on which to bind to listen. 16 | ListenAddress: 127.0.0.1 17 | 18 | # Listen port: The port on which to bind to listen. 19 | ListenPort: 7050 20 | 21 | # TLS: TLS settings for the GRPC server. 22 | TLS: 23 | Enabled: false 24 | # PrivateKey governs the file location of the private key of the TLS certificate. 25 | PrivateKey: /home/gopath/src/github.com/hyperledger/cow/crypto-config/ordererOrganizations/cow.com/orderers/orderer.cow.com/tls/server.key 26 | # Certificate governs the file location of the server TLS certificate.
27 | Certificate: /home/gopath/src/github.com/hyperledger/cow/crypto-config/ordererOrganizations/cow.com/orderers/orderer.cow.com/tls/server.crt 28 | RootCAs: 29 | - /home/gopath/src/github.com/hyperledger/cow/crypto-config/ordererOrganizations/cow.com/orderers/orderer.cow.com/tls/ca.crt 30 | ClientAuthRequired: false 31 | ClientRootCAs: 32 | # Keepalive settings for the GRPC server. 33 | Keepalive: 34 | # ServerMinInterval is the minimum permitted time between client pings. 35 | # If clients send pings more frequently, the server will 36 | # disconnect them. 37 | ServerMinInterval: 60s 38 | # ServerInterval is the time between pings to clients. 39 | ServerInterval: 7200s 40 | # ServerTimeout is the duration the server waits for a response from 41 | # a client before closing the connection. 42 | ServerTimeout: 20s 43 | # Cluster settings for ordering service nodes that communicate with other ordering service nodes 44 | # such as Raft based ordering service. 45 | Cluster: 46 | # SendBufferSize is the maximum number of messages in the egress buffer. 47 | # Consensus messages are dropped if the buffer is full, and transaction 48 | # messages are waiting for space to be freed. 49 | SendBufferSize: 10 50 | # ClientCertificate governs the file location of the client TLS certificate 51 | # used to establish mutual TLS connections with other ordering service nodes. 52 | ClientCertificate: 53 | # ClientPrivateKey governs the file location of the private key of the client TLS certificate. 54 | ClientPrivateKey: 55 | # The below 4 properties should be either set together, or be unset together. 56 | # If they are set, then the orderer node uses a separate listener for intra-cluster 57 | # communication. If they are unset, then the general orderer listener is used. 58 | # This is useful if you want to use a different TLS server certificates on the 59 | # client-facing and the intra-cluster listeners. 60 | 61 | # ListenPort defines the port on which the cluster listens to connections. 62 | ListenPort: 63 | # ListenAddress defines the IP on which to listen to intra-cluster communication. 64 | ListenAddress: 65 | # ServerCertificate defines the file location of the server TLS certificate used for intra-cluster 66 | # communication. 67 | ServerCertificate: 68 | # ServerPrivateKey defines the file location of the private key of the TLS certificate. 69 | ServerPrivateKey: 70 | 71 | # Bootstrap method: The method by which to obtain the bootstrap block 72 | # system channel is specified. The option can be one of: 73 | # "file" - path to a file containing the genesis block or config block of system channel 74 | # "none" - allows an orderer to start without a system channel configuration 75 | BootstrapMethod: file 76 | 77 | # Bootstrap file: The file containing the bootstrap block to use when 78 | # initializing the orderer system channel and BootstrapMethod is set to 79 | # "file". The bootstrap file can be the genesis block, and it can also be 80 | # a config block for late bootstrap of some consensus methods like Raft. 81 | # Generate a genesis block by updating $FABRIC_CFG_PATH/configtx.yaml and 82 | # using configtxgen command with "-outputBlock" option. 83 | # Defaults to file "genesisblock" (in $FABRIC_CFG_PATH directory) if not specified. 84 | BootstrapFile: /home/gopath/src/github.com/hyperledger/cow/order/cowgenesis.block 85 | 86 | # LocalMSPDir is where to find the private crypto material needed by the 87 | # orderer. 
It is set relative here as a default for dev environments but 88 | # should be changed to the real location in production. 89 | LocalMSPDir: /home/gopath/src/github.com/hyperledger/cow/crypto-config/ordererOrganizations/cow.com/orderers/orderer.cow.com/msp 90 | 91 | # LocalMSPID is the identity to register the local MSP material with the MSP 92 | # manager. IMPORTANT: The local MSP ID of an orderer needs to match the MSP 93 | # ID of one of the organizations defined in the orderer system channel's 94 | # /Channel/Orderer configuration. The sample organization defined in the 95 | # sample configuration provided has an MSP ID of "SampleOrg". 96 | LocalMSPID: OrdererMSP 97 | 98 | # Enable an HTTP service for Go "pprof" profiling as documented at: 99 | # https://golang.org/pkg/net/http/pprof 100 | Profile: 101 | Enabled: false 102 | Address: 0.0.0.0:6060 103 | 104 | # BCCSP configures the blockchain crypto service providers. 105 | BCCSP: 106 | # Default specifies the preferred blockchain crypto service provider 107 | # to use. If the preferred provider is not available, the software 108 | # based provider ("SW") will be used. 109 | # Valid providers are: 110 | # - SW: a software based crypto provider 111 | # - PKCS11: a CA hardware security module crypto provider. 112 | Default: SW 113 | 114 | # SW configures the software based blockchain crypto provider. 115 | SW: 116 | # TODO: The default Hash and Security level needs refactoring to be 117 | # fully configurable. Changing these defaults requires coordination 118 | # SHA2 is hardcoded in several places, not only BCCSP 119 | Hash: SHA2 120 | Security: 256 121 | # Location of key store. If this is unset, a location will be 122 | # chosen using: 'LocalMSPDir'/keystore 123 | FileKeyStore: 124 | KeyStore: 125 | 126 | # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) 127 | PKCS11: 128 | # Location of the PKCS11 module library 129 | Library: 130 | # Token Label 131 | Label: 132 | # User PIN 133 | Pin: 134 | Hash: 135 | Security: 136 | FileKeyStore: 137 | KeyStore: 138 | 139 | # Authentication contains configuration parameters related to authenticating 140 | # client messages 141 | Authentication: 142 | # the acceptable difference between the current server time and the 143 | # client's time as specified in a client request message 144 | TimeWindow: 15m 145 | 146 | 147 | ################################################################################ 148 | # 149 | # SECTION: File Ledger 150 | # 151 | # - This section applies to the configuration of the file or json ledgers. 152 | # 153 | ################################################################################ 154 | FileLedger: 155 | 156 | # Location: The directory to store the blocks in. 157 | # NOTE: If this is unset, a new temporary location will be chosen every time 158 | # the orderer is restarted, using the prefix specified by Prefix. 159 | Location: /home/gopath/src/github.com/hyperledger/cow/orderer 160 | 161 | # The prefix to use when generating a ledger directory in temporary space. 162 | # Otherwise, this value is ignored. 163 | Prefix: hyperledger-fabric-ordererledger 164 | 165 | ################################################################################ 166 | # 167 | # SECTION: Kafka 168 | # 169 | # - This section applies to the configuration of the Kafka-based orderer, and 170 | # its interaction with the Kafka cluster. 
171 | # 172 | ################################################################################ 173 | Kafka: 174 | 175 | # Retry: What do if a connection to the Kafka cluster cannot be established, 176 | # or if a metadata request to the Kafka cluster needs to be repeated. 177 | Retry: 178 | # When a new channel is created, or when an existing channel is reloaded 179 | # (in case of a just-restarted orderer), the orderer interacts with the 180 | # Kafka cluster in the following ways: 181 | # 1. It creates a Kafka producer (writer) for the Kafka partition that 182 | # corresponds to the channel. 183 | # 2. It uses that producer to post a no-op CONNECT message to that 184 | # partition 185 | # 3. It creates a Kafka consumer (reader) for that partition. 186 | # If any of these steps fail, they will be re-attempted every 187 | # for a total of , and then every 188 | # for a total of until they succeed. 189 | # Note that the orderer will be unable to write to or read from a 190 | # channel until all of the steps above have been completed successfully. 191 | ShortInterval: 5s 192 | ShortTotal: 10m 193 | LongInterval: 5m 194 | LongTotal: 12h 195 | # Affects the socket timeouts when waiting for an initial connection, a 196 | # response, or a transmission. See Config.Net for more info: 197 | # https://godoc.org/github.com/Shopify/sarama#Config 198 | NetworkTimeouts: 199 | DialTimeout: 10s 200 | ReadTimeout: 10s 201 | WriteTimeout: 10s 202 | # Affects the metadata requests when the Kafka cluster is in the middle 203 | # of a leader election.See Config.Metadata for more info: 204 | # https://godoc.org/github.com/Shopify/sarama#Config 205 | Metadata: 206 | RetryBackoff: 250ms 207 | RetryMax: 3 208 | # What to do if posting a message to the Kafka cluster fails. See 209 | # Config.Producer for more info: 210 | # https://godoc.org/github.com/Shopify/sarama#Config 211 | Producer: 212 | RetryBackoff: 100ms 213 | RetryMax: 3 214 | # What to do if reading from the Kafka cluster fails. See 215 | # Config.Consumer for more info: 216 | # https://godoc.org/github.com/Shopify/sarama#Config 217 | Consumer: 218 | RetryBackoff: 2s 219 | # Settings to use when creating Kafka topics. Only applies when 220 | # Kafka.Version is v0.10.1.0 or higher 221 | Topic: 222 | # The number of Kafka brokers across which to replicate the topic 223 | ReplicationFactor: 3 224 | # Verbose: Enable logging for interactions with the Kafka cluster. 225 | Verbose: false 226 | 227 | # TLS: TLS settings for the orderer's connection to the Kafka cluster. 228 | TLS: 229 | 230 | # Enabled: Use TLS when connecting to the Kafka cluster. 231 | Enabled: false 232 | 233 | # PrivateKey: PEM-encoded private key the orderer will use for 234 | # authentication. 235 | PrivateKey: 236 | # As an alternative to specifying the PrivateKey here, uncomment the 237 | # following "File" key and specify the file name from which to load the 238 | # value of PrivateKey. 239 | #File: path/to/PrivateKey 240 | 241 | # Certificate: PEM-encoded signed public key certificate the orderer will 242 | # use for authentication. 243 | Certificate: 244 | # As an alternative to specifying the Certificate here, uncomment the 245 | # following "File" key and specify the file name from which to load the 246 | # value of Certificate. 247 | #File: path/to/Certificate 248 | 249 | # RootCAs: PEM-encoded trusted root certificates used to validate 250 | # certificates from the Kafka cluster. 
251 | RootCAs: 252 | # As an alternative to specifying the RootCAs here, uncomment the 253 | # following "File" key and specify the file name from which to load the 254 | # value of RootCAs. 255 | #File: path/to/RootCAs 256 | 257 | # SASLPlain: Settings for using SASL/PLAIN authentication with Kafka brokers 258 | SASLPlain: 259 | # Enabled: Use SASL/PLAIN to authenticate with Kafka brokers 260 | Enabled: false 261 | # User: Required when Enabled is set to true 262 | User: 263 | # Password: Required when Enabled is set to true 264 | Password: 265 | 266 | # Kafka protocol version used to communicate with the Kafka cluster brokers 267 | # (defaults to 0.10.2.0 if not specified) 268 | Version: 269 | 270 | ################################################################################ 271 | # 272 | # Debug Configuration 273 | # 274 | # - This controls the debugging options for the orderer 275 | # 276 | ################################################################################ 277 | Debug: 278 | 279 | # BroadcastTraceDir when set will cause each request to the Broadcast service 280 | # for this orderer to be written to a file in this directory 281 | BroadcastTraceDir: 282 | 283 | # DeliverTraceDir when set will cause each request to the Deliver service 284 | # for this orderer to be written to a file in this directory 285 | DeliverTraceDir: 286 | 287 | ################################################################################ 288 | # 289 | # Operations Configuration 290 | # 291 | # - This configures the operations server endpoint for the orderer 292 | # 293 | ################################################################################ 294 | Operations: 295 | # host and port for the operations server 296 | ListenAddress: 127.0.0.1:8443 297 | 298 | # TLS configuration for the operations endpoint 299 | TLS: 300 | # TLS enabled 301 | Enabled: false 302 | 303 | # Certificate is the location of the PEM encoded TLS certificate 304 | Certificate: 305 | 306 | # PrivateKey points to the location of the PEM-encoded key 307 | PrivateKey: 308 | 309 | # Most operations service endpoints require client authentication when TLS 310 | # is enabled. ClientAuthRequired requires client certificate authentication 311 | # at the TLS layer to access all resources. 312 | ClientAuthRequired: false 313 | 314 | # Paths to PEM encoded ca certificates to trust for client authentication 315 | ClientRootCAs: [] 316 | 317 | ################################################################################ 318 | # 319 | # Metrics Configuration 320 | # 321 | # - This configures metrics collection for the orderer 322 | # 323 | ################################################################################ 324 | Metrics: 325 | # The metrics provider is one of statsd, prometheus, or disabled 326 | Provider: disabled 327 | 328 | # The statsd configuration 329 | Statsd: 330 | # network type: tcp or udp 331 | Network: udp 332 | 333 | # the statsd server address 334 | Address: 127.0.0.1:8125 335 | 336 | # The interval at which locally cached counters and gauges are pushed 337 | # to statsd; timings are pushed immediately 338 | WriteInterval: 30s 339 | 340 | # The prefix is prepended to all emitted statsd metrics 341 | Prefix: 342 | 343 | ################################################################################ 344 | # 345 | # Consensus Configuration 346 | # 347 | # - This section contains config options for a consensus plugin. 
It is opaque 348 | # to orderer, and completely up to consensus implementation to make use of. 349 | # 350 | ################################################################################ 351 | Consensus: 352 | # The allowed key-value pairs here depend on consensus plugin. For etcd/raft, 353 | # we use following options: 354 | 355 | # WALDir specifies the location at which Write Ahead Logs for etcd/raft are 356 | # stored. Each channel will have its own subdir named after channel ID. 357 | WALDir: /var/hyperledger/production/orderer/etcdraft/wal 358 | 359 | # SnapDir specifies the location at which snapshots for etcd/raft are 360 | # stored. Each channel will have its own subdir named after channel ID. 361 | SnapDir: /var/hyperledger/production/orderer/etcdraft/snapshot 362 | -------------------------------------------------------------------------------- /origin_dairyfarm.go: -------------------------------------------------------------------------------- 1 | // origin_dairyfarm.go 2 | package main 3 | 4 | import ( 5 | "encoding/json" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/hyperledger/fabric-chaincode-go/shim" 10 | "github.com/hyperledger/fabric-protos-go/peer" 11 | ) 12 | 13 | type dairyfarm struct{} 14 | 15 | func (t *dairyfarm) Init(stub shim.ChaincodeStubInterface) peer.Response { 16 | return shim.Success([]byte("suceess invok and Not opter!!!!!!")) 17 | } 18 | func (t *dairyfarm) Invoke(stub shim.ChaincodeStubInterface) peer.Response { 19 | 20 | _, args := stub.GetFunctionAndParameters() 21 | 22 | var opttype = args[0] 23 | var assetname = args[1] 24 | var optcontont = args[2] 25 | 26 | fmt.Printf("parm is %s %s %s \n ", opttype, assetname, optcontont) 27 | 28 | if opttype == "putvalue" { //设值 29 | 30 | stub.PutState(assetname, []byte(optcontont)) 31 | return shim.Success([]byte("success put " + optcontont)) 32 | } else if opttype == "getlastvalue" { //取值 33 | var keyvalue []byte 34 | var err error 35 | keyvalue, err = stub.GetState(assetname) 36 | 37 | if err != nil { 38 | 39 | return shim.Error("find error!") 40 | } 41 | 42 | return shim.Success(keyvalue) 43 | } else if opttype == "gethistory" { 44 | keysIter, err := stub.GetHistoryForKey(assetname) 45 | 46 | if err != nil { 47 | return shim.Error(fmt.Sprintf("GetHistoryForKey failed.Error accessing state %s", err)) 48 | } 49 | defer keysIter.Close() 50 | var keys []string 51 | for keysIter.HasNext() { 52 | response, iterErr := keysIter.Next() 53 | if iterErr != nil { 54 | return shim.Error(fmt.Sprintf("GetHistoryForKey operation failed.Error accessing state %s", err)) 55 | } 56 | //交易编号 57 | txid := response.TxId 58 | //交易的值 59 | txvalue := response.Value 60 | //当前交易的状态 61 | txstatus := response.IsDelete 62 | //交易发生的时间戳 63 | txtimestamp := response.Timestamp 64 | 65 | tm := time.Unix(txtimestamp.Seconds, 0) 66 | datestr := tm.Format("2006-01-02 03:04:05 PM") 67 | 68 | fmt.Printf("Tx info - txid:%s value: %s if delete %t datetime:%s\n", txid, string(txvalue), txstatus, datestr) 69 | keys = append(keys, string(txvalue)+":"+datestr) 70 | } 71 | 72 | jsonKeys, err := json.Marshal(keys) 73 | if err != nil { 74 | return shim.Error(fmt.Sprintf("query operation failed.Error marshaling JSON :%s", err)) 75 | } 76 | 77 | return shim.Success(jsonKeys) 78 | } else { 79 | 80 | return shim.Success([]byte("success invoke and no operation !!!!!!!! 
")) 81 | } 82 | } 83 | 84 | func main() { 85 | err := shim.Start(new(dairyfarm)) 86 | if err != nil { 87 | fmt.Printf("Error starting Simple chaincode:%s", err) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /origin_machining.go: -------------------------------------------------------------------------------- 1 | // origin_machining.go 2 | package main 3 | 4 | import ( 5 | "encoding/json" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/hyperledger/fabric-chaincode-go/shim" 10 | "github.com/hyperledger/fabric-protos-go/peer" 11 | ) 12 | 13 | type machining struct{} 14 | 15 | func (t *machining) Init(stub shim.ChaincodeStubInterface) peer.Response { 16 | return shim.Success([]byte("success invok and Not opter !!!!1!! ")) 17 | } 18 | 19 | func (t *machining) Invoke(stub shim.ChaincodeStubInterface) peer.Response { 20 | _, args := stub.GetFunctionAndParameters() 21 | 22 | var opttype = args[0] 23 | var assetname = args[1] 24 | var optcontont = args[2] 25 | 26 | fmt.Printf("parm is %s %s %s \n", opttype,assetname, optcontont) 27 | if opttype == "putvalue" { 28 | stub.PutState(assetname, []byte(optcontont)) 29 | return shim.Success([]byte("success put " + optcontont)) 30 | } else if opttype == "getlastvalue" { 31 | var keyvalue []byte 32 | var err error 33 | keyvalue, err = stub.GetState(assetname) 34 | 35 | if err != nil { 36 | return shim.Error("find error!") 37 | } 38 | 39 | return shim.Success(keyvalue) 40 | } else if opttype == "gethistory" { 41 | keysIter, err := stub.GetHistoryForKey(assetname) 42 | 43 | if err != nil { 44 | return shim.Error(fmt.Sprintf("GetHistoryForKey fialed.Error accessing state: %s", err)) 45 | } 46 | defer keysIter.Close() 47 | var keys []string 48 | for keysIter.HasNext() { 49 | response, iterErr := keysIter.Next() 50 | if iterErr != nil { 51 | return shim.Error(fmt.Sprintf("GetHistoryForKey operation failed.Error accessing state: %s", err)) 52 | } 53 | txid := response.TxId 54 | txvalue := response.Value 55 | txstatus := response.IsDelete 56 | txtimestamp := response.Timestamp 57 | 58 | tm := time.Unix(txtimestamp.Seconds, 0) 59 | datestr := tm.Format("2006-01-02 03:04:05 PM") 60 | 61 | fmt.Printf("Tx info - txid:%s value:%s if delete:%t datatime:%s \n", txid, string(txvalue), txstatus, datestr) 62 | keys = append(keys, string(txvalue)+":"+datestr) 63 | 64 | } 65 | jsonKeys, err := json.Marshal(keys) 66 | if err != nil { 67 | return shim.Error(fmt.Sprintf("query operation failed.Error marshaling JSON: %s", err)) 68 | } 69 | return shim.Success(jsonKeys) 70 | } else { 71 | return shim.Success([]byte("success invok and No operation !!!!!!!!")) 72 | } 73 | 74 | } 75 | 76 | func main() { 77 | err := shim.Start(new(machining)) 78 | if err != nil { 79 | fmt.Printf("Error starting Simple chaincode: %s", err) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /origin_salesterminal.go: -------------------------------------------------------------------------------- 1 | // origin_salesterminal 2 | package main 3 | 4 | import ( 5 | "encoding/json" 6 | "fmt" 7 | "strings" 8 | "time" 9 | 10 | "github.com/hyperledger/fabric-chaincode-go/shim" 11 | "github.com/hyperledger/fabric-protos-go/peer" 12 | ) 13 | 14 | type salesterminal struct{} 15 | 16 | func (t *salesterminal) Init(stub shim.ChaincodeStubInterface) peer.Response { 17 | _, args := stub.GetFunctionAndParameters() 18 | 19 | var a_parm = args[0] 20 | var b_parm = args[1] 21 | var c_parm = args[2] 22 | 23 | fmt.Printf(" parm is 
%s %s %s \n", a_parm, b_parm, c_parm) 24 | 25 | return shim.Success([]byte("success invok and Not opter !!!!!!")) 26 | 27 | } 28 | 29 | func (t *salesterminal) Invoke(stub shim.ChaincodeStubInterface) peer.Response { 30 | _, args := stub.GetFunctionAndParameters() 31 | 32 | var opttype = args[0] 33 | var assetname = args[1] 34 | var optcontont = args[2] 35 | 36 | fmt.Printf("parm is %s %s %s \n", opttype, assetname, optcontont) 37 | 38 | if opttype == "putvalue" { 39 | stub.PutState(assetname, []byte(optcontont)) 40 | return shim.Success([]byte("success put " + optcontont)) 41 | } else if opttype == "getlastvalue" { 42 | var keyvalue []byte 43 | var err error 44 | keyvalue, err = stub.GetState(assetname) 45 | 46 | if err != nil { 47 | return shim.Error("find error!") 48 | } 49 | return shim.Success(keyvalue) 50 | } else if opttype == "gethistory" { 51 | keysIter, err := stub.GetHistoryForKey(assetname) 52 | 53 | if err != nil { 54 | return shim.Error(fmt.Sprintf("GetHistoryForKey failed, Error accessing state: %s ", err)) 55 | } 56 | defer keysIter.Close() 57 | var keys []string 58 | 59 | for keysIter.HasNext() { 60 | response, iterErr := keysIter.Next() 61 | if iterErr != nil { 62 | return shim.Error(fmt.Sprintf("GetHistoryForKey opteration failed. Error accessing state :%s", err)) 63 | } 64 | txid := response.TxId 65 | txvalue := response.Value 66 | txstatus := response.IsDelete 67 | txtimestamp := response.Timestamp 68 | 69 | tm := time.Unix(txtimestamp.Seconds, 0) 70 | datestr := tm.Format("2006-01-02 03:04:05 PM") 71 | 72 | fmt.Printf("Tx info - rxid:%s value: %s if delete: %t datetime : %s\n", txid, string(txvalue), txstatus, datestr) 73 | keys = append(keys, string(txvalue)+":"+datestr) 74 | 75 | } 76 | jsonKeys, err := json.Marshal(keys) 77 | if err != nil { 78 | return shim.Error(fmt.Sprintf("query opteration failed.Error marshaling JSON: %s", err)) 79 | } 80 | 81 | return shim.Success(jsonKeys) 82 | 83 | } else if opttype == "getmilkhistory" { 84 | keysIter, err := stub.GetHistoryForKey(assetname) 85 | 86 | if err != nil { 87 | return shim.Error(fmt.Sprintf("GetHistoryForKey failed.Error accessing state: %s", err)) 88 | } 89 | defer keysIter.Close() 90 | 91 | var keys []string 92 | var values []string 93 | 94 | for keysIter.HasNext() { 95 | response, iterErr := keysIter.Next() 96 | if iterErr != nil { 97 | return shim.Error(fmt.Sprintf("GetHistoryForKey opteration failed. Error accessing state :%s", err)) 98 | } 99 | txid := response.TxId 100 | txvalue := response.Value 101 | txstatus := response.IsDelete 102 | txtimestamp := response.Timestamp 103 | 104 | tm := time.Unix(txtimestamp.Seconds, 0) 105 | datestr := tm.Format("2006-01-02 03:04:05 PM") 106 | 107 | fmt.Printf("Tx info - rxid:%s value: %s if delete: %t datetime : %s\n", txid, string(txvalue), txstatus, datestr) 108 | 109 | keys = append(keys, string(txvalue)+":"+datestr) 110 | 111 | values = append(values, string(txvalue)) 112 | } 113 | //获取工厂编号 114 | machiningid := values[0] 115 | 116 | //调用加工厂的chaincode获取加工厂的溯源信息 117 | machining_history_parm := []string{"invoke", "gethistory", machiningid, "a"} 118 | queryArgs := make([][]byte, len(machining_history_parm)) 119 | for i, arg := range machining_history_parm { 120 | queryArgs[i] = []byte(arg) 121 | } 122 | 123 | response := stub.InvokeChaincode("origin_machining", queryArgs, "milkgen") 124 | 125 | if response.Status != shim.OK { 126 | errStr := fmt.Sprintf("Failed to query chaincode. 
Got error: %s", response.Payload) 127 | fmt.Print(errStr) 128 | return shim.Error(errStr) 129 | } 130 | 131 | // Get the machining information 132 | result := string(response.Payload) 133 | 134 | fmt.Printf("machining info - result : %s \n ", result) 135 | 136 | var machinginfos []string 137 | if err := json.Unmarshal([]byte(result), &machinginfos); err != nil { 138 | return shim.Error(fmt.Sprintf("query operation failed. Error unmarshaling JSON: %s", err)) 139 | } 140 | 141 | for _, v := range machinginfos { 142 | keys = append(keys, v) 143 | } 144 | 145 | milid := machinginfos[0] 146 | fmt.Printf("mil info - milid : %s \n", milid) 147 | 148 | milidarr := strings.Split(milid, ":") 149 | cowid := milidarr[0] 150 | 151 | fmt.Printf("mil info - cowid : %s \n", cowid) 152 | 153 | // Get the traceability information for the cow id 154 | cow_parms := []string{"invoke", "gethistory", cowid, "a"} 155 | queryArgs1 := make([][]byte, len(cow_parms)) 156 | for i, arg := range cow_parms { 157 | queryArgs1[i] = []byte(arg) 158 | } 159 | 160 | cow_response := stub.InvokeChaincode("origin_dairyfarm", queryArgs1, "milkgen") 161 | 162 | if cow_response.Status != shim.OK { 163 | errStr := fmt.Sprintf("Failed to query chaincode. Got error: %s", cow_response.Payload) 164 | fmt.Print(errStr) 165 | return shim.Error(errStr) 166 | } 167 | 168 | cow_result := string(cow_response.Payload) 169 | 170 | fmt.Printf("cow info - result :%s \n", cow_result) 171 | 172 | var cowhistorys []string 173 | if err := json.Unmarshal([]byte(cow_result), &cowhistorys); err != nil { 174 | return shim.Error(fmt.Sprintf("query operation failed. Error unmarshaling JSON: %s", err)) 175 | } 176 | 177 | for _, v1 := range cowhistorys { 178 | keys = append(keys, v1) 179 | } 180 | 181 | jsonKeys, err := json.Marshal(keys) 182 | if err != nil { 183 | return shim.Error(fmt.Sprintf("query operation failed. Error marshaling JSON: %s", err)) 184 | } 185 | 186 | return shim.Success(jsonKeys) 187 | 188 | } else { 189 | return shim.Success([]byte("success invoke and no operation")) 190 | } 191 | } 192 | func main() { 193 | err := shim.Start(new(salesterminal)) 194 | if err != nil { 195 | fmt.Printf("Error starting salesterminal chaincode: %s", err) 196 | } 197 | } 198 | --------------------------------------------------------------------------------