├── .gitattributes ├── scripts ├── entrypoint.sh ├── ssh_config ├── local_config.cue ├── ssh_config.local ├── example_config.cue ├── kauri_config.cue ├── repeat_deploy.sh ├── docker-compose.yml ├── Dockerfile.worker ├── single_worker.sh ├── Dockerfile.controller ├── deploy_test.sh └── sweep.cue ├── metrics ├── plotting │ ├── doc.go │ ├── starttimes.go │ ├── reader.go │ ├── clientlatency.go │ ├── throughput.go │ └── throughputvslatency.go ├── ticker.go ├── doc.go ├── types │ ├── event.go │ └── types.proto ├── welford.go ├── timeouts.go ├── clientlatency.go ├── consensuslatency.go ├── throughput.go ├── registry.go └── datalogger.go ├── internal ├── config │ ├── testdata │ │ ├── valid2-no-loc-no-tree.cue │ │ ├── invalid2-tree-only.cue │ │ ├── valid2-loc-only.cue │ │ ├── valid2-loc-tree.cue │ │ ├── exp.cue │ │ ├── valid-loc-dup-entries.cue │ │ ├── valid-loc-only.cue │ │ ├── invalid-loc-size.cue │ │ ├── invalid-tree.cue │ │ ├── valid-loc-tree.cue │ │ ├── valid-loc-tree-byz.cue │ │ ├── sweep-experiments.cue │ │ └── four-experiments.cue │ ├── config_string_test.go │ ├── exp-config.cue │ ├── schema.cue │ ├── cue.go │ └── viper.go ├── proto │ ├── clientpb │ │ ├── events.go │ │ ├── messageid.go │ │ ├── batch.go │ │ └── client.proto │ ├── hotstuffpb │ │ ├── helpers.go │ │ └── hotstuff.proto │ ├── kauripb │ │ └── kauri.proto │ └── orchestrationpb │ │ └── client_opts.go ├── root │ └── dir.go ├── tree │ ├── shuffle.go │ ├── shuffle_test.go │ ├── treelatency.go │ └── treelatency_test.go ├── protostream │ └── protostream_test.go ├── test │ └── name.go ├── testutil │ ├── wiring.go │ └── mocksender_test.go ├── latency │ ├── latency.go │ └── latency_test.go └── cli │ └── worker.go ├── cmd ├── hotstuff │ └── main.go ├── latencygen │ └── main.go └── plot │ └── main.go ├── wiring ├── doc.go ├── core.go ├── client.go ├── security.go └── consensus.go ├── protocol ├── leaderrotation │ ├── common.go │ ├── leaderrotation.go │ ├── fixed.go │ ├── treeleader.go │ ├── roundrobin.go │ ├── 
factory.go │ ├── carousel.go │ └── reputation.go ├── consensus │ ├── doc.go │ ├── ruleset.go │ ├── committer_test.go │ └── committer.go ├── synchronizer │ ├── viewduration.go │ ├── timeoutrules.go │ ├── fixed.go │ ├── viewduration_test.go │ ├── timeout_collector.go │ ├── timeoutrule_simple.go │ └── timeoutrule_aggregate.go ├── comm │ ├── comm.go │ ├── kauri │ │ ├── service.go │ │ ├── kauri.go │ │ └── sender.go │ ├── factory.go │ └── clique.go ├── rules │ ├── byzantine │ │ ├── factory.go │ │ ├── silentproposer.go │ │ ├── increaseview.go │ │ └── fork.go │ ├── factory.go │ └── fasthotstuff.go ├── viewstates_test.go └── votingmachine │ └── votingmachine_test.go ├── Dockerfile ├── .vscode ├── extensions.json ├── cspell.json ├── settings.json ├── launch.json └── dict.txt ├── hotstuff.go ├── security └── crypto │ ├── errors.go │ ├── factory.go │ ├── base.go │ ├── multisignature.go │ ├── bitfield_test.go │ └── bitfield.go ├── quorum.go ├── genesis.go ├── .gitignore ├── .golangci.yml ├── twins ├── twinsrules.go ├── twins_test.go ├── timeoutmgr.go ├── vulnfhs.go ├── generator_test.go └── json_test.go ├── core ├── eventloop │ ├── gpool.go │ ├── context.go │ ├── context_test.go │ ├── queue.go │ └── queue_test.go ├── logging │ └── logging_test.go ├── sender.go ├── options.go ├── context.go ├── replica.go └── config.go ├── .github └── workflows │ ├── go-test.yml │ └── golangci-lint.yml ├── server └── options.go ├── network └── replica.go ├── LICENSE ├── replica └── options.go ├── quorum_test.go ├── make.ps1 ├── events.go └── block.go /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | -------------------------------------------------------------------------------- /scripts/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service ssh start 4 | exec "$@" 5 | -------------------------------------------------------------------------------- 
/scripts/ssh_config: -------------------------------------------------------------------------------- 1 | # ssh_config used with the controller container 2 | 3 | Host hotstuff-worker-* 4 | User root 5 | IdentityFile ~/.ssh/id 6 | -------------------------------------------------------------------------------- /scripts/local_config.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: ["localhost"] 5 | clientHosts: ["localhost"] 6 | replicas: 4 7 | clients: 1 8 | } 9 | -------------------------------------------------------------------------------- /metrics/plotting/doc.go: -------------------------------------------------------------------------------- 1 | // Package plotting provides functions and structures for plotting measurement data collected from running an experiment. 2 | package plotting 3 | -------------------------------------------------------------------------------- /internal/config/testdata/valid2-no-loc-no-tree.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: ["relab1"] 5 | clientHosts: ["relab2"] 6 | replicas: 3 7 | clients: 2 8 | } 9 | -------------------------------------------------------------------------------- /cmd/hotstuff/main.go: -------------------------------------------------------------------------------- 1 | // Hotstuff is a utility for running HotStuff clients and replicas. 2 | package main 3 | 4 | import "github.com/relab/hotstuff/internal/cli" 5 | 6 | func main() { 7 | cli.Execute() 8 | } 9 | -------------------------------------------------------------------------------- /wiring/doc.go: -------------------------------------------------------------------------------- 1 | // Package wiring provides helpers for initializing and wiring together components. 2 | // The package is useful for creating tests and seamlessly initializing runtime components. 
3 | package wiring 4 | -------------------------------------------------------------------------------- /scripts/ssh_config.local: -------------------------------------------------------------------------------- 1 | # ssh_config used with single worker (on localhost) 2 | 3 | Host 127.0.0.1 localhost 4 | User root 5 | Port 2020 6 | IdentityFile ./scripts/id 7 | UserKnownHostsFile ./scripts/known_hosts 8 | -------------------------------------------------------------------------------- /internal/config/testdata/invalid2-tree-only.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: ["relab1"] 5 | clientHosts: ["relab2"] 6 | replicas: 3 7 | clients: 2 8 | 9 | treePositions: [3, 2, 1] 10 | } 11 | -------------------------------------------------------------------------------- /internal/config/testdata/valid2-loc-only.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: ["relab1"] 5 | clientHosts: ["relab2"] 6 | replicas: 3 7 | clients: 2 8 | 9 | locations: ["paris", "rome", "oslo"] 10 | } 11 | -------------------------------------------------------------------------------- /protocol/leaderrotation/common.go: -------------------------------------------------------------------------------- 1 | package leaderrotation 2 | 3 | import "github.com/relab/hotstuff" 4 | 5 | func ChooseRoundRobin(view hotstuff.View, numReplicas int) hotstuff.ID { 6 | return hotstuff.ID(view%hotstuff.View(numReplicas) + 1) 7 | } 8 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine AS builder 2 | 3 | WORKDIR /go/src/github.com/relab/hotstuff 4 | COPY . . 5 | RUN go mod download 6 | RUN go install -ldflags='-s -w' ./... 
7 | 8 | FROM alpine 9 | 10 | RUN apk add iproute2 11 | 12 | COPY --from=builder /go/bin/* /usr/bin/ 13 | -------------------------------------------------------------------------------- /scripts/example_config.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: [ 5 | "hotstuff-worker-2", 6 | "hotstuff-worker-3", 7 | "hotstuff-worker-4", 8 | ] 9 | 10 | clientHosts: [ 11 | "hotstuff-worker-1", 12 | ] 13 | 14 | replicas: 8 15 | clients: 2 16 | } 17 | -------------------------------------------------------------------------------- /internal/config/testdata/valid2-loc-tree.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: ["relab1"] 5 | clientHosts: ["relab2"] 6 | replicas: 5 7 | clients: 2 8 | 9 | locations: ["paris", "rome", "oslo", "london", "berlin"] 10 | treePositions: [3, 2, 1, 4, 5] 11 | branchFactor: 2 12 | } 13 | -------------------------------------------------------------------------------- /scripts/kauri_config.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: ["localhost"] 5 | clientHosts: ["localhost"] 6 | replicas: 5 7 | clients: 2 8 | 9 | kauri: true 10 | locations: ["Paris", "Rome", "Oslo", "London", "Munich"] 11 | treePositions: [3, 2, 1, 4, 5] 12 | branchFactor: 2 13 | } 14 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "xaver.clang-format", 4 | "streetsidesoftware.code-spell-checker", 5 | "golang.go", 6 | "davidanson.vscode-markdownlint", 7 | "zxh404.vscode-proto3", 8 | "github.vscode-pull-request-github", 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- 
/internal/proto/clientpb/events.go: -------------------------------------------------------------------------------- 1 | package clientpb 2 | 3 | // ExecuteEvent is raised when executing a batch of commands. 4 | type ExecuteEvent struct { 5 | Batch *Batch 6 | } 7 | 8 | // AbortEvent is raised when a batch of commands is invalid and is canceled. 9 | type AbortEvent struct { 10 | Batch *Batch 11 | } 12 | -------------------------------------------------------------------------------- /protocol/consensus/doc.go: -------------------------------------------------------------------------------- 1 | // Package consensus defines the components necessary to drive the consensus process. 2 | // It includes the following components: 3 | // - proposer 4 | // - voter 5 | // - committer 6 | // 7 | // Each component is separated intuitively and includes corresponding unit tests. 8 | package consensus 9 | -------------------------------------------------------------------------------- /protocol/leaderrotation/leaderrotation.go: -------------------------------------------------------------------------------- 1 | package leaderrotation 2 | 3 | import "github.com/relab/hotstuff" 4 | 5 | // LeaderRotation implements a leader rotation scheme. 6 | type LeaderRotation interface { 7 | // GetLeader returns the id of the leader in the given view. 8 | GetLeader(hotstuff.View) hotstuff.ID 9 | } 10 | -------------------------------------------------------------------------------- /internal/root/dir.go: -------------------------------------------------------------------------------- 1 | // Package root provides the root directory of the project. 2 | package root 3 | 4 | import ( 5 | "path/filepath" 6 | "runtime" 7 | ) 8 | 9 | var ( 10 | _, b, _, _ = runtime.Caller(0) 11 | 12 | // Dir is the root directory of the project. 
13 | Dir = filepath.Join(filepath.Dir(b), "../..") 14 | ) 15 | -------------------------------------------------------------------------------- /internal/tree/shuffle.go: -------------------------------------------------------------------------------- 1 | package tree 2 | 3 | import ( 4 | "math/rand/v2" 5 | ) 6 | 7 | var rnd = rand.New(rand.NewPCG(rand.Uint64(), rand.Uint64())) 8 | 9 | func Shuffle(treePositions []uint32) { 10 | rnd.Shuffle(len(treePositions), func(i, j int) { 11 | treePositions[i], treePositions[j] = treePositions[j], treePositions[i] 12 | }) 13 | } 14 | -------------------------------------------------------------------------------- /hotstuff.go: -------------------------------------------------------------------------------- 1 | // Package hotstuff implements the basic types that are used by hotstuff. 2 | package hotstuff 3 | 4 | import "encoding/binary" 5 | 6 | // ID uniquely identifies a replica 7 | type ID uint32 8 | 9 | // ToBytes returns the ID as bytes. 10 | func (id ID) ToBytes() []byte { 11 | var idBytes [4]byte 12 | binary.LittleEndian.PutUint32(idBytes[:], uint32(id)) 13 | return idBytes[:] 14 | } 15 | -------------------------------------------------------------------------------- /internal/proto/clientpb/messageid.go: -------------------------------------------------------------------------------- 1 | package clientpb 2 | 3 | // MessageID is a unique identifier for a command. 4 | type MessageID struct { 5 | ClientID uint32 6 | SequenceNumber uint64 7 | } 8 | 9 | // ID returns the unique identifier for the command. 
10 | func (x *Command) ID() MessageID { 11 | return MessageID{ 12 | ClientID: x.GetClientID(), 13 | SequenceNumber: x.GetSequenceNumber(), 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /internal/config/testdata/exp.cue: -------------------------------------------------------------------------------- 1 | config: { 2 | consensus: "chainedhotstuff" 3 | leaderRotation: "round-robin" 4 | crypto: "ecdsa" 5 | communication: "clique" 6 | byzantineStrategy: { 7 | "": [] 8 | } 9 | replicaHosts: ["localhost"] 10 | clientHosts: ["localhost"] 11 | replicas: 4 12 | clients: 1 13 | locations: ["Rome", "Oslo", "London", "Munich"] 14 | treePositions: [3, 2, 1, 4] 15 | branchFactor: 2 16 | } 17 | -------------------------------------------------------------------------------- /security/crypto/errors.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | var ( 8 | // ErrCombineMultiple is used when Combine is called with less than two signatures. 9 | ErrCombineMultiple = errors.New("must have at least two signatures") 10 | 11 | // ErrCombineOverlap is used when Combine is called with signatures that have overlapping participation. 12 | ErrCombineOverlap = errors.New("overlapping signatures") 13 | ) 14 | -------------------------------------------------------------------------------- /quorum.go: -------------------------------------------------------------------------------- 1 | package hotstuff 2 | 3 | import "math" 4 | 5 | // NumFaulty returns the maximum number of faulty replicas in a system with n replicas. 6 | func NumFaulty(n int) int { 7 | return (n - 1) / 3 8 | } 9 | 10 | // QuorumSize returns the minimum number of replicas that must agree on a value for it to be considered a quorum. 
11 | func QuorumSize(n int) int { 12 | f := NumFaulty(n) 13 | return int(math.Ceil(float64(n+f+1) / 2.0)) 14 | } 15 | -------------------------------------------------------------------------------- /scripts/repeat_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | LOG_DIR=".log/deploy-test-gorums-v0.10.0" 4 | BASE_FILE="gorums-v0.10.0-test" 5 | mkdir -p "$LOG_DIR" 6 | for i in {1..50}; do 7 | echo "Run #$i" 8 | go test -v -run TestDeployment ./internal/orchestration -timeout=4m > "$LOG_DIR/${BASE_FILE}-deployment-${i}.log" 2>&1 9 | go test -v -run TestOrchestration ./internal/orchestration -timeout=4m > "$LOG_DIR/${BASE_FILE}-orchestration-${i}.log" 2>&1 10 | done 11 | -------------------------------------------------------------------------------- /internal/config/testdata/valid-loc-dup-entries.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: [ 5 | "bbchain1", 6 | "bbchain2", 7 | "bbchain3", 8 | "bbchain4", 9 | "bbchain5", 10 | "bbchain6", 11 | ] 12 | clientHosts: [ 13 | "bbchain7", 14 | "bbchain8", 15 | ] 16 | 17 | replicas: 10 18 | clients: 2 19 | 20 | locations: ["Melbourne", "Toronto", "Prague", "Paris", "Tokyo", "Melbourne", "Toronto", "Prague", "Paris", "Tokyo"] 21 | } 22 | -------------------------------------------------------------------------------- /internal/config/testdata/valid-loc-only.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: [ 5 | "bbchain1", 6 | "bbchain2", 7 | "bbchain3", 8 | "bbchain4", 9 | "bbchain5", 10 | "bbchain6", 11 | ] 12 | clientHosts: [ 13 | "bbchain7", 14 | "bbchain8", 15 | ] 16 | 17 | replicas: 10 18 | clients: 2 19 | 20 | locations: ["Melbourne", "Toronto", "Prague", "Paris", "Tokyo", "Amsterdam", "Auckland", "Moscow", "Stockholm", "London"] 21 | } 22 | 
-------------------------------------------------------------------------------- /internal/config/testdata/invalid-loc-size.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: [ 5 | "bbchain1", 6 | "bbchain2", 7 | "bbchain3", 8 | "bbchain4", 9 | "bbchain5", 10 | "bbchain6", 11 | ] 12 | clientHosts: [ 13 | "bbchain7", 14 | "bbchain8", 15 | ] 16 | 17 | replicas: 10 18 | clients: 2 19 | 20 | locations: ["Oslo", "Melbourne", "Toronto", "Prague", "Paris", "Tokyo", "Amsterdam", "Auckland", "Moscow", "Stockholm", "London"] 21 | } 22 | -------------------------------------------------------------------------------- /internal/proto/hotstuffpb/helpers.go: -------------------------------------------------------------------------------- 1 | package hotstuffpb 2 | 3 | import "github.com/relab/hotstuff" 4 | 5 | // ProposerID returns the block proposer's ID. 6 | func (x *Proposal) ProposerID() hotstuff.ID { 7 | if x != nil { 8 | return hotstuff.ID(x.GetBlock().GetProposer()) 9 | } 10 | return 0 11 | } 12 | 13 | // ProposerID returns the ID of the replica who proposed the block. 14 | func (b *Block) ProposerID() hotstuff.ID { 15 | return hotstuff.ID(b.GetProposer()) 16 | } 17 | -------------------------------------------------------------------------------- /scripts/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | worker: 3 | build: 4 | context: ".." 5 | dockerfile: "scripts/Dockerfile.worker" 6 | networks: 7 | hotstuff: 8 | 9 | controller: 10 | build: 11 | context: ".." 
12 | dockerfile: "scripts/Dockerfile.controller" 13 | networks: 14 | hotstuff: 15 | entrypoint: sleep infinity 16 | 17 | networks: 18 | hotstuff: 19 | ipam: 20 | config: 21 | - subnet: "192.168.1.0/24" 22 | -------------------------------------------------------------------------------- /internal/config/testdata/invalid-tree.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: [ 5 | "bbchain1", 6 | "bbchain2", 7 | "bbchain3", 8 | "bbchain4", 9 | "bbchain5", 10 | "bbchain6", 11 | ] 12 | clientHosts: [ 13 | "bbchain7", 14 | "bbchain8", 15 | ] 16 | 17 | replicas: 10 18 | clients: 2 19 | 20 | locations: ["Melbourne", "Toronto", "Prague", "Paris", "Tokyo", "Amsterdam", "Auckland", "Moscow", "Stockholm", "London"] 21 | treePositions: [11, 2, 3, 4, 5, 6, 7, 8, 9, 1] 22 | } 23 | -------------------------------------------------------------------------------- /genesis.go: -------------------------------------------------------------------------------- 1 | package hotstuff 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/relab/hotstuff/internal/proto/clientpb" 7 | ) 8 | 9 | // genesisBlock is initialized at package initialization time. 10 | var genesisBlock = func() *Block { 11 | ts := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC) 12 | b := NewBlock(Hash{}, QuorumCert{}, &clientpb.Batch{}, 0, 0) 13 | b.SetTimestamp(ts) 14 | return b 15 | }() 16 | 17 | // GetGenesis returns the genesis block. 
18 | func GetGenesis() *Block { 19 | return genesisBlock 20 | } 21 | -------------------------------------------------------------------------------- /scripts/Dockerfile.worker: -------------------------------------------------------------------------------- 1 | FROM ubuntu:rolling 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y openssh-server 5 | 6 | # allow root login and pubkey authentication 7 | RUN sed -i s/#PermitRootLogin.*/PermitRootLogin\ prohibit-password/ /etc/ssh/sshd_config 8 | RUN sed -i s/#PubkeyAuthentication.*/PubkeyAuthentication\ yes/ /etc/ssh/sshd_config 9 | 10 | ADD scripts/entrypoint.sh /entrypoint.sh 11 | 12 | WORKDIR /root 13 | ADD scripts/id.pub .ssh/authorized_keys 14 | 15 | ENTRYPOINT [ "/entrypoint.sh", "sleep", "infinity"] 16 | -------------------------------------------------------------------------------- /internal/config/testdata/valid-loc-tree.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: [ 5 | "bbchain1", 6 | "bbchain2", 7 | "bbchain3", 8 | "bbchain4", 9 | "bbchain5", 10 | "bbchain6", 11 | ] 12 | clientHosts: [ 13 | "bbchain7", 14 | "bbchain8", 15 | ] 16 | 17 | replicas: 10 18 | clients: 2 19 | 20 | locations: ["Melbourne", "Toronto", "Prague", "Paris", "Tokyo", "Amsterdam", "Auckland", "Moscow", "Stockholm", "London"] 21 | treePositions: [10, 2, 3, 4, 5, 6, 7, 8, 9, 1] 22 | branchFactor: 5 23 | } 24 | -------------------------------------------------------------------------------- /internal/proto/kauripb/kauri.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package kauripb; 4 | option go_package = "github.com/relab/hotstuff/internal/proto/kauripb"; 5 | 6 | import "gorums.proto"; 7 | import "google/protobuf/empty.proto"; 8 | import "hotstuffpb/hotstuff.proto"; 9 | 10 | 11 | service Kauri { 12 | rpc SendContribution(Contribution) returns (google.protobuf.Empty) { 
13 | option (gorums.unicast) = true; 14 | } 15 | } 16 | 17 | message Contribution { 18 | uint32 ID = 1; 19 | hotstuffpb.QuorumSignature Signature = 2; 20 | uint64 View = 3; 21 | } 22 | -------------------------------------------------------------------------------- /scripts/single_worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | image="hotstuff-worker" 4 | 5 | if [ ! -f "./id" ]; then 6 | ssh-keygen -t ed25519 -C "hotstuff-test" -f "./id" -N "" 7 | fi 8 | 9 | # ensure that the image is built 10 | docker images | grep "$image" &>/dev/null || 11 | docker build -t "$image" -f "./Dockerfile.worker" ".." 12 | 13 | container="$(docker run --rm -d -p 2020:22 -p 4000:4000 "$image")" 14 | 15 | sleep 1s 16 | 17 | ssh-keyscan -p 2020 127.0.0.1 localhost >known_hosts 18 | 19 | docker logs --follow "$container" 20 | docker rm -f "$container" &>/dev/null 21 | -------------------------------------------------------------------------------- /scripts/Dockerfile.controller: -------------------------------------------------------------------------------- 1 | FROM golang:latest AS builder 2 | 3 | WORKDIR /go/src/github.com/relab/hotstuff 4 | 5 | # speed up the build by downloading the modules first 6 | COPY go.mod . 7 | COPY go.sum . 8 | RUN go mod download 9 | 10 | COPY . . 11 | RUN go install ./... 
12 | 13 | FROM ubuntu 14 | 15 | RUN apt-get update 16 | RUN apt-get install -y openssh-client 17 | 18 | COPY --from=builder /go/bin/* /usr/bin/ 19 | 20 | WORKDIR /root 21 | ADD scripts/id .ssh/ 22 | ADD scripts/id.pub .ssh/ 23 | ADD scripts/ssh_config .ssh/config 24 | ADD scripts/example_config.cue ./example_config.cue 25 | -------------------------------------------------------------------------------- /protocol/leaderrotation/fixed.go: -------------------------------------------------------------------------------- 1 | package leaderrotation 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | ) 6 | 7 | const NameFixed = "fixed" 8 | 9 | type Fixed struct { 10 | leader hotstuff.ID 11 | } 12 | 13 | // GetLeader returns the id of the leader in the given view 14 | func (f *Fixed) GetLeader(_ hotstuff.View) hotstuff.ID { 15 | return f.leader 16 | } 17 | 18 | // NewFixed returns a new fixed-leader leader rotation implementation. 19 | func NewFixed( 20 | leader hotstuff.ID, 21 | ) *Fixed { 22 | return &Fixed{ 23 | leader: leader, 24 | } 25 | } 26 | 27 | var _ LeaderRotation = (*Fixed)(nil) 28 | -------------------------------------------------------------------------------- /internal/config/testdata/valid-loc-tree-byz.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | config: { 4 | replicaHosts: [ 5 | "bbchain1", 6 | "bbchain2", 7 | "bbchain3", 8 | "bbchain4", 9 | "bbchain5", 10 | "bbchain6", 11 | ] 12 | clientHosts: [ 13 | "bbchain7", 14 | "bbchain8", 15 | ] 16 | 17 | replicas: 10 18 | clients: 2 19 | 20 | locations: ["Melbourne", "Toronto", "Prague", "Paris", "Tokyo", "Amsterdam", "Auckland", "Moscow", "Stockholm", "London"] 21 | treePositions: [10, 2, 3, 4, 5, 6, 7, 8, 9, 1] 22 | branchFactor: 5 23 | 24 | byzantineStrategy: { 25 | silentproposer: [2, 5] 26 | fork: [4] 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool 12 | *.out 13 | 14 | # Test output 15 | *.log 16 | 17 | # delve debug binary 18 | __debug_bin 19 | 20 | # binaries 21 | /hotstuff 22 | /plot 23 | /hscov 24 | 25 | # other 26 | *.in 27 | 28 | rr/ 29 | 30 | scripts/id 31 | scripts/id.pub 32 | scripts/known_hosts 33 | 34 | measurements.json 35 | *.pdf 36 | 37 | twins.json 38 | internal/tree/benchmarkdata/* 39 | core/eventloop/benchmarkdata/* 40 | eventloop/benchmarkdata/* 41 | scratch/* 42 | -------------------------------------------------------------------------------- /security/crypto/factory.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/relab/hotstuff/core" 7 | ) 8 | 9 | func New( 10 | config *core.RuntimeConfig, 11 | name string, 12 | ) (impl Base, err error) { 13 | switch name { 14 | case "": 15 | fallthrough // default to ecdsa if no name is provided 16 | case NameECDSA: 17 | impl = NewECDSA(config) 18 | case NameEDDSA: 19 | impl = NewEDDSA(config) 20 | case NameBLS12: 21 | impl, err = NewBLS12(config) 22 | if err != nil { 23 | return nil, err 24 | } 25 | default: 26 | return nil, fmt.Errorf("invalid crypto name: '%s'", name) 27 | } 28 | return 29 | } 30 | -------------------------------------------------------------------------------- /protocol/synchronizer/viewduration.go: -------------------------------------------------------------------------------- 1 | package synchronizer 2 | 3 | import "time" 4 | 5 | // ViewDuration determines the duration of a view. 6 | // The view synchronizer uses this interface to set its timeouts. 7 | type ViewDuration interface { 8 | // Duration returns the duration that the next view should last. 
9 | Duration() time.Duration 10 | // ViewStarted is called by the synchronizer when starting a new view. 11 | ViewStarted() 12 | // ViewSucceeded is called by the synchronizer when a view ended successfully. 13 | ViewSucceeded() 14 | // ViewTimeout is called by the synchronizer when a view timed out. 15 | ViewTimeout() 16 | } 17 | -------------------------------------------------------------------------------- /.vscode/cspell.json: -------------------------------------------------------------------------------- 1 | { 2 | // flagWords - list of words to be always considered incorrect. 3 | // This is useful for common spelling errors too short to be considered. 4 | // For example "hte" should be "the" 5 | "flagWords": [ 6 | "hte", 7 | "teh" 8 | ], 9 | "dictionaries": [ 10 | "hotstuff-words" 11 | ], 12 | "dictionaryDefinitions": [ 13 | { 14 | "name": "hotstuff-words", 15 | "path": "./dict.txt", 16 | // Some extensions use `addWords` for adding words to your personal dictionary. 17 | "addWords": true 18 | } 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | enable: 4 | - gocyclo 5 | - misspell 6 | - revive 7 | settings: 8 | gocyclo: 9 | min-complexity: 15 10 | misspell: 11 | locale: US 12 | revive: 13 | rules: 14 | - name: unexported-return 15 | disabled: true 16 | - name: unused-parameter 17 | staticcheck: 18 | checks: 19 | - all 20 | exclusions: 21 | generated: lax 22 | paths: 23 | - third_party$ 24 | - builtin$ 25 | - examples$ 26 | formatters: 27 | exclusions: 28 | generated: lax 29 | paths: 30 | - third_party$ 31 | - builtin$ 32 | - examples$ 33 | -------------------------------------------------------------------------------- /internal/proto/clientpb/batch.go: -------------------------------------------------------------------------------- 1 | package clientpb 2 | 3 | import ( 4 | "fmt" 
5 | 6 | "google.golang.org/protobuf/proto" 7 | ) 8 | 9 | // Marshal marshals the Batch into a byte slice using protobuf. 10 | // It panics if marshaling fails. 11 | func (b *Batch) Marshal() []byte { 12 | data, err := proto.MarshalOptions{Deterministic: true}.Marshal(b) 13 | if err != nil { 14 | panic(fmt.Sprintf("failed to marshal command batch: %v", err)) 15 | } 16 | return data 17 | } 18 | 19 | // isFull returns true if the batch contains the specified number of commands. 20 | func (b *Batch) isFull(batchSize uint32) bool { 21 | return uint32(len(b.Commands)) == batchSize 22 | } 23 | -------------------------------------------------------------------------------- /internal/proto/orchestrationpb/client_opts.go: -------------------------------------------------------------------------------- 1 | package orchestrationpb 2 | 3 | import ( 4 | "strconv" 5 | "unsafe" 6 | 7 | "github.com/relab/hotstuff/client" 8 | ) 9 | 10 | // ClientID returns the client's ID. 11 | func (x *ClientOpts) ClientID() client.ID { 12 | return client.ID(x.GetID()) 13 | } 14 | 15 | // ClientIDString returns the client's ID as a string. 16 | func (x *ClientOpts) ClientIDString() string { 17 | return strconv.Itoa(int(x.GetID())) 18 | } 19 | 20 | // ClientIDs returns the list of client IDs. 
21 | func (x *StopClientRequest) ClientIDs() []client.ID { 22 | if x == nil || len(x.IDs) == 0 { 23 | return nil 24 | } 25 | return unsafe.Slice((*client.ID)(unsafe.Pointer(unsafe.SliceData(x.IDs))), len(x.IDs)) 26 | } 27 | -------------------------------------------------------------------------------- /protocol/leaderrotation/treeleader.go: -------------------------------------------------------------------------------- 1 | package leaderrotation 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/core" 6 | ) 7 | 8 | const NameTree = "tree-leader" 9 | 10 | type TreeBased struct { 11 | leader hotstuff.ID 12 | config *core.RuntimeConfig 13 | } 14 | 15 | func NewTreeBased( 16 | config *core.RuntimeConfig, 17 | ) *TreeBased { 18 | return &TreeBased{ 19 | config: config, 20 | leader: 1, 21 | } 22 | } 23 | 24 | // GetLeader returns the id of the leader in the given view 25 | func (t *TreeBased) GetLeader(_ hotstuff.View) hotstuff.ID { 26 | if !t.config.HasKauriTree() { 27 | return 1 28 | } 29 | return t.config.Tree().Root() 30 | } 31 | 32 | var _ LeaderRotation = (*TreeBased)(nil) 33 | -------------------------------------------------------------------------------- /twins/twinsrules.go: -------------------------------------------------------------------------------- 1 | package twins 2 | 3 | import ( 4 | "github.com/relab/hotstuff/core" 5 | "github.com/relab/hotstuff/core/logging" 6 | "github.com/relab/hotstuff/protocol/consensus" 7 | "github.com/relab/hotstuff/protocol/rules" 8 | "github.com/relab/hotstuff/security/blockchain" 9 | ) 10 | 11 | func newTwinsConsensusRules( 12 | logger logging.Logger, 13 | config *core.RuntimeConfig, 14 | blockchain *blockchain.Blockchain, 15 | name string, 16 | ) (ruleset consensus.Ruleset, err error) { 17 | switch name { 18 | case nameVulnerableFHS: 19 | ruleset = NewVulnFHS(logger, blockchain, 20 | rules.NewFastHotStuff(logger, config, blockchain), 21 | ) 22 | default: 23 | ruleset, err = rules.New(logger, 
config, blockchain, name) 24 | } 25 | return 26 | } 27 | -------------------------------------------------------------------------------- /metrics/ticker.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/relab/hotstuff/core/eventloop" 7 | "github.com/relab/hotstuff/metrics/types" 8 | ) 9 | 10 | // ticker emits TickEvents on the metrics event loop. 11 | type ticker struct { 12 | tickerID int 13 | interval time.Duration 14 | lastTick time.Time 15 | } 16 | 17 | // addTicker returns a new ticker. 18 | func addTicker(eventLoop *eventloop.EventLoop, interval time.Duration) { 19 | t := &ticker{interval: interval} 20 | t.tickerID = eventLoop.AddTicker(t.interval, t.tick) 21 | } 22 | 23 | func (t *ticker) tick(tickTime time.Time) any { 24 | var event any 25 | if !t.lastTick.IsZero() { 26 | event = types.TickEvent{ 27 | LastTick: t.lastTick, 28 | } 29 | } 30 | t.lastTick = tickTime 31 | return event 32 | } 33 | -------------------------------------------------------------------------------- /protocol/leaderrotation/roundrobin.go: -------------------------------------------------------------------------------- 1 | package leaderrotation 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/core" 6 | ) 7 | 8 | const NameRoundRobin = "round-robin" 9 | 10 | type RoundRobin struct { 11 | config *core.RuntimeConfig 12 | } 13 | 14 | // GetLeader returns the id of the leader in the given view 15 | func (rr RoundRobin) GetLeader(view hotstuff.View) hotstuff.ID { 16 | // TODO: does not support reconfiguration 17 | // assume IDs start at 1 18 | return ChooseRoundRobin(view, rr.config.ReplicaCount()) 19 | } 20 | 21 | // NewRoundRobin returns a new round-robin leader rotation implementation. 
22 | func NewRoundRobin(config *core.RuntimeConfig) *RoundRobin { 23 | return &RoundRobin{ 24 | config: config, 25 | } 26 | } 27 | 28 | var _ LeaderRotation = (*RoundRobin)(nil) 29 | -------------------------------------------------------------------------------- /core/eventloop/gpool.go: -------------------------------------------------------------------------------- 1 | package eventloop 2 | 3 | import "sync" 4 | 5 | // pool is a generic sync.pool. 6 | type pool[T any] sync.Pool 7 | 8 | // New returns an initialized generic sync.Pool. 9 | func newPool[T any](newFunc func() T) pool[T] { 10 | if newFunc != nil { 11 | return pool[T](sync.Pool{ 12 | New: func() any { return newFunc() }, 13 | }) 14 | } 15 | return pool[T]{} 16 | } 17 | 18 | // Get retrieves a resource from the pool. 19 | // Returns the zero value of T if no resource is available and no New func is specified. 20 | func (p *pool[T]) Get() (val T) { 21 | sp := (*sync.Pool)(p) 22 | v := sp.Get() 23 | if v != nil { 24 | return v.(T) 25 | } 26 | return val 27 | } 28 | 29 | // Put puts the resource into the pool. 30 | func (p *pool[T]) Put(val T) { 31 | sp := (*sync.Pool)(p) 32 | sp.Put(val) 33 | } 34 | -------------------------------------------------------------------------------- /protocol/synchronizer/timeoutrules.go: -------------------------------------------------------------------------------- 1 | package synchronizer 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/core" 6 | "github.com/relab/hotstuff/security/cert" 7 | ) 8 | 9 | // NewTimeoutRuler returns a TimeoutRuler based on the configuration. 
10 | func NewTimeoutRuler(cfg *core.RuntimeConfig, auth *cert.Authority) TimeoutRuler { 11 | if cfg.HasAggregateQC() { 12 | return newAggregate(cfg, auth) 13 | } 14 | return newSimple(cfg, auth) 15 | } 16 | 17 | type TimeoutRuler interface { 18 | LocalTimeoutRule(hotstuff.View, hotstuff.SyncInfo) (*hotstuff.TimeoutMsg, error) 19 | RemoteTimeoutRule(currentView, timeoutView hotstuff.View, timeouts []hotstuff.TimeoutMsg) (hotstuff.SyncInfo, error) 20 | VerifySyncInfo(hotstuff.SyncInfo) (qc *hotstuff.QuorumCert, view hotstuff.View, timeout bool, err error) 21 | } 22 | -------------------------------------------------------------------------------- /security/crypto/base.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import "github.com/relab/hotstuff" 4 | 5 | // Base provides the basic cryptographic methods needed to create, verify, and combine signatures. 6 | type Base interface { 7 | // Sign creates a cryptographic signature of the given message. 8 | Sign(message []byte) (signature hotstuff.QuorumSignature, err error) 9 | // Combine combines multiple signatures into a single signature. 10 | Combine(signatures ...hotstuff.QuorumSignature) (signature hotstuff.QuorumSignature, err error) 11 | // Verify verifies the given quorum signature against the message. 12 | Verify(signature hotstuff.QuorumSignature, message []byte) error 13 | // BatchVerify verifies the given quorum signature against the batch of messages. 14 | BatchVerify(signature hotstuff.QuorumSignature, batch map[hotstuff.ID][]byte) error 15 | } 16 | -------------------------------------------------------------------------------- /metrics/doc.go: -------------------------------------------------------------------------------- 1 | // Package metrics enables modules that collect data or metrics from other modules. 
2 | // 3 | // The preferred way to collect metrics is to send an event from the target module onto the event loop, 4 | // which can then be received by a metric module for processing before being written to a data file using the 5 | // metrics logger. 6 | // 7 | // A ticker is used to determine how often metrics should be logged. To receive tick events, you can add an observer 8 | // for a types.TickEvent on the metrics logger. When receiving the tick event, you should write the relevant data 9 | // to the MetricsLogger module as a protobuf message. 10 | // 11 | // The event loop is accessed through the EventLoop() method of the module system, 12 | // and the metrics logger is accessed through the MetricsLogger() method of the module system. 13 | package metrics 14 | -------------------------------------------------------------------------------- /.github/workflows/go-test.yml: -------------------------------------------------------------------------------- 1 | name: Go Test 2 | on: 3 | push: 4 | branches: [main] 5 | pull_request: 6 | 7 | jobs: 8 | test: 9 | strategy: 10 | matrix: 11 | platform: [ubuntu-latest, macos-latest, windows-latest] 12 | runs-on: ${{ matrix.platform }} 13 | 14 | steps: 15 | - uses: actions/checkout@v5 16 | - uses: actions/setup-go@v6 17 | with: 18 | go-version-file: "go.mod" 19 | cache: true 20 | 21 | - name: Download dependencies 22 | run: go mod download 23 | 24 | - name: Run Go tests 25 | run: | 26 | go test -v -timeout 5m ./... 27 | shell: bash 28 | env: 29 | HOTSTUFF_LOG: info 30 | 31 | - name: Run docker tests 32 | if: runner.os == 'Linux' 33 | run: | 34 | cd scripts 35 | bash deploy_test.sh 36 | -------------------------------------------------------------------------------- /protocol/comm/comm.go: -------------------------------------------------------------------------------- 1 | // Package comm provides interfaces for disseminating proposals and aggregating votes. 
2 | package comm 3 | 4 | import "github.com/relab/hotstuff" 5 | 6 | // Disseminator is an interface for disseminating the proposal from the proposer. 7 | type Disseminator interface { 8 | // Disseminate disseminates the proposal from the proposer. 9 | Disseminate(proposal *hotstuff.ProposeMsg, pc hotstuff.PartialCert) error 10 | } 11 | 12 | // Aggregator is an interface for collecting votes for a given proposal. 13 | type Aggregator interface { 14 | // Aggregate sends the vote to the aggregating replica. 15 | Aggregate(proposal *hotstuff.ProposeMsg, pc hotstuff.PartialCert) error 16 | } 17 | 18 | // Communication is an interface that combines Disseminator and Aggregator for convenience. 19 | type Communication interface { 20 | Disseminator 21 | Aggregator 22 | } 23 | -------------------------------------------------------------------------------- /protocol/rules/byzantine/factory.go: -------------------------------------------------------------------------------- 1 | package byzantine 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/relab/hotstuff/core" 7 | "github.com/relab/hotstuff/protocol/consensus" 8 | "github.com/relab/hotstuff/security/blockchain" 9 | ) 10 | 11 | func Wrap( 12 | config *core.RuntimeConfig, 13 | blockchain *blockchain.Blockchain, 14 | rules consensus.Ruleset, 15 | name string, 16 | ) (byzRules consensus.Ruleset, _ error) { 17 | switch name { 18 | case "": 19 | return rules, nil // default to no byzantine strategy if no name is provided 20 | case NameSilentProposer: 21 | byzRules = NewSilentProposer(rules) 22 | case NameFork: 23 | byzRules = NewFork(config, blockchain, rules) 24 | case NameIncreaseView: 25 | byzRules = NewIncreaseView(config, rules) 26 | default: 27 | return nil, fmt.Errorf("invalid byzantine strategy: '%s'", name) 28 | } 29 | return 30 | } 31 | -------------------------------------------------------------------------------- /protocol/synchronizer/fixed.go: 
-------------------------------------------------------------------------------- 1 | package synchronizer 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type FixedDuration struct { 8 | duration time.Duration 9 | } 10 | 11 | // NewFixedDuration returns a ViewDuration with a fixed duration. 12 | func NewFixedDuration(duration time.Duration) *FixedDuration { 13 | return &FixedDuration{ 14 | duration: duration, 15 | } 16 | } 17 | 18 | // Duration returns the fixed duration. 19 | func (f *FixedDuration) Duration() time.Duration { 20 | return f.duration 21 | } 22 | 23 | // ViewStarted does nothing for FixedViewDuration. 24 | func (f *FixedDuration) ViewStarted() {} 25 | 26 | // ViewSucceeded does nothing for FixedViewDuration. 27 | func (f *FixedDuration) ViewSucceeded() {} 28 | 29 | // ViewTimeout does nothing for FixedViewDuration. 30 | func (f *FixedDuration) ViewTimeout() {} 31 | 32 | var _ ViewDuration = (*FixedDuration)(nil) 33 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | name: golangci-lint 2 | on: 3 | push: 4 | branches: [main] 5 | pull_request: 6 | 7 | permissions: 8 | # Required: allow read access to the content for analysis. 9 | contents: read 10 | # Optional: allow read access to pull request. Use with `only-new-issues` option. 11 | pull-requests: read 12 | # Optional: allow write access to checks to allow the action to annotate code in the PR. 
13 | checks: write 14 | 15 | jobs: 16 | golangci: 17 | name: lint 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v5 21 | - uses: actions/setup-go@v6 22 | with: 23 | go-version-file: "go.mod" 24 | cache: true 25 | 26 | - name: Run golangci-lint 27 | uses: golangci/golangci-lint-action@v8 28 | with: 29 | version: latest 30 | args: --timeout=5m 31 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "clang-format.style": "{ IndentWidth: 2, BasedOnStyle: google, AlignConsecutiveAssignments: true, ColumnLimit: 120 }", 3 | "editor.formatOnSave": true, 4 | "files.insertFinalNewline": true, 5 | "protoc": { 6 | "compile_on_save": false, 7 | "options": [ 8 | "--proto_path=${workspaceRoot}/", 9 | "--proto_path=${env.GOPATH}/pkg/mod/github.com/relab/gorums@v0.7.1-0.20220307181651-94a8af8e467c", 10 | ] 11 | }, 12 | "gopls": { 13 | "ui.semanticTokens": true 14 | }, 15 | "cSpell.enabled": true, 16 | "cSpell.ignorePaths": [ 17 | "package-lock.json", 18 | "node_modules", 19 | "vscode-extension", 20 | ".git/{info,lfs,logs,refs,objects}/**", 21 | ".git/{index,*refs,*HEAD}", 22 | ".vscode", 23 | ".vscode-insiders", 24 | "go.mod", 25 | "go.sum", 26 | "**/**/latency_matrix.go", 27 | "**/**/*.csv", 28 | "**/**/*.pb.go" 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /internal/config/config_string_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestJoin(t *testing.T) { 8 | tests := []struct { 9 | name string 10 | a []any 11 | sep string 12 | want string 13 | }{ 14 | {name: "EmptySlice", a: []any{}, sep: ",", want: ""}, 15 | {name: "OneElement", a: []any{1}, sep: ",", want: "1"}, 16 | {name: "TwoElements", a: []any{1, 2}, sep: ",", want: "1,2"}, 17 | {name: 
"ThreeElements", a: []any{1, 2, 3}, sep: ",", want: "1,2,3"}, 18 | {name: "FourFloats", a: []any{1.1, 2.2, 3.3, 4.4}, sep: ",", want: "1.1,2.2,3.3,4.4"}, 19 | {name: "FourStrings", a: []any{"a", "b", "c", "d"}, sep: ",", want: "a,b,c,d"}, 20 | } 21 | for _, tt := range tests { 22 | t.Run(tt.name, func(t *testing.T) { 23 | if got := join(tt.a, tt.sep); got != tt.want { 24 | t.Errorf("join() = %v, want %v", got, tt.want) 25 | } 26 | }) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /internal/proto/clientpb/client.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package clientpb; 4 | 5 | import "gorums.proto"; 6 | import "google/protobuf/empty.proto"; 7 | 8 | option go_package = "github.com/relab/hotstuff/internal/proto/clientpb"; 9 | 10 | // Client is the client-facing API to HotStuff 11 | service Client { 12 | // ExecCommand sends a command to all replicas and waits for valid signatures 13 | // from f+1 replicas 14 | rpc ExecCommand(Command) returns (google.protobuf.Empty) { 15 | option (gorums.quorumcall) = true; 16 | option (gorums.async) = true; 17 | } 18 | } 19 | 20 | // Command is the request that is sent to the HotStuff replicas with the data to 21 | // be executed. 
22 | message Command { 23 | uint32 ClientID = 1; 24 | uint64 SequenceNumber = 2; 25 | bytes Data = 3; 26 | } 27 | 28 | // Batch is a list of commands to be executed 29 | message Batch { repeated Command Commands = 1; } 30 | -------------------------------------------------------------------------------- /protocol/rules/byzantine/silentproposer.go: -------------------------------------------------------------------------------- 1 | package byzantine 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/internal/proto/clientpb" 6 | "github.com/relab/hotstuff/protocol/consensus" 7 | ) 8 | 9 | const NameSilentProposer = "silentproposer" 10 | 11 | type SilentProposer struct { 12 | consensus.Ruleset 13 | } 14 | 15 | // NewSilentProposer returns a Byzantine replica that will never propose. 16 | // Note: A silent proposer will still participate in voting and other 17 | // protocol activities, it just won't propose new blocks. 18 | func NewSilentProposer(rules consensus.Ruleset) *SilentProposer { 19 | return &SilentProposer{Ruleset: rules} 20 | } 21 | 22 | func (s *SilentProposer) ProposeRule(_ hotstuff.View, _ hotstuff.SyncInfo, _ *clientpb.Batch) (hotstuff.ProposeMsg, bool) { 23 | return hotstuff.ProposeMsg{}, false 24 | } 25 | 26 | var _ consensus.Ruleset = (*SilentProposer)(nil) 27 | -------------------------------------------------------------------------------- /scripts/deploy_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | join() { 4 | local IFS="$1" 5 | shift 6 | echo "$*" 7 | } 8 | 9 | num_hosts=4 10 | 11 | declare -A hosts 12 | 13 | for ((i = 1; i <= num_hosts; i++)); do 14 | hosts[$i]="hotstuff-worker-$i" 15 | done 16 | 17 | if [ ! 
-f "./id" ]; then 18 | ssh-keygen -t ed25519 -C "hotstuff-test" -f "./id" -N "" 19 | fi 20 | 21 | compose_args="--project-name=hotstuff" 22 | 23 | docker compose $compose_args up -d --build --scale worker=4 24 | 25 | docker compose $compose_args exec -T controller /bin/sh -c "ssh-keyscan -H $(join ' ' "${hosts[@]}") >> ~/.ssh/known_hosts" &>/dev/null 26 | docker compose $compose_args exec -T controller /bin/sh -c "hotstuff run --config=./example_config.cue --ssh-config=.ssh/config" 27 | exit_code="$?" 28 | 29 | docker compose $compose_args down 30 | 31 | # Clean up dangling images 32 | docker image prune -f --filter "dangling=true" 33 | 34 | exit $exit_code 35 | -------------------------------------------------------------------------------- /protocol/comm/kauri/service.go: -------------------------------------------------------------------------------- 1 | package kauri 2 | 3 | import ( 4 | "github.com/relab/gorums" 5 | "github.com/relab/hotstuff/core/eventloop" 6 | "github.com/relab/hotstuff/internal/proto/kauripb" 7 | ) 8 | 9 | type kauriServiceImpl struct { 10 | eventLoop *eventloop.EventLoop 11 | } 12 | 13 | // RegisterService registers a service implementation for Gorums which allows sending ContributionRecvEvent. 14 | func RegisterService( 15 | eventLoop *eventloop.EventLoop, 16 | gorumsSrv *gorums.Server, 17 | ) { 18 | i := &kauriServiceImpl{eventLoop: eventLoop} 19 | kauripb.RegisterKauriServer(gorumsSrv, i) 20 | } 21 | 22 | func (i kauriServiceImpl) SendContribution(_ gorums.ServerCtx, request *kauripb.Contribution) { 23 | i.eventLoop.AddEvent(ContributionRecvEvent{Contribution: request}) 24 | } 25 | 26 | // ContributionRecvEvent is raised when a contribution is received. 
27 | type ContributionRecvEvent struct { 28 | Contribution *kauripb.Contribution 29 | } 30 | -------------------------------------------------------------------------------- /server/options.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/relab/gorums" 5 | "github.com/relab/hotstuff" 6 | "github.com/relab/hotstuff/internal/latency" 7 | ) 8 | 9 | type serverOptions struct { 10 | id hotstuff.ID 11 | latencyMatrix latency.Matrix 12 | gorumsSrvOpts []gorums.ServerOption 13 | } 14 | 15 | // ServerOption is a function for configuring the Server. 16 | type ServerOption func(*serverOptions) 17 | 18 | // WithLatencies sets the locations assigned to the replicas and 19 | // constructs the corresponding latency matrix. 20 | func WithLatencies(id hotstuff.ID, locations []string) ServerOption { 21 | return func(opts *serverOptions) { 22 | opts.id = id 23 | opts.latencyMatrix = latency.MatrixFrom(locations) 24 | } 25 | } 26 | 27 | // WithGorumsServerOptions sets the gorums server options. 28 | func WithGorumsServerOptions(opts ...gorums.ServerOption) ServerOption { 29 | return func(o *serverOptions) { 30 | o.gorumsSrvOpts = append(o.gorumsSrvOpts, opts...) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /metrics/types/event.go: -------------------------------------------------------------------------------- 1 | // Package types defines various types for metrics collection. 2 | package types 3 | 4 | import ( 5 | "time" 6 | 7 | "github.com/relab/hotstuff" 8 | "github.com/relab/hotstuff/client" 9 | "google.golang.org/protobuf/types/known/timestamppb" 10 | ) 11 | 12 | func newEvent(client bool, id uint32, timestamp time.Time) *Event { 13 | return &Event{ 14 | ID: id, 15 | Client: client, 16 | Timestamp: timestamppb.New(timestamp), 17 | } 18 | } 19 | 20 | // NewReplicaEvent creates a new replica event. 
21 | func NewReplicaEvent(id hotstuff.ID, timestamp time.Time) *Event { 22 | return newEvent(false, uint32(id), timestamp) 23 | } 24 | 25 | // NewClientEvent creates a new client event. 26 | func NewClientEvent(id client.ID, timestamp time.Time) *Event { 27 | return newEvent(true, uint32(id), timestamp) 28 | } 29 | 30 | // TickEvent is sent when new measurements should be recorded. 31 | type TickEvent struct { 32 | // The time when the previous tick happened. 33 | LastTick time.Time 34 | } 35 | -------------------------------------------------------------------------------- /protocol/rules/factory.go: -------------------------------------------------------------------------------- 1 | // Package rules contains consensus rulesets for various HotStuff-based protocols. 2 | package rules 3 | 4 | import ( 5 | "fmt" 6 | 7 | "github.com/relab/hotstuff/core" 8 | "github.com/relab/hotstuff/core/logging" 9 | "github.com/relab/hotstuff/protocol/consensus" 10 | "github.com/relab/hotstuff/security/blockchain" 11 | ) 12 | 13 | func New( 14 | logger logging.Logger, 15 | config *core.RuntimeConfig, 16 | blockchain *blockchain.Blockchain, 17 | name string, 18 | ) (ruleset consensus.Ruleset, _ error) { 19 | switch name { 20 | case "": 21 | fallthrough // default to chainedhotstuff if no name is provided 22 | case NameChainedHotStuff: 23 | ruleset = NewChainedHotStuff(logger, config, blockchain) 24 | case NameFastHotStuff: 25 | ruleset = NewFastHotStuff(logger, config, blockchain) 26 | case NameSimpleHotStuff: 27 | ruleset = NewSimpleHotStuff(logger, config, blockchain) 28 | default: 29 | return nil, fmt.Errorf("invalid consensus name: '%s'", name) 30 | } 31 | return 32 | } 33 | -------------------------------------------------------------------------------- /metrics/welford.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import "math" 4 | 5 | // Welford is an implementation of Welford's online algorithm for calculating 
variance. 6 | type Welford struct { 7 | mean float64 8 | m2 float64 9 | count uint64 10 | } 11 | 12 | // Update adds the value to the current estimate. 13 | func (w *Welford) Update(val float64) { 14 | w.count++ 15 | delta := val - w.mean 16 | w.mean += delta / float64(w.count) 17 | delta2 := val - w.mean 18 | w.m2 += delta * delta2 19 | } 20 | 21 | // Get returns the current mean and sample variance estimate. 22 | func (w *Welford) Get() (mean, variance float64, count uint64) { 23 | if w.count < 2 { 24 | return w.mean, math.NaN(), w.count 25 | } 26 | return w.mean, w.m2 / (float64(w.count - 1)), w.count 27 | } 28 | 29 | // Count returns the total number of values that have been added to the variance estimate. 30 | func (w *Welford) Count() uint64 { 31 | return w.count 32 | } 33 | 34 | // Reset resets all values to 0. 35 | func (w *Welford) Reset() { 36 | w.mean = 0 37 | w.m2 = 0 38 | w.count = 0 39 | } 40 | -------------------------------------------------------------------------------- /core/logging/logging_test.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func BenchmarkInnerLogger(b *testing.B) { 8 | SetLogLevel("error") 9 | logger := New("test").(*wrapper).inner 10 | 11 | for b.Loop() { 12 | logger.Info("test") 13 | } 14 | } 15 | 16 | func BenchmarkWrappedLoggerNoPackages(b *testing.B) { 17 | SetLogLevel("error") 18 | logger := New("test") 19 | 20 | for b.Loop() { 21 | logger.Info("test") 22 | } 23 | } 24 | 25 | func BenchmarkWrappedLoggerWithPackage(b *testing.B) { 26 | SetLogLevel("error") 27 | SetPackageLogLevel("foo", "error") 28 | logger := New("test") 29 | 30 | for b.Loop() { 31 | logger.Info("test") 32 | } 33 | } 34 | 35 | func BenchmarkWrappedLoggerWithMultiplePackages(b *testing.B) { 36 | SetLogLevel("error") 37 | SetPackageLogLevel("foo", "error") 38 | SetPackageLogLevel("bar", "error") 39 | SetPackageLogLevel("baz", "error") 40 | 
SetPackageLogLevel("qux", "error") 41 | SetPackageLogLevel("quux", "error") 42 | logger := New("test") 43 | 44 | for b.Loop() { 45 | logger.Info("test") 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /protocol/comm/kauri/kauri.go: -------------------------------------------------------------------------------- 1 | // Package kauri contains the utilities for the Kauri protocol 2 | package kauri 3 | 4 | import ( 5 | "errors" 6 | "slices" 7 | 8 | "github.com/relab/hotstuff" 9 | ) 10 | 11 | // CanMergeContributions returns nil if the contributions are non-overlapping. 12 | func CanMergeContributions(a, b hotstuff.QuorumSignature) error { 13 | if a == nil || b == nil { 14 | return errors.New("cannot merge nil contributions") 15 | } 16 | canMerge := true 17 | a.Participants().RangeWhile(func(i hotstuff.ID) bool { 18 | // cannot merge a and b if both contain a contribution from the same ID. 19 | canMerge = !b.Participants().Contains(i) 20 | return canMerge // exit the range-while loop if canMerge is false 21 | }) 22 | if !canMerge { 23 | return errors.New("cannot merge overlapping contributions") 24 | } 25 | return nil 26 | } 27 | 28 | // IsSubSet returns true if all elements in a are contained in b. 
29 | func IsSubSet(a, b []hotstuff.ID) bool { 30 | for _, id := range a { 31 | if !slices.Contains(b, id) { 32 | return false 33 | } 34 | } 35 | return true 36 | } 37 | -------------------------------------------------------------------------------- /internal/protostream/protostream_test.go: -------------------------------------------------------------------------------- 1 | package protostream_test 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/relab/hotstuff" 8 | 9 | "github.com/relab/hotstuff/internal/proto/hotstuffpb" 10 | "github.com/relab/hotstuff/internal/protostream" 11 | ) 12 | 13 | func TestProtostream(t *testing.T) { 14 | var buf bytes.Buffer // in-memory stream 15 | msg := hotstuffpb.BlockToProto(hotstuff.GetGenesis()) // test message 16 | 17 | writer := protostream.NewWriter(&buf) 18 | reader := protostream.NewReader(&buf) 19 | 20 | err := writer.WriteAny(msg) 21 | if err != nil { 22 | t.Fatalf("WriteAny failed: %v", err) 23 | } 24 | 25 | gotMsg, err := reader.ReadAny() 26 | if err != nil { 27 | t.Fatalf("ReadAny failed: %v", err) 28 | } 29 | 30 | got, ok := gotMsg.(*hotstuffpb.Block) 31 | if !ok { 32 | t.Fatalf("wrong message type returned: got: %T, want: %T", got, msg) 33 | } 34 | 35 | gotBlock := hotstuffpb.BlockFromProto(got) 36 | if gotBlock.Hash() != hotstuff.GetGenesis().Hash() { 37 | t.Fatalf("message hash did not match") 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /metrics/types/types.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package types; 4 | 5 | option go_package = "github.com/relab/hotstuff/metrics/types"; 6 | 7 | import "google/protobuf/timestamp.proto"; 8 | import "google/protobuf/duration.proto"; 9 | 10 | message StartEvent { Event Event = 1; } 11 | 12 | // Event is the basic type that is recorded by hotstuff. 
13 | // It contains the ID of the replica/client, the type (replica/client), 14 | // the timestamp of the event, and the data. 15 | message Event { 16 | uint32 ID = 1; 17 | bool Client = 2; 18 | google.protobuf.Timestamp Timestamp = 3; 19 | } 20 | 21 | message ThroughputMeasurement { 22 | Event Event = 1; 23 | uint64 Commits = 2; 24 | uint64 Commands = 3; 25 | google.protobuf.Duration Duration = 4; 26 | } 27 | 28 | message LatencyMeasurement { 29 | Event Event = 1; 30 | double Latency = 2; 31 | double Variance = 3; 32 | uint64 Count = 4; 33 | } 34 | 35 | message ViewTimeouts { 36 | Event Event = 1; 37 | // Number of views since last reading. 38 | uint64 Views = 2; 39 | // Number of view timeouts. 40 | uint64 Timeouts = 3; 41 | } 42 | -------------------------------------------------------------------------------- /network/replica.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/core/eventloop" 6 | "github.com/relab/hotstuff/internal/proto/hotstuffpb" 7 | ) 8 | 9 | // replicaNode provides methods used by hotstuff to send messages to replicas. 10 | type replicaNode struct { 11 | eventLoop *eventloop.EventLoop 12 | node *hotstuffpb.Node 13 | id hotstuff.ID 14 | pubKey hotstuff.PublicKey 15 | md map[string]string 16 | } 17 | 18 | // vote sends the partial certificate to the other replica. 19 | func (r *replicaNode) vote(cert hotstuff.PartialCert) { 20 | if r.node == nil { 21 | return 22 | } 23 | ctx, cancel := r.eventLoop.TimeoutContext() 24 | defer cancel() 25 | pCert := hotstuffpb.PartialCertToProto(cert) 26 | r.node.Vote(ctx, pCert) 27 | } 28 | 29 | // newView sends the quorum certificate to the other replica. 
30 | func (r *replicaNode) newView(msg hotstuff.SyncInfo) { 31 | if r.node == nil { 32 | return 33 | } 34 | ctx, cancel := r.eventLoop.TimeoutContext() 35 | defer cancel() 36 | r.node.NewView(ctx, hotstuffpb.SyncInfoToProto(msg)) 37 | } 38 | -------------------------------------------------------------------------------- /protocol/leaderrotation/factory.go: -------------------------------------------------------------------------------- 1 | package leaderrotation 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/core" 8 | "github.com/relab/hotstuff/core/logging" 9 | "github.com/relab/hotstuff/protocol" 10 | "github.com/relab/hotstuff/security/blockchain" 11 | ) 12 | 13 | func New( 14 | logger logging.Logger, 15 | config *core.RuntimeConfig, 16 | blockchain *blockchain.Blockchain, 17 | viewStates *protocol.ViewStates, 18 | name string, 19 | chainLength int, 20 | ) (ld LeaderRotation, _ error) { 21 | switch name { 22 | case "": 23 | fallthrough // default to round-robin if no name is provided 24 | case NameRoundRobin: 25 | ld = NewRoundRobin(config) 26 | case NameFixed: 27 | ld = NewFixed(hotstuff.ID(1)) 28 | case NameTree: 29 | ld = NewTreeBased(config) 30 | case NameCarousel: 31 | ld = NewCarousel(chainLength, blockchain, viewStates, config, logger) 32 | case NameReputation: 33 | ld = NewRepBased(chainLength, viewStates, config, logger) 34 | default: 35 | return nil, fmt.Errorf("invalid leader-rotation algorithm: '%s'", name) 36 | } 37 | return 38 | } 39 | -------------------------------------------------------------------------------- /wiring/core.go: -------------------------------------------------------------------------------- 1 | package wiring 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/core" 8 | "github.com/relab/hotstuff/core/eventloop" 9 | "github.com/relab/hotstuff/core/logging" 10 | ) 11 | 12 | type Core struct { 13 | eventLoop *eventloop.EventLoop 14 | logger 
logging.Logger 15 | config *core.RuntimeConfig 16 | } 17 | 18 | func NewCore( 19 | id hotstuff.ID, 20 | logTag string, 21 | privKey hotstuff.PrivateKey, 22 | opts ...core.RuntimeOption, 23 | ) *Core { 24 | logger := logging.New(fmt.Sprintf("%s%d", logTag, id)) 25 | return &Core{ 26 | config: core.NewRuntimeConfig(id, privKey, opts...), 27 | eventLoop: eventloop.New(logger, 100), 28 | logger: logger, 29 | } 30 | } 31 | 32 | // EventLoop returns the eventloop instance. 33 | func (c *Core) EventLoop() *eventloop.EventLoop { 34 | return c.eventLoop 35 | } 36 | 37 | // Logger returns the logger instance. 38 | func (c *Core) Logger() logging.Logger { 39 | return c.logger 40 | } 41 | 42 | // RuntimeCfg returns the runtime configuration. 43 | func (c *Core) RuntimeCfg() *core.RuntimeConfig { 44 | return c.config 45 | } 46 | -------------------------------------------------------------------------------- /protocol/rules/byzantine/increaseview.go: -------------------------------------------------------------------------------- 1 | package byzantine 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/core" 6 | "github.com/relab/hotstuff/internal/proto/clientpb" 7 | "github.com/relab/hotstuff/protocol/consensus" 8 | ) 9 | 10 | const NameIncreaseView = "increaseview" 11 | 12 | type IncreaseView struct { 13 | config *core.RuntimeConfig 14 | consensus.Ruleset 15 | } 16 | 17 | // NewIncreaseView returns a replica that proposes with an inflated view number. 
18 | func NewIncreaseView( 19 | config *core.RuntimeConfig, 20 | rules consensus.Ruleset, 21 | ) *IncreaseView { 22 | return &IncreaseView{ 23 | config: config, 24 | Ruleset: rules, 25 | } 26 | } 27 | 28 | func (iv *IncreaseView) ProposeRule(view hotstuff.View, cert hotstuff.SyncInfo, cmd *clientpb.Batch) (proposal hotstuff.ProposeMsg, ok bool) { 29 | qc, ok := cert.QC() 30 | if !ok { 31 | return proposal, false 32 | } 33 | const ByzViewExtraIncrease hotstuff.View = 1000 34 | proposal = hotstuff.NewProposeMsg(iv.config.ID(), view+ByzViewExtraIncrease, qc, cmd) 35 | return proposal, true 36 | } 37 | 38 | var _ consensus.Ruleset = (*IncreaseView)(nil) 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 John Ingve Olsen and Hans Erik Frøyland 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /wiring/client.go: -------------------------------------------------------------------------------- 1 | package wiring 2 | 3 | import ( 4 | "github.com/relab/gorums" 5 | "github.com/relab/hotstuff/core/eventloop" 6 | "github.com/relab/hotstuff/core/logging" 7 | "github.com/relab/hotstuff/internal/proto/clientpb" 8 | "github.com/relab/hotstuff/server" 9 | ) 10 | 11 | type Client struct { 12 | cmdCache *clientpb.CommandCache 13 | clientIO *server.ClientIO 14 | } 15 | 16 | // NewClient returns a set of dependencies for serving clients through 17 | func NewClient( 18 | eventLoop *eventloop.EventLoop, 19 | logger logging.Logger, 20 | commandBatchSize uint32, 21 | clientSrvOpts ...gorums.ServerOption, 22 | ) *Client { 23 | cmdCache := clientpb.NewCommandCache( 24 | commandBatchSize, 25 | ) 26 | clientSrv := server.NewClientIO( 27 | eventLoop, 28 | logger, 29 | cmdCache, 30 | clientSrvOpts..., 31 | ) 32 | return &Client{ 33 | cmdCache: cmdCache, 34 | clientIO: clientSrv, 35 | } 36 | } 37 | 38 | // Cache returns the command cache. 39 | func (s *Client) Cache() *clientpb.CommandCache { 40 | return s.cmdCache 41 | } 42 | 43 | // Server returns the client server. 
44 | func (s *Client) Server() *server.ClientIO { 45 | return s.clientIO 46 | } 47 | -------------------------------------------------------------------------------- /twins/twins_test.go: -------------------------------------------------------------------------------- 1 | package twins_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/relab/hotstuff/core/logging" 8 | "github.com/relab/hotstuff/protocol/rules" 9 | "github.com/relab/hotstuff/twins" 10 | ) 11 | 12 | func TestTwins(t *testing.T) { 13 | const ( 14 | numNodes = 4 15 | numTwins = 1 16 | ) 17 | 18 | g := twins.NewGenerator(logging.New(""), twins.Settings{ 19 | NumNodes: numNodes, 20 | NumTwins: numTwins, 21 | Partitions: 2, 22 | Views: 8, 23 | }) 24 | seed := time.Now().Unix() 25 | g.Shuffle(seed) 26 | 27 | scenarioCount := 10 28 | totalCommits := 0 29 | 30 | for range scenarioCount { 31 | s, err := g.NextScenario() 32 | if err != nil { 33 | break 34 | } 35 | result, err := twins.ExecuteScenario(s, numNodes, numTwins, 100, rules.NameChainedHotStuff) 36 | if err != nil { 37 | t.Fatal(err) 38 | } 39 | t.Log(result.Safe, result.Commits) 40 | t.Log(s) 41 | if !result.Safe { 42 | t.Logf("Scenario not safe: %v", s) 43 | continue 44 | } 45 | if result.Commits > 0 { 46 | totalCommits += result.Commits 47 | } 48 | } 49 | 50 | t.Logf("Average %.1f commits per scenario.", float64(totalCommits)/float64(scenarioCount)) 51 | } 52 | -------------------------------------------------------------------------------- /scripts/sweep.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | // This file defines a configuration for running experiments with different parameters. 
4 | // Use the following command to generate the `experiments.cue` file: 5 | // cue eval --out cue -e config.experiments exp-config.cue > experiments.cue 6 | config: { 7 | // Shared static settings 8 | shared: { 9 | replicaHosts: ["localhost"] 10 | clientHosts: ["localhost"] 11 | replicas: 4 12 | clients: 1 13 | } 14 | 15 | // Parameter sweeps 16 | params: { 17 | consensus: ["chainedhotstuff"] 18 | leaderRotation: ["round-robin", "fixed"] 19 | crypto: ["ecdsa", "eddsa"] 20 | communication: ["clique"] 21 | byz: [ 22 | {strategy: "", targets: []}, 23 | {strategy: "silentproposer", targets: [2]}, 24 | ] 25 | } 26 | 27 | // Cross-product into experiments 28 | experiments: [ 29 | for cs in params.consensus 30 | for ld in params.leaderRotation 31 | for cr in params.crypto 32 | for cm in params.communication 33 | for bc in params.byz { 34 | config: { 35 | shared 36 | consensus: cs 37 | leaderRotation: ld 38 | crypto: cr 39 | communication: cm 40 | byzantineStrategy: {(bc.strategy): bc.targets} 41 | } 42 | }, 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /internal/tree/shuffle_test.go: -------------------------------------------------------------------------------- 1 | package tree_test 2 | 3 | import ( 4 | "fmt" 5 | "slices" 6 | "testing" 7 | 8 | "github.com/relab/hotstuff/internal/tree" 9 | ) 10 | 11 | func TestTreeShuffle(t *testing.T) { 12 | tests := []struct { 13 | size int 14 | }{ 15 | {size: 0}, 16 | {size: 1}, 17 | {size: 2}, 18 | {size: 3}, 19 | {size: 4}, 20 | {size: 5}, 21 | {size: 10}, 22 | {size: 20}, 23 | } 24 | for _, tt := range tests { 25 | t.Run(fmt.Sprintf("size=%d", tt.size), func(t *testing.T) { 26 | treePos := tree.DefaultTreePosUint32(tt.size) 27 | tree.Shuffle(treePos) 28 | if len(treePos) != tt.size { 29 | t.Errorf("Randomize() got %v, want %v", len(treePos), tt.size) 30 | } 31 | want := tree.DefaultTreePosUint32(tt.size) 32 | for _, w := range want { 33 | if !slices.Contains(treePos, w) { 34 | 
t.Errorf("Randomize() = %v, want elements %v, missing %v", treePos, want, w) 35 | } 36 | } 37 | }) 38 | } 39 | } 40 | 41 | func BenchmarkTreeShuffle(b *testing.B) { 42 | for _, size := range []int{10, 100, 1000, 10000} { 43 | b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) { 44 | treePos := tree.DefaultTreePosUint32(size) 45 | for b.Loop() { 46 | tree.Shuffle(treePos) 47 | } 48 | }) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /cmd/latencygen/main.go: -------------------------------------------------------------------------------- 1 | // LatencyGen generates a Go source file containing the latency matrix. 2 | package main 3 | 4 | import ( 5 | "embed" 6 | "flag" 7 | "log" 8 | "os" 9 | "path/filepath" 10 | 11 | "github.com/relab/hotstuff/internal/cli" 12 | "github.com/relab/hotstuff/internal/root" 13 | ) 14 | 15 | //go:embed latencies/*.csv 16 | var csvFiles embed.FS 17 | 18 | //go:generate go run . 19 | 20 | func main() { 21 | latencyFile := flag.String("file", "wonderproxy.csv", "csv file to use for latency matrix (default: wonderproxy)") 22 | flag.Parse() 23 | 24 | csvLatencies, err := csvFiles.ReadFile(filepath.Join("latencies", *latencyFile)) 25 | if err != nil { 26 | log.Fatal(err) 27 | } 28 | allToAllMatrix, err := cli.ParseCSVLatencies(string(csvLatencies)) 29 | if err != nil { 30 | log.Fatal(err) 31 | } 32 | srcFile := filepath.Join("cmd", "latencygen", "latencies", *latencyFile) 33 | latenciesGoCode, err := cli.GenerateGoLatencyMatrix(srcFile, allToAllMatrix) 34 | if err != nil { 35 | log.Fatal(err) 36 | } 37 | 38 | // file path to save generated latencies to. 
39 | dstFile := filepath.Join(root.Dir, "internal", "latency", "latency_matrix.go") 40 | if err = os.WriteFile(dstFile, latenciesGoCode, 0o600); err != nil { 41 | log.Fatal(err) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /wiring/security.go: -------------------------------------------------------------------------------- 1 | package wiring 2 | 3 | import ( 4 | "github.com/relab/hotstuff/core" 5 | "github.com/relab/hotstuff/core/eventloop" 6 | "github.com/relab/hotstuff/core/logging" 7 | "github.com/relab/hotstuff/security/blockchain" 8 | "github.com/relab/hotstuff/security/cert" 9 | "github.com/relab/hotstuff/security/crypto" 10 | ) 11 | 12 | type Security struct { 13 | blockchain *blockchain.Blockchain 14 | auth *cert.Authority 15 | } 16 | 17 | // NewSecurity returns a set of dependencies necessary for application security and integrity. 18 | func NewSecurity( 19 | eventLoop *eventloop.EventLoop, 20 | logger logging.Logger, 21 | config *core.RuntimeConfig, 22 | sender core.Sender, 23 | base crypto.Base, 24 | ) *Security { 25 | blockchain := blockchain.New( 26 | eventLoop, 27 | logger, 28 | sender, 29 | ) 30 | auth := cert.NewAuthority( 31 | config, 32 | blockchain, 33 | base, 34 | ) 35 | return &Security{ 36 | blockchain: blockchain, 37 | auth: auth, 38 | } 39 | } 40 | 41 | // Blockchain returns the blockchain instance. 42 | func (s *Security) Blockchain() *blockchain.Blockchain { 43 | return s.blockchain 44 | } 45 | 46 | // Authority returns the certificate authority. 
47 | func (s *Security) Authority() *cert.Authority { 48 | return s.auth 49 | } 50 | -------------------------------------------------------------------------------- /core/eventloop/context.go: -------------------------------------------------------------------------------- 1 | package eventloop 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/relab/hotstuff" 7 | ) 8 | 9 | // This file provides several functions for creating contexts with lifespans that are tied to synchronizer events. 10 | 11 | // ViewContext returns a context that is canceled at the end of view. 12 | // If view is nil or less than or equal to the current view, the context will be canceled at the next view change. 13 | func (el *EventLoop) ViewContext(view *hotstuff.View) (context.Context, context.CancelFunc) { 14 | ctx, cancel := context.WithCancel(el.Context()) 15 | 16 | unregister := Register(el, func(event hotstuff.ViewChangeEvent) { 17 | if view == nil || event.View >= *view { 18 | cancel() 19 | } 20 | }, Prioritize(), UnsafeRunInAddEvent()) 21 | 22 | return ctx, func() { 23 | unregister() 24 | cancel() 25 | } 26 | } 27 | 28 | // TimeoutContext returns a context that is canceled either when a timeout occurs, or when the view changes. 29 | func (el *EventLoop) TimeoutContext() (context.Context, context.CancelFunc) { 30 | // ViewContext handles view-change case. 31 | ctx, cancel := el.ViewContext(nil) 32 | 33 | unregister := Register(el, func(_ hotstuff.TimeoutEvent) { 34 | cancel() 35 | }, Prioritize(), UnsafeRunInAddEvent()) 36 | 37 | return ctx, func() { 38 | unregister() 39 | cancel() 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /internal/config/testdata/sweep-experiments.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | // This file defines a configuration for running experiments with different parameters. 
4 | // Use the following command to generate the `experiments.cue` file: 5 | // cue eval --out cue -e config.experiments exp-config.cue > experiments.cue 6 | config: { 7 | // Shared static settings 8 | shared: { 9 | replicaHosts: ["localhost"] 10 | clientHosts: ["localhost"] 11 | replicas: 4 12 | clients: 1 13 | locations: ["Rome", "Oslo", "London", "Munich"] 14 | treePositions: [3, 2, 1, 4] 15 | branchFactor: 2 16 | } 17 | 18 | // Parameter sweeps 19 | params: { 20 | consensus: ["chainedhotstuff", "simplehotstuff"] 21 | leaderRotation: ["round-robin", "fixed"] 22 | crypto: ["ecdsa"] 23 | communication: ["clique", "kauri"] 24 | byz: [ 25 | {strategy: "", targets: []}, 26 | {strategy: "silentproposer", targets: [2]}, 27 | ] 28 | } 29 | 30 | // Cross-product into experiments 31 | experiments: [ 32 | for cs in params.consensus 33 | for ld in params.leaderRotation 34 | for cr in params.crypto 35 | for cm in params.communication 36 | for bc in params.byz { 37 | config: { 38 | shared 39 | consensus: cs 40 | leaderRotation: ld 41 | crypto: cr 42 | communication: cm 43 | byzantineStrategy: {(bc.strategy): bc.targets} 44 | } 45 | }, 46 | ] 47 | } 48 | -------------------------------------------------------------------------------- /core/sender.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/relab/hotstuff" 7 | ) 8 | 9 | // Sender handles the network layer of the consensus protocol by methods for sending specific messages. 10 | type Sender interface { 11 | // NewView sends a new view message to a replica. Returns an error if the replica was not found. 12 | NewView(id hotstuff.ID, msg hotstuff.SyncInfo) error 13 | // Vote sends a vote message to a replica. Returns an error if the replica was not found. 14 | Vote(id hotstuff.ID, cert hotstuff.PartialCert) error 15 | // Timeout broadcasts a timeout message to the replicas. 
16 | Timeout(msg hotstuff.TimeoutMsg) 17 | // Propose broadcasts a propose message to the replicas. 18 | Propose(proposal *hotstuff.ProposeMsg) 19 | // RequestBlock sends a request to the replicas to send back a locally missing block. 20 | RequestBlock(ctx context.Context, hash hotstuff.Hash) (*hotstuff.Block, bool) 21 | // Sub returns a new sender copy that is only allowed to send to the provided ids. 22 | // Returns an error if the ids are not a subset of the parent's ids. 23 | Sub(ids []hotstuff.ID) (Sender, error) 24 | } 25 | 26 | // KauriSender is an extension of Sender allowing to send contribution messages to parent nodes. 27 | type KauriSender interface { 28 | Sender 29 | // SendContributionToParent aggregates the contribution to the parent. 30 | SendContributionToParent(view hotstuff.View, qc hotstuff.QuorumSignature) 31 | } 32 | -------------------------------------------------------------------------------- /internal/config/exp-config.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | // This file defines a configuration for running experiments with different parameters. 
4 | // Use the following command to generate the `experiments.cue` file: 5 | // cue eval --out cue -e config.experiments exp-config.cue > experiments.cue 6 | config: { 7 | // Shared static settings 8 | shared: { 9 | replicaHosts: ["localhost"] 10 | clientHosts: ["localhost"] 11 | replicas: 4 12 | clients: 1 13 | locations: ["Rome", "Oslo", "London", "Munich"] 14 | treePositions: [3, 2, 1, 4] 15 | branchFactor: 2 16 | } 17 | 18 | // Parameter sweeps 19 | params: { 20 | consensus: ["chainedhotstuff", "simplehotstuff", "fasthotstuff"] 21 | leaderRotation: ["round-robin", "fixed", "carousel", "reputation"] 22 | crypto: ["ecdsa", "eddsa", "bls12"] 23 | communication: ["clique", "kauri"] 24 | byz: [ 25 | {strategy: "", targets: []}, 26 | {strategy: "fork", targets: [2]}, 27 | {strategy: "silentproposer", targets: [2]}, 28 | ] 29 | } 30 | 31 | // Cross-product into experiments 32 | experiments: [ 33 | for cs in params.consensus 34 | for ld in params.leaderRotation 35 | for cr in params.crypto 36 | for cm in params.communication 37 | for bc in params.byz { 38 | config: { 39 | shared 40 | consensus: cs 41 | leaderRotation: ld 42 | crypto: cr 43 | communication: cm 44 | byzantineStrategy: {(bc.strategy): bc.targets} 45 | } 46 | }, 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Local Run", 9 | "type": "go", 10 | "request": "launch", 11 | "mode": "debug", 12 | "program": "${workspaceRoot}/cmd/hotstuff/main.go", 13 | "cwd": "${workspaceRoot}", 14 | "args": [ 15 | "run", 16 | "--kauri" 17 | ] 18 | }, 19 | { 20 | "name": "Deploy to Docker container", 21 | "type": "go", 22 | "request": "launch", 23 | "mode": "debug", 24 | "program": "${workspaceRoot}/hotstuff-cli/main.go", 25 | "cwd": "${workspaceRoot}", 26 | "args": [ 27 | "run", 28 | "--hosts=localhost", 29 | "--ssh-config=scripts/ssh_config.local", 30 | "--output=foo", 31 | "--cpu-profile" 32 | ] 33 | }, 34 | { 35 | "name": "Attach to Process", 36 | "type": "go", 37 | "request": "attach", 38 | "mode": "local", 39 | "processId": 0 40 | }, 41 | { 42 | "name": "Launch file", 43 | "type": "go", 44 | "request": "launch", 45 | "mode": "debug", 46 | "program": "${file}", 47 | "cwd": "${workspaceRoot}" 48 | }, 49 | { 50 | "name": "Connect to server", 51 | "type": "go", 52 | "request": "attach", 53 | "mode": "remote", 54 | "remotePath": "${workspaceFolder}", 55 | "port": 2345, 56 | "host": "127.0.0.1" 57 | } 58 | ] 59 | } 60 | -------------------------------------------------------------------------------- /replica/options.go: -------------------------------------------------------------------------------- 1 | package replica 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | 7 | "github.com/relab/gorums" 8 | "github.com/relab/hotstuff/server" 9 | "google.golang.org/grpc" 10 | "google.golang.org/grpc/credentials" 11 | "google.golang.org/grpc/credentials/insecure" 12 | ) 13 | 14 | type replicaOptions struct { 15 | credentials credentials.TransportCredentials 16 | clientGorumsSrvOpts []gorums.ServerOption 17 | replicaGorumsSrvOpts []gorums.ServerOption 18 | serverOpts []server.ServerOption 19 | } 20 | 21 | func newDefaultOpts() *replicaOptions { 22 
| return &replicaOptions{ 23 | credentials: insecure.NewCredentials(), 24 | } 25 | } 26 | 27 | type Option func(*replicaOptions) 28 | 29 | func WithServerOptions(opts ...server.ServerOption) Option { 30 | return func(ro *replicaOptions) { 31 | ro.serverOpts = append(ro.serverOpts, opts...) 32 | } 33 | } 34 | 35 | func WithTLS(certificate tls.Certificate, rootCAs *x509.CertPool, creds credentials.TransportCredentials) Option { 36 | return func(ro *replicaOptions) { 37 | ro.clientGorumsSrvOpts = append(ro.clientGorumsSrvOpts, gorums.WithGRPCServerOptions( 38 | grpc.Creds(credentials.NewServerTLSFromCert(&certificate)), 39 | )) 40 | ro.replicaGorumsSrvOpts = append(ro.replicaGorumsSrvOpts, gorums.WithGRPCServerOptions(grpc.Creds(credentials.NewTLS(&tls.Config{ 41 | Certificates: []tls.Certificate{certificate}, 42 | ClientCAs: rootCAs, 43 | ClientAuth: tls.RequireAndVerifyClientCert, 44 | })))) 45 | ro.credentials = creds 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /core/options.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import "github.com/relab/hotstuff/internal/tree" 4 | 5 | type RuntimeOption func(*RuntimeConfig) 6 | 7 | // WithSyncVerification forces synchronous verification of incoming votes at the leader. 8 | // The default is to verify votes concurrently. Forcing synchronous vote verification 9 | // can make it easier to debug. 10 | func WithSyncVerification() RuntimeOption { 11 | return func(rc *RuntimeConfig) { 12 | rc.syncVoteVerification = true 13 | } 14 | } 15 | 16 | // WithKauriTree adds a tree to the config to be used by a tree-based leader scheme in 17 | // the Kauri protocol. 18 | func WithKauriTree(t *tree.Tree) RuntimeOption { 19 | return func(g *RuntimeConfig) { 20 | g.tree = t 21 | } 22 | } 23 | 24 | // WithSharedRandomSeed adds a seed shared among replicas. 
25 | // Default: 0 26 | func WithSharedRandomSeed(seed int64) RuntimeOption { 27 | return func(g *RuntimeConfig) { 28 | g.sharedRandomSeed = seed 29 | } 30 | } 31 | 32 | // WithAggregateQC returns true if aggregated quorum certificates should be used. 33 | // This is true for Fast-HotStuff: https://arxiv.org/abs/2010.11454 34 | func WithAggregateQC() RuntimeOption { 35 | return func(g *RuntimeConfig) { 36 | g.aggQC = true 37 | } 38 | } 39 | 40 | // WithCache specifies the cache size for crypto operations. This option causes 41 | // the Crypto implementation to be wrapped in a caching layer that caches the 42 | // results of recent crypto operations, avoiding repeated computations. 43 | func WithCache(size uint) RuntimeOption { 44 | return func(g *RuntimeConfig) { 45 | g.cacheSize = size 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /metrics/timeouts.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/core/eventloop" 8 | "github.com/relab/hotstuff/metrics/types" 9 | ) 10 | 11 | const NameViewTimeouts = "timeouts" 12 | 13 | // viewTimeouts is a metric that measures the number of view timeouts that happen. 14 | type viewTimeouts struct { 15 | metricsLogger Logger 16 | id hotstuff.ID 17 | numViews uint64 18 | numTimeouts uint64 19 | } 20 | 21 | // enableViewTimeouts enables view timeout measurement. 
22 | func enableViewTimeouts( 23 | el *eventloop.EventLoop, 24 | metricsLogger Logger, 25 | id hotstuff.ID, 26 | ) { 27 | vt := &viewTimeouts{ 28 | metricsLogger: metricsLogger, 29 | id: id, 30 | } 31 | eventloop.Register(el, func(event hotstuff.ViewChangeEvent) { 32 | vt.viewChange(event) 33 | }) 34 | eventloop.Register(el, func(tickEvent types.TickEvent) { 35 | vt.tick(tickEvent) 36 | }, eventloop.Prioritize()) 37 | } 38 | 39 | // viewChange records a view change event, incrementing the timeout count if applicable. 40 | func (vt *viewTimeouts) viewChange(event hotstuff.ViewChangeEvent) { 41 | vt.numViews++ 42 | if event.Timeout { 43 | vt.numTimeouts++ 44 | } 45 | } 46 | 47 | // tick logs the current view timeout measurement to the metrics logger. 48 | func (vt *viewTimeouts) tick(_ types.TickEvent) { 49 | vt.metricsLogger.Log(&types.ViewTimeouts{ 50 | Event: types.NewReplicaEvent(vt.id, time.Now()), 51 | Views: vt.numViews, 52 | Timeouts: vt.numTimeouts, 53 | }) 54 | vt.numViews = 0 55 | vt.numTimeouts = 0 56 | } 57 | -------------------------------------------------------------------------------- /protocol/comm/factory.go: -------------------------------------------------------------------------------- 1 | package comm 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/relab/hotstuff/core" 7 | "github.com/relab/hotstuff/core/eventloop" 8 | "github.com/relab/hotstuff/core/logging" 9 | "github.com/relab/hotstuff/network" 10 | "github.com/relab/hotstuff/protocol" 11 | "github.com/relab/hotstuff/protocol/comm/kauri" 12 | "github.com/relab/hotstuff/protocol/leaderrotation" 13 | "github.com/relab/hotstuff/protocol/votingmachine" 14 | "github.com/relab/hotstuff/security/blockchain" 15 | "github.com/relab/hotstuff/security/cert" 16 | ) 17 | 18 | func New( 19 | logger logging.Logger, 20 | eventLoop *eventloop.EventLoop, 21 | config *core.RuntimeConfig, 22 | blockchain *blockchain.Blockchain, 23 | auth *cert.Authority, 24 | sender core.Sender, 25 | leaderRotation 
leaderrotation.LeaderRotation, 26 | viewStates *protocol.ViewStates, 27 | name string, 28 | ) (communication Communication, _ error) { 29 | switch name { 30 | case NameKauri: 31 | communication = NewKauri( 32 | logger, 33 | eventLoop, 34 | config, 35 | blockchain, 36 | auth, 37 | kauri.WrapGorumsSender( 38 | eventLoop, 39 | config, 40 | sender.(*network.GorumsSender), // TODO(AlanRostem): avoid cast 41 | ), 42 | ) 43 | case NameClique: 44 | communication = NewClique( 45 | config, 46 | votingmachine.New( 47 | logger, 48 | eventLoop, 49 | config, 50 | blockchain, 51 | auth, 52 | viewStates, 53 | ), 54 | leaderRotation, 55 | sender, 56 | ) 57 | default: 58 | return nil, fmt.Errorf("invalid communication type: '%s'", name) 59 | } 60 | return 61 | } 62 | -------------------------------------------------------------------------------- /metrics/clientlatency.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/relab/hotstuff/client" 7 | "github.com/relab/hotstuff/core/eventloop" 8 | "github.com/relab/hotstuff/metrics/types" 9 | ) 10 | 11 | const NameClientLatency = "client-latency" 12 | 13 | // clientLatency measures the latency of client requests. 14 | type clientLatency struct { 15 | metricsLogger Logger 16 | id client.ID 17 | wf Welford 18 | } 19 | 20 | // enableClientLatency enables client latency measurement. 21 | func enableClientLatency( 22 | el *eventloop.EventLoop, 23 | metricsLogger Logger, 24 | id client.ID, 25 | ) { 26 | lr := &clientLatency{ 27 | id: id, 28 | metricsLogger: metricsLogger, 29 | } 30 | eventloop.Register(el, func(event client.LatencyMeasurementEvent) { 31 | lr.addLatency(event.Latency) 32 | }) 33 | eventloop.Register(el, func(tickEvent types.TickEvent) { 34 | lr.tick(tickEvent) 35 | }, eventloop.Prioritize()) 36 | } 37 | 38 | // addLatency adds a latency data point to the current measurement. 
39 | func (lr *clientLatency) addLatency(latency time.Duration) { 40 | millis := float64(latency) / float64(time.Millisecond) 41 | lr.wf.Update(millis) 42 | } 43 | 44 | // tick logs the current latency measurement to the metrics logger. 45 | func (lr *clientLatency) tick(_ types.TickEvent) { 46 | mean, variance, count := lr.wf.Get() 47 | event := &types.LatencyMeasurement{ 48 | Event: types.NewClientEvent(lr.id, time.Now()), 49 | Latency: mean, 50 | Variance: variance, 51 | Count: count, 52 | } 53 | lr.metricsLogger.Log(event) 54 | lr.wf.Reset() 55 | } 56 | -------------------------------------------------------------------------------- /core/context.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strconv" 7 | 8 | "github.com/relab/hotstuff" 9 | "google.golang.org/grpc/credentials" 10 | "google.golang.org/grpc/metadata" 11 | "google.golang.org/grpc/peer" 12 | ) 13 | 14 | // PeerIDFromContext extracts the ID of the peer from the context. 
15 | func (g *RuntimeConfig) PeerIDFromContext(ctx context.Context) (hotstuff.ID, error) { 16 | peerInfo, ok := peer.FromContext(ctx) 17 | if !ok { 18 | return 0, fmt.Errorf("peerInfo not available") 19 | } 20 | 21 | if peerInfo.AuthInfo != nil && peerInfo.AuthInfo.AuthType() == "tls" { 22 | tlsInfo, ok := peerInfo.AuthInfo.(credentials.TLSInfo) 23 | if !ok { 24 | return 0, fmt.Errorf("authInfo of wrong type: %T", peerInfo.AuthInfo) 25 | } 26 | if len(tlsInfo.State.PeerCertificates) > 0 { 27 | cert := tlsInfo.State.PeerCertificates[0] 28 | for replicaID := range g.replicas { 29 | if subject, err := strconv.Atoi(cert.Subject.CommonName); err == nil && hotstuff.ID(subject) == replicaID { 30 | return replicaID, nil 31 | } 32 | } 33 | } 34 | return 0, fmt.Errorf("could not find matching certificate") 35 | } 36 | 37 | // If we're not using TLS, we'll fallback to checking the metadata 38 | md, ok := metadata.FromIncomingContext(ctx) 39 | if !ok { 40 | return 0, fmt.Errorf("metadata not available") 41 | } 42 | 43 | v := md.Get("id") 44 | if len(v) < 1 { 45 | return 0, fmt.Errorf("id field not present") 46 | } 47 | 48 | id, err := strconv.Atoi(v[0]) 49 | if err != nil { 50 | return 0, fmt.Errorf("cannot parse ID field: %w", err) 51 | } 52 | 53 | return hotstuff.ID(id), nil 54 | } 55 | -------------------------------------------------------------------------------- /metrics/consensuslatency.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/core/eventloop" 8 | "github.com/relab/hotstuff/metrics/types" 9 | ) 10 | 11 | const NameConsensusLatency = "consensus-latency" 12 | 13 | // consensusLatency measures the latency of consensus decisions. 14 | type consensusLatency struct { 15 | metricsLogger Logger 16 | id hotstuff.ID 17 | wf Welford 18 | } 19 | 20 | // enableConsensusLatency enables consensus latency measurement. 
21 | func enableConsensusLatency( 22 | el *eventloop.EventLoop, 23 | metricsLogger Logger, 24 | id hotstuff.ID, 25 | ) { 26 | lr := consensusLatency{ 27 | metricsLogger: metricsLogger, 28 | id: id, 29 | } 30 | eventloop.Register(el, func(event hotstuff.ConsensusLatencyEvent) { 31 | lr.addLatency(event.Latency) 32 | }) 33 | eventloop.Register(el, func(tickEvent types.TickEvent) { 34 | lr.tick(tickEvent) 35 | }, eventloop.Prioritize()) 36 | } 37 | 38 | // addLatency adds a latency data point to the current measurement. 39 | func (lr *consensusLatency) addLatency(latency time.Duration) { 40 | millis := float64(latency) / float64(time.Millisecond) 41 | lr.wf.Update(millis) 42 | } 43 | 44 | // tick logs the current latency measurement to the metrics logger. 45 | func (lr *consensusLatency) tick(_ types.TickEvent) { 46 | mean, variance, count := lr.wf.Get() 47 | event := &types.LatencyMeasurement{ 48 | Event: types.NewReplicaEvent(lr.id, time.Now()), 49 | Latency: mean, 50 | Variance: variance, 51 | Count: count, 52 | } 53 | lr.metricsLogger.Log(event) 54 | lr.wf.Reset() 55 | } 56 | -------------------------------------------------------------------------------- /protocol/rules/byzantine/fork.go: -------------------------------------------------------------------------------- 1 | // Package byzantine contains Byzantine consensus rules. 2 | package byzantine 3 | 4 | import ( 5 | "github.com/relab/hotstuff" 6 | "github.com/relab/hotstuff/core" 7 | "github.com/relab/hotstuff/internal/proto/clientpb" 8 | "github.com/relab/hotstuff/protocol/consensus" 9 | "github.com/relab/hotstuff/security/blockchain" 10 | ) 11 | 12 | const NameFork = "fork" 13 | 14 | type Fork struct { 15 | config *core.RuntimeConfig 16 | blockchain *blockchain.Blockchain 17 | consensus.Ruleset 18 | } 19 | 20 | // NewFork returns a Byzantine replica that will try to fork the chain. 
21 | func NewFork( 22 | config *core.RuntimeConfig, 23 | blockchain *blockchain.Blockchain, 24 | rules consensus.Ruleset, 25 | ) *Fork { 26 | return &Fork{ 27 | config: config, 28 | blockchain: blockchain, 29 | Ruleset: rules, 30 | } 31 | } 32 | 33 | func (f *Fork) ProposeRule(view hotstuff.View, cert hotstuff.SyncInfo, cmd *clientpb.Batch) (proposal hotstuff.ProposeMsg, ok bool) { 34 | highQC, ok := cert.QC() 35 | if !ok { 36 | return proposal, false 37 | } 38 | block, ok := f.blockchain.Get(highQC.BlockHash()) 39 | if !ok { 40 | return proposal, false 41 | } 42 | parent, ok := f.blockchain.Get(block.Parent()) 43 | if !ok { 44 | return proposal, false 45 | } 46 | grandparent, ok := f.blockchain.Get(parent.Hash()) 47 | if !ok { 48 | return proposal, false 49 | } 50 | proposal = hotstuff.NewProposeMsg(f.config.ID(), view, grandparent.QuorumCert(), cmd) 51 | if aggQC, ok := cert.AggQC(); f.config.HasAggregateQC() && ok { 52 | proposal.AggregateQC = &aggQC 53 | } 54 | return proposal, true 55 | } 56 | 57 | var _ consensus.Ruleset = (*Fork)(nil) 58 | -------------------------------------------------------------------------------- /protocol/synchronizer/viewduration_test.go: -------------------------------------------------------------------------------- 1 | package synchronizer_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/relab/hotstuff/protocol/synchronizer" 8 | ) 9 | 10 | func checkDuration(t *testing.T, funcName string, want, got time.Duration) { 11 | if want != got { 12 | t.Fatalf("incorrect view duration after calling %s (want: %d, got: %d)", funcName, want, got) 13 | } 14 | } 15 | 16 | func TestFixed(t *testing.T) { 17 | want := 100 * time.Microsecond 18 | vd := synchronizer.NewFixedDuration(want) 19 | checkDuration(t, "nothing", want, vd.Duration()) 20 | vd.ViewStarted() 21 | checkDuration(t, "ViewStarted", want, vd.Duration()) 22 | vd.ViewSucceeded() 23 | checkDuration(t, "ViewSucceeded", want, vd.Duration()) 24 | vd.ViewTimeout() 25 | 
checkDuration(t, "ViewTimeout", want, vd.Duration()) 26 | } 27 | 28 | func TestDynamic(t *testing.T) { 29 | sampleSize := uint32(5) 30 | startTimeout := 100 * time.Millisecond 31 | maxTimeout := 500 * time.Millisecond 32 | multiplier := float32(2) 33 | vd := synchronizer.NewDynamicDuration( 34 | sampleSize, 35 | startTimeout, 36 | maxTimeout, 37 | multiplier, 38 | ) 39 | checkDuration(t, "nothing", startTimeout, vd.Duration()) 40 | vd.ViewStarted() 41 | checkDuration(t, "ViewStarted", startTimeout, vd.Duration()) 42 | vd.ViewTimeout() 43 | checkDuration(t, "ViewTimeout", time.Duration(multiplier)*startTimeout, vd.Duration()) 44 | // timeout many times to reach max timeout 45 | for range 10 { 46 | vd.ViewTimeout() 47 | } 48 | checkDuration(t, "ViewTimeout 10 times", maxTimeout, vd.Duration()) 49 | time.Sleep(2 * time.Second) 50 | vd.ViewSucceeded() 51 | if vd.Duration() == 0 { 52 | t.Fatal("expected view duration to be greater than zero") 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /twins/timeoutmgr.go: -------------------------------------------------------------------------------- 1 | package twins 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/core/eventloop" 6 | "github.com/relab/hotstuff/protocol" 7 | ) 8 | 9 | type timeoutManager struct { 10 | eventLoop *eventloop.EventLoop 11 | viewStates *protocol.ViewStates 12 | 13 | node *node 14 | network *Network 15 | countdown int 16 | timeout int 17 | } 18 | 19 | func (tm *timeoutManager) advance() { 20 | tm.countdown-- 21 | if tm.countdown == 0 { 22 | view := tm.viewStates.View() 23 | tm.eventLoop.AddEvent(hotstuff.TimeoutEvent{View: view}) 24 | tm.countdown = tm.timeout 25 | if tm.node.effectiveView <= view { 26 | tm.node.effectiveView = view + 1 27 | tm.network.logger.Infof("node %v effective view is %d due to timeout", tm.node.id, tm.node.effectiveView) 28 | } 29 | } 30 | } 31 | 32 | func (tm *timeoutManager) viewChange(event 
hotstuff.ViewChangeEvent) { 33 | tm.countdown = tm.timeout 34 | if event.Timeout { 35 | tm.network.logger.Infof("node %v entered view %d after timeout", tm.node.id, event.View) 36 | } else { 37 | tm.network.logger.Infof("node %v entered view %d after voting", tm.node.id, event.View) 38 | } 39 | } 40 | 41 | // newTimeoutManager creates a timeout manager that fires a TimeoutEvent after
// `timeout` ticks without a view change. 42 | func newTimeoutManager( 43 | network *Network, 44 | node *node, 45 | el *eventloop.EventLoop, 46 | viewStates *protocol.ViewStates, 47 | ) *timeoutManager { 48 | tm := &timeoutManager{ 49 | node: node, 50 | network: network, 51 | eventLoop: el, 52 | viewStates: viewStates, 53 | timeout: 5, 54 | countdown: 5, // fix: countdown was left at its zero value, so advance() decremented past 0 and the first TimeoutEvent could never fire 55 | } 56 | eventloop.Register(el, func(_ tick) { 57 | tm.advance() 58 | }, eventloop.Prioritize()) 59 | eventloop.Register(el, func(event hotstuff.ViewChangeEvent) { 60 | tm.viewChange(event) 61 | }, eventloop.Prioritize()) 62 | return tm 63 | } 64 | -------------------------------------------------------------------------------- /protocol/consensus/ruleset.go: -------------------------------------------------------------------------------- 1 | package consensus 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/internal/proto/clientpb" 6 | ) 7 | 8 | // VoteRuler is the interface that wraps the VoteRule method. 9 | // 10 | // The VoteRule method decides whether or not to vote for a block in a given view. 11 | type VoteRuler interface { 12 | // VoteRule returns true if the proposal's block should be voted for in the given view. 13 | VoteRule(hotstuff.View, hotstuff.ProposeMsg) bool 14 | } 15 | 16 | // CommitRuler is the interface that wraps the CommitRule method. 17 | // 18 | // The CommitRule method is used to determine the youngest ancestor of a given block 19 | // that can be committed on the local chain. 20 | type CommitRuler interface { 21 | // CommitRule returns the youngest ancestor of the given block that can be committed. 
22 | CommitRule(*hotstuff.Block) *hotstuff.Block 23 | } 24 | 25 | // ProposeRuler is the interface that wraps the ProposeRule method. 26 | // 27 | // This allows implementors to specify how new blocks are created. 28 | type ProposeRuler interface { 29 | // ProposeRule creates a new proposal. 30 | ProposeRule(view hotstuff.View, cert hotstuff.SyncInfo, cmd *clientpb.Batch) (hotstuff.ProposeMsg, bool) 31 | } 32 | 33 | // Ruleset is the interface that groups the VoteRule, CommitRule, ProposeRule, and ChainLength methods. 34 | // 35 | // Ruleset is the minimum interface that a consensus protocol must implement and defines the 36 | // rules for voting, committing, proposing, and the chain length required for committing blocks. 37 | type Ruleset interface { 38 | VoteRuler 39 | CommitRuler 40 | ProposeRuler 41 | // ChainLength returns the number of blocks that need to be chained together in order to commit. 42 | ChainLength() int 43 | } 44 | -------------------------------------------------------------------------------- /internal/config/testdata/four-experiments.cue: -------------------------------------------------------------------------------- 1 | [{ 2 | config: { 3 | consensus: "chainedhotstuff" 4 | leaderRotation: "round-robin" 5 | crypto: "ecdsa" 6 | communication: "clique" 7 | byzantineStrategy: { 8 | "": [] 9 | } 10 | replicaHosts: ["localhost"] 11 | clientHosts: ["localhost"] 12 | replicas: 4 13 | clients: 1 14 | locations: ["Rome", "Oslo", "London", "Munich"] 15 | treePositions: [3, 2, 1, 4] 16 | branchFactor: 2 17 | } 18 | }, { 19 | config: { 20 | consensus: "simplehotstuff" 21 | leaderRotation: "round-robin" 22 | crypto: "ecdsa" 23 | communication: "clique" 24 | byzantineStrategy: { 25 | fork: [2] 26 | } 27 | replicaHosts: ["localhost"] 28 | clientHosts: ["localhost"] 29 | replicas: 4 30 | clients: 1 31 | locations: ["Rome", "Oslo", "London", "Munich"] 32 | treePositions: [3, 2, 1, 4] 33 | branchFactor: 2 34 | } 35 | }, { 36 | config: { 37 | consensus: 
"fasthotstuff" 38 | leaderRotation: "round-robin" 39 | crypto: "ecdsa" 40 | communication: "kauri" 41 | byzantineStrategy: { 42 | silentproposer: [2] 43 | } 44 | replicaHosts: ["localhost"] 45 | clientHosts: ["localhost"] 46 | replicas: 4 47 | clients: 1 48 | locations: ["Rome", "Oslo", "London", "Munich"] 49 | treePositions: [3, 2, 1, 4] 50 | branchFactor: 2 51 | } 52 | }, { 53 | config: { 54 | consensus: "chainedhotstuff" 55 | leaderRotation: "round-robin" 56 | crypto: "ecdsa" 57 | communication: "kauri" 58 | byzantineStrategy: { 59 | "": [] 60 | } 61 | replicaHosts: ["localhost"] 62 | clientHosts: ["localhost"] 63 | replicas: 4 64 | clients: 1 65 | locations: ["Rome", "Oslo", "London", "Munich"] 66 | treePositions: [3, 2, 1, 4] 67 | branchFactor: 2 68 | } 69 | }] 70 | -------------------------------------------------------------------------------- /quorum_test.go: -------------------------------------------------------------------------------- 1 | package hotstuff 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestQuorumSize(t *testing.T) { 9 | tests := []struct { 10 | n int 11 | want int 12 | }{ 13 | {n: 4, want: 3}, // f=1 14 | {n: 5, want: 4}, // f=1 15 | {n: 6, want: 4}, // f=1 16 | {n: 7, want: 5}, // f=2 17 | {n: 8, want: 6}, // f=2 18 | {n: 9, want: 6}, // f=2 19 | {n: 10, want: 7}, // f=3 20 | {n: 11, want: 8}, // f=3 21 | {n: 12, want: 8}, // f=3 22 | {n: 13, want: 9}, // f=4 23 | {n: 14, want: 10}, // f=4 24 | {n: 15, want: 10}, // f=4 25 | {n: 16, want: 11}, // f=5 26 | {n: 17, want: 12}, // f=5 27 | {n: 18, want: 12}, // f=5 28 | {n: 19, want: 13}, // f=6 29 | {n: 20, want: 14}, // f=6 30 | {n: 21, want: 14}, // f=6 31 | {n: 22, want: 15}, // f=7 32 | {n: 23, want: 16}, // f=7 33 | {n: 24, want: 16}, // f=7 34 | {n: 25, want: 17}, // f=8 35 | {n: 26, want: 18}, // f=8 36 | {n: 27, want: 18}, // f=8 37 | {n: 31, want: 21}, // f=10 38 | {n: 32, want: 22}, // f=10 39 | {n: 33, want: 22}, // f=10 40 | {n: 34, want: 23}, // f=11 41 | {n: 35, 
want: 24}, // f=11 42 | {n: 36, want: 24}, // f=11 43 | {n: 37, want: 25}, // f=12 44 | {n: 38, want: 26}, // f=12 45 | {n: 39, want: 26}, // f=12 46 | {n: 40, want: 27}, // f=13 47 | {n: 41, want: 28}, // f=13 48 | {n: 42, want: 28}, // f=13 49 | {n: 43, want: 29}, // f=14 50 | {n: 44, want: 30}, // f=14 51 | {n: 45, want: 30}, // f=14 52 | {n: 73, want: 49}, // f=24 53 | {n: 74, want: 50}, // f=24 54 | {n: 75, want: 50}, // f=24 55 | } 56 | for _, tt := range tests { 57 | t.Run(fmt.Sprintf("n=%d", tt.n), func(t *testing.T) { 58 | if got := QuorumSize(tt.n); got != tt.want { 59 | t.Errorf("QuorumSize(%d) = %d; want %d", tt.n, got, tt.want) 60 | } 61 | }) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /metrics/throughput.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/core/eventloop" 8 | "github.com/relab/hotstuff/internal/proto/clientpb" 9 | "github.com/relab/hotstuff/metrics/types" 10 | "google.golang.org/protobuf/types/known/durationpb" 11 | ) 12 | 13 | const NameThroughput = "throughput" 14 | 15 | // throughput measures throughput in commits per second, and commands per second. 16 | type throughput struct { 17 | metricsLogger Logger 18 | id hotstuff.ID 19 | commitCount uint64 20 | commandCount uint64 21 | } 22 | 23 | // enableThroughput enables throughput measurement. 
24 | func enableThroughput( 25 | el *eventloop.EventLoop, 26 | metricsLogger Logger, 27 | id hotstuff.ID, 28 | ) { 29 | t := &throughput{ 30 | metricsLogger: metricsLogger, 31 | id: id, 32 | } 33 | eventloop.Register(el, func(commitEvent clientpb.ExecuteEvent) { 34 | t.recordCommit(len(commitEvent.Batch.Commands)) 35 | }) 36 | eventloop.Register(el, func(tickEvent types.TickEvent) { 37 | t.tick(tickEvent) 38 | }, eventloop.Prioritize()) 39 | } 40 | 41 | // recordCommit records a commit with the given number of commands. 42 | func (t *throughput) recordCommit(commands int) { 43 | t.commitCount++ 44 | t.commandCount += uint64(commands) 45 | } 46 | 47 | // tick logs the current throughput measurement to the metrics logger. 48 | func (t *throughput) tick(tick types.TickEvent) { 49 | now := time.Now() 50 | event := &types.ThroughputMeasurement{ 51 | Event: types.NewReplicaEvent(t.id, now), 52 | Commits: t.commitCount, 53 | Commands: t.commandCount, 54 | Duration: durationpb.New(now.Sub(tick.LastTick)), 55 | } 56 | t.metricsLogger.Log(event) 57 | // reset count for next tick 58 | t.commandCount = 0 59 | t.commitCount = 0 60 | } 61 | -------------------------------------------------------------------------------- /twins/vulnfhs.go: -------------------------------------------------------------------------------- 1 | package twins 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/core/logging" 6 | "github.com/relab/hotstuff/protocol/consensus" 7 | "github.com/relab/hotstuff/protocol/rules" 8 | "github.com/relab/hotstuff/security/blockchain" 9 | ) 10 | 11 | const nameVulnerableFHS = "vulnerableFHS" 12 | 13 | // A wrapper around the FHS rules that swaps the commit rule for a vulnerable version 14 | type vulnerableFHS struct { 15 | logger logging.Logger 16 | blockchain *blockchain.Blockchain 17 | rules.FastHotStuff 18 | } 19 | 20 | func NewVulnFHS( 21 | logger logging.Logger, 22 | blockchain *blockchain.Blockchain, 23 | inner *rules.FastHotStuff, 
24 | ) *vulnerableFHS { 25 | return &vulnerableFHS{ 26 | logger: logger, 27 | blockchain: blockchain, 28 | FastHotStuff: *inner, 29 | } 30 | } 31 | 32 | func (fhs *vulnerableFHS) qcRef(qc hotstuff.QuorumCert) (*hotstuff.Block, bool) { 33 | if (hotstuff.Hash{}) == qc.BlockHash() { 34 | return nil, false 35 | } 36 | return fhs.blockchain.Get(qc.BlockHash()) 37 | } 38 | 39 | // CommitRule decides whether an ancestor of the block can be committed. 40 | func (fhs *vulnerableFHS) CommitRule(block *hotstuff.Block) *hotstuff.Block { 41 | parent, ok := fhs.qcRef(block.QuorumCert()) 42 | if !ok { 43 | return nil 44 | } 45 | fhs.logger.Debug("PRECOMMIT: ", parent) 46 | grandparent, ok := fhs.qcRef(parent.QuorumCert()) 47 | if !ok { 48 | return nil 49 | } 50 | // NOTE: this does check for a direct link between the block and the grandparent. 51 | // This is what causes the safety violation. 52 | if block.Parent() == parent.Hash() && parent.Parent() == grandparent.Hash() { 53 | fhs.logger.Debug("COMMIT(vulnerable): ", grandparent) 54 | return grandparent 55 | } 56 | return nil 57 | } 58 | 59 | var _ consensus.Ruleset = (*vulnerableFHS)(nil) 60 | -------------------------------------------------------------------------------- /core/replica.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/relab/hotstuff" 7 | ) 8 | 9 | // ReplicaInfo returns a replica if it is present in the configuration. 10 | func (g *RuntimeConfig) ReplicaInfo(id hotstuff.ID) (replica *hotstuff.ReplicaInfo, ok bool) { 11 | replica, ok = g.replicas[id] 12 | return 13 | } 14 | 15 | // ReplicaCount returns the number of replicas in the configuration. 16 | func (g *RuntimeConfig) ReplicaCount() int { 17 | return len(g.replicas) 18 | } 19 | 20 | // QuorumSize returns the size of a quorum. 
21 | func (g *RuntimeConfig) QuorumSize() int { 22 | return hotstuff.QuorumSize(g.ReplicaCount()) 23 | } 24 | 25 | // AddReplica adds information about the replica. 26 | func (g *RuntimeConfig) AddReplica(replicaInfo *hotstuff.ReplicaInfo) { 27 | g.replicas[replicaInfo.ID] = replicaInfo 28 | } 29 | 30 | // SetReplicaMetadata sets the metadata for a replica based on id. 31 | func (g *RuntimeConfig) SetReplicaMetadata(id hotstuff.ID, metadata map[string]string) error { 32 | if _, ok := g.replicas[id]; !ok { 33 | return fmt.Errorf("replica %d does not exist", id) 34 | } 35 | g.replicas[id].Metadata = metadata 36 | return nil 37 | } 38 | 39 | // AddConnectionMetadata sets the value of a key in the connection metadata map. 40 | // 41 | // NOTE: if the value contains binary data, the key must have the "-bin" suffix. 42 | // This is to make it compatible with GRPC metadata. 43 | // See: https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md#storing-binary-data-in-metadata 44 | func (g *RuntimeConfig) AddConnectionMetadata(key string, value string) { 45 | g.connectionMetadata[key] = value 46 | } 47 | 48 | // ConnectionMetadata returns the metadata map that is sent when connecting to other replicas. 
49 | func (g *RuntimeConfig) ConnectionMetadata() map[string]string { 50 | return g.connectionMetadata 51 | } 52 | -------------------------------------------------------------------------------- /protocol/consensus/committer_test.go: -------------------------------------------------------------------------------- 1 | package consensus_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/internal/testutil" 8 | "github.com/relab/hotstuff/protocol" 9 | "github.com/relab/hotstuff/protocol/consensus" 10 | "github.com/relab/hotstuff/protocol/rules" 11 | "github.com/relab/hotstuff/security/crypto" 12 | ) 13 | 14 | func wireUpCommitter( 15 | t *testing.T, 16 | essentials *testutil.Essentials, 17 | viewStates *protocol.ViewStates, 18 | commitRuler consensus.CommitRuler, 19 | ) *consensus.Committer { 20 | t.Helper() 21 | return consensus.NewCommitter( 22 | essentials.EventLoop(), 23 | essentials.Logger(), 24 | essentials.Blockchain(), 25 | viewStates, 26 | commitRuler, 27 | ) 28 | } 29 | 30 | func TestValidCommit(t *testing.T) { 31 | essentials := testutil.WireUpEssentials(t, 1, crypto.NameECDSA) 32 | viewStates, err := protocol.NewViewStates( 33 | essentials.Blockchain(), 34 | essentials.Authority(), 35 | ) 36 | if err != nil { 37 | t.Fatal(err) 38 | } 39 | // create a valid chain of blocks 40 | chain := essentials.Blockchain() 41 | parent := hotstuff.GetGenesis() 42 | var firstBlock *hotstuff.Block = nil 43 | chs := rules.NewChainedHotStuff( 44 | essentials.Logger(), 45 | essentials.RuntimeCfg(), 46 | essentials.Blockchain(), 47 | ) 48 | for range chs.ChainLength() { 49 | block := testutil.CreateParentedBlock(t, 1, parent) 50 | if firstBlock == nil { 51 | firstBlock = block 52 | } 53 | chain.Store(block) 54 | parent = block 55 | } 56 | blockToCommit := testutil.CreateParentedBlock(t, 1, parent) 57 | committer := wireUpCommitter(t, essentials, viewStates, chs) 58 | if err := committer.TryCommit(blockToCommit); err != nil { 
59 | t.Fatal(err) 60 | } 61 | if firstBlock != viewStates.CommittedBlock() { 62 | t.Fatal("incorrect block was committed") 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /protocol/comm/kauri/sender.go: -------------------------------------------------------------------------------- 1 | package kauri 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/core" 8 | "github.com/relab/hotstuff/core/eventloop" 9 | "github.com/relab/hotstuff/internal/proto/hotstuffpb" 10 | "github.com/relab/hotstuff/internal/proto/kauripb" 11 | "github.com/relab/hotstuff/internal/tree" 12 | "github.com/relab/hotstuff/network" 13 | ) 14 | 15 | type KauriGorumsSender struct { 16 | config *core.RuntimeConfig 17 | core.Sender 18 | 19 | nodes map[hotstuff.ID]*kauripb.Node 20 | tree *tree.Tree 21 | } 22 | 23 | func WrapGorumsSender( 24 | el *eventloop.EventLoop, 25 | config *core.RuntimeConfig, 26 | base *network.GorumsSender, 27 | ) *KauriGorumsSender { 28 | s := &KauriGorumsSender{ 29 | config: config, 30 | Sender: base, // important: extend the base 31 | 32 | nodes: make(map[hotstuff.ID]*kauripb.Node), 33 | tree: config.Tree(), 34 | } 35 | eventloop.Register(el, func(_ hotstuff.ReplicaConnectedEvent) { 36 | // translate the base hotstuffpb.Configuration to kauripb.Configuration 37 | kauriCfg, err := kauripb.ConfigurationFromRaw(base.GorumsConfig(), nil) 38 | if err != nil { 39 | panic(err) // should not happen 40 | } 41 | for _, n := range kauriCfg.Nodes() { 42 | s.nodes[hotstuff.ID(n.ID())] = n 43 | } 44 | }, eventloop.Prioritize()) 45 | return s 46 | } 47 | 48 | func (k *KauriGorumsSender) SendContributionToParent(view hotstuff.View, qc hotstuff.QuorumSignature) { 49 | parent, ok := k.tree.Parent() 50 | if ok { 51 | node, isPresent := k.nodes[parent] 52 | if isPresent { 53 | node.SendContribution(context.Background(), &kauripb.Contribution{ 54 | ID: uint32(k.config.ID()), 55 | Signature: 
hotstuffpb.QuorumSignatureToProto(qc), 56 | View: uint64(view), 57 | }) 58 | } 59 | } 60 | } 61 | 62 | var _ core.KauriSender = (*KauriGorumsSender)(nil) 63 | -------------------------------------------------------------------------------- /.vscode/dict.txt: -------------------------------------------------------------------------------- 1 | anypb 2 | Bano 3 | Baudet 4 | bbchain 5 | benchmarkdata 6 | BFT's 7 | Bitfield 8 | bitmask 9 | cerr 10 | cfgs 11 | chainedhotstuff 12 | checkf 13 | Chursin 14 | clientpb 15 | cmdqueue 16 | Cmds 17 | cmpopts 18 | CODECOV 19 | covdata 20 | covermode 21 | coverpkg 22 | coverprofile 23 | cpuprofile 24 | cuecontext 25 | cuelang 26 | Debugf 27 | disagg 28 | durationpb 29 | dylib 30 | eddsa 31 | emptypb 32 | Erevik 33 | eventloop 34 | Fangyu 35 | fasthotstuff 36 | felixge 37 | Feng 38 | ferr 39 | fgprof 40 | fgprofprofile 41 | fixedleader 42 | gcflags 43 | gcov 44 | GOCOVERDIR 45 | gocyclo 46 | golangci 47 | golint 48 | gomock 49 | gorums 50 | gpool 51 | grpc 52 | Gueta 53 | Hein 54 | hostnames 55 | HOTSTUFF 56 | hotstuffgorums 57 | hotstuffpb 58 | hscov 59 | iagotest 60 | ICDCS 61 | iface 62 | Iinternal 63 | increaseview 64 | Infof 65 | Jalalzai 66 | Jehl 67 | Jianyu 68 | kauripb 69 | keygen 70 | kilic 71 | latencygen 72 | ldflags 73 | leaderrotation 74 | Malkhi 75 | Mathieu 76 | Meling 77 | memprofile 78 | mitchellh 79 | mockgen 80 | nolint 81 | orchestrationpb 82 | partitioner 83 | Paulo 84 | perr 85 | pflag 86 | pflags 87 | pkgs 88 | PRECOMMIT 89 | Println 90 | propsed 91 | proto 92 | protobuf 93 | protoc 94 | protocmp 95 | protos 96 | protostream 97 | ptypes 98 | QC's 99 | qerr 100 | qspec 101 | Reiter 102 | relab 103 | Rostem 104 | roundrobin 105 | secp 106 | sigs 107 | silentproposer 108 | simplehotstuff 109 | Sonnino 110 | SSWU 111 | structs 112 | subconfiguration 113 | synctest 114 | testutil 115 | textfmt 116 | throughputvslatency 117 | timestamppb 118 | TMPDIR 119 | tmpl 120 | Tormod 121 | treeleader 122 | unexported 123 | 
unittests 124 | unmarshals 125 | unregisters 126 | viewduration 127 | votingmachine 128 | Warnf 129 | wcfg 130 | Welford 131 | Welford's 132 | wonderproxy 133 | wrfs 134 | xlabel 135 | xyer 136 | ylabel 137 | -------------------------------------------------------------------------------- /metrics/plotting/starttimes.go: -------------------------------------------------------------------------------- 1 | package plotting 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/relab/hotstuff/metrics/types" 7 | ) 8 | 9 | // StartTimes collects the start times for each client or replica. 10 | type StartTimes struct { 11 | clients map[uint32]time.Time 12 | replicas map[uint32]time.Time 13 | } 14 | 15 | // NewStartTimes returns a new StartTimes instance. 16 | func NewStartTimes() StartTimes { 17 | return StartTimes{ 18 | clients: make(map[uint32]time.Time), 19 | replicas: make(map[uint32]time.Time), 20 | } 21 | } 22 | 23 | // Add adds an event. 24 | func (s *StartTimes) Add(msg any) { 25 | startTime, ok := msg.(*types.StartEvent) 26 | if !ok { 27 | return 28 | } 29 | 30 | if startTime.GetEvent().GetClient() { 31 | s.clients[startTime.GetEvent().GetID()] = startTime.GetEvent().GetTimestamp().AsTime() 32 | } else { 33 | s.replicas[startTime.GetEvent().GetID()] = startTime.GetEvent().GetTimestamp().AsTime() 34 | } 35 | } 36 | 37 | // Client returns the start time of the client with the specified id. 38 | func (s *StartTimes) Client(id uint32) (t time.Time, ok bool) { 39 | t, ok = s.clients[id] 40 | return 41 | } 42 | 43 | // ClientOffset returns the time offset from the client's start time. 44 | func (s *StartTimes) ClientOffset(id uint32, t time.Time) (offset time.Duration, ok bool) { 45 | startTime, ok := s.clients[id] 46 | if !ok { 47 | return 0, false 48 | } 49 | return t.Sub(startTime), true 50 | } 51 | 52 | // Replica returns the start time of the replica with the specified id. 
53 | func (s *StartTimes) Replica(id uint32) (t time.Time, ok bool) { 54 | t, ok = s.replicas[id] 55 | return 56 | } 57 | 58 | // ReplicaOffset returns the time offset from the replica's start time. 59 | func (s *StartTimes) ReplicaOffset(id uint32, t time.Time) (offset time.Duration, ok bool) { 60 | startTime, ok := s.replicas[id] 61 | if !ok { 62 | return 0, false 63 | } 64 | return t.Sub(startTime), true 65 | } 66 | -------------------------------------------------------------------------------- /internal/config/schema.cue: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "list" 4 | 5 | config: { 6 | // List of replica/client hosts (non-empty) 7 | replicaHosts: [_, ...string] 8 | clientHosts: [_, ...string] 9 | 10 | // Number of replicas/clients; must be greater than 0 11 | replicas: int & >0 12 | clients: int & >0 13 | 14 | // `locations` must have exactly `replicas` entries. 15 | _exact: list.MinItems(replicas) & list.MaxItems(replicas) 16 | // `treePositions` must have exactly `replicas` entries and be unique. 17 | _exactAndUnique: _exact & list.UniqueItems() 18 | 19 | // List of integers representing positions in a tree (optional). 20 | // Root, left child, right child, left child of left child, etc. 21 | treePositions?: [...int & >=1 & <=replicas] & _exactAndUnique 22 | 23 | if treePositions == _|_ { 24 | // List of locations; optional when treePositions is not provided. 25 | locations?: [...string] & _exact 26 | } 27 | if treePositions != _|_ { 28 | // List of locations; required when treePositions is provided. 29 | locations!: [...string] & _exact 30 | // Branching factor of the tree; must be greater than 1 and at most half the number of replicas. 31 | branchFactor!: int & >1 & <=div(replicas, 2) 32 | } 33 | 34 | // Consensus algorithm to use. (Default: "chainedhotstuff") 35 | consensus: *"chainedhotstuff" | "simplehotstuff" | "fasthotstuff" 36 | // Leader rotation strategy to use. 
(Default: "round-robin") 37 | leaderRotation: *"round-robin" | "fixed" | "carousel" | "reputation" 38 | // Cryptographic algorithm to use. (Default: "ecdsa") 39 | crypto: *"ecdsa" | "eddsa" | "bls12" 40 | // Communication protocol to use. (Default: "clique") 41 | communication: *"clique" | "kauri" 42 | 43 | // Byzantine strategy for different replicas (optional). 44 | byzantineStrategy?: { 45 | silentproposer?: [...int & >=1 & <=replicas] 46 | fork?: [...int & >=1 & <=replicas] 47 | increaseview?: [...int & >=1 & <=replicas] 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /protocol/synchronizer/timeout_collector.go: -------------------------------------------------------------------------------- 1 | package synchronizer 2 | 3 | import ( 4 | "slices" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/core" 8 | ) 9 | 10 | type timeoutCollector struct { 11 | config *core.RuntimeConfig 12 | timeouts []hotstuff.TimeoutMsg 13 | } 14 | 15 | func newTimeoutCollector(config *core.RuntimeConfig) *timeoutCollector { 16 | return &timeoutCollector{ 17 | config: config, 18 | } 19 | } 20 | 21 | // add returns true if a quorum of timeouts has been collected for the view of given timeout message. 
22 | func (s *timeoutCollector) add(timeout hotstuff.TimeoutMsg) ([]hotstuff.TimeoutMsg, bool) { 23 | // needs to be done later since the config's quorum size is set after this one's init 24 | if s.timeouts == nil { 25 | s.timeouts = make([]hotstuff.TimeoutMsg, 0, 2*s.config.QuorumSize()) 26 | } 27 | // ignore this timeout if we already have a timeout from this replica in this view 28 | if slices.ContainsFunc(s.timeouts, func(t hotstuff.TimeoutMsg) bool { 29 | return t.View == timeout.View && t.ID == timeout.ID 30 | }) { 31 | return nil, false 32 | } 33 | s.timeouts = append(s.timeouts, timeout) 34 | // NOTE(review): this quorum check counts buffered timeouts across ALL views, not just timeout.View; confirm that deleteOldViews keeps at most one view buffered at a time, otherwise the check can pass without a per-view quorum. 35 | if len(s.timeouts) < s.config.QuorumSize() { 36 | return nil, false 37 | } 38 | // NOTE(review): the returned clone is of the whole buffer and may include timeouts for other views — verify callers filter by view. 39 | timeoutList := slices.Clone(s.timeouts) 40 | // remove timeouts for this view from the slice, since we now have a quorum 41 | // and we don't need to keep them around anymore. 42 | s.timeouts = slices.DeleteFunc(s.timeouts, func(t hotstuff.TimeoutMsg) bool { return t.View == timeout.View }) 43 | return timeoutList, true 44 | } 45 | 46 | // deleteOldViews removes all timeouts with a view lower than the current view. 47 | // This is used to clean up timeouts that are no longer relevant, as they are from 48 | // an already processed view. 49 | func (s *timeoutCollector) deleteOldViews(currentView hotstuff.View) { 50 | s.timeouts = slices.DeleteFunc(s.timeouts, func(t hotstuff.TimeoutMsg) bool { return t.View < currentView }) 51 | } 52 | -------------------------------------------------------------------------------- /internal/test/name.go: -------------------------------------------------------------------------------- 1 | // Package test provides utilities for generating test names based on fields and values. 2 | package test 3 | 4 | import ( 5 | "fmt" 6 | "strings" 7 | ) 8 | 9 | // Name returns the test or benchmark name based on the provided fields and values. 10 | // If the first argument is a string and there is an odd number of arguments, 11 | // it is treated as a prefix. 
Otherwise, all arguments are treated as field-value pairs. 12 | // Example usages: 13 | // 14 | // Name("MyPrefix", "field1", value1, "field2", value2) 15 | // Name("field1", value1, "field2", value2) 16 | func Name(fieldValuePairs ...any) string { 17 | sep, prefix := "", "" 18 | if len(fieldValuePairs)%2 != 0 { 19 | if p, ok := fieldValuePairs[0].(string); ok { 20 | sep, prefix = "/", p 21 | fieldValuePairs = fieldValuePairs[1:] 22 | } else { 23 | panic("first argument must be a string when there is an odd number of arguments") 24 | } 25 | } 26 | 27 | b := strings.Builder{} 28 | b.WriteString(prefix) 29 | for i := 0; i < len(fieldValuePairs); i += 2 { 30 | field := fieldValuePairs[i] 31 | v := fieldValuePairs[i+1] 32 | switch value := v.(type) { 33 | case []string: 34 | if value == nil { 35 | b.WriteString(fmt.Sprintf("%s%s=", sep, field)) 36 | } else { 37 | b.WriteString(fmt.Sprintf("%s%s=%v", sep, field, value)) 38 | } 39 | case string: 40 | if value == "" { 41 | b.WriteString(fmt.Sprintf("%s%s=%q", sep, field, value)) 42 | } else { 43 | b.WriteString(fmt.Sprintf("%s%s=%s", sep, field, value)) 44 | } 45 | case uint64: 46 | b.WriteString(fmt.Sprintf("%s%s=%d", sep, field, value)) 47 | case int: 48 | b.WriteString(fmt.Sprintf("%s%s=%d", sep, field, value)) 49 | case bool: 50 | b.WriteString(fmt.Sprintf("%s%s=%t", sep, field, value)) 51 | default: 52 | b.WriteString(fmt.Sprintf("%s%s=%v", sep, field, v)) 53 | } 54 | // ensure separator is set to / after the first field 55 | sep = "/" 56 | } 57 | return b.String() 58 | } 59 | -------------------------------------------------------------------------------- /protocol/comm/clique.go: -------------------------------------------------------------------------------- 1 | package comm 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/core" 6 | "github.com/relab/hotstuff/protocol/leaderrotation" 7 | "github.com/relab/hotstuff/protocol/votingmachine" 8 | ) 9 | 10 | const NameClique = "clique" 11 
| 12 | // Clique implements one-to-all dissemination and all-to-one aggregation. 13 | type Clique struct { 14 | config *core.RuntimeConfig 15 | votingMachine *votingmachine.VotingMachine 16 | leaderRotation leaderrotation.LeaderRotation 17 | sender core.Sender 18 | } 19 | 20 | // NewClique creates a new Clique instance for communicating proposals and votes. 21 | func NewClique( 22 | config *core.RuntimeConfig, 23 | votingMachine *votingmachine.VotingMachine, 24 | leaderRotation leaderrotation.LeaderRotation, 25 | sender core.Sender, 26 | ) *Clique { 27 | return &Clique{ 28 | config: config, 29 | votingMachine: votingMachine, 30 | leaderRotation: leaderRotation, 31 | sender: sender, 32 | } 33 | } 34 | 35 | // Disseminate broadcasts the proposal and aggregates my vote for this proposals. 36 | func (hs *Clique) Disseminate(proposal *hotstuff.ProposeMsg, pc hotstuff.PartialCert) error { 37 | hs.sender.Propose(proposal) 38 | return hs.Aggregate(proposal, pc) 39 | } 40 | 41 | // Aggregate sends the vote or stores it if the replica is leader in the next view. 42 | func (hs *Clique) Aggregate(proposal *hotstuff.ProposeMsg, pc hotstuff.PartialCert) error { 43 | nextView := proposal.Block.View() + 1 44 | leaderID := hs.leaderRotation.GetLeader(nextView) 45 | if leaderID == hs.config.ID() { 46 | // if I am the leader in the next view, collect the vote for myself beforehand. 47 | hs.votingMachine.CollectVote(hotstuff.VoteMsg{ 48 | ID: hs.config.ID(), 49 | PartialCert: pc, 50 | }) 51 | return nil 52 | } 53 | // if I am the one voting, send the vote to next leader over the wire. 
54 | return hs.sender.Vote(leaderID, pc) 55 | } 56 | 57 | var ( 58 | _ Aggregator = (*Clique)(nil) 59 | _ Disseminator = (*Clique)(nil) 60 | ) 61 | -------------------------------------------------------------------------------- /wiring/consensus.go: -------------------------------------------------------------------------------- 1 | package wiring 2 | 3 | import ( 4 | "github.com/relab/hotstuff/core" 5 | "github.com/relab/hotstuff/core/eventloop" 6 | "github.com/relab/hotstuff/core/logging" 7 | "github.com/relab/hotstuff/internal/proto/clientpb" 8 | "github.com/relab/hotstuff/protocol" 9 | "github.com/relab/hotstuff/protocol/comm" 10 | "github.com/relab/hotstuff/protocol/consensus" 11 | "github.com/relab/hotstuff/protocol/leaderrotation" 12 | "github.com/relab/hotstuff/security/blockchain" 13 | "github.com/relab/hotstuff/security/cert" 14 | ) 15 | 16 | type Consensus struct { 17 | voter *consensus.Voter 18 | proposer *consensus.Proposer 19 | committer *consensus.Committer 20 | } 21 | 22 | func NewConsensus( 23 | eventLoop *eventloop.EventLoop, 24 | logger logging.Logger, 25 | config *core.RuntimeConfig, 26 | blockchain *blockchain.Blockchain, 27 | auth *cert.Authority, 28 | commandCache *clientpb.CommandCache, 29 | consensusRules consensus.Ruleset, 30 | leaderRotation leaderrotation.LeaderRotation, 31 | viewStates *protocol.ViewStates, 32 | comm comm.Communication, 33 | ) *Consensus { 34 | committer := consensus.NewCommitter( 35 | eventLoop, 36 | logger, 37 | blockchain, 38 | viewStates, 39 | consensusRules, 40 | ) 41 | voter := consensus.NewVoter( 42 | config, 43 | leaderRotation, 44 | consensusRules, 45 | comm, 46 | auth, 47 | committer, 48 | ) 49 | return &Consensus{ 50 | committer: committer, 51 | voter: voter, 52 | proposer: consensus.NewProposer( 53 | eventLoop, 54 | config, 55 | blockchain, 56 | viewStates, 57 | consensusRules, 58 | comm, 59 | voter, 60 | commandCache, 61 | committer, 62 | ), 63 | } 64 | } 65 | 66 | // Proposer returns the proposer 
instance. 67 | func (p *Consensus) Proposer() *consensus.Proposer { 68 | return p.proposer 69 | } 70 | 71 | // Voter returns the voter instance. 72 | func (p *Consensus) Voter() *consensus.Voter { 73 | return p.voter 74 | } 75 | 76 | // Committer returns the committer instance. 77 | func (p *Consensus) Committer() *consensus.Committer { 78 | return p.committer 79 | } 80 | -------------------------------------------------------------------------------- /core/eventloop/context_test.go: -------------------------------------------------------------------------------- 1 | package eventloop_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/relab/hotstuff" 8 | "github.com/relab/hotstuff/core/eventloop" 9 | "github.com/relab/hotstuff/core/logging" 10 | ) 11 | 12 | // TestTimeoutContext tests that a timeout context is canceled after receiving a timeout event. 13 | func TestTimeoutContext(t *testing.T) { 14 | logger := logging.New("test") 15 | eventloop := eventloop.New(logger, 10) 16 | ctx, cancel := eventloop.TimeoutContext() 17 | defer cancel() 18 | 19 | eventloop.AddEvent(hotstuff.TimeoutEvent{}) 20 | 21 | if ctx.Err() != context.Canceled { 22 | t.Error("Context not canceled") 23 | } 24 | } 25 | 26 | // TestTimeoutContextView tests that a timeout context is canceled after receiving a view change event. 27 | func TestTimeoutContextView(t *testing.T) { 28 | logger := logging.New("test") 29 | eventloop := eventloop.New(logger, 10) 30 | ctx, cancel := eventloop.TimeoutContext() 31 | defer cancel() 32 | 33 | eventloop.AddEvent(hotstuff.ViewChangeEvent{View: 1}) 34 | 35 | if ctx.Err() != context.Canceled { 36 | t.Error("Context not canceled") 37 | } 38 | } 39 | 40 | // TestViewContext tests that a view context is canceled after receiving a view change event. 
41 | func TestViewContext(t *testing.T) { 42 | logger := logging.New("test") 43 | eventloop := eventloop.New(logger, 10) 44 | ctx, cancel := eventloop.ViewContext(nil) 45 | defer cancel() 46 | 47 | eventloop.AddEvent(hotstuff.ViewChangeEvent{View: 1}) 48 | 49 | if ctx.Err() != context.Canceled { 50 | t.Error("Context not canceled") 51 | } 52 | } 53 | 54 | // TestViewContextEarlierView tests that a view context is not canceled when receiving a view change event for an earlier view. 55 | func TestViewContextEarlierView(t *testing.T) { 56 | logger := logging.New("test") 57 | eventloop := eventloop.New(logger, 10) 58 | v := hotstuff.View(1) 59 | ctx, cancel := eventloop.ViewContext(&v) 60 | defer cancel() 61 | 62 | eventloop.AddEvent(hotstuff.ViewChangeEvent{View: 0}) 63 | 64 | if ctx.Err() != nil { 65 | t.Error("Context canceled") 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /cmd/plot/main.go: -------------------------------------------------------------------------------- 1 | // Plot is a tool for plotting measurements from a HotStuff experiment. 
2 | package main 3 | 4 | import ( 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "time" 10 | 11 | "github.com/relab/hotstuff/metrics/plotting" 12 | 13 | // ensure proto messages are registered so that they can be decoded from measurement.json files 14 | _ "github.com/relab/hotstuff/internal/proto/orchestrationpb" 15 | _ "github.com/relab/hotstuff/metrics/types" 16 | ) 17 | 18 | var ( 19 | interval = flag.Duration("interval", time.Second, "Length of time interval to group measurements by.") 20 | latency = flag.String("latency", "", "File to save latency plot to.") 21 | throughput = flag.String("throughput", "", "File to save throughput plot to.") 22 | throughputVSLatency = flag.String("throughputvslatency", "", "File to save throughput vs latency plot to.") 23 | ) 24 | 25 | func main() { 26 | flag.Usage = func() { 27 | fmt.Fprintf(os.Stderr, "usage: %s [flags] [path to measurements.json]\n", os.Args[0]) 28 | flag.PrintDefaults() 29 | } 30 | flag.Parse() 31 | 32 | srcPath := flag.Arg(0) 33 | if srcPath == "" { 34 | flag.Usage() 35 | os.Exit(1) 36 | } 37 | 38 | file, err := os.Open(srcPath) 39 | if err != nil { 40 | log.Fatalln(err) 41 | } 42 | 43 | latencyPlot := plotting.NewClientLatencyPlot() 44 | throughputPlot := plotting.NewThroughputPlot() 45 | throughputVSLatencyPlot := plotting.NewThroughputVSLatencyPlot() 46 | 47 | reader := plotting.NewReader(file, &latencyPlot, &throughputPlot, &throughputVSLatencyPlot) 48 | if err := reader.ReadAll(); err != nil { 49 | log.Fatalln(err) 50 | } 51 | 52 | if *latency != "" { 53 | if err := latencyPlot.PlotAverage(*latency, *interval); err != nil { 54 | log.Fatalln(err) 55 | } 56 | } 57 | 58 | if *throughput != "" { 59 | if err := throughputPlot.PlotAverage(*throughput, *interval); err != nil { 60 | log.Fatalln(err) 61 | } 62 | } 63 | 64 | if *throughputVSLatency != "" { 65 | if err := throughputVSLatencyPlot.PlotAverage(*throughputVSLatency, *interval); err != nil { 66 | log.Fatalln(err) 67 | } 68 | } 69 | } 70 | 
// Enable enables logging of the specified client and replica metrics.
// If id is a client.ID, client metrics will be enabled.
// If id is a hotstuff.ID, replica metrics will be enabled.
// The measurementInterval specifies how often measurements are logged.
// Valid metric names are defined as constants in their respective metric files.
//
// Note: a valid metric name whose kind does not match the type of id is
// silently skipped — it is not enabled, not reported in the log line below,
// and no error is returned. Only unknown names produce an error.
func Enable[T client.ID | hotstuff.ID](
	eventLoop *eventloop.EventLoop,
	logger logging.Logger,
	metricsLogger Logger,
	id T,
	measurementInterval time.Duration,
	metricNames ...string,
) error {
	if len(metricNames) == 0 {
		return fmt.Errorf("no metric names provided")
	}
	enabledMetrics := []string{}
	for _, name := range metricNames {
		switch name {
		case NameClientLatency:
			// client-side metric: only enabled when id is a client.ID
			if clientID, ok := any(id).(client.ID); ok {
				enableClientLatency(eventLoop, metricsLogger, clientID)
				enabledMetrics = append(enabledMetrics, NameClientLatency)
			}
		case NameViewTimeouts:
			// replica-side metric: only enabled when id is a hotstuff.ID
			if replicaID, ok := any(id).(hotstuff.ID); ok {
				enableViewTimeouts(eventLoop, metricsLogger, replicaID)
				enabledMetrics = append(enabledMetrics, NameViewTimeouts)
			}
		case NameThroughput:
			if replicaID, ok := any(id).(hotstuff.ID); ok {
				enableThroughput(eventLoop, metricsLogger, replicaID)
				enabledMetrics = append(enabledMetrics, NameThroughput)
			}
		case NameConsensusLatency:
			if replicaID, ok := any(id).(hotstuff.ID); ok {
				enableConsensusLatency(eventLoop, metricsLogger, replicaID)
				enabledMetrics = append(enabledMetrics, NameConsensusLatency)
			}
		default:
			return fmt.Errorf("invalid metric: %s", name)
		}
	}
	logger.Infof("Metrics enabled: %v", enabledMetrics)
	// the ticker drives periodic emission of the enabled measurements
	addTicker(eventLoop, measurementInterval)
	return nil
}
33 | func (r *Reader) ReadAll() error { 34 | decoder := json.NewDecoder(r.rd) 35 | 36 | t, err := decoder.Token() 37 | if err != nil { 38 | return fmt.Errorf("failed to read first JSON token: %w", err) 39 | } 40 | if d, ok := t.(json.Delim); !ok || d != '[' { 41 | return fmt.Errorf("expected first JSON token to be the start of an array") 42 | } 43 | 44 | for decoder.More() { 45 | var b json.RawMessage 46 | err = decoder.Decode(&b) 47 | if err != nil { 48 | return err 49 | } 50 | err = r.read(b) 51 | if err != nil { 52 | return err 53 | } 54 | } 55 | 56 | t, err = decoder.Token() 57 | if err != nil { 58 | return fmt.Errorf("failed to read last JSON token: %w", err) 59 | } 60 | if d, ok := t.(json.Delim); !ok || d != ']' { 61 | return fmt.Errorf("expected last JSON token to be the end of an array") 62 | } 63 | 64 | return nil 65 | } 66 | 67 | func (r *Reader) read(b []byte) error { 68 | anyMsg := &anypb.Any{} 69 | err := protojson.Unmarshal(b, anyMsg) 70 | if err != nil { 71 | return fmt.Errorf("failed to unmarshal JSON message: %w", err) 72 | } 73 | 74 | msg, err := anyMsg.UnmarshalNew() 75 | if err != nil { 76 | return fmt.Errorf("failed to unmarshal Any message: %w", err) 77 | } 78 | 79 | for _, p := range r.plotters { 80 | p.Add(msg) 81 | } 82 | 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /twins/generator_test.go: -------------------------------------------------------------------------------- 1 | package twins 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | "time" 7 | 8 | "github.com/relab/hotstuff/core/logging" 9 | ) 10 | 11 | func TestPartitionsGenerator(t *testing.T) { 12 | partitions := genPartitionScenarios([]NodeID{{1, 1}, {1, 2}}, []NodeID{{2, 3}, {3, 4}, {4, 5}}, 3, 1) 13 | for p := range partitions { 14 | t.Log(partitions[p]) 15 | } 16 | } 17 | 18 | func TestGenerator(t *testing.T) { 19 | g := NewGenerator(logging.New(""), Settings{ 20 | NumNodes: 4, 21 | NumTwins: 1, 22 | Partitions: 3, 
23 | Views: 8, 24 | }) 25 | g.Shuffle(time.Now().Unix()) 26 | t.Log(g.NextScenario()) 27 | } 28 | 29 | func TestPartitionSizes(t *testing.T) { 30 | want := [][]uint8{ 31 | {6, 0, 0, 0}, 32 | {5, 1, 0, 0}, 33 | {4, 2, 0, 0}, 34 | {4, 1, 1, 0}, 35 | {3, 3, 0, 0}, 36 | {3, 2, 1, 0}, 37 | {3, 1, 1, 1}, 38 | {2, 2, 2, 0}, 39 | {2, 2, 1, 1}, 40 | } 41 | got := genPartitionSizes(6, 4, 1) 42 | 43 | if !reflect.DeepEqual(got, want) { 44 | for i := range got { 45 | t.Log(got[i]) 46 | } 47 | t.Error("did not get the expected result") 48 | } 49 | } 50 | 51 | func TestAssignNodeIDs(t *testing.T) { 52 | tests := []struct { 53 | name string 54 | numNodes uint8 55 | numTwins uint8 56 | wantNodes []NodeID 57 | wantTwins []NodeID 58 | }{ 59 | {"no twins", 3, 0, []NodeID{{1, 0}, {2, 0}, {3, 0}}, nil}, 60 | {"one twin pair", 2, 1, []NodeID{{2, 0}}, []NodeID{{1, 1}, {1, 2}}}, 61 | {"multiple twin pairs", 4, 2, []NodeID{{3, 0}, {4, 0}}, []NodeID{{1, 1}, {1, 2}, {2, 1}, {2, 2}}}, 62 | {"all twins", 2, 2, nil, []NodeID{{1, 1}, {1, 2}, {2, 1}, {2, 2}}}, 63 | {"edge case: zero nodes", 0, 0, nil, nil}, 64 | } 65 | 66 | for _, tt := range tests { 67 | t.Run(tt.name, func(t *testing.T) { 68 | gotNodes, gotTwins := assignNodeIDs(tt.numNodes, tt.numTwins) 69 | 70 | if !reflect.DeepEqual(gotNodes, tt.wantNodes) { 71 | t.Errorf("assignNodeIDs() gotNodes = %v, want %v", gotNodes, tt.wantNodes) 72 | } 73 | 74 | if !reflect.DeepEqual(gotTwins, tt.wantTwins) { 75 | t.Errorf("assignNodeIDs() gotTwins = %v, want %v", gotTwins, tt.wantTwins) 76 | } 77 | }) 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /metrics/plotting/clientlatency.go: -------------------------------------------------------------------------------- 1 | package plotting 2 | 3 | import ( 4 | "fmt" 5 | "path" 6 | "time" 7 | 8 | "github.com/relab/hotstuff/metrics/types" 9 | "gonum.org/v1/plot" 10 | "gonum.org/v1/plot/plotter" 11 | "gonum.org/v1/plot/plotutil" 12 | ) 13 | 14 | // 
ClientLatencyPlot plots client latency measurements. 15 | type ClientLatencyPlot struct { 16 | startTimes StartTimes 17 | measurements MeasurementMap 18 | } 19 | 20 | // NewClientLatencyPlot returns a new client latency plotter. 21 | func NewClientLatencyPlot() ClientLatencyPlot { 22 | return ClientLatencyPlot{ 23 | startTimes: NewStartTimes(), 24 | measurements: NewMeasurementMap(), 25 | } 26 | } 27 | 28 | // Add adds a measurement to the plot. 29 | func (p *ClientLatencyPlot) Add(measurement any) { 30 | p.startTimes.Add(measurement) 31 | 32 | latency, ok := measurement.(*types.LatencyMeasurement) 33 | if !ok { 34 | return 35 | } 36 | 37 | // only care about client's latency 38 | if !latency.GetEvent().GetClient() { 39 | return 40 | } 41 | id := latency.GetEvent().GetID() 42 | p.measurements.Add(id, latency) 43 | } 44 | 45 | // PlotAverage plots the average latency of all clients within each measurement interval. 46 | func (p *ClientLatencyPlot) PlotAverage(filename string, measurementInterval time.Duration) (err error) { 47 | const ( 48 | xlabel = "Time (seconds)" 49 | ylabel = "Latency (ms)" 50 | ) 51 | if path.Ext(filename) == ".csv" { 52 | return CSVPlot(filename, []string{xlabel, ylabel}, func() plotter.XYer { 53 | return avgLatency(p, measurementInterval) 54 | }) 55 | } 56 | return GonumPlot(filename, xlabel, ylabel, func(plt *plot.Plot) error { 57 | // TODO: error bars 58 | if err := plotutil.AddLinePoints(plt, avgLatency(p, measurementInterval)); err != nil { 59 | return fmt.Errorf("failed to add line plot: %w", err) 60 | } 61 | return nil 62 | }) 63 | } 64 | 65 | func avgLatency(p *ClientLatencyPlot, interval time.Duration) plotter.XYer { 66 | intervals := GroupByTimeInterval(&p.startTimes, p.measurements, interval) 67 | return TimeAndAverage(intervals, func(m Measurement) (float64, uint64) { 68 | latency := m.(*types.LatencyMeasurement) 69 | return latency.GetLatency(), latency.GetCount() 70 | }) 71 | } 72 | 
-------------------------------------------------------------------------------- /metrics/plotting/throughput.go: -------------------------------------------------------------------------------- 1 | package plotting 2 | 3 | import ( 4 | "fmt" 5 | "path" 6 | "time" 7 | 8 | "github.com/relab/hotstuff/metrics/types" 9 | "gonum.org/v1/plot" 10 | "gonum.org/v1/plot/plotter" 11 | "gonum.org/v1/plot/plotutil" 12 | ) 13 | 14 | // ThroughputPlot is a plotter that plots throughput vs time. 15 | type ThroughputPlot struct { 16 | startTimes StartTimes 17 | measurements MeasurementMap 18 | } 19 | 20 | // NewThroughputPlot returns a new throughput plotter. 21 | func NewThroughputPlot() ThroughputPlot { 22 | return ThroughputPlot{ 23 | startTimes: NewStartTimes(), 24 | measurements: NewMeasurementMap(), 25 | } 26 | } 27 | 28 | // Add adds a measurement to the plotter. 29 | func (p *ThroughputPlot) Add(measurement any) { 30 | p.startTimes.Add(measurement) 31 | 32 | throughput, ok := measurement.(*types.ThroughputMeasurement) 33 | if !ok { 34 | return 35 | } 36 | 37 | if throughput.GetEvent().GetClient() { 38 | // ignoring client events 39 | return 40 | } 41 | 42 | id := throughput.GetEvent().GetID() 43 | p.measurements.Add(id, throughput) 44 | } 45 | 46 | // PlotAverage plots the average throughput of all replicas at specified time intervals. 
// avgThroughput groups the collected replica measurements into fixed time
// intervals and returns the per-interval average throughput as XY points.
func avgThroughput(p *ThroughputPlot, interval time.Duration) plotter.XYer {
	intervals := GroupByTimeInterval(&p.startTimes, p.measurements, interval)
	return TimeAndAverage(intervals, func(m Measurement) (float64, uint64) {
		tp := m.(*types.ThroughputMeasurement)
		// commands per second; weight 1 so every measurement counts equally.
		// NOTE(review): a zero Duration would yield +Inf here — confirm that
		// upstream always records a positive measurement duration.
		return float64(tp.GetCommands()) / tp.GetDuration().AsDuration().Seconds(), 1
	})
}
# Compare-FileWriteTime returns $true when $First exists and its last write
# time is at or after $Second's; otherwise $false. Used to decide whether a
# generated output is already up to date.
function Compare-FileWriteTime {
    param (
        $First,
        $Second
    )

    # -ErrorAction SilentlyContinue: Get-ItemProperty writes an error when the
    # path does not exist (e.g. the output has never been generated), which
    # would pollute the build output before the null check below runs.
    $firstDate = (Get-ItemProperty -Path $First -Name LastWriteTime -ErrorAction SilentlyContinue).LastWriteTime
    $secondDate = (Get-ItemProperty -Path $Second -Name LastWriteTime -ErrorAction SilentlyContinue).LastWriteTime

    # If either timestamp is unavailable, treat the output as stale so it is
    # regenerated. (Previously a missing $Second would reach the [datetime]
    # cast with $null and fail.)
    if ($null -eq $firstDate -or $null -eq $secondDate) {
        return $false
    }

    if ([datetime]$firstDate -ge [datetime]$secondDate) {
        return $true
    }

    return $false
}
// TestUpdateCommit tests that UpdateCommittedBlock stores the given block and
// that CommittedBlock returns that same block instance.
func TestUpdateCommit(t *testing.T) {
	// a minimal block extending genesis with an empty command batch
	block := hotstuff.NewBlock(
		hotstuff.GetGenesis().Hash(),
		hotstuff.GetGenesis().QuorumCert(),
		&clientpb.Batch{},
		1,
		1,
	)
	essentials := testutil.WireUpEssentials(t, 1, crypto.NameECDSA)
	states, err := protocol.NewViewStates(essentials.Blockchain(), essentials.Authority())
	if err != nil {
		t.Fatal(err)
	}
	states.UpdateCommittedBlock(block)
	// pointer comparison: the exact same block instance must be returned
	if block != states.CommittedBlock() {
		t.Fatal("committed block was not updated")
	}
}
-------------------------------------------------------------------------------- /security/crypto/multisignature.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "slices" 5 | 6 | "github.com/relab/hotstuff" 7 | ) 8 | 9 | // Signature is the individual component in MultiSignature 10 | type Signature interface { 11 | Signer() hotstuff.ID 12 | ToBytes() []byte 13 | } 14 | 15 | // Multi is a set of (partial) signatures. 16 | type Multi[T Signature] []T 17 | 18 | // NewMulti creates a new Multi from the given signatures. 19 | // The provided signatures are assumed to be sorted by signer ID. 20 | func NewMulti[T Signature](sigs ...T) Multi[T] { 21 | return Multi[T](sigs) 22 | } 23 | 24 | // NewMultiSorted creates a new Multi from the given signatures 25 | // ensuring they are sorted by signer ID. 26 | func NewMultiSorted[T Signature](sigs ...T) Multi[T] { 27 | slices.SortFunc(sigs, func(a, b T) int { return int(a.Signer()) - int(b.Signer()) }) 28 | return Multi[T](sigs) 29 | } 30 | 31 | // ToBytes returns the object as bytes. 32 | func (sig Multi[T]) ToBytes() []byte { 33 | var b []byte 34 | for _, signature := range sig { 35 | b = append(b, signature.ToBytes()...) 36 | } 37 | return b 38 | } 39 | 40 | // Participants returns the IDs of replicas who participated in the threshold signature. 41 | func (sig Multi[T]) Participants() hotstuff.IDSet { 42 | return sig 43 | } 44 | 45 | // Add is not supported for Multi. 46 | func (sig Multi[T]) Add(_ hotstuff.ID) { 47 | panic("not implemented") 48 | } 49 | 50 | // Contains returns true if the set contains the ID. 51 | func (sig Multi[T]) Contains(id hotstuff.ID) bool { 52 | return slices.ContainsFunc(sig, func(s T) bool { 53 | return s.Signer() == id 54 | }) 55 | } 56 | 57 | // ForEach calls f for each ID in the set. 
58 | func (sig Multi[T]) ForEach(f func(hotstuff.ID)) { 59 | for _, s := range sig { 60 | f(s.Signer()) 61 | } 62 | } 63 | 64 | // RangeWhile calls f for each ID in the set until f returns false. 65 | func (sig Multi[T]) RangeWhile(f func(hotstuff.ID) bool) { 66 | for _, s := range sig { 67 | if !f(s.Signer()) { 68 | break 69 | } 70 | } 71 | } 72 | 73 | // Len returns the number of entries in the set. 74 | func (sig Multi[T]) Len() int { 75 | return len(sig) 76 | } 77 | 78 | func (sig Multi[T]) String() string { 79 | return hotstuff.IDSetToString(sig) 80 | } 81 | 82 | var ( 83 | _ hotstuff.QuorumSignature = (*Multi[Signature])(nil) 84 | _ hotstuff.IDSet = (*Multi[Signature])(nil) 85 | ) 86 | -------------------------------------------------------------------------------- /internal/tree/treelatency.go: -------------------------------------------------------------------------------- 1 | package tree 2 | 3 | import ( 4 | "slices" 5 | "time" 6 | 7 | "github.com/relab/hotstuff" 8 | "github.com/relab/hotstuff/internal/latency" 9 | ) 10 | 11 | // WaitTime returns the expected time to wait for the aggregation of votes. 12 | func (t *Tree) WaitTime() time.Duration { 13 | return t.waitTime 14 | } 15 | 16 | // SetAggregationWaitTime sets the wait time for the aggregation of votes based on the 17 | // highest latency path from node id to its leaf nodes. 18 | // Only one of SetAggregationWaitTime or SetTreeHeightWaitTime should be called. 19 | func (t *Tree) SetAggregationWaitTime(lm latency.Matrix, delta time.Duration) { 20 | t.waitTime = t.aggregationTime(t.id, lm, delta) 21 | } 22 | 23 | // SetTreeHeightWaitTime sets the wait time for the aggregation of votes based on the 24 | // height of the tree. 25 | // Only one of SetAggregationWaitTime or SetTreeHeightWaitTime should be called. 
26 | func (t *Tree) SetTreeHeightWaitTime(delta time.Duration) { 27 | t.waitTime = t.treeHeightTime(delta) 28 | } 29 | 30 | // aggregationTime returns the time to wait for the aggregation of votes based on the 31 | // highest latency path from node id to its leaf nodes. 32 | // The id is required because the function is recursive. 33 | // 34 | // If the node is a leaf, it returns 0 as no aggregation is required. 35 | // For other nodes, the aggregation time for a child includes: 36 | // - Round-trip time to the child 37 | // - Aggregation time required by the child node (recursive call) 38 | func (t *Tree) aggregationTime(id hotstuff.ID, lm latency.Matrix, delta time.Duration) time.Duration { 39 | children := t.ChildrenOf(id) 40 | if len(children) == 0 { 41 | return 0 // base case: leaf nodes have zero aggregation latency. 42 | } 43 | // calculate aggregation latencies for each child 44 | latencies := make([]time.Duration, len(children)) 45 | for i, child := range children { 46 | latencies[i] = 2*lm.Latency(id, child) + t.aggregationTime(child, lm, delta) 47 | } 48 | return max(latencies) + delta 49 | } 50 | 51 | // treeHeightTime returns a fixed time to wait based on the height of the tree. 52 | func (t *Tree) treeHeightTime(delta time.Duration) time.Duration { 53 | return time.Duration(2*(t.ReplicaHeight()-1)) * delta 54 | } 55 | 56 | func max(latencies []time.Duration) time.Duration { 57 | if len(latencies) == 0 { 58 | return 0 59 | } 60 | return slices.Max(latencies) 61 | } 62 | -------------------------------------------------------------------------------- /protocol/synchronizer/timeoutrule_simple.go: -------------------------------------------------------------------------------- 1 | package synchronizer 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/core" 8 | "github.com/relab/hotstuff/security/cert" 9 | ) 10 | 11 | // Simple implements a simple timeout rule. 
12 | type Simple struct { 13 | config *core.RuntimeConfig 14 | auth *cert.Authority 15 | } 16 | 17 | // newSimple returns a simple timeout rule instance. 18 | func newSimple( 19 | config *core.RuntimeConfig, 20 | auth *cert.Authority, 21 | ) *Simple { 22 | return &Simple{ 23 | config: config, 24 | auth: auth, 25 | } 26 | } 27 | 28 | func (s *Simple) LocalTimeoutRule(view hotstuff.View, syncInfo hotstuff.SyncInfo) (*hotstuff.TimeoutMsg, error) { 29 | sig, err := s.auth.Sign(view.ToBytes()) 30 | if err != nil { 31 | return nil, fmt.Errorf("failed to sign view %d: %w", view, err) 32 | } 33 | return &hotstuff.TimeoutMsg{ 34 | ID: s.config.ID(), 35 | View: view, 36 | SyncInfo: syncInfo, 37 | ViewSignature: sig, 38 | }, nil 39 | } 40 | 41 | func (s *Simple) RemoteTimeoutRule(_, timeoutView hotstuff.View, timeouts []hotstuff.TimeoutMsg) (hotstuff.SyncInfo, error) { 42 | tc, err := s.auth.CreateTimeoutCert(timeoutView, timeouts) 43 | if err != nil { 44 | return hotstuff.SyncInfo{}, fmt.Errorf("failed to create timeout certificate: %w", err) 45 | } 46 | return hotstuff.NewSyncInfoWith(tc), nil 47 | } 48 | 49 | func (s *Simple) VerifySyncInfo(syncInfo hotstuff.SyncInfo) (qc *hotstuff.QuorumCert, view hotstuff.View, timeout bool, err error) { 50 | if timeoutCert, haveTC := syncInfo.TC(); haveTC { 51 | if err := s.auth.VerifyTimeoutCert(timeoutCert); err != nil { 52 | return nil, 0, timeout, fmt.Errorf("failed to verify timeout certificate: %w", err) 53 | } 54 | view = timeoutCert.View() 55 | timeout = true 56 | } 57 | 58 | if quorumCert, haveQC := syncInfo.QC(); haveQC { 59 | if err := s.auth.VerifyQuorumCert(quorumCert); err != nil { 60 | return nil, 0, timeout, fmt.Errorf("failed to verify quorum certificate: %w", err) 61 | } 62 | // if there is both a TC and a QC, we use the QC if its view is greater or equal to the TC. 
// queue is a bounded circular buffer.
// If an entry is pushed to the queue when it is full, the oldest entry will be dropped.
type queue struct {
	mut       sync.Mutex
	entries   []any
	head      int // index of the oldest entry, or -1 when the queue is empty
	tail      int // index of the newest entry, or -1 when the queue is empty
	readyChan chan struct{}
}

// newQueue returns a queue with the given fixed capacity.
// It panics if capacity is zero, since a zero-sized ring buffer is unusable.
func newQueue(capacity uint) queue {
	if capacity == 0 {
		panic("capacity must be greater than 0")
	}

	return queue{
		entries:   make([]any, capacity),
		head:      -1,
		tail:      -1,
		readyChan: make(chan struct{}),
	}
}

// push adds an entry to the buffer in a FIFO fashion. If the queue is full,
// the oldest entry is dropped to make space for the newest entry and is
// returned as droppedEvent; otherwise droppedEvent is nil.
func (q *queue) push(entry any) (droppedEvent any) {
	q.mut.Lock()
	defer q.mut.Unlock()

	pos := q.tail + 1
	if pos == len(q.entries) {
		pos = 0 // wrap around the ring
	}
	if pos == q.head {
		// The buffer is full: the oldest entry lives at q.head (== pos) and is
		// about to be overwritten below, so capture it BEFORE advancing head.
		// (Bug fix: the previous code incremented q.head first and then read
		// q.entries[q.head], returning the second-oldest entry — which is
		// still in the queue — instead of the entry actually dropped.)
		droppedEvent = q.entries[pos]
		q.head++
		if q.head == len(q.entries) {
			q.head = 0
		}
	}
	q.entries[pos] = entry
	q.tail = pos

	if q.head == -1 {
		q.head = pos // queue was empty; the new entry becomes the head
	}

	// non-blocking signal that the queue now has at least one entry
	select {
	case q.readyChan <- struct{}{}:
	default:
	}
	return droppedEvent
}
// pop removes the first entry and returns it.
// If the buffer is empty, nil and false is returned.
func (q *queue) pop() (entry any, ok bool) {
	q.mut.Lock()
	defer q.mut.Unlock()

	if q.head == -1 {
		// head == -1 marks the empty state
		return nil, false
	}

	entry = q.entries[q.head]

	if q.head == q.tail {
		// that was the last entry; reset to the empty state
		q.head = -1
		q.tail = -1
	} else {
		// advance head, wrapping around the ring
		q.head++
		if q.head == len(q.entries) {
			q.head = 0
		}
	}

	return entry, true
}
29 | func NewJSONLogger(wr io.Writer, logger logging.Logger) (Logger, error) { 30 | _, err := io.WriteString(wr, "[\n") 31 | if err != nil { 32 | return nil, fmt.Errorf("failed to write start of JSON array: %v", err) 33 | } 34 | return &jsonLogger{logger: logger, wr: wr, first: true}, nil 35 | } 36 | 37 | func (dl *jsonLogger) Log(msg proto.Message) { 38 | var ( 39 | anyMsg *anypb.Any 40 | err error 41 | ok bool 42 | ) 43 | if anyMsg, ok = msg.(*anypb.Any); !ok { 44 | anyMsg, err = anypb.New(msg) 45 | if err != nil { 46 | dl.logger.Errorf("failed to create Any message: %v", err) 47 | return 48 | } 49 | } 50 | err = dl.write(anyMsg) 51 | if err != nil { 52 | dl.logger.Errorf("failed to write message to log: %v", err) 53 | } 54 | } 55 | 56 | func (dl *jsonLogger) write(msg proto.Message) (err error) { 57 | dl.mut.Lock() 58 | defer dl.mut.Unlock() 59 | 60 | if dl.first { 61 | dl.first = false 62 | } else { 63 | // write a comma and newline to separate the messages 64 | _, err := io.WriteString(dl.wr, ",\n") 65 | if err != nil { 66 | return err 67 | } 68 | } 69 | 70 | b, err := protojson.MarshalOptions{ 71 | Indent: "\t", 72 | EmitUnpopulated: true, 73 | }.Marshal(msg) 74 | if err != nil { 75 | return fmt.Errorf("failed to marshal message to JSON: %w", err) 76 | } 77 | _, err = dl.wr.Write(b) 78 | return err 79 | } 80 | 81 | // Close closes the metrics logger 82 | func (dl *jsonLogger) Close() error { 83 | _, err := io.WriteString(dl.wr, "\n]") 84 | return err 85 | } 86 | 87 | type nopLogger struct{} 88 | 89 | func (nopLogger) Log(proto.Message) {} 90 | func (nopLogger) Close() error { return nil } 91 | 92 | // NopLogger returns a metrics logger that discards any messages. 93 | // This is useful for testing and other situations where metrics logging is disabled. 
94 | func NopLogger() Logger { 95 | return nopLogger{} 96 | } 97 | -------------------------------------------------------------------------------- /security/crypto/bitfield_test.go: -------------------------------------------------------------------------------- 1 | package crypto_test 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | 7 | "github.com/relab/hotstuff" 8 | "github.com/relab/hotstuff/security/crypto" 9 | ) 10 | 11 | // TestBitfieldAdd checks that the bitfield can extend itself to fit larger IDs. 12 | func TestBitfieldAdd(t *testing.T) { 13 | testCases := []struct { 14 | id hotstuff.ID 15 | len int 16 | }{{1, 1}, {9, 2}, {17, 3}} 17 | 18 | var bm crypto.Bitfield 19 | 20 | // initial length should be 0 21 | if len(bm.Bytes()) != 0 { 22 | t.Errorf("Unexpected length: got: %v, want: %v", len(bm.Bytes()), 0) 23 | } 24 | 25 | for _, testCase := range testCases { 26 | bm.Add(testCase.id) 27 | if len(bm.Bytes()) != testCase.len { 28 | t.Errorf("Unexpected length: got: %v, want: %v", len(bm.Bytes()), testCase.len) 29 | } 30 | } 31 | } 32 | 33 | func TestBitfieldContains(t *testing.T) { 34 | random := hotstuff.ID(rand.Intn(254)) + 2 // should not be 0 or 1 35 | testCases := []hotstuff.ID{1, random, random + 1} 36 | 37 | var bm crypto.Bitfield 38 | 39 | // first check that the bitfield returns false for all testCases 40 | for _, testCase := range testCases { 41 | if bm.Contains(testCase) { 42 | t.Errorf("Wrong result for id %d: got: true, want: false", testCase) 43 | } 44 | } 45 | 46 | // add all test cases 47 | for _, testCase := range testCases { 48 | bm.Add(testCase) 49 | } 50 | 51 | // now check that the bitfield contains the test cases 52 | for _, testCase := range testCases { 53 | if !bm.Contains(testCase) { 54 | t.Errorf("Wrong result for id %d: got: false, want: true", testCase) 55 | } 56 | } 57 | } 58 | 59 | func TestBitfieldForEach(t *testing.T) { 60 | random := hotstuff.ID(rand.Intn(254)) + 2 // should not be 0 or 1 61 | testCases := []hotstuff.ID{1, 
random, random + 1} 62 | 63 | var bm crypto.Bitfield 64 | 65 | // first check that the bitfield is empty 66 | count := 0 67 | bm.ForEach(func(_ hotstuff.ID) { 68 | count++ 69 | }) 70 | 71 | if count != 0 { 72 | t.Error("bitfield was not empty") 73 | } 74 | 75 | // add all test cases 76 | for _, testCase := range testCases { 77 | bm.Add(testCase) 78 | } 79 | 80 | // now check that the bitfield contains the test cases 81 | var got []hotstuff.ID 82 | bm.ForEach(func(i hotstuff.ID) { 83 | got = append(got, i) 84 | }) 85 | 86 | if len(got) != len(testCases) { 87 | t.Fatal("ForEach gave the wrong number of IDs") 88 | } 89 | 90 | for i := range got { 91 | if got[i] != testCases[i] { 92 | t.Errorf("got: %d, want: %d", got[i], testCases[i]) 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /twins/json_test.go: -------------------------------------------------------------------------------- 1 | package twins_test 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/relab/hotstuff/twins" 8 | ) 9 | 10 | const jsonWant = `{ 11 | "num_nodes": 4, 12 | "num_twins": 1, 13 | "partitions": 2, 14 | "views": 7, 15 | "ticks": 100, 16 | "shuffle": false, 17 | "seed": 0, 18 | "scenarios": [ 19 | [{"leader":1,"partitions":[[{"ReplicaID":1,"TwinID":0},{"ReplicaID":2,"TwinID":0},{"ReplicaID":3,"TwinID":0}],[{"ReplicaID":4,"TwinID":0},{"ReplicaID":5,"TwinID":0}]]}] 20 | ] 21 | }` 22 | 23 | var settingsWant = twins.Settings{ 24 | NumNodes: 4, 25 | NumTwins: 1, 26 | Partitions: 2, 27 | Views: 7, 28 | Ticks: 100, 29 | Shuffle: false, 30 | Seed: 0, 31 | } 32 | 33 | var scenarioWant = twins.Scenario{ 34 | twins.View{ 35 | Leader: 1, 36 | Partitions: []twins.NodeSet{ 37 | {twins.NodeID{1, 0}: {}, twins.NodeID{2, 0}: {}, twins.NodeID{3, 0}: {}}, 38 | {twins.NodeID{4, 0}: {}, twins.NodeID{5, 0}: {}}, 39 | }, 40 | }, 41 | } 42 | 43 | func TestFromJSON(t *testing.T) { 44 | source, err := twins.FromJSON(bytes.NewReader([]byte(jsonWant))) 
45 | if err != nil { 46 | t.Fatal(err) 47 | } 48 | 49 | if got := source.Settings(); got != settingsWant { 50 | t.Errorf("got: %v, want: %v", got, settingsWant) 51 | } 52 | 53 | scenario, err := source.NextScenario() 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | 58 | if len(scenario) != 1 || scenario[0].Leader != scenarioWant[0].Leader || 59 | !equalPartitions(scenario[0].Partitions, scenarioWant[0].Partitions) { 60 | 61 | t.Errorf("got: %v, want: %v", scenario, scenarioWant) 62 | } 63 | } 64 | 65 | func TestToJSON(t *testing.T) { 66 | var buf bytes.Buffer 67 | wr, err := twins.ToJSON(settingsWant, &buf) 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | err = wr.WriteScenario(scenarioWant) 72 | if err != nil { 73 | t.Fatal(err) 74 | } 75 | err = wr.Close() 76 | if err != nil { 77 | t.Fatal(err) 78 | } 79 | if got := buf.String(); got != jsonWant { 80 | t.Errorf("got: %v, want: %v", got, jsonWant) 81 | } 82 | } 83 | 84 | func equalPartitions(a, b []twins.NodeSet) bool { 85 | if len(a) != len(b) { 86 | return false 87 | } 88 | 89 | pairsFound := 0 90 | 91 | for _, pa := range a { 92 | inner: 93 | for _, pb := range b { 94 | if len(pa) != len(pb) { 95 | continue 96 | } 97 | for node := range pa { 98 | if !pb.Contains(node) { 99 | continue inner 100 | } 101 | } 102 | pairsFound++ 103 | } 104 | } 105 | 106 | return pairsFound == len(a) 107 | } 108 | -------------------------------------------------------------------------------- /core/eventloop/queue_test.go: -------------------------------------------------------------------------------- 1 | package eventloop 2 | 3 | import "testing" 4 | 5 | func TestPopEmptyQueue(t *testing.T) { 6 | q := newQueue(1) 7 | elem, ok := q.pop() 8 | if elem != nil || ok { 9 | t.Error("expected q.pop() to return nil, false") 10 | } 11 | } 12 | 13 | func TestEmptyLen(t *testing.T) { 14 | q := newQueue(1) 15 | 16 | if q.len() != 0 { 17 | t.Error("expected q.len() to return 0") 18 | } 19 | } 20 | 21 | func TestPushAndPopWithCapacity1(t 
*testing.T) { 22 | q := newQueue(1) 23 | q.push("hello") 24 | 25 | elem, ok := q.pop() 26 | 27 | if elem.(string) != "hello" || !ok { 28 | t.Errorf("expected q.pop() to return \"hello\", true") 29 | } 30 | } 31 | 32 | func TestPushAndThenLen(t *testing.T) { 33 | q := newQueue(1) 34 | q.push("hello") 35 | 36 | if q.len() != 1 { 37 | t.Errorf("expected q.len() to return 1") 38 | } 39 | } 40 | 41 | func TestPushAndThenPopTwice(t *testing.T) { 42 | q := newQueue(1) 43 | q.push("hello") 44 | 45 | elem, ok := q.pop() 46 | if elem.(string) != "hello" || !ok { 47 | t.Errorf("expected q.pop() to return \"hello\", true") 48 | } 49 | 50 | elem, ok = q.pop() 51 | if elem != nil || ok { 52 | t.Error("expected q.pop() to return nil, false") 53 | } 54 | } 55 | 56 | func TestPushWhenFull(t *testing.T) { 57 | q := newQueue(1) 58 | q.push("hello") 59 | dropped := q.push("world") 60 | if dropped.(string) != "hello" { 61 | t.Errorf("expected q.push() to return \"hello\"") 62 | } 63 | 64 | elem, ok := q.pop() 65 | if elem.(string) != "world" || !ok { 66 | t.Errorf("expected q.pop() to return \"world\", true") 67 | } 68 | } 69 | 70 | func TestPushMultiple(t *testing.T) { 71 | q := newQueue(2) 72 | q.push("hello") 73 | q.push("world") 74 | 75 | elem, ok := q.pop() 76 | if elem.(string) != "hello" || !ok { 77 | t.Errorf("expected q.pop() to return \"hello\", true") 78 | } 79 | 80 | elem, ok = q.pop() 81 | if elem.(string) != "world" || !ok { 82 | t.Errorf("expected q.pop() to return \"world\", true") 83 | } 84 | } 85 | 86 | func TestLenWhenTailInFrontOfHead(t *testing.T) { 87 | q := newQueue(2) 88 | 89 | q.push("hello") 90 | q.push("world") 91 | q.pop() 92 | q.push("foo") 93 | 94 | if q.len() != 2 { 95 | t.Error("expected q.len() to return 2") 96 | } 97 | } 98 | 99 | func TestPopWhenTailInFrontOfHead(t *testing.T) { 100 | q := newQueue(2) 101 | 102 | q.push("hello") 103 | q.push("world") 104 | q.pop() 105 | q.push("test") 106 | 107 | elem, ok := q.pop() 108 | if elem.(string) != "world" 
|| !ok { 109 | t.Errorf("expected q.pop() to return \"world\", true") 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /core/config.go: -------------------------------------------------------------------------------- 1 | // Package core provides the runtime configuration for the consensus protocols. 2 | package core 3 | 4 | import ( 5 | "github.com/relab/hotstuff" 6 | "github.com/relab/hotstuff/internal/tree" 7 | ) 8 | 9 | // RuntimeConfig stores runtime configuration settings. 10 | type RuntimeConfig struct { 11 | id hotstuff.ID 12 | privateKey hotstuff.PrivateKey 13 | 14 | aggQC bool 15 | syncVoteVerification bool 16 | 17 | connectionMetadata map[string]string 18 | replicas map[hotstuff.ID]*hotstuff.ReplicaInfo 19 | 20 | sharedRandomSeed int64 21 | 22 | tree *tree.Tree 23 | 24 | // Cache size for crypto operations. 25 | cacheSize uint 26 | } 27 | 28 | func NewRuntimeConfig(id hotstuff.ID, pk hotstuff.PrivateKey, opts ...RuntimeOption) *RuntimeConfig { 29 | if id == hotstuff.ID(0) { 30 | panic("id must be greater than zero") 31 | } 32 | g := &RuntimeConfig{ 33 | id: id, 34 | privateKey: pk, 35 | connectionMetadata: make(map[string]string), 36 | replicas: make(map[hotstuff.ID]*hotstuff.ReplicaInfo), 37 | } 38 | for _, opt := range opts { 39 | opt(g) 40 | } 41 | return g 42 | } 43 | 44 | // ID returns the ID. 45 | func (g *RuntimeConfig) ID() hotstuff.ID { 46 | return g.id 47 | } 48 | 49 | // PrivateKey returns the private key. 50 | func (g *RuntimeConfig) PrivateKey() hotstuff.PrivateKey { 51 | return g.privateKey 52 | } 53 | 54 | // HasAggregateQC returns true if aggregated quorum certificates should be used. 55 | // This is true for Fast-HotStuff: https://arxiv.org/abs/2010.11454 56 | func (g *RuntimeConfig) HasAggregateQC() bool { 57 | return g.aggQC 58 | } 59 | 60 | // SyncVerification returns true if votes should be verified synchronously. 
61 | // Enabling this should make the voting machine process votes synchronously. 62 | func (g *RuntimeConfig) SyncVerification() bool { 63 | return g.syncVoteVerification 64 | } 65 | 66 | // SharedRandomSeed returns a random number that is shared between all replicas. 67 | func (g *RuntimeConfig) SharedRandomSeed() int64 { 68 | return g.sharedRandomSeed 69 | } 70 | 71 | // HasKauriTree returns true if a tree was set for the tree-based leader scheme used in Kauri. 72 | // This method also signifies that Kauri is enabled. 73 | func (g *RuntimeConfig) HasKauriTree() bool { 74 | return g.tree != nil 75 | } 76 | 77 | // Tree returns the tree configuration for the tree-based leader scheme. 78 | func (g *RuntimeConfig) Tree() *tree.Tree { 79 | return g.tree 80 | } 81 | 82 | // CacheSize returns the cache size for crypto operations. 83 | // A value of 0 means no cache is used. 84 | func (g *RuntimeConfig) CacheSize() uint { 85 | return g.cacheSize 86 | } 87 | -------------------------------------------------------------------------------- /internal/tree/treelatency_test.go: -------------------------------------------------------------------------------- 1 | package tree 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/relab/hotstuff" 8 | "github.com/relab/hotstuff/internal/latency" 9 | ) 10 | 11 | var ( 12 | cities07 = []string{ 13 | "Melbourne", "Toronto", "Prague", "Paris", 14 | "Tokyo", "Amsterdam", "Auckland", 15 | } 16 | cities15 = []string{ 17 | "Melbourne", "Melbourne", "Toronto", "Toronto", "Prague", 18 | "Prague", "Paris", "Paris", "Tokyo", "Tokyo", "Amsterdam", 19 | "Amsterdam", "Auckland", "Auckland", "Melbourne", 20 | } 21 | ) 22 | 23 | func TestAggregationWaitTime(t *testing.T) { 24 | testDataAggregationTime := []struct { 25 | id hotstuff.ID 26 | locs []string 27 | delta time.Duration 28 | want time.Duration 29 | }{ 30 | {id: 1, locs: cities07, delta: 0, want: 521775000}, 31 | {id: 2, locs: cities07, delta: 0, want: 178253000}, 32 | {id: 3, locs: 
cities07, delta: 0, want: 279038000}, 33 | {id: 4, locs: cities07, delta: 0, want: 0}, 34 | {id: 1, locs: cities15, delta: 0, want: 607507000}, 35 | {id: 2, locs: cities15, delta: 0, want: 511744000}, 36 | {id: 3, locs: cities15, delta: 0, want: 388915000}, 37 | {id: 4, locs: cities15, delta: 0, want: 178253000}, 38 | {id: 5, locs: cities15, delta: 0, want: 269007000}, 39 | } 40 | for _, test := range testDataAggregationTime { 41 | bf := 2 42 | treePos := DefaultTreePos(len(test.locs)) 43 | tree := NewSimple(test.id, bf, treePos) 44 | lm := latency.MatrixFrom(test.locs) 45 | tree.SetAggregationWaitTime(lm, test.delta) 46 | got := tree.WaitTime() 47 | if got != test.want { 48 | t.Errorf("tree.WaitTime(%v) = %v, want %v", test.delta, got, test.want) 49 | } 50 | } 51 | } 52 | 53 | func TestTreeHeightWaitTime(t *testing.T) { 54 | testDataTreeHeightTime := []struct { 55 | id hotstuff.ID 56 | locs []string 57 | delta time.Duration 58 | want time.Duration 59 | }{ 60 | {id: 1, locs: cities15, delta: 10, want: 60}, 61 | {id: 2, locs: cities15, delta: 10, want: 40}, 62 | {id: 3, locs: cities15, delta: 10, want: 40}, 63 | {id: 4, locs: cities15, delta: 10, want: 20}, 64 | {id: 9, locs: cities15, delta: 10, want: 0}, 65 | {id: 1, locs: cities07, delta: 10, want: 40}, 66 | {id: 2, locs: cities07, delta: 10, want: 20}, 67 | {id: 3, locs: cities07, delta: 10, want: 20}, 68 | {id: 4, locs: cities07, delta: 10, want: 0}, 69 | } 70 | for _, test := range testDataTreeHeightTime { 71 | bf := 2 72 | treePos := DefaultTreePos(len(test.locs)) 73 | tree := NewSimple(test.id, bf, treePos) 74 | tree.SetTreeHeightWaitTime(test.delta) 75 | got := tree.WaitTime() 76 | if got != test.want { 77 | t.Errorf("tree.WaitTime(%v) = %v, want %v", test.delta, got, test.want) 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /protocol/leaderrotation/carousel.go: -------------------------------------------------------------------------------- 1 | // 
Package leaderrotation provide various leader rotation algorithms. 2 | package leaderrotation 3 | 4 | import ( 5 | "math/rand" 6 | "slices" 7 | 8 | "github.com/relab/hotstuff" 9 | "github.com/relab/hotstuff/core" 10 | "github.com/relab/hotstuff/core/logging" 11 | "github.com/relab/hotstuff/protocol" 12 | "github.com/relab/hotstuff/security/blockchain" 13 | ) 14 | 15 | const NameCarousel = "carousel" 16 | 17 | type Carousel struct { 18 | blockchain *blockchain.Blockchain 19 | viewStates *protocol.ViewStates 20 | config *core.RuntimeConfig 21 | logger logging.Logger 22 | 23 | chainLength int 24 | } 25 | 26 | // NewCarousel returns a new instance of the Carousel leader-election algorithm. 27 | func NewCarousel( 28 | chainLength int, 29 | 30 | blockchain *blockchain.Blockchain, 31 | viewStates *protocol.ViewStates, 32 | config *core.RuntimeConfig, 33 | logger logging.Logger, 34 | ) *Carousel { 35 | return &Carousel{ 36 | blockchain: blockchain, 37 | chainLength: chainLength, 38 | viewStates: viewStates, 39 | config: config, 40 | logger: logger, 41 | } 42 | } 43 | 44 | func (c *Carousel) GetLeader(round hotstuff.View) hotstuff.ID { 45 | commitHead := c.viewStates.CommittedBlock() 46 | 47 | if commitHead.QuorumCert().Signature() == nil { 48 | c.logger.Debug("in startup; using round-robin") 49 | return ChooseRoundRobin(round, c.config.ReplicaCount()) 50 | } 51 | 52 | if commitHead.View() != round-hotstuff.View(c.chainLength) { 53 | c.logger.Debugf("fallback to round-robin (view=%d, commitHead=%d)", round, commitHead.View()) 54 | return ChooseRoundRobin(round, c.config.ReplicaCount()) 55 | } 56 | 57 | c.logger.Debug("proceeding with carousel") 58 | 59 | var ( 60 | block = commitHead 61 | genesis = hotstuff.GetGenesis() 62 | f = hotstuff.NumFaulty(c.config.ReplicaCount()) 63 | lastAuthors = make([]hotstuff.ID, 0, f) 64 | ok = true 65 | ) 66 | for i := 0; ok && i < f && block != genesis; i++ { 67 | lastAuthors = append(lastAuthors, block.Proposer()) 68 | block, ok = 
c.blockchain.Get(block.Parent()) 69 | } 70 | 71 | candidates := make([]hotstuff.ID, 0, c.config.ReplicaCount()-f) 72 | 73 | commitHead.QuorumCert().Signature().Participants().ForEach(func(id hotstuff.ID) { 74 | if !slices.Contains(lastAuthors, id) { 75 | candidates = append(candidates, id) 76 | } 77 | }) 78 | slices.Sort(candidates) 79 | 80 | seed := c.config.SharedRandomSeed() + int64(round) 81 | rnd := rand.New(rand.NewSource(seed)) 82 | 83 | leader := candidates[rnd.Int()%len(candidates)] 84 | c.logger.Debugf("chose id %d", leader) 85 | 86 | return leader 87 | } 88 | 89 | var _ LeaderRotation = (*Carousel)(nil) 90 | -------------------------------------------------------------------------------- /internal/config/cue.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | _ "embed" 5 | "errors" 6 | "fmt" 7 | "iter" 8 | "os" 9 | 10 | "cuelang.org/go/cue" 11 | "cuelang.org/go/cue/cuecontext" 12 | ) 13 | 14 | //go:embed schema.cue 15 | var schemaFile string 16 | 17 | // Experiments returns an iterator over experiment configurations that yields 18 | // an ExperimentConfig for each experiment, unless there was an error. It processes the 19 | // cue file and validates each configuration against the schema. If [base] is non-nil, 20 | // it is used to overwrite values in the Cue config, e.g., specified on the command line. 
func Experiments(filename string, base *ExperimentConfig) iter.Seq2[*ExperimentConfig, error] {
	if base == nil {
		// no command-line overrides: start from an empty config
		base = &ExperimentConfig{}
	}
	return func(yield func(*ExperimentConfig, error) bool) {
		ctx := cuecontext.New()
		// compile the embedded schema and select its "config" definition;
		// every experiment is validated against this schema
		configSchema := ctx.CompileString(schemaFile).LookupPath(cue.ParsePath("config"))
		if configSchema.Err() != nil {
			yield(nil, configSchema.Err())
			return
		}

		b, err := os.ReadFile(filename)
		if err != nil {
			yield(nil, err)
			return
		}
		elem := ctx.CompileString(string(b), cue.Filename(filename))
		if elem.Err() != nil {
			yield(nil, elem.Err())
			return
		}

		// check if elem contains an iterator that can yield a list of experiments
		it, err := cueIterator(elem)
		if err != nil {
			// if not an iterator, check if elem is a single config
			yield(decodeConfig(elem, configSchema, base))
			return
		}
		// yield each experiment; stop early if the consumer breaks out of the loop
		for it.Next() {
			config := it.Value()
			if !yield(decodeConfig(config, configSchema, base)) {
				return
			}
		}
	}
}

// cueIterator returns an iterator over a list of experiments, or
// one that expands into a list of experiments. It first tries to treat
// elem itself as a list; failing that, it looks up the
// "config.experiments" list inside elem. Both errors are joined if
// neither form applies.
func cueIterator(elem cue.Value) (cue.Iterator, error) {
	it, e1 := elem.List()
	if e1 == nil {
		return it, nil
	}
	listVal := elem.LookupPath(cue.ParsePath("config.experiments"))
	if e2 := listVal.Err(); e2 != nil {
		return cue.Iterator{}, errors.Join(e1, e2)
	}
	return listVal.List()
}

// decodeConfig decodes a cue.Value into an ExperimentConfig, validating it against the schema.
75 | func decodeConfig(elem, schema cue.Value, base *ExperimentConfig) (*ExperimentConfig, error) { 76 | config := elem.LookupPath(cue.ParsePath("config")) // config is a { config: { … } } 77 | if config.Err() != nil { 78 | return nil, fmt.Errorf("failed to get config from cue file: %w", config.Err()) 79 | } 80 | unified := schema.Unify(config) 81 | if err := unified.Validate(cue.Concrete(true)); err != nil { 82 | return nil, err 83 | } 84 | ec := base.Clone() 85 | if err := unified.Decode(ec); err != nil { 86 | return nil, err 87 | } 88 | return ec, nil 89 | } 90 | -------------------------------------------------------------------------------- /protocol/synchronizer/timeoutrule_aggregate.go: -------------------------------------------------------------------------------- 1 | package synchronizer 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/relab/hotstuff" 7 | "github.com/relab/hotstuff/core" 8 | "github.com/relab/hotstuff/security/cert" 9 | ) 10 | 11 | // Aggregate implements an aggregate timeout rule. 12 | type Aggregate struct { 13 | config *core.RuntimeConfig 14 | auth *cert.Authority 15 | } 16 | 17 | // newAggregate returns an aggregate timeout rule instance. 
18 | func newAggregate( 19 | config *core.RuntimeConfig, 20 | auth *cert.Authority, 21 | ) *Aggregate { 22 | return &Aggregate{ 23 | config: config, 24 | auth: auth, 25 | } 26 | } 27 | 28 | func (s *Aggregate) LocalTimeoutRule(view hotstuff.View, syncInfo hotstuff.SyncInfo) (*hotstuff.TimeoutMsg, error) { 29 | sig, err := s.auth.Sign(view.ToBytes()) 30 | if err != nil { 31 | return nil, fmt.Errorf("failed to sign view %d: %w", view, err) 32 | } 33 | timeoutMsg := &hotstuff.TimeoutMsg{ 34 | ID: s.config.ID(), 35 | View: view, 36 | SyncInfo: syncInfo, 37 | ViewSignature: sig, 38 | } 39 | 40 | // generate a second signature that will become part of the aggregateQC 41 | sig, err = s.auth.Sign(timeoutMsg.ToBytes()) 42 | if err != nil { 43 | return nil, fmt.Errorf("failed to sign timeout message: %w", err) 44 | } 45 | timeoutMsg.MsgSignature = sig 46 | 47 | return timeoutMsg, nil 48 | } 49 | 50 | func (s *Aggregate) RemoteTimeoutRule(currentView, timeoutView hotstuff.View, timeouts []hotstuff.TimeoutMsg) (hotstuff.SyncInfo, error) { 51 | tc, err := s.auth.CreateTimeoutCert(timeoutView, timeouts) 52 | if err != nil { 53 | return hotstuff.SyncInfo{}, fmt.Errorf("failed to create timeout certificate: %w", err) 54 | } 55 | aggQC, err := s.auth.CreateAggregateQC(currentView, timeouts) 56 | if err != nil { 57 | return hotstuff.SyncInfo{}, fmt.Errorf("failed to create aggregate quorum certificate: %w", err) 58 | } 59 | si := hotstuff.NewSyncInfoWith(tc) 60 | si.SetAggQC(aggQC) 61 | return si, nil 62 | } 63 | 64 | func (s *Aggregate) VerifySyncInfo(syncInfo hotstuff.SyncInfo) (qc *hotstuff.QuorumCert, view hotstuff.View, timeout bool, err error) { 65 | if timeoutCert, haveTC := syncInfo.TC(); haveTC { 66 | if err := s.auth.VerifyTimeoutCert(timeoutCert); err != nil { 67 | return nil, 0, timeout, fmt.Errorf("failed to verify timeout certificate: %w", err) 68 | } 69 | view = timeoutCert.View() 70 | timeout = true 71 | } 72 | 73 | if aggQC, haveQC := syncInfo.AggQC(); haveQC { 74 | 
highQC, err := s.auth.VerifyAggregateQC(aggQC) 75 | if err != nil { 76 | return nil, 0, timeout, fmt.Errorf("failed to verify aggregate quorum certificate: %w", err) 77 | } 78 | if aggQC.View() >= view { 79 | view = aggQC.View() 80 | timeout = true 81 | } 82 | return &highQC, view, timeout, nil 83 | } 84 | return nil, view, timeout, nil // aggregate quorum certificate not present, so no high QC available 85 | } 86 | -------------------------------------------------------------------------------- /internal/proto/hotstuffpb/hotstuff.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package hotstuffpb; 4 | 5 | import "gorums.proto"; 6 | import "google/protobuf/empty.proto"; 7 | import "internal/proto/clientpb/client.proto"; 8 | import "google/protobuf/timestamp.proto"; 9 | 10 | option go_package = "github.com/relab/hotstuff/internal/proto/hotstuffpb"; 11 | 12 | service Consensus { 13 | rpc Propose(Proposal) returns (google.protobuf.Empty) { 14 | option (gorums.multicast) = true; 15 | } 16 | 17 | rpc Vote(PartialCert) returns (google.protobuf.Empty) { 18 | option (gorums.unicast) = true; 19 | } 20 | 21 | rpc Timeout(TimeoutMsg) returns (google.protobuf.Empty) { 22 | option (gorums.multicast) = true; 23 | } 24 | 25 | rpc NewView(SyncInfo) returns (google.protobuf.Empty) { 26 | option (gorums.unicast) = true; 27 | } 28 | 29 | rpc RequestBlock(BlockHash) returns (Block) { option (gorums.quorumcall) = true; } 30 | } 31 | 32 | message Proposal { 33 | Block Block = 1; 34 | AggQC AggQC = 2; 35 | } 36 | 37 | message BlockHash { bytes Hash = 1; } 38 | 39 | message Block { 40 | bytes Parent = 1; 41 | QuorumCert QC = 2; 42 | uint64 View = 3; 43 | clientpb.Batch Commands = 4; 44 | uint32 Proposer = 5; 45 | google.protobuf.Timestamp Timestamp =6; 46 | } 47 | 48 | message ECDSASignature { 49 | uint32 Signer = 1; 50 | bytes Sig = 2; 51 | } 52 | 53 | message BLS12Signature { bytes Sig = 1; } 54 | 55 | message 
EDDSASignature { 56 | uint32 Signer = 1; 57 | bytes Sig = 2; 58 | } 59 | 60 | message Signature { 61 | oneof Sig { 62 | ECDSASignature ECDSASig = 1; 63 | BLS12Signature BLS12Sig = 2; 64 | EDDSASignature EDDSASig = 3; 65 | } 66 | } 67 | 68 | message PartialCert { 69 | QuorumSignature Sig = 1; 70 | bytes Hash = 2; 71 | } 72 | 73 | message ECDSAMultiSignature { repeated ECDSASignature Sigs = 1; } 74 | 75 | message EDDSAMultiSignature {repeated EDDSASignature Sigs = 1;} 76 | 77 | message BLS12AggregateSignature { 78 | bytes Sig = 1; 79 | bytes participants = 2; 80 | } 81 | 82 | message QuorumSignature { 83 | oneof Sig { 84 | ECDSAMultiSignature ECDSASigs = 1; 85 | BLS12AggregateSignature BLS12Sig = 2; 86 | EDDSAMultiSignature EDDSASigs =3; 87 | } 88 | } 89 | 90 | message QuorumCert { 91 | QuorumSignature Sig = 1; 92 | uint64 View = 2; 93 | bytes Hash = 3; 94 | } 95 | 96 | message TimeoutCert { 97 | QuorumSignature Sig = 1; 98 | uint64 View = 2; 99 | } 100 | 101 | message TimeoutMsg { 102 | uint64 View = 1; 103 | SyncInfo SyncInfo = 2; 104 | QuorumSignature ViewSig = 3; 105 | QuorumSignature MsgSig = 4; 106 | } 107 | 108 | message SyncInfo { 109 | QuorumCert QC = 1; 110 | TimeoutCert TC = 2; 111 | AggQC AggQC = 3; 112 | } 113 | 114 | message AggQC { 115 | map QCs = 1; 116 | QuorumSignature Sig = 2; 117 | uint64 View = 3; 118 | } 119 | -------------------------------------------------------------------------------- /security/crypto/bitfield.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | ) 6 | 7 | // Bitfield is an IDSet implemented by a bitfield. To check if an ID 'i' is present in the set, we simply check 8 | // if the bit at i-1 is set (because IDs start at 1). This scales poorly if IDs are not sequential. 
9 | type Bitfield struct { 10 | data []byte 11 | len int 12 | } 13 | 14 | func (bf *Bitfield) extend(nBytes int) { 15 | // not sure if this is the most efficient way, but it was suggested here: 16 | // https://github.com/golang/go/wiki/SliceTricks#extend 17 | bf.data = append(bf.data, make([]byte, nBytes)...) 18 | } 19 | 20 | func (bf *Bitfield) set(byteIdx, bitIdx int) { 21 | if !bf.isSet(byteIdx, bitIdx) { 22 | bf.len++ 23 | } 24 | bf.data[byteIdx] |= 1 << bitIdx 25 | } 26 | 27 | func (bf Bitfield) isSet(byteIdx, bitIdx int) bool { 28 | return bf.data[byteIdx]&(1< 0, 62 | lm: newLatencies, 63 | locs: locations, 64 | } 65 | } 66 | 67 | // Latency returns the latency between nodes a and b. 68 | // If a or b are not valid nodes, the function will panic. 69 | func (lm Matrix) Latency(a, b hotstuff.ID) time.Duration { 70 | return lm.lm[a-1][b-1] 71 | } 72 | 73 | // Location returns the location of the node with the given ID. 74 | // If the ID is 0 or the latency matrix is not enabled, the function will return the default location. 75 | // If the ID is out of range, the function will panic. 76 | func (lm Matrix) Location(id hotstuff.ID) string { 77 | if id == 0 || !lm.enabled { 78 | return DefaultLocation 79 | } else if int(id) > len(lm.locs) { 80 | panic(fmt.Sprintf("ID %d out of range", id)) 81 | } 82 | return lm.locs[id-1] 83 | } 84 | 85 | // Enabled returns true if a latency matrix was provided. 86 | func (lm Matrix) Enabled() bool { 87 | return lm.enabled 88 | } 89 | 90 | // Delay sleeps for the duration of the latency between nodes a and b. 
91 | func (lm Matrix) Delay(a, b hotstuff.ID) { 92 | if !lm.Enabled() { 93 | return 94 | } 95 | delay := lm.Latency(a, b) 96 | time.Sleep(delay) 97 | } 98 | -------------------------------------------------------------------------------- /block.go: -------------------------------------------------------------------------------- 1 | package hotstuff 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/binary" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/relab/hotstuff/internal/proto/clientpb" 10 | ) 11 | 12 | // Block contains a propsed "command", metadata for the protocol, and a link to the "parent" block. 13 | type Block struct { 14 | // keep a copy of the hash to avoid hashing multiple times 15 | hash Hash 16 | parent Hash 17 | proposer ID 18 | batch *clientpb.Batch 19 | cert QuorumCert 20 | view View 21 | ts time.Time 22 | } 23 | 24 | // NewBlock creates a new Block 25 | func NewBlock(parent Hash, cert QuorumCert, batch *clientpb.Batch, view View, proposer ID) *Block { 26 | b := &Block{ 27 | parent: parent, 28 | cert: cert, 29 | batch: batch, 30 | view: view, 31 | proposer: proposer, 32 | ts: time.Now(), 33 | } 34 | // cache the hash immediately because it is too racy to do it in Hash() 35 | b.hash = sha256.Sum256(b.ToBytes()) 36 | return b 37 | } 38 | 39 | func (b *Block) SetTimestamp(ts time.Time) { 40 | b.ts = ts 41 | // recalculate the hash since the timestamp is part of the block 42 | b.hash = sha256.Sum256(b.ToBytes()) 43 | } 44 | 45 | func (b *Block) String() string { 46 | return fmt.Sprintf( 47 | "Block{ hash: %s parent: %s, proposer: %d, view: %d , cert: %v }", 48 | b.Hash().SmallString(), 49 | b.parent.SmallString(), 50 | b.proposer, 51 | b.view, 52 | b.cert, 53 | ) 54 | } 55 | 56 | // Hash returns the hash of the Block 57 | func (b *Block) Hash() Hash { 58 | return b.hash 59 | } 60 | 61 | // Proposer returns the id of the replica who proposed the block. 
62 | func (b *Block) Proposer() ID { 63 | return b.proposer 64 | } 65 | 66 | // Parent returns the hash of the parent Block 67 | func (b *Block) Parent() Hash { 68 | return b.parent 69 | } 70 | 71 | // Commands returns a batch of commands. 72 | func (b *Block) Commands() *clientpb.Batch { // TODO(meling): return a slice of commands 73 | return b.batch 74 | } 75 | 76 | // QuorumCert returns the quorum certificate in the block 77 | func (b *Block) QuorumCert() QuorumCert { 78 | return b.cert 79 | } 80 | 81 | // View returns the view in which the Block was proposed 82 | func (b *Block) View() View { 83 | return b.view 84 | } 85 | 86 | // Timestamp returns the timestamp of the block 87 | func (b *Block) Timestamp() time.Time { 88 | return b.ts 89 | } 90 | 91 | // ToBytes returns the raw byte form of the Block, to be used for hashing, etc. 92 | func (b *Block) ToBytes() []byte { 93 | buf := b.parent[:] 94 | var proposerBuf [4]byte 95 | binary.LittleEndian.PutUint32(proposerBuf[:], uint32(b.proposer)) 96 | buf = append(buf, proposerBuf[:]...) 97 | var viewBuf [8]byte 98 | binary.LittleEndian.PutUint64(viewBuf[:], uint64(b.view)) 99 | buf = append(buf, viewBuf[:]...) 100 | buf = append(buf, b.batch.Marshal()...) // may panic 101 | buf = append(buf, b.cert.ToBytes()...) 102 | var tsBuf [8]byte 103 | binary.LittleEndian.PutUint64(tsBuf[:], uint64(b.ts.UnixNano())) 104 | buf = append(buf, tsBuf[:]...) 105 | return buf 106 | } 107 | -------------------------------------------------------------------------------- /metrics/plotting/throughputvslatency.go: -------------------------------------------------------------------------------- 1 | package plotting 2 | 3 | import ( 4 | "fmt" 5 | "path" 6 | "time" 7 | 8 | "github.com/relab/hotstuff/metrics/types" 9 | "gonum.org/v1/plot" 10 | "gonum.org/v1/plot/plotter" 11 | "gonum.org/v1/plot/plotutil" 12 | ) 13 | 14 | // ThroughputVSLatencyPlot is a plotter that plots throughput vs time. 
15 | type ThroughputVSLatencyPlot struct { 16 | startTimes StartTimes 17 | measurements MeasurementMap 18 | } 19 | 20 | // NewThroughputVSLatencyPlot returns a new throughput plotter. 21 | func NewThroughputVSLatencyPlot() ThroughputVSLatencyPlot { 22 | return ThroughputVSLatencyPlot{ 23 | startTimes: NewStartTimes(), 24 | measurements: NewMeasurementMap(), 25 | } 26 | } 27 | 28 | // Add adds a measurement to the plotter. 29 | func (p *ThroughputVSLatencyPlot) Add(measurement any) { 30 | p.startTimes.Add(measurement) 31 | 32 | m, ok := measurement.(Measurement) 33 | if !ok { 34 | return 35 | } 36 | 37 | id := m.GetEvent().GetID() 38 | 39 | switch measurement.(type) { 40 | case *types.LatencyMeasurement: 41 | if !m.GetEvent().GetClient() { 42 | // ignore replica latency 43 | return 44 | } 45 | case *types.ThroughputMeasurement: 46 | if m.GetEvent().GetClient() { 47 | // ignore client throughput 48 | return 49 | } 50 | } 51 | 52 | p.measurements.Add(id, m) 53 | } 54 | 55 | // PlotAverage plots the average throughput of all replicas at specified time intervals. 
56 | func (p *ThroughputVSLatencyPlot) PlotAverage(filename string, measurementInterval time.Duration) (err error) { 57 | const ( 58 | xlabel = "Throughput (commands/second)" 59 | ylabel = "Latency (ms)" 60 | ) 61 | if path.Ext(filename) == ".csv" { 62 | return CSVPlot(filename, []string{xlabel, ylabel}, func() plotter.XYer { 63 | return avgThroughputVSAvgLatency(p, measurementInterval) 64 | }) 65 | } 66 | return GonumPlot(filename, xlabel, ylabel, func(plt *plot.Plot) error { 67 | if err := plotutil.AddScatters(plt, avgThroughputVSAvgLatency(p, measurementInterval)); err != nil { 68 | return fmt.Errorf("failed to add scatter plot: %w", err) 69 | } 70 | return nil 71 | }) 72 | } 73 | 74 | func avgThroughputVSAvgLatency(p *ThroughputVSLatencyPlot, interval time.Duration) plotter.XYer { 75 | groups := GroupByTimeInterval(&p.startTimes, p.measurements, interval) 76 | points := make(xyer, 0, len(groups)) 77 | for _, group := range groups { 78 | var ( 79 | latencySum float64 80 | latencyNum uint64 81 | throughputSum float64 82 | throughputNum uint64 83 | ) 84 | for _, measurement := range group.Measurements { 85 | switch m := measurement.(type) { 86 | case *types.LatencyMeasurement: 87 | latencySum += m.GetLatency() * float64(m.GetCount()) 88 | latencyNum += m.GetCount() 89 | case *types.ThroughputMeasurement: 90 | throughputSum += float64(m.GetCommands()) / m.GetDuration().AsDuration().Seconds() 91 | throughputNum++ 92 | } 93 | } 94 | if throughputNum > 0 && latencyNum > 0 { 95 | points = append(points, point{ 96 | x: throughputSum / float64(throughputNum), 97 | y: latencySum / float64(latencyNum), 98 | }) 99 | } 100 | } 101 | return points 102 | } 103 | -------------------------------------------------------------------------------- /internal/cli/worker.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "bufio" 5 | "log" 6 | "os" 7 | "time" 8 | 9 | "github.com/relab/hotstuff/core/logging" 10 | 
"github.com/relab/hotstuff/internal/orchestration" 11 | "github.com/relab/hotstuff/internal/profiling" 12 | "github.com/relab/hotstuff/internal/protostream" 13 | "github.com/relab/hotstuff/metrics" 14 | "github.com/spf13/cobra" 15 | ) 16 | 17 | var ( 18 | dataPath string 19 | cpuProfile string 20 | memProfile string 21 | trace string 22 | fgprofProfile string 23 | 24 | enableMetrics []string 25 | measurementInterval time.Duration 26 | ) 27 | 28 | // workerCmd represents the worker command 29 | var workerCmd = &cobra.Command{ 30 | Hidden: true, 31 | Use: "worker", 32 | Short: "Run a worker.", 33 | Long: `Starts a worker that reads commands from stdin and writes responses to stdout. 34 | This is only intended to be used by a controller (hotstuff run).`, 35 | Run: func(_ *cobra.Command, _ []string) { 36 | runWorker() 37 | }, 38 | } 39 | 40 | func init() { 41 | rootCmd.AddCommand(workerCmd) 42 | 43 | // Here you will define your flags and configuration settings. 44 | 45 | // Cobra supports Persistent Flags which will work for this command 46 | // and all subcommands, e.g.: 47 | // workerCmd.PersistentFlags().String("foo", "", "A help for foo") 48 | 49 | // Cobra supports local flags which will only run when this command 50 | // is called directly, e.g.: 51 | // workerCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") 52 | workerCmd.Flags().StringVar(&dataPath, "data-path", "", "Path to store experiment data.") 53 | workerCmd.Flags().StringVar(&cpuProfile, "cpu-profile", "", "Path to store a CPU profile") 54 | workerCmd.Flags().StringVar(&memProfile, "mem-profile", "", "Path to store a memory profile") 55 | workerCmd.Flags().StringVar(&trace, "trace", "", "Path to store a trace") 56 | workerCmd.Flags().StringVar(&fgprofProfile, "fgprof-profile", "", "Path to store a fgprof profile") 57 | 58 | workerCmd.Flags().StringSliceVar(&enableMetrics, "metrics", nil, "the metrics to enable") 59 | workerCmd.Flags().DurationVar(&measurementInterval, 
"measurement-interval", 0, "the interval between measurements") 60 | } 61 | 62 | func runWorker() { 63 | stopProfilers, err := profiling.StartProfilers(cpuProfile, memProfile, trace, fgprofProfile) 64 | checkf("failed to start profilers: %v", err) 65 | defer func() { 66 | err = stopProfilers() 67 | checkf("failed to stop profilers: %v", err) 68 | }() 69 | 70 | metricsLogger := metrics.NopLogger() 71 | if dataPath != "" { 72 | f, err := os.OpenFile(dataPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) 73 | checkf("failed to create data path: %v", err) 74 | writer := bufio.NewWriter(f) 75 | metricsLogger, err = metrics.NewJSONLogger(writer, logging.New("json")) 76 | defer func() { 77 | err = metricsLogger.Close() 78 | checkf("failed to close metrics logger: %v", err) 79 | err = writer.Flush() 80 | checkf("failed to flush writer: %v", err) 81 | err = f.Close() 82 | checkf("failed to close writer: %v", err) 83 | }() 84 | } 85 | 86 | worker := orchestration.NewWorker(protostream.NewWriter(os.Stdout), protostream.NewReader(os.Stdin), metricsLogger, enableMetrics, measurementInterval) 87 | err = worker.Run() 88 | if err != nil { 89 | log.Println(err) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /protocol/consensus/committer.go: -------------------------------------------------------------------------------- 1 | package consensus 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/relab/hotstuff" 8 | "github.com/relab/hotstuff/core/eventloop" 9 | "github.com/relab/hotstuff/core/logging" 10 | "github.com/relab/hotstuff/internal/proto/clientpb" 11 | "github.com/relab/hotstuff/protocol" 12 | "github.com/relab/hotstuff/security/blockchain" 13 | ) 14 | 15 | // Committer commits the correct block for a view. 
16 | type Committer struct { 17 | eventLoop *eventloop.EventLoop 18 | logger logging.Logger 19 | blockchain *blockchain.Blockchain 20 | viewStates *protocol.ViewStates 21 | ruler CommitRuler 22 | } 23 | 24 | func NewCommitter( 25 | eventLoop *eventloop.EventLoop, 26 | logger logging.Logger, 27 | blockchain *blockchain.Blockchain, 28 | viewStates *protocol.ViewStates, 29 | ruler CommitRuler, 30 | ) *Committer { 31 | return &Committer{ 32 | eventLoop: eventLoop, 33 | blockchain: blockchain, 34 | ruler: ruler, 35 | logger: logger, 36 | viewStates: viewStates, 37 | } 38 | } 39 | 40 | // TryCommit stores the given block in the local blockchain and applies the 41 | // CommitRule to identify the youngest ancestor block eligible to be committed. 42 | // This eligible block is then used as the starting point for recursively 43 | // committing its uncommitted ancestor blocks. 44 | func (cm *Committer) TryCommit(block *hotstuff.Block) error { 45 | cm.logger.Debugf("TryCommit: %v", block) 46 | cm.blockchain.Store(block) 47 | // check commit rule and get the next block to commit. If it was nil, do nothing. 48 | if blockToCommit := cm.ruler.CommitRule(block); blockToCommit != nil { 49 | // recursively commit the block's ancestors before committing the block itself 50 | if err := cm.commit(blockToCommit); err != nil { 51 | return fmt.Errorf("failed to commit: %w", err) 52 | } 53 | } 54 | return nil 55 | } 56 | 57 | // Stores the block before further execution. 
58 | func (cm *Committer) commit(block *hotstuff.Block) error { 59 | err := cm.commitInner(block, cm.viewStates.CommittedBlock()) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | forkedBlocks := cm.blockchain.PruneToHeight( 65 | cm.viewStates.CommittedBlock().View(), 66 | block.View(), 67 | ) 68 | for _, block := range forkedBlocks { 69 | cm.eventLoop.AddEvent(clientpb.AbortEvent{ 70 | Batch: block.Commands(), 71 | }) 72 | } 73 | return nil 74 | } 75 | 76 | // recursive helper for commit 77 | func (cm *Committer) commitInner(block, committedBlock *hotstuff.Block) error { 78 | if committedBlock.View() >= block.View() { 79 | return nil 80 | } 81 | if parent, ok := cm.blockchain.Get(block.Parent()); ok { 82 | err := cm.commitInner(parent, committedBlock) 83 | if err != nil { 84 | return err 85 | } 86 | } else { 87 | return fmt.Errorf("failed to locate block: %s", block.Parent().SmallString()) 88 | } 89 | cm.logger.Debug("EXEC: ", block) 90 | batch := block.Commands() 91 | // CommitEvent holds the entire block and is used in twins since it needs the hash. 92 | cm.eventLoop.AddEvent(hotstuff.CommitEvent{Block: block}) 93 | // ExecuteEvent is a solution to cyclic dependencies between hotstuff package and clientpb. 
94 | cm.eventLoop.AddEvent(clientpb.ExecuteEvent{Batch: batch}) 95 | cm.eventLoop.AddEvent(hotstuff.ConsensusLatencyEvent{Latency: time.Since(block.Timestamp())}) 96 | cm.viewStates.UpdateCommittedBlock(block) 97 | return nil 98 | } 99 | -------------------------------------------------------------------------------- /internal/latency/latency_test.go: -------------------------------------------------------------------------------- 1 | package latency 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/relab/hotstuff" 7 | ) 8 | 9 | func TestLatencySymmetry(t *testing.T) { 10 | for _, fromLoc := range allLocations { 11 | for _, toLoc := range allLocations { 12 | latency := Between(fromLoc, toLoc) 13 | reverse := Between(toLoc, fromLoc) 14 | if latency != reverse { 15 | t.Errorf("LatencyCity(%s, %s) != LatencyCity(%s, %s) ==> %v != %v", fromLoc, toLoc, toLoc, fromLoc, latency, reverse) 16 | } 17 | } 18 | } 19 | for i := range allLocations { 20 | for j := range allLocations { 21 | latency := allLatencies[i][j] 22 | reverse := allLatencies[j][i] 23 | if latency != reverse { 24 | t.Errorf("Latency(%d, %d) != Latency(%d, %d) ==> %v != %v", i, j, j, i, latency, reverse) 25 | } 26 | } 27 | } 28 | } 29 | 30 | func TestLatencyMatrixFrom(t *testing.T) { 31 | locations := []string{"Melbourne", "Toronto", "Prague", "Paris", "Tokyo", "Amsterdam", "Auckland", "Moscow", "Stockholm", "London"} 32 | xm := Matrix{} 33 | if xm.Enabled() { 34 | t.Errorf("Matrix{}.Enabled() = true, want false") 35 | } 36 | lm := MatrixFrom(locations) 37 | if !lm.Enabled() { 38 | t.Errorf("MatrixFrom(%v).Enabled() = false, want true", locations) 39 | } 40 | if len(lm.lm) != len(locations) { 41 | t.Errorf("len(MatrixFrom(%v)) = %d, want %d", locations, len(lm.lm), len(locations)) 42 | } 43 | for i, fromLoc := range locations { 44 | id1 := hotstuff.ID(i + 1) 45 | for j, toLoc := range locations { 46 | id2 := hotstuff.ID(j + 1) 47 | // We can lookup the latency Between location names using the global 
allLatencies matrix 48 | // or by using the Latency method on the latency.Matrix created by MatrixFrom. 49 | locLatency := Between(fromLoc, toLoc) 50 | lmLatency := lm.Latency(id1, id2) 51 | if locLatency != lmLatency { 52 | t.Errorf("Latency(%s, %s) != lm.LatencyID(%d, %d) ==> %v != %v", fromLoc, toLoc, id1, id2, locLatency, lmLatency) 53 | } 54 | } 55 | } 56 | } 57 | 58 | func TestLatencyMatrixFromDefault(t *testing.T) { 59 | lm := MatrixFrom([]string{DefaultLocation}) 60 | if lm.Enabled() { 61 | t.Errorf("Matrix{}.Enabled() = true, want false") 62 | } 63 | if len(lm.lm) != 0 { 64 | t.Errorf("len(Matrix(%v)) = %d, want 0", []string{DefaultLocation}, len(lm.lm)) 65 | } 66 | } 67 | 68 | func TestLocation(t *testing.T) { 69 | locations := []string{"Melbourne", "Toronto", "Prague", "Paris", "Tokyo", "Amsterdam", "Auckland", "Moscow", "Stockholm", "London"} 70 | lm := MatrixFrom(locations) 71 | for i := range len(locations) { 72 | id := hotstuff.ID(i + 1) 73 | loc := lm.Location(id) 74 | if loc != locations[i] { 75 | t.Errorf("Location(%d) = %s, want %s", id, loc, locations[i]) 76 | } 77 | } 78 | if lm.Location(0) != DefaultLocation { 79 | t.Errorf("Location(0) = %s, want %s", lm.Location(0), DefaultLocation) 80 | } 81 | 82 | // test that lm.Location panics if the ID is out of range 83 | outOfRangeID := hotstuff.ID(len(locations) + 1) 84 | defer func() { 85 | if r := recover(); r != nil { 86 | if r != "ID 11 out of range" { 87 | t.Errorf("Recovered from panic: %v, want ID 11 out of range", r) 88 | } 89 | } else { 90 | t.Errorf("Location(%d) did not panic", outOfRangeID) 91 | } 92 | }() 93 | if loc := lm.Location(outOfRangeID); loc != "" { 94 | t.Errorf("Location(%d) = %s, want empty string and panic", outOfRangeID, loc) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /protocol/leaderrotation/reputation.go: -------------------------------------------------------------------------------- 1 | package leaderrotation 2 | 
3 | import ( 4 | "math/rand" 5 | "slices" 6 | 7 | wr "github.com/mroth/weightedrand" 8 | 9 | "github.com/relab/hotstuff" 10 | "github.com/relab/hotstuff/core" 11 | "github.com/relab/hotstuff/core/logging" 12 | "github.com/relab/hotstuff/protocol" 13 | ) 14 | 15 | const NameReputation = "reputation" 16 | 17 | type reputationsMap map[hotstuff.ID]float64 18 | 19 | type RepBased struct { 20 | viewStates *protocol.ViewStates 21 | config *core.RuntimeConfig 22 | logger logging.Logger 23 | 24 | chainLength int 25 | prevCommitHead *hotstuff.Block 26 | reputations reputationsMap // latest reputations 27 | } 28 | 29 | // TODO: should GetLeader be thread-safe? 30 | 31 | // GetLeader returns the id of the leader in the given view 32 | func (r *RepBased) GetLeader(view hotstuff.View) hotstuff.ID { 33 | block := r.viewStates.CommittedBlock() 34 | if block.View() > view-hotstuff.View(r.chainLength) { 35 | // TODO: it could be possible to lookup leaders for older views if we 36 | // store a copy of the reputations in a metadata field of each block. 37 | r.logger.Error("looking up leaders of old views is not supported") 38 | return 0 39 | } 40 | 41 | numReplicas := r.config.ReplicaCount() 42 | // use round-robin for the first few views until we get a signature 43 | if block.QuorumCert().Signature() == nil { 44 | return ChooseRoundRobin(view, numReplicas) 45 | } 46 | 47 | voters := block.QuorumCert().Signature().Participants() 48 | numVotes := voters.Len() 49 | 50 | frac := float64((2.0 / 3.0) * float64(numReplicas)) 51 | reputation := ((float64(numVotes) - frac) / frac) 52 | 53 | weights := make([]wr.Choice, 0, numVotes) 54 | voters.ForEach(func(voterID hotstuff.ID) { 55 | // we should only update the reputations once for each commit head. 
56 | if r.prevCommitHead.View() < block.View() { 57 | r.reputations[voterID] += reputation 58 | } 59 | weights = append(weights, wr.Choice{ 60 | Item: voterID, 61 | Weight: uint(r.reputations[voterID] * 10), 62 | }) 63 | }) 64 | 65 | slices.SortFunc(weights, func(a, b wr.Choice) int { 66 | return int(a.Item.(hotstuff.ID) - b.Item.(hotstuff.ID)) 67 | }) 68 | 69 | if r.prevCommitHead.View() < block.View() { 70 | r.prevCommitHead = block 71 | } 72 | 73 | r.logger.Debug(weights) 74 | 75 | chooser, err := wr.NewChooser(weights...) 76 | if err != nil { 77 | r.logger.Error("weightedrand error: ", err) 78 | return 0 79 | } 80 | 81 | seed := r.config.SharedRandomSeed() + int64(view) 82 | rnd := rand.New(rand.NewSource(seed)) 83 | 84 | leader := chooser.PickSource(rnd).(hotstuff.ID) 85 | r.logger.Debugf("picked leader %d for view %d using seed %d", leader, view, seed) 86 | 87 | return leader 88 | } 89 | 90 | // NewRepBased returns a new random reputation-based leader rotation implementation 91 | func NewRepBased( 92 | chainLength int, 93 | 94 | viewStates *protocol.ViewStates, 95 | config *core.RuntimeConfig, 96 | logger logging.Logger, 97 | ) *RepBased { 98 | return &RepBased{ 99 | viewStates: viewStates, 100 | config: config, 101 | logger: logger, 102 | 103 | chainLength: chainLength, 104 | reputations: make(reputationsMap), 105 | prevCommitHead: hotstuff.GetGenesis(), 106 | } 107 | } 108 | 109 | var _ LeaderRotation = (*RepBased)(nil) 110 | -------------------------------------------------------------------------------- /internal/config/viper.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/relab/hotstuff/internal/tree" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | func NewViper() (*ExperimentConfig, error) { 13 | output := viper.GetString("output") 14 | if output != "" { 15 | var err error 16 | output, err = filepath.Abs(output) 17 | if err 
!= nil { 18 | return nil, fmt.Errorf("failed to get absolute path: %v", err) 19 | } 20 | if err = os.MkdirAll(output, 0o755); err != nil { 21 | return nil, fmt.Errorf("failed to create output directory: %v", err) 22 | } 23 | } 24 | 25 | intTreePos := viper.GetIntSlice("tree-pos") 26 | treePos := make([]uint32, len(intTreePos)) 27 | for i, pos := range intTreePos { 28 | treePos[i] = uint32(pos) 29 | } 30 | 31 | cfg := &ExperimentConfig{ 32 | Duration: viper.GetDuration("duration"), 33 | Replicas: viper.GetInt("replicas"), 34 | Clients: viper.GetInt("clients"), 35 | ReplicaHosts: viper.GetStringSlice("replica-hosts"), 36 | ClientHosts: viper.GetStringSlice("client-hosts"), 37 | BranchFactor: viper.GetUint32("bf"), 38 | TreeDelta: viper.GetDuration("tree-delta"), 39 | RandomTree: viper.GetBool("random-tree"), 40 | Output: output, 41 | TreePositions: treePos, 42 | Worker: viper.GetBool("worker"), 43 | Exe: viper.GetString("exe"), 44 | SSHConfig: viper.GetString("ssh-config"), 45 | LogLevel: viper.GetString("log-level"), 46 | CPUProfile: viper.GetBool("cpu-profile"), 47 | MemProfile: viper.GetBool("mem-profile"), 48 | Trace: viper.GetBool("trace"), 49 | FgProfProfile: viper.GetBool("fgprof-profile"), 50 | Metrics: viper.GetStringSlice("metrics"), 51 | MeasurementInterval: viper.GetDuration("measurement-interval"), 52 | BatchSize: viper.GetUint32("batch-size"), 53 | Consensus: viper.GetString("consensus"), 54 | Crypto: viper.GetString("crypto"), 55 | LeaderRotation: viper.GetString("leader-rotation"), 56 | Communication: viper.GetString("communication"), 57 | ConnectTimeout: viper.GetDuration("connect-timeout"), 58 | TimeoutMultiplier: viper.GetFloat64("timeout-multiplier"), 59 | ViewTimeout: viper.GetDuration("view-timeout"), 60 | DurationSamples: viper.GetUint32("duration-samples"), 61 | MaxTimeout: viper.GetDuration("max-timeout"), 62 | FixedTimeout: viper.GetDuration("fixed-timeout"), 63 | SharedSeed: viper.GetInt64("shared-seed"), 64 | PayloadSize: 
viper.GetUint32("payload-size"), 65 | MaxConcurrent: viper.GetUint32("max-concurrent"), 66 | RateLimit: viper.GetFloat64("rate-limit"), 67 | RateStep: viper.GetFloat64("rate-step"), 68 | RateStepInterval: viper.GetDuration("rate-step-interval"), 69 | ClientTimeout: viper.GetDuration("client-timeout"), 70 | UseAggQC: viper.GetBool("agg-qc"), 71 | UseTLS: true, 72 | } 73 | 74 | if len(cfg.ReplicaHosts) == 0 { 75 | cfg.ReplicaHosts = []string{"localhost"} 76 | } 77 | if len(cfg.ClientHosts) == 0 { 78 | cfg.ClientHosts = []string{"localhost"} 79 | } 80 | if len(cfg.TreePositions) == 0 { 81 | cfg.TreePositions = tree.DefaultTreePosUint32(cfg.Replicas) 82 | } 83 | return cfg, nil 84 | } 85 | -------------------------------------------------------------------------------- /internal/testutil/mocksender_test.go: -------------------------------------------------------------------------------- 1 | package testutil_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "testing" 7 | 8 | "github.com/relab/hotstuff" 9 | "github.com/relab/hotstuff/internal/testutil" 10 | "github.com/relab/hotstuff/security/crypto" 11 | ) 12 | 13 | func TestPropose(t *testing.T) { 14 | r := testutil.WireUpEssentials(t, 1, crypto.NameECDSA) 15 | block := testutil.CreateBlock(t, r.Authority()) 16 | r.MockSender().Propose(&hotstuff.ProposeMsg{ 17 | ID: 1, 18 | Block: block, 19 | }) 20 | // check if a message was sent at all 21 | if len(r.MockSender().MessagesSent()) != 1 { 22 | t.Error("message not sent") 23 | } 24 | // check if it was the correct type of message 25 | msg, ok := r.MockSender().MessagesSent()[0].(hotstuff.ProposeMsg) 26 | if !ok { 27 | t.Error("incorrect message type") 28 | } 29 | // below statements compare the data in the message 30 | if msg.ID != 1 { 31 | t.Error("incorrect sender") 32 | } 33 | if !bytes.Equal(block.ToBytes(), msg.Block.ToBytes()) { 34 | t.Error("incorrect data") 35 | } 36 | } 37 | 38 | func TestVote(t *testing.T) { 39 | r := testutil.WireUpEssentials(t, 1, 
crypto.NameECDSA) 40 | block := testutil.CreateBlock(t, r.Authority()) 41 | pc := testutil.CreatePC(t, block, r.Authority()) 42 | err := r.MockSender().Vote(2, pc) 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | // check if a message was sent at all 47 | if len(r.MockSender().MessagesSent()) != 1 { 48 | t.Error("message not sent") 49 | } 50 | // check if it was the correct type of message 51 | msg, ok := r.MockSender().MessagesSent()[0].(hotstuff.PartialCert) 52 | if !ok { 53 | t.Error("incorrect message type") 54 | } 55 | // below statements compare the data in the message 56 | if msg.Signer() != 1 { 57 | t.Error("incorrect MockSender()") 58 | } 59 | 60 | if !bytes.Equal(msg.ToBytes(), pc.ToBytes()) { 61 | t.Error("incorrect data") 62 | } 63 | } 64 | 65 | func TestTimeout(t *testing.T) { 66 | r := testutil.WireUpEssentials(t, 1, crypto.NameECDSA) 67 | r.MockSender().Timeout(hotstuff.TimeoutMsg{ 68 | ID: 1, 69 | View: 1, 70 | }) 71 | // check if a message was sent at all 72 | if len(r.MockSender().MessagesSent()) != 1 { 73 | t.Error("message not sent") 74 | } 75 | // check if it was the correct type of message 76 | msg, ok := r.MockSender().MessagesSent()[0].(hotstuff.TimeoutMsg) 77 | if !ok { 78 | t.Error("incorrect message type") 79 | } 80 | // below statements compare the data in the message 81 | if msg.ID != 1 { 82 | t.Error("incorrect MockSender()") 83 | } 84 | 85 | if msg.View != 1 { 86 | t.Error("incorrect view") 87 | } 88 | } 89 | 90 | func TestSub(t *testing.T) { 91 | sender := testutil.NewMockSender(1, 2, 3, 4) 92 | var err error 93 | _, err = sender.Sub([]hotstuff.ID{2, 3}) 94 | if err != nil { 95 | t.Fatal(err) 96 | } 97 | 98 | _, err = sender.Sub([]hotstuff.ID{5, 6}) 99 | if err == nil { 100 | t.Fatal("expected an error") 101 | } 102 | } 103 | 104 | func TestRequestBlock(t *testing.T) { 105 | set := testutil.NewEssentialsSet(t, 4, crypto.NameECDSA) 106 | first := set[0] 107 | second := set[1] 108 | block := testutil.CreateBlock(t, 
second.Authority()) 109 | second.Blockchain().Store(block) 110 | 111 | _, ok := first.MockSender().RequestBlock(context.TODO(), block.Hash()) 112 | if !ok { 113 | t.Fatal("expected a block to be returned") 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /protocol/rules/fasthotstuff.go: -------------------------------------------------------------------------------- 1 | package rules 2 | 3 | import ( 4 | "github.com/relab/hotstuff" 5 | "github.com/relab/hotstuff/core" 6 | "github.com/relab/hotstuff/core/logging" 7 | "github.com/relab/hotstuff/internal/proto/clientpb" 8 | "github.com/relab/hotstuff/protocol/consensus" 9 | "github.com/relab/hotstuff/security/blockchain" 10 | ) 11 | 12 | const NameFastHotStuff = "fasthotstuff" 13 | 14 | // FastHotStuff is an implementation of the Fast-HotStuff protocol. 15 | // See the paper for details: https://arxiv.org/abs/2010.11454 16 | type FastHotStuff struct { 17 | logger logging.Logger 18 | config *core.RuntimeConfig 19 | blockchain *blockchain.Blockchain 20 | } 21 | 22 | // NewFastHotStuff returns a new instance of the FastHotStuff consensus ruleset. 23 | func NewFastHotStuff( 24 | logger logging.Logger, 25 | config *core.RuntimeConfig, 26 | blockchain *blockchain.Blockchain, 27 | ) *FastHotStuff { 28 | if !config.HasAggregateQC() { 29 | panic(NameFastHotStuff + " requires aggregated quorum certificates") 30 | } 31 | return &FastHotStuff{ 32 | logger: logger, 33 | config: config, 34 | blockchain: blockchain, 35 | } 36 | } 37 | 38 | func (fhs *FastHotStuff) qcRef(qc hotstuff.QuorumCert) (*hotstuff.Block, bool) { 39 | if (hotstuff.Hash{}) == qc.BlockHash() { 40 | return nil, false 41 | } 42 | return fhs.blockchain.Get(qc.BlockHash()) 43 | } 44 | 45 | // CommitRule decides whether an ancestor of the block can be committed. 
46 | func (fhs *FastHotStuff) CommitRule(block *hotstuff.Block) *hotstuff.Block { 47 | parent, ok := fhs.qcRef(block.QuorumCert()) 48 | if !ok { 49 | return nil 50 | } 51 | fhs.logger.Debug("PRECOMMIT: ", parent) 52 | grandparent, ok := fhs.qcRef(parent.QuorumCert()) 53 | if !ok { 54 | return nil 55 | } 56 | if block.Parent() == parent.Hash() && block.View() == parent.View()+1 && 57 | parent.Parent() == grandparent.Hash() && parent.View() == grandparent.View()+1 { 58 | fhs.logger.Debug("COMMIT: ", grandparent) 59 | return grandparent 60 | } 61 | return nil 62 | } 63 | 64 | // VoteRule decides whether to vote for the proposal or not. 65 | func (fhs *FastHotStuff) VoteRule(view hotstuff.View, proposal hotstuff.ProposeMsg) bool { 66 | // The base implementation verifies both regular QCs and AggregateQCs, and asserts that the QC embedded in the 67 | // block is the same as the highQC found in the aggregateQC. 68 | if proposal.AggregateQC != nil { 69 | hqcBlock, ok := fhs.blockchain.Get(proposal.Block.QuorumCert().BlockHash()) 70 | return ok && fhs.blockchain.Extends(proposal.Block, hqcBlock) 71 | } 72 | return proposal.Block.View() >= view && 73 | proposal.Block.View() == proposal.Block.QuorumCert().View()+1 74 | } 75 | 76 | // ChainLength returns the number of blocks that need to be chained together in order to commit. 77 | func (fhs *FastHotStuff) ChainLength() int { 78 | return 2 79 | } 80 | 81 | // ProposeRule returns a new fast hotstuff proposal based on the current view, (aggregate) quorum certificate, and command batch. 
82 | func (fhs *FastHotStuff) ProposeRule(view hotstuff.View, cert hotstuff.SyncInfo, cmd *clientpb.Batch) (proposal hotstuff.ProposeMsg, ok bool) { 83 | qc, ok := cert.QC() 84 | if !ok { 85 | return proposal, false 86 | } 87 | proposal = hotstuff.NewProposeMsg(fhs.config.ID(), view, qc, cmd) 88 | if aggQC, ok := cert.AggQC(); ok { 89 | proposal.AggregateQC = &aggQC 90 | } 91 | return proposal, true 92 | } 93 | 94 | var _ consensus.Ruleset = (*FastHotStuff)(nil) 95 | -------------------------------------------------------------------------------- /protocol/votingmachine/votingmachine_test.go: -------------------------------------------------------------------------------- 1 | package votingmachine_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/relab/hotstuff" 9 | "github.com/relab/hotstuff/core/eventloop" 10 | "github.com/relab/hotstuff/internal/testutil" 11 | "github.com/relab/hotstuff/protocol" 12 | "github.com/relab/hotstuff/protocol/votingmachine" 13 | "github.com/relab/hotstuff/security/crypto" 14 | ) 15 | 16 | func TestCollectVote(t *testing.T) { 17 | signers := testutil.NewEssentialsSet(t, 4, crypto.NameECDSA) 18 | leader := signers[0] 19 | viewStates, err := protocol.NewViewStates( 20 | leader.Blockchain(), 21 | leader.Authority(), 22 | ) 23 | if err != nil { 24 | t.Fatal(err) 25 | } 26 | votingMachine := votingmachine.New( 27 | leader.Logger(), 28 | leader.EventLoop(), 29 | leader.RuntimeCfg(), 30 | leader.Blockchain(), 31 | leader.Authority(), 32 | viewStates, 33 | ) 34 | 35 | newViewTriggered := false 36 | eventloop.Register(leader.EventLoop(), func(_ hotstuff.NewViewMsg) { 37 | newViewTriggered = true 38 | }) 39 | 40 | block := testutil.CreateBlock(t, leader.Authority()) 41 | leader.Blockchain().Store(block) 42 | 43 | for _, signer := range signers { 44 | pc := testutil.CreatePC(t, block, signer.Authority()) 45 | vote := hotstuff.VoteMsg{ 46 | ID: signer.RuntimeCfg().ID(), 47 | PartialCert: pc, 48 | } 49 | 
votingMachine.CollectVote(vote) 50 | } 51 | 52 | ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) 53 | defer cancel() 54 | leader.EventLoop().Run(ctx) 55 | 56 | if !newViewTriggered { 57 | t.Fatal("expected advancing the view on quorum") 58 | } 59 | } 60 | 61 | func TestCollectVoteWithDuplicates(t *testing.T) { 62 | signers := testutil.NewEssentialsSet(t, 4, crypto.NameECDSA) 63 | leader := signers[0] 64 | viewStates, err := protocol.NewViewStates( 65 | leader.Blockchain(), 66 | leader.Authority(), 67 | ) 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | votingMachine := votingmachine.New( 72 | leader.Logger(), 73 | leader.EventLoop(), 74 | leader.RuntimeCfg(), 75 | leader.Blockchain(), 76 | leader.Authority(), 77 | viewStates, 78 | ) 79 | 80 | newViewTriggered := false 81 | eventloop.Register(leader.EventLoop(), func(_ hotstuff.NewViewMsg) { 82 | newViewTriggered = true 83 | }) 84 | 85 | block := testutil.CreateBlock(t, leader.Authority()) 86 | leader.Blockchain().Store(block) 87 | 88 | // Send duplicate votes from the first signer 89 | // This will cause an error unless the duplicate is filtered 90 | firstSigner := signers[0] 91 | pc := testutil.CreatePC(t, block, firstSigner.Authority()) 92 | vote := hotstuff.VoteMsg{ 93 | ID: firstSigner.RuntimeCfg().ID(), 94 | PartialCert: pc, 95 | } 96 | votingMachine.CollectVote(vote) 97 | 98 | // Collect votes from all signers 99 | for _, signer := range signers { 100 | pc := testutil.CreatePC(t, block, signer.Authority()) 101 | vote := hotstuff.VoteMsg{ 102 | ID: signer.RuntimeCfg().ID(), 103 | PartialCert: pc, 104 | } 105 | votingMachine.CollectVote(vote) 106 | } 107 | 108 | ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) 109 | defer cancel() 110 | leader.EventLoop().Run(ctx) 111 | 112 | if !newViewTriggered { 113 | t.Fatal("expected advancing the view on quorum even with duplicate votes") 114 | } 115 | } 116 | 
--------------------------------------------------------------------------------