├── version.json ├── .github └── workflows │ ├── go-check-config.json │ ├── go-test-config.json │ ├── releaser.yml │ ├── tagpush.yml │ ├── go-check.yml │ ├── release-check.yml │ ├── go-test.yml │ ├── go-fuzz.yml │ └── publish-ghcr.yml ├── test ├── util_norace_test.go ├── util_race_test.go ├── ballast_test.go ├── absent_test.go ├── util_test.go ├── constants_test.go ├── drop_test.go ├── deny_test.go ├── withhold_test.go └── repeat_test.go ├── internal ├── gnark │ ├── README.md │ ├── adapter_test.go │ ├── group.go │ ├── adapter.go │ ├── suite_test.go │ ├── suite.go │ ├── scalar.go │ └── gt.go ├── measurements │ ├── util.go │ ├── util_test.go │ ├── sample_set_test.go │ ├── attributes.go │ └── sample_set.go ├── encoding │ ├── encoding_api_test.go │ ├── metrics.go │ └── encoding_test.go ├── consensus │ ├── tipset.go │ └── options.go ├── clock │ └── clock.go ├── caching │ ├── set_bench_test.go │ ├── grouped_cache_bench_test.go │ ├── grouped_cache.go │ ├── set_test.go │ └── set.go ├── lotus │ └── net.go └── psutil │ └── psutil_test.go ├── certexchange ├── polling │ ├── common.go │ ├── pollstatus_string.go │ ├── discovery.go │ ├── metrics.go │ ├── common_test.go │ ├── predictor_test.go │ ├── predictor.go │ └── subscriber_test.go ├── protocol.go └── metrics.go ├── gpbft ├── mock_gen.go ├── legacy.go ├── validator_api_test.go ├── errors_test.go ├── options_test.go ├── vrf.go ├── vrf_test.go ├── progress.go ├── committee.go ├── progress_test.go ├── legacy_test.go ├── errors.go ├── message_builder_test.go ├── message_builder.go └── ticket_rank.go ├── .gitignore ├── sim ├── signing │ ├── signing.go │ └── bls.go ├── storage_power_gen.go ├── latency │ ├── none.go │ ├── latency.go │ ├── zipf.go │ └── log_normal.go ├── adversary │ ├── validated.go │ ├── absent.go │ ├── adversary.go │ ├── drop.go │ ├── spam.go │ └── deny.go ├── tipset_gen.go └── justification.go ├── Dockerfile ├── codecov.yml ├── logging.go ├── chainexchange ├── chainexchange.go ├── metrics.go └── cbor_gen.go ├── wal.go ├── cmd ├── f3 │ ├── pubsub.go │ ├── tools.go │ ├── main.go │ ├── manifest.go │ └── certs.go └── f3sim │ └── main.go ├── tipsettimestamp.go ├── observer ├── metrics.go ├── options_test.go ├── schema.sql └── query.go ├── blssig ├── verifier_test.go ├── signer.go ├── metrics.go ├── cache_test.go ├── verifier.go └── aggregation.go ├── emulator └── message_cache.go ├── certstore └── metrics.go ├── LICENSE-MIT ├── Makefile ├── pmsg ├── metrics.go └── partial_msg_test.go ├── ec ├── ec.go ├── caching.go ├── powerdelta.go └── powerdelta_test.go ├── certchain └── options.go ├── gen └── main.go ├── README.md ├── tipsettimestamp_test.go ├── store.go └── bootstrap_delay_test.go /version.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "v0.8.10" 3 | } 4 | -------------------------------------------------------------------------------- /.github/workflows/go-check-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "gogenerate": true 3 | } 4 | 5 | -------------------------------------------------------------------------------- /.github/workflows/go-test-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "verbose": false, 3 | "skipOSes": ["windows", "macos"], 4 | "skip32bit": true 5 | } 6 | 7 | -------------------------------------------------------------------------------- /test/util_norace_test.go: -------------------------------------------------------------------------------- 
1 | //go:build !race 2 | 3 | package test 4 | 5 | import "testing" 6 | 7 | func SkipInRaceMode(*testing.T) { 8 | } 9 | -------------------------------------------------------------------------------- /internal/gnark/README.md: -------------------------------------------------------------------------------- 1 | # GNARK backend for [dedis/kyber](https://github.com/dedis/kyber) 2 | 3 | Upstream PR: https://github.com/dedis/kyber/pull/551 4 | -------------------------------------------------------------------------------- /certexchange/polling/common.go: -------------------------------------------------------------------------------- 1 | package polling 2 | 3 | import ( 4 | logging "github.com/ipfs/go-log/v2" 5 | ) 6 | 7 | var log = logging.Logger("f3/certexchange") 8 | -------------------------------------------------------------------------------- /gpbft/mock_gen.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | //go:generate go run github.com/vektra/mockery/v2@v2.53.3 --filename mock_host_test.go --name Host --testonly --inpackage --with-expecter 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # IDE project files 2 | .idea/ 3 | 4 | # Test coverage output; see `test` target in Makefile. 5 | coverage.txt 6 | 7 | # f3 binary 8 | /f3 9 | # f3 manifest 10 | /manifest.json 11 | -------------------------------------------------------------------------------- /test/util_race_test.go: -------------------------------------------------------------------------------- 1 | //go:build race 2 | 3 | package test 4 | 5 | import "testing" 6 | 7 | func SkipInRaceMode(t *testing.T) { 8 | t.Helper() 9 | t.Skip("skipping in -race mode") 10 | } 11 | -------------------------------------------------------------------------------- /gpbft/legacy.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | // LegacyECChain is the old representation of EC chain in earlier releases, kept for 4 | // wire format backward compatibility with Calibration network. 5 | type LegacyECChain []TipSet 6 | -------------------------------------------------------------------------------- /internal/measurements/util.go: -------------------------------------------------------------------------------- 1 | package measurements 2 | 3 | // Must panics if err is non-nil, otherwise returns v. 4 | func Must[V any](v V, err error) V { 5 | if err != nil { 6 | panic(err) 7 | } 8 | return v 9 | } 10 | -------------------------------------------------------------------------------- /test/ballast_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | var ballast []byte 4 | 5 | func init() { 6 | // Allocate 200MiB memory ballast to reduce the frequency of GC, which reduces 7 | // runtime of tests by ~20%. 
8 | ballast = make([]byte, 200<<20) 9 | } 10 | -------------------------------------------------------------------------------- /sim/signing/signing.go: -------------------------------------------------------------------------------- 1 | package signing 2 | 3 | import "github.com/filecoin-project/go-f3/gpbft" 4 | 5 | type Backend interface { 6 | gpbft.Signer 7 | gpbft.Verifier 8 | GenerateKey() (gpbft.PubKey, any) 9 | MarshalPayloadForSigning(nn gpbft.NetworkName, p *gpbft.Payload) []byte 10 | } 11 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.24-bullseye AS build 2 | 3 | WORKDIR /go/src/f3 4 | 5 | COPY go.mod go.sum ./ 6 | RUN go mod download 7 | 8 | COPY . . 9 | RUN go build -o /go/bin/f3 ./cmd/f3 10 | 11 | FROM gcr.io/distroless/cc 12 | COPY --from=build /go/bin/f3 /usr/bin/ 13 | 14 | ENTRYPOINT ["/usr/bin/f3"] 15 | -------------------------------------------------------------------------------- /internal/encoding/encoding_api_test.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import "go.opentelemetry.io/otel/attribute" 4 | 5 | // GetMetricAttribute returns the attribute for metric collection, exported for 6 | // testing purposes. 7 | func (c *ZSTD[T]) GetMetricAttribute() attribute.KeyValue { return c.getMetricAttribute() } 8 | -------------------------------------------------------------------------------- /sim/storage_power_gen.go: -------------------------------------------------------------------------------- 1 | package sim 2 | 3 | import "github.com/filecoin-project/go-f3/gpbft" 4 | 5 | type StoragePowerGenerator func(instance uint64, id gpbft.ActorID) gpbft.StoragePower 6 | 7 | func UniformStoragePower(power gpbft.StoragePower) StoragePowerGenerator { 8 | return func(uint64, gpbft.ActorID) gpbft.StoragePower { 9 | return power 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | patch: 4 | default: 5 | target: auto 6 | threshold: 5% 7 | only_pulls: true 8 | project: 9 | default: 10 | target: auto 11 | threshold: 5% 12 | only_pulls: true 13 | ignore: 14 | - "gen" 15 | - "test" 16 | - "sim" 17 | - "cmd" 18 | - "**/*_test.go" 19 | - "**/cbor_gen.go" 20 | -------------------------------------------------------------------------------- /.github/workflows/releaser.yml: -------------------------------------------------------------------------------- 1 | name: Releaser 2 | 3 | on: 4 | push: 5 | paths: [ 'version.json' ] 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: write 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.sha }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | releaser: 17 | uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0 -------------------------------------------------------------------------------- /.github/workflows/tagpush.yml: -------------------------------------------------------------------------------- 1 | name: Tag Push Checker 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | 8 | permissions: 9 | contents: read 10 | issues: write 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | releaser: 18 | uses: 
ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0 19 | -------------------------------------------------------------------------------- /sim/latency/none.go: -------------------------------------------------------------------------------- 1 | package latency 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/filecoin-project/go-f3/gpbft" 7 | ) 8 | 9 | var ( 10 | _ Model = (*none)(nil) 11 | 12 | // None represents zero no-op latency model. 13 | None = none{} 14 | ) 15 | 16 | // None represents zero latency model. 17 | type none struct{} 18 | 19 | func (l none) Sample(time.Time, gpbft.ActorID, gpbft.ActorID) time.Duration { return 0 } 20 | -------------------------------------------------------------------------------- /internal/measurements/util_test.go: -------------------------------------------------------------------------------- 1 | package measurements_test 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/filecoin-project/go-f3/internal/measurements" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestUtils_Must(t *testing.T) { 12 | require.Panics(t, func() { 13 | measurements.Must("fish", errors.New("🐠")) 14 | }) 15 | require.Equal(t, "fish", measurements.Must("fish", nil)) 16 | } 17 | -------------------------------------------------------------------------------- /sim/adversary/validated.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import "github.com/filecoin-project/go-f3/gpbft" 4 | 5 | type validatedMessage struct { 6 | msg *gpbft.GMessage 7 | } 8 | 9 | var _ gpbft.ValidatedMessage = (*validatedMessage)(nil) 10 | 11 | func Validated(msg *gpbft.GMessage) gpbft.ValidatedMessage { 12 | return &validatedMessage{msg: msg} 13 | } 14 | 15 | func (v *validatedMessage) Message() *gpbft.GMessage { 16 | return v.msg 17 | } 18 | -------------------------------------------------------------------------------- /.github/workflows/go-check.yml: -------------------------------------------------------------------------------- 1 | name: Go Checks 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: ["main"] 7 | workflow_dispatch: 8 | merge_group: 9 | 10 | permissions: 11 | contents: read 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | go-check: 19 | uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0 20 | -------------------------------------------------------------------------------- /.github/workflows/release-check.yml: -------------------------------------------------------------------------------- 1 | name: Release Checker 2 | 3 | on: 4 | pull_request_target: 5 | paths: [ 'version.json' ] 6 | types: [ opened, synchronize, reopened, labeled, unlabeled ] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: write 11 | pull-requests: write 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | release-check: 19 | uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0 20 | -------------------------------------------------------------------------------- /logging.go: -------------------------------------------------------------------------------- 1 | package f3 2 | 3 | import ( 4 | "github.com/filecoin-project/go-f3/gpbft" 5 | logging "github.com/ipfs/go-log/v2" 6 | ) 7 | 8 | var log = logging.Logger("f3") 9 | var tracer 
gpbft.Tracer = (*gpbftTracer)(logging.WithSkip(logging.Logger("f3/gpbft"), 2)) 10 | 11 | // Tracer used by GPBFT, backed by a Zap logger. 12 | type gpbftTracer logging.ZapEventLogger 13 | 14 | // Log fulfills the gpbft.Tracer interface 15 | func (h *gpbftTracer) Log(fmt string, args ...any) { 16 | (*logging.ZapEventLogger)(h).Debugf(fmt, args...) 17 | } 18 | -------------------------------------------------------------------------------- /gpbft/validator_api_test.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import "github.com/filecoin-project/go-f3/internal/caching" 4 | 5 | type Validator interface { 6 | MessageValidator 7 | PartialMessageValidator 8 | } 9 | 10 | // NewValidator creates a new Validator instance with the provided parameters for 11 | // testing purposes. 12 | func NewValidator(nn NetworkName, verifier Verifier, cp CommitteeProvider, progress Progress, cache *caching.GroupedSet, committeeLookback uint64) Validator { 13 | return newValidator(nn, verifier, cp, progress, cache, committeeLookback) 14 | } 15 | -------------------------------------------------------------------------------- /chainexchange/chainexchange.go: -------------------------------------------------------------------------------- 1 | package chainexchange 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/filecoin-project/go-f3/gpbft" 7 | ) 8 | 9 | type Message struct { 10 | Instance uint64 11 | Chain *gpbft.ECChain 12 | Timestamp int64 13 | } 14 | 15 | type ChainExchange interface { 16 | Broadcast(context.Context, Message) error 17 | GetChainByInstance(context.Context, uint64, gpbft.ECChainKey) (*gpbft.ECChain, bool) 18 | RemoveChainsByInstance(context.Context, uint64) error 19 | } 20 | 21 | type Listener interface { 22 | NotifyChainDiscovered(ctx context.Context, instance uint64, chain *gpbft.ECChain) 23 | } 24 | -------------------------------------------------------------------------------- /wal.go: -------------------------------------------------------------------------------- 1 | package f3 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/filecoin-project/go-f3/gpbft" 7 | "github.com/filecoin-project/go-f3/internal/writeaheadlog" 8 | ) 9 | 10 | type walEntry struct { 11 | Message *gpbft.GMessage 12 | } 13 | 14 | var _ writeaheadlog.Entry = (*walEntry)(nil) 15 | 16 | func (we *walEntry) WALEpoch() uint64 { 17 | return we.Message.Vote.Instance 18 | } 19 | 20 | func (we *walEntry) MarshalCBOR(w io.Writer) error { 21 | return we.Message.MarshalCBOR(w) 22 | } 23 | 24 | func (we *walEntry) UnmarshalCBOR(r io.Reader) error { 25 | we.Message = &gpbft.GMessage{} 26 | return we.Message.UnmarshalCBOR(r) 27 | } 28 | -------------------------------------------------------------------------------- /cmd/f3/pubsub.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "time" 5 | 6 | pubsub "github.com/libp2p/go-libp2p-pubsub" 7 | ) 8 | 9 | func init() { 10 | // "borrowed" from lotus node/modules/lp2p/pubsub.go 11 | // configure larger overlay parameters 12 | pubsub.GossipSubD = 8 13 | pubsub.GossipSubDscore = 6 14 | pubsub.GossipSubDout = 3 15 | pubsub.GossipSubDlo = 6 16 | pubsub.GossipSubDhi = 12 17 | pubsub.GossipSubDlazy = 12 18 | pubsub.GossipSubDirectConnectInitialDelay = 30 * time.Second 19 | pubsub.GossipSubIWantFollowupTime = 5 * time.Second 20 | pubsub.GossipSubHistoryLength = 10 21 | pubsub.GossipSubGossipFactor = 0.1 22 | } 23 |
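// Editor's note: the sketch below is illustrative and not part of the original
// file. The init() above works by mutating package-level GossipSub* defaults in
// go-libp2p-pubsub, so any gossipsub router constructed afterwards inherits the
// larger overlay parameters. The helper assumes additional imports ("context"
// and "github.com/libp2p/go-libp2p"); the host construction is a placeholder.
func newTunedGossipSub(ctx context.Context) (*pubsub.PubSub, error) {
	// A default libp2p host; a real deployment would configure listen
	// addresses, transports and an identity key explicitly.
	h, err := libp2p.New()
	if err != nil {
		return nil, err
	}
	// Constructed after init() has run, so this router starts with D=8,
	// Dlo=6, Dhi=12 and the other values set above.
	return pubsub.NewGossipSub(ctx, h)
}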
-------------------------------------------------------------------------------- /tipsettimestamp.go: -------------------------------------------------------------------------------- 1 | package f3 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/filecoin-project/go-f3/ec" 7 | ) 8 | 9 | func computeTipsetTimestampAtEpoch(validTipset ec.TipSet, epoch int64, ECPeriod time.Duration) time.Time { 10 | // validTipset is base and epoch is the head 11 | // timestamp(head) = genesis + epoch(head) * period 12 | // timestamp(base) = genesis + epoch(base) * period + timestamp(head) - timestamp(head) 13 | // timestamp(base) = timestamp(head) + genesis + epoch(base) * period - genesis - epoch(head) * period 14 | // timestamp(base) = timestamp(head) + (epoch(base) - epoch(head)) * period 15 | return validTipset.Timestamp().Add(time.Duration(epoch-validTipset.Epoch()) * ECPeriod) 16 | } 17 | -------------------------------------------------------------------------------- /internal/gnark/adapter_test.go: -------------------------------------------------------------------------------- 1 | package gnark 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | "go.dedis.ch/kyber/v4/util/key" 8 | ) 9 | 10 | func TestAdapter_SuiteBLS12381(t *testing.T) { 11 | suite := NewSuiteBLS12381() 12 | 13 | pair := key.NewKeyPair(suite) 14 | pubkey, err := pair.Public.MarshalBinary() 15 | require.Nil(t, err) 16 | privkey, err := pair.Private.MarshalBinary() 17 | require.Nil(t, err) 18 | 19 | pubhex := suite.Point() 20 | err = pubhex.UnmarshalBinary(pubkey) 21 | require.Nil(t, err) 22 | 23 | privhex := suite.Scalar() 24 | err = privhex.UnmarshalBinary(privkey) 25 | require.Nil(t, err) 26 | 27 | require.Equal(t, "gnark.adapter", suite.String()) 28 | } 29 | -------------------------------------------------------------------------------- /.github/workflows/go-test.yml: -------------------------------------------------------------------------------- 1 | name: Go Test 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [ "main" ] 7 | workflow_dispatch: 8 | merge_group: 9 | 10 | permissions: 11 | contents: read 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} 15 | cancel-in-progress: true 16 | 17 | env: 18 | # Speed up tests run with the race detector by reducing the default exit sleep of 1s to 0. 19 | # See: https://go.dev/doc/articles/race_detector#Options 20 | GORACE: atexit_sleep_ms=0 21 | 22 | jobs: 23 | go-test: 24 | uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0 25 | secrets: 26 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 27 | -------------------------------------------------------------------------------- /sim/latency/latency.go: -------------------------------------------------------------------------------- 1 | package latency 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/filecoin-project/go-f3/gpbft" 7 | ) 8 | 9 | // Modeler instantiates a new latency Model. 10 | type Modeler func() (Model, error) 11 | 12 | // Model represents a latency model of cross participant communication. The model 13 | // offers the ability for implementation of varying latency across a simulation, 14 | // as well as specialised latency across specific participants. 15 | // 16 | // See LogNormal, Zipf, None. 17 | type Model interface { 18 | // Sample returns an artificial latency at time t for communications from a 19 | // participant to another participant.
20 | // 21 | // See: gpbft.Host, gpbft.Clock. 22 | Sample(t time.Time, from, to gpbft.ActorID) time.Duration 23 | } 24 | -------------------------------------------------------------------------------- /observer/metrics.go: -------------------------------------------------------------------------------- 1 | package observer 2 | 3 | import ( 4 | "github.com/filecoin-project/go-f3/internal/measurements" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/attribute" 7 | "go.opentelemetry.io/otel/metric" 8 | ) 9 | 10 | var meter = otel.Meter("f3/observer") 11 | var attrErrorType = attribute.Key("error.type") 12 | 13 | var metrics = struct { 14 | rotations metric.Int64Counter 15 | msgsReceived metric.Int64Counter 16 | }{ 17 | rotations: measurements.Must(meter.Int64Counter( 18 | "f3_observer_rotations", 19 | metric.WithDescription("The number of rotations performed."), 20 | )), 21 | msgsReceived: measurements.Must(meter.Int64Counter( 22 | "f3_observer_msgs_received", 23 | metric.WithDescription("The number of messages received."), 24 | )), 25 | } 26 | -------------------------------------------------------------------------------- /certexchange/polling/pollstatus_string.go: -------------------------------------------------------------------------------- 1 | // Code generated by "stringer -type=PollStatus"; DO NOT EDIT. 2 | 3 | package polling 4 | 5 | import "strconv" 6 | 7 | func _() { 8 | // An "invalid array index" compiler error signifies that the constant values have changed. 9 | // Re-run the stringer command to generate them again. 10 | var x [1]struct{} 11 | _ = x[PollMiss-0] 12 | _ = x[PollHit-1] 13 | _ = x[PollFailed-2] 14 | _ = x[PollIllegal-3] 15 | } 16 | 17 | const _PollStatus_name = "PollMissPollHitPollFailedPollIllegal" 18 | 19 | var _PollStatus_index = [...]uint8{0, 8, 15, 25, 36} 20 | 21 | func (i PollStatus) String() string { 22 | if i < 0 || i >= PollStatus(len(_PollStatus_index)-1) { 23 | return "PollStatus(" + strconv.FormatInt(int64(i), 10) + ")" 24 | } 25 | return _PollStatus_name[_PollStatus_index[i]:_PollStatus_index[i+1]] 26 | } 27 | -------------------------------------------------------------------------------- /blssig/verifier_test.go: -------------------------------------------------------------------------------- 1 | package blssig 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | "go.dedis.ch/kyber/v4/sign/bdn" 9 | 10 | bls12381 "github.com/filecoin-project/go-f3/internal/gnark" 11 | ) 12 | 13 | func BenchmarkBLSSigning(b *testing.B) { 14 | var ( 15 | blsSuit = bls12381.NewSuiteBLS12381() 16 | blsSchema = bdn.NewSchemeOnG2(blsSuit) 17 | ) 18 | privKey, pubKey := blsSchema.NewKeyPair(blsSuit.RandomStream()) 19 | pubKeyB, err := pubKey.MarshalBinary() 20 | require.NoError(b, err) 21 | signer := SignerWithKeyOnG1(pubKeyB, privKey) 22 | verifier := VerifierWithKeyOnG1() 23 | ctx := context.Background() 24 | 25 | sig, err := signer.Sign(ctx, pubKeyB, pubKeyB) 26 | require.NoError(b, err) 27 | b.ResetTimer() 28 | 29 | for i := 0; i < b.N; i++ { 30 | err := verifier.Verify(pubKeyB, pubKeyB, sig) 31 | require.NoError(b, err) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /sim/adversary/absent.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/filecoin-project/go-f3/gpbft" 8 | ) 9 | 10 | var _ Receiver = (*Absent)(nil) 11 | 12 | type Absent struct{ 
allowAll } 13 | 14 | func NewAbsentGenerator(power gpbft.StoragePower) Generator { 15 | return func(id gpbft.ActorID, host Host) *Adversary { 16 | return &Adversary{ 17 | Receiver: Absent{}, 18 | Power: power, 19 | ID: id, 20 | } 21 | } 22 | } 23 | 24 | func (Absent) ValidateMessage(_ context.Context, msg *gpbft.GMessage) (gpbft.ValidatedMessage, error) { 25 | return Validated(msg), nil 26 | } 27 | 28 | func (Absent) StartInstanceAt(uint64, time.Time) error { return nil } 29 | func (Absent) ReceiveMessage(context.Context, gpbft.ValidatedMessage) error { return nil } 30 | func (Absent) ReceiveAlarm(context.Context) error { return nil } 31 | -------------------------------------------------------------------------------- /blssig/signer.go: -------------------------------------------------------------------------------- 1 | package blssig 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | 8 | "go.dedis.ch/kyber/v4" 9 | "go.dedis.ch/kyber/v4/sign/bdn" 10 | 11 | "github.com/filecoin-project/go-f3/gpbft" 12 | bls12381 "github.com/filecoin-project/go-f3/internal/gnark" 13 | ) 14 | 15 | var _ gpbft.Signer = (*Signer)(nil) 16 | 17 | type Signer struct { 18 | scheme *bdn.Scheme 19 | pubKey gpbft.PubKey 20 | privKey kyber.Scalar 21 | } 22 | 23 | func SignerWithKeyOnG1(pub gpbft.PubKey, privKey kyber.Scalar) *Signer { 24 | return &Signer{ 25 | scheme: bdn.NewSchemeOnG2(bls12381.NewSuiteBLS12381()), 26 | pubKey: pub, 27 | privKey: privKey, 28 | } 29 | } 30 | 31 | func (s *Signer) Sign(_ context.Context, sender gpbft.PubKey, msg []byte) ([]byte, error) { 32 | if !bytes.Equal(sender, s.pubKey) { 33 | return nil, errors.New("cannot sign: unknown sender") 34 | } 35 | return s.scheme.Sign(s.privKey, msg) 36 | } 37 | -------------------------------------------------------------------------------- /cmd/f3/tools.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/filecoin-project/go-f3/certs" 9 | "github.com/filecoin-project/go-f3/gpbft" 10 | "github.com/urfave/cli/v2" 11 | ) 12 | 13 | var toolsCmd = cli.Command{ 14 | Name: "tools", 15 | Usage: "various tools for f3", 16 | Subcommands: []*cli.Command{ 17 | &ptCidCmd, 18 | }, 19 | } 20 | 21 | var ptCidCmd = cli.Command{ 22 | Name: "ptCid", 23 | Usage: "compute the CID of a json power table", 24 | Action: func(c *cli.Context) error { 25 | var entries gpbft.PowerEntries 26 | err := json.NewDecoder(os.Stdin).Decode(&entries) 27 | if err != nil { 28 | return fmt.Errorf("error while decoding: %w", err) 29 | } 30 | 31 | cid, err := certs.MakePowerTableCID(entries) 32 | if err != nil { 33 | return fmt.Errorf("error while computing CID: %w", err) 34 | } 35 | 36 | fmt.Printf("%s\n", cid) 37 | return nil 38 | }, 39 | } 40 | -------------------------------------------------------------------------------- /emulator/message_cache.go: -------------------------------------------------------------------------------- 1 | package emulator 2 | 3 | import "github.com/filecoin-project/go-f3/gpbft" 4 | 5 | // MessageCache is a repository of messages keyed by their instance, round and 6 | // phase. This cache is used for testing purposes only and has no eviction 7 | // strategy. It is primarily used to store messages from self for rebroadcast. 
8 | type MessageCache map[gpbft.Instant]*gpbft.GMessage 9 | 10 | func NewMessageCache() MessageCache { 11 | return make(map[gpbft.Instant]*gpbft.GMessage) 12 | } 13 | 14 | func (mc MessageCache) Get(instant gpbft.Instant) (*gpbft.GMessage, bool) { 15 | msg, found := mc[instant] 16 | return msg, found 17 | } 18 | 19 | func (mc MessageCache) PutIfAbsent(msg *gpbft.GMessage) bool { 20 | key := gpbft.Instant{ 21 | ID: msg.Vote.Instance, 22 | Round: msg.Vote.Round, 23 | Phase: msg.Vote.Phase, 24 | } 25 | if _, found := mc[key]; found { 26 | return false 27 | } 28 | mc[key] = msg 29 | return true 30 | } 31 | -------------------------------------------------------------------------------- /internal/gnark/group.go: -------------------------------------------------------------------------------- 1 | package gnark 2 | 3 | import ( 4 | fr "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" 5 | "go.dedis.ch/kyber/v4" 6 | ) 7 | 8 | var ( 9 | G1 kyber.Group = &groupBls{name: "bls12-381.G1", newPoint: func() kyber.Point { return new(G1Elt).Null() }} 10 | G2 kyber.Group = &groupBls{name: "bls12-381.G2", newPoint: func() kyber.Point { return new(G2Elt).Null() }} 11 | GT kyber.Group = &groupBls{name: "bls12-381.GT", newPoint: func() kyber.Point { return new(GTElt).Null() }} 12 | ) 13 | 14 | type groupBls struct { 15 | name string 16 | newPoint func() kyber.Point 17 | } 18 | 19 | func (g groupBls) String() string { return g.name } 20 | func (g groupBls) ScalarLen() int { return fr.Bytes } 21 | func (g groupBls) Scalar() kyber.Scalar { return new(Scalar).SetInt64(0) } 22 | func (g groupBls) PointLen() int { return g.newPoint().MarshalSize() } 23 | func (g groupBls) Point() kyber.Point { return g.newPoint() } 24 | -------------------------------------------------------------------------------- /observer/options_test.go: -------------------------------------------------------------------------------- 1 | package observer 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestWithBootstrapAddrsFromString_Resolution(t *testing.T) { 10 | for _, tt := range []struct { 11 | name string 12 | addrs []string 13 | }{ 14 | { 15 | name: "dnsaddr", 16 | addrs: []string{"/dnsaddr/api.drand.sh"}, 17 | }, 18 | { 19 | name: "dns", 20 | addrs: []string{"/dns/example.com/tcp/1347/p2p/12D3KooWGotz7nQavdkncoFb8QoX2YmfnjF1RuSz3PfuEhYUGrxr"}, 21 | }, 22 | { 23 | name: "ip", 24 | addrs: []string{"/ip4/127.0.0.1/tcp/1347/p2p/12D3KooWGotz7nQavdkncoFb8QoX2YmfnjF1RuSz3PfuEhYUGrxr"}, 25 | }, 26 | } { 27 | t.Run(tt.name, func(t *testing.T) { 28 | apply := WithBootstrapPeersFromString(5, tt.addrs...) 
29 | require.NotNil(t, apply) 30 | got := &options{} 31 | require.NoError(t, apply(got)) 32 | require.NotEmpty(t, tt.addrs, got.connectivityBootstrapPeers) 33 | }) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /gpbft/errors_test.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestValidationError_SentinelValues(t *testing.T) { 11 | tests := []struct { 12 | name string 13 | subject error 14 | }{ 15 | {name: "ErrValidationTooOld", subject: ErrValidationTooOld}, 16 | {name: "ErrValidationNoCommittee", subject: ErrValidationNoCommittee}, 17 | {name: "ErrValidationInvalid", subject: ErrValidationInvalid}, 18 | {name: "ErrValidationWrongBase", subject: ErrValidationWrongBase}, 19 | {name: "ErrValidationWrongSupplement", subject: ErrValidationWrongSupplement}, 20 | {name: "ErrValidationNotRelevant", subject: ErrValidationNotRelevant}, 21 | } 22 | for _, test := range tests { 23 | t.Run(test.name, func(t *testing.T) { 24 | require.True(t, errors.As(test.subject, &ValidationError{})) 25 | require.True(t, errors.As(test.subject, &ValidationError{message: "fish"})) 26 | }) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /internal/consensus/tipset.go: -------------------------------------------------------------------------------- 1 | package consensus 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/filecoin-project/go-f3/ec" 7 | "github.com/filecoin-project/go-f3/gpbft" 8 | mbase "github.com/multiformats/go-multibase" 9 | ) 10 | 11 | var _ ec.TipSet = (*tipset)(nil) 12 | 13 | type tipset struct { 14 | tsk []byte 15 | epoch int64 16 | timestamp time.Time 17 | beacon []byte 18 | } 19 | 20 | func (ts *tipset) Key() gpbft.TipSetKey { return ts.tsk } 21 | func (ts *tipset) Epoch() int64 { return ts.epoch } 22 | func (ts *tipset) Beacon() []byte { return ts.beacon } 23 | func (ts *tipset) Timestamp() time.Time { return ts.timestamp } 24 | 25 | func (ts *tipset) String() string { 26 | res, _ := mbase.Encode(mbase.Base32, ts.tsk[:gpbft.CidMaxLen]) 27 | for i := 1; i*gpbft.CidMaxLen < len(ts.tsk); i++ { 28 | enc, _ := mbase.Encode(mbase.Base32, ts.tsk[gpbft.CidMaxLen*i:gpbft.CidMaxLen*(i+1)]) 29 | res += "," + enc 30 | } 31 | return res 32 | } 33 | -------------------------------------------------------------------------------- /test/absent_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/filecoin-project/go-f3/sim" 7 | "github.com/filecoin-project/go-f3/sim/adversary" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func FuzzAbsentAdversary(f *testing.F) { 12 | f.Add(98465230) 13 | f.Add(651) 14 | f.Add(-56) 15 | f.Add(22) 16 | f.Add(0) 17 | f.Add(-855) // Takes 12 rounds to complete. 18 | f.Fuzz(func(t *testing.T, latencySeed int) { 19 | t.Parallel() 20 | sm, err := sim.NewSimulation( 21 | asyncOptions(latencySeed, 22 | // Total network size of 3 + 1, where the adversary has 1/4 of power. 23 | sim.AddHonestParticipants( 24 | 3, 25 | sim.NewUniformECChainGenerator(tipSetGeneratorSeed, 1, 5), 26 | uniformOneStoragePower), 27 | sim.WithAdversary(adversary.NewAbsentGenerator(oneStoragePower)), 28 | )...) 
29 | require.NoError(t, err) 30 | require.NoErrorf(t, sm.Run(1, maxRounds+2), "%s", sm.Describe()) 31 | }, 32 | ) 33 | } 34 | -------------------------------------------------------------------------------- /cmd/f3/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "os/signal" 8 | "syscall" 9 | 10 | "github.com/urfave/cli/v2" 11 | ) 12 | 13 | func main() { 14 | app := &cli.App{ 15 | Name: "f3", 16 | Usage: "standalone f3 node", 17 | Flags: []cli.Flag{ 18 | &cli.StringFlag{ 19 | Name: "manifest", 20 | Value: "manifest.json", 21 | Usage: "path to the manifest file", 22 | }, 23 | }, 24 | Commands: []*cli.Command{ 25 | &runCmd, 26 | &manifestCmd, 27 | &observerCmd, 28 | &toolsCmd, 29 | &certsCmd, 30 | &aiderCmd, 31 | }, 32 | } 33 | 34 | ctx, cancel := context.WithCancel(context.Background()) 35 | defer cancel() 36 | 37 | sigChan := make(chan os.Signal, 1) 38 | signal.Notify(sigChan, syscall.SIGINT) 39 | 40 | go func() { 41 | <-sigChan 42 | cancel() 43 | }() 44 | 45 | if err := app.RunContext(ctx, os.Args); err != nil { 46 | _, _ = fmt.Fprintf(os.Stderr, "runtime error: %+v\n", err) 47 | os.Exit(1) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /internal/clock/clock.go: -------------------------------------------------------------------------------- 1 | package clock 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/filecoin-project/go-clock" 7 | ) 8 | 9 | type Clock = clock.Clock 10 | type Mock = clock.Mock 11 | type Timer = clock.Timer 12 | 13 | type clockKeyType struct{} 14 | 15 | var clockKey = clockKeyType{} 16 | 17 | // NewMock returns an instance of a mock clock. 18 | // The current time of the mock clock on initialization is the Unix epoch. 19 | func NewMock() *Mock { 20 | return clock.NewMock() 21 | } 22 | 23 | // WithMockClock embeds a mock clock in the context and returns it. 24 | func WithMockClock(ctx context.Context) (context.Context, *Mock) { 25 | clk := clock.NewMock() 26 | return context.WithValue(ctx, clockKey, (Clock)(clk)), clk 27 | } 28 | 29 | var RealClock = clock.New() 30 | 31 | // GetClock either retrieves a mock clock from the context or returns a realtime clock. 
32 | func GetClock(ctx context.Context) Clock { 33 | clk := ctx.Value(clockKey) 34 | if clk == nil { 35 | return RealClock 36 | } 37 | return clk.(Clock) 38 | } 39 | -------------------------------------------------------------------------------- /certstore/metrics.go: -------------------------------------------------------------------------------- 1 | package certstore 2 | 3 | import ( 4 | "github.com/filecoin-project/go-f3/internal/measurements" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/metric" 7 | ) 8 | 9 | var meter = otel.Meter("f3/certstore") 10 | var metrics = struct { 11 | latestInstance metric.Int64Gauge 12 | latestFinalizedEpoch metric.Int64Gauge 13 | tipsetsPerInstance metric.Int64Gauge 14 | }{ 15 | latestInstance: measurements.Must(meter.Int64Gauge("f3_certstore_latest_instance", 16 | metric.WithDescription("The latest instance available in certstore."), 17 | metric.WithUnit("{instance}"), 18 | )), 19 | latestFinalizedEpoch: measurements.Must(meter.Int64Gauge("f3_certstore_latest_finalized_epoch", 20 | metric.WithDescription("The latest finalized epoch."), 21 | metric.WithUnit("{epoch}"), 22 | )), 23 | tipsetsPerInstance: measurements.Must(meter.Int64Gauge("f3_certstore_tipsets_per_instance", 24 | metric.WithDescription("The number of new tipsets finalized per instance."), 25 | metric.WithUnit("{tipset}"), 26 | )), 27 | } 28 | -------------------------------------------------------------------------------- /sim/adversary/adversary.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import "github.com/filecoin-project/go-f3/gpbft" 4 | 5 | type Receiver interface { 6 | gpbft.Receiver 7 | Censorer 8 | } 9 | 10 | type Censorer interface { 11 | AllowMessage(from gpbft.ActorID, to gpbft.ActorID, msg gpbft.GMessage) bool 12 | } 13 | 14 | // Endpoint with which the adversary can control the network 15 | type Host interface { 16 | gpbft.Host 17 | gpbft.Signer 18 | // Sends a message to all other participants, immediately. 19 | // Note that the adversary can subsequently delay delivery to some participants, 20 | // before messages are actually received. 
21 | RequestSynchronousBroadcast(mb *gpbft.MessageBuilder) error 22 | } 23 | 24 | type Generator func(gpbft.ActorID, Host) *Adversary 25 | 26 | type Adversary struct { 27 | Receiver 28 | Power gpbft.StoragePower 29 | ID gpbft.ActorID 30 | } 31 | 32 | var _ Censorer = (*allowAll)(nil) 33 | 34 | type allowAll struct{} 35 | 36 | func (allowAll) AllowMessage(gpbft.ActorID, gpbft.ActorID, gpbft.GMessage) bool { return true } 37 | -------------------------------------------------------------------------------- /gpbft/options_test.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func Test_exponentialBackofferMax(t *testing.T) { 11 | maxBackoff := 30 * time.Second 12 | backoffer := exponentialBackoffer(1.3, 0, 3*time.Second, maxBackoff) 13 | var lastBackoff time.Duration 14 | for i := 0; i < 10_000; i++ { 15 | backoff := backoffer(i) 16 | require.Positivef(t, backoff, "at %d", i) 17 | if backoff != maxBackoff { 18 | require.Less(t, backoff, maxBackoff, "at %d", i) 19 | require.Greater(t, backoff, lastBackoff, "at %d", i) 20 | } 21 | lastBackoff = backoff 22 | } 23 | } 24 | 25 | func Test_exponentialBackofferSpread(t *testing.T) { 26 | maxBackoff := 30 * time.Second 27 | backoffer1 := exponentialBackoffer(1.3, 0.1, 3*time.Second, maxBackoff) 28 | backoffer2 := exponentialBackoffer(1.3, 0.1, 3*time.Second, maxBackoff) 29 | 30 | for i := 0; i < 8; i++ { 31 | backoff1 := backoffer1(i) 32 | backoff2 := backoffer2(i) 33 | require.NotEqual(t, backoff1, backoff2, "backoffs were not randomized") 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /certexchange/protocol.go: -------------------------------------------------------------------------------- 1 | package certexchange 2 | 3 | import ( 4 | "math" 5 | 6 | "github.com/filecoin-project/go-f3/gpbft" 7 | "github.com/libp2p/go-libp2p/core/protocol" 8 | ) 9 | 10 | func FetchProtocolName(nn gpbft.NetworkName) protocol.ID { 11 | return protocol.ID("/f3/certexch/get/1/" + string(nn)) 12 | } 13 | 14 | // Request unlimited certificates. 15 | const NoLimit uint64 = math.MaxUint64 16 | 17 | type Request struct { 18 | // First instance to fetch. 19 | FirstInstance uint64 20 | // Max number of instances to fetch. The server may respond with fewer certificates than 21 | // requested, even if more are available. 22 | Limit uint64 23 | // Include the full power table needed to validate the first finality certificate. 24 | // Checked by the user against their last finality certificate. 25 | IncludePowerTable bool 26 | } 27 | 28 | type ResponseHeader struct { 29 | // The next instance to be finalized. This is 0 when no instances have been finalized. 30 | PendingInstance uint64 31 | // Power table, if requested, or empty.
32 | PowerTable gpbft.PowerEntries 33 | } 34 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Protocol Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /sim/tipset_gen.go: -------------------------------------------------------------------------------- 1 | package sim 2 | 3 | import "github.com/filecoin-project/go-f3/gpbft" 4 | 5 | var alphanum = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") 6 | 7 | // A tipset generator. 8 | // This uses a fast xorshift PRNG to generate random tipset IDs. 9 | // The statistical properties of these are not important to correctness. 10 | type TipSetGenerator struct { 11 | xorshiftState uint64 12 | } 13 | 14 | func NewTipSetGenerator(seed uint64) *TipSetGenerator { 15 | return &TipSetGenerator{xorshiftState: seed} 16 | } 17 | 18 | func (c *TipSetGenerator) Sample() gpbft.TipSetKey { 19 | b := make([]byte, 8) 20 | for i := range b { 21 | b[i] = alphanum[c.nextN(len(alphanum))] 22 | } 23 | return b 24 | } 25 | 26 | func (c *TipSetGenerator) nextN(n int) uint64 { 27 | bucketSize := uint64(1<<63) / uint64(n) 28 | limit := bucketSize * uint64(n) 29 | for { 30 | x := c.next() 31 | if x < limit { 32 | return x / bucketSize 33 | } 34 | } 35 | } 36 | 37 | func (c *TipSetGenerator) next() uint64 { 38 | x := c.xorshiftState 39 | x ^= x << 13 40 | x ^= x >> 7 41 | x ^= x << 17 42 | c.xorshiftState = x 43 | return x 44 | } 45 | -------------------------------------------------------------------------------- /gpbft/vrf.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | ) 7 | 8 | // A ticket is a signature over some common payload. 9 | type Ticket []byte 10 | 11 | const DomainSeparationTagVRF = "VRF" 12 | 13 | func VerifyTicket(nn NetworkName, beacon []byte, instance uint64, round uint64, source PubKey, verifier Verifier, ticket Ticket) bool { 14 | return verifier.Verify(source, vrfSerializeSigInput(beacon, instance, round, nn), ticket) == nil 15 | } 16 | 17 | // Serializes the input to the VRF signature for the CONVERGE phase of GossiPBFT. 18 | // Only used for VRF ticket creation and/or verification. 
19 | func vrfSerializeSigInput(beacon []byte, instance uint64, round uint64, networkName NetworkName) []byte { 20 | const separator = ":" 21 | var buf bytes.Buffer 22 | buf.Grow(len(DomainSeparationTagVRF) + 23 | len(beacon) + 24 | len(networkName) + 25 | len(separator)*3 + 26 | 16) 27 | 28 | buf.WriteString(DomainSeparationTagVRF) 29 | buf.WriteString(separator) 30 | buf.WriteString(string(networkName)) 31 | buf.WriteString(separator) 32 | buf.Write(beacon) 33 | buf.WriteString(separator) 34 | _ = binary.Write(&buf, binary.BigEndian, instance) 35 | _ = binary.Write(&buf, binary.BigEndian, round) 36 | 37 | return buf.Bytes() 38 | } 39 | -------------------------------------------------------------------------------- /blssig/metrics.go: -------------------------------------------------------------------------------- 1 | package blssig 2 | 3 | import ( 4 | logging "github.com/ipfs/go-log/v2" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/attribute" 7 | "go.opentelemetry.io/otel/metric" 8 | 9 | "github.com/filecoin-project/go-f3/internal/measurements" 10 | ) 11 | 12 | var meter = otel.Meter("f3/blssig") 13 | var attrCached = attribute.Key("cached") 14 | 15 | var log = logging.Logger("f3/blssig") 16 | 17 | var metrics = struct { 18 | decompressPoint metric.Int64Counter 19 | verify metric.Int64Counter 20 | verifyAggregate metric.Int64Histogram 21 | aggregate metric.Int64Histogram 22 | }{ 23 | decompressPoint: measurements.Must(meter.Int64Counter( 24 | "f3_blssig_decompress_point", 25 | metric.WithDescription("Number of times we decompress points."), 26 | )), 27 | verify: measurements.Must(meter.Int64Counter( 28 | "f3_blssig_verify", 29 | metric.WithDescription("Number of signatures verified."), 30 | )), 31 | verifyAggregate: measurements.Must(meter.Int64Histogram( 32 | "f3_blssig_verify_aggregate", 33 | metric.WithDescription("Number of aggregate signatures verified."), 34 | )), 35 | aggregate: measurements.Must(meter.Int64Histogram( 36 | "f3_blssig_aggregate", 37 | metric.WithDescription("Number of signatures aggregated."), 38 | )), 39 | } 40 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /usr/bin/env bash 2 | 3 | GOLANGCILINT = go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1 4 | 5 | all: generate test fuzz lint 6 | 7 | test: GOGC ?= 1000 # Reduce GC frequency during testing, default to 1000 if unset. 8 | test: 9 | GOGC=$(GOGC) go test $(GOTEST_ARGS) ./... 10 | .PHONY: test 11 | 12 | test/cover: test 13 | test/cover: GOTEST_ARGS=-coverprofile=coverage.txt -covermode=atomic -coverpkg=./... 14 | .PHONY: test/cover 15 | 16 | fuzz: FUZZTIME ?= 10s # The duration to run fuzz testing, default to 10s if unset. 17 | fuzz: # List all fuzz tests across the repo, and run them one at a time with the configured fuzztime. 18 | @set -e; \ 19 | go list ./... | while read -r package; do \ 20 | go test -list '^Fuzz' "$$package" | grep '^Fuzz' | while read -r func; do \ 21 | echo "Running $$package $$func for $(FUZZTIME)..."; \ 22 | GOGC=$(GOGC) go test "$$package" -run '^$$' -fuzz="$$func" -fuzztime=$(FUZZTIME) || exit 1; \ 23 | done; \ 24 | done; 25 | .PHONY: fuzz 26 | 27 | lint: 28 | go mod tidy 29 | $(GOLANGCILINT) run ./... 30 | .PHONY: lint 31 | 32 | generate: 33 | go generate ./... 34 | .PHONY: generate 35 | 36 | build: f3 37 | .PHONY: build 38 | 39 | f3: 40 | go build ./cmd/f3 41 | .PHONY: f3 42 | 43 | gen: 44 | go generate ./... 
45 | .PHONY: gen -------------------------------------------------------------------------------- /sim/latency/zipf.go: -------------------------------------------------------------------------------- 1 | package latency 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "math/rand" 7 | "time" 8 | 9 | "github.com/filecoin-project/go-f3/gpbft" 10 | ) 11 | 12 | var _ Model = (*Zipf)(nil) 13 | 14 | // Zipf represents a Zipf-distributed latency model with a configurable 15 | // max latency. This latency model does not specialise based on host clock time 16 | // nor participants. 17 | type Zipf struct { 18 | dist *rand.Zipf 19 | } 20 | 21 | // NewZipf instantiates a new latency model of ZipF latency distribution with the 22 | // given max. 23 | func NewZipf(seed int64, s, v float64, max time.Duration) (*Zipf, error) { 24 | if max < 0 { 25 | return nil, errors.New("max duration cannot be negative") 26 | } 27 | dist := rand.NewZipf(rand.New(rand.NewSource(seed)), s, v, uint64(max)) 28 | if dist == nil { 29 | return nil, fmt.Errorf("zipf parameters are out of band: s=%f, v=%f", s, v) 30 | } 31 | return &Zipf{dist: dist}, nil 32 | } 33 | 34 | // Sample returns latency samples that correspond to this ZipF numerical 35 | // distribution. The samples returned disregard time and participants, i.e. the 36 | // distribution does not vary over time nor for specific participants. 37 | func (l *Zipf) Sample(_ time.Time, from gpbft.ActorID, to gpbft.ActorID) time.Duration { 38 | return time.Duration(l.dist.Uint64()) 39 | } 40 | -------------------------------------------------------------------------------- /pmsg/metrics.go: -------------------------------------------------------------------------------- 1 | package pmsg 2 | 3 | import ( 4 | "github.com/filecoin-project/go-f3/internal/measurements" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/metric" 7 | ) 8 | 9 | var meter = otel.Meter("f3") 10 | 11 | var metrics = struct { 12 | partialMessages metric.Int64UpDownCounter 13 | partialMessageDuplicates metric.Int64Counter 14 | partialMessagesDropped metric.Int64Counter 15 | partialMessageInstances metric.Int64UpDownCounter 16 | }{ 17 | partialMessages: measurements.Must(meter.Int64UpDownCounter("f3_partial_messages", 18 | metric.WithDescription("Number of partial GPBFT messages pending fulfilment."))), 19 | partialMessageDuplicates: measurements.Must(meter.Int64Counter("f3_partial_message_duplicates", 20 | metric.WithDescription("Number of partial GPBFT messages received that already have an unfulfilled message for the same instance, sender, round and phase."))), 21 | partialMessagesDropped: measurements.Must(meter.Int64Counter("f3_partial_messages_dropped", 22 | metric.WithDescription("Number of partial GPBFT messages or chain broadcasts that were dropped due to consumers being too slow."))), 23 | partialMessageInstances: measurements.Must(meter.Int64UpDownCounter("f3_partial_message_instances", 24 | metric.WithDescription("Number of instances with partial GPBFT messages pending fulfilment."))), 25 | } 26 | -------------------------------------------------------------------------------- /pmsg/partial_msg_test.go: -------------------------------------------------------------------------------- 1 | package pmsg 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func Test_roundDownToUnixTime(t *testing.T) { 11 | someTime, err := time.Parse(time.RFC3339Nano, "2024-03-07T15:06:20.522847852Z") 12 | require.NoError(t, err) 13 | 14 | for _, test := range
[]struct { 15 | name string 16 | at time.Time 17 | interval time.Duration 18 | want int64 19 | }{ 20 | { 21 | name: "millisecond", 22 | at: someTime, 23 | interval: time.Millisecond * 200, 24 | want: 1709823980400, // 2024-03-07 15:06:20.4 25 | }, 26 | { 27 | name: "second", 28 | at: someTime, 29 | interval: time.Second * 7, 30 | want: 1709823976000, // 2024-03-07 15:06:16 31 | }, 32 | { 33 | name: "bigBang", 34 | at: time.Unix(0, 0), 35 | interval: time.Second * 7, 36 | want: 0, 37 | }, 38 | { 39 | name: "justAfterBigBang", 40 | at: time.Unix(5, 0), 41 | interval: time.Second * 5, 42 | want: 5000, 43 | }, 44 | } { 45 | t.Run(test.name, func(t *testing.T) { 46 | got := roundDownToUnixMilliTime(test.at, test.interval) 47 | require.Equal(t, test.want, got) 48 | require.GreaterOrEqual(t, got, time.Microsecond.Milliseconds()) 49 | require.LessOrEqual(t, got, test.at.UnixMilli()) 50 | }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /certexchange/metrics.go: -------------------------------------------------------------------------------- 1 | package certexchange 2 | 3 | import ( 4 | "github.com/filecoin-project/go-f3/internal/measurements" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/attribute" 7 | "go.opentelemetry.io/otel/metric" 8 | ) 9 | 10 | var meter = otel.Meter("f3/certexchange") 11 | var attrWithPowerTable = attribute.Key("with-power-table") 12 | 13 | var metrics = struct { 14 | requestLatency metric.Float64Histogram 15 | totalResponseTime metric.Float64Histogram 16 | serveTime metric.Float64Histogram 17 | certificatesServed metric.Int64Histogram 18 | }{ 19 | requestLatency: measurements.Must(meter.Float64Histogram( 20 | "f3_certexchange_request_latency", 21 | metric.WithDescription("The outbound request latency."), 22 | metric.WithUnit("s"), 23 | )), 24 | totalResponseTime: measurements.Must(meter.Float64Histogram( 25 | "f3_certexchange_total_response_time", 26 | metric.WithDescription("The total time for outbound requests."), 27 | metric.WithUnit("s"), 28 | )), 29 | serveTime: measurements.Must(meter.Float64Histogram( 30 | "f3_certexchange_serve_time", 31 | metric.WithDescription("The time spent serving requests."), 32 | metric.WithUnit("s"), 33 | )), 34 | certificatesServed: measurements.Must(meter.Int64Histogram( 35 | "f3_certexchange_certificates_served", 36 | metric.WithDescription("The number of certificates served (per request)."), 37 | metric.WithUnit("{certificate}"), 38 | )), 39 | } 40 | -------------------------------------------------------------------------------- /internal/gnark/adapter.go: -------------------------------------------------------------------------------- 1 | package gnark 2 | 3 | import ( 4 | "go.dedis.ch/kyber/v4" 5 | ) 6 | 7 | // SuiteBLS12381 is an adapter that implements the suites.Suite interface so that 8 | // bls12381 can be used as a common suite to generate key pairs for instance but 9 | // still preserves the properties of the pairing (e.g. the Pair function). 10 | // 11 | // It's important to note that the Point function will generate a point 12 | // compatible with public keys only (group G2) where the signature must be 13 | // used as a point from the group G1. 
14 | type SuiteBLS12381 struct { 15 | Suite 16 | kyber.Group 17 | } 18 | 19 | // NewSuiteBLS12381 makes a new BLS12-381 suite 20 | func NewSuiteBLS12381() *SuiteBLS12381 { 21 | return &SuiteBLS12381{} 22 | } 23 | 24 | // Point generates a point from the G2 group that can only be used 25 | // for public keys 26 | func (s *SuiteBLS12381) Point() kyber.Point { 27 | return s.G2().Point() 28 | } 29 | 30 | // PointLen returns the length of a G2 point 31 | func (s *SuiteBLS12381) PointLen() int { 32 | return s.G2().PointLen() 33 | } 34 | 35 | // Scalar generates a scalar 36 | func (s *SuiteBLS12381) Scalar() kyber.Scalar { 37 | return s.G1().Scalar() 38 | } 39 | 40 | // ScalarLen returns the length of a scalar 41 | func (s *SuiteBLS12381) ScalarLen() int { 42 | return s.G1().ScalarLen() 43 | } 44 | 45 | // String returns the name of the suite 46 | func (s *SuiteBLS12381) String() string { 47 | return "gnark.adapter" 48 | } 49 | -------------------------------------------------------------------------------- /ec/ec.go: -------------------------------------------------------------------------------- 1 | package ec 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/filecoin-project/go-f3/gpbft" 9 | ) 10 | 11 | type Backend interface { 12 | // GetTipsetByEpoch should return the tipset immediately before the one requested. 13 | // If the epoch requested is null, it returns the latest not-null one. 14 | GetTipsetByEpoch(ctx context.Context, epoch int64) (TipSet, error) 15 | // GetTipset returns the tipset with the given key. 16 | GetTipset(context.Context, gpbft.TipSetKey) (TipSet, error) 17 | // GetHead returns the current head tipset of the chain, which must be a 18 | // descendant of the latest finalized tipset. 19 | // 20 | // See Finalize. 21 | GetHead(context.Context) (TipSet, error) 22 | // GetParent returns the parent of the current tipset. 23 | GetParent(context.Context, TipSet) (TipSet, error) 24 | // GetPowerTable returns the power table at the tipset given as an argument. 25 | GetPowerTable(context.Context, gpbft.TipSetKey) (gpbft.PowerEntries, error) 26 | // Finalize marks the tipset that corresponds to the given key as finalised 27 | // beyond which no forks are allowed to occur. The finalised tipset overrides the 28 | // head tipset if it is not an ancestor of the current head. 29 | // 30 | // See GetHead.
31 | Finalize(context.Context, gpbft.TipSetKey) error 32 | } 33 | 34 | type TipSet interface { 35 | fmt.Stringer 36 | 37 | Key() gpbft.TipSetKey 38 | Beacon() []byte 39 | Epoch() int64 40 | Timestamp() time.Time 41 | } 42 | -------------------------------------------------------------------------------- /internal/encoding/metrics.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "github.com/filecoin-project/go-f3/internal/measurements" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/attribute" 7 | "go.opentelemetry.io/otel/metric" 8 | ) 9 | 10 | var ( 11 | attrCodecCbor = attribute.String("codec", "cbor") 12 | attrCodecZstd = attribute.String("codec", "zstd") 13 | attrActionEncode = attribute.String("action", "encode") 14 | attrActionDecode = attribute.String("action", "decode") 15 | 16 | meter = otel.Meter("f3/internal/encoding") 17 | 18 | metrics = struct { 19 | encodingTime metric.Float64Histogram 20 | zstdCompressionRatio metric.Float64Histogram 21 | }{ 22 | encodingTime: measurements.Must(meter.Float64Histogram( 23 | "f3_internal_encoding_time", 24 | metric.WithDescription("The time spent on encoding/decoding in seconds."), 25 | metric.WithUnit("s"), 26 | metric.WithExplicitBucketBoundaries(0.001, 0.003, 0.005, 0.01, 0.03, 0.05, 0.1, 0.3, 0.5, 1.0, 2.0, 5.0, 10.0), 27 | )), 28 | zstdCompressionRatio: measurements.Must(meter.Float64Histogram( 29 | "f3_internal_encoding_zstd_compression_ratio", 30 | metric.WithDescription("The ratio of compressed to uncompressed data size for zstd encoding."), 31 | metric.WithExplicitBucketBoundaries(0.0, 0.1, 0.2, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 10.0), 32 | )), 33 | } 34 | ) 35 | 36 | func attrSuccessFromErr(err error) attribute.KeyValue { 37 | return attribute.Bool("success", err == nil) 38 | } 39 | -------------------------------------------------------------------------------- /certchain/options.go: -------------------------------------------------------------------------------- 1 | package certchain 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/filecoin-project/go-f3/ec" 7 | "github.com/filecoin-project/go-f3/gpbft" 8 | "github.com/filecoin-project/go-f3/manifest" 9 | ) 10 | 11 | type Option func(*options) error 12 | 13 | type SignVerifier interface { 14 | gpbft.Signer 15 | gpbft.Verifier 16 | } 17 | 18 | type options struct { 19 | ec ec.Backend 20 | m manifest.Manifest 21 | sv SignVerifier 22 | seed int64 23 | } 24 | 25 | func newOptions(o ...Option) (*options, error) { 26 | opts := &options{ 27 | m: manifest.LocalDevnetManifest(), 28 | } 29 | for _, apply := range o { 30 | if err := apply(opts); err != nil { 31 | return nil, err 32 | } 33 | } 34 | if opts.ec == nil { 35 | return nil, errors.New("ec backend must be specified") 36 | } 37 | if opts.sv == nil { 38 | return nil, errors.New("sign verifier must be specified") 39 | } 40 | return opts, nil 41 | } 42 | 43 | func WithEC(ec ec.Backend) Option { 44 | return func(o *options) error { 45 | o.ec = ec 46 | return nil 47 | } 48 | } 49 | 50 | func WithManifest(m manifest.Manifest) Option { 51 | return func(o *options) error { 52 | o.m = m 53 | return nil 54 | } 55 | } 56 | 57 | func WithSignVerifier(sv SignVerifier) Option { 58 | return func(o *options) error { 59 | o.sv = sv 60 | return nil 61 | } 62 | } 63 | 64 | func WithSeed(seed int64) Option { 65 | return func(o *options) error { 66 | o.seed = seed 67 | return nil 68 | } 69 | } 70 | 
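As a quick illustration of the functional-options pattern in certchain/options.go above, the following is a hedged, in-package sketch only; stubEC and stubSigner are assumed stand-ins for an ec.Backend and a SignVerifier and do not exist in this repository:

// exampleNewOptions is an illustrative sketch, not part of the package.
// stubEC and stubSigner are assumed test doubles satisfying ec.Backend and
// SignVerifier respectively.
func exampleNewOptions(stubEC ec.Backend, stubSigner SignVerifier) (*options, error) {
	// WithEC and WithSignVerifier are mandatory: newOptions returns an error,
	// rather than panicking, if either is missing.
	return newOptions(
		WithEC(stubEC),
		WithSignVerifier(stubSigner),
		WithSeed(1413), // optional; defaults to 0
		// WithManifest is also optional; the default is manifest.LocalDevnetManifest().
	)
}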
-------------------------------------------------------------------------------- /gpbft/vrf_test.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestVrfSerializeSigInput(t *testing.T) { 11 | for _, tc := range []struct { 12 | name string 13 | beacon []byte 14 | instance uint64 15 | round uint64 16 | networkName NetworkName 17 | want []byte 18 | }{ 19 | { 20 | name: "empty", 21 | beacon: []byte{}, 22 | instance: 0, 23 | round: 0, 24 | networkName: "", 25 | want: []byte("VRF:::\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), 26 | }, 27 | { 28 | name: "basic", 29 | beacon: []byte("🥓"), 30 | instance: 1, 31 | round: 2, 32 | networkName: "testnet", 33 | want: []byte("VRF:testnet:🥓:\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02"), 34 | }, 35 | { 36 | name: "long values", 37 | beacon: bytes.Repeat([]byte{0xff}, 20), 38 | instance: 1<<63 - 1, 39 | round: 1<<63 - 1, 40 | networkName: "longnetworkname", 41 | want: append(append([]byte("VRF:longnetworkname:"), bytes.Repeat([]byte{0xff}, 20)...), ':', 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff), 42 | }, 43 | } { 44 | t.Run(tc.name, func(t *testing.T) { 45 | actual := vrfSerializeSigInput(tc.beacon, tc.instance, tc.round, tc.networkName) 46 | require.Equal(t, tc.want, actual) 47 | }) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /test/util_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/filecoin-project/go-f3/gpbft" 7 | "github.com/filecoin-project/go-f3/sim" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | // Expects the decision in the first instance to be one of the given tipsets. 12 | func requireConsensusAtFirstInstance(t *testing.T, sm *sim.Simulation, expectAnyOf ...*gpbft.TipSet) { 13 | t.Helper() 14 | requireConsensusAtInstance(t, sm, 0, expectAnyOf...) 15 | } 16 | 17 | // Expects the decision in an instance to be one of the given tipsets. 18 | func requireConsensusAtInstance(t *testing.T, sm *sim.Simulation, instance uint64, expectAnyOf ...*gpbft.TipSet) { 19 | t.Helper() 20 | inst := sm.GetInstance(instance) 21 | for _, pid := range sm.ListParticipantIDs() { 22 | require.NotNil(t, inst, "no such instance") 23 | decision := inst.GetDecision(pid) 24 | require.NotNil(t, decision, "no decision for participant %d in instance %d", pid, instance) 25 | require.Contains(t, expectAnyOf, decision.Head(), "consensus not reached: participant %d decided %s in instance %d, expected any of %s", 26 | pid, decision.Head(), instance, expectAnyOf) 27 | } 28 | } 29 | 30 | func generateECChain(t *testing.T, tsg *sim.TipSetGenerator) *gpbft.ECChain { 31 | t.Helper() 32 | // TODO: add stochastic chain generation. 
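	// For now the generated chain is a single tipset at epoch 0, keyed by a
	// sample drawn from the deterministic TipSetGenerator.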
33 | chain, err := gpbft.NewChain(&gpbft.TipSet{ 34 | Epoch: 0, 35 | Key: tsg.Sample(), 36 | PowerTable: gpbft.MakeCid([]byte("pt")), 37 | }) 38 | require.NoError(t, err) 39 | return chain 40 | } 41 | -------------------------------------------------------------------------------- /gpbft/progress.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import "sync/atomic" 4 | 5 | var ( 6 | _ ProgressObserver = (*atomicProgression)(nil) 7 | _ Progress = (*atomicProgression)(nil).Get 8 | ) 9 | 10 | type InstanceProgress struct { 11 | Instant 12 | // Input is the initial input chain to the instance. This field may be nil in a 13 | // case where the instance is scheduled to start but has not started yet. 14 | // Because, the input chain is only determined once the start alarm triggers. 15 | // 16 | // See: Participant.StartInstanceAt. 17 | Input *ECChain 18 | } 19 | 20 | // Progress gets the latest GPBFT instance progress. 21 | type Progress func() InstanceProgress 22 | 23 | // ProgressObserver defines an interface for observing and being notified about 24 | // the progress of a GPBFT instance as it advances through different instance, 25 | // rounds or phases. 26 | type ProgressObserver interface { 27 | // NotifyProgress is called to notify the observer about the progress of GPBFT 28 | // instance, round or phase. 29 | NotifyProgress(instant InstanceProgress) 30 | } 31 | 32 | type atomicProgression struct { 33 | progression atomic.Pointer[InstanceProgress] 34 | } 35 | 36 | func newAtomicProgression() *atomicProgression { 37 | return &atomicProgression{} 38 | } 39 | 40 | func (a *atomicProgression) NotifyProgress(instant InstanceProgress) { 41 | a.progression.Store(&instant) 42 | } 43 | 44 | func (a *atomicProgression) Get() (instant InstanceProgress) { 45 | if latest := a.progression.Load(); latest != nil { 46 | instant = *latest 47 | } 48 | return 49 | } 50 | -------------------------------------------------------------------------------- /blssig/cache_test.go: -------------------------------------------------------------------------------- 1 | package blssig 2 | 3 | import ( 4 | "runtime" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | "go.dedis.ch/kyber/v4/sign/bdn" 9 | 10 | "github.com/filecoin-project/go-f3/gpbft" 11 | bls12381 "github.com/filecoin-project/go-f3/internal/gnark" 12 | ) 13 | 14 | const maxCacheMemory uint64 = 10 << 20 // 10MiB 15 | 16 | func TestCacheMemory(t *testing.T) { 17 | suite := bls12381.NewSuiteBLS12381() 18 | scheme := bdn.NewSchemeOnG2(suite) 19 | 20 | rand := suite.RandomStream() 21 | keys := make([]gpbft.PubKey, maxPointCacheSize+1) 22 | for i := range keys { 23 | _, pub := scheme.NewKeyPair(rand) 24 | pubKeyB, err := pub.MarshalBinary() 25 | require.NoError(t, err) 26 | require.Len(t, pubKeyB, 48) 27 | keys[i] = pubKeyB 28 | } 29 | 30 | runtime.GC() 31 | runtime.GC() 32 | var beforeMemStats, afterMemStats runtime.MemStats 33 | runtime.ReadMemStats(&beforeMemStats) 34 | v := VerifierWithKeyOnG1() 35 | for _, k := range keys[1:] { 36 | _, err := v.pubkeyToPoint(k) 37 | require.NoError(t, err) 38 | } 39 | runtime.GC() 40 | runtime.GC() 41 | runtime.ReadMemStats(&afterMemStats) 42 | memUse := afterMemStats.HeapAlloc - beforeMemStats.HeapAlloc 43 | t.Log(memUse) 44 | require.Less(t, memUse, maxCacheMemory) 45 | 46 | require.Len(t, v.pointCache, maxPointCacheSize) 47 | 48 | _, err := v.pubkeyToPoint(keys[1]) 49 | require.NoError(t, err) 50 | 51 | require.Len(t, v.pointCache, maxPointCacheSize) 
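	// keys[0] was never cached; looking it up while the cache is already at
	// capacity is expected to reset the cache so that only the newly added
	// point remains, which the final length assertion below checks.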
52 | 53 | _, err = v.pubkeyToPoint(keys[0]) 54 | require.NoError(t, err) 55 | 56 | require.Len(t, v.pointCache, 1) 57 | } 58 | -------------------------------------------------------------------------------- /gpbft/committee.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | ) 8 | 9 | var _ CommitteeProvider = (*cachedCommitteeProvider)(nil) 10 | 11 | type cachedCommitteeProvider struct { 12 | delegate CommitteeProvider 13 | 14 | // mu guards access to committees. 15 | mu sync.Mutex 16 | committees map[uint64]*Committee 17 | } 18 | 19 | func newCachedCommitteeProvider(delegate CommitteeProvider) *cachedCommitteeProvider { 20 | return &cachedCommitteeProvider{ 21 | delegate: delegate, 22 | committees: make(map[uint64]*Committee), 23 | } 24 | } 25 | 26 | func (c *cachedCommitteeProvider) GetCommittee(ctx context.Context, instance uint64) (*Committee, error) { 27 | c.mu.Lock() 28 | defer c.mu.Unlock() 29 | if committee, found := c.committees[instance]; found { 30 | return committee, nil 31 | } 32 | switch committee, err := c.delegate.GetCommittee(ctx, instance); { 33 | case err != nil: 34 | return nil, fmt.Errorf("instance %d: %w: %w", instance, ErrValidationNoCommittee, err) 35 | case committee == nil: 36 | return nil, fmt.Errorf("unexpected nil committee for instance %d", instance) 37 | default: 38 | c.committees[instance] = committee 39 | return committee, nil 40 | } 41 | } 42 | 43 | // EvictCommitteesBefore evicts any cached committees that correspond to 44 | // instances prior to the given instance. 45 | func (c *cachedCommitteeProvider) EvictCommitteesBefore(instance uint64) { 46 | c.mu.Lock() 47 | defer c.mu.Unlock() 48 | for i := range c.committees { 49 | if i < instance { 50 | delete(c.committees, i) 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /gen/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/filecoin-project/go-f3/certexchange" 8 | "github.com/filecoin-project/go-f3/certs" 9 | "github.com/filecoin-project/go-f3/certstore" 10 | "github.com/filecoin-project/go-f3/chainexchange" 11 | "github.com/filecoin-project/go-f3/gpbft" 12 | gen "github.com/whyrusleeping/cbor-gen" 13 | "golang.org/x/sync/errgroup" 14 | ) 15 | 16 | //go:generate go run . 
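// Note: running go generate in this directory (or go generate ./... from the
// repository root) executes this program via the directive above, regenerating
// the cbor_gen.go files that main writes out below.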
17 | 18 | func main() { 19 | var eg errgroup.Group 20 | eg.Go(func() error { 21 | return gen.WriteTupleEncodersToFile("../gpbft/cbor_gen.go", "gpbft", 22 | gpbft.TipSet{}, 23 | gpbft.LegacyECChain{}, 24 | gpbft.GMessage{}, 25 | gpbft.PartialGMessage{}, 26 | gpbft.SupplementalData{}, 27 | gpbft.Payload{}, 28 | gpbft.Justification{}, 29 | gpbft.PowerEntry{}, 30 | gpbft.PowerEntries{}, 31 | ) 32 | }) 33 | eg.Go(func() error { 34 | return gen.WriteTupleEncodersToFile("../certs/cbor_gen.go", "certs", 35 | certs.PowerTableDelta{}, 36 | certs.PowerTableDiff{}, 37 | certs.FinalityCertificate{}, 38 | ) 39 | }) 40 | eg.Go(func() error { 41 | return gen.WriteTupleEncodersToFile("../certexchange/cbor_gen.go", "certexchange", 42 | certexchange.Request{}, 43 | certexchange.ResponseHeader{}, 44 | ) 45 | }) 46 | eg.Go(func() error { 47 | return gen.WriteTupleEncodersToFile("../chainexchange/cbor_gen.go", "chainexchange", 48 | chainexchange.Message{}, 49 | ) 50 | }) 51 | eg.Go(func() error { 52 | return gen.WriteTupleEncodersToFile("../certstore/cbor_gen.go", "certstore", 53 | certstore.SnapshotHeader{}, 54 | ) 55 | }) 56 | if err := eg.Wait(); err != nil { 57 | fmt.Printf("Failed to complete cborg_gen: %v\n", err) 58 | os.Exit(1) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /internal/gnark/suite_test.go: -------------------------------------------------------------------------------- 1 | package gnark 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/binary" 6 | "encoding/hex" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/require" 10 | "go.dedis.ch/kyber/v4/pairing" 11 | 12 | "go.dedis.ch/kyber/v4" 13 | ) 14 | 15 | func TestVerifySigOnG2(t *testing.T) { 16 | pk := "868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31" 17 | sig := "8d61d9100567de44682506aea1a7a6fa6e5491cd27a0a0ed349ef6910ac5ac20ff7bc3e09d7c046566c9f7f3c6f3b10104990e7cb424998203d8f7de586fb7fa5f60045417a432684f85093b06ca91c769f0e7ca19268375e659c2a2352b4655" 18 | prevSig := "176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a" 19 | round := uint64(1) 20 | 21 | suite := NewSuite() 22 | pkb, _ := hex.DecodeString(pk) 23 | pubkeyP := suite.G1().Point() 24 | require.NoError(t, pubkeyP.UnmarshalBinary(pkb)) 25 | sigb, _ := hex.DecodeString(sig) 26 | sigP := suite.G2().Point() 27 | require.NoError(t, sigP.UnmarshalBinary(sigb)) 28 | prev, _ := hex.DecodeString(prevSig) 29 | h := sha256.New() 30 | h.Write(prev) 31 | _ = binary.Write(h, binary.BigEndian, round) 32 | msg := h.Sum(nil) 33 | 34 | base := suite.G1().Point().Base().Clone() 35 | MsgP := suite.G2().Point().(kyber.HashablePoint).Hash(msg) 36 | if !suite.ValidatePairing(base, sigP, pubkeyP, MsgP) { 37 | t.Fatalf("Error validating pairing") 38 | } 39 | } 40 | 41 | func TestImplementInterfaces(_ *testing.T) { 42 | var _ kyber.Point = &G1Elt{} 43 | var _ kyber.Point = &G2Elt{} 44 | var _ kyber.Point = >Elt{} 45 | var _ kyber.HashablePoint = &G1Elt{} 46 | var _ kyber.HashablePoint = &G2Elt{} 47 | // var _ kyber.hashablePoint = &KyberGT{} // GT is not hashable for now 48 | var _ kyber.Group = &groupBls{} 49 | var _ pairing.Suite = &Suite{} 50 | } 51 | -------------------------------------------------------------------------------- /sim/signing/bls.go: -------------------------------------------------------------------------------- 1 | package signing 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "sync" 7 | 8 | "go.dedis.ch/kyber/v4/pairing" 9 | "go.dedis.ch/kyber/v4/sign/bdn" 10 
| 11 | "github.com/filecoin-project/go-f3/blssig" 12 | "github.com/filecoin-project/go-f3/gpbft" 13 | bls12381 "github.com/filecoin-project/go-f3/internal/gnark" 14 | ) 15 | 16 | var _ Backend = (*BLSBackend)(nil) 17 | 18 | type BLSBackend struct { 19 | gpbft.Verifier 20 | suite pairing.Suite 21 | scheme *bdn.Scheme 22 | 23 | // signersMutex guards access to signersByPubKey. 24 | signersMutex sync.RWMutex 25 | signersByPubKey map[string]*blssig.Signer 26 | } 27 | 28 | func (b *BLSBackend) Sign(ctx context.Context, sender gpbft.PubKey, msg []byte) ([]byte, error) { 29 | b.signersMutex.RLock() 30 | signer, known := b.signersByPubKey[string(sender)] 31 | b.signersMutex.RUnlock() 32 | 33 | if !known { 34 | return nil, errors.New("cannot sign: unknown sender") 35 | } 36 | return signer.Sign(ctx, sender, msg) 37 | } 38 | 39 | func NewBLSBackend() *BLSBackend { 40 | suite := bls12381.NewSuiteBLS12381() 41 | return &BLSBackend{ 42 | Verifier: blssig.VerifierWithKeyOnG1(), 43 | signersByPubKey: make(map[string]*blssig.Signer), 44 | suite: suite, 45 | scheme: bdn.NewSchemeOnG2(suite), 46 | } 47 | } 48 | 49 | func (b *BLSBackend) GenerateKey() (gpbft.PubKey, any) { 50 | 51 | priv, pub := b.scheme.NewKeyPair(b.suite.RandomStream()) 52 | pubKeyB, err := pub.MarshalBinary() 53 | if err != nil { 54 | panic(err) 55 | } 56 | 57 | b.signersMutex.Lock() 58 | defer b.signersMutex.Unlock() 59 | b.signersByPubKey[string(pubKeyB)] = blssig.SignerWithKeyOnG1(pubKeyB, priv) 60 | return pubKeyB, priv 61 | } 62 | 63 | func (b *BLSBackend) MarshalPayloadForSigning(nn gpbft.NetworkName, p *gpbft.Payload) []byte { 64 | return p.MarshalForSigning(nn) 65 | } 66 | -------------------------------------------------------------------------------- /test/constants_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/filecoin-project/go-f3/gpbft" 7 | "github.com/filecoin-project/go-f3/sim" 8 | "github.com/filecoin-project/go-f3/sim/latency" 9 | ) 10 | 11 | const ( 12 | // tipSetGeneratorSeed is a test random seed from Drand. 13 | tipSetGeneratorSeed = 0x264803e715714f95 14 | 15 | latencyAsync = 100 * time.Millisecond 16 | maxRounds = 10 17 | EcEpochDuration = 30 * time.Second 18 | EcStabilisationDelay = 3 * time.Second 19 | ) 20 | 21 | var ( 22 | oneStoragePower = gpbft.NewStoragePower(1) 23 | uniformOneStoragePower = sim.UniformStoragePower(oneStoragePower) 24 | 25 | // testGpbftOptions is configuration constants used across most tests. 26 | // These values are not intended to reflect real-world conditions. 27 | // The latency and delta values are similar in order to stress "slow" message paths and interleaving. 28 | // The values are not appropriate for benchmarks. 
29 | testGpbftOptions = []gpbft.Option{ 30 | gpbft.WithDelta(200 * time.Millisecond), 31 | gpbft.WithDeltaBackOffExponent(1.300), 32 | gpbft.WithRebroadcastBackoff(1.3, 0, time.Second, 5*time.Second), 33 | } 34 | ) 35 | 36 | func syncOptions(o ...sim.Option) []sim.Option { 37 | return append(o, 38 | sim.WithLatencyModeler(func() (latency.Model, error) { return latency.None, nil }), 39 | sim.WithECEpochDuration(EcEpochDuration), 40 | sim.WitECStabilisationDelay(EcStabilisationDelay), 41 | sim.WithGpbftOptions(testGpbftOptions...), 42 | ) 43 | } 44 | 45 | func asyncOptions(latencySeed int, o ...sim.Option) []sim.Option { 46 | return append(o, 47 | sim.WithLatencyModeler(func() (latency.Model, error) { 48 | return latency.NewLogNormal(int64(latencySeed), latencyAsync), nil 49 | }), 50 | sim.WithECEpochDuration(EcEpochDuration), 51 | sim.WitECStabilisationDelay(EcStabilisationDelay), 52 | sim.WithGpbftOptions(testGpbftOptions...), 53 | ) 54 | } 55 | -------------------------------------------------------------------------------- /ec/caching.go: -------------------------------------------------------------------------------- 1 | package ec 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/filecoin-project/go-f3/gpbft" 8 | lru "github.com/hashicorp/golang-lru/v2" 9 | "golang.org/x/sync/singleflight" 10 | ) 11 | 12 | type PowerCachingECWrapper struct { 13 | Backend 14 | 15 | cache *lru.Cache[string, gpbft.PowerEntries] 16 | 17 | smaphore chan struct{} 18 | dedup singleflight.Group 19 | } 20 | 21 | func NewPowerCachingECWrapper(backend Backend, concurrency int, cacheSize int) *PowerCachingECWrapper { 22 | cache, err := lru.New[string, gpbft.PowerEntries](cacheSize) 23 | if err != nil { 24 | panic(err) 25 | } 26 | smaphore := make(chan struct{}, concurrency) 27 | 28 | return &PowerCachingECWrapper{ 29 | Backend: backend, 30 | cache: cache, 31 | smaphore: smaphore, 32 | } 33 | } 34 | 35 | func (p *PowerCachingECWrapper) GetPowerTable(ctx context.Context, tsk gpbft.TipSetKey) (gpbft.PowerEntries, error) { 36 | entry, ok := p.cache.Get(string(tsk)) 37 | if ok { 38 | return entry, nil 39 | } 40 | 41 | ch := p.dedup.DoChan(string(tsk), 42 | // break context cancellation chain as the dedup group might start with short context and then get called with longer one 43 | func() (any, error) { return p.executeGetPowerTable(context.WithoutCancel(ctx), tsk) }) 44 | 45 | select { 46 | case <-ctx.Done(): 47 | return nil, ctx.Err() 48 | case res := <-ch: 49 | if res.Err != nil { 50 | return nil, fmt.Errorf("getting power table: %w", res.Err) 51 | } 52 | return res.Val.(gpbft.PowerEntries), nil 53 | } 54 | } 55 | 56 | func (p *PowerCachingECWrapper) executeGetPowerTable(ctx context.Context, tsk gpbft.TipSetKey) (gpbft.PowerEntries, error) { 57 | // take semaphore 58 | p.smaphore <- struct{}{} 59 | defer func() { <-p.smaphore }() 60 | 61 | res, err := p.Backend.GetPowerTable(ctx, tsk) 62 | if err != nil { 63 | return nil, fmt.Errorf("getting power table: %w", err) 64 | } 65 | 66 | p.cache.Add(string(tsk), res) 67 | return res, nil 68 | } 69 | -------------------------------------------------------------------------------- /.github/workflows/go-fuzz.yml: -------------------------------------------------------------------------------- 1 | name: Go Fuzz 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | merge_group: 9 | types: 10 | - checks_requested 11 | 12 | jobs: 13 | fuzz: 14 | name: go-fuzz 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | - uses: 
actions/setup-go@v5 19 | with: 20 | go-version: '1.22' 21 | - name: Fuzz 22 | env: 23 | FUZZTIME: 30s 24 | GOGC: '100' 25 | run: make fuzz 26 | - name: Upload fuzz failure seed corpus as run artifact 27 | if: failure() 28 | uses: actions/upload-artifact@v4 29 | id: testdata-upload 30 | with: 31 | name: testdata 32 | path: '**/testdata/fuzz' 33 | - name: Output message 34 | if: failure() 35 | shell: bash 36 | run: | 37 | echo -e 'Fuzz test failed on commit https://github.com/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha }}. To troubleshoot locally, use the GitHub CLI to download the seed corpus by running:\n $ gh run download ${ github.run_id } -n testdata\nAlternatively, download from:\n ${{ steps.testdata-upload.outputs.artifact-url }}' 38 | - name: Post PR comment 39 | uses: actions/github-script@v7 40 | if: failure() && github.event_name == 'pull_request' 41 | with: 42 | github-token: ${{ secrets.GITHUB_TOKEN }} 43 | script: | 44 | github.rest.issues.createComment({ 45 | issue_number: context.issue.number, 46 | owner: context.repo.owner, 47 | repo: context.repo.repo, 48 | body: 'Fuzz test failed on commit ${{ github.event.pull_request.head.sha }}. To troubleshoot locally, download the seed corpus using [GitHub CLI](https://cli.github.com) by running:\n```shell\ngh run download ${{ github.run_id }} -n testdata\n```\nAlternatively, download directly from [here](${{ steps.testdata-upload.outputs.artifact-url }}).' 49 | }) 50 | -------------------------------------------------------------------------------- /internal/measurements/sample_set_test.go: -------------------------------------------------------------------------------- 1 | package measurements_test 2 | 3 | import ( 4 | "crypto/rand" 5 | "testing" 6 | 7 | "github.com/filecoin-project/go-f3/internal/measurements" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestSampleSet(t *testing.T) { 12 | subject := measurements.NewSampleSet(2) 13 | 14 | v1 := []byte("fish") 15 | v2 := []byte("lobster") 16 | v3 := []byte("barreleye") 17 | v4 := []byte("lobstermuncher") 18 | 19 | t.Run("does not contain unseen values", func(t *testing.T) { 20 | require.False(t, subject.Contains(v1)) 21 | require.False(t, subject.Contains(v2)) 22 | }) 23 | t.Run("contains seen values", func(t *testing.T) { 24 | require.True(t, subject.Contains(v1)) 25 | require.True(t, subject.Contains(v2)) 26 | }) 27 | t.Run("evicts first half once 2X capacity is reached", func(t *testing.T) { 28 | require.False(t, subject.Contains(v3)) 29 | require.True(t, subject.Contains(v1)) 30 | require.True(t, subject.Contains(v2)) 31 | 32 | require.False(t, subject.Contains(v4)) 33 | require.False(t, subject.Contains(v1)) 34 | require.False(t, subject.Contains(v2)) 35 | }) 36 | t.Run("limits keys to 96 bytes", func(t *testing.T) { 37 | longKey := make([]byte, 100) 38 | n, err := rand.Read(longKey) 39 | require.NoError(t, err) 40 | require.Equal(t, 100, n) 41 | require.False(t, subject.Contains(longKey)) 42 | require.True(t, subject.Contains(longKey)) 43 | require.True(t, subject.Contains(longKey[:96])) 44 | }) 45 | } 46 | 47 | func TestSampleSet_MinSizeIsOne(t *testing.T) { 48 | subject := measurements.NewSampleSet(-1) 49 | require.False(t, subject.Contains([]byte("a"))) 50 | require.False(t, subject.Contains([]byte("b"))) 51 | require.False(t, subject.Contains([]byte("c"))) 52 | 53 | require.False(t, subject.Contains([]byte("a"))) 54 | require.True(t, subject.Contains([]byte("a"))) 55 | 56 | require.False(t, subject.Contains([]byte("b"))) 57 | 
require.True(t, subject.Contains([]byte("b"))) 58 | 59 | require.False(t, subject.Contains([]byte("c"))) 60 | require.True(t, subject.Contains([]byte("c"))) 61 | } 62 | -------------------------------------------------------------------------------- /ec/powerdelta.go: -------------------------------------------------------------------------------- 1 | package ec 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "maps" 7 | "slices" 8 | "sort" 9 | 10 | "github.com/filecoin-project/go-f3/gpbft" 11 | ) 12 | 13 | func WithModifiedPower(backend Backend, explicitPower gpbft.PowerEntries, ignoreEcPower bool) Backend { 14 | if len(explicitPower) == 0 && !ignoreEcPower { 15 | return backend 16 | } 17 | 18 | explicitPower = slices.Clone(explicitPower) 19 | sort.Sort(explicitPower) 20 | 21 | if ignoreEcPower { 22 | return &withReplacedPower{ 23 | Backend: backend, 24 | power: trimPowerTable(explicitPower), 25 | } 26 | } 27 | 28 | index := make(map[gpbft.ActorID]int, len(explicitPower)) 29 | for i, entry := range explicitPower { 30 | index[entry.ID] = i 31 | } 32 | return &withModifiedPower{ 33 | Backend: backend, 34 | explicit: explicitPower, 35 | explicitIndex: index, 36 | } 37 | } 38 | 39 | type withReplacedPower struct { 40 | Backend 41 | power gpbft.PowerEntries 42 | } 43 | 44 | func (b *withReplacedPower) GetPowerTable(ctx context.Context, ts gpbft.TipSetKey) (gpbft.PowerEntries, error) { 45 | return b.power, nil 46 | } 47 | 48 | type withModifiedPower struct { 49 | Backend 50 | explicit gpbft.PowerEntries 51 | explicitIndex map[gpbft.ActorID]int 52 | } 53 | 54 | func (b *withModifiedPower) GetPowerTable(ctx context.Context, ts gpbft.TipSetKey) (gpbft.PowerEntries, error) { 55 | pt, err := b.Backend.GetPowerTable(ctx, ts) 56 | if err != nil { 57 | return nil, fmt.Errorf("getting power table: %w", err) 58 | } 59 | pt = slices.Clone(pt) 60 | index := maps.Clone(b.explicitIndex) 61 | for i := range pt { 62 | e := &pt[i] 63 | if idx, ok := index[e.ID]; ok { 64 | *e = b.explicit[idx] 65 | delete(index, e.ID) 66 | } 67 | } 68 | for _, idx := range index { 69 | pt = append(pt, b.explicit[idx]) 70 | } 71 | sort.Sort(pt) 72 | return trimPowerTable(pt), nil 73 | } 74 | 75 | func trimPowerTable(pt gpbft.PowerEntries) gpbft.PowerEntries { 76 | newLen := len(pt) 77 | for newLen > 0 && pt[newLen-1].Power.Sign() == 0 { 78 | newLen-- 79 | } 80 | return pt[:newLen] 81 | } 82 | -------------------------------------------------------------------------------- /ec/powerdelta_test.go: -------------------------------------------------------------------------------- 1 | package ec_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/filecoin-project/go-f3/ec" 8 | "github.com/filecoin-project/go-f3/gpbft" 9 | "github.com/filecoin-project/go-f3/internal/consensus" 10 | 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | var powerTableA = gpbft.PowerEntries{ 15 | {ID: 1, Power: gpbft.NewStoragePower(50)}, 16 | {ID: 2, Power: gpbft.NewStoragePower(30)}, 17 | {ID: 3, Power: gpbft.NewStoragePower(29)}, 18 | } 19 | 20 | var powerTableB = gpbft.PowerEntries{ 21 | {ID: 3, Power: gpbft.NewStoragePower(10)}, 22 | {ID: 4, Power: gpbft.NewStoragePower(4)}, 23 | {ID: 2, Power: gpbft.NewStoragePower(0)}, 24 | } 25 | 26 | var powerTableC = gpbft.PowerEntries{ 27 | {ID: 1, Power: gpbft.NewStoragePower(50)}, 28 | {ID: 3, Power: gpbft.NewStoragePower(10)}, 29 | {ID: 4, Power: gpbft.NewStoragePower(4)}, 30 | } 31 | 32 | func TestReplacePowerTable(t *testing.T) { 33 | backend := 
consensus.NewFakeEC(consensus.WithInitialPowerTable(powerTableA)) 34 | modifiedBackend := ec.WithModifiedPower(backend, powerTableB, true) 35 | 36 | head, err := modifiedBackend.GetHead(context.Background()) 37 | require.NoError(t, err) 38 | 39 | // Replaces the power table, but doesn't return the "0" entries. 40 | pt, err := modifiedBackend.GetPowerTable(context.Background(), head.Key()) 41 | require.NoError(t, err) 42 | require.EqualValues(t, powerTableB[:2], pt) 43 | } 44 | 45 | func TestModifyPowerTable(t *testing.T) { 46 | backend := consensus.NewFakeEC(consensus.WithInitialPowerTable(powerTableA)) 47 | modifiedBackend := ec.WithModifiedPower(backend, powerTableB, false) 48 | 49 | head, err := modifiedBackend.GetHead(context.Background()) 50 | require.NoError(t, err) 51 | 52 | pt, err := modifiedBackend.GetPowerTable(context.Background(), head.Key()) 53 | require.NoError(t, err) 54 | require.EqualValues(t, powerTableC, pt) 55 | } 56 | 57 | func TestBypassModifiedPowerTable(t *testing.T) { 58 | backend := consensus.NewFakeEC(consensus.WithInitialPowerTable(powerTableA)) 59 | modifiedBackend := ec.WithModifiedPower(backend, nil, false) 60 | require.Equal(t, backend, modifiedBackend) 61 | } 62 | -------------------------------------------------------------------------------- /certexchange/polling/discovery.go: -------------------------------------------------------------------------------- 1 | package polling 2 | 3 | import ( 4 | "context" 5 | "slices" 6 | 7 | "github.com/libp2p/go-libp2p/core/event" 8 | "github.com/libp2p/go-libp2p/core/host" 9 | "github.com/libp2p/go-libp2p/core/peer" 10 | "github.com/libp2p/go-libp2p/core/protocol" 11 | 12 | "github.com/filecoin-project/go-f3/certexchange" 13 | "github.com/filecoin-project/go-f3/gpbft" 14 | ) 15 | 16 | func discoverPeers(ctx context.Context, h host.Host, nn gpbft.NetworkName) (<-chan peer.ID, error) { 17 | out := make(chan peer.ID, 256) 18 | discoveryEvents, err := h.EventBus().Subscribe([]any{ 19 | new(event.EvtPeerIdentificationCompleted), 20 | new(event.EvtPeerProtocolsUpdated), 21 | }) 22 | if err != nil { 23 | return nil, err 24 | } 25 | 26 | targetProtocol := certexchange.FetchProtocolName(nn) 27 | 28 | // record existing peers. 29 | fillInitialPeers: 30 | for _, p := range h.Network().Peers() { 31 | if proto, err := h.Peerstore().FirstSupportedProtocol(p, targetProtocol); err == nil && proto == targetProtocol { 32 | select { 33 | case out <- p: 34 | default: 35 | // Don't block because we've subscribed to libp2p events. 36 | break fillInitialPeers 37 | } 38 | } 39 | } 40 | 41 | // Then start listening for new peers 42 | go func() { 43 | defer close(out) 44 | defer discoveryEvents.Close() 45 | 46 | for { 47 | var ( 48 | evt any 49 | ok bool 50 | ) 51 | select { 52 | case evt, ok = <-discoveryEvents.Out(): 53 | case <-ctx.Done(): 54 | } 55 | if !ok { 56 | return 57 | } 58 | 59 | var protos []protocol.ID 60 | var peer peer.ID 61 | switch e := evt.(type) { 62 | case event.EvtPeerIdentificationCompleted: 63 | protos = e.Protocols 64 | peer = e.Peer 65 | case event.EvtPeerProtocolsUpdated: 66 | protos = e.Added 67 | peer = e.Peer 68 | default: 69 | continue 70 | } 71 | if slices.Contains(protos, targetProtocol) { 72 | // If the channel is full, ignore newly discovered peers. We 73 | // likely have enough anyways and we'll drain the channel 74 | // eventually. 
75 | select { 76 | case out <- peer: 77 | default: 78 | } 79 | } 80 | } 81 | }() 82 | return out, nil 83 | } 84 | -------------------------------------------------------------------------------- /gpbft/progress_test.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestAtomicProgression(t *testing.T) { 11 | subject := newAtomicProgression() 12 | t.Run("zero value", func(t *testing.T) { 13 | instant := subject.Get() 14 | require.Equal(t, uint64(0), instant.ID, "Expected initial instance to be 0") 15 | require.Equal(t, uint64(0), instant.Round, "Expected initial round to be 0") 16 | require.Equal(t, INITIAL_PHASE, instant.Phase, "Expected initial phase to be INITIAL_PHASE") 17 | }) 18 | t.Run("notify and get", func(t *testing.T) { 19 | subject.NotifyProgress(InstanceProgress{ 20 | Instant: Instant{1, 10, PREPARE_PHASE}, 21 | }) 22 | instant := subject.Get() 23 | require.Equal(t, uint64(1), instant.ID, "Expected instance to be 1") 24 | require.Equal(t, uint64(10), instant.Round, "Expected round to be 10") 25 | require.Equal(t, PREPARE_PHASE, instant.Phase, "Expected phase to be PREPARE_PHASE") 26 | }) 27 | t.Run("notify and get progresses", func(t *testing.T) { 28 | subject.NotifyProgress(InstanceProgress{ 29 | Instant: Instant{2, 20, COMMIT_PHASE}, 30 | }) 31 | instant := subject.Get() 32 | require.Equal(t, uint64(2), instant.ID, "Expected instance to be updated to 2") 33 | require.Equal(t, uint64(20), instant.Round, "Expected round to be updated to 20") 34 | require.Equal(t, COMMIT_PHASE, instant.Phase, "Expected phase to be updated to COMMIT_PHASE") 35 | }) 36 | t.Run("concurrent update", func(t *testing.T) { 37 | var wg sync.WaitGroup 38 | update := func(inst, rnd uint64, ph Phase) { 39 | defer wg.Done() 40 | subject.NotifyProgress(InstanceProgress{ 41 | Instant: Instant{inst, rnd, ph}, 42 | }) 43 | } 44 | wg.Add(2) 45 | go update(3, 30, COMMIT_PHASE) 46 | go update(4, 40, DECIDE_PHASE) 47 | wg.Wait() 48 | 49 | instant := subject.Get() 50 | require.True(t, instant.ID == 3 || instant.ID == 4, "Instance should match one of the updates") 51 | require.True(t, instant.Round == 30 || instant.Round == 40, "Round should match one of the updates") 52 | require.True(t, instant.Phase == COMMIT_PHASE || instant.Phase == DECIDE_PHASE, "Phase should match one of the updates") 53 | }) 54 | } 55 | -------------------------------------------------------------------------------- /test/drop_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "testing" 7 | 8 | "github.com/filecoin-project/go-f3/gpbft" 9 | "github.com/filecoin-project/go-f3/sim" 10 | "github.com/filecoin-project/go-f3/sim/adversary" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestDrop_ReachesConsensusDespiteMessageLoss(t *testing.T) { 15 | SkipInRaceMode(t) 16 | t.Parallel() 17 | const ( 18 | instanceCount = 5000 19 | gst = 1000 * EcEpochDuration 20 | dropAdversaryTarget = 0 21 | ) 22 | messageLossProbabilities := []float64{0.01, 0.05, 0.1, 0.2, 0.5, 0.8, 1.0} 23 | tests := []struct { 24 | name string 25 | options []sim.Option 26 | }{ 27 | { 28 | name: "sync", 29 | options: syncOptions(), 30 | }, 31 | { 32 | name: "async", 33 | options: asyncOptions(-9856210), 34 | }, 35 | } 36 | for _, test := range tests { 37 | for _, lossProbability := range messageLossProbabilities 
{ 38 | name := fmt.Sprintf("%s %.0f%% loss", test.name, lossProbability*100) 39 | t.Run(name, func(t *testing.T) { 40 | t.Parallel() 41 | ecChainGenerator := sim.NewUniformECChainGenerator(54445, 1, 1) 42 | var opts []sim.Option 43 | opts = append(opts, test.options...) 44 | opts = append(opts, 45 | sim.AddHonestParticipants(5, ecChainGenerator, uniformOneStoragePower), 46 | sim.WithAdversary(adversary.NewDropGenerator(oneStoragePower, 25, lossProbability, gst, dropAdversaryTarget)), 47 | sim.WithGlobalStabilizationTime(gst), 48 | sim.WithIgnoreConsensusFor(dropAdversaryTarget)) 49 | sm, err := sim.NewSimulation(opts...) 50 | require.NoError(t, err) 51 | // Run the simulation for 1 extra instance to give enough time to the drop 52 | // adversary target to complete the instance. Ideally, simulation should take a 53 | // function as the condition to stop the simulation. But for now a quick extra 54 | // round would do the trick. 55 | require.NoErrorf(t, sm.Run(instanceCount+1, maxRounds), "%s", sm.Describe()) 56 | chain := ecChainGenerator.GenerateECChain(instanceCount-1, &gpbft.TipSet{}, math.MaxUint64) 57 | requireConsensusAtInstance(t, sm, instanceCount-1, chain.Head()) 58 | }) 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /internal/gnark/suite.go: -------------------------------------------------------------------------------- 1 | package gnark 2 | 3 | import ( 4 | "crypto/cipher" 5 | "crypto/sha256" 6 | "fmt" 7 | "hash" 8 | "io" 9 | 10 | bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381" 11 | "go.dedis.ch/kyber/v4" 12 | "go.dedis.ch/kyber/v4/pairing" 13 | "go.dedis.ch/kyber/v4/util/random" 14 | "go.dedis.ch/kyber/v4/xof/blake2xb" 15 | ) 16 | 17 | var _ pairing.Suite = Suite{} 18 | 19 | type Suite struct{} 20 | 21 | func NewSuite() (s Suite) { return } 22 | 23 | func (s Suite) String() string { return "bls12381" } 24 | func (s Suite) G1() kyber.Group { return G1 } 25 | func (s Suite) G2() kyber.Group { return G2 } 26 | func (s Suite) GT() kyber.Group { return GT } 27 | 28 | func (s Suite) Pair(p1, p2 kyber.Point) kyber.Point { 29 | aa, bb := p1.(*G1Elt), p2.(*G2Elt) 30 | var g1aff bls12381.G1Affine 31 | g1aff.FromJacobian(&aa.inner) 32 | var g2aff bls12381.G2Affine 33 | g2aff.FromJacobian(&bb.inner) 34 | gt, err := bls12381.Pair([]bls12381.G1Affine{g1aff}, []bls12381.G2Affine{g2aff}) 35 | if err != nil { 36 | panic(fmt.Errorf("error in gnark pairing: %w", err)) 37 | } 38 | 39 | return >Elt{gt} 40 | } 41 | 42 | func (s Suite) ValidatePairing(p1, p2, p3, p4 kyber.Point) bool { 43 | a, b := p1.(*G1Elt), p2.(*G2Elt) 44 | c, d := p3.(*G1Elt), p4.(*G2Elt) 45 | 46 | var aAff, cAff bls12381.G1Affine 47 | var bAff, dAff bls12381.G2Affine 48 | aAff.FromJacobian(&a.inner) 49 | bAff.FromJacobian(&b.inner) 50 | cAff.FromJacobian(&c.inner) 51 | dAff.FromJacobian(&d.inner) 52 | 53 | cAff.Neg(&cAff) 54 | 55 | out, err := bls12381.PairingCheck( 56 | []bls12381.G1Affine{aAff, cAff}, 57 | []bls12381.G2Affine{bAff, dAff}, 58 | ) 59 | if err != nil { 60 | panic(fmt.Errorf("error in gnark pairing: %w", err)) 61 | } 62 | return out 63 | } 64 | 65 | func (s Suite) Read(_ io.Reader, _ ...interface{}) error { 66 | panic("Suite.Read(): deprecated in drand") 67 | } 68 | 69 | func (s Suite) Write(_ io.Writer, _ ...interface{}) error { 70 | panic("Suite.Write(): deprecated in drand") 71 | } 72 | 73 | func (s Suite) Hash() hash.Hash { 74 | return sha256.New() 75 | } 76 | 77 | func (s Suite) XOF(seed []byte) kyber.XOF { 78 | return blake2xb.New(seed) 
79 | } 80 | 81 | func (s Suite) RandomStream() cipher.Stream { 82 | return random.New() 83 | } 84 | -------------------------------------------------------------------------------- /gpbft/legacy_test.go: -------------------------------------------------------------------------------- 1 | package gpbft_test 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "testing" 7 | 8 | "github.com/filecoin-project/go-f3/gpbft" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestLegacyECChain_Marshaling(t *testing.T) { 13 | var ( 14 | tipset1 = gpbft.TipSet{Epoch: 0, Key: gpbft.MakeCid([]byte("fish")).Bytes(), PowerTable: gpbft.MakeCid([]byte("lobster"))} 15 | tipset2 = gpbft.TipSet{Epoch: 1, Key: gpbft.MakeCid([]byte("fishmuncher")).Bytes(), PowerTable: gpbft.MakeCid([]byte("lobstergobler"))} 16 | subject = gpbft.ECChain{TipSets: []*gpbft.TipSet{&tipset1, &tipset2}} 17 | legacySubject = gpbft.LegacyECChain{tipset1, tipset2} 18 | ) 19 | t.Run("CBOR/to legacy", func(t *testing.T) { 20 | var buf bytes.Buffer 21 | require.NoError(t, subject.MarshalCBOR(&buf)) 22 | 23 | var asOldFormat gpbft.LegacyECChain 24 | require.NoError(t, asOldFormat.UnmarshalCBOR(&buf)) 25 | require.Equal(t, subject.Len(), len(asOldFormat)) 26 | for i, want := range subject.TipSets { 27 | got := &asOldFormat[i] 28 | require.True(t, want.Equal(got)) 29 | } 30 | }) 31 | t.Run("CBOR/from legacy", func(t *testing.T) { 32 | var buf bytes.Buffer 33 | require.NoError(t, legacySubject.MarshalCBOR(&buf)) 34 | 35 | var asNewFormat gpbft.ECChain 36 | require.NoError(t, asNewFormat.UnmarshalCBOR(&buf)) 37 | require.Equal(t, len(legacySubject), asNewFormat.Len()) 38 | for i, want := range subject.TipSets { 39 | got := asNewFormat.TipSets[i] 40 | require.True(t, want.Equal(got)) 41 | } 42 | }) 43 | t.Run("JSON/to legacy", func(t *testing.T) { 44 | data, err := json.Marshal(&subject) 45 | require.NoError(t, err) 46 | 47 | var asOldFormat gpbft.LegacyECChain 48 | require.NoError(t, json.Unmarshal(data, &asOldFormat)) 49 | for i, want := range subject.TipSets { 50 | got := &asOldFormat[i] 51 | require.True(t, want.Equal(got)) 52 | } 53 | }) 54 | t.Run("JSON/from legacy", func(t *testing.T) { 55 | data, err := json.Marshal(legacySubject) 56 | require.NoError(t, err) 57 | 58 | var asNewFormat gpbft.ECChain 59 | require.NoError(t, json.Unmarshal(data, &asNewFormat)) 60 | for i, want := range subject.TipSets { 61 | got := asNewFormat.TipSets[i] 62 | require.True(t, want.Equal(got)) 63 | } 64 | }) 65 | } 66 | -------------------------------------------------------------------------------- /internal/caching/set_bench_test.go: -------------------------------------------------------------------------------- 1 | package caching_test 2 | 3 | import ( 4 | "crypto/rand" 5 | "fmt" 6 | mrand "math/rand" 7 | "testing" 8 | 9 | "github.com/filecoin-project/go-f3/internal/caching" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func BenchmarkSet(b *testing.B) { 14 | benchmarkSetBySize(b, 1, 128) 15 | benchmarkSetBySize(b, 100, 128) 16 | benchmarkSetBySize(b, 10_000, 128) 17 | } 18 | 19 | func benchmarkSetBySize(b *testing.B, maxSetSize, valueLen int) { 20 | b.Run(fmt.Sprintf("%d/%d/ContainsOrAdd_New", maxSetSize, valueLen), func(b *testing.B) { 21 | values, size := generateValues(b, maxSetSize*2, valueLen) 22 | b.SetBytes(size) 23 | b.ResetTimer() 24 | b.ReportAllocs() 25 | b.RunParallel(func(pb *testing.PB) { 26 | for pb.Next() { 27 | subject := caching.NewSet(maxSetSize) 28 | for _, value := range values { 29 | contained, err := 
subject.ContainsOrAdd(nil, value) 30 | require.NoError(b, err) 31 | require.False(b, contained) 32 | } 33 | } 34 | }) 35 | }) 36 | 37 | b.Run(fmt.Sprintf("%d/%d/ContainsOrAdd_Existing", maxSetSize, valueLen), func(b *testing.B) { 38 | subject := caching.NewSet(maxSetSize) 39 | maxElementsBeforeEviction := (maxSetSize * 2) - 1 40 | values, size := generateValues(b, maxElementsBeforeEviction, valueLen) 41 | for _, value := range values { 42 | contained, err := subject.ContainsOrAdd(nil, value) 43 | require.NoError(b, err) 44 | require.False(b, contained) 45 | } 46 | 47 | b.SetBytes(size) 48 | b.ResetTimer() 49 | b.ReportAllocs() 50 | b.RunParallel(func(pb *testing.PB) { 51 | for pb.Next() { 52 | for _, value := range values { 53 | contained, err := subject.ContainsOrAdd(nil, value) 54 | require.NoError(b, err) 55 | require.True(b, contained) 56 | } 57 | } 58 | }) 59 | }) 60 | } 61 | 62 | func generateValues(b testing.TB, count, len int) ([][]byte, int64) { 63 | values := make([][]byte, count) 64 | for i := 0; i < count; i++ { 65 | value := make([]byte, len) 66 | n, err := rand.Read(value) 67 | require.NoError(b, err) 68 | require.Equal(b, n, len) 69 | values[i] = value 70 | } 71 | mrand.Shuffle(count, func(one, other int) { 72 | values[one], values[other] = values[other], values[one] 73 | }) 74 | return values, int64(count * len) 75 | } 76 | -------------------------------------------------------------------------------- /certexchange/polling/metrics.go: -------------------------------------------------------------------------------- 1 | package polling 2 | 3 | import ( 4 | "github.com/filecoin-project/go-f3/internal/measurements" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/attribute" 7 | "go.opentelemetry.io/otel/metric" 8 | ) 9 | 10 | var meter = otel.Meter("f3/certexchange/polling") 11 | var metrics = struct { 12 | activePeers metric.Int64Gauge 13 | backoffPeers metric.Int64Gauge 14 | predictedPollingInterval metric.Float64Gauge 15 | pollDuration metric.Float64Histogram 16 | peersPolled metric.Int64Histogram 17 | peersRequiredPerPoll metric.Int64Histogram 18 | pollEfficiency metric.Float64Histogram 19 | }{ 20 | activePeers: measurements.Must(meter.Int64Gauge( 21 | "f3_certexchange_polling_active_peers", 22 | metric.WithDescription("The number of active certificate exchange peers."), 23 | metric.WithUnit("{peer}"), 24 | )), 25 | backoffPeers: measurements.Must(meter.Int64Gauge( 26 | "f3_certexchange_polling_backoff_peers", 27 | metric.WithDescription("The number of active certificate exchange peers on backoff."), 28 | metric.WithUnit("{peer}"), 29 | )), 30 | predictedPollingInterval: measurements.Must(meter.Float64Gauge( 31 | "f3_certexchange_polling_predicted_interval", 32 | metric.WithDescription("The predicted certificate exchange polling interval."), 33 | metric.WithUnit("s"), 34 | )), 35 | pollDuration: measurements.Must(meter.Float64Histogram( 36 | "f3_certexchange_polling_poll_duration", 37 | metric.WithDescription("The certificate exchange total poll duration."), 38 | metric.WithUnit("s"), 39 | )), 40 | peersPolled: measurements.Must(meter.Int64Histogram( 41 | "f3_certexchange_polling_peers_polled", 42 | metric.WithDescription("The number of peers polled per certificate exchange poll."), 43 | metric.WithUnit("{peer}"), 44 | )), 45 | peersRequiredPerPoll: measurements.Must(meter.Int64Histogram( 46 | "f3_certexchange_polling_peers_required_per_poll", 47 | metric.WithDescription("The number of peers we should be selecting per poll (optimally)."), 48 | 
metric.WithUnit("{peer}"), 49 | )), 50 | pollEfficiency: measurements.Must(meter.Float64Histogram( 51 | "f3_certexchange_polling_poll_efficiency", 52 | metric.WithDescription("The fraction of requests necessary to make progress."), 53 | )), 54 | } 55 | 56 | var attrMadeProgress = attribute.Key("made-progress") 57 | -------------------------------------------------------------------------------- /cmd/f3sim/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "os" 8 | "time" 9 | 10 | "github.com/filecoin-project/go-f3/gpbft" 11 | "github.com/filecoin-project/go-f3/sim" 12 | "github.com/filecoin-project/go-f3/sim/latency" 13 | "github.com/filecoin-project/go-f3/sim/signing" 14 | ) 15 | 16 | func main() { 17 | iterations := flag.Int("iterations", 1, "number of simulation iterations") 18 | participantCount := flag.Int("participants", 3, "number of participants") 19 | latencySeed := flag.Int64("latency-seed", time.Now().UnixMilli(), "random seed for network latency") 20 | latencyMean := flag.Float64("latency-mean", 0.500, "mean network latency in seconds") 21 | maxRounds := flag.Uint64("max-rounds", 10, "max rounds to allow before failing") 22 | traceLevel := flag.Int("trace", sim.TraceNone, "trace verbosity level") 23 | 24 | delta := flag.Duration("delta", 2*time.Second, "bound on message delay") 25 | deltaBackOffExponent := flag.Float64("delta-back-off-exponent", 1.300, "exponential factor adjusting the delta value per round") 26 | flag.Parse() 27 | 28 | for i := 0; i < *iterations; i++ { 29 | // Increment seed for successive iterations. 30 | seed := *latencySeed + int64(i) 31 | fmt.Printf("Iteration %d: seed=%d, mean=%f\n", i, seed, *latencyMean) 32 | 33 | options := []sim.Option{ 34 | sim.WithLatencyModeler(func() (latency.Model, error) { 35 | mean := time.Duration(*latencyMean * float64(time.Second)) 36 | return latency.NewLogNormal(*latencySeed, mean), nil 37 | }), 38 | sim.WithECEpochDuration(30 * time.Second), 39 | sim.WithECStabilisationDelay(0), 40 | sim.AddHonestParticipants( 41 | *participantCount, 42 | sim.NewUniformECChainGenerator(uint64(seed), 1, 10), 43 | sim.UniformStoragePower(gpbft.NewStoragePower(1))), 44 | sim.WithTraceLevel(*traceLevel), 45 | sim.WithGpbftOptions( 46 | gpbft.WithDelta(*delta), 47 | gpbft.WithDeltaBackOffExponent(*deltaBackOffExponent), 48 | ), 49 | } 50 | 51 | if os.Getenv("F3_TEST_USE_BLS") == "1" { 52 | options = append(options, sim.WithSigningBackend(signing.NewBLSBackend())) 53 | } 54 | 55 | sm, err := sim.NewSimulation(options...) 
56 | if err != nil { 57 | log.Panicf("failed to instantiate simulation: %v\n", err) 58 | } 59 | 60 | if err := sm.Run(1, *maxRounds); err != nil { 61 | sm.GetInstance(0).Print() 62 | os.Exit(1) 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /chainexchange/metrics.go: -------------------------------------------------------------------------------- 1 | package chainexchange 2 | 3 | import ( 4 | "github.com/filecoin-project/go-f3/internal/measurements" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/attribute" 7 | "go.opentelemetry.io/otel/metric" 8 | ) 9 | 10 | var ( 11 | meter = otel.Meter("f3/chainexchange") 12 | 13 | attrKindWanted = attribute.String("kind", "wanted") 14 | attrKindDiscovered = attribute.String("kind", "discovered") 15 | 16 | metrics = struct { 17 | chains metric.Int64Counter 18 | broadcasts metric.Int64Counter 19 | broadcastChainLen metric.Int64Gauge 20 | notifications metric.Int64Counter 21 | instances metric.Int64UpDownCounter 22 | validatedMessages metric.Int64Counter 23 | validationTime metric.Float64Histogram 24 | }{ 25 | chains: measurements.Must(meter.Int64Counter("f3_chainexchange_chains", metric.WithDescription("Number of chains engaged in chainexhange by status."))), 26 | broadcasts: measurements.Must(meter.Int64Counter("f3_chainexchange_broadcasts", metric.WithDescription("Number of chains broadcasts made by chainexchange."))), 27 | broadcastChainLen: measurements.Must(meter.Int64Gauge("f3_chainexchange_broadcast_chain_length", metric.WithDescription("The latest length of broadcasted chain."))), 28 | notifications: measurements.Must(meter.Int64Counter("f3_chainexchange_notifications", metric.WithDescription("Number of chain discovery notified by chainexchange."))), 29 | instances: measurements.Must(meter.Int64UpDownCounter("f3_chainexchange_instances", metric.WithDescription("Number of instances engaged in chainexchage."))), 30 | validatedMessages: measurements.Must(meter.Int64Counter("f3_chainexchange_validated_messages", metric.WithDescription("Number of pubsub messages validated tagged by result."))), 31 | validationTime: measurements.Must(meter.Float64Histogram("f3_chainexchange_validation_time", 32 | metric.WithDescription("Histogram of time spent validating chainexchange messages in seconds."), 33 | metric.WithExplicitBucketBoundaries(0.001, 0.002, 0.003, 0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 1.0, 10.0), 34 | metric.WithUnit("s"), 35 | )), 36 | } 37 | ) 38 | 39 | func attrFromWantedDiscovered(wanted, discovered bool) attribute.Set { 40 | return attribute.NewSet( 41 | attribute.Bool("wanted", wanted), 42 | attribute.Bool("discovered", discovered), 43 | ) 44 | } 45 | -------------------------------------------------------------------------------- /sim/latency/log_normal.go: -------------------------------------------------------------------------------- 1 | package latency 2 | 3 | import ( 4 | "math" 5 | "math/rand" 6 | "sync" 7 | "time" 8 | 9 | "github.com/filecoin-project/go-f3/gpbft" 10 | ) 11 | 12 | var _ Model = (*LogNormal)(nil) 13 | 14 | // LogNormal represents a log normal latency distribution with a configurable 15 | // mean latency. This latency model does not specialise based on host clock time 16 | // nor participants. 17 | type LogNormal struct { 18 | rng *rand.Rand 19 | mean time.Duration 20 | 21 | // latencyFromToLock protects concurrent access to latencyFromTo map. 
22 | latencyFromToLock sync.Mutex 23 | latencyFromTo map[gpbft.ActorID]map[gpbft.ActorID]time.Duration 24 | } 25 | 26 | // NewLogNormal instantiates a new latency model of log normal latency 27 | // distribution with the given mean. This model will always return zero if mean 28 | // latency duration is less than or equal to zero. 29 | func NewLogNormal(seed int64, mean time.Duration) *LogNormal { 30 | return &LogNormal{ 31 | rng: rand.New(rand.NewSource(seed)), 32 | mean: mean, 33 | latencyFromTo: make(map[gpbft.ActorID]map[gpbft.ActorID]time.Duration), 34 | } 35 | } 36 | 37 | // Sample returns latency samples that correspond to the log normal distribution 38 | // with the configured mean. The samples returned disregard time and 39 | // participants, i.e. all the samples returned correspond to a fixed log normal 40 | // distribution. Latency from one participant to another may be asymmetric and 41 | // once generated remains constant for the lifetime of a simulation. 42 | // 43 | // Note, when mean configured latency is not larger than zero the latency sample will 44 | // always be zero. 45 | func (l *LogNormal) Sample(_ time.Time, from gpbft.ActorID, to gpbft.ActorID) time.Duration { 46 | if l.mean <= 0 { 47 | return 0 48 | } 49 | 50 | l.latencyFromToLock.Lock() 51 | defer l.latencyFromToLock.Unlock() 52 | 53 | latencyFrom, latencyFromFound := l.latencyFromTo[from] 54 | if !latencyFromFound { 55 | latencyFrom = make(map[gpbft.ActorID]time.Duration) 56 | l.latencyFromTo[from] = latencyFrom 57 | } 58 | latencyTo, latencyToFound := latencyFrom[to] 59 | if !latencyToFound { 60 | latencyTo = l.generate() 61 | latencyFrom[to] = latencyTo 62 | } 63 | return latencyTo 64 | } 65 | 66 | func (l *LogNormal) generate() time.Duration { 67 | norm := l.rng.NormFloat64() 68 | lognorm := math.Exp(norm) 69 | return time.Duration(lognorm * float64(l.mean)) 70 | } 71 | -------------------------------------------------------------------------------- /sim/justification.go: -------------------------------------------------------------------------------- 1 | package sim 2 | 3 | import ( 4 | "cmp" 5 | "context" 6 | "math/rand" 7 | "slices" 8 | 9 | "github.com/filecoin-project/go-bitfield" 10 | "github.com/filecoin-project/go-f3/certs" 11 | "github.com/filecoin-project/go-f3/gpbft" 12 | "github.com/filecoin-project/go-f3/sim/signing" 13 | ) 14 | 15 | // Generate a justification from the given power table. This assumes the signing backend can sign for all keys. 
16 | func MakeJustification(backend signing.Backend, nn gpbft.NetworkName, chain *gpbft.ECChain, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) (*gpbft.Justification, error) { 17 | 18 | scaledPowerTable, totalPower, err := powerTable.Scaled() 19 | if err != nil { 20 | return nil, err 21 | } 22 | 23 | powerTableCid, err := certs.MakePowerTableCID(nextPowerTable) 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | payload := gpbft.Payload{ 29 | Instance: instance, 30 | Round: 0, 31 | Phase: gpbft.DECIDE_PHASE, 32 | SupplementalData: gpbft.SupplementalData{ 33 | PowerTable: powerTableCid, 34 | }, 35 | Value: chain, 36 | } 37 | msg := backend.MarshalPayloadForSigning(nn, &payload) 38 | signers := rand.Perm(len(powerTable)) 39 | signersBitfield := bitfield.New() 40 | var signingPower int64 41 | 42 | type vote struct { 43 | index int 44 | sig []byte 45 | pk gpbft.PubKey 46 | } 47 | 48 | var votes []vote 49 | for _, i := range signers { 50 | pe := powerTable[i] 51 | sig, err := backend.Sign(context.Background(), pe.PubKey, msg) 52 | if err != nil { 53 | return nil, err 54 | } 55 | votes = append(votes, vote{ 56 | index: i, 57 | sig: sig, 58 | pk: pe.PubKey, 59 | }) 60 | 61 | signersBitfield.Set(uint64(i)) 62 | signingPower += scaledPowerTable[i] 63 | if gpbft.IsStrongQuorum(signingPower, totalPower) { 64 | break 65 | } 66 | } 67 | slices.SortFunc(votes, func(a, b vote) int { 68 | return cmp.Compare(a.index, b.index) 69 | }) 70 | signers = signers[:len(votes)] 71 | slices.Sort(signers) 72 | pks := make([]gpbft.PubKey, len(votes)) 73 | sigs := make([][]byte, len(votes)) 74 | for i, vote := range votes { 75 | pks[i] = vote.pk 76 | sigs[i] = vote.sig 77 | } 78 | 79 | agg, err := backend.Aggregate(powerTable.PublicKeys()) 80 | if err != nil { 81 | return nil, err 82 | } 83 | 84 | sig, err := agg.Aggregate(signers, sigs) 85 | if err != nil { 86 | return nil, err 87 | } 88 | 89 | return &gpbft.Justification{ 90 | Vote: payload, 91 | Signers: signersBitfield, 92 | Signature: sig, 93 | }, nil 94 | } 95 | -------------------------------------------------------------------------------- /cmd/f3/manifest.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/filecoin-project/go-f3/manifest" 9 | "github.com/urfave/cli/v2" 10 | ) 11 | 12 | var manifestCmd = cli.Command{ 13 | Name: "manifest", 14 | Subcommands: []*cli.Command{ 15 | &manifestGenCmd, 16 | &manifestCheckCmd, 17 | }, 18 | } 19 | var manifestGenCmd = cli.Command{ 20 | Name: "gen", 21 | Usage: "generates f3 manifest", 22 | Flags: []cli.Flag{ 23 | &cli.IntFlag{ 24 | Name: "N", 25 | Usage: "number of participant", 26 | Value: 2, 27 | }, 28 | }, 29 | 30 | Action: func(c *cli.Context) error { 31 | path := c.String("manifest") 32 | m := manifest.LocalDevnetManifest() 33 | 34 | if err := m.Validate(); err != nil { 35 | return fmt.Errorf("generated invalid manifest: %w", err) 36 | } 37 | 38 | f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0666) 39 | if err != nil { 40 | return fmt.Errorf("opening manifest file for writing: %w", err) 41 | } 42 | err = json.NewEncoder(f).Encode(m) 43 | if err != nil { 44 | return fmt.Errorf("encoding manifest: %w", err) 45 | } 46 | err = f.Close() 47 | if err != nil { 48 | return fmt.Errorf("closing file: %w", err) 49 | } 50 | 51 | return nil 52 | }, 53 | } 54 | 55 | var manifestCheckCmd = cli.Command{ 56 | Name: "check", 57 | Usage: "validates an f3 manifest", 58 | Flags: 
[]cli.Flag{ 59 | &cli.PathFlag{ 60 | Name: "manifest", 61 | Usage: "The path to the manifest file.", 62 | Required: true, 63 | }, 64 | }, 65 | 66 | Action: func(c *cli.Context) error { 67 | path := c.String("manifest") 68 | currentManifest, err := loadManifest(path) 69 | if err != nil { 70 | return err 71 | } 72 | _, _ = fmt.Fprintf(c.App.Writer, "✅ valid manifest for %s\n", currentManifest.NetworkName) 73 | return nil 74 | }, 75 | } 76 | 77 | func getManifest(c *cli.Context) (manifest.Manifest, error) { 78 | manifestPath := c.String("manifest") 79 | return loadManifest(manifestPath) 80 | } 81 | 82 | func loadManifest(path string) (manifest.Manifest, error) { 83 | f, err := os.Open(path) 84 | if err != nil { 85 | return manifest.Manifest{}, fmt.Errorf("opening %s to load manifest: %w", path, err) 86 | } 87 | defer func() { _ = f.Close() }() 88 | unmarshalled, err := manifest.Unmarshal(f) 89 | if err != nil { 90 | return manifest.Manifest{}, fmt.Errorf("unmarshalling %s to manifest: %w", path, err) 91 | } 92 | if unmarshalled == nil { 93 | return manifest.Manifest{}, fmt.Errorf("unmarshalling %s to manifest: manifest must be specified", path) 94 | } 95 | return *unmarshalled, nil 96 | } 97 | -------------------------------------------------------------------------------- /internal/caching/grouped_cache_bench_test.go: -------------------------------------------------------------------------------- 1 | package caching_test 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "testing" 7 | 8 | "github.com/filecoin-project/go-f3/internal/caching" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func BenchmarkGroupedSet(b *testing.B) { 13 | benchmarkGroupedSetBySize(b, 1, 1000, 128) 14 | benchmarkGroupedSetBySize(b, 10, 1000, 128) 15 | benchmarkGroupedSetBySize(b, 100, 1000, 128) 16 | } 17 | 18 | func benchmarkGroupedSetBySize(b *testing.B, maxGroups, maxSetSize, valueLen int) { 19 | b.Run(fmt.Sprintf("%d/%d/%d/Add", maxGroups, maxSetSize, valueLen), func(b *testing.B) { 20 | groupedValues, size := generateGroupedValues(b, maxGroups, maxSetSize*2, valueLen) 21 | b.SetBytes(size) 22 | b.ResetTimer() 23 | b.ReportAllocs() 24 | b.RunParallel(func(pb *testing.PB) { 25 | for pb.Next() { 26 | subject := caching.NewGroupedSet(maxGroups, maxSetSize) 27 | for group, values := range groupedValues { 28 | for _, value := range values { 29 | added, err := subject.Add(uint64(group), nil, value) 30 | require.NoError(b, err) 31 | require.True(b, added) 32 | } 33 | } 34 | } 35 | }) 36 | }) 37 | 38 | b.Run(fmt.Sprintf("%d/%d/%d/Contains_Existing", maxGroups, maxSetSize, valueLen), func(b *testing.B) { 39 | subject := caching.NewGroupedSet(maxGroups, maxSetSize) 40 | maxElementsBeforeEviction := (maxSetSize * 2) - 1 41 | groupedValues, size := generateGroupedValues(b, maxGroups, maxElementsBeforeEviction, valueLen) 42 | for group, values := range groupedValues { 43 | for _, value := range values { 44 | added, err := subject.Add(uint64(group), nil, value) 45 | require.NoError(b, err) 46 | require.True(b, added) 47 | } 48 | } 49 | 50 | b.SetBytes(size) 51 | b.ResetTimer() 52 | b.ReportAllocs() 53 | b.RunParallel(func(pb *testing.PB) { 54 | for pb.Next() { 55 | for group, values := range groupedValues { 56 | for _, value := range values { 57 | contains, err := subject.Contains(uint64(group), nil, value) 58 | require.NoError(b, err) 59 | require.True(b, contains) 60 | } 61 | } 62 | } 63 | }) 64 | }) 65 | } 66 | 67 | func generateGroupedValues(b testing.TB, groups, count, len int) ([][][]byte, int64) { 68 | values 
:= make([][][]byte, count) 69 | var totalSize int64 70 | for i := 0; i < groups; i++ { 71 | value, size := generateValues(b, count, len) 72 | values[i] = value 73 | totalSize += size 74 | } 75 | rand.Shuffle(groups, func(one, other int) { 76 | values[one], values[other] = values[other], values[one] 77 | }) 78 | return values, totalSize 79 | } 80 | -------------------------------------------------------------------------------- /internal/measurements/attributes.go: -------------------------------------------------------------------------------- 1 | package measurements 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "os" 7 | "strings" 8 | 9 | "github.com/ipfs/go-datastore" 10 | pubsub "github.com/libp2p/go-libp2p-pubsub" 11 | "go.opentelemetry.io/otel/attribute" 12 | ) 13 | 14 | var ( 15 | AttrStatusSuccess = attribute.String("status", "success") 16 | AttrStatusError = attribute.String("status", "error-other") 17 | AttrStatusPanic = attribute.String("status", "error-panic") 18 | AttrStatusCanceled = attribute.String("status", "error-canceled") 19 | AttrStatusTimeout = attribute.String("status", "error-timeout") 20 | AttrStatusInternalError = attribute.String("status", "error-internal") 21 | AttrStatusNotFound = attribute.String("status", "error-not-found") 22 | 23 | AttrDialSucceeded = attribute.Key("dial-succeeded") 24 | ) 25 | 26 | func Status(ctx context.Context, err error) attribute.KeyValue { 27 | switch cErr := ctx.Err(); { 28 | case err == nil: 29 | return AttrStatusSuccess 30 | case errors.Is(err, datastore.ErrNotFound): 31 | return AttrStatusNotFound 32 | case errors.As(err, &pubsub.ValidationError{}): 33 | // There are no sentinel errors for pubsub validation errors, unfortunately. 34 | // Hence, the string gymnastics. 35 | switch errMsg := err.Error(); { 36 | case strings.Contains(errMsg, pubsub.RejectValidationIgnored): 37 | return attribute.String("status", "error-pubsub-validation-ignored") 38 | case strings.Contains(errMsg, pubsub.RejectValidationFailed): 39 | return attribute.String("status", "error-pubsub-validation-failed") 40 | case strings.Contains(errMsg, pubsub.RejectValidationThrottled): 41 | return attribute.String("status", "error-pubsub-validation-throttled") 42 | case strings.Contains(errMsg, pubsub.RejectValidationQueueFull): 43 | return attribute.String("status", "error-pubsub-validation-q-full") 44 | default: 45 | return attribute.String("status", "error-pubsub-validation-other") 46 | } 47 | case os.IsTimeout(err), 48 | errors.Is(err, os.ErrDeadlineExceeded), 49 | errors.Is(cErr, context.DeadlineExceeded): 50 | return AttrStatusTimeout 51 | case errors.Is(cErr, context.Canceled): 52 | return AttrStatusCanceled 53 | default: 54 | return AttrStatusError 55 | } 56 | } 57 | 58 | func AttrFromPubSubValidationResult(result pubsub.ValidationResult) attribute.KeyValue { 59 | var v string 60 | switch result { 61 | case pubsub.ValidationAccept: 62 | v = "accepted" 63 | case pubsub.ValidationReject: 64 | v = "rejected" 65 | case pubsub.ValidationIgnore: 66 | v = "ignored" 67 | default: 68 | v = "unknown" 69 | } 70 | return attribute.String("result", v) 71 | } 72 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Go implementation of Fast Finality in Filecoin 2 | 3 | [![Go Test](https://github.com/filecoin-project/go-f3/actions/workflows/go-test.yml/badge.svg)](https://github.com/filecoin-project/go-f3/actions/workflows/go-test.yml) 
[![codecov](https://codecov.io/gh/filecoin-project/go-f3/graph/badge.svg?token=6uD131t7gs)](https://codecov.io/gh/filecoin-project/go-f3) 4 | 5 | This repository contains the golang implementation of the Fast Finality (F3) protocol for Filecoin as specified 6 | by [FIP-0086](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0086.md). This protocol uses the GossipPBFT 7 | consensus protocol to finalize tipsets when voted for by more than two-thirds of the storage power. 8 | 9 | ## Key Features 10 | 11 | - **Core Implementation of GossipPBFT Consensus Protocol**: The heart of Go-F3. 12 | - **F3 Filecoin Integration Module**: Streamlines the integration of the F3 protocol within the broader Filecoin 13 | ecosystem, specifically Lotus and other Filecoin full nodes. 14 | - **Simulation Package**: Includes a robust simulation environment with various adversary models, enabling rigorous 15 | testing of the protocol under different network conditions and attack scenarios. 16 | - **Emulator Package**: Facilitates message-by-message interaction with the GossipPBFT protocol, providing a detailed 17 | view of protocol mechanics and performance. 18 | - **Standalone F3 Participant Implementation**: A complete implementation of an F3 protocol participant, capable of 19 | operating independently within the Filecoin network. 20 | - **Finality Certificate**: Implements the generation and management of finality certificates, which carry transportable 21 | proofs of finality. 22 | - **Finality Certificate Exchange Protocol**: Features an adaptive self-configuring polling mechanism, enhancing the 23 | efficiency and reliability of certificate exchange among participants. 24 | 25 | ## Status 26 | 27 | **🚀 Live on Mainnet** 28 | 29 | Go-F3 was successfully activated on Filecoin mainnet on April 29, 2025 at epoch 4920480. 30 | 31 | ## Project Structure 32 | 33 | - `blssig`: BLS signature schemes. 34 | - `certexchange`: Certificate exchange mechanisms. 35 | - `certstore`: Certificate storage. 36 | - `cmd`: Command line to run a standalone F3 participant. 37 | - `ec`: Expected Consensus utilities. 38 | - `emulator`: Network emulation tools. 39 | - `gpbft`: GossipPBFT protocol implementation. 40 | - `merkle`: Merkle tree implementations. 41 | - `sim`: Simulation harness. 42 | - `test`: Test suite for various components. 43 | 44 | ## License 45 | 46 | This project is dual-licensed under the MIT and Apache 2.0 licenses. See [LICENSE-APACHE](LICENSE-APACHE) 47 | and [LICENSE-MIT](LICENSE-MIT) for more details. 48 | -------------------------------------------------------------------------------- /sim/adversary/drop.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import ( 4 | "math/rand" 5 | "time" 6 | 7 | "github.com/filecoin-project/go-f3/gpbft" 8 | ) 9 | 10 | var _ Receiver = (*Drop)(nil) 11 | 12 | // Drop adversary stochastically drops messages to/from a given set of 13 | // participants for a configured duration of time, mimicking at-most-once message 14 | // delivery semantics across a simulation network. 15 | // 16 | // When no participants are set, all exchanged messages will be targeted by this 17 | // adversary. For this adversary to take effect, global stabilization time must be 18 | // configured to be at least as long as the configured drop duration. 19 | // 20 | // See sim.WithGlobalStabilizationTime.
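//
// A rough wiring sketch following the pattern used by this repository's tests
// (chainGen, powerDist, onePower and dropFor are placeholders for values the
// caller constructs):
//
//	sm, err := sim.NewSimulation(
//		sim.AddHonestParticipants(6, chainGen, powerDist),
//		sim.WithAdversary(adversary.NewDropGenerator(onePower, 1413, 0.5, dropFor)),
//		sim.WithGlobalStabilizationTime(dropFor), // must be at least the drop duration
//	)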
21 | type Drop struct { 22 | host Host 23 | targetsByID map[gpbft.ActorID]struct{} 24 | gst time.Time 25 | rng *rand.Rand 26 | dropProbability float64 27 | 28 | Absent 29 | } 30 | 31 | func NewDrop(host Host, seed int64, dropProbability float64, dropDuration time.Duration, targets ...gpbft.ActorID) *Drop { 32 | targetsByID := make(map[gpbft.ActorID]struct{}) 33 | for _, target := range targets { 34 | targetsByID[target] = struct{}{} 35 | } 36 | return &Drop{ 37 | host: host, 38 | rng: rand.New(rand.NewSource(seed)), 39 | dropProbability: dropProbability, 40 | targetsByID: targetsByID, 41 | gst: time.Time{}.Add(dropDuration), 42 | } 43 | } 44 | 45 | func NewDropGenerator(power gpbft.StoragePower, seed int64, dropProbability float64, dropDuration time.Duration, targets ...gpbft.ActorID) Generator { 46 | return func(id gpbft.ActorID, host Host) *Adversary { 47 | return &Adversary{ 48 | Receiver: NewDrop(host, seed, dropProbability, dropDuration, targets...), 49 | Power: power, 50 | ID: id, 51 | } 52 | } 53 | } 54 | 55 | func (d *Drop) AllowMessage(from gpbft.ActorID, to gpbft.ActorID, _ gpbft.GMessage) bool { 56 | // Stochastically drop messages until Global Stabilisation Time has 57 | // elapsed, except messages to self. 58 | switch { 59 | case from == to, d.host.Time().After(d.gst), !d.isTargeted(to) && !d.isTargeted(from): 60 | return true 61 | default: 62 | return d.allowStochastically() 63 | } 64 | } 65 | 66 | func (d *Drop) allowStochastically() bool { 67 | switch { 68 | case d.dropProbability <= 0: 69 | return true 70 | case d.dropProbability >= 1.0: 71 | return false 72 | default: 73 | return d.rng.Float64() > d.dropProbability 74 | } 75 | } 76 | 77 | func (d *Drop) isTargeted(id gpbft.ActorID) bool { 78 | if len(d.targetsByID) == 0 { 79 | // Target all participants if no explicit IDs are set. 80 | return true 81 | } 82 | _, found := d.targetsByID[id] 83 | return found 84 | } 85 | -------------------------------------------------------------------------------- /gpbft/errors.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "runtime/debug" 7 | ) 8 | 9 | var ( 10 | _ error = (*ValidationError)(nil) 11 | 12 | // ErrValidationTooOld signals that a message is invalid because belongs to prior 13 | // instances of gpbft. 14 | ErrValidationTooOld = newValidationError("message is for prior instance") 15 | // ErrValidationNoCommittee signals that a message is invalid because there is no 16 | // committee for the instance to which it belongs. 17 | // 18 | // See: CommitteeProvider. 19 | ErrValidationNoCommittee = newValidationError("no committee for instance") 20 | // ErrValidationInvalid signals that a message violates the validity rules of 21 | // gpbft protocol. 22 | ErrValidationInvalid = newValidationError("message invalid") 23 | // ErrValidationWrongBase signals that a message is invalid due to having an 24 | // unexpected base ECChain. 25 | // 26 | // See: ECChain, TipSet, ECChain.Base 27 | ErrValidationWrongBase = newValidationError("unexpected base chain") 28 | //ErrValidationWrongSupplement signals that a message is invalid due to unexpected supplemental data. 29 | // 30 | // See SupplementalData. 31 | ErrValidationWrongSupplement = newValidationError("unexpected supplemental data") 32 | // ErrValidationNotRelevant signals that a message is not relevant at the current 33 | // instance, and is not worth propagating to others. 
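//
// An illustrative way for callers to branch on these sentinels (not prescriptive;
// any errors.Is-based handling works since they are comparable error values):
//
//	switch {
//	case errors.Is(err, ErrValidationTooOld), errors.Is(err, ErrValidationNotRelevant):
//		// Stale or redundant; typically ignored rather than rejected.
//	case errors.Is(err, ErrValidationInvalid):
//		// Violates protocol rules; typically rejected outright.
//	}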
34 | ErrValidationNotRelevant = newValidationError("message is valid but not relevant") 35 | 36 | // ErrReceivedWrongInstance signals that a message is received with mismatching instance ID. 37 | ErrReceivedWrongInstance = errors.New("received message for wrong instance") 38 | // ErrReceivedAfterTermination signals that a message is received after the gpbft instance is terminated. 39 | ErrReceivedAfterTermination = errors.New("received message after terminating") 40 | // ErrReceivedInternalError signals that an error has occurred during message processing. 41 | ErrReceivedInternalError = errors.New("error processing message") 42 | ) 43 | 44 | // ValidationError signals that an error has occurred while validating a GMessage. 45 | type ValidationError struct{ message string } 46 | 47 | type PanicError struct { 48 | Cause any 49 | stackTrace string 50 | } 51 | 52 | func newValidationError(message string) ValidationError { return ValidationError{message: message} } 53 | func (e ValidationError) Error() string { return e.message } 54 | 55 | func newPanicError(cause any) *PanicError { 56 | return &PanicError{ 57 | Cause: cause, 58 | stackTrace: string(debug.Stack()), 59 | } 60 | } 61 | 62 | func (e *PanicError) Error() string { 63 | return fmt.Sprintf("participant panicked: %v\n%v", e.Cause, e.stackTrace) 64 | } 65 | -------------------------------------------------------------------------------- /tipsettimestamp_test.go: -------------------------------------------------------------------------------- 1 | package f3 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | "github.com/filecoin-project/go-f3/gpbft" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | type tipset struct { 13 | genesis time.Time 14 | period time.Duration 15 | epoch int64 16 | } 17 | 18 | func (ts *tipset) String() string { 19 | return fmt.Sprintf("epoch %d, timestamp %s", ts.epoch, ts.Timestamp()) 20 | } 21 | 22 | func (ts *tipset) Key() gpbft.TipSetKey { 23 | panic("not implemented") 24 | } 25 | 26 | func (ts *tipset) Beacon() []byte { 27 | panic("not implemented") 28 | } 29 | 30 | func (ts *tipset) Epoch() int64 { 31 | return ts.epoch 32 | } 33 | 34 | func (ts *tipset) Timestamp() time.Time { 35 | return ts.genesis.Add(time.Duration(ts.epoch) * ts.period) 36 | } 37 | 38 | func tipsetGenerator(genesis time.Time, period time.Duration) func(epoch int64) *tipset { 39 | return func(epoch int64) *tipset { 40 | return &tipset{ 41 | genesis: genesis, 42 | period: period, 43 | epoch: epoch, 44 | } 45 | } 46 | } 47 | 48 | func TestComputeTipsetTimestampAtEpoch(t *testing.T) { 49 | genesis := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) 50 | period := 30 * time.Second 51 | 52 | generateTipset := tipsetGenerator(genesis, period) 53 | tipset := generateTipset(10) 54 | 55 | t.Run("Basic Functionality", func(t *testing.T) { 56 | targetEpoch := int64(15) 57 | expected := generateTipset(targetEpoch).Timestamp() 58 | actual := computeTipsetTimestampAtEpoch(tipset, targetEpoch, period) 59 | assert.Equal(t, expected, actual) 60 | }) 61 | 62 | t.Run("Zero Epoch", func(t *testing.T) { 63 | targetEpoch := int64(0) 64 | expected := generateTipset(targetEpoch).Timestamp() 65 | actual := computeTipsetTimestampAtEpoch(tipset, targetEpoch, period) 66 | assert.Equal(t, expected, actual) 67 | }) 68 | 69 | t.Run("Large Epoch", func(t *testing.T) { 70 | largeEpoch := int64(1e6) 71 | expected := generateTipset(largeEpoch).Timestamp() 72 | actual := computeTipsetTimestampAtEpoch(tipset, largeEpoch, period) 73 | assert.Equal(t, expected, 
actual) 74 | }) 75 | 76 | t.Run("Boundary Condition", func(t *testing.T) { 77 | boundaryEpoch := int64(1e3) 78 | expected := generateTipset(boundaryEpoch).Timestamp() 79 | actual := computeTipsetTimestampAtEpoch(tipset, boundaryEpoch, period) 80 | assert.Equal(t, expected, actual) 81 | }) 82 | 83 | t.Run("Consistency", func(t *testing.T) { 84 | targetEpoch := int64(20) 85 | expected := generateTipset(targetEpoch).Timestamp() 86 | actual1 := computeTipsetTimestampAtEpoch(tipset, targetEpoch, period) 87 | actual2 := computeTipsetTimestampAtEpoch(tipset, targetEpoch, period) 88 | assert.Equal(t, expected, actual1) 89 | assert.Equal(t, expected, actual2) 90 | }) 91 | } 92 | -------------------------------------------------------------------------------- /internal/caching/grouped_cache.go: -------------------------------------------------------------------------------- 1 | package caching 2 | 3 | import ( 4 | "container/list" 5 | "sync" 6 | 7 | logging "github.com/ipfs/go-log/v2" 8 | ) 9 | 10 | var log = logging.Logger("f3/internal/caching") 11 | 12 | type GroupedSet struct { 13 | maxGroups int 14 | setPool sync.Pool 15 | 16 | mu sync.Mutex 17 | groups map[uint64]*orderedSet 18 | recency *list.List 19 | } 20 | 21 | type orderedSet struct { 22 | order *list.Element 23 | *Set 24 | } 25 | 26 | func (os *orderedSet) Clear() { 27 | os.order = nil 28 | os.Set.Clear() 29 | } 30 | 31 | func NewGroupedSet(maxGroups, maxSetSize int) *GroupedSet { 32 | return &GroupedSet{ 33 | maxGroups: maxGroups, 34 | groups: make(map[uint64]*orderedSet, maxGroups), 35 | setPool: sync.Pool{ 36 | New: func() any { 37 | return &orderedSet{ 38 | Set: NewSet(maxSetSize), 39 | } 40 | }, 41 | }, 42 | recency: list.New(), 43 | } 44 | } 45 | 46 | // Contains checks if the given value at given group is present, and if so 47 | // updates its recency. Otherwise, returns false. 48 | func (gs *GroupedSet) Contains(g uint64, namespace, v []byte) (bool, error) { 49 | gs.mu.Lock() 50 | defer gs.mu.Unlock() 51 | 52 | if set, exists := gs.groups[g]; exists { 53 | gs.recency.MoveToFront(set.order) 54 | return set.Contains(namespace, v) 55 | } 56 | return false, nil 57 | } 58 | 59 | // Add attempts to add the given value for the given group if not already present. 
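//
// Illustrative use, assuming the group key is a gpbft instance number and the
// values are message digests (an assumption about the caller, not a requirement
// of this type):
//
//	gs := NewGroupedSet(25, 32<<10)
//	if added, err := gs.Add(instance, nil, digest); err == nil && added {
//		// First time this digest is seen within the instance.
//	}
//	gs.RemoveGroupsLessThan(finalized) // groups below the finalized instance no longer matter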
60 | func (gs *GroupedSet) Add(g uint64, namespace, v []byte) (bool, error) { 61 | gs.mu.Lock() 62 | defer gs.mu.Unlock() 63 | 64 | set, exists := gs.groups[g] 65 | if !exists { 66 | if len(gs.groups) >= gs.maxGroups { 67 | if evictee := gs.recency.Back(); evictee != nil { 68 | gs.evict(evictee.Value.(uint64)) 69 | } 70 | } 71 | set = gs.setPool.Get().(*orderedSet) 72 | set.order = gs.recency.PushFront(g) 73 | gs.groups[g] = set 74 | } 75 | contained, err := set.ContainsOrAdd(namespace, v) 76 | if err != nil { 77 | return false, err 78 | } 79 | return !contained, nil 80 | } 81 | 82 | func (gs *GroupedSet) RemoveGroupsLessThan(group uint64) bool { 83 | gs.mu.Lock() 84 | defer gs.mu.Unlock() 85 | var evictedAtLeastOne bool 86 | for g := range gs.groups { 87 | if g < group { 88 | evictedAtLeastOne = gs.evict(g) || evictedAtLeastOne 89 | } 90 | } 91 | return evictedAtLeastOne 92 | } 93 | 94 | func (gs *GroupedSet) evict(group uint64) bool { 95 | set, exists := gs.groups[group] 96 | if !exists { 97 | return false 98 | } 99 | gs.recency.Remove(set.order) 100 | delete(gs.groups, group) 101 | 102 | set.Clear() 103 | gs.setPool.Put(set) 104 | log.Debugw("Evicted grouped set from cache", "group", group) 105 | return true 106 | } 107 | -------------------------------------------------------------------------------- /sim/adversary/spam.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/filecoin-project/go-f3/gpbft" 8 | ) 9 | 10 | var _ Receiver = (*Spam)(nil) 11 | 12 | // Spam is an adversary that propagates COMMIT messages for bottom for a 13 | // configured number of future rounds. 14 | type Spam struct { 15 | host Host 16 | roundsAhead uint64 17 | latestObservedInstance uint64 18 | 19 | Absent 20 | allowAll 21 | } 22 | 23 | // NewSpam instantiates a new Spam adversary that spams the network with 24 | // spammable messages (i.e. COMMIT for bottom) for the configured number of 25 | // roundsAhead via either synchronous or regular broadcast. This adversary 26 | // resigns the spammable messages as its own to mimic messages with valid 27 | // signature but for future rounds. 28 | func NewSpam(host Host, roundsAhead uint64) *Spam { 29 | return &Spam{ 30 | host: host, 31 | roundsAhead: roundsAhead, 32 | } 33 | } 34 | 35 | func NewSpamGenerator(power gpbft.StoragePower, roundsAhead uint64) Generator { 36 | return func(id gpbft.ActorID, host Host) *Adversary { 37 | return &Adversary{ 38 | Receiver: NewSpam(host, roundsAhead), 39 | Power: power, 40 | ID: id, 41 | } 42 | } 43 | } 44 | 45 | func (s *Spam) StartInstanceAt(instance uint64, _when time.Time) error { 46 | // Immediately start spamming the network. 47 | s.latestObservedInstance = instance 48 | s.spamAtInstance(context.Background(), s.latestObservedInstance) 49 | return nil 50 | } 51 | 52 | func (s *Spam) ReceiveMessage(ctx context.Context, vmsg gpbft.ValidatedMessage) error { 53 | msg := vmsg.Message() 54 | // Watch for increase in instance, and when increased spam again. 55 | if msg.Vote.Instance > s.latestObservedInstance { 56 | s.spamAtInstance(ctx, msg.Vote.Instance) 57 | s.latestObservedInstance = msg.Vote.Instance 58 | } 59 | return nil 60 | } 61 | 62 | func (s *Spam) spamAtInstance(ctx context.Context, instance uint64) { 63 | // Spam the network with COMMIT messages by incrementing rounds up to 64 | // roundsAhead. 
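	// As described on NewSpam, only the payload is populated here; signing happens
	// downstream when the host services RequestBroadcast, so the resulting spam
	// mimics validly signed COMMIT-for-bottom messages for rounds that have not
	// happened yet.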
65 | supplementalData, _, err := s.host.GetProposal(ctx, instance) 66 | if err != nil { 67 | panic(err) 68 | } 69 | committee, err := s.host.GetCommittee(ctx, instance) 70 | if err != nil { 71 | panic(err) 72 | } 73 | for spamRound := uint64(0); spamRound < s.roundsAhead; spamRound++ { 74 | p := gpbft.Payload{ 75 | Instance: instance, 76 | Round: spamRound, 77 | SupplementalData: *supplementalData, 78 | Phase: gpbft.COMMIT_PHASE, 79 | } 80 | mt := &gpbft.MessageBuilder{ 81 | NetworkName: s.host.NetworkName(), 82 | PowerTable: committee.PowerTable, 83 | Payload: p, 84 | } 85 | if err := s.host.RequestBroadcast(mt); err != nil { 86 | panic(err) 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /test/deny_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "testing" 7 | 8 | "github.com/filecoin-project/go-f3/gpbft" 9 | "github.com/filecoin-project/go-f3/sim" 10 | "github.com/filecoin-project/go-f3/sim/adversary" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestDeny_SkipsToFuture(t *testing.T) { 15 | t.Parallel() 16 | const ( 17 | instanceCount = 2000 18 | maxRounds = 30 19 | denialTarget = 0 20 | gst = 100 * EcEpochDuration 21 | ) 22 | 23 | ecChainGenerator := sim.NewUniformECChainGenerator(54445, 1, 5) 24 | sm, err := sim.NewSimulation( 25 | asyncOptions(2342342, 26 | sim.AddHonestParticipants(6, ecChainGenerator, uniformOneStoragePower), 27 | sim.WithAdversary(adversary.NewDenyGenerator(oneStoragePower, gst, adversary.DenyAllMessages, adversary.DenyToOrFrom, denialTarget)), 28 | sim.WithGlobalStabilizationTime(gst), 29 | sim.WithIgnoreConsensusFor(denialTarget), 30 | )..., 31 | ) 32 | require.NoError(t, err) 33 | require.NoErrorf(t, sm.Run(instanceCount, maxRounds), "%s", sm.Describe()) 34 | chain := ecChainGenerator.GenerateECChain(instanceCount-1, &gpbft.TipSet{}, math.MaxUint64) 35 | requireConsensusAtInstance(t, sm, instanceCount-1, chain.TipSets...) 36 | } 37 | 38 | func TestDenyPhase(t *testing.T) { 39 | t.Parallel() 40 | const ( 41 | instanceCount = 20 42 | maxRounds = 30 43 | gst = 10 * EcEpochDuration 44 | participants = 50 45 | ) 46 | 47 | for _, phase := range []gpbft.Phase{gpbft.QUALITY_PHASE, gpbft.PREPARE_PHASE, gpbft.CONVERGE_PHASE, gpbft.COMMIT_PHASE} { 48 | for _, denyMode := range []adversary.DenyTargetMode{adversary.DenyToOrFrom, adversary.DenyFrom, adversary.DenyTo} { 49 | t.Run(fmt.Sprintf("%s/%s", denyMode, phase), func(t *testing.T) { 50 | t.Parallel() 51 | ecGen := sim.NewUniformECChainGenerator(4332432, 1, 5) 52 | 53 | var attacked []gpbft.ActorID 54 | for i := gpbft.ActorID(1); i <= participants; i++ { 55 | attacked = append(attacked, i) 56 | } 57 | sm, err := sim.NewSimulation( 58 | syncOptions( 59 | sim.AddHonestParticipants(1, ecGen, sim.UniformStoragePower(gpbft.NewStoragePower(100*participants))), 60 | sim.AddHonestParticipants(participants, ecGen, sim.UniformStoragePower(gpbft.NewStoragePower(100))), 61 | sim.WithAdversary(adversary.NewDenyGenerator(gpbft.NewStoragePower(1), gst, adversary.DenyPhase(phase), denyMode, attacked...)), 62 | sim.WithGlobalStabilizationTime(gst), 63 | )..., 64 | ) 65 | require.NoError(t, err) 66 | require.NoErrorf(t, sm.Run(instanceCount, maxRounds), "%s", sm.Describe()) 67 | chain := ecGen.GenerateECChain(instanceCount-1, &gpbft.TipSet{}, math.MaxUint64) 68 | requireConsensusAtInstance(t, sm, instanceCount-1, chain.TipSets...) 
69 | }) 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /gpbft/message_builder_test.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/filecoin-project/go-state-types/big" 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestMessageBuilder(t *testing.T) { 12 | pt := NewPowerTable() 13 | err := pt.Add([]PowerEntry{ 14 | { 15 | ID: 0, 16 | PubKey: PubKey{0}, 17 | Power: big.NewInt(1), 18 | }, 19 | { 20 | ID: 1, 21 | PubKey: PubKey{1}, 22 | Power: big.NewInt(1), 23 | }, 24 | }...) 25 | assert.NoError(t, err) 26 | payload := Payload{ 27 | Instance: 1, 28 | Round: 0, 29 | } 30 | nn := NetworkName("test") 31 | 32 | mt := &MessageBuilder{ 33 | NetworkName: nn, 34 | PowerTable: pt, 35 | Payload: payload, 36 | } 37 | 38 | _, err = mt.PrepareSigningInputs(2) 39 | require.Error(t, err, "unknown ID should return an error") 40 | 41 | st, err := mt.PrepareSigningInputs(0) 42 | require.NoError(t, err) 43 | 44 | require.Equal(t, st.Payload, payload) 45 | require.Equal(t, st.ParticipantID, ActorID(0)) 46 | require.Equal(t, st.PubKey, PubKey{0}) 47 | require.NotNil(t, st.PayloadToSign) 48 | require.Nil(t, st.VRFToSign) 49 | 50 | st, err = mt.PrepareSigningInputs(1) 51 | require.NoError(t, err) 52 | 53 | require.Equal(t, st.Payload, payload) 54 | require.Equal(t, st.ParticipantID, ActorID(1)) 55 | require.Equal(t, st.PubKey, PubKey{1}) 56 | require.NotNil(t, st.PayloadToSign) 57 | require.Nil(t, st.VRFToSign) 58 | } 59 | 60 | func TestMessageBuilderWithVRF(t *testing.T) { 61 | pt := NewPowerTable() 62 | err := pt.Add([]PowerEntry{ 63 | { 64 | ID: 0, 65 | PubKey: PubKey{0}, 66 | Power: big.NewInt(1), 67 | }, 68 | { 69 | ID: 1, 70 | PubKey: PubKey{1}, 71 | Power: big.NewInt(1), 72 | }, 73 | }...) 
74 | assert.NoError(t, err) 75 | payload := Payload{ 76 | Instance: 1, 77 | Round: 0, 78 | } 79 | 80 | nn := NetworkName("test") 81 | mt := &MessageBuilder{ 82 | NetworkName: nn, 83 | PowerTable: pt, 84 | Payload: payload, 85 | BeaconForTicket: []byte{0xbe, 0xac, 0x04}, 86 | } 87 | 88 | st, err := mt.PrepareSigningInputs(0) 89 | require.NoError(t, err) 90 | 91 | require.Equal(t, st.Payload, payload) 92 | require.Equal(t, st.ParticipantID, ActorID(0)) 93 | require.Equal(t, st.PubKey, PubKey{0}) 94 | require.NotNil(t, st.PayloadToSign) 95 | require.NotNil(t, st.VRFToSign) 96 | 97 | st, err = mt.PrepareSigningInputs(1) 98 | require.NoError(t, err) 99 | 100 | require.Equal(t, st.Payload, payload) 101 | require.Equal(t, st.ParticipantID, ActorID(1)) 102 | require.Equal(t, st.PubKey, PubKey{1}) 103 | require.NotNil(t, st.PayloadToSign) 104 | require.NotNil(t, st.VRFToSign) 105 | } 106 | -------------------------------------------------------------------------------- /store.go: -------------------------------------------------------------------------------- 1 | package f3 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | 8 | "github.com/filecoin-project/go-f3/certexchange" 9 | "github.com/filecoin-project/go-f3/certs" 10 | "github.com/filecoin-project/go-f3/certstore" 11 | "github.com/filecoin-project/go-f3/ec" 12 | "github.com/filecoin-project/go-f3/gpbft" 13 | "github.com/filecoin-project/go-f3/manifest" 14 | 15 | "github.com/ipfs/go-datastore" 16 | "github.com/ipfs/go-datastore/namespace" 17 | ) 18 | 19 | // openCertstore opens the certificate store for the specific manifest (namespaced by the network 20 | // name). 21 | func openCertstore(ctx context.Context, ec ec.Backend, ds datastore.Datastore, 22 | m manifest.Manifest, certClient certexchange.Client) (*certstore.Store, error) { 23 | ds = namespace.Wrap(ds, m.DatastorePrefix()) 24 | 25 | if cs, err := certstore.OpenStore(ctx, ds); err == nil { 26 | return cs, nil 27 | } else if !errors.Is(err, certstore.ErrNotInitialized) { 28 | return nil, err 29 | } 30 | 31 | var initialPowerTable gpbft.PowerEntries 32 | initialPowerTable, err := loadInitialPowerTable(ctx, ec, m, certClient) 33 | if err != nil { 34 | return nil, fmt.Errorf("getting initial power table: %w", err) 35 | } 36 | 37 | return certstore.CreateStore(ctx, ds, m.InitialInstance, initialPowerTable) 38 | } 39 | 40 | func loadInitialPowerTable(ctx context.Context, ec ec.Backend, m manifest.Manifest, certClient certexchange.Client) (gpbft.PowerEntries, error) { 41 | epoch := m.BootstrapEpoch - m.EC.Finality 42 | if ts, err := ec.GetTipsetByEpoch(ctx, epoch); err != nil { 43 | // This is odd because we usually keep the entire chain, just not the state. 44 | // Odd but not fatal here. 
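		// The fallback order from here is: use the power table from EC state when
		// available; otherwise fetch the table identified by the manifest's
		// InitialPowerTable CID over the certificate exchange client further below.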
45 | log.Warnw("failed to find bootstrap tipset for F3", "error", err, "epoch", epoch) 46 | } else if pt, err := ec.GetPowerTable(ctx, ts.Key()); err != nil { 47 | log.Debugw("failed to load the bootstrap power table for F3 from state", "error", err) 48 | } else { 49 | if ptCid, err := certs.MakePowerTableCID(pt); err == nil { 50 | log.Infof("loaded initial power table at epoch %d: %s", epoch, ptCid) 51 | if m.InitialPowerTable.Defined() && m.InitialPowerTable != ptCid { 52 | log.Warnf("initial power table mismatch, loaded from EC: %s, from manifest: %s", ptCid, m.InitialPowerTable) 53 | } 54 | } 55 | return pt, nil 56 | } 57 | if !m.InitialPowerTable.Defined() { 58 | return nil, fmt.Errorf("failed to load the F3 bootstrap power table and none is specified in the manifest") 59 | } 60 | 61 | log.Infow("loading the F3 bootstrap power table", "epoch", epoch, "cid", m.InitialPowerTable) 62 | 63 | pt, err := certexchange.FindInitialPowerTable(ctx, certClient, m.InitialPowerTable, m.EC.Period) 64 | if err != nil { 65 | return nil, fmt.Errorf("could not get initial power table from finality exchange: %w", err) 66 | } 67 | return pt, nil 68 | } 69 | -------------------------------------------------------------------------------- /observer/schema.sql: -------------------------------------------------------------------------------- 1 | -- TODO: define a PAYLOAD type and refactor the duplicate STRUCT type definition for gpbft.Payload. 2 | -- Note that TYPE in duckdb does not support IF NOT EXISTS clause so there is a need to check 3 | -- if type exits and only create it if it does not... which is more hassle than it's worth. 4 | -- Hence, the duplicate STRUCT definition of Payload. 5 | 6 | CREATE TABLE IF NOT EXISTS latest_messages ( 7 | Timestamp TIMESTAMP, 8 | NetworkName VARCHAR, 9 | Sender BIGINT, 10 | Vote STRUCT( 11 | Instance BIGINT, 12 | Round BIGINT, 13 | Phase ENUM( 14 | 'INITIAL', 15 | 'QUALITY', 16 | 'CONVERGE', 17 | 'PREPARE', 18 | 'COMMIT', 19 | 'DECIDE', 20 | 'TERMINATED' 21 | ), 22 | SupplementalData STRUCT( 23 | Commitments BLOB, 24 | PowerTable VARCHAR 25 | ), 26 | Value STRUCT( 27 | Epoch BIGINT, 28 | Key BLOB, 29 | Commitments BLOB, 30 | PowerTable VARCHAR 31 | )[] 32 | ), 33 | Signature BLOB, 34 | Ticket BLOB, 35 | Justification STRUCT( 36 | Vote STRUCT( 37 | Instance BIGINT, 38 | Round BIGINT, 39 | Phase ENUM( 40 | 'INITIAL', 41 | 'QUALITY', 42 | 'CONVERGE', 43 | 'PREPARE', 44 | 'COMMIT', 45 | 'DECIDE', 46 | 'TERMINATED' 47 | ), 48 | SupplementalData STRUCT( 49 | Commitments BLOB, 50 | PowerTable VARCHAR 51 | ), 52 | Value STRUCT( 53 | Epoch BIGINT, 54 | Key BLOB, 55 | Commitments BLOB, 56 | PowerTable VARCHAR 57 | )[] 58 | ), 59 | Signers BIGINT[], 60 | Signature BLOB 61 | ) NULL 62 | ); 63 | 64 | -- Add Key column to latest_messages table to accommodate partial messages. 
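-- An illustrative read of the new column alongside the nested vote fields
-- (DuckDB resolves STRUCT members with dot notation; this query is documentation
-- only and not part of the schema):
--
--   SELECT Sender, Vote.Instance, Vote.Round, Vote.Phase, VoteValueKey
--   FROM latest_messages
--   ORDER BY Timestamp DESC
--   LIMIT 10;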
65 | ALTER TABLE latest_messages 66 | ADD COLUMN IF NOT EXISTS VoteValueKey BLOB; 67 | 68 | CREATE TABLE IF NOT EXISTS finality_certificates ( 69 | Timestamp TIMESTAMP, 70 | NetworkName VARCHAR, 71 | Instance BIGINT, 72 | ECChain STRUCT( 73 | Epoch BIGINT, 74 | Key BLOB, 75 | Commitments BLOB, 76 | PowerTable VARCHAR 77 | )[], 78 | SupplementalData STRUCT( 79 | Commitments BLOB, 80 | PowerTable VARCHAR 81 | ), 82 | Signers BIGINT[], 83 | Signature BLOB, 84 | PowerTableDelta STRUCT( 85 | ParticipantID BIGINT, 86 | PowerDelta BIGINT, 87 | SigningKey BLOB 88 | )[], 89 | ); 90 | 91 | CREATE TABLE IF NOT EXISTS chain_exchanges ( 92 | Timestamp TIMESTAMP, 93 | NetworkName VARCHAR, 94 | Instance BIGINT, 95 | VoteValueKey BLOB, 96 | VoteValue STRUCT( 97 | Epoch BIGINT, 98 | Key BLOB, 99 | Commitments BLOB, 100 | PowerTable VARCHAR 101 | )[], 102 | PRIMARY KEY (NetworkName, Instance, VoteValueKey) 103 | ); 104 | -------------------------------------------------------------------------------- /.github/workflows/publish-ghcr.yml: -------------------------------------------------------------------------------- 1 | name: Container 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'main' 7 | tags: 8 | - 'v*' 9 | workflow_run: 10 | workflows: [ Releaser ] 11 | types: 12 | - completed 13 | pull_request: 14 | workflow_dispatch: 15 | inputs: 16 | sha: 17 | description: 'Commit SHA to publish' 18 | required: true 19 | 20 | jobs: 21 | prepare-checkout: 22 | if: github.event_name != 'workflow_run' || github.event.workflow_run.conclusion == 'success' 23 | name: Prepare ref 24 | runs-on: ubuntu-latest 25 | outputs: 26 | ref: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.sha || github.event_name != 'workflow_run' && github.ref || steps.releaser.outputs.version }} 27 | steps: 28 | - name: Get version tag from releaser 29 | id: releaser 30 | if: github.event_name == 'workflow_run' 31 | uses: ipdxco/unified-github-workflows/.github/actions/inspect-releaser@v1.0 32 | with: 33 | artifacts-url: ${{ github.event.workflow_run.artifacts_url }} 34 | publish: 35 | name: Publish 36 | needs: [ prepare-checkout ] 37 | runs-on: ubuntu-latest 38 | permissions: 39 | contents: read 40 | packages: write 41 | steps: 42 | - name: Checkout 43 | uses: actions/checkout@v4 44 | with: 45 | ref: ${{ needs.prepare-checkout.outputs.ref }} 46 | - name: Set up QEMU 47 | uses: docker/setup-qemu-action@v3 48 | with: 49 | # Use QEMU v8 to work around build issues. See: 50 | # * https://github.com/docker/build-push-action/issues/1309#issuecomment-2618650540 51 | image: tonistiigi/binfmt:qemu-v8.1.5 52 | - name: Set up Docker Buildx 53 | uses: docker/setup-buildx-action@v3 54 | - name: Log in to the Container registry 55 | uses: docker/login-action@v3 56 | with: 57 | registry: ghcr.io 58 | username: ${{ github.actor }} 59 | password: ${{ github.token }} 60 | - name: Extract metadata 61 | id: meta 62 | uses: docker/metadata-action@v5 63 | with: 64 | images: ghcr.io/${{ github.repository }} 65 | tags: | 66 | type=semver,pattern={{raw}} 67 | type=ref,event=branch 68 | type=sha,format=long 69 | type=raw,value=${{ needs.prepare-checkout.outputs.ref }} 70 | - name: Build and push Docker image 71 | uses: docker/build-push-action@v6 72 | with: 73 | context: . 
74 | cache-from: type=gha 75 | cache-to: type=gha,mode=max 76 | platforms: linux/amd64,linux/arm64 77 | push: ${{ github.event_name != 'pull_request' }} 78 | tags: ${{ steps.meta.outputs.tags }} 79 | labels: ${{ steps.meta.outputs.labels }} -------------------------------------------------------------------------------- /internal/measurements/sample_set.go: -------------------------------------------------------------------------------- 1 | package measurements 2 | 3 | import "sync" 4 | 5 | // SampleSet stores a bounded set of samples and exposes the ability to check 6 | // whether it contains a given sample. See SampleSet.Contains. 7 | // 8 | // Internally, SampleSet uses two maps to store samples, each of which can grow 9 | // up to the specified max size. When one map fills up, the SampleSet switches to 10 | // the other, effectively flipping between them. This allows the set to check for 11 | // sample existence across a range of approximately max size to twice the max size, 12 | // offering a larger sample set compared to implementations that track insertion 13 | // order with similar memory footprint. 14 | // 15 | // The worst case memory footprint of SampleSet is around 2 * maxSize * 96 16 | // bytes. 17 | type SampleSet struct { 18 | 19 | // We could use existing LRU implementations for this at the price of slightly 20 | // higher memory footprint and explanation that recency is unused. Hence the 21 | // hand-rolled data structure here. 22 | 23 | // maxSize defines the maximum number of samples to store per internal set. 24 | maxSize int 25 | // mu protects access to flip and flop. 26 | mu sync.Mutex 27 | // flip stores one set of samples until it reaches maxSize. 28 | flip map[string]struct{} 29 | // flop stores another set of samples until it reaches maxSize. 30 | flop map[string]struct{} 31 | } 32 | 33 | // NewSampleSet creates a new SampleSet with a specified max size per sample 34 | // subset. 35 | func NewSampleSet(maxSize int) *SampleSet { 36 | maxSize = max(1, maxSize) 37 | return &SampleSet{ 38 | maxSize: maxSize, 39 | flip: make(map[string]struct{}, maxSize), 40 | flop: make(map[string]struct{}, maxSize), 41 | } 42 | } 43 | 44 | // Contains checks if the given sample v is contained within the sample set, and if 45 | // not adds it. 46 | func (ss *SampleSet) Contains(v []byte) bool { 47 | // The number 96 comes from the length of BLS signatures, the kind of value we 48 | // expect as the argument. Defensively re-slice it if it is larger at the price 49 | // of losing accuracy. 50 | // 51 | // Alternatively we could hash the values but considering the memory footprint of 52 | // these measurements (sub 10MB for a total of 50K samples) we choose larger 53 | // memory consumption over CPU footprint. 54 | key := string(v[:min(len(v), 96)]) 55 | 56 | ss.mu.Lock() 57 | defer ss.mu.Unlock() 58 | 59 | // Check if the sample exists in either set and, if not, insert it. 60 | if _, exists := ss.flip[key]; exists { 61 | return true 62 | } 63 | if _, exists := ss.flop[key]; exists { 64 | return true 65 | } 66 | ss.flip[key] = struct{}{} 67 | 68 | // Check if flip exceeds maxSize and if so do the flippity flop.
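	// After the swap below, flop holds the most recent maxSize samples and flip is
	// the freshly cleared map that new samples land in; lookups above consult both,
	// which is what yields the maxSize-to-2*maxSize effective window described on
	// the type.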
69 | if len(ss.flip) >= ss.maxSize { 70 | clear(ss.flop) 71 | ss.flop, ss.flip = ss.flip, ss.flop 72 | } 73 | return false 74 | } 75 | -------------------------------------------------------------------------------- /certexchange/polling/common_test.go: -------------------------------------------------------------------------------- 1 | package polling 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | 7 | "github.com/filecoin-project/go-f3/certs" 8 | "github.com/filecoin-project/go-f3/gpbft" 9 | "github.com/filecoin-project/go-f3/sim" 10 | "github.com/filecoin-project/go-f3/sim/signing" 11 | 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | // The network name used in tests. 16 | const TestNetworkName gpbft.NetworkName = "testnet" 17 | 18 | func MakeCertificate(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, backend signing.Backend, base *gpbft.TipSet, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) *certs.FinalityCertificate { 19 | chainLen := rng.Intn(23) + 1 20 | chain, err := gpbft.NewChain(base) 21 | require.NoError(t, err) 22 | 23 | for i := 0; i < chainLen; i++ { 24 | chain = chain.Extend(tsg.Sample()) 25 | } 26 | 27 | j, err := sim.MakeJustification(backend, TestNetworkName, chain, instance, powerTable, nextPowerTable) 28 | require.NoError(t, err) 29 | 30 | c, err := certs.NewFinalityCertificate(certs.MakePowerTableDiff(powerTable, nextPowerTable), j) 31 | require.NoError(t, err) 32 | 33 | return c 34 | } 35 | 36 | func RandomPowerTable(backend signing.Backend, entries int64) gpbft.PowerEntries { 37 | powerTable := make(gpbft.PowerEntries, entries) 38 | 39 | for i := range powerTable { 40 | key, _ := backend.GenerateKey() 41 | powerTable[i] = gpbft.PowerEntry{ 42 | ID: gpbft.ActorID(i + 1), 43 | // Power chosen such that: 44 | // - No small subset dominates the power table. 45 | // - Lots of duplicate power values. 
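			// For example, with 10 entries the powers come out as 20, 20, 19, 19,
			// 18, 18, ... so each participant holds well under a third of the total.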
46 | Power: gpbft.NewStoragePower(int64(len(powerTable)*2 - i/2)), 47 | PubKey: key, 48 | } 49 | } 50 | return powerTable 51 | } 52 | 53 | func MakeCertificates(t *testing.T, rng *rand.Rand, backend signing.Backend) *CertificateGenerator { 54 | powerTable := RandomPowerTable(backend, 10) 55 | tableCid, err := certs.MakePowerTableCID(powerTable) 56 | require.NoError(t, err) 57 | 58 | tsg := sim.NewTipSetGenerator(rng.Uint64()) 59 | base := &gpbft.TipSet{Epoch: 0, Key: tsg.Sample(), PowerTable: tableCid} 60 | 61 | return &CertificateGenerator{ 62 | PowerTable: powerTable, 63 | t: t, 64 | rng: rng, 65 | backend: backend, 66 | tsg: tsg, 67 | base: base, 68 | NextInstance: 0, 69 | } 70 | } 71 | 72 | type CertificateGenerator struct { 73 | PowerTable gpbft.PowerEntries 74 | NextInstance uint64 75 | 76 | t *testing.T 77 | rng *rand.Rand 78 | backend signing.Backend 79 | tsg *sim.TipSetGenerator 80 | base *gpbft.TipSet 81 | } 82 | 83 | func (cg *CertificateGenerator) MakeCertificate() *certs.FinalityCertificate { 84 | cert := MakeCertificate(cg.t, cg.rng, cg.tsg, cg.backend, cg.base, cg.NextInstance, cg.PowerTable, cg.PowerTable) 85 | cg.base = cert.ECChain.Head() 86 | cg.NextInstance++ 87 | return cert 88 | } 89 | -------------------------------------------------------------------------------- /internal/caching/set_test.go: -------------------------------------------------------------------------------- 1 | package caching_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/filecoin-project/go-f3/internal/caching" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestSet(t *testing.T) { 11 | subject := caching.NewSet(2) 12 | values, _ := generateValues(t, 4, 100) 13 | 14 | t.Run("does not contain unseen values", func(t *testing.T) { 15 | contains, err := subject.ContainsOrAdd(nil, values[0]) 16 | require.NoError(t, err) 17 | require.False(t, contains) 18 | contains, err = subject.ContainsOrAdd(nil, values[1]) 19 | require.NoError(t, err) 20 | require.False(t, contains) 21 | }) 22 | t.Run("contains seen values", func(t *testing.T) { 23 | contains, err := subject.Contains(nil, values[0]) 24 | require.NoError(t, err) 25 | require.True(t, contains) 26 | contains, err = subject.Contains(nil, values[1]) 27 | require.NoError(t, err) 28 | require.True(t, contains) 29 | }) 30 | t.Run("evicts first half once 2X capacity is reached", func(t *testing.T) { 31 | contains, err := subject.ContainsOrAdd(nil, values[2]) 32 | require.NoError(t, err) 33 | require.False(t, contains) 34 | contains, err = subject.Contains(nil, values[0]) 35 | require.NoError(t, err) 36 | require.True(t, contains) 37 | contains, err = subject.Contains(nil, values[1]) 38 | require.NoError(t, err) 39 | require.True(t, contains) 40 | 41 | contains, err = subject.ContainsOrAdd(nil, values[3]) 42 | require.NoError(t, err) 43 | require.False(t, contains) 44 | contains, err = subject.ContainsOrAdd(nil, values[0]) 45 | require.NoError(t, err) 46 | require.False(t, contains) 47 | contains, err = subject.ContainsOrAdd(nil, values[1]) 48 | require.NoError(t, err) 49 | require.False(t, contains) 50 | }) 51 | } 52 | 53 | func TestSet_MinSizeIsOne(t *testing.T) { 54 | subject := caching.NewSet(-1) 55 | contains, err := subject.ContainsOrAdd(nil, []byte("a")) 56 | require.NoError(t, err) 57 | require.False(t, contains) 58 | contains, err = subject.ContainsOrAdd(nil, []byte("b")) 59 | require.NoError(t, err) 60 | require.False(t, contains) 61 | contains, err = subject.ContainsOrAdd(nil, []byte("c")) 62 | require.NoError(t, err) 63 | 
require.False(t, contains) 64 | 65 | contains, err = subject.ContainsOrAdd(nil, []byte("a")) 66 | require.NoError(t, err) 67 | require.False(t, contains) 68 | contains, err = subject.ContainsOrAdd(nil, []byte("a")) 69 | require.NoError(t, err) 70 | require.True(t, contains) 71 | 72 | contains, err = subject.ContainsOrAdd(nil, []byte("b")) 73 | require.NoError(t, err) 74 | require.False(t, contains) 75 | contains, err = subject.ContainsOrAdd(nil, []byte("b")) 76 | require.NoError(t, err) 77 | require.True(t, contains) 78 | 79 | contains, err = subject.ContainsOrAdd(nil, []byte("c")) 80 | require.NoError(t, err) 81 | require.False(t, contains) 82 | contains, err = subject.ContainsOrAdd(nil, []byte("c")) 83 | require.NoError(t, err) 84 | require.True(t, contains) 85 | } 86 | -------------------------------------------------------------------------------- /bootstrap_delay_test.go: -------------------------------------------------------------------------------- 1 | package f3 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/filecoin-project/go-f3/ec" 8 | "github.com/filecoin-project/go-f3/internal/clock" 9 | "github.com/filecoin-project/go-f3/manifest" 10 | ) 11 | 12 | var _ ec.TipSet = (*tipset)(nil) 13 | 14 | func TestComputeBootstrapDelay(t *testing.T) { 15 | period := 30 * time.Second 16 | bootstrapEpoch := 1000 17 | m := manifest.Manifest{ 18 | BootstrapEpoch: int64(bootstrapEpoch), 19 | EC: manifest.EcConfig{ 20 | Period: period, 21 | }, 22 | } 23 | 24 | clock := clock.NewMock() 25 | genesis := time.Date(2020, time.January, 12, 01, 01, 01, 00, time.UTC) 26 | 27 | tt := []struct { 28 | name string 29 | time time.Time 30 | ts tipset 31 | want time.Duration 32 | }{ 33 | { 34 | name: "in sync - right before bootstrap", 35 | time: genesis.Add(time.Duration(bootstrapEpoch-1) * period), 36 | ts: tipset{genesis: genesis, epoch: int64(bootstrapEpoch - 1), period: period}, 37 | want: period, 38 | }, 39 | { 40 | name: "in sync - right at bootstrap", 41 | time: genesis.Add(time.Duration(bootstrapEpoch) * period), 42 | ts: tipset{genesis: genesis, epoch: int64(bootstrapEpoch), period: period}, 43 | want: 0, 44 | }, 45 | { 46 | name: "in sync - right after bootstrap", 47 | time: genesis.Add(time.Duration(bootstrapEpoch+1) * period), 48 | ts: tipset{genesis: genesis, epoch: int64(bootstrapEpoch + 1), period: period}, 49 | want: 0, 50 | }, 51 | { 52 | name: "in sync - right before bootstrap (offset)", 53 | time: genesis.Add(time.Duration(bootstrapEpoch-1)*period + 15*time.Second), 54 | ts: tipset{genesis: genesis, epoch: int64(bootstrapEpoch - 1), period: period}, 55 | want: 15 * time.Second, 56 | }, 57 | { 58 | name: "in sync - right after bootstrap (offset)", 59 | time: genesis.Add(time.Duration(bootstrapEpoch)*period + 1*time.Second), 60 | ts: tipset{genesis: genesis, epoch: int64(bootstrapEpoch), period: period}, 61 | want: 0 * time.Second, 62 | }, 63 | { 64 | name: "out of sync - way after bootstrap", 65 | time: genesis.Add(time.Duration(bootstrapEpoch+100)*period + 1*time.Second), 66 | ts: tipset{genesis: genesis, epoch: int64(bootstrapEpoch - 100), period: period}, 67 | want: 1 * time.Nanosecond, // we don't start immediately as the tipset we need is not available yet 68 | }, 69 | { 70 | name: "out of sync - way before bootstrap", 71 | time: genesis.Add(time.Duration(bootstrapEpoch-30)*period + 1*time.Second), 72 | ts: tipset{genesis: genesis, epoch: int64(bootstrapEpoch - 100), period: period}, 73 | want: 30*period - 1*time.Second, 74 | }, 75 | } 76 | 77 | for _, tc := range tt { 78 
| t.Run(tc.name, func(t *testing.T) { 79 | clock.Set(tc.time) 80 | got := computeBootstrapDelay(&tc.ts, clock, m) 81 | if got != tc.want { 82 | t.Errorf("computeBootstrapDelay(%s, %v, %v) = %v, want %v", &tc.ts, tc.time, period, got, tc.want) 83 | } 84 | }) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /test/withhold_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "testing" 7 | "time" 8 | 9 | "github.com/filecoin-project/go-f3/gpbft" 10 | "github.com/filecoin-project/go-f3/sim" 11 | "github.com/filecoin-project/go-f3/sim/adversary" 12 | "github.com/filecoin-project/go-f3/sim/latency" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | func TestWitholdCommitAdversary(t *testing.T) { 17 | t.Parallel() 18 | 19 | tests := []struct { 20 | name string 21 | gst time.Duration 22 | }{ 23 | { 24 | name: "immediately stable", 25 | }, 26 | { 27 | name: "global stabilisation after ec stabilisation", 28 | gst: EcStabilisationDelay, 29 | }, 30 | { 31 | name: "global stabilisation after 1 epoch", 32 | gst: EcEpochDuration, 33 | }, 34 | { 35 | name: "never stable", 36 | gst: math.MaxInt16 * time.Hour, 37 | }, 38 | } 39 | for _, test := range tests { 40 | t.Run(test.name, func(t *testing.T) { 41 | t.Parallel() 42 | nearSynchrony := func() (latency.Model, error) { 43 | return latency.NewLogNormal(1413, 10*time.Millisecond), nil 44 | } 45 | tsg := sim.NewTipSetGenerator(tipSetGeneratorSeed) 46 | baseChain := generateECChain(t, tsg) 47 | a := baseChain.Extend(tsg.Sample()) 48 | b := baseChain.Extend(tsg.Sample()) 49 | victims := []gpbft.ActorID{0, 1, 2, 3} 50 | sm, err := sim.NewSimulation( 51 | sim.WithLatencyModeler(nearSynchrony), 52 | sim.WithECEpochDuration(EcEpochDuration), 53 | sim.WitECStabilisationDelay(EcStabilisationDelay), 54 | sim.WithGpbftOptions(testGpbftOptions...), 55 | sim.WithBaseChain(baseChain), 56 | sim.AddHonestParticipants(4, sim.NewFixedECChainGenerator(a), uniformOneStoragePower), 57 | sim.AddHonestParticipants(3, sim.NewFixedECChainGenerator(b), uniformOneStoragePower), 58 | sim.WithGlobalStabilizationTime(test.gst), 59 | // Adversary has 30% of 10 total power. 60 | // Of 7 nodes, 4 victims will prefer chain A, 3 others will prefer chain B. 61 | // The adversary will target the first to decide, and withhold COMMIT from the rest. 62 | // After the victim decides in round 0, the adversary stops participating. 63 | // Now there are 3 nodes on each side (and one decided), with total power 6/10, less than quorum. 64 | // The B side must be swayed to the A side by observing that some nodes on the A side reached a COMMIT. 65 | sim.WithAdversary(adversary.NewWitholdCommitGenerator(gpbft.NewStoragePower(3), victims, a)), 66 | ) 67 | require.NoError(t, err) 68 | 69 | err = sm.Run(1, maxRounds) 70 | if err != nil { 71 | fmt.Printf("%s", sm.Describe()) 72 | sm.GetInstance(0).Print() 73 | } 74 | // The adversary could convince the victim to decide a, so all must decide a. 
75 | require.NoError(t, err) 76 | for _, victim := range victims { 77 | decision := sm.GetInstance(0).GetDecision(victim) 78 | require.NotNil(t, decision) 79 | require.Equal(t, a, decision) 80 | } 81 | }) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /internal/encoding/encoding_test.go: -------------------------------------------------------------------------------- 1 | package encoding_test 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "testing" 7 | 8 | "github.com/filecoin-project/go-f3/internal/encoding" 9 | "github.com/klauspost/compress/zstd" 10 | "github.com/stretchr/testify/require" 11 | cbg "github.com/whyrusleeping/cbor-gen" 12 | "go.opentelemetry.io/otel/attribute" 13 | ) 14 | 15 | var ( 16 | _ cbg.CBORMarshaler = (*testValue)(nil) 17 | _ cbg.CBORUnmarshaler = (*testValue)(nil) 18 | ) 19 | 20 | type testValue struct { 21 | Value string 22 | } 23 | 24 | func (m *testValue) MarshalCBOR(w io.Writer) error { 25 | return cbg.WriteByteArray(w, []byte(m.Value)) 26 | } 27 | 28 | func (m *testValue) UnmarshalCBOR(r io.Reader) error { 29 | data, err := cbg.ReadByteArray(r, cbg.MaxLength) 30 | if err != nil { 31 | return err 32 | } 33 | m.Value = string(data) 34 | return err 35 | } 36 | 37 | func TestCBOR(t *testing.T) { 38 | subject := encoding.NewCBOR[*testValue]() 39 | data := &testValue{Value: "fish"} 40 | encoded, err := subject.Encode(data) 41 | require.NoError(t, err) 42 | decoded := &testValue{} 43 | err = subject.Decode(encoded, decoded) 44 | require.NoError(t, err) 45 | require.Equal(t, data.Value, decoded.Value) 46 | } 47 | 48 | func TestZSTD(t *testing.T) { 49 | encoder, err := encoding.NewZSTD[*testValue]() 50 | require.NoError(t, err) 51 | data := &testValue{Value: "lobster"} 52 | encoded, err := encoder.Encode(data) 53 | require.NoError(t, err) 54 | decoded := &testValue{} 55 | err = encoder.Decode(encoded, decoded) 56 | require.NoError(t, err) 57 | require.Equal(t, data.Value, decoded.Value) 58 | } 59 | 60 | func TestZSTDLimits(t *testing.T) { 61 | subject, err := encoding.NewZSTD[*testValue]() 62 | require.NoError(t, err) 63 | 64 | writer, err := zstd.NewWriter(nil) 65 | require.NoError(t, err) 66 | 67 | var v testValue 68 | v.Value = string(make([]byte, cbg.ByteArrayMaxLen*2)) 69 | 70 | var buf bytes.Buffer 71 | require.NoError(t, v.MarshalCBOR(&buf)) 72 | 73 | tooLargeACompression := writer.EncodeAll(buf.Bytes(), nil) 74 | // Assert the compressed size is less than 1MiB, in other words, transportable by 75 | // the default GossipSub message size limit. 
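	// (1<<20 bytes is 1 MiB.) The point of the assertions below: the compressed
	// blob is small enough to travel over pubsub even though its decompressed size
	// exceeds the CBOR byte-array limit, so it is the decoder's decompressed-size
	// limit that protects against decompression bombs.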
76 | require.Less(t, len(tooLargeACompression), 1<<20) 77 | 78 | var dest testValue 79 | require.ErrorContains(t, subject.Decode(tooLargeACompression, &dest), "decompressed size exceeds configured limit") 80 | } 81 | 82 | func TestZSTD_GetMetricAttribute(t *testing.T) { 83 | t.Run("By Pointer", func(t *testing.T) { 84 | subject, err := encoding.NewZSTD[*testValue]() 85 | require.NoError(t, err) 86 | require.Equal(t, attribute.String("type", "testValue"), subject.GetMetricAttribute()) 87 | }) 88 | t.Run("By Value", func(t *testing.T) { 89 | type anotherTestValue struct { 90 | cbg.CBORUnmarshaler 91 | cbg.CBORMarshaler 92 | } 93 | subject, err := encoding.NewZSTD[anotherTestValue]() 94 | require.NoError(t, err) 95 | require.Equal(t, attribute.String("type", "anotherTestValue"), subject.GetMetricAttribute()) 96 | }) 97 | } 98 | -------------------------------------------------------------------------------- /internal/gnark/scalar.go: -------------------------------------------------------------------------------- 1 | package gnark 2 | 3 | import ( 4 | "crypto/cipher" 5 | "io" 6 | "math/big" 7 | 8 | fr "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" 9 | "go.dedis.ch/kyber/v4" 10 | "go.dedis.ch/kyber/v4/util/random" 11 | ) 12 | 13 | var _ kyber.Scalar = &Scalar{} 14 | 15 | type Scalar struct{ inner fr.Element } 16 | 17 | func (s *Scalar) MarshalBinary() (data []byte, err error) { res := s.inner.Bytes(); return res[:], nil } 18 | 19 | func (s *Scalar) UnmarshalBinary(data []byte) error { s.inner.SetBytes(data); return nil } 20 | 21 | func (s *Scalar) String() string { return s.inner.String() } 22 | 23 | func (s *Scalar) MarshalSize() int { return fr.Bytes } 24 | 25 | func (s *Scalar) MarshalTo(w io.Writer) (int, error) { 26 | buf := s.inner.Bytes() 27 | return w.Write(buf[:]) 28 | } 29 | 30 | func (s *Scalar) UnmarshalFrom(r io.Reader) (int, error) { 31 | buf := make([]byte, s.MarshalSize()) 32 | n, err := io.ReadFull(r, buf) 33 | if err != nil { 34 | return n, err 35 | } 36 | s.inner.SetBytes(buf) 37 | return n, nil 38 | } 39 | 40 | func (s *Scalar) Equal(s2 kyber.Scalar) bool { 41 | x := s2.(*Scalar) 42 | return s.inner.Cmp(&x.inner) == 0 43 | } 44 | 45 | func (s *Scalar) Set(a kyber.Scalar) kyber.Scalar { 46 | aa := a.(*Scalar) 47 | s.inner.Set(&aa.inner) 48 | return s 49 | } 50 | 51 | func (s *Scalar) Clone() kyber.Scalar { return new(Scalar).Set(s) } 52 | 53 | func (s *Scalar) SetInt64(v int64) kyber.Scalar { 54 | s.inner.SetInt64(v) 55 | 56 | return s 57 | } 58 | 59 | func (s *Scalar) Zero() kyber.Scalar { s.inner.SetUint64(0); return s } 60 | 61 | func (s *Scalar) Add(a, b kyber.Scalar) kyber.Scalar { 62 | aa, bb := a.(*Scalar), b.(*Scalar) 63 | s.inner.Add(&aa.inner, &bb.inner) 64 | return s 65 | } 66 | 67 | func (s *Scalar) Sub(a, b kyber.Scalar) kyber.Scalar { 68 | aa, bb := a.(*Scalar), b.(*Scalar) 69 | s.inner.Sub(&aa.inner, &bb.inner) 70 | return s 71 | } 72 | 73 | func (s *Scalar) Neg(a kyber.Scalar) kyber.Scalar { 74 | s.Set(a) 75 | s.inner.Neg(&s.inner) 76 | return s 77 | } 78 | 79 | func (s *Scalar) One() kyber.Scalar { s.inner.SetUint64(1); return s } 80 | 81 | func (s *Scalar) Mul(a, b kyber.Scalar) kyber.Scalar { 82 | aa, bb := a.(*Scalar), b.(*Scalar) 83 | s.inner.Mul(&aa.inner, &bb.inner) 84 | return s 85 | } 86 | 87 | func (s *Scalar) Div(a, b kyber.Scalar) kyber.Scalar { return s.Mul(new(Scalar).Inv(b), a) } 88 | 89 | func (s *Scalar) Inv(a kyber.Scalar) kyber.Scalar { 90 | aa := a.(*Scalar) 91 | s.inner.Inverse(&aa.inner) 92 | return s 93 | } 94 | 95 | func (s 
*Scalar) Pick(stream cipher.Stream) kyber.Scalar { 96 | n := random.Int(fr.Modulus(), stream) 97 | s.inner.SetBigInt(n) 98 | return s 99 | } 100 | 101 | func (s *Scalar) SetBytes(data []byte) kyber.Scalar { s.inner.SetBytes(data); return s } 102 | 103 | func (s *Scalar) ByteOrder() kyber.ByteOrder { 104 | return kyber.BigEndian 105 | } 106 | 107 | func (s *Scalar) GroupOrder() *big.Int { 108 | return fr.Modulus() 109 | } 110 | -------------------------------------------------------------------------------- /internal/consensus/options.go: -------------------------------------------------------------------------------- 1 | package consensus 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/filecoin-project/go-f3/gpbft" 7 | "github.com/filecoin-project/go-f3/internal/clock" 8 | ) 9 | 10 | type fakeECOptions struct { 11 | clock clock.Clock 12 | seed int64 13 | initialPowerTable gpbft.PowerEntries 14 | evolvePowerTable PowerTableMutator 15 | bootstrapEpoch int64 16 | ecPeriod time.Duration 17 | ecMaxLookback int64 18 | nullTipsetProbability float64 19 | forkAfterEpochs int64 20 | forkSeed int64 21 | } 22 | 23 | type FakeECOption func(*fakeECOptions) 24 | type PowerTableMutator func(epoch int64, pt gpbft.PowerEntries) gpbft.PowerEntries 25 | 26 | func newFakeECOptions(o ...FakeECOption) *fakeECOptions { 27 | opts := &fakeECOptions{ 28 | clock: clock.RealClock, 29 | ecPeriod: 30 * time.Second, 30 | seed: time.Now().UnixNano(), 31 | forkSeed: time.Now().UnixNano() / 2, 32 | nullTipsetProbability: 0.015, 33 | } 34 | for _, apply := range o { 35 | apply(opts) 36 | } 37 | return opts 38 | } 39 | 40 | func WithBootstrapEpoch(epoch int64) FakeECOption { 41 | return func(ec *fakeECOptions) { 42 | ec.bootstrapEpoch = epoch 43 | } 44 | } 45 | 46 | func WithSeed(seed int64) FakeECOption { 47 | return func(ec *fakeECOptions) { 48 | ec.seed = seed 49 | } 50 | } 51 | 52 | func WithInitialPowerTable(initialPowerTable gpbft.PowerEntries) FakeECOption { 53 | return func(ec *fakeECOptions) { 54 | ec.initialPowerTable = initialPowerTable 55 | } 56 | } 57 | 58 | func WithECPeriod(ecPeriod time.Duration) FakeECOption { 59 | return func(ec *fakeECOptions) { 60 | ec.ecPeriod = ecPeriod 61 | } 62 | } 63 | 64 | func WithMaxLookback(distance int64) FakeECOption { 65 | return func(ec *fakeECOptions) { 66 | ec.ecMaxLookback = distance 67 | } 68 | } 69 | 70 | func WithEvolvingPowerTable(fn PowerTableMutator) FakeECOption { 71 | return func(ec *fakeECOptions) { 72 | ec.evolvePowerTable = fn 73 | } 74 | } 75 | 76 | // WithClock sets the clock used to determine the current time. This is 77 | // useful for testing purposes, as it allows you to control the time 78 | // progression of the EC. The default clock is the system clock. 79 | func WithClock(clock clock.Clock) FakeECOption { 80 | return func(ec *fakeECOptions) { 81 | ec.clock = clock 82 | } 83 | } 84 | 85 | // WithForkSeed sets the seed used to generate fork chains. For this option to 86 | // take effect, WithForkAfterEpochs must be set to a value greater than 0. 87 | func WithForkSeed(e int64) FakeECOption { 88 | return func(ec *fakeECOptions) { 89 | ec.forkSeed = e 90 | } 91 | } 92 | 93 | // WithForkAfterEpochs sets the minimum number of epochs from the latest 94 | // finalized tipset key after which this EC may fork away. 
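// To make the generated fork deterministic, combine this option with WithForkSeed.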
95 | func WithForkAfterEpochs(e int64) FakeECOption { 96 | return func(ec *fakeECOptions) { 97 | ec.forkAfterEpochs = e 98 | } 99 | } 100 | 101 | func WithNullTipsetProbability(p float64) FakeECOption { 102 | return func(ec *fakeECOptions) { 103 | ec.nullTipsetProbability = p 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /internal/lotus/net.go: -------------------------------------------------------------------------------- 1 | package lotus 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | 11 | "github.com/filecoin-project/go-f3/gpbft" 12 | logging "github.com/ipfs/go-log/v2" 13 | "github.com/libp2p/go-libp2p/core/peer" 14 | ) 15 | 16 | var logger = logging.Logger("lotus") 17 | 18 | type resultOrError[R any] struct { 19 | Result R `json:"result"` 20 | Error *struct { 21 | Message string `json:"message"` 22 | } 23 | } 24 | 25 | func ListAllPeers(ctx context.Context, apiEndpoints ...string) []peer.AddrInfo { 26 | if len(apiEndpoints) == 0 { 27 | return nil 28 | } 29 | const netPeers = `{"method":"Filecoin.NetPeers","params":[],"id":2,"jsonrpc":"2.0"}` 30 | var addrs []peer.AddrInfo 31 | seen := make(map[string]struct{}) 32 | for _, endpoint := range apiEndpoints { 33 | peers, err := doJsonRpcRequest[[]peer.AddrInfo](ctx, endpoint, netPeers) 34 | if err != nil { 35 | logger.Errorw("failed to get net peers from endpoint", "endpoint", endpoint, "err", err) 36 | continue 37 | } 38 | for _, addr := range peers { 39 | k := addr.ID.String() 40 | if _, found := seen[k]; !found { 41 | addrs = append(addrs, addr) 42 | seen[k] = struct{}{} 43 | } 44 | } 45 | } 46 | return addrs 47 | } 48 | 49 | func GetF3Progress(ctx context.Context, apiEndpoints ...string) []gpbft.InstanceProgress { 50 | if len(apiEndpoints) == 0 { 51 | return nil 52 | } 53 | const getF3Progress = `{"method":"Filecoin.F3GetProgress","params":[],"id":2,"jsonrpc":"2.0"}` 54 | progresses := make([]gpbft.InstanceProgress, 0, len(apiEndpoints)) 55 | for _, endpoint := range apiEndpoints { 56 | progress, err := doJsonRpcRequest[gpbft.InstanceProgress](ctx, endpoint, getF3Progress) 57 | if err != nil { 58 | logger.Errorw("failed to get F3 progress from endpoint", "endpoint", endpoint, "err", err) 59 | continue 60 | } 61 | progresses = append(progresses, progress) 62 | } 63 | return progresses 64 | } 65 | 66 | func doJsonRpcRequest[R any](ctx context.Context, endpoint string, body string) (R, error) { 67 | var zeroResult R 68 | req, err := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewReader([]byte(body))) 69 | if err != nil { 70 | return zeroResult, fmt.Errorf("failed to construct request: %w", err) 71 | } 72 | req.Header.Set("Content-Type", `application/json`) 73 | 74 | resp, err := http.DefaultClient.Do(req) 75 | if err != nil { 76 | return zeroResult, fmt.Errorf("failed to execute the request: %w", err) 77 | } 78 | defer func() { _ = resp.Body.Close() }() 79 | respBody, err := io.ReadAll(resp.Body) 80 | if err != nil { 81 | return zeroResult, fmt.Errorf("failed to read response body: %w", err) 82 | } 83 | if resp.StatusCode != 200 { 84 | return zeroResult, fmt.Errorf("unsuccessful response status %d: %s", resp.StatusCode, string(respBody)) 85 | } 86 | var roe resultOrError[R] 87 | if err := json.Unmarshal(respBody, &roe); err != nil { 88 | return zeroResult, fmt.Errorf("failed to unmarshal response as json: %s", string(respBody)) 89 | } 90 | if roe.Error != nil { 91 | logger.Errorf("failed to discover peers from lotus 
daemon %s: %s", endpoint, roe.Error.Message) 92 | return zeroResult, fmt.Errorf("json rpc error: %s", roe.Error.Message) 93 | } 94 | return roe.Result, nil 95 | } 96 | -------------------------------------------------------------------------------- /blssig/verifier.go: -------------------------------------------------------------------------------- 1 | package blssig 2 | 3 | import ( 4 | "context" 5 | "encoding/base64" 6 | "fmt" 7 | "runtime/debug" 8 | "sync" 9 | 10 | "go.dedis.ch/kyber/v4" 11 | "go.dedis.ch/kyber/v4/sign/bdn" 12 | "go.opentelemetry.io/otel/metric" 13 | 14 | "github.com/filecoin-project/go-f3/gpbft" 15 | bls12381 "github.com/filecoin-project/go-f3/internal/gnark" 16 | "github.com/filecoin-project/go-f3/internal/measurements" 17 | ) 18 | 19 | type Verifier struct { 20 | scheme *bdn.Scheme 21 | keyGroup kyber.Group 22 | 23 | mu sync.RWMutex 24 | pointCache map[string]kyber.Point 25 | } 26 | 27 | func VerifierWithKeyOnG1() *Verifier { 28 | suite := bls12381.NewSuiteBLS12381() 29 | return &Verifier{ 30 | scheme: bdn.NewSchemeOnG2(suite), 31 | keyGroup: suite.G1(), 32 | } 33 | } 34 | 35 | func (v *Verifier) pubkeyToPoint(p gpbft.PubKey) (kyber.Point, error) { 36 | if len(p) != 48 { 37 | return nil, fmt.Errorf("invalid public key length: %d, expected 48", len(p)) 38 | } 39 | 40 | var point kyber.Point 41 | cached := true 42 | defer func() { 43 | metrics.decompressPoint.Add(context.TODO(), 1, metric.WithAttributes(attrCached.Bool(cached))) 44 | }() 45 | 46 | v.mu.RLock() 47 | point, cached = v.pointCache[string(p)] 48 | v.mu.RUnlock() 49 | if cached { 50 | return point.Clone(), nil 51 | } 52 | 53 | point = v.keyGroup.Point() 54 | err := point.UnmarshalBinary(p) 55 | if err != nil { 56 | return nil, fmt.Errorf("unmarshalling pubkey: %w", err) 57 | } 58 | if point.Equal(v.keyGroup.Point().Null()) { 59 | return nil, fmt.Errorf("public key is a null point") 60 | } 61 | v.mu.Lock() 62 | 63 | // Initialize the cache, or re-initialize it if we've grown too big. We don't expect the 64 | // latter to happen in practice (would need over 10k participants), but better be safe than 65 | // sorry. We could, alternatively, use an LRU but... that's not worth the overhead for 66 | // something that shouldn't happen.
67 | if v.pointCache == nil || len(v.pointCache) >= maxPointCacheSize { 68 | v.pointCache = make(map[string]kyber.Point) 69 | } 70 | 71 | _, cached = v.pointCache[string(p)] // for accurate metrics 72 | if !cached { 73 | v.pointCache[string(p)] = point 74 | } 75 | v.mu.Unlock() 76 | 77 | return point.Clone(), nil 78 | } 79 | 80 | func (v *Verifier) Verify(pubKey gpbft.PubKey, msg, sig []byte) (_err error) { 81 | defer func() { 82 | status := measurements.AttrStatusSuccess 83 | if _err != nil { 84 | status = measurements.AttrStatusError 85 | } 86 | if perr := recover(); perr != nil { 87 | msgStr := base64.StdEncoding.EncodeToString(msg) 88 | sigStr := base64.StdEncoding.EncodeToString(sig) 89 | pubKeyStr := base64.StdEncoding.EncodeToString(pubKey) 90 | _err = fmt.Errorf("panicked validating signature %q for message %q from %q: %v\n%s", 91 | sigStr, msgStr, pubKeyStr, perr, string(debug.Stack())) 92 | log.Error(_err) 93 | status = measurements.AttrStatusPanic 94 | } 95 | metrics.verify.Add(context.TODO(), 1, metric.WithAttributes(status)) 96 | }() 97 | 98 | point, err := v.pubkeyToPoint(pubKey) 99 | if err != nil { 100 | return fmt.Errorf("unmarshalling public key: %w", err) 101 | } 102 | 103 | return v.scheme.Verify(point, msg, sig) 104 | } 105 | -------------------------------------------------------------------------------- /certexchange/polling/predictor_test.go: -------------------------------------------------------------------------------- 1 | package polling 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestPredictor(t *testing.T) { 13 | p := newPredictor(time.Second, 30*time.Second, 120*time.Second) 14 | 15 | // Progress of 1 shouldn't update anything. 16 | require.Equal(t, 30*time.Second, p.update(1)) 17 | require.Equal(t, 30*time.Second, p.update(1)) 18 | 19 | // Progress of 0 should predict the same interval, then twice that, etc. 20 | require.Equal(t, 30*time.Second, p.update(0)) 21 | require.Equal(t, 60*time.Second, p.update(0)) 22 | 23 | // After that, the interval should increase slightly. 24 | intervalA := p.update(1) 25 | require.Less(t, 30*time.Second, intervalA) 26 | require.Greater(t, 40*time.Second, intervalA) 27 | 28 | // If the interval is too large, it should decrease, but not by as much because we're 29 | // switching direction. 30 | intervalB := p.update(2) 31 | require.Less(t, 30*time.Second, intervalB) 32 | require.Greater(t, intervalA, intervalB) 33 | 34 | // It should keep getting smaller. 35 | intervalC := p.update(2) 36 | require.Greater(t, intervalB, intervalC) 37 | 38 | // Until we stabilize. 39 | require.Equal(t, intervalC, p.update(1)) 40 | 41 | // We should always stay above the minimum. 42 | for i := 0; i < 100; i++ { 43 | require.LessOrEqual(t, time.Second, p.update(2)) 44 | } 45 | require.Equal(t, time.Second, p.update(1)) 46 | 47 | // And below the maximum (unless backing off). 48 | for i := 0; i < 100; i++ { 49 | require.GreaterOrEqual(t, 120*time.Second, p.update(0)) 50 | require.GreaterOrEqual(t, 120*time.Second, p.update(1)) 51 | } 52 | require.Equal(t, 120*time.Second, p.update(1)) 53 | 54 | // But backoff should go much higher. 55 | for i := 0; i < 100; i++ { 56 | require.GreaterOrEqual(t, 10*120*time.Second, p.update(0)) 57 | } 58 | require.Equal(t, 10*120*time.Second, p.update(0)) 59 | 60 | // And revert to the old time when done.
61 | require.Equal(t, 120*time.Second, p.update(1)) 62 | } 63 | 64 | func TestPredictorConverges(t *testing.T) { 65 | const minSeconds = 1 66 | const maxSeconds = 120 67 | p := newPredictor(minSeconds*time.Second, 30*time.Second, maxSeconds*time.Second) 68 | 69 | converge := func(interval time.Duration, n int) (time.Duration, time.Duration, int) { 70 | currentTime := time.Duration(0) 71 | updatesSeen := uint64(0) 72 | for i := 0; i < n; i++ { 73 | newUpdatesSeen := uint64(currentTime / interval) 74 | currentTime += p.update(newUpdatesSeen - updatesSeen) 75 | updatesSeen = newUpdatesSeen 76 | } 77 | return p.update(1), currentTime, int(currentTime / interval) 78 | } 79 | 80 | // Converges from 30s -> 5s very quickly. 81 | { 82 | res, elapsed, count := converge(5*time.Second, 10) 83 | assert.InDelta(t, 5*time.Second, res, float64(1*time.Second)) 84 | assert.Less(t, elapsed, 3*time.Minute) 85 | assert.Less(t, count, 30) 86 | } 87 | 88 | r := rand.New(rand.NewSource(0xdeadbeef)) 89 | numbers := r.Perm(maxSeconds - minSeconds) 90 | for _, n := range numbers { 91 | eventInterval := time.Duration(n+minSeconds) * time.Second 92 | result, _, _ := converge(eventInterval, 300) 93 | assert.InEpsilon(t, eventInterval, result, 0.05, "actual %s, expected %s", result, eventInterval) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /internal/gnark/gt.go: -------------------------------------------------------------------------------- 1 | package gnark 2 | 3 | import ( 4 | "crypto/cipher" 5 | "io" 6 | "math/big" 7 | 8 | bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381" 9 | "go.dedis.ch/kyber/v4" 10 | ) 11 | 12 | var gtBase *bls12381.GT 13 | 14 | func init() { 15 | _, _, g1, g2 := bls12381.Generators() 16 | gt, err := bls12381.Pair([]bls12381.G1Affine{g1}, []bls12381.G2Affine{g2}) 17 | if err != nil { 18 | panic(err) 19 | } 20 | gtBase = &gt 21 | } 22 | 23 | var _ kyber.Point = &GTElt{} 24 | 25 | // GTElt is a wrapper around the gnark-crypto GT element type. 26 | type GTElt struct{ inner bls12381.GT } 27 | 28 | // MarshalBinary returns a compressed point, without any domain separation tag information 29 | func (p *GTElt) MarshalBinary() (data []byte, err error) { 30 | res := p.inner.Bytes() 31 | return res[:], nil 32 | } 33 | 34 | // UnmarshalBinary populates the point from a compressed point representation. 35 | func (p *GTElt) UnmarshalBinary(data []byte) error { return p.inner.Unmarshal(data) } 36 | 37 | func (p *GTElt) String() string { return p.inner.String() } 38 | 39 | func (p *GTElt) MarshalSize() int { return bls12381.SizeOfGT } 40 | 41 | // MarshalTo writes a compressed point to the Writer, without any domain separation tag information 42 | func (p *GTElt) MarshalTo(w io.Writer) (int, error) { 43 | buf, err := p.MarshalBinary() 44 | if err != nil { 45 | return 0, err 46 | } 47 | return w.Write(buf) 48 | } 49 | 50 | // UnmarshalFrom populates the point from a compressed point representation read from the Reader.
51 | func (p *GTElt) UnmarshalFrom(r io.Reader) (int, error) { 52 | buf := make([]byte, p.MarshalSize()) 53 | n, err := io.ReadFull(r, buf) 54 | if err != nil { 55 | return n, err 56 | } 57 | return n, p.UnmarshalBinary(buf) 58 | } 59 | 60 | func (p *GTElt) Equal(p2 kyber.Point) bool { x := p2.(*GTElt); return p.inner.Equal(&x.inner) } 61 | 62 | func (p *GTElt) Null() kyber.Point { p.inner.SetOne(); return p } 63 | 64 | func (p *GTElt) Base() kyber.Point { p.inner = *gtBase; return p } 65 | 66 | func (p *GTElt) Pick(_ cipher.Stream) kyber.Point { 67 | panic("bls12-381: unsupported operation") 68 | } 69 | 70 | func (p *GTElt) Set(p2 kyber.Point) kyber.Point { p.inner = p2.(*GTElt).inner; return p } 71 | 72 | func (p *GTElt) Clone() kyber.Point { return new(GTElt).Set(p) } 73 | 74 | func (p *GTElt) EmbedLen() int { 75 | panic("bls12-381: unsupported operation") 76 | } 77 | 78 | func (p *GTElt) Embed(_ []byte, _ cipher.Stream) kyber.Point { 79 | panic("bls12-381: unsupported operation") 80 | } 81 | 82 | func (p *GTElt) Data() ([]byte, error) { 83 | panic("bls12-381: unsupported operation") 84 | } 85 | 86 | func (p *GTElt) Add(a, b kyber.Point) kyber.Point { 87 | aa, bb := a.(*GTElt), b.(*GTElt) 88 | p.inner.Mul(&aa.inner, &bb.inner) 89 | return p 90 | } 91 | 92 | func (p *GTElt) Sub(a, b kyber.Point) kyber.Point { 93 | return p.Add(a, new(GTElt).Neg(b)) 94 | } 95 | 96 | func (p *GTElt) Neg(a kyber.Point) kyber.Point { 97 | aa := a.(*GTElt) 98 | p.inner.Inverse(&aa.inner) 99 | return p 100 | } 101 | 102 | func (p *GTElt) Mul(s kyber.Scalar, q kyber.Point) kyber.Point { 103 | qq, ss := q.(*GTElt), s.(*Scalar) 104 | var scalar big.Int 105 | ss.inner.BigInt(&scalar) 106 | p.inner.Exp(qq.inner, &scalar) 107 | return p 108 | } 109 | -------------------------------------------------------------------------------- /internal/caching/set.go: -------------------------------------------------------------------------------- 1 | package caching 2 | 3 | import ( 4 | "sync" 5 | 6 | "golang.org/x/crypto/blake2b" 7 | ) 8 | 9 | type Set struct { 10 | // maxSize defines the maximum number of samples to store per internal set. 11 | maxSize int 12 | // mu protects access to flip and flop. 13 | mu sync.Mutex 14 | // flip stores one set of samples until it reaches maxSize. 15 | flip map[[32]byte]struct{} 16 | // flop stores another set of samples until it reaches maxSize. 17 | flop map[[32]byte]struct{} 18 | } 19 | 20 | // NewSet creates a new Set with a specified max size per sample subset. The max 21 | // size cannot be less than 1; if it is the cache will silently be instantiated 22 | // with max size of 1. 23 | func NewSet(maxSize int) *Set { 24 | maxSize = max(1, maxSize) 25 | return &Set{ 26 | maxSize: maxSize, 27 | flip: make(map[[32]byte]struct{}, maxSize), 28 | flop: make(map[[32]byte]struct{}, maxSize), 29 | } 30 | } 31 | 32 | // Contains checks if the given sample v is contained within the sample set. A 33 | // namespace may optionally be specified for the value. Namespace must be between 34 | // 0 and 64 bytes long.
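// A minimal de-duplication sketch (illustrative only; the namespace and key values are stand-ins):
//
//	seen := caching.NewSet(1024)
//	if dup, err := seen.ContainsOrAdd(namespace, key); err == nil && dup {
//		// drop the duplicate sample
//	}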
35 | func (ss *Set) Contains(namespace, v []byte) (bool, error) { 36 | key, err := ss.newKey(namespace, v) 37 | if err != nil { 38 | return false, err 39 | } 40 | ss.mu.Lock() 41 | defer ss.mu.Unlock() 42 | 43 | if _, exists := ss.flip[key]; exists { 44 | return true, nil 45 | } 46 | if _, exists := ss.flop[key]; exists { 47 | return true, nil 48 | } 49 | return false, nil 50 | } 51 | 52 | // ContainsOrAdd checks if the given sample v is contained within sample set, and 53 | // if not adds it. The namespace may be optionally supplied; it must be between 54 | // zero to 64 bytes. 55 | func (ss *Set) ContainsOrAdd(namespace, v []byte) (bool, error) { 56 | key, err := ss.newKey(namespace, v) 57 | if err != nil { 58 | return false, err 59 | } 60 | 61 | ss.mu.Lock() 62 | defer ss.mu.Unlock() 63 | 64 | // Check if the sample exists in either sets and if not insert it. 65 | if _, exists := ss.flip[key]; exists { 66 | return true, nil 67 | } 68 | if _, exists := ss.flop[key]; exists { 69 | return true, nil 70 | } 71 | ss.flip[key] = struct{}{} 72 | 73 | // Check if flip exceeds maxSize and if so do the flippity flop. 74 | if len(ss.flip) >= ss.maxSize { 75 | clear(ss.flop) 76 | ss.flop, ss.flip = ss.flip, ss.flop 77 | log.Debugw("Cleared flop and swapped subsets as max size is reached", "maxSize", ss.maxSize) 78 | } 79 | return false, nil 80 | } 81 | 82 | func (ss *Set) newKey(namespace, v []byte) ([32]byte, error) { 83 | hasher, err := blake2b.New(blake2b.Size256, namespace) 84 | if err != nil { 85 | return [32]byte{}, err 86 | } 87 | _, err = hasher.Write(v) 88 | if err != nil { 89 | return [32]byte{}, err 90 | } 91 | var key [32]byte 92 | keyB := hasher.Sum(key[:0]) 93 | copy(key[:], keyB) 94 | // TODO: using blake's built-in keys means we lose the nice [32]byte return types 95 | // from Sum256. Consider pooling byte slices. 96 | return key, nil 97 | } 98 | 99 | // Clear removes all elements in the set. 100 | func (ss *Set) Clear() { 101 | ss.mu.Lock() 102 | defer ss.mu.Unlock() 103 | ss.flip = make(map[[32]byte]struct{}) 104 | ss.flop = make(map[[32]byte]struct{}) 105 | } 106 | -------------------------------------------------------------------------------- /gpbft/message_builder.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | ) 8 | 9 | // ErrNoPower is returned by the MessageBuilder if the specified participant has no power. 
10 | var ErrNoPower = errors.New("no power") 11 | 12 | type MessageBuilder struct { 13 | NetworkName NetworkName 14 | PowerTable powerTableAccessor 15 | Payload Payload 16 | BeaconForTicket []byte 17 | Justification *Justification 18 | } 19 | 20 | type powerTableAccessor interface { 21 | Get(ActorID) (int64, PubKey) 22 | } 23 | 24 | // Build uses the builder and a signer interface to build GMessage 25 | // It is a shortcut for when separated flow is not required 26 | func (mt *MessageBuilder) Build(ctx context.Context, signer Signer, id ActorID) (*GMessage, error) { 27 | st, err := mt.PrepareSigningInputs(id) 28 | if err != nil { 29 | return nil, fmt.Errorf("preparing signing inputs: %w", err) 30 | } 31 | 32 | payloadSig, vrf, err := st.Sign(ctx, signer) 33 | if err != nil { 34 | return nil, fmt.Errorf("signing message builder: %w", err) 35 | } 36 | 37 | return st.Build(payloadSig, vrf), nil 38 | } 39 | 40 | // SignatureBuilder's fields are exposed to facilitate JSON encoding 41 | type SignatureBuilder struct { 42 | NetworkName NetworkName 43 | 44 | ParticipantID ActorID 45 | Payload Payload 46 | Justification *Justification 47 | PubKey PubKey 48 | PayloadToSign []byte 49 | VRFToSign []byte 50 | } 51 | 52 | func (mb *MessageBuilder) PrepareSigningInputs(id ActorID) (*SignatureBuilder, error) { 53 | effectivePower, pubKey := mb.PowerTable.Get(id) 54 | if pubKey == nil || effectivePower == 0 { 55 | return nil, fmt.Errorf("could not find pubkey for actor %d: %w", id, ErrNoPower) 56 | } 57 | sb := SignatureBuilder{ 58 | ParticipantID: id, 59 | NetworkName: mb.NetworkName, 60 | Payload: mb.Payload, 61 | Justification: mb.Justification, 62 | 63 | PubKey: pubKey, 64 | } 65 | 66 | sb.PayloadToSign = mb.Payload.MarshalForSigning(mb.NetworkName) 67 | if mb.BeaconForTicket != nil { 68 | sb.VRFToSign = vrfSerializeSigInput(mb.BeaconForTicket, mb.Payload.Instance, mb.Payload.Round, mb.NetworkName) 69 | } 70 | return &sb, nil 71 | } 72 | 73 | // Sign creates the signed payload from the signature builder and returns the payload 74 | // and VRF signatures. These signatures can be used independent of the builder. 75 | func (st *SignatureBuilder) Sign(ctx context.Context, signer Signer) ([]byte, []byte, error) { 76 | payloadSignature, err := signer.Sign(ctx, st.PubKey, st.PayloadToSign) 77 | if err != nil { 78 | return nil, nil, fmt.Errorf("signing payload: %w", err) 79 | } 80 | var vrf []byte 81 | if st.VRFToSign != nil { 82 | vrf, err = signer.Sign(ctx, st.PubKey, st.VRFToSign) 83 | if err != nil { 84 | return nil, nil, fmt.Errorf("signing vrf: %w", err) 85 | } 86 | } 87 | return payloadSignature, vrf, nil 88 | } 89 | 90 | // Build takes the template and signatures and builds GMessage out of them 91 | func (st *SignatureBuilder) Build(payloadSignature []byte, vrf []byte) *GMessage { 92 | return &GMessage{ 93 | Sender: st.ParticipantID, 94 | Vote: st.Payload, 95 | Signature: payloadSignature, 96 | Ticket: vrf, 97 | Justification: st.Justification, 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /gpbft/ticket_rank.go: -------------------------------------------------------------------------------- 1 | package gpbft 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "math/big" 7 | 8 | "golang.org/x/crypto/blake2b" 9 | ) 10 | 11 | // ComputeTicketRank computes a rank for the given ticket, weighted by a 12 | // participant's power. A lower rank value indicates a better ranking. The 13 | // process involves the following phases: 14 | // 15 | // 1. 
Hash the ticket using the Blake2b256 hash function. 16 | // 2. Extract the low 128 bits from the hash and interpret them as a Q.128 17 | // fixed-point number in the range [0, 1). 18 | // 3. Convert this uniform distribution to an exponential distribution using 19 | // the inverse distribution function -log(x). 20 | // 21 | // The exponential distribution has the property that the minimum of two 22 | // exponentially distributed random variables is also exponentially distributed. 23 | // This allows us to weight ranks according to the participant's power by using 24 | // the formula: `-log(ticket) / power`, where `ticket` is in the range [0, 1). 25 | // 26 | // We use a base-2 logarithm instead of a natural logarithm for ease of 27 | // implementation. The choice of logarithm base only affects all ranks linearly 28 | // and does not alter their order. 29 | func ComputeTicketRank(ticket Ticket, scaledPower int64) float64 { 30 | if scaledPower <= 0 { 31 | return math.Inf(1) 32 | } 33 | // we could use Blake2b-128 but 256 is more common and more widely supported 34 | ticketHash := blake2b.Sum256(ticket) 35 | rank := linearToExpDist(ticketHash[:16]) 36 | return rank / float64(scaledPower) 37 | } 38 | 39 | // if ticket length is not 16, linearToExpDist will panic 40 | func linearToExpDist(ticket []byte) float64 { 41 | if len(ticket) != 16 { 42 | panic(fmt.Sprintf("expected ticket to be 16 bytes, got: %d", len(ticket))) 43 | } 44 | // we are interpreting the ticket as fixed-point number with 128 fractional bits 45 | // and adjusting using exponential distribution inverse function, -log(x) 46 | // we are computing Log2 of it with the adjustment that Log2(0) == -129 47 | // we can use Log2 instead of Ln as the difference is linear transform between them which 48 | // has no relative effect 49 | asInt := new(big.Int).SetBytes(ticket) // interpret at Q.128 50 | log2Int, log2Frac := bigLog2(asInt) 51 | // combine integer and fractional parts, in theory we could operate on them separately 52 | // but the 7bit gain on top of 52bits is minor 53 | log2 := float64(log2Int) + log2Frac 54 | return -log2 55 | } 56 | 57 | // bigLog2 takes an approximate logarithm of the big integer interpreted as Q.128 58 | // If the input is zero, the output is [-129, 0.f). 59 | // The result is an integer and fraction, where fraction is in [0, 1) 60 | func bigLog2(asInt *big.Int) (int64, float64) { 61 | bitLen := uint(asInt.BitLen()) 62 | if bitLen == 0 { 63 | return -129, 0. 64 | } 65 | log2Int := -int64(128 - bitLen + 1) //integer part of the Log2 66 | // now that we saved the integer part, we want to interpret it as [1,2) 67 | // so it will be Q.(bitlen-1) 68 | // to convert to float exactly, we need to bring it down to 53 bits 69 | if bitLen > 53 { 70 | asInt = asInt.Rsh(asInt, bitLen-53) 71 | } else if bitLen < 53 { 72 | asInt = asInt.Lsh(asInt, 53-bitLen) 73 | } 74 | if asInt.BitLen() != 53 { 75 | panic(fmt.Sprintf("wrong bitlen: %v", asInt.BitLen())) 76 | } 77 | asFloat := float64(asInt.Uint64()) / (1 << 52) 78 | if asFloat < 1 || asFloat >= 2 { 79 | panic("wrong range") 80 | } 81 | return log2Int, math.Log2(asFloat) 82 | } 83 | -------------------------------------------------------------------------------- /chainexchange/cbor_gen.go: -------------------------------------------------------------------------------- 1 | // Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
2 | 3 | package chainexchange 4 | 5 | import ( 6 | "fmt" 7 | "io" 8 | "math" 9 | "sort" 10 | 11 | gpbft "github.com/filecoin-project/go-f3/gpbft" 12 | cid "github.com/ipfs/go-cid" 13 | cbg "github.com/whyrusleeping/cbor-gen" 14 | xerrors "golang.org/x/xerrors" 15 | ) 16 | 17 | var _ = xerrors.Errorf 18 | var _ = cid.Undef 19 | var _ = math.E 20 | var _ = sort.Sort 21 | 22 | var lengthBufMessage = []byte{131} 23 | 24 | func (t *Message) MarshalCBOR(w io.Writer) error { 25 | if t == nil { 26 | _, err := w.Write(cbg.CborNull) 27 | return err 28 | } 29 | 30 | cw := cbg.NewCborWriter(w) 31 | 32 | if _, err := cw.Write(lengthBufMessage); err != nil { 33 | return err 34 | } 35 | 36 | // t.Instance (uint64) (uint64) 37 | 38 | if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Instance)); err != nil { 39 | return err 40 | } 41 | 42 | // t.Chain (gpbft.ECChain) (struct) 43 | if err := t.Chain.MarshalCBOR(cw); err != nil { 44 | return err 45 | } 46 | 47 | // t.Timestamp (int64) (int64) 48 | if t.Timestamp >= 0 { 49 | if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { 50 | return err 51 | } 52 | } else { 53 | if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { 54 | return err 55 | } 56 | } 57 | 58 | return nil 59 | } 60 | 61 | func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { 62 | *t = Message{} 63 | 64 | cr := cbg.NewCborReader(r) 65 | 66 | maj, extra, err := cr.ReadHeader() 67 | if err != nil { 68 | return err 69 | } 70 | defer func() { 71 | if err == io.EOF { 72 | err = io.ErrUnexpectedEOF 73 | } 74 | }() 75 | 76 | if maj != cbg.MajArray { 77 | return fmt.Errorf("cbor input should be of type array") 78 | } 79 | 80 | if extra != 3 { 81 | return fmt.Errorf("cbor input had wrong number of fields") 82 | } 83 | 84 | // t.Instance (uint64) (uint64) 85 | 86 | { 87 | 88 | maj, extra, err = cr.ReadHeader() 89 | if err != nil { 90 | return err 91 | } 92 | if maj != cbg.MajUnsignedInt { 93 | return fmt.Errorf("wrong type for uint64 field") 94 | } 95 | t.Instance = uint64(extra) 96 | 97 | } 98 | // t.Chain (gpbft.ECChain) (struct) 99 | 100 | { 101 | 102 | b, err := cr.ReadByte() 103 | if err != nil { 104 | return err 105 | } 106 | if b != cbg.CborNull[0] { 107 | if err := cr.UnreadByte(); err != nil { 108 | return err 109 | } 110 | t.Chain = new(gpbft.ECChain) 111 | if err := t.Chain.UnmarshalCBOR(cr); err != nil { 112 | return xerrors.Errorf("unmarshaling t.Chain pointer: %w", err) 113 | } 114 | } 115 | 116 | } 117 | // t.Timestamp (int64) (int64) 118 | { 119 | maj, extra, err := cr.ReadHeader() 120 | if err != nil { 121 | return err 122 | } 123 | var extraI int64 124 | switch maj { 125 | case cbg.MajUnsignedInt: 126 | extraI = int64(extra) 127 | if extraI < 0 { 128 | return fmt.Errorf("int64 positive overflow") 129 | } 130 | case cbg.MajNegativeInt: 131 | extraI = int64(extra) 132 | if extraI < 0 { 133 | return fmt.Errorf("int64 negative overflow") 134 | } 135 | extraI = -1 - extraI 136 | default: 137 | return fmt.Errorf("wrong type for int64 field: %d", maj) 138 | } 139 | 140 | t.Timestamp = int64(extraI) 141 | } 142 | return nil 143 | } 144 | -------------------------------------------------------------------------------- /internal/psutil/psutil_test.go: -------------------------------------------------------------------------------- 1 | package psutil_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/filecoin-project/go-f3/internal/psutil" 7 | pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" 8 | 
"github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestManifestMessageIdFn(t *testing.T) { 12 | for _, test := range []struct { 13 | name string 14 | one *pubsub_pb.Message 15 | other *pubsub_pb.Message 16 | expectEqualID bool 17 | }{ 18 | { 19 | name: "same topic different data", 20 | one: &pubsub_pb.Message{ 21 | Topic: topic("fish"), 22 | From: []byte("barreleye"), 23 | Data: []byte("undadasea"), 24 | }, 25 | other: &pubsub_pb.Message{ 26 | Topic: topic("fish"), 27 | From: []byte("barreleye"), 28 | Data: []byte("lobstermuncher"), 29 | }, 30 | expectEqualID: false, 31 | }, 32 | { 33 | name: "same data different topic", 34 | one: &pubsub_pb.Message{ 35 | Topic: topic("fish"), 36 | From: []byte("barreleye"), 37 | Data: []byte("undadasea"), 38 | }, 39 | other: &pubsub_pb.Message{ 40 | Topic: topic("lobster"), 41 | From: []byte("barreleye"), 42 | Data: []byte("undadasea"), 43 | }, 44 | expectEqualID: false, 45 | }, 46 | { 47 | name: "same data and topic different sender", 48 | one: &pubsub_pb.Message{ 49 | Topic: topic("fish"), 50 | From: []byte("barreleye"), 51 | Data: []byte("undadasea"), 52 | }, 53 | other: &pubsub_pb.Message{ 54 | Topic: topic("fish"), 55 | From: []byte("fisherman"), 56 | Data: []byte("undadasea"), 57 | }, 58 | expectEqualID: false, 59 | }, 60 | } { 61 | t.Run(test.name, func(t *testing.T) { 62 | this, that := psutil.ManifestMessageIdFn(test.one), psutil.ManifestMessageIdFn(test.other) 63 | require.Equal(t, test.expectEqualID, this == that) 64 | }) 65 | } 66 | } 67 | 68 | func TestGPBFTMessageIdFn(t *testing.T) { 69 | for _, test := range []struct { 70 | name string 71 | one *pubsub_pb.Message 72 | other *pubsub_pb.Message 73 | expectEqualID bool 74 | }{ 75 | { 76 | name: "same topic different data", 77 | one: &pubsub_pb.Message{ 78 | Topic: topic("fish"), 79 | Data: []byte("undadasea"), 80 | }, 81 | other: &pubsub_pb.Message{ 82 | Topic: topic("fish"), 83 | Data: []byte("lobstermuncher"), 84 | }, 85 | expectEqualID: false, 86 | }, 87 | { 88 | name: "same data different topic", 89 | one: &pubsub_pb.Message{ 90 | Topic: topic("fish"), 91 | Data: []byte("undadasea"), 92 | }, 93 | other: &pubsub_pb.Message{ 94 | Topic: topic("lobster"), 95 | Data: []byte("undadasea"), 96 | }, 97 | expectEqualID: false, 98 | }, 99 | { 100 | name: "same data and topic different sender", 101 | one: &pubsub_pb.Message{ 102 | Topic: topic("fish"), 103 | From: []byte("barreleye"), 104 | Data: []byte("undadasea"), 105 | }, 106 | other: &pubsub_pb.Message{ 107 | Topic: topic("fish"), 108 | From: []byte("fisherman"), 109 | Data: []byte("undadasea"), 110 | }, 111 | expectEqualID: true, 112 | }, 113 | } { 114 | t.Run(test.name, func(t *testing.T) { 115 | this, that := psutil.GPBFTMessageIdFn(test.one), psutil.GPBFTMessageIdFn(test.other) 116 | require.Equal(t, test.expectEqualID, this == that) 117 | }) 118 | } 119 | } 120 | 121 | func topic(s string) *string { return &s } 122 | -------------------------------------------------------------------------------- /certexchange/polling/predictor.go: -------------------------------------------------------------------------------- 1 | package polling 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | const maxBackoffMultiplier = 10 8 | 9 | func newPredictor(minInterval, defaultInterval, maxInterval time.Duration) *predictor { 10 | return &predictor{ 11 | minInterval: minInterval, 12 | maxInterval: maxInterval, 13 | interval: defaultInterval, 14 | exploreDistance: defaultInterval / 2, 15 | } 16 | } 17 | 18 | // An interval predictor that tries to predict the 
time between instances. It can't predict the time 19 | // an instance will be available, but it'll keep adjusting the interval until we receive one 20 | // instance per interval. 21 | type predictor struct { 22 | minInterval, maxInterval time.Duration 23 | 24 | interval time.Duration 25 | wasIncreasing bool 26 | exploreDistance time.Duration 27 | 28 | backoff time.Duration 29 | } 30 | 31 | // Update the predictor. The one argument indicates how many certificates we received since the last 32 | // update. 33 | // 34 | // - 2+ -> interval is too long. 35 | // - 1 -> interval is perfect. 36 | // - 0 -> interval is too short. 37 | // 38 | // We don't actually know the _offset_... but whatever. We can keep up +/- one instance and that's 39 | // fine (especially because of the power table lag, etc.). 40 | func (p *predictor) update(progress uint64) time.Duration { 41 | if p.backoff > 0 { 42 | if progress > 0 { 43 | p.backoff = 0 44 | } 45 | } else if progress != 1 { 46 | // If we've made too much progress (interval too long) or made no progress (interval 47 | // too short), explore to find the right interval. 48 | 49 | if p.wasIncreasing == (progress > 1) { 50 | // We switched directions which means we're circling the target, shrink the 51 | // explore distance. 52 | p.exploreDistance /= 3 53 | } else if progress <= 2 { 54 | // We repeatedly made no progress, or slightly too much progress. Double the 55 | // explore distance. 56 | p.exploreDistance *= 2 57 | } else { 58 | // We're far away from our target and aren't polling often enough. Reset to a 59 | // sane estimate. 60 | p.interval /= time.Duration(progress) 61 | p.exploreDistance = p.interval / 2 62 | } 63 | 64 | // Make sure the explore distance doesn't get too short/long. 65 | if p.exploreDistance < p.minInterval/100 { 66 | p.exploreDistance = p.minInterval / 100 67 | } else if p.exploreDistance > p.maxInterval/2 { 68 | p.exploreDistance = p.maxInterval / 2 69 | } 70 | 71 | // Then update the interval. 72 | if progress == 0 { 73 | // If we fail to make progress, enter "backoff" mode. We'll leave backoff 74 | // mode next time we receive a certificate. Otherwise, we'd end up quickly 75 | // skewing our belief of the correct interval e.g., due to a skipped 76 | // instance. 77 | p.backoff = p.interval 78 | p.interval += p.exploreDistance 79 | p.wasIncreasing = true 80 | } else { 81 | p.interval -= p.exploreDistance 82 | p.wasIncreasing = false 83 | } 84 | 85 | // Clamp between min/max 86 | if p.interval < p.minInterval { 87 | p.interval = p.minInterval 88 | } else if p.interval > p.maxInterval { 89 | p.interval = p.maxInterval 90 | } 91 | } 92 | 93 | // Apply either the backoff or the predicted interval. 94 | nextInterval := p.interval 95 | if p.backoff > 0 { 96 | nextInterval = p.backoff 97 | p.backoff = min(2*p.backoff, maxBackoffMultiplier*p.maxInterval) 98 | } 99 | return nextInterval 100 | 101 | } 102 | -------------------------------------------------------------------------------- /sim/adversary/deny.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/filecoin-project/go-f3/gpbft" 7 | ) 8 | 9 | var ( 10 | _ Receiver = (*Deny)(nil) 11 | DenyAllMessages DenyMessageMatcher = func(*gpbft.GMessage) bool { return true } 12 | ) 13 | 14 | // Deny adversary denies messages matched by the configured DenyMessageMatcher to/from a given set of participants for a 15 | // configured duration of time.
16 | // 17 | // For this adversary to take effect, global stabilization time must be configured 18 | // to be at least as long as the configured deny duration. 19 | // 20 | // See sim.WithGlobalStabilizationTime. 21 | type Deny struct { 22 | host Host 23 | targetsByID map[gpbft.ActorID]struct{} 24 | gst time.Time 25 | msgMatched DenyMessageMatcher 26 | mode DenyTargetMode 27 | 28 | Absent 29 | } 30 | 31 | // DenyMessageMatcher checks whether a message should be denied by the Deny adversary or not. 32 | // 33 | // See: DenyAllMessages, DenyPhase. 34 | type DenyMessageMatcher func(*gpbft.GMessage) bool 35 | 36 | type DenyTargetMode int 37 | 38 | func (m DenyTargetMode) String() string { 39 | switch m { 40 | case DenyToOrFrom: 41 | return "deny to or from" 42 | case DenyTo: 43 | return "deny to" 44 | case DenyFrom: 45 | return "deny from" 46 | default: 47 | panic("unknown case") 48 | } 49 | } 50 | 51 | const ( 52 | // DenyToOrFrom denies message to or from target actor IDs. 53 | DenyToOrFrom DenyTargetMode = iota 54 | // DenyTo only denies messages destined to target actor IDs. 55 | DenyTo 56 | // DenyFrom only denies messages sent from target actor IDs. 57 | DenyFrom 58 | ) 59 | 60 | // DenyPhase denies all messages at the given phase. 61 | func DenyPhase(phase gpbft.Phase) DenyMessageMatcher { 62 | return func(message *gpbft.GMessage) bool { 63 | return message.Vote.Phase == phase 64 | } 65 | } 66 | 67 | func NewDeny(host Host, denialDuration time.Duration, msgMatcher DenyMessageMatcher, mode DenyTargetMode, targets ...gpbft.ActorID) *Deny { 68 | targetsByID := make(map[gpbft.ActorID]struct{}) 69 | for _, target := range targets { 70 | targetsByID[target] = struct{}{} 71 | } 72 | return &Deny{ 73 | host: host, 74 | targetsByID: targetsByID, 75 | gst: time.Time{}.Add(denialDuration), 76 | msgMatched: msgMatcher, 77 | mode: mode, 78 | } 79 | } 80 | 81 | func NewDenyGenerator(power gpbft.StoragePower, denialDuration time.Duration, msgMatcher DenyMessageMatcher, mode DenyTargetMode, targets ...gpbft.ActorID) Generator { 82 | return func(id gpbft.ActorID, host Host) *Adversary { 83 | return &Adversary{ 84 | Receiver: NewDeny(host, denialDuration, msgMatcher, mode, targets...), 85 | Power: power, 86 | ID: id, 87 | } 88 | } 89 | } 90 | 91 | func (d *Deny) AllowMessage(from gpbft.ActorID, to gpbft.ActorID, msg gpbft.GMessage) bool { 92 | // Deny all messages to or from targets until Global Stabilisation Time has 93 | // elapsed, except messages to self. 
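// In summary of the cases below: self-addressed messages and anything after GST always
// pass; otherwise DenyTo checks only the recipient, DenyFrom checks only the sender, and
// DenyToOrFrom drops a matched message if either endpoint is a target.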
94 | switch { 95 | case from == to, d.host.Time().After(d.gst): 96 | return true 97 | case d.mode == DenyTo: 98 | return !d.isTargeted(to, &msg) 99 | case d.mode == DenyFrom: 100 | return !d.isTargeted(from, &msg) 101 | case d.mode == DenyToOrFrom: 102 | return !(d.isTargeted(from, &msg) || d.isTargeted(to, &msg)) 103 | default: 104 | panic("unexpected denial case") 105 | } 106 | } 107 | 108 | func (d *Deny) isTargeted(id gpbft.ActorID, msg *gpbft.GMessage) bool { 109 | switch _, found := d.targetsByID[id]; { 110 | case found: 111 | return d.msgMatched(msg) 112 | default: 113 | return false 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /observer/query.go: -------------------------------------------------------------------------------- 1 | package observer 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "mime" 8 | "net/http" 9 | "strings" 10 | ) 11 | 12 | type QueryRequest struct { 13 | Query string `json:"Query"` 14 | } 15 | 16 | func (o *Observer) queryHandler(w http.ResponseWriter, r *http.Request) { 17 | const ( 18 | contentTypeJson = "application/json" 19 | contentTypeText = "text/plain" 20 | ) 21 | switch r.Method { 22 | case http.MethodOptions: 23 | allowedMethods := []string{http.MethodOptions, http.MethodPost} 24 | acceptedContentTypes := []string{contentTypeJson, contentTypeText} 25 | r.Header.Set("Accept", strings.Join(acceptedContentTypes, ",")) 26 | r.Header.Set("Allow", strings.Join(allowedMethods, ",")) 27 | return 28 | case http.MethodPost: 29 | // Proceed to parse the request. 30 | default: 31 | http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) 32 | return 33 | } 34 | 35 | // Infer database query depending on request content type, accepting: 36 | // * application/json: that corresponds to QueryRequest. 37 | // * text/plain: where the request body is the database query. 38 | // * unspecified: which is interpreted as text/plain. 
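// For illustration (assumed request shapes, not taken from project docs): a JSON request
// carries a body like {"Query": "SELECT 1"}, while a text/plain request carries the SQL
// statement itself, e.g. SELECT 1, as the raw body.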
39 | var requestMediaType, dbQuery string 40 | if requestContentType := r.Header.Get("Content-Type"); requestContentType != "" { 41 | var err error 42 | requestMediaType, _, err = mime.ParseMediaType(requestContentType) 43 | if err != nil { 44 | http.Error(w, fmt.Sprintf("invalid content type: %s", err), http.StatusBadRequest) 45 | return 46 | } 47 | } 48 | switch requestMediaType { 49 | case contentTypeJson: 50 | var req QueryRequest 51 | if err := json.NewDecoder(r.Body).Decode(&req); err != nil { 52 | http.Error(w, fmt.Sprintf("Invalid request: %s", err), http.StatusBadRequest) 53 | return 54 | } 55 | dbQuery = req.Query 56 | case contentTypeText, "": // Interpret unspecified content type as text/plain 57 | body, err := io.ReadAll(r.Body) 58 | if err != nil { 59 | http.Error(w, fmt.Sprintf("Cannot read request: %s", err), http.StatusBadRequest) 60 | return 61 | } 62 | dbQuery = string(body) 63 | default: 64 | http.Error(w, fmt.Sprintf("%s: %s", http.StatusText(http.StatusUnsupportedMediaType), requestMediaType), http.StatusUnsupportedMediaType) 65 | return 66 | } 67 | 68 | rows, err := o.db.QueryContext(r.Context(), dbQuery) 69 | if err != nil { 70 | logger.Errorw("Failed to execute query", "err", err) 71 | http.Error(w, fmt.Sprintf("Failed to execute query: %s", err), http.StatusInternalServerError) 72 | return 73 | } 74 | defer func() { _ = rows.Close() }() 75 | 76 | columns, err := rows.Columns() 77 | if err != nil { 78 | logger.Errorw("Failed to get columns", "err", err) 79 | http.Error(w, fmt.Sprintf("Failed to get columns: %s", err), http.StatusInternalServerError) 80 | return 81 | } 82 | 83 | var results []map[string]any 84 | for rows.Next() { 85 | row := make([]any, len(columns)) 86 | rowPointers := make([]any, len(columns)) 87 | for i := range row { 88 | rowPointers[i] = &row[i] 89 | } 90 | if err := rows.Scan(rowPointers...); err != nil { 91 | http.Error(w, fmt.Sprintf("Failed to scan row: %s", err), http.StatusInternalServerError) 92 | return 93 | } 94 | rowByColumn := make(map[string]any) 95 | for i, name := range columns { 96 | rowByColumn[name] = row[i] 97 | } 98 | results = append(results, rowByColumn) 99 | } 100 | 101 | w.Header().Set("Content-Type", contentTypeJson) 102 | if err := json.NewEncoder(w).Encode(results); err != nil { 103 | logger.Errorw("Failed to write response", "err", err) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /test/repeat_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | 7 | "github.com/filecoin-project/go-f3/gpbft" 8 | "github.com/filecoin-project/go-f3/sim" 9 | "github.com/filecoin-project/go-f3/sim/adversary" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | var ( 14 | repeatOnce = func(int) adversary.RepetitionSampler { 15 | return func(*gpbft.GMessage) int { 16 | return 1 17 | } 18 | } 19 | repeatBoundedRandom = func(seed int) adversary.RepetitionSampler { 20 | return newBoundedRepeater(int64(seed), 10, 50) 21 | } 22 | repeatZipF = func(seed int) adversary.RepetitionSampler { 23 | rng := rand.New(rand.NewSource(int64(seed))) 24 | zipf := rand.NewZipf(rng, 1.2, 1.0, 100) 25 | return func(*gpbft.GMessage) int { 26 | return int(zipf.Uint64()) 27 | } 28 | } 29 | repeatBoundedQuality = func(seed int) adversary.RepetitionSampler { 30 | boundedRepeater := newBoundedRepeater(int64(seed), 10, 50) 31 | return func(msg *gpbft.GMessage) int { 32 | if msg.Vote.Phase != 
gpbft.QUALITY_PHASE { 33 | return 0 34 | } 35 | return boundedRepeater(msg) 36 | } 37 | } 38 | repeatBoundedCommit = func(seed int) adversary.RepetitionSampler { 39 | boundedRepeater := newBoundedRepeater(int64(seed), 10, 50) 40 | return func(msg *gpbft.GMessage) int { 41 | if msg.Vote.Phase != gpbft.COMMIT_PHASE { 42 | return 0 43 | } 44 | return boundedRepeater(msg) 45 | } 46 | } 47 | repeatAdversaryTestHonestCounts = []int{ 48 | 2, // 1/3 adversary power 49 | 3, // 1/4 adversary power 50 | 4, // 1/5 adversary power 51 | } 52 | ) 53 | 54 | func FuzzRepeatAdversary(f *testing.F) { 55 | tests := []struct { 56 | name string 57 | repetitionSampler func(int) adversary.RepetitionSampler 58 | maxRounds uint64 59 | }{ 60 | { 61 | name: "once", 62 | repetitionSampler: repeatOnce, 63 | maxRounds: maxRounds, 64 | }, 65 | { 66 | name: "bounded uniform random", 67 | repetitionSampler: repeatBoundedRandom, 68 | maxRounds: maxRounds, 69 | }, 70 | { 71 | name: "zipf", 72 | repetitionSampler: repeatZipF, 73 | maxRounds: maxRounds, 74 | }, 75 | { 76 | name: "QUALITY Repeater", 77 | repetitionSampler: repeatBoundedQuality, 78 | maxRounds: maxRounds * 2, 79 | }, 80 | { 81 | name: "COMMIT Repeater", 82 | repetitionSampler: repeatBoundedCommit, 83 | maxRounds: maxRounds * 3, 84 | }, 85 | } 86 | f.Add(68465) 87 | f.Add(-5) 88 | f.Add(-5454) 89 | f.Add(-5467) 90 | f.Fuzz(func(t *testing.T, seed int) { 91 | t.Parallel() 92 | for _, hc := range repeatAdversaryTestHonestCounts { 93 | for _, test := range tests { 94 | t.Run(test.name, func(t *testing.T) { 95 | t.Parallel() 96 | rng := rand.New(rand.NewSource(int64(seed))) 97 | dist := test.repetitionSampler(seed) 98 | sm, err := sim.NewSimulation(asyncOptions(rng.Int(), 99 | sim.AddHonestParticipants( 100 | hc, 101 | sim.NewUniformECChainGenerator(rng.Uint64(), 1, 4), 102 | uniformOneStoragePower), 103 | sim.WithAdversary(adversary.NewRepeatGenerator(oneStoragePower, dist)), 104 | )...) 
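// The simulation must construct without error and reach a decision within the
// per-test round budget; Describe() is included in the failure message to aid debugging.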
105 | require.NoError(t, err) 106 | require.NoErrorf(t, sm.Run(1, test.maxRounds), "%s", sm.Describe()) 107 | }) 108 | } 109 | } 110 | }) 111 | } 112 | 113 | func newBoundedRepeater(rngSeed int64, min, max int) adversary.RepetitionSampler { 114 | rng := rand.New(rand.NewSource(rngSeed)) 115 | return func(*gpbft.GMessage) int { 116 | return int(rng.Uint64())%(max-min+1) + min 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /cmd/f3/certs.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "strconv" 8 | "time" 9 | 10 | "github.com/filecoin-project/go-f3/certexchange" 11 | "github.com/filecoin-project/go-f3/certs" 12 | "github.com/filecoin-project/go-f3/gpbft" 13 | "github.com/ipfs/go-cid" 14 | "github.com/libp2p/go-libp2p" 15 | "github.com/libp2p/go-libp2p/core/peer" 16 | "github.com/urfave/cli/v2" 17 | ) 18 | 19 | var ( 20 | certFrom *peer.AddrInfo 21 | 22 | certsCmd = cli.Command{ 23 | Name: "certs", 24 | Subcommands: []*cli.Command{ 25 | { 26 | Name: "list-from", 27 | Flags: []cli.Flag{ 28 | limitFlag, 29 | fromAddrFlag, 30 | networkNameFlag, 31 | includePowerTableFlag, 32 | }, 33 | Action: func(cctx *cli.Context) error { 34 | instanceArg := cctx.Args().First() 35 | if instanceArg == "" { 36 | return errors.New("missing instance as first argument") 37 | } 38 | instance, err := strconv.ParseUint(instanceArg, 10, 64) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | if certFrom == nil { 44 | return errors.New("--from addrinfo is required") 45 | } 46 | 47 | host, err := libp2p.New() 48 | if err != nil { 49 | return err 50 | } 51 | defer func() { _ = host.Close() }() 52 | 53 | if err := host.Connect(cctx.Context, *certFrom); err != nil { 54 | return err 55 | } 56 | client := certexchange.Client{ 57 | Host: host, 58 | NetworkName: gpbft.NetworkName(cctx.String(networkNameFlag.Name)), 59 | RequestTimeout: cctx.Duration(timeoutFlag.Name), 60 | } 61 | 62 | rh, ch, err := client.Request(cctx.Context, certFrom.ID, &certexchange.Request{ 63 | FirstInstance: instance, 64 | Limit: cctx.Uint64(limitFlag.Name), 65 | IncludePowerTable: true, 66 | }) 67 | if err != nil { 68 | return err 69 | } 70 | var result = struct { 71 | *certexchange.ResponseHeader 72 | PowerTableCID cid.Cid 73 | Certificates []*certs.FinalityCertificate 74 | }{ 75 | ResponseHeader: rh, 76 | } 77 | 78 | result.PowerTableCID, err = certs.MakePowerTableCID(rh.PowerTable) 79 | if err != nil { 80 | return err 81 | } 82 | 83 | for certificate := range ch { 84 | result.Certificates = append(result.Certificates, certificate) 85 | } 86 | output, err := json.MarshalIndent(result, "", " ") 87 | if err != nil { 88 | return err 89 | } 90 | _, _ = fmt.Fprintln(cctx.App.Writer, string(output)) 91 | return nil 92 | }, 93 | }, 94 | }, 95 | } 96 | 97 | limitFlag = &cli.Uint64Flag{ 98 | Name: "limit", 99 | Usage: "Maximum number of certificates to list from the peer", 100 | Value: 100, 101 | } 102 | fromAddrFlag = &cli.StringFlag{ 103 | Name: "peer", 104 | Aliases: []string{"p"}, 105 | Usage: "The addrinfo of the peer to get the certificate from", 106 | Action: func(cctx *cli.Context, v string) (err error) { 107 | certFrom, err = peer.AddrInfoFromString(v) 108 | return 109 | }, 110 | Required: true, 111 | } 112 | networkNameFlag = &cli.StringFlag{ 113 | Name: "networkName", 114 | Aliases: []string{"nn"}, 115 | Usage: "The network name", 116 | Required: true, 117 | } 118 | 119 | 
includePowerTableFlag = &cli.BoolFlag{ 120 | Name: "includePowerTable", 121 | Aliases: []string{"pt"}, 122 | Usage: "Whether to include the power table in the results", 123 | } 124 | timeoutFlag = &cli.DurationFlag{ 125 | Name: "timeout", 126 | Usage: "Request timeout.", 127 | Value: 30 * time.Second, 128 | } 129 | ) 130 | -------------------------------------------------------------------------------- /certexchange/polling/subscriber_test.go: -------------------------------------------------------------------------------- 1 | package polling_test 2 | 3 | import ( 4 | "context" 5 | "math/rand" 6 | "slices" 7 | "testing" 8 | "time" 9 | 10 | "github.com/filecoin-project/go-f3/certexchange" 11 | "github.com/filecoin-project/go-f3/certexchange/polling" 12 | "github.com/filecoin-project/go-f3/certstore" 13 | "github.com/filecoin-project/go-f3/internal/clock" 14 | "github.com/filecoin-project/go-f3/sim/signing" 15 | 16 | "github.com/ipfs/go-datastore" 17 | ds_sync "github.com/ipfs/go-datastore/sync" 18 | mocknetwork "github.com/libp2p/go-libp2p/p2p/net/mock" 19 | "github.com/stretchr/testify/require" 20 | ) 21 | 22 | func TestSubscriber(t *testing.T) { 23 | backend := signing.NewFakeBackend() 24 | rng := rand.New(rand.NewSource(1234)) 25 | 26 | cg := polling.MakeCertificates(t, rng, backend) 27 | 28 | ctx, cancel := context.WithCancel(context.Background()) 29 | ctx, clk := clock.WithMockClock(ctx) 30 | defer cancel() 31 | 32 | mocknet := mocknetwork.New() 33 | 34 | clientHost, err := mocknet.GenPeer() 35 | require.NoError(t, err) 36 | 37 | servers := make([]*certexchange.Server, 100) 38 | for i := range servers { 39 | h, err := mocknet.GenPeer() 40 | require.NoError(t, err) 41 | 42 | ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) 43 | cs, err := certstore.CreateStore(ctx, ds, 0, cg.PowerTable) 44 | require.NoError(t, err) 45 | 46 | servers[i] = &certexchange.Server{ 47 | NetworkName: polling.TestNetworkName, 48 | Host: h, 49 | Store: cs, 50 | } 51 | } 52 | 53 | require.NoError(t, mocknet.LinkAll()) 54 | 55 | for _, server := range servers { 56 | require.NoError(t, server.Start(ctx)) 57 | t.Cleanup(func() { require.NoError(t, server.Stop(context.Background())) }) 58 | } 59 | 60 | clientDs := ds_sync.MutexWrap(datastore.NewMapDatastore()) 61 | clientCs, err := certstore.CreateStore(ctx, clientDs, 0, cg.PowerTable) 62 | require.NoError(t, err) 63 | 64 | client := certexchange.Client{ 65 | Host: clientHost, 66 | NetworkName: polling.TestNetworkName, 67 | } 68 | 69 | subscriber := polling.Subscriber{ 70 | Client: client, 71 | Store: clientCs, 72 | SignatureVerifier: backend, 73 | MinimumPollInterval: time.Millisecond, 74 | MaximumPollInterval: time.Second, 75 | InitialPollInterval: 100 * time.Millisecond, 76 | } 77 | 78 | require.NoError(t, subscriber.Start(ctx)) 79 | 80 | t.Cleanup(func() { require.NoError(t, subscriber.Stop(context.Background())) }) 81 | 82 | require.NoError(t, mocknet.ConnectAllButSelf()) 83 | 84 | liveServers := slices.Clone(servers) 85 | lastPoll := time.Now() 86 | i := 0 87 | for len(liveServers) > 0 { 88 | now := time.Now() 89 | timeDelta := now.Sub(lastPoll) 90 | certCount := timeDelta/subscriber.InitialPollInterval + 1 91 | waitTime := certCount * subscriber.InitialPollInterval 92 | for target := i + int(certCount); i < target; i++ { 93 | cert := cg.MakeCertificate() 94 | for _, s := range liveServers { 95 | require.NoError(t, s.Store.Put(ctx, cert)) 96 | } 97 | } 98 | 99 | clk.Add(waitTime) 100 | 101 | require.Eventually(t, func() bool { 102 | latest := 
clientCs.Latest() 103 | if latest != nil && latest.GPBFTInstance == uint64(i-1) { 104 | return true 105 | } 106 | clk.WaitForAllTimers() 107 | return false 108 | }, 10*time.Second, time.Millisecond) 109 | 110 | // After we settle for a bit, every 4 instances, stop updating 20% of the 111 | // network. 112 | if i > 10 && i%4 == 0 { 113 | rand.Shuffle(len(liveServers), func(a, b int) { 114 | liveServers[a], liveServers[b] = liveServers[b], liveServers[a] 115 | }) 116 | liveServers = liveServers[:8*len(liveServers)/10] 117 | } 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /blssig/aggregation.go: -------------------------------------------------------------------------------- 1 | package blssig 2 | 3 | import ( 4 | "context" 5 | "encoding/base64" 6 | "fmt" 7 | "runtime/debug" 8 | 9 | "go.dedis.ch/kyber/v4" 10 | "go.dedis.ch/kyber/v4/sign/bdn" 11 | "go.opentelemetry.io/otel/metric" 12 | 13 | "github.com/filecoin-project/go-f3/gpbft" 14 | "github.com/filecoin-project/go-f3/internal/measurements" 15 | ) 16 | 17 | // Max size of the point cache. 18 | const maxPointCacheSize = 10_000 19 | 20 | type aggregation struct { 21 | mask *bdn.Mask 22 | scheme *bdn.Scheme 23 | } 24 | 25 | func (a *aggregation) Aggregate(mask []int, signatures [][]byte) (_agg []byte, _err error) { 26 | defer func() { 27 | status := measurements.AttrStatusSuccess 28 | if _err != nil { 29 | status = measurements.AttrStatusError 30 | } 31 | 32 | if perr := recover(); perr != nil { 33 | _err = fmt.Errorf("panicked aggregating signatures: %v\n%s", 34 | perr, string(debug.Stack())) 35 | log.Error(_err) 36 | status = measurements.AttrStatusPanic 37 | } 38 | 39 | metrics.aggregate.Record( 40 | context.TODO(), int64(len(mask)), 41 | metric.WithAttributes(status), 42 | ) 43 | }() 44 | 45 | if len(mask) != len(signatures) { 46 | return nil, fmt.Errorf("lengths of pubkeys and sigs does not match %d != %d", 47 | len(mask), len(signatures)) 48 | } 49 | 50 | bdnMask := a.mask.Clone() 51 | for _, bit := range mask { 52 | if err := bdnMask.SetBit(bit, true); err != nil { 53 | return nil, err 54 | } 55 | } 56 | 57 | aggSigPoint, err := a.scheme.AggregateSignatures(signatures, bdnMask) 58 | if err != nil { 59 | return nil, fmt.Errorf("computing aggregate signature: %w", err) 60 | } 61 | aggSig, err := aggSigPoint.MarshalBinary() 62 | if err != nil { 63 | return nil, fmt.Errorf("marshaling signature data: %w", err) 64 | } 65 | 66 | return aggSig, nil 67 | } 68 | 69 | func (a *aggregation) VerifyAggregate(mask []int, msg []byte, signature []byte) (_err error) { 70 | defer func() { 71 | status := measurements.AttrStatusSuccess 72 | if _err != nil { 73 | status = measurements.AttrStatusError 74 | } 75 | 76 | if perr := recover(); perr != nil { 77 | msgStr := base64.StdEncoding.EncodeToString(msg) 78 | _err = fmt.Errorf("panicked verifying aggregate signature of %q: %v\n%s", 79 | msgStr, perr, string(debug.Stack())) 80 | log.Error(_err) 81 | status = measurements.AttrStatusPanic 82 | } 83 | 84 | metrics.verifyAggregate.Record( 85 | context.Background(), int64(len(mask)), 86 | metric.WithAttributes(status), 87 | ) 88 | }() 89 | 90 | bdnMask := a.mask.Clone() 91 | for _, bit := range mask { 92 | if err := bdnMask.SetBit(bit, true); err != nil { 93 | return err 94 | } 95 | } 96 | 97 | aggPubKey, err := a.scheme.AggregatePublicKeys(bdnMask) 98 | if err != nil { 99 | return fmt.Errorf("aggregating public keys: %w", err) 100 | } 101 | 102 | return a.scheme.Verify(aggPubKey, msg, signature) 103 | } 
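// Aggregate (below) builds the aggregation context for a fixed set of public keys: each
// key is resolved to a (cached) G1 point and wrapped in a BDN mask, so the returned value
// can aggregate and verify signatures from any subset of those signers, addressed by index.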
104 | 105 | func (v *Verifier) Aggregate(pubkeys []gpbft.PubKey) (_agg gpbft.Aggregate, _err error) { 106 | defer func() { 107 | if perr := recover(); perr != nil { 108 | _err = fmt.Errorf("panicked aggregating public keys: %v\n%s", 109 | perr, string(debug.Stack())) 110 | log.Error(_err) 111 | } 112 | }() 113 | 114 | kPubkeys := make([]kyber.Point, 0, len(pubkeys)) 115 | for i, p := range pubkeys { 116 | point, err := v.pubkeyToPoint(p) 117 | if err != nil { 118 | return nil, fmt.Errorf("pubkey %d: %w", i, err) 119 | } 120 | kPubkeys = append(kPubkeys, point.Clone()) 121 | } 122 | 123 | mask, err := bdn.NewMask(v.keyGroup, kPubkeys, nil) 124 | if err != nil { 125 | return nil, fmt.Errorf("creating key mask: %w", err) 126 | } 127 | return &aggregation{ 128 | mask: mask, 129 | scheme: v.scheme, 130 | }, nil 131 | } 132 | --------------------------------------------------------------------------------
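A minimal usage sketch of the blssig aggregation API above. It assumes the caller already holds 48-byte BLS12-381 G1 public keys and matching BDN signatures over G2 (for example, produced by the package's signer), and that gpbft.Aggregate exposes the Aggregate and VerifyAggregate methods implemented above; the package, function, and parameter names here are illustrative.

package example

import (
	"fmt"

	"github.com/filecoin-project/go-f3/blssig"
	"github.com/filecoin-project/go-f3/gpbft"
)

// AggregateAndVerify aggregates the signatures of the signers identified by
// signerIndices (indices into pubKeys) and checks the aggregate against msg.
func AggregateAndVerify(pubKeys []gpbft.PubKey, signerIndices []int, sigs [][]byte, msg []byte) ([]byte, error) {
	v := blssig.VerifierWithKeyOnG1()

	// Build the aggregation context once per key set; it resolves the key
	// points (via the verifier's cache) and sets up the BDN mask layout.
	agg, err := v.Aggregate(pubKeys)
	if err != nil {
		return nil, fmt.Errorf("building aggregate: %w", err)
	}

	// signerIndices and sigs must line up: one signature per listed signer.
	aggSig, err := agg.Aggregate(signerIndices, sigs)
	if err != nil {
		return nil, fmt.Errorf("aggregating signatures: %w", err)
	}

	if err := agg.VerifyAggregate(signerIndices, msg, aggSig); err != nil {
		return nil, fmt.Errorf("verifying aggregate: %w", err)
	}
	return aggSig, nil
}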