├── .github └── workflows │ └── docker.yml ├── .gitignore ├── .sfreleaser ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── README.md ├── UPDATE.md ├── battlefield ├── battlefield.go ├── compare_blocks.go └── config.go ├── blockpoller ├── cursor.go ├── fetcher.go ├── handler.go ├── handler_test.go ├── init_test.go ├── options.go ├── poller.go ├── poller_client_test.go ├── poller_test.go ├── state_file.go └── state_file_test.go ├── chain.go ├── cmd ├── apps │ ├── auth.go │ ├── firehose.go │ ├── index_builder.go │ ├── merger.go │ ├── metering.go │ ├── reader_node.go │ ├── reader_node_firehose.go │ ├── reader_node_stdin.go │ ├── reader_node_test.go │ ├── relayer.go │ ├── start.go │ ├── substreams_common.go │ ├── substreams_tier1.go │ └── substreams_tier2.go ├── firecore │ └── main.go ├── main.go ├── setup.go └── tools │ ├── check │ ├── blocks.go │ ├── check.go │ ├── merged_batch.go │ └── mergedbatch.go │ ├── compare │ └── tools_compare_blocks.go │ ├── firehose │ ├── client.go │ ├── firehose.go │ ├── prometheus_exporter.go │ ├── single_block_client.go │ └── tools_download_from_firehose.go │ ├── fix │ ├── legacy_2_block_any.go │ └── tools_fix_bloated_merged_blocks.go │ ├── mergeblock │ ├── tools_merge_blocks.go │ ├── tools_unmerge_blocks.go │ └── tools_upgrade_merged_blocks.go │ ├── print │ ├── printer.go │ ├── printer_bytes.go │ ├── printer_enum.go │ ├── printer_json.go │ ├── printer_protojson.go │ ├── printer_text.go │ ├── tools_print.go │ └── tools_print_test.go │ ├── relayer │ └── tools_relayer.go │ └── tools.go ├── config └── examples │ └── merge-accumulated-one-blocks.yaml ├── consolereader.go ├── consolereader_test.go ├── constants.go ├── devel ├── firecore ├── reader-firehose │ ├── reader-firehose.yaml │ └── start.sh ├── reader-stdin │ ├── reader-stdin.yaml │ ├── sample.eth.firelog │ └── start.sh ├── standard │ ├── standard.yaml │ └── start.sh └── substreams │ ├── start.sh │ └── substreams.yaml ├── docker ├── 99-firehose-core.sh └── motd ├── 
firehose ├── app │ └── firehose │ │ └── app.go ├── block_getter.go ├── client │ └── client.go ├── info │ ├── endpoint_info.go │ └── info_filler.go ├── init_test.go ├── metrics │ └── metrics.go ├── rate │ └── limiter.go └── server │ ├── blocks.go │ ├── errors.go │ └── server.go ├── flags.go ├── go.mod ├── go.sum ├── index-builder ├── app │ └── index-builder │ │ ├── app.go │ │ └── logging.go ├── healthz.go ├── index-builder.go └── metrics │ └── metrics.go ├── internal └── utils │ ├── utils.go │ └── utils_test.go ├── json └── marshallers.go ├── launcher ├── app.go ├── app_enum.go ├── config.go ├── init_test.go ├── launcher.go ├── logging.go ├── readiness.go ├── registry.go ├── registry_test.go ├── runtime.go ├── setup.go └── tracing.go ├── merged_blocks_writer.go ├── merger ├── app │ └── merger │ │ ├── app.go │ │ └── logging.go ├── bundler.go ├── bundler_test.go ├── bundlereader.go ├── bundlereader_test.go ├── consts.go ├── healthz.go ├── healthz_test.go ├── init_test.go ├── merger.go ├── merger_io.go ├── merger_io_test.go ├── metrics │ └── metrics.go ├── server.go ├── test_data │ ├── 0000000001-20150730T152628.0-13406cb6-b1cb8fa3.dbin │ ├── 0000000002-20150730T152657.0-044698c9-13406cb6.dbin │ └── 0000000003-20150730T152728.0-a88cf741-044698c9.dbin └── utils.go ├── metering └── metering.go ├── metrics.go ├── node-manager ├── app │ ├── firehose_reader │ │ ├── app.go │ │ ├── metrics.go │ │ └── syncer.go │ ├── node_manager │ │ └── app.go │ └── node_reader_stdin │ │ └── app.go ├── log_plugin │ ├── keep_last_lines_log_plugin.go │ ├── keep_last_lines_log_plugin_test.go │ ├── line_ring_buffer.go │ ├── log_plugin.go │ ├── to_console_log_plugin.go │ ├── to_zap_log_plugin.go │ └── to_zap_log_plugin_test.go ├── metrics │ └── common.go ├── mindreader │ ├── archiver.go │ ├── file_uploader.go │ ├── file_uploader_test.go │ ├── init_test.go │ ├── logging.go │ ├── mindreader.go │ └── mindreader_test.go ├── monitor.go ├── operator │ ├── backuper.go │ ├── backuper_test.go │ ├── 
bootstrap.go │ ├── errors.go │ ├── http_server.go │ └── operator.go ├── superviser.go ├── superviser │ ├── superviser.go │ └── superviser_test.go ├── types.go └── utils.go ├── proto ├── README.md ├── generator │ ├── generator.go │ └── template.gotmpl ├── registry.go ├── registry_test.go ├── testdata │ ├── acme │ │ └── acme.proto │ ├── override │ │ └── sf │ │ │ └── ethereum │ │ │ └── type │ │ │ └── v2 │ │ │ └── type.proto │ └── override_acme │ │ └── acme.proto ├── utils.go ├── well_known.go └── well_known_types.go ├── reader_node.go ├── reader_node_bootstrap.go ├── reader_node_bootstrapper_bash.go ├── reader_node_bootstrapper_tarball.go ├── relayer ├── app │ └── relayer │ │ ├── app.go │ │ └── logging.go ├── healthz.go ├── logging.go ├── metrics │ └── metrics.go └── relayer.go ├── rpc ├── client_test.go ├── clients.go ├── rolling_strategy.go ├── rolling_strategy_test.go ├── sort.go └── sort_test.go ├── storage.go ├── storage_test.go ├── stream_factory.go ├── superviser ├── genericsupervisor.go └── logging.go ├── test ├── integration_test.go ├── metering_server.go ├── substreams_acme │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ ├── README.md │ ├── buf.gen.yaml │ ├── generator.json │ ├── proto │ │ ├── sf │ │ │ └── acme │ │ │ │ └── type │ │ │ │ └── v1 │ │ │ │ └── type.proto │ │ └── testdata.proto │ ├── src │ │ ├── lib.rs │ │ └── pb │ │ │ ├── mod.rs │ │ │ ├── sf.acme.type.v1.rs │ │ │ └── testdata.v1.rs │ └── substreams.yaml ├── type_test.pb.go └── utils.go ├── types.go ├── types ├── block_range.go ├── block_range_enum.go ├── flags.go ├── types.go ├── types_test.go ├── utils.go └── utils_test.go ├── unsafe_extensions.go └── utils.go /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .vscode 3 | /build 4 | /dist 5 | .envrc 6 | .env 7 | .DS_Store 8 | firehose-data* 9 | /.fleet/settings.json 10 | /firecore 11 | /firehose.yaml 12 | -------------------------------------------------------------------------------- 
/.sfreleaser: -------------------------------------------------------------------------------- 1 | global: 2 | binary: firecore 3 | language: golang 4 | variant: application 5 | release: 6 | goreleaser-docker-image: goreleaser/goreleaser-cross:v1.23 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.24-bookworm AS build 2 | WORKDIR /app 3 | 4 | COPY go.mod go.sum ./ 5 | RUN go mod download 6 | 7 | COPY . ./ 8 | 9 | ARG VERSION="dev" 10 | RUN apt-get update && apt-get install git 11 | RUN go build -v -ldflags "-X main.version=${VERSION}" ./cmd/firecore 12 | 13 | FROM ubuntu:24.10 14 | 15 | ARG TARGETPLATFORM 16 | 17 | RUN apt-get update && apt-get -y install ca-certificates htop iotop sysstat strace lsof curl jq tzdata file 18 | 19 | RUN mkdir -p /app/ && \ 20 | export repository="https://github.com/grpc-ecosystem/grpc-health-probe/releases/download" && \ 21 | export version="v0.4.12" && \ 22 | curl --fail-with-body -Lo /app/grpc_health_probe "$repository/$version/grpc_health_probe-$(echo $TARGETPLATFORM | sed 's|/|-|')" && \ 23 | chmod +x /app/grpc_health_probe 24 | 25 | WORKDIR /app 26 | 27 | COPY --from=build /app/firecore /app/firecore 28 | 29 | ENV PATH="$PATH:/app" 30 | 31 | COPY docker/motd /etc/motd 32 | COPY docker/99-firehose-core.sh /etc/profile.d/ 33 | RUN echo ". /etc/profile.d/99-firehose-core.sh" > /root/.bash_aliases 34 | 35 | ENTRYPOINT [ "/app/firecore" ] 36 | -------------------------------------------------------------------------------- /battlefield/battlefield.go: -------------------------------------------------------------------------------- 1 | package battlefield 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/spf13/cobra" 7 | "github.com/streamingfast/cli" 8 | . 
"github.com/streamingfast/cli" 9 | ) 10 | 11 | func BattlefieldCmd(binaryName string, config *Config) cli.CommandOption { 12 | return Group( 13 | "battlefield", 14 | "Battlefield regression tests commands", 15 | Command( 16 | runE, 17 | "run", 18 | "Run battlefield regression test suite against oracle", 19 | RangeArgs(0, 1), 20 | ), 21 | ) 22 | } 23 | 24 | func runE(cmd *cobra.Command, args []string) error { 25 | variant := "" 26 | if len(args) > 0 { 27 | variant = args[0] 28 | } 29 | 30 | fmt.Println("Variant", variant) 31 | return nil 32 | } 33 | -------------------------------------------------------------------------------- /battlefield/compare_blocks.go: -------------------------------------------------------------------------------- 1 | package battlefield 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "os/exec" 8 | 9 | "github.com/streamingfast/cli" 10 | "github.com/stretchr/testify/assert" 11 | "go.uber.org/zap" 12 | ) 13 | 14 | func CompareBlockFiles(referenceBlockFile, otherBlockFile string, processFileContent func(cntA, cntB []byte) (interface{}, interface{}, error), logger *zap.Logger) (bool, error) { 15 | logger.Info("comparing block files", 16 | zap.String("reference_block_file", referenceBlockFile), 17 | zap.String("other_block_file", otherBlockFile), 18 | ) 19 | 20 | refCnt, err := os.ReadFile(referenceBlockFile) 21 | if err != nil { 22 | return false, fmt.Errorf("unable to read block file %q: %w", referenceBlockFile, err) 23 | } 24 | 25 | otherCnt, err := os.ReadFile(otherBlockFile) 26 | if err != nil { 27 | return false, fmt.Errorf("unable to read block file %q: %w", otherBlockFile, err) 28 | } 29 | 30 | var refBlocksJsonInterface, otherBlocksJsonInterface interface{} 31 | if processFileContent == nil { 32 | if err = json.Unmarshal(refCnt, &refBlocksJsonInterface); err != nil { 33 | return false, fmt.Errorf("unable to unmarshal block %q: %w", referenceBlockFile, err) 34 | } 35 | 36 | if err = json.Unmarshal(otherCnt, 
&otherBlocksJsonInterface); err != nil { 37 | return false, fmt.Errorf("unable to unmarshal block %q: %w", otherBlockFile, err) 38 | } 39 | } else { 40 | refBlocksJsonInterface, otherBlocksJsonInterface, err = processFileContent(refCnt, otherCnt) 41 | if err != nil { 42 | return false, fmt.Errorf("failed to process blocks content file: %w", err) 43 | } 44 | } 45 | 46 | if assert.ObjectsAreEqualValues(refBlocksJsonInterface, otherBlocksJsonInterface) { 47 | fmt.Println("Files are equal, all good") 48 | return true, nil 49 | } 50 | 51 | useBash := true 52 | command := fmt.Sprintf("diff -C 5 \"%s\" \"%s\" | less", referenceBlockFile, otherBlockFile) 53 | if os.Getenv("DIFF_EDITOR") != "" { 54 | command = fmt.Sprintf("%s \"%s\" \"%s\"", os.Getenv("DIFF_EDITOR"), referenceBlockFile, otherBlockFile) 55 | } 56 | 57 | showDiff, wasAnswered := cli.AskConfirmation(`File %q and %q differs, do you want to see the difference now`, referenceBlockFile, otherBlockFile) 58 | if wasAnswered && showDiff { 59 | diffCmd := exec.Command(command) 60 | if useBash { 61 | diffCmd = exec.Command("bash", "-c", command) 62 | } 63 | 64 | diffCmd.Stdout = os.Stdout 65 | diffCmd.Stderr = os.Stderr 66 | 67 | if err := diffCmd.Run(); err != nil { 68 | return false, fmt.Errorf("diff command failed to run properly") 69 | } 70 | 71 | fmt.Println("You can run the following command to see it manually later:") 72 | } else { 73 | fmt.Println("Not showing diff between files, run the following command to see it manually:") 74 | } 75 | 76 | fmt.Println() 77 | fmt.Printf(" %s\n", command) 78 | fmt.Println("") 79 | return false, nil 80 | } 81 | -------------------------------------------------------------------------------- /battlefield/config.go: -------------------------------------------------------------------------------- 1 | package battlefield 2 | 3 | type Config struct { 4 | } 5 | -------------------------------------------------------------------------------- /blockpoller/cursor.go: 
-------------------------------------------------------------------------------- 1 | package blockpoller 2 | 3 | import ( 4 | "github.com/streamingfast/bstream" 5 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 6 | "go.uber.org/zap" 7 | ) 8 | 9 | type State string 10 | 11 | const ( 12 | ContinuousSegState State = "CONTINUOUS" 13 | IncompleteSegState State = "INCOMPLETE" 14 | ) 15 | 16 | func (s State) String() string { 17 | return string(s) 18 | } 19 | 20 | type cursor struct { 21 | currentBlk bstream.BlockRef 22 | currentIncompleteSeg *bstream.BasicBlockRef 23 | state State 24 | logger *zap.Logger 25 | } 26 | 27 | func (s *cursor) addBlk(blk *pbbstream.Block, blockSeen bool, parentSeen bool) { 28 | blkRef := blk.AsRef() 29 | logger := s.logger.With( 30 | zap.Stringer("blk", blkRef), 31 | zap.Stringer("parent_blk", blk.PreviousRef()), 32 | zap.Bool("seen_blk", blockSeen), 33 | zap.Bool("seen_parent", parentSeen), 34 | zap.Stringer("previous_state", s.state), 35 | ) 36 | if s.currentIncompleteSeg != nil { 37 | logger = logger.With(zap.Stringer("current_incomplete_seg", *s.currentIncompleteSeg)) 38 | } else { 39 | logger = logger.With(zap.String("current_incomplete_seg", "none")) 40 | 41 | } 42 | 43 | if s.state == IncompleteSegState && blockSeen && parentSeen { 44 | // if we are checking an incomplete segement, and we get a block that is already in the forkdb 45 | // and whose parent is also in the forkdb, then we are back on a continuous segment 46 | s.state = ContinuousSegState 47 | } 48 | s.currentBlk = blkRef 49 | logger.Debug("received block", zap.Stringer("current_state", s.state)) 50 | } 51 | 52 | func (s *cursor) getBlkSegmentNum() bstream.BlockRef { 53 | if s.state == IncompleteSegState { 54 | if s.currentIncompleteSeg == nil { 55 | panic("current incomplete segment is nil, when cursor is incomplete segment, this should never happen") 56 | } 57 | return *s.currentIncompleteSeg 58 | } 59 | return s.currentBlk 60 | } 61 | 62 | func (s *cursor) 
blkIsConnectedToLib() { 63 | s.state = ContinuousSegState 64 | s.currentIncompleteSeg = nil 65 | } 66 | 67 | func (s *cursor) blkIsNotConnectedToLib() { 68 | if s.state != IncompleteSegState { 69 | s.state = IncompleteSegState 70 | // we don't want to point the current blk since that will change 71 | v := bstream.NewBlockRef(s.currentBlk.ID(), s.currentBlk.Num()) 72 | s.currentIncompleteSeg = &v 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /blockpoller/fetcher.go: -------------------------------------------------------------------------------- 1 | package blockpoller 2 | 3 | import ( 4 | "context" 5 | 6 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 7 | ) 8 | 9 | type BlockFetcher[C any] interface { 10 | IsBlockAvailable(requestedSlot uint64) bool 11 | Fetch(ctx context.Context, client C, blkNum uint64) (b *pbbstream.Block, skipped bool, err error) 12 | } 13 | 14 | type HeadBlockNumberFetcher[C any] interface { 15 | FetchHeadBlockNumber(ctx context.Context, client C) (uint64, error) 16 | } 17 | -------------------------------------------------------------------------------- /blockpoller/handler.go: -------------------------------------------------------------------------------- 1 | package blockpoller 2 | 3 | import ( 4 | "encoding/base64" 5 | "fmt" 6 | "strings" 7 | "sync" 8 | 9 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 10 | ) 11 | 12 | type BlockHandler interface { 13 | Init() 14 | Handle(blk *pbbstream.Block) error 15 | } 16 | 17 | var _ BlockHandler = (*FireBlockHandler)(nil) 18 | 19 | type FireBlockHandler struct { 20 | blockTypeURL string 21 | init sync.Once 22 | } 23 | 24 | func NewFireBlockHandler(blockTypeURL string) *FireBlockHandler { 25 | return &FireBlockHandler{ 26 | blockTypeURL: clean(blockTypeURL), 27 | } 28 | } 29 | 30 | func (f *FireBlockHandler) Init() { 31 | fmt.Println("FIRE INIT 3.0", f.blockTypeURL) 32 | } 33 | 34 | func (f *FireBlockHandler) 
Handle(b *pbbstream.Block) error { 35 | typeURL := clean(b.Payload.TypeUrl) 36 | if typeURL != f.blockTypeURL { 37 | return fmt.Errorf("block type url %q does not match expected type %q", typeURL, f.blockTypeURL) 38 | } 39 | 40 | blockLine := fmt.Sprintf( 41 | "FIRE BLOCK %d %s %d %s %d %d %s", 42 | b.Number, 43 | b.Id, 44 | b.ParentNum, 45 | b.ParentId, 46 | b.LibNum, 47 | b.Timestamp.AsTime().UnixNano(), 48 | base64.StdEncoding.EncodeToString(b.Payload.Value), 49 | ) 50 | 51 | fmt.Println(blockLine) 52 | return nil 53 | } 54 | 55 | func clean(in string) string { 56 | return strings.Replace(in, "type.googleapis.com/", "", 1) 57 | } 58 | -------------------------------------------------------------------------------- /blockpoller/handler_test.go: -------------------------------------------------------------------------------- 1 | package blockpoller 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestFireBlockHandler_clean(t *testing.T) { 10 | tests := []struct { 11 | in string 12 | expect string 13 | }{ 14 | {"type.googleapis.com/sf.bstream.v2.Block", "sf.bstream.v2.Block"}, 15 | {"sf.bstream.v2.Block", "sf.bstream.v2.Block"}, 16 | } 17 | 18 | for _, test := range tests { 19 | t.Run(test.in, func(t *testing.T) { 20 | assert.Equal(t, test.expect, clean(test.in)) 21 | }) 22 | } 23 | 24 | } 25 | -------------------------------------------------------------------------------- /blockpoller/init_test.go: -------------------------------------------------------------------------------- 1 | package blockpoller 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 10 | "github.com/streamingfast/derr" 11 | "github.com/streamingfast/logging" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/test-go/testify/require" 14 | "go.uber.org/zap/zapcore" 15 | ) 16 | 17 | var logger, tracer = logging.PackageLogger("forkhandler", 
"github.com/streamingfast/firehose-core/forkhandler.test") 18 | 19 | func init() { 20 | logging.InstantiateLoggers(logging.WithDefaultLevel(zapcore.DebugLevel)) 21 | } 22 | 23 | var TestErrCompleteDone = fmt.Errorf("test complete done") 24 | 25 | type TestBlock struct { 26 | expect *pbbstream.Block 27 | send *pbbstream.Block 28 | } 29 | 30 | var _ BlockFetcher[any] = &TestBlockFetcher[any]{} 31 | 32 | type TestBlockFetcher[C any] struct { 33 | t *testing.T 34 | blocks []*TestBlock 35 | idx uint64 36 | completed bool 37 | } 38 | 39 | func newTestBlockFetcher[C any](t *testing.T, blocks []*TestBlock) *TestBlockFetcher[C] { 40 | return &TestBlockFetcher[C]{ 41 | t: t, 42 | blocks: blocks, 43 | } 44 | } 45 | 46 | func (b *TestBlockFetcher[C]) PollingInterval() time.Duration { 47 | return 0 48 | } 49 | 50 | func (b *TestBlockFetcher[C]) IsBlockAvailable(requestedSlot uint64) bool { 51 | return true 52 | } 53 | 54 | func (b *TestBlockFetcher[C]) Fetch(ctx context.Context, c C, blkNum uint64) (*pbbstream.Block, bool, error) { 55 | if len(b.blocks) == 0 { 56 | assert.Fail(b.t, fmt.Sprintf("should not have fetched block %d", blkNum)) 57 | } 58 | 59 | if b.idx >= uint64(len(b.blocks)) { 60 | return nil, false, derr.NewFatalError(TestErrCompleteDone) 61 | } 62 | 63 | if blkNum != b.blocks[b.idx].expect.Number { 64 | assert.Fail(b.t, fmt.Sprintf("expected to fetch block %d, got %d", b.blocks[b.idx].expect.Number, blkNum)) 65 | } 66 | 67 | blkToSend := b.blocks[b.idx].send 68 | b.idx++ 69 | return blkToSend, false, nil 70 | } 71 | 72 | func (b *TestBlockFetcher[C]) check(t *testing.T) { 73 | t.Helper() 74 | require.Equal(b.t, uint64(len(b.blocks)), b.idx, "we should have fetched all %d blocks, only fired %d blocks", len(b.blocks), b.idx) 75 | } 76 | 77 | var _ BlockHandler = &TestBlockFinalizer{} 78 | 79 | type TestBlockFinalizer struct { 80 | t *testing.T 81 | fireBlocks []*pbbstream.Block 82 | idx uint64 83 | } 84 | 85 | func newTestBlockFinalizer(t *testing.T, fireBlocks 
[]*pbbstream.Block) *TestBlockFinalizer { 86 | return &TestBlockFinalizer{ 87 | t: t, 88 | fireBlocks: fireBlocks, 89 | } 90 | } 91 | 92 | func (t *TestBlockFinalizer) Init() { 93 | } 94 | 95 | func (t *TestBlockFinalizer) Handle(blk *pbbstream.Block) error { 96 | if len(t.fireBlocks) == 0 { 97 | assert.Fail(t.t, fmt.Sprintf("should not have fired block %s", blk.AsRef())) 98 | } 99 | 100 | if t.idx >= uint64(len(t.fireBlocks)) { 101 | return TestErrCompleteDone 102 | } 103 | 104 | if blk.Number != t.fireBlocks[t.idx].Number { 105 | assert.Fail(t.t, fmt.Sprintf("expected to fetch block %d, got %d", t.fireBlocks[t.idx].Number, blk.Number)) 106 | } 107 | t.idx++ 108 | return nil 109 | } 110 | 111 | func (b *TestBlockFinalizer) check(t *testing.T) { 112 | t.Helper() 113 | require.Equal(b.t, uint64(len(b.fireBlocks)), b.idx, "we should have fired all %d blocks, only fired %d blocks", len(b.fireBlocks), b.idx) 114 | } 115 | 116 | var _ BlockHandler = &TestNoopBlockFinalizer{} 117 | 118 | type TestNoopBlockFinalizer struct{} 119 | 120 | func (t *TestNoopBlockFinalizer) Init() {} 121 | func (t *TestNoopBlockFinalizer) Handle(blk *pbbstream.Block) error { return nil } 122 | -------------------------------------------------------------------------------- /blockpoller/options.go: -------------------------------------------------------------------------------- 1 | package blockpoller 2 | 3 | import ( 4 | "time" 5 | 6 | "go.uber.org/zap" 7 | ) 8 | 9 | type Option[C any] func(*BlockPoller[C]) 10 | 11 | func WithDelayBetweenFetch[C any](v time.Duration) Option[C] { 12 | return func(p *BlockPoller[C]) { 13 | p.delayBetweenFetch = v 14 | } 15 | } 16 | 17 | func WithBlockFetchRetryCount[C any](v uint64) Option[C] { 18 | return func(p *BlockPoller[C]) { 19 | p.fetchBlockRetryCount = v 20 | } 21 | } 22 | 23 | func WithStoringState[C any](stateStorePath string) Option[C] { 24 | return func(p *BlockPoller[C]) { 25 | p.stateStorePath = stateStorePath 26 | } 27 | } 28 | 29 | // 
IgnoreCursor ensures the poller will ignore the cursor and start from the startBlockNum 30 | // the cursor will still be saved as the poller progresses 31 | func IgnoreCursor[C any]() Option[C] { 32 | return func(p *BlockPoller[C]) { 33 | p.ignoreCursor = true 34 | } 35 | } 36 | 37 | func WithLogger[C any](logger *zap.Logger) Option[C] { 38 | return func(p *BlockPoller[C]) { 39 | p.logger = logger 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /cmd/apps/auth.go: -------------------------------------------------------------------------------- 1 | package apps 2 | 3 | import "net/url" 4 | 5 | func authPluginScheme(input string) string { 6 | parsedURL, err := url.Parse(input) 7 | if err != nil { 8 | return "" 9 | } 10 | 11 | return parsedURL.Scheme 12 | } 13 | -------------------------------------------------------------------------------- /cmd/apps/index_builder.go: -------------------------------------------------------------------------------- 1 | package apps 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/spf13/cobra" 8 | "github.com/spf13/viper" 9 | "github.com/streamingfast/bstream" 10 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 11 | bstransform "github.com/streamingfast/bstream/transform" 12 | firecore "github.com/streamingfast/firehose-core" 13 | index_builder "github.com/streamingfast/firehose-core/index-builder/app/index-builder" 14 | "github.com/streamingfast/firehose-core/launcher" 15 | "go.uber.org/zap" 16 | ) 17 | 18 | func RegisterIndexBuilderApp[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { 19 | launcher.RegisterApp(rootLog, &launcher.AppDef{ 20 | ID: "index-builder", 21 | Title: "Index Builder", 22 | Description: "App the builds indexes out of Firehose blocks", 23 | RegisterFlags: func(cmd *cobra.Command) error { 24 | cmd.Flags().String("index-builder-grpc-listen-addr", firecore.IndexBuilderServiceAddr, "Address to listen for grpc-based healthz 
check") 25 | cmd.Flags().Uint64("index-builder-index-size", 10000, "Size of index bundles that will be created") 26 | cmd.Flags().Uint64("index-builder-start-block", 0, "Block number to start indexing") 27 | cmd.Flags().Uint64("index-builder-stop-block", 0, "Block number to stop indexing") 28 | return nil 29 | }, 30 | InitFunc: func(runtime *launcher.Runtime) error { 31 | return nil 32 | }, 33 | FactoryFunc: func(runtime *launcher.Runtime) (launcher.App, error) { 34 | mergedBlocksStoreURL, _, _, err := firecore.GetCommonStoresURLs(runtime.AbsDataDir) 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | indexStore, lookupIdxSizes, err := firecore.GetIndexStore(runtime.AbsDataDir) 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | // Chain must have been validated, so if we are here, it's because there is 45 | // exactly one index in BlockIndexerFactories map. 46 | indexShortName, indexerFactory, found := getMapFirst(chain.BlockIndexerFactories) 47 | if !found { 48 | return nil, fmt.Errorf("no indexer factory found but one should be defined at this point") 49 | } 50 | 51 | startBlockResolver := func(ctx context.Context) (uint64, error) { 52 | select { 53 | case <-ctx.Done(): 54 | return 0, ctx.Err() 55 | default: 56 | } 57 | 58 | startBlockNum := bstransform.FindNextUnindexed( 59 | ctx, 60 | viper.GetUint64("index-builder-start-block"), 61 | lookupIdxSizes, 62 | indexShortName, 63 | indexStore, 64 | ) 65 | 66 | return startBlockNum, nil 67 | } 68 | stopBlockNum := viper.GetUint64("index-builder-stop-block") 69 | 70 | indexer, err := indexerFactory(indexStore, viper.GetUint64("index-builder-index-size")) 71 | if err != nil { 72 | return nil, fmt.Errorf("unable to create indexer: %w", err) 73 | } 74 | 75 | handler := bstream.HandlerFunc(func(blk *pbbstream.Block, _ interface{}) error { 76 | var b = chain.BlockFactory() 77 | if err := blk.Payload.UnmarshalTo(b); err != nil { 78 | return err 79 | } 80 | return indexer.ProcessBlock(any(b).(B)) 81 | }) 82 | 
83 | app := index_builder.New(&index_builder.Config{ 84 | BlockHandler: handler, 85 | StartBlockResolver: startBlockResolver, 86 | EndBlock: stopBlockNum, 87 | MergedBlocksStoreURL: mergedBlocksStoreURL, 88 | GRPCListenAddr: viper.GetString("index-builder-grpc-listen-addr"), 89 | }) 90 | 91 | return app, nil 92 | }, 93 | }) 94 | } 95 | 96 | func getMapFirst[K comparable, V any](m map[K]V) (k K, v V, found bool) { 97 | for k := range m { 98 | return k, m[k], true 99 | } 100 | 101 | return k, v, false 102 | } 103 | -------------------------------------------------------------------------------- /cmd/apps/merger.go: -------------------------------------------------------------------------------- 1 | package apps 2 | 3 | import ( 4 | "time" 5 | 6 | firecore "github.com/streamingfast/firehose-core" 7 | 8 | "github.com/spf13/cobra" 9 | "github.com/spf13/viper" 10 | "github.com/streamingfast/firehose-core/launcher" 11 | "github.com/streamingfast/firehose-core/merger/app/merger" 12 | "go.uber.org/zap" 13 | ) 14 | 15 | func RegisterMergerApp(rootLog *zap.Logger) { 16 | launcher.RegisterApp(rootLog, &launcher.AppDef{ 17 | ID: "merger", 18 | Title: "Merger", 19 | Description: "Produces merged block files from single-block files", 20 | RegisterFlags: func(cmd *cobra.Command) error { 21 | cmd.Flags().String("merger-grpc-listen-addr", firecore.MergerServingAddr, "Address to listen for incoming gRPC requests") 22 | cmd.Flags().Uint64("merger-prune-forked-blocks-after", 50000, "Number of blocks that must pass before we delete old forks (one-block-files lingering)") 23 | cmd.Flags().Uint64("merger-stop-block", 0, "If non-zero, merger will trigger shutdown when blocks have been merged up to this block") 24 | cmd.Flags().Duration("merger-time-between-store-lookups", 1*time.Second, "Delay between source store polling (should be higher for remote storage)") 25 | cmd.Flags().Duration("merger-time-between-store-pruning", time.Minute, "Delay between source store pruning loops") 26 | 
cmd.Flags().Int("merger-delete-threads", 8, "Number of threads for deleting files in parallel (increase this in case the merger isn't able to keep up with deleting one-block files).") 27 | return nil 28 | }, 29 | FactoryFunc: func(runtime *launcher.Runtime) (launcher.App, error) { 30 | mergedBlocksStoreURL, oneBlocksStoreURL, forkedBlocksStoreURL, err := firecore.GetCommonStoresURLs(runtime.AbsDataDir) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | return merger.New(&merger.Config{ 36 | GRPCListenAddr: viper.GetString("merger-grpc-listen-addr"), 37 | PruneForkedBlocksAfter: viper.GetUint64("merger-prune-forked-blocks-after"), 38 | StorageOneBlockFilesPath: oneBlocksStoreURL, 39 | StorageMergedBlocksFilesPath: mergedBlocksStoreURL, 40 | StorageForkedBlocksFilesPath: forkedBlocksStoreURL, 41 | StopBlock: viper.GetUint64("merger-stop-block"), 42 | TimeBetweenPruning: viper.GetDuration("merger-time-between-store-pruning"), 43 | TimeBetweenPolling: viper.GetDuration("merger-time-between-store-lookups"), 44 | FilesDeleteThreads: viper.GetInt("merger-delete-threads"), 45 | }), nil 46 | }, 47 | }) 48 | } 49 | -------------------------------------------------------------------------------- /cmd/apps/metering.go: -------------------------------------------------------------------------------- 1 | package apps 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/spf13/cobra" 8 | "github.com/spf13/viper" 9 | "github.com/streamingfast/dmetering" 10 | "go.uber.org/zap" 11 | ) 12 | 13 | // GetCommonMeteringPlugin returns the common metering plugin value to use 14 | // for the application. It reads the `common-metering-plugin` flag 15 | // from the command and returns the plugin after expanding the 16 | // environment variables in it meaning 'paymentGateway://test?token=${TOKEN}'. 
17 | func GetCommonMeteringPluginValue() string { 18 | plugin := viper.GetString("common-metering-plugin") 19 | return os.ExpandEnv(plugin) 20 | } 21 | 22 | // GetCommonMeteringPlugin returns the common metering plugin to use 23 | // for the application. It reads the `common-metering-plugin` flag 24 | // from the command and returns the plugin after expanding the 25 | // environment variables in it meaning 'paymentGateway://test?token=${TOKEN}'. 26 | func GetCommonMeteringPlugin(cmd *cobra.Command, logger *zap.Logger) (dmetering.EventEmitter, error) { 27 | // We keep cmd as argument for future proofing, at which point we are going to break 28 | // GetCommonMeteringPluginValue above. 29 | _ = cmd 30 | 31 | eventEmitter, err := dmetering.New(GetCommonMeteringPluginValue(), logger) 32 | if err != nil { 33 | return nil, fmt.Errorf("new metering plugin: %w", err) 34 | } 35 | 36 | return eventEmitter, nil 37 | } 38 | -------------------------------------------------------------------------------- /cmd/apps/reader_node_stdin.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package apps 16 | 17 | import ( 18 | "github.com/spf13/cobra" 19 | "github.com/spf13/viper" 20 | firecore "github.com/streamingfast/firehose-core" 21 | "github.com/streamingfast/firehose-core/launcher" 22 | nodeManager "github.com/streamingfast/firehose-core/node-manager" 23 | nodeReaderStdinApp "github.com/streamingfast/firehose-core/node-manager/app/node_reader_stdin" 24 | "github.com/streamingfast/firehose-core/node-manager/metrics" 25 | "github.com/streamingfast/firehose-core/node-manager/mindreader" 26 | "github.com/streamingfast/logging" 27 | "go.uber.org/zap" 28 | ) 29 | 30 | func RegisterReaderNodeStdinApp[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { 31 | appLogger, appTracer := logging.PackageLogger("reader-node-stdin", chain.LoggerPackageID("reader-node-stdin")) 32 | 33 | launcher.RegisterApp(rootLog, &launcher.AppDef{ 34 | ID: "reader-node-stdin", 35 | Title: "Reader Node (stdin)", 36 | Description: "Blocks reading node, unmanaged, reads Firehose logs from standard input and transform them into Firehose chain specific blocks", 37 | RegisterFlags: func(cmd *cobra.Command) error { return nil }, 38 | FactoryFunc: func(runtime *launcher.Runtime) (launcher.App, error) { 39 | sfDataDir := runtime.AbsDataDir 40 | archiveStoreURL := firecore.MustReplaceDataDir(sfDataDir, viper.GetString("common-one-block-store-url")) 41 | consoleReaderFactory := func(lines chan string) (mindreader.ConsolerReader, error) { 42 | return chain.ConsoleReaderFactory(lines, chain.BlockEncoder, appLogger, appTracer) 43 | } 44 | 45 | metricID := "reader-node-stdin" 46 | headBlockTimeDrift := metrics.NewHeadBlockTimeDrift(metricID) 47 | headBlockNumber := metrics.NewHeadBlockNumber(metricID) 48 | appReadiness := metrics.NewAppReadiness(metricID) 49 | metricsAndReadinessManager := nodeManager.NewMetricsAndReadinessManager(headBlockTimeDrift, headBlockNumber, appReadiness, viper.GetDuration("reader-node-readiness-max-latency")) 50 | 51 | return 
nodeReaderStdinApp.New(&nodeReaderStdinApp.Config{ 52 | GRPCAddr: viper.GetString("reader-node-grpc-listen-addr"), 53 | OneBlocksStoreURL: archiveStoreURL, 54 | MindReadBlocksChanCapacity: viper.GetInt("reader-node-blocks-chan-capacity"), 55 | StartBlockNum: viper.GetUint64("reader-node-start-block-num"), 56 | StopBlockNum: viper.GetUint64("reader-node-stop-block-num"), 57 | WorkingDir: firecore.MustReplaceDataDir(sfDataDir, viper.GetString("reader-node-working-dir")), 58 | OneBlockSuffix: viper.GetString("reader-node-one-block-suffix"), 59 | MaxLineLengthInBytes: int64(viper.GetUint64("reader-node-line-buffer-size")), 60 | }, &nodeReaderStdinApp.Modules{ 61 | ConsoleReaderFactory: consoleReaderFactory, 62 | MetricsAndReadinessManager: metricsAndReadinessManager, 63 | }, appLogger, appTracer), nil 64 | }, 65 | }) 66 | } 67 | -------------------------------------------------------------------------------- /cmd/apps/reader_node_test.go: -------------------------------------------------------------------------------- 1 | package apps 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func Test_buildNodeArguments(t *testing.T) { 12 | dataDir := "/data" 13 | nodeDataDir := "/data/node" 14 | hostname := "host" 15 | 16 | envVar := func(k string) string { 17 | switch k { 18 | case "myhostname": 19 | return "host with spaces" 20 | } 21 | 22 | return "" 23 | } 24 | 25 | tests := []struct { 26 | name string 27 | args string 28 | withEnv func(k string) string 29 | want []string 30 | firstStreamableBlock uint64 31 | startBlockNum uint64 32 | stopBlockNum uint64 33 | assertion require.ErrorAssertionFunc 34 | }{ 35 | {"no variables", "arg1 arg2", nil, []string{"arg1", "arg2"}, 0, 10, 20, require.NoError}, 36 | {"variable data-dir", "{data-dir} arg2", nil, []string{"/data", "arg2"}, 0, 10, 20, require.NoError}, 37 | {"variable node-data-dir", "{node-data-dir} arg2", nil, 
[]string{"/data/node", "arg2"}, 0, 10, 20, require.NoError}, 38 | {"variable hostname", "{hostname} arg2", nil, []string{"host", "arg2"}, 0, 10, 20, require.NoError}, 39 | {"variable first-streamable-block", "{first-streamable-block} arg2", nil, []string{"0", "arg2"}, 0, 10, 20, require.NoError}, 40 | {"variable start block num", "{start-block-num} arg2", nil, []string{"10", "arg2"}, 0, 10, 20, require.NoError}, 41 | {"variable stop block num", "{stop-block-num} arg2", nil, []string{"20", "arg2"}, 0, 10, 20, require.NoError}, 42 | {"variable data-dir double quotes", `"{hostname} with spaces" arg2`, nil, []string{"host with spaces", "arg2"}, 0, 10, 20, require.NoError}, 43 | {"variable all", `--home="{data-dir}" --data={node-data-dir} --id={hostname} --other --start={start-block-num} -stop {stop-block-num} --foo`, nil, []string{ 44 | "--home=/data", 45 | "--data=/data/node", 46 | "--id=host", 47 | "--other", 48 | "--start=10", 49 | "-stop", 50 | "20", 51 | "--foo", 52 | }, 0, 10, 20, require.NoError}, 53 | 54 | {"env variable plain", `--endpoint=${myhostname}`, envVar, []string{"--endpoint=host with spaces"}, 0, 10, 20, require.NoError}, 55 | {"env variable that expand with spaces is split correctly", `"${myhostname}" arg2`, envVar, []string{"host with spaces", "arg2"}, 0, 10, 20, require.NoError}, 56 | } 57 | for _, tt := range tests { 58 | t.Run(tt.name, func(t *testing.T) { 59 | resolver := createNodeArgumentsResolver(dataDir, nodeDataDir, hostname, tt.firstStreamableBlock, tt.startBlockNum, tt.stopBlockNum) 60 | 61 | if tt.withEnv != nil { 62 | osEnvExpandGetter = tt.withEnv 63 | t.Cleanup(func() { osEnvExpandGetter = os.Getenv }) 64 | } 65 | 66 | args, err := buildNodeArguments(tt.args, resolver) 67 | tt.assertion(t, err) 68 | 69 | assert.Equal(t, tt.want, args) 70 | }) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /cmd/apps/relayer.go: 
-------------------------------------------------------------------------------- 1 | package apps 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/spf13/cobra" 7 | "github.com/spf13/viper" 8 | firecore "github.com/streamingfast/firehose-core" 9 | "github.com/streamingfast/firehose-core/launcher" 10 | "github.com/streamingfast/firehose-core/relayer/app/relayer" 11 | "go.uber.org/zap" 12 | ) 13 | 14 | func RegisterRelayerApp(rootLog *zap.Logger) { 15 | launcher.RegisterApp(rootLog, &launcher.AppDef{ 16 | ID: "relayer", 17 | Title: "Relayer", 18 | Description: "Serves blocks as a stream, with a buffer", 19 | RegisterFlags: func(cmd *cobra.Command) error { 20 | cmd.Flags().String("relayer-grpc-listen-addr", firecore.RelayerServingAddr, "Address to listen for incoming gRPC requests") 21 | cmd.Flags().StringSlice("relayer-source", []string{firecore.ReaderNodeGRPCAddr}, "List of live sources (reader(s)) to connect to for live block feeds (repeat flag as needed)") 22 | cmd.Flags().Duration("relayer-max-source-latency", 999999*time.Hour, "Max latency tolerated to connect to a source. 
A performance optimization for when you have redundant sources and some may not have caught up") 23 | return nil 24 | }, 25 | FactoryFunc: func(runtime *launcher.Runtime) (launcher.App, error) { 26 | sfDataDir := runtime.AbsDataDir 27 | 28 | sourcesAddr := viper.GetStringSlice("relayer-source") 29 | 30 | return relayer.New(&relayer.Config{ 31 | SourcesAddr: sourcesAddr, 32 | OneBlocksURL: firecore.MustReplaceDataDir(sfDataDir, viper.GetString("common-one-block-store-url")), 33 | GRPCListenAddr: viper.GetString("relayer-grpc-listen-addr"), 34 | MaxSourceLatency: viper.GetDuration("relayer-max-source-latency"), 35 | }), nil 36 | }, 37 | }) 38 | } 39 | -------------------------------------------------------------------------------- /cmd/apps/substreams_common.go: -------------------------------------------------------------------------------- 1 | package apps 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | var registerSSOnce sync.Once 11 | 12 | func registerCommonSubstreamsFlags(cmd *cobra.Command) { 13 | registerSSOnce.Do(func() { 14 | cmd.Flags().Uint64("substreams-state-bundle-size", uint64(1_000), "Interval in blocks at which to save store snapshots and output caches") 15 | cmd.Flags().String("substreams-state-store-url", "{sf-data-dir}/localdata", "where substreams state data are stored") 16 | cmd.Flags().String("substreams-state-store-default-tag", "", "If non-empty, will be appended to {substreams-state-store-url} (ex: 'v1'). 
Can be overridden per-request with 'X-Sf-Substreams-Cache-Tag' header")
17 | 		cmd.Flags().Duration("substreams-block-execution-timeout", 3*time.Minute, "Maximum execution time for a block before the request is canceled")
18 | 	})
19 | }
20 | 
--------------------------------------------------------------------------------
/cmd/firecore/main.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1"
5 | 	firecore "github.com/streamingfast/firehose-core"
6 | 	fhCMD "github.com/streamingfast/firehose-core/cmd"
7 | 	info "github.com/streamingfast/firehose-core/firehose/info"
8 | )
9 | 
10 | func main() {
11 | 	firecore.UnsafeRunningFromFirecore = true
12 | 	firecore.UnsafeAllowExecutableNameToBeEmpty = true
13 | 
14 | 	fhCMD.Main(&firecore.Chain[*pbbstream.Block]{
15 | 		ShortName:            "core",
16 | 		LongName:             "CORE", //only used to compose cmd title and description
17 | 		FullyQualifiedModule: "github.com/streamingfast/firehose-core",
18 | 		Version:              version,
19 | 		BlockFactory:         func() firecore.Block { return new(pbbstream.Block) },
20 | 		ConsoleReaderFactory: firecore.NewConsoleReader,
21 | 		InfoResponseFiller:   info.DefaultInfoResponseFiller,
22 | 		Tools:                &firecore.ToolsConfig[*pbbstream.Block]{},
23 | 	})
24 | }
25 | 
26 | // Version value, injected via go build `ldflags` at build time, **must** not be removed or inlined
27 | var version = "dev"
28 | 
--------------------------------------------------------------------------------
/cmd/tools/check/mergedbatch.go:
--------------------------------------------------------------------------------
1 | // Copyright 2021 dfuse Platform Inc.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package check 16 | 17 | import ( 18 | "strconv" 19 | 20 | "github.com/spf13/cobra" 21 | "github.com/streamingfast/cli/sflags" 22 | "github.com/streamingfast/firehose-core/types" 23 | ) 24 | 25 | func newCheckMergedBlockBatchCmd() *cobra.Command { 26 | var toolsCheckMergedBlocksBatchCmd = &cobra.Command{ 27 | Use: "merged-blocks-batch ", 28 | Short: "Checks for any missing, disordered or duplicate blocks in merged blocks files", 29 | Args: cobra.ExactArgs(3), 30 | RunE: checkMergedBlocksBatchRunE, 31 | } 32 | toolsCheckMergedBlocksBatchCmd.PersistentFlags().String("output-to-store", "", "If non-empty, an empty file called .broken will be created for every problematic merged-blocks-file. 
This is a convenient way to gather the results from multiple parallel processes.") 33 | return toolsCheckMergedBlocksBatchCmd 34 | 35 | } 36 | 37 | func checkMergedBlocksBatchRunE(cmd *cobra.Command, args []string) error { 38 | storeURL := args[0] 39 | start, err := strconv.ParseUint(args[1], 10, 64) 40 | if err != nil { 41 | return err 42 | } 43 | stop, err := strconv.ParseUint(args[2], 10, 64) 44 | if err != nil { 45 | return err 46 | } 47 | fileBlockSize := uint64(100) 48 | 49 | blockRange := types.BlockRange{ 50 | Start: int64(start), 51 | Stop: &stop, 52 | } 53 | 54 | resultsStoreURL := sflags.MustGetString(cmd, "output-to-store") 55 | 56 | return CheckMergedBlocksBatch(cmd.Context(), storeURL, resultsStoreURL, fileBlockSize, blockRange) 57 | } 58 | -------------------------------------------------------------------------------- /cmd/tools/firehose/single_block_client.go: -------------------------------------------------------------------------------- 1 | package firehose 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/spf13/cobra" 11 | "github.com/streamingfast/cli" 12 | firecore "github.com/streamingfast/firehose-core" 13 | "github.com/streamingfast/firehose-core/cmd/tools/print" 14 | "github.com/streamingfast/logging" 15 | pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" 16 | "go.uber.org/zap" 17 | ) 18 | 19 | // You should add your custom 'transforms' flags to this command in your init(), then parse them in transformsSetter 20 | func NewToolsFirehoseSingleBlockClientCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) *cobra.Command { 21 | cmd := &cobra.Command{ 22 | Use: "firehose-single-block-client {endpoint} {block_num|block_num:block_id|cursor}", 23 | Short: "Performs a FetchClient#Block call against a Firehose endpoint and print the response", 24 | Long: string(cli.Description(` 25 | Performs a sf.firehose.v2.Fetch/Block call against a Firehose endpoint 
and print the full response 26 | object. 27 | 28 | By default, the response is printed in JSON format, but you can use the --output flag to 29 | choose a different output format (text, json, jsonl, protojson, protojsonl). 30 | `)), 31 | Args: cobra.ExactArgs(2), 32 | RunE: getFirehoseSingleBlockClientE(chain, zlog, tracer), 33 | Example: firecore.ExamplePrefixed(chain, "tools ", ` 34 | firehose-single-block-client --compression=gzip my.firehose.endpoint:443 2344:0x32d8e8d98a798da98d6as9d69899as86s9898d8ss8d87 35 | `), 36 | } 37 | 38 | addFirehoseFetchClientFlagsToSet(cmd.Flags(), chain) 39 | 40 | return cmd 41 | } 42 | 43 | func getFirehoseSingleBlockClientE[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) func(cmd *cobra.Command, args []string) error { 44 | return func(cmd *cobra.Command, args []string) error { 45 | ctx := context.Background() 46 | 47 | endpoint := args[0] 48 | firehoseClient, connClose, requestInfo, err := getFirehoseFetchClientFromCmd(cmd, zlog, endpoint, chain) 49 | if err != nil { 50 | return err 51 | } 52 | defer connClose() 53 | 54 | req := &pbfirehose.SingleBlockRequest{} 55 | 56 | ref := args[1] 57 | if num, err := strconv.ParseUint(ref, 10, 64); err == nil { 58 | req.Reference = &pbfirehose.SingleBlockRequest_BlockNumber_{ 59 | BlockNumber: &pbfirehose.SingleBlockRequest_BlockNumber{ 60 | Num: num, 61 | }, 62 | } 63 | } else if parts := strings.Split(ref, ":"); len(parts) == 2 { 64 | num, err := strconv.ParseUint(parts[0], 10, 64) 65 | if err != nil { 66 | return fmt.Errorf("invalid block reference, cannot decode first part as block_num: %s, %w", ref, err) 67 | } 68 | req.Reference = &pbfirehose.SingleBlockRequest_BlockHashAndNumber_{ 69 | BlockHashAndNumber: &pbfirehose.SingleBlockRequest_BlockHashAndNumber{ 70 | Num: num, 71 | Hash: parts[1], 72 | }, 73 | } 74 | 75 | } else { 76 | req.Reference = &pbfirehose.SingleBlockRequest_Cursor_{ 77 | Cursor: &pbfirehose.SingleBlockRequest_Cursor{ 78 | Cursor: 
ref, 79 | }, 80 | } 81 | } 82 | 83 | resp, err := firehoseClient.Block(ctx, req, requestInfo.GRPCCallOpts...) 84 | if err != nil { 85 | return err 86 | } 87 | 88 | printer, err := print.GetOutputPrinter(cmd, chain.BlockFileDescriptor()) 89 | cli.NoError(err, "Unable to get output printer") 90 | 91 | cli.NoError(printer.PrintTo(resp, os.Stdout), "Unable to print block") 92 | 93 | return nil 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /cmd/tools/mergeblock/tools_merge_blocks.go: -------------------------------------------------------------------------------- 1 | package mergeblock 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | "strconv" 8 | 9 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 10 | 11 | "github.com/streamingfast/bstream" 12 | 13 | "github.com/spf13/cobra" 14 | "github.com/streamingfast/dstore" 15 | firecore "github.com/streamingfast/firehose-core" 16 | "go.uber.org/zap" 17 | ) 18 | 19 | func NewToolsMergeBlocksCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "merge-blocks ", 22 | Short: "Merges one-block files into merged-block file", 23 | Args: cobra.ExactArgs(3), 24 | RunE: runMergeBlocksE(zlog), 25 | } 26 | 27 | return cmd 28 | } 29 | 30 | func runMergeBlocksE(zlog *zap.Logger) firecore.CommandExecutor { 31 | return func(cmd *cobra.Command, args []string) error { 32 | ctx := cmd.Context() 33 | 34 | srcStore, err := dstore.NewDBinStore(args[0]) 35 | if err != nil { 36 | return fmt.Errorf("unable to create source store: %w", err) 37 | } 38 | 39 | destStore, err := dstore.NewDBinStore(args[1]) 40 | if err != nil { 41 | return fmt.Errorf("unable to create destination store: %w", err) 42 | } 43 | 44 | lowBundary, err := strconv.ParseUint(args[2], 10, 64) 45 | if err != nil { 46 | return fmt.Errorf("converting low bundary string to uint64: %w", err) 47 | } 48 | 49 | mergeWriter := &firecore.MergedBlocksWriter{ 
50 | Store: destStore, 51 | LowBlockNum: lowBundary, 52 | StopBlockNum: 0, 53 | Logger: zlog, 54 | Cmd: cmd, 55 | } 56 | 57 | zlog.Info("starting block merger process", zap.String("source", args[0]), zap.String("dest", args[1])) 58 | 59 | var lastFilename string 60 | var blockCount int 61 | var previousBlockNumber uint64 62 | err = srcStore.WalkFrom(ctx, "", fmt.Sprintf("%010d", lowBundary), func(filename string) error { 63 | var currentBlockNumber uint64 64 | currentBlockNumber, _, _, _, _, err = bstream.ParseFilename(filename) 65 | if err != nil { 66 | return fmt.Errorf("parsing filename %s: %w", filename, err) 67 | } 68 | 69 | if previousBlockNumber == currentBlockNumber { 70 | zlog.Warn("skipping duplicate block", zap.String("filename", filename)) 71 | return nil 72 | } 73 | 74 | if currentBlockNumber > lowBundary+100 { 75 | return dstore.StopIteration 76 | } 77 | 78 | var fileReader io.Reader 79 | fileReader, err = srcStore.OpenObject(ctx, filename) 80 | if err != nil { 81 | return fmt.Errorf("creating reader: %w", err) 82 | } 83 | 84 | var blockReader *bstream.DBinBlockReader 85 | blockReader, err = bstream.NewDBinBlockReader(fileReader) 86 | if err != nil { 87 | return fmt.Errorf("creating block reader: %w", err) 88 | } 89 | 90 | var currentBlock *pbbstream.Block 91 | currentBlock, err = blockReader.Read() 92 | if err != nil { 93 | return fmt.Errorf("reading block: %w", err) 94 | } 95 | 96 | if err = mergeWriter.ProcessBlock(currentBlock, nil); err != nil { 97 | return fmt.Errorf("processing block: %w", err) 98 | } 99 | 100 | lastFilename = filename 101 | blockCount += 1 102 | 103 | previousBlockNumber = currentBlockNumber 104 | return nil 105 | }) 106 | 107 | mergeWriter.Logger = mergeWriter.Logger.With(zap.String("last_filename", lastFilename), zap.Int("block_count", blockCount)) 108 | if err != nil { 109 | if errors.Is(err, dstore.StopIteration) { 110 | err = mergeWriter.WriteBundle() 111 | if err != nil { 112 | return fmt.Errorf("writing bundle: %w", 
err) 113 | } 114 | fmt.Println("done") 115 | } 116 | return fmt.Errorf("walking source store: %w", err) 117 | } 118 | 119 | err = mergeWriter.WriteBundle() 120 | if err != nil { 121 | return fmt.Errorf("writing bundle: %w", err) 122 | } 123 | 124 | return nil 125 | } 126 | 127 | } 128 | -------------------------------------------------------------------------------- /cmd/tools/mergeblock/tools_upgrade_merged_blocks.go: -------------------------------------------------------------------------------- 1 | package mergeblock 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "strconv" 9 | 10 | "github.com/spf13/cobra" 11 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 12 | "github.com/streamingfast/bstream/stream" 13 | "github.com/streamingfast/dstore" 14 | firecore "github.com/streamingfast/firehose-core" 15 | "go.uber.org/zap" 16 | ) 17 | 18 | func NewToolsUpgradeMergedBlocksCmd[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) *cobra.Command { 19 | return &cobra.Command{ 20 | Use: "upgrade-merged-blocks ", 21 | Short: "From a merged-blocks source, rewrite blocks to a new merged-blocks destination, while applying all possible upgrades", 22 | Args: cobra.ExactArgs(4), 23 | RunE: getMergedBlockUpgrader(chain.Tools.MergedBlockUpgrader, rootLog), 24 | } 25 | } 26 | 27 | func getMergedBlockUpgrader(tweakFunc func(block *pbbstream.Block) (*pbbstream.Block, error), rootLog *zap.Logger) func(cmd *cobra.Command, args []string) error { 28 | return func(cmd *cobra.Command, args []string) error { 29 | source := args[0] 30 | sourceStore, err := dstore.NewDBinStore(source) 31 | if err != nil { 32 | return fmt.Errorf("reading source store: %w", err) 33 | } 34 | 35 | dest := args[1] 36 | destStore, err := dstore.NewStore(dest, "dbin.zst", "zstd", true) 37 | if err != nil { 38 | return fmt.Errorf("reading destination store: %w", err) 39 | } 40 | 41 | start, err := strconv.ParseUint(args[2], 10, 64) 42 | if err != nil { 43 | return 
fmt.Errorf("parsing start block num: %w", err) 44 | } 45 | stop, err := strconv.ParseUint(args[3], 10, 64) 46 | if err != nil { 47 | return fmt.Errorf("parsing stop block num: %w", err) 48 | } 49 | 50 | rootLog.Info("starting block upgrader process", zap.Uint64("start", start), zap.Uint64("stop", stop), zap.String("source", source), zap.String("dest", dest)) 51 | writer := &firecore.MergedBlocksWriter{ 52 | Cmd: cmd, 53 | Store: destStore, 54 | LowBlockNum: firecore.LowBoundary(start), 55 | StopBlockNum: stop, 56 | TweakBlock: tweakFunc, 57 | Logger: rootLog, 58 | } 59 | stream := stream.New(nil, sourceStore, nil, int64(start), writer, stream.WithFinalBlocksOnly()) 60 | 61 | err = stream.Run(context.Background()) 62 | if errors.Is(err, io.EOF) { 63 | rootLog.Info("Complete!") 64 | return nil 65 | } 66 | return err 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /cmd/tools/print/printer_bytes.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package print 16 | 17 | import ( 18 | "encoding/base64" 19 | "encoding/hex" 20 | "fmt" 21 | "io" 22 | 23 | "github.com/mr-tron/base58" 24 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 25 | fcproto "github.com/streamingfast/firehose-core/proto" 26 | pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" 27 | "google.golang.org/protobuf/proto" 28 | ) 29 | 30 | var _ OutputPrinter = (*BytesOutputPrinter)(nil) 31 | 32 | type BytesOutputPrinter struct { 33 | bytesEncoding string 34 | registry *fcproto.Registry 35 | } 36 | 37 | func NewBytesOutputPrinter(bytesEncoding string, registry *fcproto.Registry) *BytesOutputPrinter { 38 | return &BytesOutputPrinter{ 39 | bytesEncoding: bytesEncoding, 40 | registry: registry, 41 | } 42 | } 43 | 44 | func (p *BytesOutputPrinter) PrintTo(input any, out io.Writer) error { 45 | if pbblock, ok := input.(*pbbstream.Block); ok { 46 | return p.printBytes(pbblock.Payload.Value, out) 47 | } 48 | 49 | if v, ok := input.(*pbfirehose.Response); ok { 50 | return p.printBytes(v.Block.Value, out) 51 | } 52 | 53 | if v, ok := input.(*pbfirehose.SingleBlockResponse); ok { 54 | return p.printBytes(v.Block.Value, out) 55 | } 56 | 57 | if v, ok := input.(proto.Message); ok { 58 | data, err := proto.Marshal(v) 59 | if err != nil { 60 | return fmt.Errorf("unable to marshal proto message: %w", err) 61 | } 62 | 63 | return p.printBytes(data, out) 64 | } 65 | 66 | return fmt.Errorf("unsupported type %T", input) 67 | } 68 | 69 | var base64Encoding = base64.StdEncoding 70 | 71 | func (p *BytesOutputPrinter) printBytes(data []byte, out io.Writer) error { 72 | var err error 73 | switch p.bytesEncoding { 74 | case "hex": 75 | err = p.printBytesHex(data, out) 76 | case "base58": 77 | err = p.printBytesBase58(data, out) 78 | case "base64": 79 | err = p.printBytesBase64(data, out) 80 | default: 81 | return fmt.Errorf("unsupported bytes encoding %q", p.bytesEncoding) 82 | } 83 | if err != nil { 84 | return fmt.Errorf("unable to print 
bytes: %w", err) 85 | } 86 | 87 | return writeStringToWriter(out, "") 88 | } 89 | 90 | func (p *BytesOutputPrinter) printBytesHex(data []byte, out io.Writer) error { 91 | encoder := hex.NewEncoder(out) 92 | _, err := encoder.Write(data) 93 | return err 94 | } 95 | 96 | func (p *BytesOutputPrinter) printBytesBase58(data []byte, out io.Writer) error { 97 | return writeStringToWriter(out, base58.Encode(data)) 98 | } 99 | 100 | func (p *BytesOutputPrinter) printBytesBase64(data []byte, out io.Writer) error { 101 | encoder := base64.NewEncoder(base64Encoding, out) 102 | // This flushes the base64 encoder but doesn't close the underlying writer, which is 103 | // what we want here 104 | defer encoder.Close() 105 | 106 | _, err := encoder.Write(data) 107 | return err 108 | } 109 | -------------------------------------------------------------------------------- /cmd/tools/print/printer_json.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package print 16 | 17 | import ( 18 | "fmt" 19 | "io" 20 | 21 | "github.com/go-json-experiment/json" 22 | "github.com/go-json-experiment/json/jsontext" 23 | fcjson "github.com/streamingfast/firehose-core/json" 24 | fcproto "github.com/streamingfast/firehose-core/proto" 25 | ) 26 | 27 | var _ OutputPrinter = (*JSONOutputPrinter)(nil) 28 | 29 | type JSONOutputPrinter struct { 30 | singleLine bool 31 | marshaller *fcjson.Marshaller 32 | } 33 | 34 | func NewJSONOutputPrinter(bytesEncoding string, singleLine bool, registry *fcproto.Registry) (OutputPrinter, error) { 35 | var options []fcjson.MarshallerOption 36 | 37 | if bytesEncoding != "" { 38 | options = append(options, fcjson.WithBytesEncoding(bytesEncoding)) 39 | } 40 | 41 | return &JSONOutputPrinter{ 42 | singleLine: singleLine, 43 | marshaller: fcjson.NewMarshaller(registry, options...), 44 | }, nil 45 | } 46 | 47 | func (p *JSONOutputPrinter) PrintTo(input any, w io.Writer) error { 48 | var encoderOptions []json.Options 49 | if !p.singleLine { 50 | encoderOptions = append(encoderOptions, jsontext.WithIndent(" ")) 51 | } 52 | 53 | out, err := p.marshaller.MarshalToString(input, encoderOptions...) 54 | if err != nil { 55 | return fmt.Errorf("marshalling block to json: %w", err) 56 | } 57 | 58 | return writeStringToWriter(w, out) 59 | } 60 | -------------------------------------------------------------------------------- /cmd/tools/print/printer_protojson.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package print 16 | 17 | import ( 18 | "fmt" 19 | "io" 20 | 21 | fcproto "github.com/streamingfast/firehose-core/proto" 22 | "google.golang.org/protobuf/encoding/protojson" 23 | "google.golang.org/protobuf/proto" 24 | ) 25 | 26 | var _ OutputPrinter = (*ProtoJSONOutputPrinter)(nil) 27 | 28 | type ProtoJSONOutputPrinter struct { 29 | marshaller protojson.MarshalOptions 30 | } 31 | 32 | func NewProtoJSONOutputPrinter(indent string, registry *fcproto.Registry) *ProtoJSONOutputPrinter { 33 | return &ProtoJSONOutputPrinter{ 34 | marshaller: protojson.MarshalOptions{ 35 | Resolver: registry, 36 | Indent: indent, 37 | EmitDefaultValues: true, 38 | }, 39 | } 40 | } 41 | 42 | func (p *ProtoJSONOutputPrinter) PrintTo(input any, w io.Writer) error { 43 | v, ok := input.(proto.Message) 44 | if !ok { 45 | return fmt.Errorf("we accept only proto.Message input") 46 | } 47 | 48 | out, err := p.marshaller.Marshal(v) 49 | if err != nil { 50 | return fmt.Errorf("marshalling block to protojson: %w", err) 51 | } 52 | 53 | return writeBytesToWriter(w, out) 54 | } 55 | -------------------------------------------------------------------------------- /cmd/tools/print/tools_print_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package print 16 | 17 | import "testing" 18 | 19 | func Test_doesLookLikeStoreURLFile(t *testing.T) { 20 | type args struct { 21 | path string 22 | } 23 | tests := []struct { 24 | name string 25 | args args 26 | want bool 27 | }{ 28 | { 29 | name: "just block number", 30 | args: args{"gs://bucket/path/to/file/0000000001"}, 31 | want: true, 32 | }, 33 | { 34 | name: "block number + dbin", 35 | args: args{"gs://bucket/path/to/file/0000000001.dbin"}, 36 | want: true, 37 | }, 38 | { 39 | name: "block number + dbin +zst", 40 | args: args{"gs://bucket/path/to/file/0000000001.dbin.zst"}, 41 | want: true, 42 | }, 43 | { 44 | name: "wrong block prefix, alone", 45 | args: args{"gs://bucket/path/to/file/v2"}, 46 | want: false, 47 | }, 48 | { 49 | name: "wrong block prefix + dbing", 50 | args: args{"gs://bucket/path/to/file/v2.dbin"}, 51 | want: false, 52 | }, 53 | { 54 | name: "wrong block prefix + dbin + zst", 55 | args: args{"gs://bucket/path/to/file/v2.dbin.zst"}, 56 | want: false, 57 | }, 58 | } 59 | for _, tt := range tests { 60 | t.Run(tt.name, func(t *testing.T) { 61 | if got := looksLikeMergedBlocksFile(tt.args.path); got != tt.want { 62 | t.Errorf("doesLookLikeStoreURLFile() = %v, want %v", got, tt.want) 63 | } 64 | }) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /config/examples/merge-accumulated-one-blocks.yaml: -------------------------------------------------------------------------------- 1 | start: 2 | args: 3 | - merger 4 | flags: 5 | log-to-file: 
false 6 | data-dir: /data//firehose-data 7 | 8 | # Use the first rounded block number as the starting point from the one-blocks file you 9 | # want to merge. For example, if you have in your one-blocks folder (in this config 10 | # it would be /data//firehose-data/storage/one-blocks) the files: 11 | # - 0005222014-a4f7d8b748525f5f-9c0632eec4c64c8b-5221815-default.dbin.zst 12 | # - 0005222015-ba4f7d8b748525f5f-ac0632eec4c64c8b-5221817-default.dbin.zst 13 | # - 0005222016-c4f7d8b748525f5f-ac0632eec4c64c8b-5221817-default.dbin.zst 14 | # - ... 15 | # 16 | # Then the upper 100s rounded block number is 5222100 so you would use 17 | # the config value 'common-first-streamable-block: 5222100'. 18 | # 19 | # If the block you have is exactly 5222100, then you should use 5222100 as 20 | # the starting point. 21 | # 22 | # You can use `find /data//firehose-data/storage/one-blocks -name "*.zst" | sort | head -1` 23 | # (or your appropriate 'common-one-block-store-url' value) to find the first block 24 | # you have. The block number is the first value in the file name (the others are 25 | # block's hash, last irreversible block's hash, last irreversible block's num). 26 | # 27 | # The range before the starting point will need to be reprocessed based on 28 | # the chain's recovery mechanism which is out of scope here. 29 | common-first-streamable-block: 100 30 | 31 | # You should set this to the last block number you have in your one-blocks folder 32 | # rounded down to 100s. 33 | # 34 | # For example, if you have in your one-blocks folder last block 8725, 35 | # then you should use 8700 as the stop block.
36 | merger-stop-block: 8700 37 | -------------------------------------------------------------------------------- /consolereader_test.go: -------------------------------------------------------------------------------- 1 | package firecore 2 | 3 | import ( 4 | "encoding/base64" 5 | "encoding/hex" 6 | "fmt" 7 | "testing" 8 | "time" 9 | 10 | "github.com/streamingfast/firehose-core/test" 11 | "github.com/streamingfast/logging" 12 | "github.com/stretchr/testify/require" 13 | "google.golang.org/protobuf/types/known/anypb" 14 | ) 15 | 16 | var zlogTest, tracerTest = logging.PackageLogger("test", "github.com/streamingfast/firehose-core/firecore") 17 | 18 | func Test_Ctx_readBlock(t *testing.T) { 19 | reader := &ConsoleReader{ 20 | logger: zlogTest, 21 | tracer: tracerTest, 22 | 23 | readerProtocolVersion: "1.0", 24 | protoMessageType: "type.googleapis.com/sf.ethereum.type.v2.Block", 25 | } 26 | 27 | blockHash := "d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659" 28 | blockHashBytes, err := hex.DecodeString(blockHash) 29 | blockNumber := uint64(18571000) 30 | 31 | parentHash := "55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81" 32 | parentBlockNumber := 18570999 33 | 34 | libNumber := 18570800 35 | 36 | pbBlock := test.Block{ 37 | Hash: blockHashBytes, 38 | Number: blockNumber, 39 | } 40 | 41 | anypbBlock, err := anypb.New(&pbBlock) 42 | 43 | require.NoError(t, err) 44 | nowNano := time.Now().UnixNano() 45 | line := fmt.Sprintf( 46 | "%d %s %d %s %d %d %s", 47 | blockNumber, 48 | blockHash, 49 | parentBlockNumber, 50 | parentHash, 51 | libNumber, 52 | nowNano, 53 | base64.StdEncoding.EncodeToString(anypbBlock.Value), 54 | ) 55 | 56 | block, err := reader.readBlock(line) 57 | require.NoError(t, err) 58 | 59 | require.Equal(t, blockNumber, block.Number) 60 | require.Equal(t, blockHash, block.Id) 61 | require.Equal(t, parentHash, block.ParentId) 62 | require.Equal(t, uint64(libNumber), block.LibNum) 63 | require.Equal(t, int32(time.Unix(0, 
nowNano).Nanosecond()), block.Timestamp.Nanos) 64 | 65 | require.NoError(t, err) 66 | require.Equal(t, anypbBlock.GetValue(), block.Payload.Value) 67 | 68 | } 69 | 70 | func Test_GetNext(t *testing.T) { 71 | lines := make(chan string, 2) 72 | reader := newConsoleReader(lines, zlogTest, tracerTest) 73 | 74 | initLine := "FIRE INIT 1.0 sf.ethereum.type.v2.Block" 75 | blockLine := "FIRE BLOCK 18571000 d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659 18570999 55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81 18570800 1699992393935935000 Ci10eXBlLmdvb2dsZWFwaXMuY29tL3NmLmV0aGVyZXVtLnR5cGUudjIuQmxvY2sSJxIg0oNqcDoC88oqE/Be/ib8SMb6DbDXVKSeVrBm07fVRlkY+L3tCA==" 76 | 77 | lines <- initLine 78 | lines <- blockLine 79 | close(lines) 80 | 81 | block, err := reader.ReadBlock() 82 | require.NoError(t, err) 83 | 84 | require.Equal(t, uint64(18571000), block.Number) 85 | require.Equal(t, "d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659", block.Id) 86 | require.Equal(t, "55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81", block.ParentId) 87 | require.Equal(t, uint64(18570800), block.LibNum) 88 | require.Equal(t, int32(time.Unix(0, 1699992393935935000).Nanosecond()), block.Timestamp.Nanos) 89 | } 90 | -------------------------------------------------------------------------------- /constants.go: -------------------------------------------------------------------------------- 1 | package firecore 2 | 3 | // Those are `var` and globally available so that some chains to keep backward-compatibility can 4 | // change them. This is not advertised and should **not** be used by new chain. 
#!/usr/bin/env bash

# Developer helper: builds the firecore binary from the repository root and
# runs it with whatever arguments were passed to this script.

# Repository root (one level above this script's directory).
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )"

active_pid=
binary=firecore

main() {
  set -e

  # Embed the content of the .version file (when present) into the binary
  # through the main.Version ldflag.
  version="unknown"
  if [[ -f .version ]]; then
    version=`cat .version`
  fi

  pushd "$ROOT" &> /dev/null
    go install -ldflags "-X main.Version=$version" ./cmd/$binary
  popd &> /dev/null

  # When KILL_AFTER is set, run the binary in the background and send it a
  # TERM signal after that many seconds (useful for smoke runs); otherwise
  # replace this shell with the binary.
  if [[ $KILL_AFTER != "" ]]; then
    "`go env GOPATH`/bin/$binary" "$@" &
    active_pid=$!

    sleep $KILL_AFTER
    kill -s TERM $active_pid &> /dev/null || true
  else
    exec "`go env GOPATH`/bin/$binary" "$@"
  fi
}

main "$@"
44 | echo "" 45 | echo "Options" 46 | echo " -c Clean actual data directory first" 47 | } 48 | 49 | main "$@" -------------------------------------------------------------------------------- /devel/reader-stdin/reader-stdin.yaml: -------------------------------------------------------------------------------- 1 | start: 2 | args: 3 | - reader-node-stdin 4 | - merger 5 | - relayer 6 | - firehose 7 | - substreams-tier1 8 | - substreams-tier2 9 | flags: 10 | advertise-block-features: extended -------------------------------------------------------------------------------- /devel/reader-stdin/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | clean= 6 | firecore="$ROOT/../firecore" 7 | 8 | main() { 9 | pushd "$ROOT" &> /dev/null 10 | 11 | while getopts "hc" opt; do 12 | case $opt in 13 | h) usage && exit 0;; 14 | c) clean=true;; 15 | \?) usage_error "Invalid option: -$OPTARG";; 16 | esac 17 | done 18 | shift $((OPTIND-1)) 19 | [[ $1 = "--" ]] && shift 20 | 21 | set -e 22 | 23 | if [[ $clean == "true" ]]; then 24 | rm -rf firehose-data &> /dev/null || true 25 | fi 26 | 27 | cat "$ROOT/sample.eth.firelog" | $firecore -c $(basename $ROOT).yaml start "$@" 28 | } 29 | 30 | usage_error() { 31 | message="$1" 32 | exit_code="$2" 33 | 34 | echo "ERROR: $message" 35 | echo "" 36 | usage 37 | exit ${exit_code:-1} 38 | } 39 | 40 | usage() { 41 | echo "usage: start.sh [-c]" 42 | echo "" 43 | echo "Start $(basename $ROOT) environment." 
44 | echo "" 45 | echo "Options" 46 | echo " -c Clean actual data directory first" 47 | } 48 | 49 | main "$@" -------------------------------------------------------------------------------- /devel/standard/standard.yaml: -------------------------------------------------------------------------------- 1 | start: 2 | args: 3 | - reader-node 4 | - merger 5 | - relayer 6 | - firehose 7 | - substreams-tier1 8 | - substreams-tier2 9 | flags: 10 | advertise-block-id-encoding: "hex" 11 | advertise-chain-name: "acme-dummy-blockchain" 12 | # Specifies the path to the binary, we assume you did 13 | # `go install github.com/streamingfast/dummy-blockchain@latest` (and that you have value 14 | # of `go env GOPATH` in your environment). 15 | reader-node-path: "dummy-blockchain" 16 | reader-node-data-dir: "{data-dir}/reader-node" 17 | 18 | # Flags that will be added to the dummy chain process command 19 | reader-node-arguments: 20 | start 21 | --tracer=firehose 22 | --store-dir="{node-data-dir}" 23 | --block-rate=120 24 | --genesis-height=0 25 | --genesis-block-burst=100 26 | -------------------------------------------------------------------------------- /devel/standard/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | clean= 6 | firecore="$ROOT/../firecore" 7 | 8 | main() { 9 | pushd "$ROOT" &> /dev/null 10 | 11 | while getopts "hc" opt; do 12 | case $opt in 13 | h) usage && exit 0;; 14 | c) clean=true;; 15 | \?) 
usage_error "Invalid option: -$OPTARG";; 16 | esac 17 | done 18 | shift $((OPTIND-1)) 19 | [[ $1 = "--" ]] && shift 20 | 21 | set -e 22 | 23 | if [[ $clean == "true" ]]; then 24 | rm -rf firehose-data &> /dev/null || true 25 | fi 26 | 27 | exec $firecore -c $(basename $ROOT).yaml start "$@" 28 | } 29 | 30 | usage_error() { 31 | message="$1" 32 | exit_code="$2" 33 | 34 | echo "ERROR: $message" 35 | echo "" 36 | usage 37 | exit ${exit_code:-1} 38 | } 39 | 40 | usage() { 41 | echo "usage: start.sh [-c]" 42 | echo "" 43 | echo "Start $(basename $ROOT) environment." 44 | echo "" 45 | echo "Options" 46 | echo " -c Clean actual data directory first" 47 | } 48 | 49 | main "$@" -------------------------------------------------------------------------------- /devel/substreams/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | clean= 6 | firecore="$ROOT/../firecore" 7 | 8 | main() { 9 | pushd "$ROOT" &> /dev/null 10 | 11 | while getopts "hc" opt; do 12 | case $opt in 13 | h) usage && exit 0;; 14 | c) clean=true;; 15 | \?) usage_error "Invalid option: -$OPTARG";; 16 | esac 17 | done 18 | shift $((OPTIND-1)) 19 | [[ $1 = "--" ]] && shift 20 | 21 | set -e 22 | 23 | if [[ $clean == "true" ]]; then 24 | rm -rf firehose-data &> /dev/null || true 25 | fi 26 | 27 | exec $firecore -c $(basename $ROOT).yaml start "$@" 28 | } 29 | 30 | usage_error() { 31 | message="$1" 32 | exit_code="$2" 33 | 34 | echo "ERROR: $message" 35 | echo "" 36 | usage 37 | exit ${exit_code:-1} 38 | } 39 | 40 | usage() { 41 | echo "usage: start.sh [-c]" 42 | echo "" 43 | echo "Start $(basename $ROOT) environment." 
44 | echo "" 45 | echo "Options" 46 | echo " -c Clean actual data directory first" 47 | } 48 | 49 | main "$@" -------------------------------------------------------------------------------- /devel/substreams/substreams.yaml: -------------------------------------------------------------------------------- 1 | start: 2 | args: 3 | - substreams-tier1 4 | - substreams-tier2 5 | flags: 6 | common-live-blocks-addr: 7 | common-auth-plugin: trust:// 8 | common-merged-blocks-store-url: "$COMMON_MERGED_BLOCKS_STORE_URL" 9 | substreams-tier1-block-type: "sf.ethereum.type.v2.Block" 10 | # Also set FIRECORE_COMMON_FIRST_STREAMABLE_BLOCK to your local value, if not starting at 0 11 | ignore-advertise-validation: true 12 | substreams-tier1-grpc-listen-addr: :9000* 13 | substreams-tier1-subrequests-insecure: false 14 | substreams-tier1-subrequests-plaintext: true 15 | substreams-tier1-subrequests-endpoint: :9001 16 | 17 | substreams-tier1-global-worker-pool-address: :9002 18 | substreams-tier1-global-request-pool-address: :9002 19 | substreams-tier1-global-worker-pool-keep-alive-delay: 1s 20 | 21 | substreams-tier2-grpc-listen-addr: :9001 22 | -------------------------------------------------------------------------------- /docker/99-firehose-core.sh: -------------------------------------------------------------------------------- 1 | ## 2 | # This is place inside `/etc/profile.d/99-firehose-core.sh` 3 | # on built system an executed to provide message to use when they 4 | # connect on the box. 
5 | export PATH=$PATH:/app 6 | 7 | cat /etc/motd 8 | -------------------------------------------------------------------------------- /docker/motd: -------------------------------------------------------------------------------- 1 | _____ __ 2 | / __(_)______ / / ___ ___ ___ 3 | / _// / __/ -_) _ \/ _ \(_- g.hub.LowestBlockNum() { 52 | if blk := g.hub.GetBlock(num, id); blk != nil { 53 | reqLogger.Info("single block request", zap.String("source", "hub"), zap.Bool("found", true)) 54 | return blk, nil 55 | } 56 | reqLogger.Info("single block request", zap.String("source", "hub"), zap.Bool("found", false)) 57 | return nil, status.Error(codes.NotFound, "live block not found in hub") 58 | } 59 | 60 | mergedBlocksStore := g.mergedBlocksStore 61 | if clonable, ok := mergedBlocksStore.(dstore.Clonable); ok { 62 | var err error 63 | mergedBlocksStore, err = clonable.Clone(ctx, metering.WithBlockBytesReadMeteringOptions(dmetering.GetBytesMeter(ctx), logger)...) 64 | if err != nil { 65 | return nil, err 66 | } 67 | 68 | //todo: (deprecated) remove this 69 | mergedBlocksStore.SetMeter(dmetering.GetBytesMeter(ctx)) 70 | } 71 | 72 | // check for block in mergedBlocksStore 73 | err = derr.RetryContext(ctx, 3, func(ctx context.Context) error { 74 | blk, err := bstream.FetchBlockFromMergedBlocksStore(ctx, num, mergedBlocksStore) 75 | if err != nil { 76 | if errors.Is(err, dstore.ErrNotFound) { 77 | return derr.NewFatalError(err) 78 | } 79 | return err 80 | } 81 | if id == "" || blk.Id == id { 82 | reqLogger.Info("single block request", zap.String("source", "merged_blocks"), zap.Bool("found", true)) 83 | out = blk 84 | return nil 85 | } 86 | return derr.NewFatalError(fmt.Errorf("wrong block: found %s, expecting %s", blk.Id, id)) 87 | }) 88 | if out != nil { 89 | return out, nil 90 | } 91 | 92 | // check for block in forkedBlocksStore 93 | if g.forkedBlocksStore != nil { 94 | forkedBlocksStore := g.forkedBlocksStore 95 | if clonable, ok := forkedBlocksStore.(dstore.Clonable); ok { 96 
| var err error 97 | forkedBlocksStore, err = clonable.Clone(ctx, metering.WithForkedBlockBytesReadMeteringOptions(dmetering.GetBytesMeter(ctx), logger)...) 98 | if err != nil { 99 | return nil, err 100 | } 101 | 102 | //todo: (deprecated) remove this 103 | forkedBlocksStore.SetMeter(dmetering.GetBytesMeter(ctx)) 104 | } 105 | 106 | if blk, _ := bstream.FetchBlockFromOneBlockStore(ctx, num, id, forkedBlocksStore); blk != nil { 107 | reqLogger.Info("single block request", zap.String("source", "forked_blocks"), zap.Bool("found", true)) 108 | return blk, nil 109 | } 110 | } 111 | 112 | reqLogger.Info("single block request", zap.Bool("found", false), zap.Error(err)) 113 | return nil, status.Error(codes.NotFound, "block not found in files") 114 | } 115 | -------------------------------------------------------------------------------- /firehose/init_test.go: -------------------------------------------------------------------------------- 1 | package firehose 2 | 3 | import ( 4 | "github.com/streamingfast/logging" 5 | ) 6 | 7 | func init() { 8 | logging.InstantiateLoggers() 9 | } 10 | -------------------------------------------------------------------------------- /firehose/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/streamingfast/dmetrics" 5 | ) 6 | 7 | var Metricset = dmetrics.NewSet() 8 | 9 | var AppReadiness = Metricset.NewAppReadiness("firehose") 10 | var ActiveRequests = Metricset.NewGauge("firehose_active_requests", "Number of active requests") 11 | var RequestCounter = Metricset.NewCounter("firehose_requests_counter", "Request count") 12 | 13 | // var CurrentListeners = Metricset.NewGaugeVec("current_listeners", []string{"req_type"}, "...") 14 | // var TimedOutPushingTrxCount = Metricset.NewCounterVec("something", []string{"guarantee"}, "Number of requests for push_transaction timed out while submitting") 15 | 
-------------------------------------------------------------------------------- /firehose/rate/limiter.go: -------------------------------------------------------------------------------- 1 | package rate 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | type Limiter interface { 10 | Take(ctx context.Context, id string, method string) (allow bool) 11 | Return() 12 | String() string 13 | } 14 | 15 | type token bool 16 | 17 | type leakyBucketLimiter struct { 18 | tokens chan token 19 | 20 | dripInterval time.Duration 21 | } 22 | 23 | func NewLeakyBucketLimiter(size int, dripInterval time.Duration) Limiter { 24 | tks := make(chan token, size) 25 | for i := 0; i < size; i++ { 26 | tks <- token(true) 27 | } 28 | 29 | go func() { 30 | for { 31 | select { 32 | case <-time.After(dripInterval): 33 | select { 34 | case tks <- token(true): 35 | // 36 | default: 37 | // 38 | } 39 | } 40 | } 41 | }() 42 | 43 | return &leakyBucketLimiter{ 44 | tokens: tks, 45 | dripInterval: dripInterval, 46 | } 47 | } 48 | 49 | func (l *leakyBucketLimiter) Take(ctx context.Context, id string, method string) (allow bool) { 50 | select { 51 | case <-l.tokens: 52 | return true 53 | case <-ctx.Done(): 54 | return false 55 | default: 56 | return false 57 | } 58 | } 59 | 60 | func (l *leakyBucketLimiter) Return() { 61 | select { 62 | case l.tokens <- token(true): 63 | // 64 | default: 65 | // 66 | } 67 | } 68 | 69 | func (l *leakyBucketLimiter) String() string { 70 | return fmt.Sprintf("leaky-bucket-limiter(len=%d, cap=%d, drip-interval=%s)", len(l.tokens), cap(l.tokens), l.dripInterval) 71 | } 72 | -------------------------------------------------------------------------------- /firehose/server/errors.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | type ErrSendBlock struct { 8 | inner error 9 | } 10 | 11 | func NewErrSendBlock(inner error) ErrSendBlock { 12 | return ErrSendBlock{ 13 | 
inner: inner, 14 | } 15 | } 16 | 17 | func (e ErrSendBlock) Error() string { 18 | return fmt.Sprintf("send error: %s", e.inner) 19 | } 20 | -------------------------------------------------------------------------------- /flags.go: -------------------------------------------------------------------------------- 1 | package firecore 2 | 3 | import "github.com/spf13/cobra" 4 | 5 | // globalFlagsHiddenOnChildCmd represents the list of global flags that should be hidden on child commands 6 | var globalFlagsHiddenOnChildCmd = []string{ 7 | "log-level-switcher-listen-addr", 8 | "metrics-listen-addr", 9 | "pprof-listen-addr", 10 | "startup-delay", 11 | } 12 | 13 | func HideGlobalFlagsOnChildCmd(cmd *cobra.Command) { 14 | actual := cmd.HelpFunc() 15 | cmd.SetHelpFunc(func(command *cobra.Command, strings []string) { 16 | for _, flag := range globalFlagsHiddenOnChildCmd { 17 | command.Flags().MarkHidden(flag) 18 | } 19 | 20 | actual(command, strings) 21 | }) 22 | } 23 | -------------------------------------------------------------------------------- /index-builder/app/index-builder/app.go: -------------------------------------------------------------------------------- 1 | package index_builder 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/streamingfast/bstream" 8 | "github.com/streamingfast/dgrpc" 9 | "github.com/streamingfast/dmetrics" 10 | "github.com/streamingfast/dstore" 11 | index_builder "github.com/streamingfast/firehose-core/index-builder" 12 | "github.com/streamingfast/firehose-core/index-builder/metrics" 13 | "github.com/streamingfast/shutter" 14 | "go.uber.org/zap" 15 | pbhealth "google.golang.org/grpc/health/grpc_health_v1" 16 | ) 17 | 18 | type Config struct { 19 | BlockHandler bstream.Handler 20 | StartBlockResolver func(ctx context.Context) (uint64, error) 21 | EndBlock uint64 22 | MergedBlocksStoreURL string 23 | ForkedBlocksStoreURL string 24 | GRPCListenAddr string 25 | } 26 | 27 | type App struct { 28 | *shutter.Shutter 29 | config *Config 
30 | readinessProbe pbhealth.HealthClient 31 | } 32 | 33 | func New(config *Config) *App { 34 | return &App{ 35 | Shutter: shutter.New(), 36 | config: config, 37 | } 38 | } 39 | 40 | func (a *App) Run() error { 41 | blockStore, err := dstore.NewDBinStore(a.config.MergedBlocksStoreURL) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | ctx, cancel := context.WithCancel(context.Background()) 47 | a.OnTerminating(func(error) { 48 | cancel() 49 | }) 50 | 51 | startBlock, err := a.config.StartBlockResolver(ctx) 52 | if err != nil { 53 | return fmt.Errorf("resolve start block: %w", err) 54 | } 55 | 56 | indexBuilder := index_builder.NewIndexBuilder( 57 | zlog, 58 | a.config.BlockHandler, 59 | startBlock, 60 | a.config.EndBlock, 61 | blockStore, 62 | ) 63 | 64 | gs, err := dgrpc.NewInternalClient(a.config.GRPCListenAddr) 65 | if err != nil { 66 | return fmt.Errorf("cannot create readiness probe") 67 | } 68 | a.readinessProbe = pbhealth.NewHealthClient(gs) 69 | 70 | dmetrics.Register(metrics.MetricSet) 71 | 72 | a.OnTerminating(indexBuilder.Shutdown) 73 | indexBuilder.OnTerminated(a.Shutdown) 74 | 75 | go indexBuilder.Launch() 76 | 77 | zlog.Info("index builder running") 78 | return nil 79 | } 80 | 81 | func (a *App) IsReady() bool { 82 | if a.readinessProbe == nil { 83 | return false 84 | } 85 | 86 | resp, err := a.readinessProbe.Check(context.Background(), &pbhealth.HealthCheckRequest{}) 87 | if err != nil { 88 | zlog.Info("index-builder readiness probe error", zap.Error(err)) 89 | return false 90 | } 91 | 92 | if resp.Status == pbhealth.HealthCheckResponse_SERVING { 93 | return true 94 | } 95 | 96 | return false 97 | } 98 | -------------------------------------------------------------------------------- /index-builder/app/index-builder/logging.go: -------------------------------------------------------------------------------- 1 | package index_builder 2 | 3 | import ( 4 | "github.com/streamingfast/logging" 5 | ) 6 | 7 | var zlog, tracer = 
logging.PackageLogger("index-builder", "github.com/streamingfast/firehose-core/index-builder/app/index-builder") 8 | -------------------------------------------------------------------------------- /index-builder/healthz.go: -------------------------------------------------------------------------------- 1 | package index_builder 2 | 3 | // Copyright 2019 dfuse Platform Inc. 4 | // 5 | // Licensed under the Apache License, Version 2.0 (the "License"); 6 | // you may not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, software 12 | // distributed under the License is distributed on an "AS IS" BASIS, 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | // See the License for the specific language governing permissions and 15 | // limitations under the License. 16 | 17 | import ( 18 | "context" 19 | 20 | pbhealth "google.golang.org/grpc/health/grpc_health_v1" 21 | ) 22 | 23 | // Check is basic GRPC Healthcheck 24 | func (app *IndexBuilder) Check(ctx context.Context, in *pbhealth.HealthCheckRequest) (*pbhealth.HealthCheckResponse, error) { 25 | status := pbhealth.HealthCheckResponse_SERVING 26 | return &pbhealth.HealthCheckResponse{ 27 | Status: status, 28 | }, nil 29 | } 30 | 31 | // Watch is basic GRPC Healthcheck as a stream 32 | func (app *IndexBuilder) Watch(req *pbhealth.HealthCheckRequest, stream pbhealth.Health_WatchServer) error { 33 | err := stream.Send(&pbhealth.HealthCheckResponse{ 34 | Status: pbhealth.HealthCheckResponse_SERVING, 35 | }) 36 | if err != nil { 37 | return err 38 | } 39 | 40 | <-stream.Context().Done() 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /index-builder/index-builder.go: -------------------------------------------------------------------------------- 1 | 
package index_builder 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | 8 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 9 | firecore "github.com/streamingfast/firehose-core" 10 | 11 | "github.com/streamingfast/bstream" 12 | "github.com/streamingfast/bstream/stream" 13 | "github.com/streamingfast/dstore" 14 | "github.com/streamingfast/firehose-core/index-builder/metrics" 15 | pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" 16 | "github.com/streamingfast/shutter" 17 | "go.uber.org/zap" 18 | ) 19 | 20 | type IndexBuilder struct { 21 | *shutter.Shutter 22 | logger *zap.Logger 23 | 24 | startBlockNum uint64 25 | stopBlockNum uint64 26 | 27 | handler bstream.Handler 28 | 29 | blocksStore dstore.Store 30 | } 31 | 32 | func NewIndexBuilder(logger *zap.Logger, handler bstream.Handler, startBlockNum, stopBlockNum uint64, blockStore dstore.Store) *IndexBuilder { 33 | return &IndexBuilder{ 34 | Shutter: shutter.New(), 35 | startBlockNum: startBlockNum, 36 | stopBlockNum: stopBlockNum, 37 | handler: handler, 38 | blocksStore: blockStore, 39 | 40 | logger: logger, 41 | } 42 | } 43 | 44 | func (app *IndexBuilder) Launch() { 45 | err := app.launch() 46 | if errors.Is(err, stream.ErrStopBlockReached) { 47 | app.logger.Info("index builder reached stop block", zap.Uint64("stop_block_num", app.stopBlockNum)) 48 | err = nil 49 | } 50 | app.logger.Info("index builder exited", zap.Error(err)) 51 | app.Shutdown(err) 52 | } 53 | 54 | func (app *IndexBuilder) launch() error { 55 | startBlockNum := app.startBlockNum 56 | stopBlockNum := app.stopBlockNum 57 | 58 | streamFactory := firecore.NewStreamFactory( 59 | app.blocksStore, 60 | nil, 61 | nil, 62 | nil, 63 | ) 64 | ctx := context.Background() 65 | 66 | req := &pbfirehose.Request{ 67 | StartBlockNum: int64(startBlockNum), 68 | StopBlockNum: stopBlockNum, 69 | FinalBlocksOnly: true, 70 | } 71 | 72 | handlerFunc := func(block *pbbstream.Block, obj interface{}) error { 73 | app.logger.Debug("handling 
block", zap.Uint64("block_num", block.Number)) 74 | 75 | metrics.HeadBlockNumber.SetUint64(block.Number) 76 | metrics.HeadBlockTimeDrift.SetBlockTime(block.Time()) 77 | metrics.AppReadiness.SetReady() 78 | 79 | app.logger.Debug("updated head block metrics", zap.Uint64("block_num", block.Number), zap.Time("block_time", block.Time())) 80 | 81 | return app.handler.ProcessBlock(block, obj) 82 | } 83 | 84 | stream, err := streamFactory.New( 85 | ctx, 86 | bstream.HandlerFunc(handlerFunc), 87 | req, 88 | app.logger, 89 | ) 90 | 91 | if err != nil { 92 | return fmt.Errorf("getting firehose stream: %w", err) 93 | } 94 | 95 | return stream.Run(ctx) 96 | } 97 | -------------------------------------------------------------------------------- /index-builder/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import "github.com/streamingfast/dmetrics" 4 | 5 | var MetricSet = dmetrics.NewSet() 6 | 7 | var HeadBlockTimeDrift = MetricSet.NewHeadTimeDrift("block-indexer") 8 | var HeadBlockNumber = MetricSet.NewHeadBlockNumber("block-indexer") 9 | var AppReadiness = MetricSet.NewAppReadiness("block-indexer") 10 | -------------------------------------------------------------------------------- /internal/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "os" 5 | "strconv" 6 | 7 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 8 | ) 9 | 10 | func GetEnvForceFinalityAfterBlocks() *uint64 { 11 | if fin := os.Getenv("FORCE_FINALITY_AFTER_BLOCKS"); fin != "" { 12 | if fin64, err := strconv.ParseInt(fin, 10, 64); err == nil { 13 | finu64 := uint64(fin64) 14 | return &finu64 15 | } 16 | } 17 | return nil 18 | } 19 | 20 | func TweakBlockFinality(blk *pbbstream.Block, maxDistanceToBlock uint64) { 21 | if blk.LibNum > blk.Number { 22 | panic("libnum cannot be greater than block number") 23 | } 24 | if blk.Number < 
maxDistanceToBlock { 25 | return // prevent uin64 underflow at the beginning of the chain 26 | } 27 | if (blk.Number - blk.LibNum) >= maxDistanceToBlock { 28 | blk.LibNum = blk.Number - maxDistanceToBlock // force finality 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /internal/utils/utils_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strconv" 7 | "testing" 8 | 9 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestGetEnvForceFinalityAfterBlocks(t *testing.T) { 14 | // Set up test case 15 | expected := uint64(10) 16 | os.Setenv("FORCE_FINALITY_AFTER_BLOCKS", strconv.FormatUint(expected, 10)) 17 | defer os.Unsetenv("FORCE_FINALITY_AFTER_BLOCKS") 18 | 19 | // Call the function 20 | result := GetEnvForceFinalityAfterBlocks() 21 | 22 | // Check the result 23 | if result == nil { 24 | t.Errorf("Expected non-nil result, got nil") 25 | } else if *result != expected { 26 | t.Errorf("Expected %d, got %d", expected, *result) 27 | } 28 | } 29 | func TestTweakBlockFinality(t *testing.T) { 30 | // Define test cases 31 | testCases := []struct { 32 | blk *pbbstream.Block 33 | maxDistanceToBlock uint64 34 | expectedLibNum uint64 35 | }{ 36 | { 37 | blk: &pbbstream.Block{ 38 | Number: 100, 39 | LibNum: 80, 40 | }, 41 | maxDistanceToBlock: 10, 42 | expectedLibNum: 90, 43 | }, 44 | { 45 | blk: &pbbstream.Block{ 46 | Number: 100, 47 | LibNum: 80, 48 | }, 49 | maxDistanceToBlock: 200, 50 | expectedLibNum: 80, 51 | }, 52 | { 53 | blk: &pbbstream.Block{ 54 | Number: 100, 55 | LibNum: 0, 56 | }, 57 | maxDistanceToBlock: 200, 58 | expectedLibNum: 0, 59 | }, 60 | { 61 | blk: &pbbstream.Block{ 62 | Number: 100, 63 | LibNum: 0, 64 | }, 65 | maxDistanceToBlock: 10, 66 | expectedLibNum: 90, 67 | }, 68 | } 69 | 70 | for i, tc := range testCases { 71 | 
// AppDef describes an application that can be registered with the launcher:
// its identity, human-readable description, and the hooks used to register
// flags, initialize it and build the running App instance.
type AppDef struct {
	// ID is the identifier used to reference the app.
	ID string
	// Title is a short human-readable name for the app.
	Title string
	// Description is a longer human-readable description.
	Description string
	// RegisterFlags registers the app's command-line flags on cmd.
	RegisterFlags func(cmd *cobra.Command) error
	// InitFunc runs initialization against the shared runtime before the app is built.
	InitFunc func(runtime *Runtime) error
	// FactoryFunc builds the App instance from the shared runtime.
	FactoryFunc func(runtime *Runtime) (App, error)
}

// String returns "<ID> (<Title>)", suitable for logs and error messages.
func (a *AppDef) String() string {
	return fmt.Sprintf("%s (%s)", a.ID, a.Title)
}

// App is the lifecycle contract implemented by every launchable application
// (in this codebase typically satisfied by embedding *shutter.Shutter).
type App interface {
	Terminating() <-chan struct{}
	Terminated() <-chan struct{}
	Shutdown(err error)
	Err() error
	Run() error
}

//go:generate go-enum -f=$GOFILE --marshal --names

// ENUM(
//
// NotFound
// Created
// Running
// Warning
// Stopped
//
// )
// AppStatus is the generated enum (see app_enum.go) describing an app's
// current lifecycle state.
type AppStatus uint

// AppInfo pairs an app ID with its current lifecycle status.
type AppInfo struct {
	ID     string
	Status AppStatus
}
22 | AppStatusWarning 23 | // AppStatusStopped is a AppStatus of type Stopped. 24 | AppStatusStopped 25 | ) 26 | 27 | var ErrInvalidAppStatus = fmt.Errorf("not a valid AppStatus, try [%s]", strings.Join(_AppStatusNames, ", ")) 28 | 29 | const _AppStatusName = "NotFoundCreatedRunningWarningStopped" 30 | 31 | var _AppStatusNames = []string{ 32 | _AppStatusName[0:8], 33 | _AppStatusName[8:15], 34 | _AppStatusName[15:22], 35 | _AppStatusName[22:29], 36 | _AppStatusName[29:36], 37 | } 38 | 39 | // AppStatusNames returns a list of possible string values of AppStatus. 40 | func AppStatusNames() []string { 41 | tmp := make([]string, len(_AppStatusNames)) 42 | copy(tmp, _AppStatusNames) 43 | return tmp 44 | } 45 | 46 | var _AppStatusMap = map[AppStatus]string{ 47 | AppStatusNotFound: _AppStatusName[0:8], 48 | AppStatusCreated: _AppStatusName[8:15], 49 | AppStatusRunning: _AppStatusName[15:22], 50 | AppStatusWarning: _AppStatusName[22:29], 51 | AppStatusStopped: _AppStatusName[29:36], 52 | } 53 | 54 | // String implements the Stringer interface. 55 | func (x AppStatus) String() string { 56 | if str, ok := _AppStatusMap[x]; ok { 57 | return str 58 | } 59 | return fmt.Sprintf("AppStatus(%d)", x) 60 | } 61 | 62 | // IsValid provides a quick way to determine if the typed value is 63 | // part of the allowed enumerated values 64 | func (x AppStatus) IsValid() bool { 65 | _, ok := _AppStatusMap[x] 66 | return ok 67 | } 68 | 69 | var _AppStatusValue = map[string]AppStatus{ 70 | _AppStatusName[0:8]: AppStatusNotFound, 71 | _AppStatusName[8:15]: AppStatusCreated, 72 | _AppStatusName[15:22]: AppStatusRunning, 73 | _AppStatusName[22:29]: AppStatusWarning, 74 | _AppStatusName[29:36]: AppStatusStopped, 75 | } 76 | 77 | // ParseAppStatus attempts to convert a string to a AppStatus. 
78 | func ParseAppStatus(name string) (AppStatus, error) { 79 | if x, ok := _AppStatusValue[name]; ok { 80 | return x, nil 81 | } 82 | return AppStatus(0), fmt.Errorf("%s is %w", name, ErrInvalidAppStatus) 83 | } 84 | 85 | // MarshalText implements the text marshaller method. 86 | func (x AppStatus) MarshalText() ([]byte, error) { 87 | return []byte(x.String()), nil 88 | } 89 | 90 | // UnmarshalText implements the text unmarshaller method. 91 | func (x *AppStatus) UnmarshalText(text []byte) error { 92 | name := string(text) 93 | tmp, err := ParseAppStatus(name) 94 | if err != nil { 95 | return err 96 | } 97 | *x = tmp 98 | return nil 99 | } 100 | -------------------------------------------------------------------------------- /launcher/config.go: -------------------------------------------------------------------------------- 1 | package launcher 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "gopkg.in/yaml.v2" 8 | ) 9 | 10 | var Config map[string]*CommandConfig 11 | 12 | type CommandConfig struct { 13 | Args []string `json:"args"` 14 | Flags map[string]any `json:"flags"` 15 | } 16 | 17 | // Load reads a YAML config, and sets the global DfuseConfig variable 18 | // Use the raw JSON form to provide to the 19 | // different plugins and apps for them to load their config. 
20 | func LoadConfigFile(filename string) (err error) { 21 | yamlBytes, err := os.ReadFile(filename) 22 | if err != nil { 23 | return err 24 | } 25 | 26 | err = yaml.Unmarshal(yamlBytes, &Config) 27 | if err != nil { 28 | return fmt.Errorf("reading json: %s", err) 29 | } 30 | 31 | return nil 32 | } 33 | -------------------------------------------------------------------------------- /launcher/init_test.go: -------------------------------------------------------------------------------- 1 | package launcher 2 | 3 | import "github.com/streamingfast/logging" 4 | 5 | var zlog, _ = logging.PackageLogger("launcher", "github.com/streamingfast/firehose-core/launcher") 6 | 7 | func init() { 8 | logging.InstantiateLoggers() 9 | } 10 | -------------------------------------------------------------------------------- /launcher/logging.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package launcher 16 | 17 | import ( 18 | "path/filepath" 19 | 20 | "github.com/streamingfast/logging" 21 | "go.uber.org/zap" 22 | ) 23 | 24 | const DefaultLogFile = "app.log.json" 25 | 26 | type LoggingOptions struct { 27 | WorkingDir string // the folder where the data will be stored, in our case will be used to store the logger 28 | Verbosity int // verbosity level 29 | LogFormat string // specifies the log format 30 | LogToFile bool // specifies if we should store the logs on disk 31 | LogListenAddr string // address that listens to change the logs 32 | LogToStderr bool // determines if the standard console logger should log to Stderr (defaults is to log in Stdout) 33 | } 34 | 35 | func SetupLogger(rootLogger *zap.Logger, opts *LoggingOptions) { 36 | options := []logging.InstantiateOption{ 37 | logging.WithLogLevelSwitcherServerAutoStart(), 38 | logging.WithDefaultSpec(defaultSpecForVerbosity(opts.Verbosity)...), 39 | logging.WithConsoleToStdout(), 40 | } 41 | 42 | if opts.LogToStderr { 43 | options = append(options, logging.WithConsoleToStderr()) 44 | } 45 | 46 | if opts.LogListenAddr != "" { 47 | options = append(options, logging.WithLogLevelSwitcherServerListeningAddress(opts.LogListenAddr)) 48 | } 49 | 50 | if opts.LogFormat == "stackdriver" || opts.LogFormat == "json" { 51 | options = append(options, logging.WithProductionLogger()) 52 | } 53 | 54 | if opts.LogToFile { 55 | options = append(options, logging.WithOutputToFile(filepath.Join(opts.WorkingDir, DefaultLogFile))) 56 | } 57 | 58 | logging.InstantiateLoggers(options...) 
59 | 60 | // Hijack standard Golang `log` and redirect it to our common logger 61 | zap.RedirectStdLogAt(rootLogger, zap.DebugLevel) 62 | } 63 | 64 | func defaultSpecForVerbosity(verbosity int) []string { 65 | switch verbosity { 66 | case 0: 67 | return nil 68 | 69 | case 1: 70 | return []string{"*=info"} 71 | 72 | default: 73 | return []string{"*=debug"} 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /launcher/readiness.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package launcher 16 | 17 | import ( 18 | "sync" 19 | 20 | "go.uber.org/zap" 21 | ) 22 | 23 | type subscription struct { 24 | IncomingAppInfo chan *AppInfo 25 | Closed bool 26 | QuitOnce sync.Once 27 | 28 | logger *zap.Logger 29 | } 30 | 31 | func newSubscription(logger *zap.Logger, chanSize int) (out *subscription) { 32 | return &subscription{ 33 | IncomingAppInfo: make(chan *AppInfo, chanSize), 34 | } 35 | } 36 | 37 | func (s *subscription) Push(app *AppInfo) { 38 | if s.Closed { 39 | return 40 | } 41 | 42 | s.logger.Debug("pushing app readiness state to subscriber", 43 | zap.Reflect("response", app), 44 | ) 45 | if len(s.IncomingAppInfo) == cap(s.IncomingAppInfo) { 46 | s.QuitOnce.Do(func() { 47 | s.logger.Debug("reach max buffer size for readiness stream, closing channel") 48 | close(s.IncomingAppInfo) 49 | s.Closed = true 50 | }) 51 | return 52 | } 53 | 54 | // Clean up 55 | s.IncomingAppInfo <- app 56 | } 57 | -------------------------------------------------------------------------------- /launcher/registry.go: -------------------------------------------------------------------------------- 1 | package launcher 2 | 3 | import ( 4 | "sort" 5 | "strings" 6 | 7 | "github.com/spf13/cobra" 8 | "go.uber.org/zap" 9 | ) 10 | 11 | var AppRegistry = map[string]*AppDef{} 12 | 13 | func RegisterApp(logger *zap.Logger, appDef *AppDef) { 14 | logger.Debug("registering app", zap.Stringer("app", appDef)) 15 | AppRegistry[appDef.ID] = appDef 16 | } 17 | 18 | var RegisterCommonFlags func(logger *zap.Logger, cmd *cobra.Command) error 19 | 20 | func RegisterFlags(logger *zap.Logger, cmd *cobra.Command) error { 21 | for _, appDef := range AppRegistry { 22 | logger.Debug("trying to register flags", zap.String("app_id", appDef.ID)) 23 | if appDef.RegisterFlags != nil { 24 | logger.Debug("found non nil flags, registering", zap.String("app_id", appDef.ID)) 25 | err := appDef.RegisterFlags(cmd) 26 | if err != nil { 27 | return err 28 | } 29 | } 30 | } 31 | 32 | if 
RegisterCommonFlags != nil { 33 | if err := RegisterCommonFlags(logger, cmd); err != nil { 34 | return err 35 | } 36 | } 37 | 38 | return nil 39 | } 40 | 41 | func ParseAppsFromArgs(args []string, runByDefault func(string) bool) (apps []string) { 42 | if len(args) == 0 { 43 | return ParseAppsFromArgs([]string{"all"}, runByDefault) 44 | } 45 | 46 | for _, arg := range args { 47 | chunks := strings.Split(arg, ",") 48 | for _, app := range chunks { 49 | app = strings.TrimSpace(app) 50 | if app == "all" { 51 | for app := range AppRegistry { 52 | if !runByDefault(app) { 53 | continue 54 | } 55 | apps = append(apps, app) 56 | } 57 | } else { 58 | if strings.HasPrefix(app, "-") { 59 | removeApp := app[1:] 60 | apps = removeElement(apps, removeApp) 61 | } else { 62 | apps = append(apps, app) 63 | } 64 | } 65 | 66 | } 67 | } 68 | 69 | sort.Strings(apps) 70 | 71 | return 72 | } 73 | 74 | func removeElement(lst []string, el string) (out []string) { 75 | for _, l := range lst { 76 | if l != el { 77 | out = append(out, l) 78 | } 79 | } 80 | return 81 | } 82 | -------------------------------------------------------------------------------- /launcher/registry_test.go: -------------------------------------------------------------------------------- 1 | package launcher 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestParseFromArgs(t *testing.T) { 11 | tests := []struct { 12 | name string 13 | input []string 14 | expect []string 15 | }{ 16 | { 17 | input: []string{"all,-app2"}, 18 | expect: []string{"app1", "app3"}, 19 | }, 20 | { 21 | input: []string{"all", " -app2 "}, 22 | expect: []string{"app1", "app3"}, 23 | }, 24 | { 25 | input: []string{"all"}, 26 | expect: []string{"app1", "app2", "app3"}, 27 | }, 28 | { 29 | input: []string{" app1", " app2"}, 30 | expect: []string{"app1", "app2"}, 31 | }, 32 | { 33 | input: []string{" app1, app2"}, 34 | expect: []string{"app1", "app2"}, 35 | }, 36 | { 37 | input: []string{"app2", 
"appnodefault"}, 38 | expect: []string{"app2", "appnodefault"}, 39 | }, 40 | } 41 | 42 | RegisterApp(zlog, &AppDef{ID: "app1"}) 43 | RegisterApp(zlog, &AppDef{ID: "app2"}) 44 | RegisterApp(zlog, &AppDef{ID: "app3"}) 45 | RegisterApp(zlog, &AppDef{ID: "appnodefault"}) 46 | 47 | for idx, test := range tests { 48 | t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { 49 | res := ParseAppsFromArgs(test.input, func(app string) bool { 50 | if app == "appnodefault" { 51 | return false 52 | } 53 | return true 54 | }) 55 | assert.Equal(t, test.expect, res) 56 | }) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /launcher/runtime.go: -------------------------------------------------------------------------------- 1 | package launcher 2 | 3 | import "github.com/streamingfast/firehose-core/firehose/info" 4 | 5 | type Runtime struct { 6 | AbsDataDir string 7 | InfoServer *info.InfoServer 8 | 9 | // IsPendingShutdown is a function that is going to return true as soon as the initial SIGINT signal is 10 | // received which can be used to turn a healthz monitor as unhealthy so that a load balancer can 11 | // remove the node from the pool and has 'common-system-shutdown-signal-delay' to do it. 12 | IsPendingShutdown func() bool 13 | } 14 | -------------------------------------------------------------------------------- /launcher/tracing.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package launcher 16 | 17 | import ( 18 | "github.com/streamingfast/derr" 19 | "github.com/streamingfast/dtracing" 20 | "go.opencensus.io/trace" 21 | ) 22 | 23 | func SetupTracing(name string) { 24 | err := dtracing.SetupTracing(name, trace.ProbabilitySampler(1/8.0)) 25 | derr.Check("unable to setup tracing correctly", err) 26 | } 27 | -------------------------------------------------------------------------------- /merged_blocks_writer.go: -------------------------------------------------------------------------------- 1 | package firecore 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | 8 | "github.com/spf13/cobra" 9 | "github.com/streamingfast/bstream" 10 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 11 | "github.com/streamingfast/dstore" 12 | "go.uber.org/zap" 13 | ) 14 | 15 | type MergedBlocksWriter struct { 16 | Store dstore.Store 17 | LowBlockNum uint64 18 | StopBlockNum uint64 19 | 20 | blocks []*pbbstream.Block 21 | Logger *zap.Logger 22 | Cmd *cobra.Command 23 | 24 | TweakBlock func(*pbbstream.Block) (*pbbstream.Block, error) 25 | } 26 | 27 | func (w *MergedBlocksWriter) ProcessBlock(blk *pbbstream.Block, obj interface{}) error { 28 | if w.TweakBlock != nil { 29 | b, err := w.TweakBlock(blk) 30 | if err != nil { 31 | return fmt.Errorf("tweaking block: %w", err) 32 | } 33 | blk = b 34 | } 35 | 36 | if w.LowBlockNum == 0 && blk.Number > 99 { // initial block 37 | if blk.Number%100 != 0 && blk.Number != bstream.GetProtocolFirstStreamableBlock { 38 | return fmt.Errorf("received 
unexpected block %s (not a boundary, not the first streamable block %d)", blk, bstream.GetProtocolFirstStreamableBlock) 39 | } 40 | w.LowBlockNum = LowBoundary(blk.Number) 41 | w.Logger.Debug("setting initial boundary to %d upon seeing block %s", zap.Uint64("low_boundary", w.LowBlockNum), zap.Uint64("blk_num", blk.Number)) 42 | } 43 | 44 | if blk.Number > w.LowBlockNum+99 { 45 | w.Logger.Debug("bundling because we saw block %s from next bundle (%d was not seen, it must not exist on this chain)", zap.Uint64("blk_num", blk.Number), zap.Uint64("last_bundle_block", w.LowBlockNum+99)) 46 | if err := w.WriteBundle(); err != nil { 47 | return err 48 | } 49 | } 50 | 51 | if w.StopBlockNum > 0 && blk.Number >= w.StopBlockNum { 52 | return io.EOF 53 | } 54 | 55 | w.blocks = append(w.blocks, blk) 56 | 57 | if blk.Number == w.LowBlockNum+99 { 58 | w.Logger.Debug("bundling on last bundle block", zap.Uint64("last_bundle_block", w.LowBlockNum+99)) 59 | if err := w.WriteBundle(); err != nil { 60 | return err 61 | } 62 | return nil 63 | } 64 | 65 | return nil 66 | } 67 | 68 | func (w *MergedBlocksWriter) WriteBundle() error { 69 | file := filename(w.LowBlockNum) 70 | w.Logger.Info("writing merged file to store (suffix: .dbin.zst)", zap.String("filename", file), zap.Uint64("lowBlockNum", w.LowBlockNum)) 71 | 72 | if len(w.blocks) == 0 { 73 | return fmt.Errorf("no blocks to write to bundle") 74 | } 75 | 76 | pr, pw := io.Pipe() 77 | 78 | go func() { 79 | var err error 80 | defer func() { 81 | pw.CloseWithError(err) 82 | }() 83 | 84 | blockWriter, err := bstream.NewDBinBlockWriter(pw) 85 | if err != nil { 86 | return 87 | } 88 | 89 | for _, blk := range w.blocks { 90 | err = blockWriter.Write(blk) 91 | if err != nil { 92 | return 93 | } 94 | } 95 | }() 96 | 97 | err := w.Store.WriteObject(context.Background(), file, pr) 98 | if err != nil { 99 | w.Logger.Error("writing to store", zap.Error(err)) 100 | } 101 | 102 | w.LowBlockNum += 100 103 | w.blocks = nil 104 | 105 | return err 106 | 
} 107 | func filename(num uint64) string { 108 | return fmt.Sprintf("%010d", num) 109 | } 110 | 111 | func LowBoundary(i uint64) uint64 { 112 | return i - (i % 100) 113 | } 114 | -------------------------------------------------------------------------------- /merger/app/merger/logging.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package merger 16 | 17 | import ( 18 | "github.com/streamingfast/logging" 19 | ) 20 | 21 | var zlog, tracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger/app/merger") 22 | -------------------------------------------------------------------------------- /merger/consts.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package merger 16 | 17 | import "time" 18 | 19 | var ListFilesTimeout = 10 * time.Minute 20 | var WriteObjectTimeout = 5 * time.Minute 21 | var GetObjectTimeout = 5 * time.Minute 22 | var DeleteObjectTimeout = 5 * time.Minute 23 | 24 | const ParallelOneBlockDownload = 2 25 | -------------------------------------------------------------------------------- /merger/healthz.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package merger 16 | 17 | import ( 18 | "context" 19 | 20 | pbhealth "google.golang.org/grpc/health/grpc_health_v1" 21 | ) 22 | 23 | // Check is basic GRPC Healthcheck 24 | func (m *Merger) Check(ctx context.Context, in *pbhealth.HealthCheckRequest) (*pbhealth.HealthCheckResponse, error) { 25 | status := pbhealth.HealthCheckResponse_SERVING 26 | return &pbhealth.HealthCheckResponse{ 27 | Status: status, 28 | }, nil 29 | } 30 | 31 | func (m *Merger) List(ctx context.Context, in *pbhealth.HealthListRequest) (*pbhealth.HealthListResponse, error) { 32 | status := pbhealth.HealthCheckResponse_SERVING 33 | return &pbhealth.HealthListResponse{ 34 | Statuses: map[string]*pbhealth.HealthCheckResponse{ 35 | "merger": &pbhealth.HealthCheckResponse{ 36 | Status: status, 37 | }, 38 | }, 39 | }, nil 40 | } 41 | 42 | // Watch is basic GRPC Healthcheck as a stream 43 | func (m *Merger) Watch(req *pbhealth.HealthCheckRequest, stream pbhealth.Health_WatchServer) error { 44 | err := stream.Send(&pbhealth.HealthCheckResponse{ 45 | Status: pbhealth.HealthCheckResponse_SERVING, 46 | }) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | // The merger is always serving, so just want until this stream is canceled out 52 | <-stream.Context().Done() 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /merger/healthz_test.go: -------------------------------------------------------------------------------- 1 | package merger 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/require" 9 | pbhealth "google.golang.org/grpc/health/grpc_health_v1" 10 | ) 11 | 12 | func TestHealthz_Check(t *testing.T) { 13 | ctx := context.Background() 14 | m := NewMerger( 15 | testLogger, 16 | "6969", 17 | nil, 18 | 1, 19 | 100, 20 | 100, 21 | time.Second, 22 | time.Second, 23 | 0, 24 | ) 25 | request := &pbhealth.HealthCheckRequest{} 26 | resp, err := m.Check(ctx, request) 27 | if err != nil { 28 | 
panic(err) 29 | } 30 | 31 | require.Equal(t, resp.Status, pbhealth.HealthCheckResponse_SERVING) 32 | } 33 | -------------------------------------------------------------------------------- /merger/init_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package merger 16 | 17 | import ( 18 | "github.com/streamingfast/logging" 19 | ) 20 | 21 | var testLogger, testTracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger_tests") 22 | 23 | func init() { 24 | logging.InstantiateLoggers() 25 | } 26 | -------------------------------------------------------------------------------- /merger/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package metrics 16 | 17 | import "github.com/streamingfast/dmetrics" 18 | 19 | var MetricSet = dmetrics.NewSet() 20 | 21 | var HeadBlockTimeDrift = MetricSet.NewHeadTimeDrift("merger") 22 | var HeadBlockNumber = MetricSet.NewHeadBlockNumber("merger") 23 | var AppReadiness = MetricSet.NewAppReadiness("merger") 24 | -------------------------------------------------------------------------------- /merger/server.go: -------------------------------------------------------------------------------- 1 | package merger 2 | 3 | import ( 4 | dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" 5 | pbhealth "google.golang.org/grpc/health/grpc_health_v1" 6 | ) 7 | 8 | func (m *Merger) startGRPCServer() { 9 | gs := dgrpcfactory.ServerFromOptions() 10 | gs.OnTerminated(m.Shutdown) 11 | m.logger.Info("grpc server created") 12 | 13 | m.OnTerminated(func(_ error) { 14 | gs.Shutdown(0) 15 | }) 16 | pbhealth.RegisterHealthServer(gs.ServiceRegistrar(), m) 17 | m.logger.Info("server registered") 18 | 19 | go gs.Launch(m.grpcListenAddr) 20 | 21 | } 22 | -------------------------------------------------------------------------------- /merger/test_data/0000000001-20150730T152628.0-13406cb6-b1cb8fa3.dbin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/streamingfast/firehose-core/545b2f20616dc54fdbf7e59638a59f06a79c54ae/merger/test_data/0000000001-20150730T152628.0-13406cb6-b1cb8fa3.dbin -------------------------------------------------------------------------------- /merger/test_data/0000000002-20150730T152657.0-044698c9-13406cb6.dbin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/streamingfast/firehose-core/545b2f20616dc54fdbf7e59638a59f06a79c54ae/merger/test_data/0000000002-20150730T152657.0-044698c9-13406cb6.dbin 
-------------------------------------------------------------------------------- /merger/test_data/0000000003-20150730T152728.0-a88cf741-044698c9.dbin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/streamingfast/firehose-core/545b2f20616dc54fdbf7e59638a59f06a79c54ae/merger/test_data/0000000003-20150730T152728.0-a88cf741-044698c9.dbin -------------------------------------------------------------------------------- /merger/utils.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package merger 16 | 17 | import ( 18 | "context" 19 | "fmt" 20 | "time" 21 | 22 | "github.com/streamingfast/bstream" 23 | "go.uber.org/zap" 24 | "gopkg.in/olivere/elastic.v3/backoff" 25 | ) 26 | 27 | func fileNameForBlocksBundle(blockNum uint64) string { 28 | return fmt.Sprintf("%010d", blockNum) 29 | } 30 | 31 | func toBaseNum(in uint64, bundleSize uint64) uint64 { 32 | return in / bundleSize * bundleSize 33 | } 34 | 35 | func Retry(logger *zap.Logger, attempts int, sleep time.Duration, function func() error) (err error) { 36 | b := backoff.NewExponentialBackoff(sleep, 5*time.Second) 37 | for i := 0; ; i++ { 38 | err = function() 39 | if err == nil { 40 | return 41 | } 42 | 43 | if i >= (attempts - 1) { 44 | break 45 | } 46 | 47 | time.Sleep(b.Next()) 48 | 49 | logger.Warn("retrying after error", zap.Error(err)) 50 | } 51 | return fmt.Errorf("after %d attempts, last error: %s", attempts, err) 52 | } 53 | 54 | type TestMergerIO struct { 55 | NextBundleFunc func(ctx context.Context, lowestBaseBlock uint64) (baseBlock uint64, lastIrreversibleBlock bstream.BlockRef, err error) 56 | WalkOneBlockFilesFunc func(ctx context.Context, inclusiveLowerBlock uint64, callback func(*bstream.OneBlockFile) error) error 57 | MergeAndStoreFunc func(ctx context.Context, inclusiveLowerBlock uint64, oneBlockFiles []*bstream.OneBlockFile) (err error) 58 | DownloadOneBlockFileFunc func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) 59 | DeleteAsyncFunc func(oneBlockFiles []*bstream.OneBlockFile) error 60 | } 61 | 62 | func (io *TestMergerIO) NextBundle(ctx context.Context, lowestBaseBlock uint64) (baseBlock uint64, lastIrreversibleBlock bstream.BlockRef, err error) { 63 | if io.NextBundleFunc != nil { 64 | return io.NextBundleFunc(ctx, lowestBaseBlock) 65 | } 66 | return lowestBaseBlock, nil, nil 67 | } 68 | 69 | func (io *TestMergerIO) MergeAndStore(ctx context.Context, inclusiveLowerBlock uint64, oneBlockFiles []*bstream.OneBlockFile) (err 
error) { 70 | if io.MergeAndStoreFunc != nil { 71 | return io.MergeAndStoreFunc(ctx, inclusiveLowerBlock, oneBlockFiles) 72 | } 73 | return nil 74 | } 75 | 76 | func (io *TestMergerIO) DownloadOneBlockFile(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { 77 | if io.DownloadOneBlockFileFunc != nil { 78 | return io.DownloadOneBlockFileFunc(ctx, oneBlockFile) 79 | } 80 | 81 | return nil, nil 82 | } 83 | 84 | func (io *TestMergerIO) WalkOneBlockFiles(ctx context.Context, inclusiveLowerBlock uint64, callback func(*bstream.OneBlockFile) error) error { 85 | if io.WalkOneBlockFilesFunc != nil { 86 | return io.WalkOneBlockFilesFunc(ctx, inclusiveLowerBlock, callback) 87 | } 88 | return nil 89 | } 90 | func (io *TestMergerIO) DeleteAsync(oneBlockFiles []*bstream.OneBlockFile) error { 91 | if io.DeleteAsyncFunc != nil { 92 | return io.DeleteAsyncFunc(oneBlockFiles) 93 | } 94 | return nil 95 | } 96 | -------------------------------------------------------------------------------- /metering/metering.go: -------------------------------------------------------------------------------- 1 | package metering 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "go.uber.org/zap" 8 | 9 | "github.com/streamingfast/dstore" 10 | 11 | "github.com/streamingfast/dmetering" 12 | "github.com/streamingfast/substreams/reqctx" 13 | "google.golang.org/protobuf/proto" 14 | ) 15 | 16 | const ( 17 | MeterLiveUncompressedReadBytes = "live_uncompressed_read_bytes" 18 | MeterLiveUncompressedReadForkedBytes = "live_uncompressed_read_forked_bytes" 19 | 20 | MeterFileUncompressedReadBytes = "file_uncompressed_read_bytes" 21 | MeterFileUncompressedReadForkedBytes = "file_uncompressed_read_forked_bytes" 22 | MeterFileCompressedReadForkedBytes = "file_compressed_read_forked_bytes" 23 | MeterFileCompressedReadBytes = "file_compressed_read_bytes" 24 | 25 | TotalReadBytes = "total_read_bytes" 26 | ) 27 | 28 | func WithBlockBytesReadMeteringOptions(meter dmetering.Meter, logger 
// Send drains the per-request byte counters from meter and emits a single
// metering event for one response message, attributed to the given user,
// API key, IP and endpoint.
func Send(ctx context.Context, meter dmetering.Meter, userID, apiKeyID, ip, userMeta, endpoint string, resp proto.Message) {
	// Delta counters: consumed (reset) on read so each Send reports only what
	// happened since the previous call.
	bytesRead := meter.BytesReadDelta()
	bytesWritten := meter.BytesWrittenDelta()
	egressBytes := proto.Size(resp)

	liveUncompressedReadBytes := meter.GetCountAndReset(MeterLiveUncompressedReadBytes)
	liveUncompressedReadForkedBytes := meter.GetCountAndReset(MeterLiveUncompressedReadForkedBytes)

	fileUncompressedReadBytes := meter.GetCountAndReset(MeterFileUncompressedReadBytes)
	fileUncompressedReadForkedBytes := meter.GetCountAndReset(MeterFileUncompressedReadForkedBytes)
	fileCompressedReadForkedBytes := meter.GetCountAndReset(MeterFileCompressedReadForkedBytes)
	fileCompressedReadBytes := meter.GetCountAndReset(MeterFileCompressedReadBytes)

	// NOTE(review): uncompressed file reads are excluded from the running
	// total — only compressed file reads plus live reads are accumulated;
	// confirm this matches the intended billing contract.
	totalReadBytes := fileCompressedReadBytes + fileCompressedReadForkedBytes + liveUncompressedReadBytes + liveUncompressedReadForkedBytes

	meter.CountInc(TotalReadBytes, int(totalReadBytes))

	event := dmetering.Event{
		UserID:    userID,
		ApiKeyID:  apiKeyID,
		IpAddress: ip,
		Meta:      userMeta,

		Endpoint: endpoint,
		Metrics: map[string]float64{
			"egress_bytes":                       float64(egressBytes),
			"written_bytes":                      float64(bytesWritten),
			"read_bytes":                         float64(bytesRead),
			MeterLiveUncompressedReadBytes:       float64(liveUncompressedReadBytes),
			MeterLiveUncompressedReadForkedBytes: float64(liveUncompressedReadForkedBytes),
			MeterFileUncompressedReadBytes:       float64(fileUncompressedReadBytes),
			MeterFileUncompressedReadForkedBytes: float64(fileUncompressedReadForkedBytes),
			MeterFileCompressedReadForkedBytes:   float64(fileCompressedReadForkedBytes),
			MeterFileCompressedReadBytes:         float64(fileCompressedReadBytes),
			"block_count":                        1,
		},
		Timestamp: time.Now(),
	}

	// Prefer the request-scoped emitter attached to ctx; fall back to the
	// global emitter. WithoutCancel ensures the event is still delivered even
	// if the request context has already been canceled.
	emitter := reqctx.Emitter(ctx)
	if emitter == nil {
		dmetering.Emit(context.WithoutCancel(ctx), event)
	} else {
		emitter.Emit(context.WithoutCancel(ctx), event)
	}
}
metrics.NewHeadBlockNumber(HeadDriftServiceName) 16 | var AppReadiness = metrics.NewAppReadiness(HeadDriftServiceName) 17 | -------------------------------------------------------------------------------- /node-manager/log_plugin/keep_last_lines_log_plugin.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package logplugin 16 | 17 | import ( 18 | "github.com/streamingfast/shutter" 19 | ) 20 | 21 | // KeepLastLinesLogPlugin takes a line and keep the last N lines as requested by the caller. 
22 | type KeepLastLinesLogPlugin struct { 23 | *shutter.Shutter 24 | lastLines *lineRingBuffer 25 | includeDeepMindLines bool 26 | } 27 | 28 | func NewKeepLastLinesLogPlugin(lineCount int, includeDeepMindLines bool) *KeepLastLinesLogPlugin { 29 | plugin := &KeepLastLinesLogPlugin{ 30 | Shutter: shutter.New(), 31 | lastLines: &lineRingBuffer{maxCount: lineCount}, 32 | includeDeepMindLines: includeDeepMindLines, 33 | } 34 | 35 | return plugin 36 | } 37 | func (p *KeepLastLinesLogPlugin) Name() string { 38 | return "KeepLastLinesLogPlugin" 39 | } 40 | func (p *KeepLastLinesLogPlugin) Launch() {} 41 | func (p KeepLastLinesLogPlugin) Stop() {} 42 | func (p *KeepLastLinesLogPlugin) DebugDeepMind(enabled bool) { 43 | p.includeDeepMindLines = enabled 44 | } 45 | 46 | func (p *KeepLastLinesLogPlugin) LastLines() []string { 47 | return p.lastLines.lines() 48 | } 49 | 50 | //func (p *KeepLastLinesLogPlugin) Close(_ error) { 51 | //} 52 | 53 | func (p *KeepLastLinesLogPlugin) LogLine(in string) { 54 | if readerInstrumentationPrefixRegex.MatchString(in) && !p.includeDeepMindLines { 55 | // It's a deep mind log line and we don't care about it, skip 56 | return 57 | } 58 | 59 | p.lastLines.append(in) 60 | } 61 | -------------------------------------------------------------------------------- /node-manager/log_plugin/keep_last_lines_log_plugin_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package logplugin 16 | 17 | import ( 18 | "testing" 19 | 20 | "github.com/stretchr/testify/assert" 21 | ) 22 | 23 | func TestKeepLastLinesLogPlugin(t *testing.T) { 24 | tests := []struct { 25 | name string 26 | in []string 27 | maxLine int 28 | includeDeepMindLines bool 29 | out []string 30 | }{ 31 | {"empty", []string{}, 3, false, nil}, 32 | {"single, not reached", []string{"a"}, 3, false, []string{"a"}}, 33 | {"flush, not reached", []string{"a", "b", "c"}, 3, false, []string{"a", "b", "c"}}, 34 | {"over, count", []string{"a", "b", "c", "d"}, 3, false, []string{"b", "c", "d"}}, 35 | {"multiple over count", []string{"a", "b", "c", "d", "e", "f", "g"}, 3, false, []string{"e", "f", "g"}}, 36 | 37 | {"max count 0 keeps nothing", []string{"a", "b", "c", "d", "e", "f", "g"}, 0, false, nil}, 38 | 39 | {"dm exclude, multiple over count", []string{"a", "b", "DMLOG a", "c", "d", "e", "f", "g", "DMLOG b"}, 3, false, []string{"e", "f", "g"}}, 40 | {"dm include, multiple over count", []string{"a", "b", "DMLOG a", "c", "d", "e", "f", "g", "DMLOG b"}, 3, true, []string{"f", "g", "DMLOG b"}}, 41 | } 42 | 43 | for _, test := range tests { 44 | t.Run(test.name, func(t *testing.T) { 45 | plugin := NewKeepLastLinesLogPlugin(test.maxLine, test.includeDeepMindLines) 46 | 47 | for _, line := range test.in { 48 | plugin.LogLine(line) 49 | } 50 | 51 | assert.Equal(t, test.out, plugin.LastLines()) 52 | }) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /node-manager/log_plugin/line_ring_buffer.go: -------------------------------------------------------------------------------- 1 | package logplugin 2 | 3 | type bufferElement struct { 4 | previous *bufferElement 5 | next *bufferElement 6 | line string 7 | } 8 | 9 | type lineRingBuffer struct { 10 | maxCount int 11 | 12 | count int 13 | tail *bufferElement 14 | head 
*bufferElement 15 | } 16 | 17 | func (b *lineRingBuffer) lines() (out []string) { 18 | if b.count == 0 { 19 | return nil 20 | } 21 | 22 | if b.count == 1 { 23 | return []string{b.head.line} 24 | } 25 | 26 | i := 0 27 | out = make([]string, b.count) 28 | for current := b.tail; current != nil; current = current.next { 29 | out[i] = current.line 30 | i++ 31 | } 32 | 33 | return 34 | } 35 | 36 | func (b *lineRingBuffer) append(line string) { 37 | // If we keep nothing, there is nothing to do here 38 | if b.maxCount == 0 { 39 | return 40 | } 41 | 42 | oldHead := b.head 43 | b.head = &bufferElement{line: line, previous: oldHead} 44 | 45 | if oldHead != nil { 46 | oldHead.next = b.head 47 | } 48 | 49 | if b.tail == nil { 50 | b.tail = b.head 51 | } 52 | 53 | if b.count == b.maxCount { 54 | // We are full, we need to rotate stuff a bit 55 | b.tail = b.tail.next 56 | } else { 57 | // We are not full, let's just append a new line (so only update count) 58 | b.count++ 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /node-manager/log_plugin/log_plugin.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package logplugin 16 | 17 | import ( 18 | "regexp" 19 | 20 | "github.com/streamingfast/bstream/blockstream" 21 | ) 22 | 23 | var readerInstrumentationPrefixRegex = regexp.MustCompile("^(DMLOG|FIRE) ") 24 | 25 | type LogPlugin interface { 26 | Name() string 27 | Launch() 28 | LogLine(in string) 29 | //Close(err error) 30 | Shutdown(err error) 31 | IsTerminating() bool 32 | Stop() 33 | } 34 | 35 | type Shutter interface { 36 | Terminated() <-chan struct{} 37 | OnTerminating(f func(error)) 38 | OnTerminated(f func(error)) 39 | IsTerminating() bool 40 | Shutdown(err error) 41 | } 42 | 43 | type BlockStreamer interface { 44 | Run(blockServer *blockstream.Server) 45 | } 46 | 47 | type LogPluginFunc func(line string) 48 | 49 | func (f LogPluginFunc) Launch() {} 50 | func (f LogPluginFunc) LogLine(line string) { f(line) } 51 | func (f LogPluginFunc) Name() string { return "log plug func" } 52 | func (f LogPluginFunc) Stop() {} 53 | func (f LogPluginFunc) Shutdown(_ error) {} 54 | func (f LogPluginFunc) Terminated() <-chan struct{} { 55 | ch := make(chan struct{}) 56 | close(ch) 57 | return ch 58 | } 59 | 60 | func (f LogPluginFunc) IsTerminating() bool { 61 | return false 62 | } 63 | 64 | func (f LogPluginFunc) OnTerminating(_ func(error)) { 65 | } 66 | 67 | func (f LogPluginFunc) OnTerminated(_ func(error)) { 68 | } 69 | -------------------------------------------------------------------------------- /node-manager/log_plugin/to_console_log_plugin.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package logplugin 16 | 17 | import ( 18 | "fmt" 19 | "os" 20 | "strconv" 21 | 22 | "github.com/streamingfast/shutter" 23 | ) 24 | 25 | var DebugLineLength = int64(4096) 26 | 27 | func init() { 28 | if os.Getenv("DEBUG_LINE_LENGTH") != "" { 29 | value, err := strconv.ParseInt(os.Getenv("DEBUG_LINE_LENGTH"), 10, 64) 30 | if err == nil { 31 | DebugLineLength = value 32 | } 33 | } 34 | } 35 | 36 | // ToConsoleLogPlugin takes a line, and if it's not a FIRE (or DMLOG) line or 37 | // if we are actively debugging deep mind, will print the line to the 38 | // standard output 39 | type ToConsoleLogPlugin struct { 40 | *shutter.Shutter 41 | debugDeepMind bool 42 | skipBlankLines bool 43 | } 44 | 45 | func NewToConsoleLogPlugin(debugDeepMind bool) *ToConsoleLogPlugin { 46 | return &ToConsoleLogPlugin{ 47 | Shutter: shutter.New(), 48 | debugDeepMind: debugDeepMind, 49 | } 50 | } 51 | 52 | func (p *ToConsoleLogPlugin) SetSkipBlankLines(skip bool) { 53 | p.skipBlankLines = skip 54 | } 55 | 56 | func (p *ToConsoleLogPlugin) Launch() {} 57 | func (p ToConsoleLogPlugin) Stop() {} 58 | func (p *ToConsoleLogPlugin) Name() string { 59 | return "ToConsoleLogPlugin" 60 | } 61 | func (p *ToConsoleLogPlugin) DebugDeepMind(enabled bool) { 62 | p.debugDeepMind = enabled 63 | } 64 | 65 | func (p *ToConsoleLogPlugin) LogLine(in string) { 66 | if in == "" && p.skipBlankLines { 67 | return 68 | } 69 | 70 | if p.debugDeepMind || !readerInstrumentationPrefixRegex.MatchString(in) { 71 | logLineLength := int64(len(in)) 72 | 73 | // We really 
want to write lines to stdout and not through our logger, it's the purpose of our plugin! 74 | if logLineLength > DebugLineLength { 75 | fmt.Printf("%s ... bytes: %d\n", in[:DebugLineLength], (logLineLength - DebugLineLength)) 76 | } else { 77 | fmt.Println(in) 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /node-manager/metrics/common.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package metrics 16 | 17 | import ( 18 | "github.com/streamingfast/dmetrics" 19 | ) 20 | 21 | var Metricset = dmetrics.NewSet() 22 | 23 | func NewHeadBlockTimeDrift(serviceName string) *dmetrics.HeadTimeDrift { 24 | return Metricset.NewHeadTimeDrift(serviceName) 25 | } 26 | 27 | func NewHeadBlockNumber(serviceName string) *dmetrics.HeadBlockNum { 28 | return Metricset.NewHeadBlockNumber(serviceName) 29 | } 30 | 31 | func NewAppReadiness(serviceName string) *dmetrics.AppReadiness { 32 | return Metricset.NewAppReadiness(serviceName) 33 | } 34 | -------------------------------------------------------------------------------- /node-manager/mindreader/archiver.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 
2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package mindreader 16 | 17 | import ( 18 | "context" 19 | "fmt" 20 | "io" 21 | 22 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 23 | 24 | "github.com/streamingfast/bstream" 25 | "github.com/streamingfast/dstore" 26 | "github.com/streamingfast/logging" 27 | "github.com/streamingfast/shutter" 28 | "go.uber.org/zap" 29 | ) 30 | 31 | type Archiver struct { 32 | *shutter.Shutter 33 | 34 | startBlock uint64 35 | oneblockSuffix string 36 | 37 | localOneBlocksStore dstore.Store 38 | 39 | fileUploader *FileUploader 40 | logger *zap.Logger 41 | tracer logging.Tracer 42 | } 43 | 44 | func NewArchiver( 45 | startBlock uint64, 46 | oneblockSuffix string, 47 | localOneBlocksStore dstore.Store, 48 | remoteOneBlocksStore dstore.Store, 49 | logger *zap.Logger, 50 | tracer logging.Tracer, 51 | ) *Archiver { 52 | 53 | fileUploader := NewFileUploader( 54 | localOneBlocksStore, 55 | remoteOneBlocksStore, 56 | logger) 57 | 58 | a := &Archiver{ 59 | Shutter: shutter.New(), 60 | startBlock: startBlock, 61 | oneblockSuffix: oneblockSuffix, 62 | localOneBlocksStore: localOneBlocksStore, 63 | fileUploader: fileUploader, 64 | logger: logger, 65 | tracer: tracer, 66 | } 67 | 68 | return a 69 | } 70 | 71 | func (a *Archiver) Start(ctx context.Context) { 72 | a.OnTerminating(func(err error) { 73 | a.logger.Info("archiver selector is terminating", zap.Error(err)) 74 | }) 75 
// StoreBlock serializes the block in dbin format and writes it as a one-block
// file into the local one-block store, using a pipe so serialization and the
// store write stream concurrently. Blocks below the configured startBlock are
// silently skipped. Returns the store write error, if any.
//
// NOTE: the statement ordering here is load-bearing (goroutine reading the
// pipe must be started before the writer produces bytes); do not reorder.
func (a *Archiver) StoreBlock(ctx context.Context, block *pbbstream.Block) error {
	if block.Number < a.startBlock {
		a.logger.Debug("skipping block below start_block", zap.Uint64("block_num", block.Number), zap.Uint64("start_block", a.startBlock))
		return nil
	}

	pipeRead, pipeWrite := io.Pipe()

	// We are in a pipe context and `a.blockWriterFactory.New(pipeWrite)` writes some bytes to the writer when called.
	// To avoid blocking everything, we must start reading bytes in a goroutine first to ensure the called is not block
	// forever because nobody is reading the pipe.
	writeObjectErrChan := make(chan error)
	go func() {
		writeObjectErrChan <- a.localOneBlocksStore.WriteObject(ctx, bstream.BlockFileNameWithSuffix(block, a.oneblockSuffix), pipeRead)
	}()

	blockWriter, err := bstream.NewDBinBlockWriter(pipeWrite)
	if err != nil {
		return fmt.Errorf("write block factory: %w", err)
	}

	// If `blockWriter.Write()` emits `nil`, the fact that we close with a `nil` error will actually forwards
	// `io.EOF` to the `pipeRead` (e.g. our `WriteObject` call above) which is what we want. If it emits a non
	// `nil`, it will be forwarded to the `pipeRead` which is also correct.
	pipeWrite.CloseWithError(blockWriter.Write(block))

	// We are in a pipe context here, wait until the `WriteObject` call has finished
	err = <-writeObjectErrChan
	if err != nil {
		return err
	}

	return nil
}
llerrgroup.New(200) 70 | _ = fu.localStore.Walk(ctx, "", func(filename string) (err error) { 71 | if eg.Stop() { 72 | return nil 73 | } 74 | eg.Go(func() error { 75 | ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) 76 | defer cancel() 77 | 78 | if traceEnabled { 79 | fu.logger.Debug("uploading file to storage", zap.String("local_file", filename)) 80 | } 81 | 82 | if err = fu.destinationStore.PushLocalFile(ctx, fu.localStore.ObjectPath(filename), filename); err != nil { 83 | return fmt.Errorf("moving file %q to storage: %w", filename, err) 84 | } 85 | return nil 86 | }) 87 | 88 | return nil 89 | }) 90 | 91 | return eg.Wait() 92 | } 93 | -------------------------------------------------------------------------------- /node-manager/mindreader/file_uploader_test.go: -------------------------------------------------------------------------------- 1 | package mindreader 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/streamingfast/dstore" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestFileUploader(t *testing.T) { 13 | localStore := dstore.NewMockStore(nil) 14 | localStore.SetFile("test1", nil) 15 | localStore.SetFile("test2", nil) 16 | localStore.SetFile("test3", nil) 17 | 18 | destinationStore := dstore.NewMockStore(nil) 19 | 20 | done := make(chan interface{}) 21 | out := make(chan bool, 3) 22 | 23 | destinationStore.PushLocalFileFunc = func(_ context.Context, _, _ string) (err error) { 24 | out <- true 25 | return nil 26 | } 27 | go func() { 28 | for i := 0; i < 3; i++ { 29 | <-out 30 | } 31 | close(done) 32 | }() 33 | 34 | uploader := NewFileUploader(localStore, destinationStore, testLogger) 35 | err := uploader.uploadFiles(context.Background()) 36 | require.NoError(t, err) 37 | 38 | select { 39 | case <-done: 40 | case <-time.After(5 * time.Second): 41 | t.Error("took took long") 42 | } 43 | } 44 | -------------------------------------------------------------------------------- 
/node-manager/mindreader/init_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package mindreader 16 | 17 | import ( 18 | "github.com/streamingfast/logging" 19 | ) 20 | 21 | var testLogger, testTracer = logging.PackageLogger("node-manager", "github.com/streamingfast/firehose-core/node_manager/mindreader/tests") 22 | 23 | func init() { 24 | logging.InstantiateLoggers() 25 | } 26 | -------------------------------------------------------------------------------- /node-manager/mindreader/logging.go: -------------------------------------------------------------------------------- 1 | package mindreader 2 | 3 | import "os" 4 | 5 | var traceEnabled bool 6 | 7 | func init() { 8 | traceEnabled = os.Getenv("TRACE") == "true" 9 | } 10 | -------------------------------------------------------------------------------- /node-manager/monitor.go: -------------------------------------------------------------------------------- 1 | package node_manager 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/streamingfast/bstream" 7 | pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 8 | 9 | "github.com/streamingfast/dmetrics" 10 | "go.uber.org/atomic" 11 | ) 12 | 13 | type Readiness interface { 14 | IsReady() bool 15 | } 16 | 17 | type MetricsAndReadinessManager struct { 18 | headBlockChan 
chan *pbbstream.Block 19 | headBlockTimeDrift *dmetrics.HeadTimeDrift 20 | headBlockNumber *dmetrics.HeadBlockNum 21 | appReadiness *dmetrics.AppReadiness 22 | readinessProbe *atomic.Bool 23 | 24 | // ReadinessMaxLatency is the max delta between head block time and 25 | // now before /healthz starts returning success 26 | readinessMaxLatency time.Duration 27 | 28 | lastSeenBlock *atomic.Pointer[pbbstream.Block] 29 | } 30 | 31 | func NewMetricsAndReadinessManager(headBlockTimeDrift *dmetrics.HeadTimeDrift, headBlockNumber *dmetrics.HeadBlockNum, appReadiness *dmetrics.AppReadiness, readinessMaxLatency time.Duration) *MetricsAndReadinessManager { 32 | return &MetricsAndReadinessManager{ 33 | headBlockChan: make(chan *pbbstream.Block, 1), // just for non-blocking, saving a few nanoseconds here 34 | readinessProbe: atomic.NewBool(false), 35 | appReadiness: appReadiness, 36 | headBlockTimeDrift: headBlockTimeDrift, 37 | headBlockNumber: headBlockNumber, 38 | readinessMaxLatency: readinessMaxLatency, 39 | 40 | lastSeenBlock: atomic.NewPointer[pbbstream.Block](nil), 41 | } 42 | } 43 | 44 | func (m *MetricsAndReadinessManager) setReadinessProbeOn() { 45 | m.readinessProbe.CompareAndSwap(false, true) 46 | m.appReadiness.SetReady() 47 | } 48 | 49 | func (m *MetricsAndReadinessManager) setReadinessProbeOff() { 50 | m.readinessProbe.CompareAndSwap(true, false) 51 | m.appReadiness.SetNotReady() 52 | } 53 | 54 | func (m *MetricsAndReadinessManager) IsReady() bool { 55 | return m.readinessProbe.Load() 56 | } 57 | 58 | func (m *MetricsAndReadinessManager) Launch() { 59 | for { 60 | select { 61 | case block := <-m.headBlockChan: 62 | m.lastSeenBlock.Store(block) 63 | case <-time.After(time.Second): 64 | } 65 | 66 | block := m.lastSeenBlock.Load() 67 | if block == nil { 68 | continue 69 | } 70 | 71 | // metrics 72 | if m.headBlockNumber != nil { 73 | m.headBlockNumber.SetUint64(block.Number) 74 | } 75 | 76 | if block.Time().IsZero() { // never act upon zero timestamps 77 | continue 
78 | } 79 | if m.headBlockTimeDrift != nil { 80 | m.headBlockTimeDrift.SetBlockTime(block.Time()) 81 | } 82 | 83 | // readiness 84 | if m.readinessMaxLatency == 0 || time.Since(block.Time()) < m.readinessMaxLatency { 85 | m.setReadinessProbeOn() 86 | } else { 87 | m.setReadinessProbeOff() 88 | } 89 | } 90 | } 91 | 92 | func (m *MetricsAndReadinessManager) UpdateHeadBlock(block *pbbstream.Block) error { 93 | m.headBlockChan <- block 94 | return nil 95 | } 96 | 97 | func (m *MetricsAndReadinessManager) GetHeadBlock() bstream.BlockRef { 98 | block := m.lastSeenBlock.Load() 99 | if block == nil { 100 | return bstream.BlockRefEmpty 101 | } 102 | 103 | return block.AsRef() 104 | } 105 | -------------------------------------------------------------------------------- /node-manager/operator/backuper_test.go: -------------------------------------------------------------------------------- 1 | package operator 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestParseKVConfigString(t *testing.T) { 11 | cases := []struct { 12 | name string 13 | in string 14 | expected map[string]string 15 | expectError bool 16 | }{ 17 | { 18 | "vanilla", 19 | "type=pitreos store=file:///var/backups", 20 | map[string]string{"type": "pitreos", "store": "file:///var/backups"}, 21 | false, 22 | }, 23 | { 24 | "missing type", 25 | "store=file:///var/backups", 26 | nil, 27 | true, 28 | }, 29 | { 30 | "empty type", 31 | "type= store=file:///var/backups", 32 | nil, 33 | true, 34 | }, 35 | { 36 | "empty", 37 | "", 38 | nil, 39 | true, 40 | }, 41 | { 42 | "invalid", 43 | "type=blah store=file:///var/backups something", 44 | nil, 45 | true, 46 | }, 47 | { 48 | "multispace_ok", 49 | "type=blah store=file:///var/backups ", 50 | map[string]string{"type": "blah", "store": "file:///var/backups"}, 51 | false, 52 | }, 53 | { 54 | "emptystring ok", 55 | "type=blah store= freq=", 56 | map[string]string{"type": "blah", 
"store": "", "freq": ""}, 57 | false, 58 | }, 59 | } 60 | 61 | for _, tc := range cases { 62 | t.Run(tc.name, func(t *testing.T) { 63 | out, err := parseKVConfigString(tc.in) 64 | if tc.expectError { 65 | require.Error(t, err) 66 | return 67 | } 68 | 69 | require.NoError(t, err) 70 | assert.Equal(t, tc.expected, out) 71 | }) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /node-manager/operator/bootstrap.go: -------------------------------------------------------------------------------- 1 | package operator 2 | 3 | type Bootstrapper interface { 4 | Bootstrap() error 5 | } 6 | -------------------------------------------------------------------------------- /node-manager/operator/errors.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package operator 16 | 17 | import "errors" 18 | 19 | var ErrCleanExit = errors.New("clean exit") 20 | -------------------------------------------------------------------------------- /node-manager/superviser.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
// StartOptions carries the optional settings applied when starting a
// supervised process.
type StartOptions struct {
	EnableDebugDeepmind bool
	ExtraEnv            map[string]string
}

// StartOption mutates a StartOptions value before process start.
type StartOption interface {
	Apply(opts *StartOptions)
}

// startOptionFunc adapts a plain function into a StartOption.
type startOptionFunc func(opts *StartOptions)

func (f startOptionFunc) Apply(opts *StartOptions) { f(opts) }

// EnableDebugDeepmindOption turns deep-mind (instrumentation) debugging on.
var EnableDebugDeepmindOption = startOptionFunc(func(opts *StartOptions) {
	opts.EnableDebugDeepmind = true
})

// DisableDebugDeepmindOption turns deep-mind (instrumentation) debugging off.
var DisableDebugDeepmindOption = startOptionFunc(func(opts *StartOptions) {
	opts.EnableDebugDeepmind = false
})

// ExtraEnvOption sets additional environment variables for the process.
type ExtraEnvOption map[string]string

func (f ExtraEnvOption) Apply(opts *StartOptions) {
	opts.ExtraEnv = map[string]string(f)
}
// MonitorableChainSuperviser is implemented by supervisers that expose a
// blocking monitoring loop.
type MonitorableChainSuperviser interface {
	Monitor()
}

// ProducerChainSuperviser is implemented by supervisers able to control
// block production on the underlying node.
type ProducerChainSuperviser interface {
	IsProducing() (bool, error)
	IsActiveProducer() bool

	ResumeProduction() error
	PauseProduction() error

	WaitUntilEndOfNextProductionRound(timeout time.Duration) error
}

// ProductionState tracks where a producing node sits in its production cycle.
type ProductionState int

const (
	StatePre       ProductionState = iota // Just before we produce, don't restart
	StateProducing                        // We're producing right now
	StatePost                             // Right after production
	StateStale                            // We haven't produced for 12 minutes
)

// String returns the lowercase human-readable name of the state, or
// "unknown" for out-of-range values.
func (s ProductionState) String() string {
	names := [...]string{
		StatePre:       "pre",
		StateProducing: "producing",
		StatePost:      "post",
		StateStale:     "stale",
	}

	if s < 0 || int(s) >= len(names) {
		return "unknown"
	}

	return names[s]
}

// ProductionEvent is a signal observed while monitoring block production.
type ProductionEvent int

const (
	EventProduced ProductionEvent = iota
	EventReceived
)
// stackSoftLimitBytes is the soft stack size limit requested for the
// process: 64 MiB minus one 4 KiB page (67104768 bytes). The value is
// unchanged from the previous hard-coded literal; it is named here so its
// intent is visible.
const stackSoftLimitBytes = 67104768

// AugmentStackSizeLimit raises the soft RLIMIT_STACK of the current
// process to stackSoftLimitBytes, leaving the hard limit untouched.
// It returns an error if the limit cannot be read or written (e.g. when
// the hard limit is lower than the requested soft limit).
func AugmentStackSizeLimit() error {
	var rLimit syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_STACK, &rLimit); err != nil {
		return fmt.Errorf("getting rlimit: %w", err)
	}

	// Only the soft (current) limit is bumped; modifying the hard limit
	// would require elevated privileges.
	rLimit.Cur = stackSoftLimitBytes

	if err := syscall.Setrlimit(syscall.RLIMIT_STACK, &rLimit); err != nil {
		return fmt.Errorf("setting rlimit: %w", err)
	}

	return nil
}
Edit file [./generator/generator.go](./generator/generator.go) and add the Buf Registry path of the package in `wellKnownProtoRepos` variables. 18 | 19 | Then [re-generate Protobuf definitions](#re-generate) and send a PR with the changes. 20 | -------------------------------------------------------------------------------- /proto/generator/template.gotmpl: -------------------------------------------------------------------------------- 1 | // Code generated by 'go run github.com/streamingfast/firehose-core/protoregistry/generator well_known.go protoregistry', DO NOT EDIT! 2 | package {{.Package}} 3 | 4 | var wellKnownTypes []*WellKnownType 5 | 6 | func init() { 7 | wellKnownTypes = []*WellKnownType{ 8 | {{- range .ProtoFiles}} 9 | { 10 | // {{.Name}} ({{.BufRegistryPackageURL}}) 11 | proto: "{{.Data | toHex}}", 12 | }, 13 | {{- end}} 14 | } 15 | } -------------------------------------------------------------------------------- /proto/registry_test.go: -------------------------------------------------------------------------------- 1 | package proto 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/stretchr/testify/require" 8 | "google.golang.org/protobuf/reflect/protoreflect" 9 | "google.golang.org/protobuf/types/dynamicpb" 10 | "google.golang.org/protobuf/types/known/anypb" 11 | ) 12 | 13 | func TestUnmarshal(t *testing.T) { 14 | acme := readTestProto(t, "testdata/acme") 15 | 16 | type args struct { 17 | typeURL string 18 | value []byte 19 | } 20 | tests := []struct { 21 | name string 22 | protoPaths []string 23 | want func(tt *testing.T, out *dynamicpb.Message) 24 | assertion require.ErrorAssertionFunc 25 | value []byte 26 | typeURL string 27 | }{ 28 | { 29 | name: "chain alone", 30 | typeURL: "sf.acme.type.v1.Block", 31 | want: func(tt *testing.T, out *dynamicpb.Message) { 32 | h := out.Get(out.Descriptor().Fields().ByName("hash")).String() 33 | blockNum := out.Get(out.Descriptor().Fields().ByName("num")).Uint() 34 | 
assert.Equal(tt, "", h) 35 | assert.Equal(tt, uint64(0), blockNum) 36 | }, 37 | assertion: require.NoError, 38 | }, 39 | { 40 | name: "overriding built-in chain with proto path", 41 | protoPaths: []string{"testdata/override_acme"}, 42 | typeURL: "sf.acme.type.v1.Block", 43 | want: func(tt *testing.T, out *dynamicpb.Message) { 44 | // If you reach this point following a panic in the Go test, the reason there 45 | // is a panic here is because the override_ethereum.proto file is taking 46 | // precedence over the ethereum.proto file, which is not what we want. 47 | h := out.Get(out.Descriptor().Fields().ByName("hash_custom")).String() 48 | blockNum := out.Get(out.Descriptor().Fields().ByName("num_custom")).Uint() 49 | assert.Equal(tt, "", h) 50 | assert.Equal(tt, uint64(0), blockNum) 51 | }, 52 | assertion: require.NoError, 53 | }, 54 | { 55 | name: "well-know chain (ethereum)", 56 | typeURL: "sf.ethereum.type.v2.Block", 57 | value: []byte{0x18, 0x0a}, 58 | want: func(tt *testing.T, out *dynamicpb.Message) { 59 | // If you reach this point following a panic in the Go test, the reason there 60 | // is a panic here is because the override_ethereum.proto file is taking 61 | // precedence over the ethereum.proto file, which is not what we want. 62 | cn := out.Get(out.Descriptor().Fields().ByName("number")).Uint() 63 | assert.Equal(tt, uint64(10), cn) 64 | }, 65 | assertion: require.NoError, 66 | }, 67 | { 68 | name: "overridding well-know chain (ethereum) with proto path", 69 | protoPaths: []string{"testdata/override"}, 70 | typeURL: "sf.ethereum.type.v2.Block", 71 | value: []byte{0x18, 0x0a}, 72 | want: func(tt *testing.T, out *dynamicpb.Message) { 73 | // If you reach this point following a panic in the Go test, the reason there 74 | // is a panic here is because the override_ethereum.proto file is taking 75 | // precedence over the ethereum.proto file, which is not what we want. 
76 | cn := out.Get(out.Descriptor().Fields().ByName("number_custom")).Uint() 77 | assert.Equal(tt, uint64(10), cn) 78 | }, 79 | assertion: require.NoError, 80 | }, 81 | } 82 | for _, tt := range tests { 83 | t.Run(tt.name, func(t *testing.T) { 84 | registry, err := NewRegistry(acme, tt.protoPaths...) 85 | require.NoError(t, err) 86 | 87 | a := &anypb.Any{TypeUrl: "type.googleapis.com/" + tt.typeURL, Value: tt.value} 88 | out, err := registry.Unmarshal(a) 89 | tt.assertion(t, err) 90 | 91 | tt.want(t, out) 92 | }) 93 | } 94 | } 95 | func readTestProto(t *testing.T, file string) protoreflect.FileDescriptor { 96 | t.Helper() 97 | 98 | descs, err := parseProtoFiles([]string{file}) 99 | require.NoError(t, err) 100 | 101 | return descs[0] 102 | } 103 | -------------------------------------------------------------------------------- /proto/testdata/acme/acme.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package sf.acme.type.v1; 4 | 5 | message Block { 6 | string hash = 1; 7 | uint64 num = 2; 8 | } -------------------------------------------------------------------------------- /proto/testdata/override_acme/acme.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package sf.acme.type.v1; 4 | 5 | message Block { 6 | string hash_custom = 1; 7 | uint64 num_custom = 2; 8 | } -------------------------------------------------------------------------------- /proto/utils.go: -------------------------------------------------------------------------------- 1 | package proto 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | 9 | "github.com/jhump/protoreflect/desc/protoparse" 10 | "google.golang.org/protobuf/reflect/protoreflect" 11 | ) 12 | 13 | func parseProtoFiles(importPaths []string) (fds []protoreflect.FileDescriptor, err error) { 14 | userDir, err := os.UserHomeDir() 15 | if err != nil { 16 | return nil, 
fmt.Errorf("get user home dir: %w", err) 17 | } 18 | 19 | var ip []string 20 | for _, importPath := range importPaths { 21 | if importPath == "~" { 22 | importPath = userDir 23 | } else if strings.HasPrefix(importPath, "~/") { 24 | importPath = filepath.Join(userDir, importPath[2:]) 25 | } 26 | 27 | importPath, err = filepath.Abs(importPath) 28 | if err != nil { 29 | return nil, fmt.Errorf("getting absolute path for %q: %w", importPath, err) 30 | } 31 | 32 | if !strings.HasSuffix(importPath, "/") { 33 | importPath += "/" 34 | } 35 | ip = append(ip, importPath) 36 | } 37 | 38 | parser := protoparse.Parser{ 39 | ImportPaths: ip, 40 | } 41 | 42 | var protoFiles []string 43 | for _, importPath := range ip { 44 | err := filepath.Walk(importPath, 45 | func(path string, info os.FileInfo, err error) error { 46 | if err != nil { 47 | return err 48 | } 49 | if strings.HasSuffix(path, ".proto") && !info.IsDir() { 50 | protoFiles = append(protoFiles, strings.TrimPrefix(path, importPath)) 51 | } 52 | return nil 53 | }) 54 | if err != nil { 55 | return nil, fmt.Errorf("walking import path %q: %w", importPath, err) 56 | } 57 | } 58 | 59 | parsed, err := parser.ParseFiles(protoFiles...) 
60 | if err != nil { 61 | return nil, fmt.Errorf("parsing proto files: %w", err) 62 | } 63 | 64 | for _, fd := range parsed { 65 | fds = append(fds, fd.UnwrapFile()) 66 | } 67 | return 68 | 69 | } 70 | -------------------------------------------------------------------------------- /proto/well_known.go: -------------------------------------------------------------------------------- 1 | package proto 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | 7 | "google.golang.org/protobuf/proto" 8 | "google.golang.org/protobuf/reflect/protodesc" 9 | "google.golang.org/protobuf/reflect/protoreflect" 10 | "google.golang.org/protobuf/types/descriptorpb" 11 | ) 12 | 13 | type WellKnownType struct { 14 | proto string 15 | BytesEncoding string 16 | } 17 | 18 | func RegisterWellKnownFileDescriptors(registry *Registry) error { 19 | for _, wt := range wellKnownTypes { 20 | fd, err := protoToFileDescriptor(registry, wt.proto) 21 | if err != nil { 22 | return fmt.Errorf("generating proto file: %w", err) 23 | } 24 | err = registry.RegisterFileDescriptor(fd) 25 | if err != nil { 26 | return fmt.Errorf("registering file descriptor: %w", err) 27 | } 28 | 29 | } 30 | return nil 31 | } 32 | 33 | func protoToFileDescriptor(registry *Registry, in string) (protoreflect.FileDescriptor, error) { 34 | protoBytes, err := hex.DecodeString(in) 35 | if err != nil { 36 | panic(fmt.Errorf("failed to hex decode payload: %w", err)) 37 | } 38 | 39 | fileDescriptorProto := &descriptorpb.FileDescriptorProto{} 40 | if err := proto.Unmarshal(protoBytes, fileDescriptorProto); err != nil { 41 | return nil, fmt.Errorf("failed to unmarshal file descriptor: %w", err) 42 | } 43 | 44 | fd, err := protodesc.NewFile(fileDescriptorProto, registry.Files) 45 | if err != nil { 46 | return nil, fmt.Errorf("creating new file descriptor: %w", err) 47 | 48 | } 49 | return fd, nil 50 | } 51 | -------------------------------------------------------------------------------- /reader_node.go: 
-------------------------------------------------------------------------------- 1 | package firecore 2 | 3 | import "golang.org/x/exp/maps" 4 | 5 | var ReaderNodeVariablesDocumentation = map[string]string{ 6 | "{data-dir}": "The current data-dir path defined by the flag 'data-dir'", 7 | "{node-data-dir}": "The node data dir path defined by the flag 'reader-node-data-dir'", 8 | "{hostname}": "The machine's hostname", 9 | "{start-block-num}": "The resolved start block number defined by the flag 'reader-node-start-block-num' (can be overwritten)", 10 | "{stop-block-num}": "The stop block number defined by the flag 'reader-node-stop-block-num'", 11 | } 12 | 13 | var ReaderNodeVariables = maps.Keys(ReaderNodeVariablesDocumentation) 14 | 15 | func ReaderNodeVariablesValues(resolver ReaderNodeArgumentResolver) map[string]string { 16 | values := make(map[string]string, len(ReaderNodeVariables)) 17 | for _, variable := range ReaderNodeVariables { 18 | values[variable] = resolver(variable) 19 | } 20 | 21 | return values 22 | } 23 | -------------------------------------------------------------------------------- /reader_node_bootstrapper_tarball.go: -------------------------------------------------------------------------------- 1 | package firecore 2 | 3 | import ( 4 | "archive/tar" 5 | "context" 6 | "fmt" 7 | "io" 8 | "os" 9 | "path/filepath" 10 | "time" 11 | 12 | "github.com/streamingfast/dstore" 13 | "go.uber.org/zap" 14 | ) 15 | 16 | func NewTarballReaderNodeBootstrapper( 17 | url string, 18 | dataDir string, 19 | logger *zap.Logger, 20 | ) *TarballNodeBootstrapper { 21 | return &TarballNodeBootstrapper{ 22 | url: url, 23 | dataDir: dataDir, 24 | logger: logger, 25 | } 26 | } 27 | 28 | type TarballNodeBootstrapper struct { 29 | url string 30 | dataDir string 31 | logger *zap.Logger 32 | } 33 | 34 | func (b *TarballNodeBootstrapper) isBootstrapped() bool { 35 | return isBootstrapped(b.dataDir, b.logger) 36 | } 37 | 38 | func (b *TarballNodeBootstrapper) Bootstrap() error 
{ 39 | if b.isBootstrapped() { 40 | return nil 41 | } 42 | 43 | b.logger.Info("bootstrapping native node chain data from pre-built archive", zap.String("bootstrap_data_url", b.url)) 44 | 45 | ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) 46 | defer cancel() 47 | 48 | reader, _, _, err := dstore.OpenObject(ctx, b.url, dstore.Compression("zstd")) 49 | if err != nil { 50 | return fmt.Errorf("cannot get snapshot from gstore: %w", err) 51 | } 52 | defer reader.Close() 53 | 54 | b.createChainData(reader) 55 | return nil 56 | } 57 | 58 | func (b *TarballNodeBootstrapper) createChainData(reader io.Reader) error { 59 | err := os.MkdirAll(b.dataDir, os.ModePerm) 60 | if err != nil { 61 | return fmt.Errorf("unable to create blocks log file: %w", err) 62 | } 63 | 64 | b.logger.Info("extracting bootstrapping data into node data directory", zap.String("data_dir", b.dataDir)) 65 | tr := tar.NewReader(reader) 66 | for { 67 | header, err := tr.Next() 68 | if err != nil { 69 | if err == io.EOF { 70 | return nil 71 | } 72 | 73 | return err 74 | } 75 | 76 | path := filepath.Join(b.dataDir, header.Name) 77 | b.logger.Debug("about to write content of entry", zap.String("name", header.Name), zap.String("path", path), zap.Bool("is_dir", header.FileInfo().IsDir())) 78 | if header.FileInfo().IsDir() { 79 | err = os.MkdirAll(path, os.ModePerm) 80 | if err != nil { 81 | return fmt.Errorf("unable to create directory: %w", err) 82 | } 83 | 84 | continue 85 | } 86 | 87 | file, err := os.Create(path) 88 | if err != nil { 89 | return fmt.Errorf("unable to create file: %w", err) 90 | } 91 | 92 | if _, err := io.Copy(file, tr); err != nil { 93 | file.Close() 94 | return err 95 | } 96 | file.Close() 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /relayer/app/relayer/app.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 
package relayer

import (
	"context"
	"fmt"
	"time"

	"github.com/streamingfast/bstream"
	"github.com/streamingfast/dmetrics"
	"github.com/streamingfast/dstore"
	"github.com/streamingfast/firehose-core/relayer"
	"github.com/streamingfast/firehose-core/relayer/metrics"
	"github.com/streamingfast/shutter"
	"go.uber.org/zap"
	pbhealth "google.golang.org/grpc/health/grpc_health_v1"
)

// RelayerStartAborted signals that the application was terminated while it
// was still resolving its start block.
var RelayerStartAborted = fmt.Errorf("getting start block aborted by relayer application terminating signal")

// Config holds the settings needed to run the relayer application.
type Config struct {
	SourcesAddr        []string      // gRPC addresses of the upstream block sources to multiplex
	GRPCListenAddr     string        // address the relayer's own gRPC server listens on
	SourceRequestBurst int           // request burst forwarded to the multiplexed source (see relayer.NewMultiplexedSource)
	MaxSourceLatency   time.Duration // maximum tolerated head latency for a source (forwarded to the multiplexed source)
	OneBlocksURL       string        // dstore URL of the one-block files store
}

// ZapFields renders the configuration as structured logging fields.
func (c *Config) ZapFields() []zap.Field {
	return []zap.Field{
		zap.Strings("sources_addr", c.SourcesAddr),
		zap.String("grpc_listen_addr", c.GRPCListenAddr),
		zap.Int("source_request_burst", c.SourceRequestBurst),
		zap.Duration("max_source_latency", c.MaxSourceLatency),
		zap.String("one_blocks_url", c.OneBlocksURL),
	}
}

// App wires a relayer.Relayer into the launcher's application life cycle.
type App struct {
	*shutter.Shutter
	config *Config

	relayer *relayer.Relayer
}

// New creates the application; the relayer itself is only built in Run.
func New(config *Config) *App {
	return &App{
		Shutter: shutter.New(),
		config:  config,
	}
}

// Run registers metrics, builds the multiplexed live source and the relayer,
// chains shutdown in both directions (app -> relayer and relayer -> app),
// then starts the relayer. It returns an error only for setup failures.
func (a *App) Run() error {
	dmetrics.Register(metrics.MetricSet)

	oneBlocksStore, err := dstore.NewDBinStore(a.config.OneBlocksURL)
	if err != nil {
		return fmt.Errorf("getting block store: %w", err)
	}

	// Each call to the factory multiplexes all configured source addresses
	// behind a single bstream handler.
	liveSourceFactory := bstream.SourceFactory(func(h bstream.Handler) bstream.Source {
		return relayer.NewMultiplexedSource(
			h,
			a.config.SourcesAddr,
			a.config.MaxSourceLatency,
			a.config.SourceRequestBurst,
		)
	})

	zlog.Info("starting relayer", a.config.ZapFields()...)
	a.relayer = relayer.NewRelayer(
		liveSourceFactory,
		a.config.GRPCListenAddr,
		oneBlocksStore,
	)

	a.OnTerminating(a.relayer.Shutdown)
	a.relayer.OnTerminated(a.Shutdown)

	a.relayer.Run()
	return nil
}

var emptyHealthCheckRequest = &pbhealth.HealthCheckRequest{}

// IsReady reports readiness by querying the relayer's own gRPC health
// endpoint; it is false until Run has created the relayer.
func (a *App) IsReady() bool {
	if a.relayer == nil {
		return false
	}

	resp, err := a.relayer.Check(context.Background(), emptyHealthCheckRequest)
	if err != nil {
		zlog.Info("readiness check failed", zap.Error(err))
		return false
	}

	return resp.Status == pbhealth.HealthCheckResponse_SERVING
}
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package relayer 16 | 17 | import ( 18 | "github.com/streamingfast/logging" 19 | ) 20 | 21 | var zlog, _ = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/relayer/app/relayer") 22 | -------------------------------------------------------------------------------- /relayer/healthz.go: -------------------------------------------------------------------------------- 1 | package relayer 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | pbhealth "google.golang.org/grpc/health/grpc_health_v1" 8 | ) 9 | 10 | func (r *Relayer) List(ctx context.Context, in *pbhealth.HealthListRequest) (*pbhealth.HealthListResponse, error) { 11 | return &pbhealth.HealthListResponse{ 12 | Statuses: map[string]*pbhealth.HealthCheckResponse{ 13 | "relayer": &pbhealth.HealthCheckResponse{ 14 | Status: r.healthStatus(), 15 | }, 16 | }, 17 | }, nil 18 | } 19 | 20 | func (r *Relayer) Check(ctx context.Context, in *pbhealth.HealthCheckRequest) (*pbhealth.HealthCheckResponse, error) { 21 | return &pbhealth.HealthCheckResponse{ 22 | Status: r.healthStatus(), 23 | }, nil 24 | } 25 | 26 | func (r *Relayer) Watch(req *pbhealth.HealthCheckRequest, stream pbhealth.Health_WatchServer) error { 27 | currentStatus := pbhealth.HealthCheckResponse_SERVICE_UNKNOWN 28 | waitTime := 0 * time.Second 29 | 30 | for { 31 | select { 32 | case <-stream.Context().Done(): 33 | return nil 34 | case <-time.After(waitTime): 35 | newStatus := r.healthStatus() 36 | waitTime = 5 * time.Second 37 | 38 | if newStatus != currentStatus { 39 | currentStatus = newStatus 40 | 41 | if err := stream.Send(&pbhealth.HealthCheckResponse{Status: currentStatus}); err != nil { 42 | return err 43 | } 44 | } 45 | } 46 | } 47 | } 48 | 49 | func (r *Relayer) healthStatus() pbhealth.HealthCheckResponse_ServingStatus { 50 | status := pbhealth.HealthCheckResponse_NOT_SERVING 51 | if r.ready { 52 | status 
= pbhealth.HealthCheckResponse_SERVING 53 | } 54 | 55 | return status 56 | } 57 | -------------------------------------------------------------------------------- /relayer/logging.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package relayer 16 | 17 | import ( 18 | "github.com/streamingfast/logging" 19 | ) 20 | 21 | var zlog, ztrace = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/relayer") 22 | -------------------------------------------------------------------------------- /relayer/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 dfuse Platform Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package metrics 16 | 17 | import ( 18 | "github.com/streamingfast/dmetrics" 19 | ) 20 | 21 | var MetricSet = dmetrics.NewSet() 22 | 23 | var HeadBlockTimeDrift = MetricSet.NewHeadTimeDrift("relayer") 24 | var HeadBlockNumber = MetricSet.NewHeadBlockNumber("relayer") 25 | var AppReadiness = MetricSet.NewAppReadiness("relayer") 26 | -------------------------------------------------------------------------------- /rpc/client_test.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | -------------------------------------------------------------------------------- /rpc/clients.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "sync" 7 | "time" 8 | 9 | "github.com/hashicorp/go-multierror" 10 | "go.uber.org/zap" 11 | ) 12 | 13 | var ErrorNoMoreClient = errors.New("no more clients") 14 | 15 | type Clients[C any] struct { 16 | clients []C 17 | maxBlockFetchDuration time.Duration 18 | rollingStrategy RollingStrategy[C] 19 | lock sync.Mutex 20 | logger *zap.Logger 21 | } 22 | 23 | func NewClients[C any](maxBlockFetchDuration time.Duration, rollingStrategy RollingStrategy[C], logger *zap.Logger) *Clients[C] { 24 | return &Clients[C]{ 25 | maxBlockFetchDuration: maxBlockFetchDuration, 26 | rollingStrategy: rollingStrategy, 27 | logger: logger, 28 | } 29 | } 30 | 31 | func (c *Clients[C]) StartSorting(ctx context.Context, direction SortDirection, sortValueFetcher SortValueFetcher[C], every time.Duration) { 32 | go func() { 33 | for { 34 | c.logger.Info("sorting clients") 35 | err := Sort(ctx, c, sortValueFetcher, direction) 36 | if err != nil { 37 | c.logger.Warn("sorting", zap.Error(err)) 38 | } 39 | 40 | switch s := c.rollingStrategy.(type) { 41 | case *StickyRollingStrategy[C]: 42 | s.firstCallToNewClient = true 43 | s.usedClientCount = 0 44 | s.nextClientIndex = 0 45 | case *RollingStrategyAlwaysUseFirst[C]: 46 | 
s.nextIndex = 0 47 | } 48 | 49 | time.Sleep(every) 50 | } 51 | }() 52 | } 53 | 54 | func (c *Clients[C]) Add(client C) { 55 | c.lock.Lock() 56 | defer c.lock.Unlock() 57 | c.clients = append(c.clients, client) 58 | } 59 | 60 | func WithClients[C any, V any](clients *Clients[C], f func(context.Context, C) (v V, err error)) (v V, err error) { 61 | clients.lock.Lock() 62 | defer clients.lock.Unlock() 63 | var errs error 64 | 65 | clients.rollingStrategy.reset() 66 | client, err := clients.rollingStrategy.next(clients) 67 | if err != nil { 68 | errs = multierror.Append(errs, err) 69 | return v, errs 70 | } 71 | 72 | for { 73 | 74 | ctx := context.Background() 75 | ctx, cancel := context.WithTimeout(ctx, clients.maxBlockFetchDuration) 76 | 77 | v, err := f(ctx, client) 78 | cancel() 79 | 80 | if err != nil { 81 | errs = multierror.Append(errs, err) 82 | client, err = clients.rollingStrategy.next(clients) 83 | if err != nil { 84 | errs = multierror.Append(errs, err) 85 | return v, errs 86 | } 87 | 88 | continue 89 | } 90 | return v, nil 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /rpc/rolling_strategy.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | type RollingStrategy[C any] interface { 4 | reset() 5 | next(clients *Clients[C]) (C, error) 6 | } 7 | 8 | type StickyRollingStrategy[C any] struct { 9 | firstCallToNewClient bool 10 | usedClientCount int 11 | nextClientIndex int 12 | } 13 | 14 | func NewStickyRollingStrategy[C any]() *StickyRollingStrategy[C] { 15 | return &StickyRollingStrategy[C]{ 16 | firstCallToNewClient: true, 17 | } 18 | } 19 | 20 | func (s *StickyRollingStrategy[C]) reset() { 21 | s.usedClientCount = 0 22 | } 23 | 24 | func (s *StickyRollingStrategy[C]) next(clients *Clients[C]) (client C, err error) { 25 | if len(clients.clients) == s.usedClientCount { 26 | return client, ErrorNoMoreClient 27 | } 28 | 29 | if s.firstCallToNewClient { 30 
| s.firstCallToNewClient = false 31 | client = clients.clients[0] 32 | s.usedClientCount = s.usedClientCount + 1 33 | s.nextClientIndex = s.nextClientIndex + 1 34 | return client, nil 35 | } 36 | 37 | if s.nextClientIndex == len(clients.clients) { //roll to 1st client 38 | s.nextClientIndex = 0 39 | } 40 | 41 | if s.usedClientCount == 0 { //just been reset 42 | s.nextClientIndex = s.prevIndex(clients) 43 | client = clients.clients[s.nextClientIndex] 44 | s.usedClientCount = s.usedClientCount + 1 45 | s.nextClientIndex = s.nextClientIndex + 1 46 | return client, nil 47 | } 48 | 49 | if s.nextClientIndex == len(clients.clients) { //roll to 1st client 50 | client = clients.clients[0] 51 | s.usedClientCount = s.usedClientCount + 1 52 | return client, nil 53 | } 54 | 55 | client = clients.clients[s.nextClientIndex] 56 | s.usedClientCount = s.usedClientCount + 1 57 | s.nextClientIndex = s.nextClientIndex + 1 58 | return client, nil 59 | } 60 | 61 | func (s *StickyRollingStrategy[C]) prevIndex(clients *Clients[C]) int { 62 | if s.nextClientIndex == 0 { 63 | return len(clients.clients) - 1 64 | } 65 | return s.nextClientIndex - 1 66 | } 67 | 68 | type RollingStrategyAlwaysUseFirst[C any] struct { 69 | nextIndex int 70 | } 71 | 72 | func NewRollingStrategyAlwaysUseFirst[C any]() *RollingStrategyAlwaysUseFirst[C] { 73 | return &RollingStrategyAlwaysUseFirst[C]{} 74 | } 75 | 76 | func (s *RollingStrategyAlwaysUseFirst[C]) reset() { 77 | s.nextIndex = 0 78 | } 79 | 80 | func (s *RollingStrategyAlwaysUseFirst[C]) next(c *Clients[C]) (client C, err error) { 81 | 82 | if len(c.clients) <= s.nextIndex { 83 | return client, ErrorNoMoreClient 84 | } 85 | client = c.clients[s.nextIndex] 86 | s.nextIndex++ 87 | return client, nil 88 | 89 | } 90 | -------------------------------------------------------------------------------- /rpc/rolling_strategy_test.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 
| "fmt" 6 | "testing" 7 | "time" 8 | 9 | "go.uber.org/zap" 10 | 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | type rollClient struct { 15 | callCount int 16 | name string 17 | sortValue uint64 18 | } 19 | 20 | func (r *rollClient) fetchSortValue(_ context.Context) (sortValue uint64, err error) { 21 | return r.sortValue, nil 22 | } 23 | 24 | func TestStickyRollingStrategy(t *testing.T) { 25 | 26 | rollingStrategy := NewStickyRollingStrategy[*rollClient]() 27 | rollingStrategy.reset() 28 | 29 | clients := NewClients(2*time.Second, rollingStrategy, zap.NewNop()) 30 | clients.Add(&rollClient{name: "c.1"}) 31 | clients.Add(&rollClient{name: "c.2"}) 32 | clients.Add(&rollClient{name: "c.3"}) 33 | clients.Add(&rollClient{name: "c.a"}) 34 | clients.Add(&rollClient{name: "c.b"}) 35 | 36 | var clientNames []string 37 | _, err := WithClients(clients, func(ctx context.Context, client *rollClient) (v any, err error) { 38 | clientNames = append(clientNames, client.name) 39 | if client.name == "c.3" { 40 | return nil, nil 41 | } 42 | 43 | return nil, fmt.Errorf("next please") 44 | }) 45 | 46 | require.NoError(t, err) 47 | //require.ErrorIs(t, err, ErrorNoMoreClient) 48 | require.Equal(t, []string{"c.1", "c.2", "c.3"}, clientNames) 49 | 50 | _, err = WithClients(clients, func(ctx context.Context, client *rollClient) (v any, err error) { 51 | clientNames = append(clientNames, client.name) 52 | return nil, fmt.Errorf("next please") 53 | }) 54 | 55 | require.ErrorIs(t, err, ErrorNoMoreClient) 56 | require.Equal(t, []string{"c.1", "c.2", "c.3", "c.3", "c.a", "c.b", "c.1", "c.2"}, clientNames) 57 | 58 | } 59 | -------------------------------------------------------------------------------- /rpc/sort.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | "sort" 6 | ) 7 | 8 | type SortValueFetcher[C any] interface { 9 | FetchSortValue(ctx context.Context, client C) (sortValue uint64, err error) 
10 | } 11 | 12 | type SortDirection int 13 | 14 | const ( 15 | SortDirectionAscending SortDirection = iota 16 | SortDirectionDescending 17 | ) 18 | 19 | func Sort[C any](ctx context.Context, clients *Clients[C], sortValueFetch SortValueFetcher[C], direction SortDirection) error { 20 | type sortable struct { 21 | clientIndex int 22 | sortValue uint64 23 | } 24 | var sortableValues []sortable 25 | for i, client := range clients.clients { 26 | var v uint64 27 | var err error 28 | v, err = sortValueFetch.FetchSortValue(ctx, client) 29 | if err != nil { 30 | //do nothing 31 | } 32 | sortableValues = append(sortableValues, sortable{i, v}) 33 | } 34 | 35 | sort.Slice(sortableValues, func(i, j int) bool { 36 | if direction == SortDirectionAscending { 37 | return sortableValues[i].sortValue < sortableValues[j].sortValue 38 | } 39 | return sortableValues[i].sortValue > sortableValues[j].sortValue 40 | }) 41 | 42 | var sorted []C 43 | for _, v := range sortableValues { 44 | sorted = append(sorted, clients.clients[v.clientIndex]) 45 | } 46 | 47 | clients.lock.Lock() 48 | defer clients.lock.Unlock() 49 | clients.clients = sorted 50 | 51 | return nil 52 | } 53 | -------------------------------------------------------------------------------- /rpc/sort_test.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "go.uber.org/zap" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | type testSortFetcher struct { 14 | } 15 | 16 | func (t *testSortFetcher) FetchSortValue(ctx context.Context, client *rollClient) (sortValue uint64, err error) { 17 | return client.sortValue, nil 18 | } 19 | 20 | func TestClientsSort(t *testing.T) { 21 | rollingStrategy := NewStickyRollingStrategy[*rollClient]() 22 | rollingStrategy.reset() 23 | 24 | clients := NewClients(2*time.Second, rollingStrategy, zap.NewNop()) 25 | clients.Add(&rollClient{name: "c.1", sortValue: 100}) 26 | 
clients.Add(&rollClient{name: "c.2", sortValue: 101}) 27 | clients.Add(&rollClient{name: "c.3", sortValue: 102}) 28 | clients.Add(&rollClient{name: "c.a", sortValue: 103}) 29 | clients.Add(&rollClient{name: "c.b", sortValue: 104}) 30 | 31 | fetcher := &testSortFetcher{} 32 | 33 | err := Sort(context.Background(), clients, fetcher, SortDirectionDescending) 34 | require.NoError(t, err) 35 | 36 | var names []string 37 | for _, client := range clients.clients { 38 | names = append(names, client.name) 39 | } 40 | 41 | require.Equal(t, []string{"c.b", "c.a", "c.3", "c.2", "c.1"}, names) 42 | 43 | err = Sort(context.Background(), clients, fetcher, SortDirectionAscending) 44 | require.NoError(t, err) 45 | 46 | names = []string{} 47 | for _, client := range clients.clients { 48 | names = append(names, client.name) 49 | } 50 | 51 | require.Equal(t, []string{"c.1", "c.2", "c.3", "c.a", "c.b"}, names) 52 | 53 | } 54 | -------------------------------------------------------------------------------- /storage_test.go: -------------------------------------------------------------------------------- 1 | package firecore 2 | 3 | import ( 4 | "path/filepath" 5 | "testing" 6 | 7 | "github.com/spf13/viper" 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func Test_searchBlockNum(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | startBlockNum uint64 16 | lastBlockNum *uint64 17 | expect int 18 | expectErr bool 19 | }{ 20 | {"golden path", 1_690_600, uptr(208_853_300), 208_853_300, false}, 21 | {"no block file found", 1_690_600, nil, 1_690_600, false}, 22 | {"block file greater then start block", 0, uptr(100), 100, false}, 23 | {"block file less then start block", 200, uptr(100), 200, false}, 24 | {"golden path 2", 0, uptr(17821900), 17821900, false}, 25 | } 26 | for _, tt := range tests { 27 | t.Run("", func(t *testing.T) { 28 | dstoreOpt := 0 29 | v, err := searchBlockNum(tt.startBlockNum, func(i uint64) (bool, error) { 30 
| dstoreOpt++ 31 | if tt.lastBlockNum == nil { 32 | return false, nil 33 | } 34 | if i > *tt.lastBlockNum { 35 | return false, nil 36 | } 37 | return true, nil 38 | }) 39 | if tt.expectErr { 40 | require.Error(t, err) 41 | } else { 42 | require.NoError(t, err) 43 | assert.Equal(t, tt.expect, int(v)) 44 | } 45 | }) 46 | } 47 | } 48 | 49 | func uptr(v uint64) *uint64 { 50 | return &v 51 | } 52 | 53 | func TestGetTmpDir(t *testing.T) { 54 | dataDir := t.TempDir() 55 | testSetViper(t, "common-tmp-dir", "{data-dir}/value") 56 | 57 | dir, err := GetTmpDir(dataDir) 58 | assert.NoError(t, err) 59 | assert.Equal(t, filepath.Join(dataDir, "value"), dir) 60 | 61 | dir2, err2 := GetTmpDir(dataDir) 62 | assert.NoError(t, err2) 63 | assert.Equal(t, filepath.Join(dataDir, "value"), dir2) 64 | } 65 | 66 | func testSetViper(t *testing.T, key string, value string) { 67 | current := viper.Get(key) 68 | t.Cleanup(func() { 69 | viper.Set(key, current) 70 | }) 71 | viper.Set(key, value) 72 | } 73 | -------------------------------------------------------------------------------- /superviser/genericsupervisor.go: -------------------------------------------------------------------------------- 1 | package superviser 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/ShinyTrinkets/overseer" 7 | nodeManager "github.com/streamingfast/firehose-core/node-manager" 8 | "github.com/streamingfast/firehose-core/node-manager/superviser" 9 | "go.uber.org/zap" 10 | ) 11 | 12 | var ( 13 | SupervisorFactory = newGenericSupervisor 14 | ) 15 | 16 | type GenericSuperviser struct { 17 | *superviser.Superviser 18 | 19 | binary string 20 | arguments []string 21 | name string 22 | } 23 | 24 | // This is the default implementation of the Chain Supervisor. 
If you wish to override the implementation for 25 | // your given chain you can override the 'SupervisorFactory' variable 26 | func newGenericSupervisor(name, binary string, arguments []string, lineBufferSize uint64, appLogger *zap.Logger) nodeManager.ChainSuperviser { 27 | if overseer.DEFAULT_LINE_BUFFER_SIZE < int(lineBufferSize) { 28 | overseer.DEFAULT_LINE_BUFFER_SIZE = int(lineBufferSize) 29 | } 30 | 31 | return &GenericSuperviser{ 32 | Superviser: superviser.New(appLogger, binary, arguments), 33 | name: name, 34 | binary: binary, 35 | arguments: arguments, 36 | } 37 | } 38 | 39 | func (g *GenericSuperviser) GetCommand() string { 40 | return g.binary + " " + strings.Join(g.arguments, " ") 41 | } 42 | 43 | func (g *GenericSuperviser) GetName() string { 44 | return g.name 45 | } 46 | 47 | func (g *GenericSuperviser) ServerID() (string, error) { 48 | return "", nil 49 | } 50 | -------------------------------------------------------------------------------- /superviser/logging.go: -------------------------------------------------------------------------------- 1 | package superviser 2 | 3 | import ( 4 | logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" 5 | ) 6 | 7 | // This file configures a logging reader that transforms log lines received from the blockchain process running 8 | // and then logs them inside the Firehose stack logging system. 9 | // 10 | // A default implementation uses a regex to identify the level of the line and turn it into our internal level value. 
11 | // 12 | // You should override the `GetLogLevelFunc` above to determine the log level for your speficic chain 13 | func NewNodeLogPlugin(debugFirehose bool) logplugin.LogPlugin { 14 | return logplugin.NewToConsoleLogPlugin(debugFirehose) 15 | } 16 | -------------------------------------------------------------------------------- /test/metering_server.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "testing" 7 | 8 | "github.com/test-go/testify/require" 9 | 10 | pbmetering "github.com/streamingfast/dmetering/pb/sf/metering/v1" 11 | "google.golang.org/grpc" 12 | "google.golang.org/protobuf/types/known/emptypb" 13 | ) 14 | 15 | type MeteringTestServer struct { 16 | pbmetering.UnimplementedMeteringServer 17 | httpListenAddr string 18 | t *testing.T 19 | bufferedEvents []*pbmetering.Events 20 | } 21 | 22 | func NewMeteringServer(t *testing.T, httpListenAddr string) *MeteringTestServer { 23 | return &MeteringTestServer{ 24 | t: t, 25 | httpListenAddr: httpListenAddr, 26 | bufferedEvents: make([]*pbmetering.Events, 0), 27 | } 28 | } 29 | 30 | func (s *MeteringTestServer) Run() { 31 | lis, err := net.Listen("tcp", s.httpListenAddr) 32 | if err != nil { 33 | require.NoError(s.t, err) 34 | } 35 | 36 | grpcServer := grpc.NewServer() 37 | 38 | pbmetering.RegisterMeteringServer(grpcServer, s) 39 | 40 | s.t.Logf("[Metering]: Server listening port %s", s.httpListenAddr) 41 | if err = grpcServer.Serve(lis); err != nil { 42 | require.NoError(s.t, err) 43 | } 44 | } 45 | 46 | func (s *MeteringTestServer) Emit(ctx context.Context, events *pbmetering.Events) (*emptypb.Empty, error) { 47 | s.bufferedEvents = append(s.bufferedEvents, events) 48 | return &emptypb.Empty{}, nil 49 | } 50 | 51 | func (s *MeteringTestServer) mustEmbedUnimplementedMeteringServer() { 52 | panic("implement me") 53 | } 54 | 55 | func (s *MeteringTestServer) clearBufferedEvents() { 56 | s.bufferedEvents = 
make([]*pbmetering.Events, 0) 57 | } 58 | -------------------------------------------------------------------------------- /test/substreams_acme/.gitignore: -------------------------------------------------------------------------------- 1 | # substreams auth file 2 | .substreams.env 3 | 4 | # Compiled source files 5 | target/ 6 | 7 | # Sink data when running any sinker 8 | sink-data/ 9 | 10 | # The spkg packed by the subtreams cli 11 | *.spkg -------------------------------------------------------------------------------- /test/substreams_acme/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "substreams_acme" 3 | version = "0.0.1" 4 | edition = "2021" 5 | 6 | [lib] 7 | name = "substreams" 8 | crate-type = ["cdylib"] 9 | 10 | [dependencies] 11 | ethabi = "17" 12 | hex-literal = "0.3.4" 13 | num-bigint = "0.4" 14 | num-traits = "0.2.15" 15 | prost = "0.11" 16 | prost-types = "0.11" 17 | substreams = "0.5.21" 18 | substreams-ethereum = "0.9" 19 | substreams-database-change = "1" 20 | substreams-entity-change = "1" 21 | 22 | # Required so that ethabi > ethereum-types build correctly under wasm32-unknown-unknown 23 | [target.wasm32-unknown-unknown.dependencies] 24 | getrandom = { version = "0.2", features = ["custom"] } 25 | 26 | [build-dependencies] 27 | anyhow = "1" 28 | substreams-ethereum = "0.9" 29 | regex = "1.8" 30 | 31 | [profile.release] 32 | lto = true 33 | opt-level = 's' 34 | strip = "debuginfo" 35 | -------------------------------------------------------------------------------- /test/substreams_acme/README.md: -------------------------------------------------------------------------------- 1 | # substreams_acme Substreams modules 2 | 3 | This package was initialized via `substreams init`, using the `evm-minimal` template. 
4 | 5 | ## Usage 6 | 7 | ```bash 8 | substreams build 9 | substreams auth 10 | substreams gui 11 | ``` 12 | 13 | ## Modules 14 | 15 | ### `map_my_data` 16 | 17 | This module extracts small bits of block data, and does simple computations over the 18 | number of **transactions** in each block. 19 | -------------------------------------------------------------------------------- /test/substreams_acme/buf.gen.yaml: -------------------------------------------------------------------------------- 1 | 2 | version: v1 3 | plugins: 4 | - plugin: buf.build/community/neoeinstein-prost:v0.2.2 5 | out: /Users/arnaudberger/StreamingFast/firehose-core/test/substreams_acme/src/pb 6 | opt: 7 | - file_descriptor_set=false 8 | 9 | - plugin: buf.build/community/neoeinstein-prost-crate:v0.3.1 10 | out: /Users/arnaudberger/StreamingFast/firehose-core/test/substreams_acme/src/pb 11 | opt: 12 | - no_features 13 | -------------------------------------------------------------------------------- /test/substreams_acme/generator.json: -------------------------------------------------------------------------------- 1 | { 2 | "generator": "evm-minimal", 3 | "state": { 4 | "name": "substreams_acme", 5 | "chainName": "mainnet" 6 | } 7 | } -------------------------------------------------------------------------------- /test/substreams_acme/proto/sf/acme/type/v1/type.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package sf.acme.type.v1; 4 | 5 | option go_package = "github.com/streamingfast/firehose-acme/pb/sf/acme/type/v1;pbacme"; 6 | 7 | message BlockHeader { 8 | uint64 height = 1; 9 | string hash = 2; 10 | optional uint64 previous_num = 3; 11 | optional string previous_hash = 4; 12 | uint64 final_num = 5; 13 | string final_hash = 6; 14 | uint64 timestamp = 7; 15 | } 16 | 17 | message Block { 18 | BlockHeader header = 1; 19 | repeated Transaction transactions = 2; 20 | } 21 | 22 | message Transaction { 23 | string type = 1; 
24 | string hash = 2; 25 | string sender = 3; 26 | string receiver = 4; 27 | BigInt amount = 5; 28 | BigInt fee = 6; 29 | bool success = 7; 30 | repeated Event events = 8; 31 | } 32 | 33 | message Event { 34 | string type = 1; 35 | repeated Attribute attributes = 2; 36 | } 37 | 38 | message Attribute { 39 | string key = 1; 40 | string value = 2; 41 | } 42 | 43 | message BigInt { 44 | bytes bytes = 1; 45 | } 46 | -------------------------------------------------------------------------------- /test/substreams_acme/proto/testdata.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "google/protobuf/timestamp.proto"; 4 | 5 | package testdata.v1; 6 | 7 | message TestData { 8 | string block_hash = 1; 9 | uint64 block_number = 2; 10 | google.protobuf.Timestamp block_timestamp = 3; 11 | uint64 transactions_len = 4; 12 | } -------------------------------------------------------------------------------- /test/substreams_acme/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod pb; 2 | 3 | use num_traits::ToPrimitive; 4 | use pb::testdata::v1 as testdata; 5 | use pb::sf::acme::r#type::v1::Block; 6 | 7 | use substreams::Hex; 8 | 9 | 10 | substreams_ethereum::init!(); 11 | 12 | #[substreams::handlers::map] 13 | fn map_test_data(blk: Block) -> testdata::TestData { 14 | let mut test_data = testdata::TestData::default(); 15 | let header = blk.header.clone().unwrap(); 16 | test_data.block_hash = Hex(header.hash).to_string(); 17 | test_data.block_number = header.height.to_u64().unwrap(); 18 | test_data 19 | } 20 | -------------------------------------------------------------------------------- /test/substreams_acme/src/pb/mod.rs: -------------------------------------------------------------------------------- 1 | // @generated 2 | pub mod sf { 3 | pub mod acme { 4 | pub mod r#type { 5 | // @@protoc_insertion_point(attribute:sf.acme.type.v1) 6 | pub mod v1 { 7 | 
include!("sf.acme.type.v1.rs"); 8 | // @@protoc_insertion_point(sf.acme.type.v1) 9 | } 10 | } 11 | } 12 | } 13 | pub mod testdata { 14 | // @@protoc_insertion_point(attribute:testdata.v1) 15 | pub mod v1 { 16 | include!("testdata.v1.rs"); 17 | // @@protoc_insertion_point(testdata.v1) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /test/substreams_acme/src/pb/sf.acme.type.v1.rs: -------------------------------------------------------------------------------- 1 | // @generated 2 | #[allow(clippy::derive_partial_eq_without_eq)] 3 | #[derive(Clone, PartialEq, ::prost::Message)] 4 | pub struct BlockHeader { 5 | #[prost(uint64, tag="1")] 6 | pub height: u64, 7 | #[prost(string, tag="2")] 8 | pub hash: ::prost::alloc::string::String, 9 | #[prost(uint64, optional, tag="3")] 10 | pub previous_num: ::core::option::Option, 11 | #[prost(string, optional, tag="4")] 12 | pub previous_hash: ::core::option::Option<::prost::alloc::string::String>, 13 | #[prost(uint64, tag="5")] 14 | pub final_num: u64, 15 | #[prost(string, tag="6")] 16 | pub final_hash: ::prost::alloc::string::String, 17 | #[prost(uint64, tag="7")] 18 | pub timestamp: u64, 19 | } 20 | #[allow(clippy::derive_partial_eq_without_eq)] 21 | #[derive(Clone, PartialEq, ::prost::Message)] 22 | pub struct Block { 23 | #[prost(message, optional, tag="1")] 24 | pub header: ::core::option::Option, 25 | #[prost(message, repeated, tag="2")] 26 | pub transactions: ::prost::alloc::vec::Vec, 27 | } 28 | #[allow(clippy::derive_partial_eq_without_eq)] 29 | #[derive(Clone, PartialEq, ::prost::Message)] 30 | pub struct Transaction { 31 | #[prost(string, tag="1")] 32 | pub r#type: ::prost::alloc::string::String, 33 | #[prost(string, tag="2")] 34 | pub hash: ::prost::alloc::string::String, 35 | #[prost(string, tag="3")] 36 | pub sender: ::prost::alloc::string::String, 37 | #[prost(string, tag="4")] 38 | pub receiver: ::prost::alloc::string::String, 39 | #[prost(message, optional, 
tag="5")] 40 | pub amount: ::core::option::Option, 41 | #[prost(message, optional, tag="6")] 42 | pub fee: ::core::option::Option, 43 | #[prost(bool, tag="7")] 44 | pub success: bool, 45 | #[prost(message, repeated, tag="8")] 46 | pub events: ::prost::alloc::vec::Vec, 47 | } 48 | #[allow(clippy::derive_partial_eq_without_eq)] 49 | #[derive(Clone, PartialEq, ::prost::Message)] 50 | pub struct Event { 51 | #[prost(string, tag="1")] 52 | pub r#type: ::prost::alloc::string::String, 53 | #[prost(message, repeated, tag="2")] 54 | pub attributes: ::prost::alloc::vec::Vec, 55 | } 56 | #[allow(clippy::derive_partial_eq_without_eq)] 57 | #[derive(Clone, PartialEq, ::prost::Message)] 58 | pub struct Attribute { 59 | #[prost(string, tag="1")] 60 | pub key: ::prost::alloc::string::String, 61 | #[prost(string, tag="2")] 62 | pub value: ::prost::alloc::string::String, 63 | } 64 | #[allow(clippy::derive_partial_eq_without_eq)] 65 | #[derive(Clone, PartialEq, ::prost::Message)] 66 | pub struct BigInt { 67 | #[prost(bytes="vec", tag="1")] 68 | pub bytes: ::prost::alloc::vec::Vec, 69 | } 70 | // @@protoc_insertion_point(module) 71 | -------------------------------------------------------------------------------- /test/substreams_acme/src/pb/testdata.v1.rs: -------------------------------------------------------------------------------- 1 | // @generated 2 | #[allow(clippy::derive_partial_eq_without_eq)] 3 | #[derive(Clone, PartialEq, ::prost::Message)] 4 | pub struct TestData { 5 | #[prost(string, tag="1")] 6 | pub block_hash: ::prost::alloc::string::String, 7 | #[prost(uint64, tag="2")] 8 | pub block_number: u64, 9 | #[prost(message, optional, tag="3")] 10 | pub block_timestamp: ::core::option::Option<::prost_types::Timestamp>, 11 | #[prost(uint64, tag="4")] 12 | pub transactions_len: u64, 13 | } 14 | // @@protoc_insertion_point(module) 15 | -------------------------------------------------------------------------------- /test/substreams_acme/substreams.yaml: 
-------------------------------------------------------------------------------- 1 | specVersion: v0.1.0 2 | package: 3 | name: substreams_acme 4 | version: v0.1.0 5 | 6 | protobuf: 7 | files: 8 | - testdata.proto 9 | - sf/acme/type/v1/type.proto 10 | importPaths: 11 | - ./proto 12 | excludePaths: 13 | - sf/substreams 14 | - google 15 | 16 | binaries: 17 | default: 18 | type: wasm/rust-v1 19 | file: ./target/wasm32-unknown-unknown/release/substreams.wasm 20 | 21 | modules: 22 | - name: map_test_data 23 | kind: map 24 | inputs: 25 | - source: sf.acme.type.v1.Block 26 | output : 27 | type: proto:testdata.v1.TestData 28 | 29 | network: mainnet 30 | -------------------------------------------------------------------------------- /test/utils.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "os/exec" 8 | "testing" 9 | ) 10 | 11 | func loggingStdout(t *testing.T, stdoutPipe io.ReadCloser, instance string) { 12 | go func() { 13 | scanner := bufio.NewScanner(stdoutPipe) 14 | for scanner.Scan() { 15 | // Log the stdout output as it comes in 16 | t.Logf("[%s stdout]: %s", instance, scanner.Text()) 17 | } 18 | if err := scanner.Err(); err != nil { 19 | t.Logf("Error reading %s stdout: %v", instance, err) 20 | } 21 | }() 22 | } 23 | 24 | func loggingStderr(t *testing.T, stderrPipe io.ReadCloser, instance string) { 25 | go func() { 26 | scanner := bufio.NewScanner(stderrPipe) 27 | for scanner.Scan() { 28 | t.Logf("[%s stderr]: %s", instance, scanner.Text()) 29 | } 30 | if err := scanner.Err(); err != nil { 31 | t.Logf("Error reading %s stderr: %v", instance, err) 32 | } 33 | }() 34 | } 35 | 36 | func handlingTestInstance(t *testing.T, cmd *exec.Cmd, instance string, withLog bool) error { 37 | stdoutPipe, err := cmd.StdoutPipe() 38 | if err != nil { 39 | return err 40 | } 41 | stderrPipe, err := cmd.StderrPipe() 42 | if err != nil { 43 | return err 44 | } 45 | 46 | if withLog 
{ 47 | loggingStdout(t, stdoutPipe, instance) 48 | loggingStderr(t, stderrPipe, instance) 49 | } 50 | 51 | if err = cmd.Start(); err != nil { 52 | return err 53 | } 54 | 55 | if err = cmd.Wait(); err != nil { 56 | return fmt.Errorf("%s process failed: %w", instance, err) 57 | } 58 | 59 | return err 60 | } 61 | -------------------------------------------------------------------------------- /types/block_range.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | 7 | "github.com/streamingfast/bstream" 8 | ) 9 | 10 | //go:generate go-enum -f=$GOFILE --marshal --names --nocase 11 | 12 | // ENUM( 13 | // 14 | // Inclusive 15 | // Exclusive 16 | // 17 | // ) 18 | type RangeBoundary int 19 | 20 | const ( 21 | EndBoundaryInclusive RangeBoundary = RangeBoundaryInclusive 22 | EndBoundaryExclusive = RangeBoundaryExclusive 23 | ) 24 | 25 | // BlockRange is actually an UnresolvedBlockRange so both the start and end could be 26 | // negative values. 27 | // 28 | // This is in opposition to `bstream.Range` which is a resolved range meaning that start/stop 29 | // values will never be negative. 30 | type BlockRange struct { 31 | Start int64 32 | Stop *uint64 33 | } 34 | 35 | func NewOpenRange(start int64) BlockRange { 36 | return BlockRange{Start: int64(start), Stop: nil} 37 | } 38 | 39 | func NewClosedRange(start int64, stop uint64) BlockRange { 40 | return BlockRange{Start: start, Stop: &stop} 41 | } 42 | 43 | // IsResolved returns true if the range is both closed and fully 44 | // resolved (e.g. both start and stop are positive values). Returns 45 | // false otherwise. 
46 | func (b BlockRange) IsResolved() bool { 47 | return b.Start >= 0 && b.IsClosed() 48 | } 49 | 50 | func (b BlockRange) IsOpen() bool { 51 | return b.Stop == nil 52 | } 53 | 54 | func (b BlockRange) IsClosed() bool { 55 | return b.Stop != nil 56 | } 57 | 58 | func (b BlockRange) GetStartBlock() int64 { 59 | return b.Start 60 | } 61 | 62 | func (b BlockRange) BlockCount() int64 { 63 | if !b.IsResolved() { 64 | return math.MaxInt64 65 | } 66 | 67 | return int64(*b.Stop) - b.Start + 1 68 | } 69 | 70 | func (b BlockRange) GetStopBlockOr(defaultIfOpenRange uint64) uint64 { 71 | if b.IsOpen() { 72 | return defaultIfOpenRange 73 | } 74 | 75 | return *b.Stop 76 | } 77 | 78 | func (b BlockRange) MustGetStopBlock() uint64 { 79 | if b.IsOpen() { 80 | panic("cannot get stop block of an open range") 81 | } 82 | 83 | return *b.Stop 84 | } 85 | 86 | func (b BlockRange) ReprocRange() string { 87 | if !b.IsClosed() { 88 | return "" 89 | } 90 | 91 | if !b.IsResolved() { 92 | return "" 93 | } 94 | 95 | return fmt.Sprintf("%d:%d", b.Start, *b.Stop+1) 96 | } 97 | 98 | func (b BlockRange) Contains(blockNum uint64, endBoundary RangeBoundary) bool { 99 | if blockNum < uint64(b.Start) { 100 | return false 101 | } 102 | 103 | if b.Stop == nil { 104 | return true 105 | } 106 | 107 | endBlock := *b.Stop 108 | if blockNum > endBlock { 109 | return false 110 | } 111 | if endBoundary == RangeBoundaryExclusive && blockNum == endBlock { 112 | return false 113 | } 114 | 115 | return true 116 | } 117 | 118 | func (b BlockRange) Split(chunkSize uint64, endBoundary RangeBoundary) ([]BlockRange, error) { 119 | segments, err := b.ToBstreamRange(endBoundary).Split(chunkSize) 120 | if err != nil { 121 | return nil, fmt.Errorf("splitting ranges: %w", err) 122 | } 123 | 124 | out := make([]BlockRange, len(segments)) 125 | for i, segment := range segments { 126 | out[i] = NewClosedRange(int64(segment.StartBlock()), *segment.EndBlock()) 127 | } 128 | 129 | return out, nil 130 | } 131 | 132 | func (b 
BlockRange) ToBstreamRange(endBoundary RangeBoundary) *bstream.Range { 133 | if b.Start < 0 { 134 | panic(fmt.Errorf("cannot convert unresolved block range to bstream.Range: %s", b)) 135 | } 136 | 137 | if b.IsOpen() { 138 | return bstream.NewOpenRange(uint64(b.Start)) 139 | } 140 | 141 | if endBoundary == RangeBoundaryExclusive { 142 | return bstream.NewRangeExcludingEnd(uint64(b.Start), *b.Stop) 143 | } 144 | 145 | return bstream.NewInclusiveRange(uint64(b.Start), *b.Stop) 146 | } 147 | 148 | func (b BlockRange) String() string { 149 | if b.IsOpen() { 150 | return fmt.Sprintf("[%s, +∞]", BlockNum(b.Start)) 151 | } 152 | 153 | return fmt.Sprintf("[%s, %s]", BlockNum(b.Start), BlockNum(*b.Stop)) 154 | } 155 | -------------------------------------------------------------------------------- /types/block_range_enum.go: -------------------------------------------------------------------------------- 1 | // Code generated by go-enum DO NOT EDIT. 2 | // Version: 3 | // Revision: 4 | // Build Date: 5 | // Built By: 6 | 7 | package types 8 | 9 | import ( 10 | "fmt" 11 | "strings" 12 | ) 13 | 14 | const ( 15 | // RangeBoundaryInclusive is a RangeBoundary of type Inclusive. 16 | RangeBoundaryInclusive RangeBoundary = iota 17 | // RangeBoundaryExclusive is a RangeBoundary of type Exclusive. 18 | RangeBoundaryExclusive 19 | ) 20 | 21 | var ErrInvalidRangeBoundary = fmt.Errorf("not a valid RangeBoundary, try [%s]", strings.Join(_RangeBoundaryNames, ", ")) 22 | 23 | const _RangeBoundaryName = "InclusiveExclusive" 24 | 25 | var _RangeBoundaryNames = []string{ 26 | _RangeBoundaryName[0:9], 27 | _RangeBoundaryName[9:18], 28 | } 29 | 30 | // RangeBoundaryNames returns a list of possible string values of RangeBoundary. 
31 | func RangeBoundaryNames() []string { 32 | tmp := make([]string, len(_RangeBoundaryNames)) 33 | copy(tmp, _RangeBoundaryNames) 34 | return tmp 35 | } 36 | 37 | var _RangeBoundaryMap = map[RangeBoundary]string{ 38 | RangeBoundaryInclusive: _RangeBoundaryName[0:9], 39 | RangeBoundaryExclusive: _RangeBoundaryName[9:18], 40 | } 41 | 42 | // String implements the Stringer interface. 43 | func (x RangeBoundary) String() string { 44 | if str, ok := _RangeBoundaryMap[x]; ok { 45 | return str 46 | } 47 | return fmt.Sprintf("RangeBoundary(%d)", x) 48 | } 49 | 50 | // IsValid provides a quick way to determine if the typed value is 51 | // part of the allowed enumerated values 52 | func (x RangeBoundary) IsValid() bool { 53 | _, ok := _RangeBoundaryMap[x] 54 | return ok 55 | } 56 | 57 | var _RangeBoundaryValue = map[string]RangeBoundary{ 58 | _RangeBoundaryName[0:9]: RangeBoundaryInclusive, 59 | strings.ToLower(_RangeBoundaryName[0:9]): RangeBoundaryInclusive, 60 | _RangeBoundaryName[9:18]: RangeBoundaryExclusive, 61 | strings.ToLower(_RangeBoundaryName[9:18]): RangeBoundaryExclusive, 62 | } 63 | 64 | // ParseRangeBoundary attempts to convert a string to a RangeBoundary. 65 | func ParseRangeBoundary(name string) (RangeBoundary, error) { 66 | if x, ok := _RangeBoundaryValue[name]; ok { 67 | return x, nil 68 | } 69 | // Case insensitive parse, do a separate lookup to prevent unnecessary cost of lowercasing a string if we don't need to. 70 | if x, ok := _RangeBoundaryValue[strings.ToLower(name)]; ok { 71 | return x, nil 72 | } 73 | return RangeBoundary(0), fmt.Errorf("%s is %w", name, ErrInvalidRangeBoundary) 74 | } 75 | 76 | // MarshalText implements the text marshaller method. 77 | func (x RangeBoundary) MarshalText() ([]byte, error) { 78 | return []byte(x.String()), nil 79 | } 80 | 81 | // UnmarshalText implements the text unmarshaller method. 
82 | func (x *RangeBoundary) UnmarshalText(text []byte) error { 83 | name := string(text) 84 | tmp, err := ParseRangeBoundary(name) 85 | if err != nil { 86 | return err 87 | } 88 | *x = tmp 89 | return nil 90 | } 91 | -------------------------------------------------------------------------------- /types/flags.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/spf13/cobra" 8 | "github.com/streamingfast/bstream" 9 | "github.com/streamingfast/cli/sflags" 10 | ) 11 | 12 | // GetBlockRangeFromArg returns the block range from the given argument or the range 13 | // [HEAD, +∞] if the argument is empty. 14 | func GetBlockRangeFromArg(in string) (out BlockRange, err error) { 15 | return ParseBlockRangeDefault(in, bstream.GetProtocolFirstStreamableBlock, NewOpenRange(-1)) 16 | } 17 | 18 | // GetBlockRangeFromArgDefault returns a block range from a string argument, using the default block range 19 | // `defaultRange` if the input is empty. 20 | func GetBlockRangeFromArgDefault(in string, defaultRange BlockRange) (out BlockRange, err error) { 21 | return ParseBlockRangeDefault(in, bstream.GetProtocolFirstStreamableBlock, defaultRange) 22 | } 23 | 24 | // GetBlockRangeFromFlag returns the block range from the given flag name or the range 25 | // [HEAD, +∞] if the flag is not set. 26 | func GetBlockRangeFromFlag(cmd *cobra.Command, flagName string) (out BlockRange, err error) { 27 | return GetBlockRangeFromFlagDefault(cmd, flagName, NewOpenRange(-1)) 28 | } 29 | 30 | // GetBlockRangeFromFlagDefault returns a block range from a flag, using the default block range 31 | // `defaultRange` if the flag is not set at all. 
32 | func GetBlockRangeFromFlagDefault(cmd *cobra.Command, flagName string, defaultRange BlockRange) (out BlockRange, err error) { 33 | stringRange := sflags.MustGetString(cmd, flagName) 34 | 35 | rawRanges := strings.Split(stringRange, ",") 36 | if len(rawRanges) == 0 { 37 | return defaultRange, nil 38 | } 39 | 40 | if len(rawRanges) > 1 { 41 | return out, fmt.Errorf("accepting a single range for now, got %d", len(rawRanges)) 42 | } 43 | 44 | out, err = ParseBlockRangeDefault(rawRanges[0], bstream.GetProtocolFirstStreamableBlock, defaultRange) 45 | if err != nil { 46 | return out, fmt.Errorf("decode range: %w", err) 47 | } 48 | 49 | return 50 | } 51 | -------------------------------------------------------------------------------- /types/types.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "strings" 7 | 8 | "github.com/dustin/go-humanize" 9 | ) 10 | 11 | type BlockNum int64 12 | 13 | var HeadBlockNum BlockNum = -1 14 | 15 | func (b BlockNum) String() string { 16 | if b < 0 { 17 | if b == HeadBlockNum { 18 | return "HEAD" 19 | } 20 | 21 | return fmt.Sprintf("HEAD - %d", uint64(math.Abs(float64(b)))) 22 | } 23 | 24 | return "#" + strings.ReplaceAll(humanize.Comma(int64(b)), ",", " ") 25 | } 26 | -------------------------------------------------------------------------------- /types/types_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestBlockRange_String(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | blockRange BlockRange 13 | want string 14 | }{ 15 | {"open range", NewOpenRange(5000), "[#5 000, +∞]"}, 16 | {"open range head", NewOpenRange(-1), "[HEAD, +∞]"}, 17 | {"open range head - 2", NewOpenRange(-2), "[HEAD - 2, +∞]"}, 18 | {"closed range", NewClosedRange(5000, 10000), "[#5 000, #10 000]"}, 19 | 
} 20 | for _, tt := range tests { 21 | t.Run(tt.name, func(t *testing.T) { 22 | assert.Equal(t, tt.want, tt.blockRange.String()) 23 | }) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /types/utils_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | // func Test_BlockRange_String(t *testing.T) { 11 | // } 12 | 13 | func Test_readBlockRange(t *testing.T) { 14 | errorIs := func(errString string) require.ErrorAssertionFunc { 15 | return func(tt require.TestingT, err error, i ...interface{}) { 16 | require.EqualError(tt, err, errString, i...) 17 | } 18 | } 19 | 20 | headBlock := int64(HeadBlockNum) 21 | noRange := BlockRange{} 22 | 23 | openRange := NewOpenRange 24 | closedRange := NewClosedRange 25 | 26 | type args struct { 27 | chainFirstStreamableBlock uint64 28 | blockRangeArg string 29 | } 30 | tests := []struct { 31 | name string 32 | args args 33 | want BlockRange 34 | assertion require.ErrorAssertionFunc 35 | }{ 36 | // Single 37 | {"single empty is full range", args{5, ""}, openRange(headBlock), nil}, 38 | {"single -1 is full range", args{5, "-1"}, openRange(headBlock), nil}, 39 | {"single : is open range from chain genesis", args{5, ":"}, openRange(5), nil}, 40 | {"single is start block", args{5, "11"}, openRange(11), nil}, 41 | {"single is start block and can be negative", args{5, "-2"}, openRange(-2), nil}, 42 | 43 | {"range start, stop", args{5, "10:12"}, closedRange(10, 12), nil}, 44 | {"range , stop", args{5, ":12"}, closedRange(5, 12), nil}, 45 | {"range start, ", args{5, "10:"}, openRange(10), nil}, 46 | {"range start, stop+", args{5, ":+10"}, closedRange(5, 15), nil}, 47 | {"range start, stop+", args{5, "10:+10"}, closedRange(10, 20), nil}, 48 | {"range, equal start & end", args{0, "10:10"}, closedRange(10, 10), 
nil}, 49 | {"range start, stop == -1", args{5, "10:-1"}, openRange(10), nil}, 50 | 51 | {"error range start, stop-", args{5, "10:-2"}, noRange, errorIs("invalid range: stop block of a range cannot be negative")}, 52 | {"error range start+, ", args{5, "+10:"}, noRange, errorIs("invalid range: start block of a range cannot be positively relative (so starting with a + sign)")}, 53 | {"error range start+, stop", args{5, "+10:20"}, noRange, errorIs("invalid range: start block of a range cannot be positively relative (so starting with a + sign)")}, 54 | {"error single +relative is stop block, start inferred", args{5, "+10"}, noRange, errorIs("invalid range: a single block cannot be positively relative (so starting with a + sign)")}, 55 | {"error range start+, stop+", args{5, "+10:+10"}, noRange, errorIs("invalid range: start block of a range cannot be positively relative (so starting with a + sign)")}, 56 | {"error invalid range, over", args{0, "11:10"}, noRange, errorIs("invalid range: start block 11 is above stop block 10 (inclusive)")}, 57 | } 58 | for _, tt := range tests { 59 | t.Run(tt.name, func(t *testing.T) { 60 | got, err := ParseBlockRange(tt.args.blockRangeArg, tt.args.chainFirstStreamableBlock) 61 | 62 | if tt.assertion == nil { 63 | tt.assertion = require.NoError 64 | } 65 | 66 | tt.assertion(t, err) 67 | assert.Equal(t, tt.want, got) 68 | }) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /unsafe_extensions.go: -------------------------------------------------------------------------------- 1 | package firecore 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/streamingfast/firehose-core/launcher" 7 | "go.uber.org/zap" 8 | ) 9 | 10 | // UnsafeRunningFromFirecore is used internally and should not be altered. 11 | var UnsafeRunningFromFirecore = false 12 | 13 | // UnsafeAllowedExecutableNameToBeEmpty is used internally and should not be altered. 
14 | var UnsafeAllowExecutableNameToBeEmpty = false 15 | 16 | // UnsafeResolveReaderNodeStartBlock is a function that resolved the reader node start block num, by default it simply 17 | // returns the value of the 'reader-node-start-block-num'. However, the function may be overwritten in certain chains 18 | // to perform a more complex resolution logic. 19 | var UnsafeResolveReaderNodeStartBlock = func(ctx context.Context, startBlockNum uint64, firstStreamableBlock uint64, runtime *launcher.Runtime, rootLog *zap.Logger) (uint64, error) { 20 | return startBlockNum, nil 21 | } 22 | -------------------------------------------------------------------------------- /utils.go: -------------------------------------------------------------------------------- 1 | package firecore 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/streamingfast/cli" 11 | ) 12 | 13 | func mkdirStorePathIfLocal(storeURL string) (err error) { 14 | if dirs := getDirsToMake(storeURL); len(dirs) > 0 { 15 | err = MakeDirs(dirs) 16 | } 17 | 18 | return 19 | } 20 | 21 | func getDirsToMake(storeURL string) []string { 22 | parts := strings.Split(storeURL, "://") 23 | if len(parts) > 1 { 24 | if parts[0] != "file" { 25 | // Not a local store, nothing to do 26 | return nil 27 | } 28 | storeURL = parts[1] 29 | } 30 | 31 | // Some of the store URL are actually a file directly, let's try our best to cope for that case 32 | filename := filepath.Base(storeURL) 33 | if strings.Contains(filename, ".") { 34 | storeURL = filepath.Dir(storeURL) 35 | } 36 | 37 | // If we reach here, it's a local store path 38 | return []string{storeURL} 39 | } 40 | 41 | func MakeDirs(directories []string) error { 42 | for _, directory := range directories { 43 | err := os.MkdirAll(directory, 0755) 44 | if err != nil { 45 | return fmt.Errorf("failed to create directory %q: %w", directory, err) 46 | } 47 | } 48 | 49 | return nil 50 | } 51 | 52 | // MustReplaceDataDir replaces 
`{data-dir}` from within the `in` received argument by the 53 | // `dataDir` argument. 54 | // 55 | // MustReplaceDataDir("/tmp/data", "{data-dir}/subdir") == "/tmp/data/subdir" 56 | func MustReplaceDataDir(dataDir, in string) string { 57 | d, err := filepath.Abs(dataDir) 58 | if err != nil { 59 | panic(fmt.Errorf("file path abs: %w", err)) 60 | } 61 | 62 | in = strings.Replace(in, "{data-dir}", d, -1) 63 | 64 | // Some legacy code still uses '{sf-data-dir}' (firehose-ethereum/firehose-near for example), so let's replace it 65 | // also to keep it compatible even though it's not advertised anymore 66 | in = strings.Replace(in, "{sf-data-dir}", d, -1) 67 | 68 | return in 69 | } 70 | 71 | var Example = func(in string) string { 72 | return string(cli.Example(in)) 73 | } 74 | 75 | func ExamplePrefixed[B Block](chain *Chain[B], prefix, in string) string { 76 | return string(cli.ExamplePrefixed(chain.BinaryName()+" "+prefix, in)) 77 | } 78 | 79 | func MustParseUint64(s string) uint64 { 80 | i, err := strconv.Atoi(s) 81 | cli.NoError(err, "Unable to parse %q as uint64", s) 82 | 83 | return uint64(i) 84 | } 85 | --------------------------------------------------------------------------------