├── .github └── workflows │ ├── build.yaml │ └── pr.yaml ├── .gitignore ├── .golangci.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── client ├── duneapi │ ├── batch.go │ ├── batch_test.go │ ├── client.go │ ├── metrics.go │ └── models.go └── jsonrpc │ ├── arbitrum_nitro.go │ ├── arbitrum_nitro_test.go │ ├── client.go │ ├── httpclient.go │ ├── httpclient_test.go │ ├── metrics.go │ ├── models.go │ ├── opstack.go │ ├── opstack_test.go │ └── testdata │ ├── arbitrumnitro-DEGEN-block-0x16870e9-debug_traceBlockByNumber.json │ ├── arbitrumnitro-DEGEN-block-0x16870e9-eth_getBlockByNumber.json │ ├── arbitrumnitro-DEGEN-block-0x16870e9-eth_getTransactionReceipt-0x0.json │ ├── arbitrumnitro-DEGEN-block-0x16870e9-eth_getTransactionReceipt-0x1.json │ ├── opstack-MODE-block-0x7a549b-debug_traceBlockByNumber.json │ ├── opstack-MODE-block-0x7a549b-eth_getBlockByNumber.json │ └── opstack-MODE-block-0x7a549b-eth_getBlockReceipts.json ├── cmd └── main.go ├── config └── config.go ├── go.mod ├── go.sum ├── ingester ├── ingester.go ├── mainloop.go ├── mainloop_test.go ├── metrics.go ├── models.go ├── models_test.go └── send.go ├── lib ├── dlq │ └── dlq.go └── hexutils │ └── numbers.go ├── mocks ├── duneapi │ └── client.go └── jsonrpc │ ├── httpclient.go │ └── rpcnode.go └── models ├── block.go ├── evm.go ├── gaps.go └── progress.go /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Build" 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | tags: 9 | - '*' 10 | 11 | jobs: 12 | build: 13 | name: "Build Docker image and push to Dockerhub" 14 | runs-on: ubuntu-latest 15 | env: 16 | AWS_REGION: us-east-1 # This is the region to authenticate against for public ECR repositories 17 | # These permissions are needed to interact with GitHub's OIDC Token endpoint. 
18 | permissions: 19 | id-token: write 20 | contents: read 21 | steps: 22 | - uses: actions/checkout@v3 23 | 24 | - name: Configure AWS Credentials 25 | uses: aws-actions/configure-aws-credentials@v4 26 | with: 27 | role-to-assume: arn:aws:iam::118330671040:role/node-indexer-ci 28 | aws-region: ${{ env.AWS_REGION }} 29 | 30 | - name: Login to Amazon ECR 31 | id: login-ecr 32 | uses: aws-actions/amazon-ecr-login@v2 33 | with: 34 | registry-type: public 35 | 36 | - name: Build and push Docker images 37 | env: 38 | DOCKER_HUB_KEY: ${{ secrets.DOCKER_HUB_KEY }} 39 | TAG: "${{ github.ref_name == 'main' && 'latest' || github.ref_name }}" 40 | run: | 41 | make image-push 42 | -------------------------------------------------------------------------------- /.github/workflows/pr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Pull Request" 3 | 4 | on: pull_request 5 | 6 | jobs: 7 | test: 8 | name: "Lint and test" 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v3 12 | 13 | - name: Install Go 14 | uses: actions/setup-go@v5 15 | with: 16 | go-version: 1.22 17 | 18 | - name: Lint 19 | run: make lint 20 | 21 | - name: Test 22 | run: make test 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # If you prefer the allow list template instead of the deny list, see community template: 2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 3 | # 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | 20 | # Go workspace file 21 | go.work 22 | go.work.sum 23 | 
24 | # Binary 25 | indexer 26 | bin 27 | 28 | .idea 29 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | linters: 3 | disable: 4 | - errcheck 5 | enable: 6 | - forbidigo 7 | - gofmt 8 | - gofumpt 9 | - goimports 10 | - gosimple 11 | - govet 12 | - ineffassign 13 | - lll 14 | - prealloc 15 | - predeclared 16 | - revive 17 | - staticcheck 18 | - stylecheck 19 | - unused 20 | 21 | issues: 22 | exclude-rules: 23 | # Exclude some linters from running on tests files. 24 | - path: mocks/ 25 | linters: 26 | - deadcode 27 | - gofumpt 28 | - goimports 29 | - gosimple (megacheck) 30 | - govet (vet, vetshadow) 31 | - ineffassign 32 | - lll 33 | - revive 34 | - staticcheck (megacheck) 35 | - structcheck 36 | - stylecheck 37 | - typecheck 38 | - unused (megacheck) 39 | - varcheck 40 | - linters: 41 | - lll 42 | source: "// nolint:lll" 43 | - linters: 44 | - stylecheck 45 | source: "// nolint:stylecheck" 46 | - linters: 47 | - unused 48 | - deadcode 49 | - varcheck 50 | - revive 51 | source: "// nolint:unused" 52 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.22-alpine AS builder 2 | 3 | # dependencies to build the project & dependencies 4 | RUN apk add --no-cache git make curl gcc musl-dev binutils-gold bash 5 | 6 | # First copy just enough to pull all dependencies, to cache this layer 7 | COPY go.mod go.sum Makefile /app/ 8 | WORKDIR /app/ 9 | RUN make setup 10 | 11 | # Copy the rest of the source code 12 | COPY . . 
13 | 14 | # Build 15 | RUN make build 16 | 17 | # Stage 2: Create a minimal runtime image 18 | FROM alpine:latest 19 | 20 | # Install ca-certificates 21 | RUN apk add --no-cache ca-certificates 22 | 23 | COPY --from=builder /app/indexer / 24 | ENTRYPOINT ["/indexer"] 25 | EXPOSE 2112 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Dune Analytics 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all setup lint build test image-build image-push 2 | 3 | TEST_TIMEOUT := 10s 4 | SHELL := /bin/bash 5 | TAG ?= latest 6 | 7 | all: lint test build 8 | 9 | setup: bin/golangci-lint bin/gofumpt bin/moq 10 | go mod download 11 | 12 | bin: 13 | mkdir -p bin 14 | 15 | bin/moq: bin 16 | GOBIN=$(PWD)/bin go install github.com/matryer/moq@v0.3.4 17 | bin/golangci-lint: bin 18 | GOBIN=$(PWD)/bin go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.1 19 | bin/gofumpt: bin 20 | GOBIN=$(PWD)/bin go install mvdan.cc/gofumpt@v0.6.0 21 | 22 | build: cmd/main.go 23 | CGO_ENABLED=0 go build -ldflags="-X github.com/duneanalytics/blockchain-ingester/client/duneapi.commitHash=$(shell git rev-parse --short HEAD)" -o indexer cmd/main.go 24 | 25 | lint: bin/golangci-lint bin/gofumpt 26 | go fmt ./... 27 | go vet ./... 28 | bin/golangci-lint -c .golangci.yml run ./... 29 | bin/gofumpt -l -e -d ./ 30 | go mod tidy 31 | 32 | test: 33 | go mod tidy 34 | CGO_ENABLED=1 go test -timeout=$(TEST_TIMEOUT) -race -bench=. -benchmem -cover ./... 35 | 36 | gen-mocks: bin/moq ./client/jsonrpc/ ./client/duneapi/ 37 | ./bin/moq -pkg jsonrpc_mock -out ./mocks/jsonrpc/httpclient.go ./client/jsonrpc HTTPClient 38 | ./bin/moq -pkg jsonrpc_mock -out ./mocks/jsonrpc/rpcnode.go ./client/jsonrpc BlockchainClient 39 | ./bin/moq -pkg duneapi_mock -out ./mocks/duneapi/client.go ./client/duneapi BlockchainIngester 40 | 41 | image-build: 42 | @echo "# Building Docker images" 43 | docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v8 -t "duneanalytics/node-indexer:${TAG}" -f Dockerfile . 
44 | 45 | image-push: 46 | @echo "# Pushing Docker images to Docker Hub (after building)" 47 | echo -n "${DOCKER_HUB_KEY}" | docker login --username duneanalytics --password-stdin 48 | docker buildx create --name mybuilder 49 | docker buildx use mybuilder 50 | docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v8 -t "duneanalytics/node-indexer:${TAG}" -f Dockerfile --push . 51 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Blockchain node indexer 2 | A program that indexes blockchain data into http://dune.com by connecting directly to an RPC node. 3 | 4 | # Limitations 5 | This program works with EVM compatible blockchains, doing direct, EVM-specific JSON-RPC calls to the Node RPC endpoint. 6 | 7 | 8 | # How to use: 9 | There are only 3 required arguments for running the indexer: 10 | 1. DUNE_API_KEY: Your Dune API Key, you can get this at: https://dune.com/settings/api 11 | 1. BLOCKCHAIN_NAME: The name of the blockchain as configured on Dune (for example: "ethereum" blockchain) 12 | 1. RPC_NODE_URL: The URL of the NODE RPC endpoint, for example: https://sepolia.optimism.io/ 13 | 14 | For more details see the configuration options section below. 
15 | 16 | ## Docker container 17 | You can run our [public container image on DockerHub](https://hub.docker.com/r/duneanalytics/node-indexer) as such: 18 | 19 | ```bash 20 | docker run -e BLOCKCHAIN_NAME='foo' -e RPC_NODE_URL='http://localhost:8545' -e DUNE_API_KEY='your-key-here' duneanalytics/node-indexer 21 | ``` 22 | 23 | ## Binary executable 24 | You can also just build and run a binary executable after cloning this repository: 25 | 26 | Build the binary for your OS: 27 | ```bash 28 | $ make build 29 | 30 | $ BLOCKCHAIN_NAME='foo' RPC_NODE_URL='http://localhost:8545' DUNE_API_KEY='your-key-here' LOG=debug ./indexer 31 | ``` 32 | 33 | Or run it directly with `go run`: 34 | ```bash 35 | $ go run cmd/main.go --blockchain-name foo ... 36 | ``` 37 | 38 | ## Configuration options 39 | You can see all the configuration options by using the `--help` argument: 40 | ```bash 41 | docker run duneanalytics/node-indexer --help 42 | ``` 43 | 44 | Also, we mention some of the options here: 45 | 46 | ### Log level 47 | The `log` flag (environment variable `LOG`) controls the log level. Use `--log debug`/`LOG=debug` to emit more logs than the default `info` level. To emit less logs, use `warn`, or `error` (least). 48 | 49 | ### Tuning RPC concurrency 50 | The flag `--rpc-concurrency` (environment variable `RPC_CONCURRENCY`) specifies the number of threads (goroutines) to run concurrently to perform RPC node requests. See `--help` for up to date default value. 51 | 52 | ### Tuning for throughput 53 | Throughput depends on: latency & request rate between RPC <-> Node Indexer <--> DuneAPI and can be tuned via a combination of: 54 | 1. RPC_CONCURRENCY, higher values feed more blocks into the node indexer to process 55 | 1. MAX_BATCH_SIZE, higher values send more blocks per request to DuneAPI 56 | 1. BLOCK_SUBMIT_INTERVAL, the interval at which blocks to DuneAPI 57 | See `--help` for up to date default values. 
58 | 59 | 60 | 61 | ### RPC poll interval 62 | The flag `--rpc-poll-interval` (environment variable `RPC_POLL_INTERVAL`) specifies the duration to wait before checking 63 | if the RPC node has a new block. Default is `300ms`. 64 | 65 | ### Adding extra HTTP headers to RPC requests 66 | If you wish to add HTTP headers to RPC requests you can do so by using the flag `--rpc-http-header` (once per header). 67 | 68 | ``` 69 | go run cmd/main.go ... --rpc-http-header header1:value1 --rpc-http-header header2:value2` 70 | ``` 71 | 72 | Or with the environment variable `RPC_HTTP_HEADERS='header1:value1|header2:value2|...'`, i.e. a `|` separated list of pairs, 73 | where each pair is separated by `:` (make sure to quote the full string to avoid creating a pipe). 74 | 75 | ``` 76 | docker run --env RPC_HTTP_HEADERS='header1:value1|header2:value2' ... duneanalytics/node-indexer: 77 | ``` 78 | 79 | ## Metrics 80 | The process exposes prometheus metrics which you can scrape and explore: `http://localhost:2112/metrics` 81 | -------------------------------------------------------------------------------- /client/duneapi/batch.go: -------------------------------------------------------------------------------- 1 | package duneapi 2 | 3 | import ( 4 | "encoding/json" 5 | "io" 6 | 7 | "github.com/duneanalytics/blockchain-ingester/models" 8 | ) 9 | 10 | type BlockBatchHeader struct { 11 | BlockSizes []int `json:"block_sizes"` 12 | } 13 | 14 | func WriteBlockBatch(out io.Writer, payloads []models.RPCBlock, disableHeader bool) error { 15 | // we write a batch header (single line, NDJSON) with the size of each block payload and then concatenate the payloads 16 | header := BlockBatchHeader{ 17 | BlockSizes: make([]int, len(payloads)), 18 | } 19 | for i, block := range payloads { 20 | header.BlockSizes[i] = len(block.Payload) 21 | } 22 | // allow disabling the header for testing/backwards compatibility 23 | if !disableHeader { 24 | buf, err := json.Marshal(header) 25 | if err != nil { 26 | 
return err 27 | } 28 | _, err = out.Write(buf) 29 | if err != nil { 30 | return err 31 | } 32 | _, err = out.Write([]byte("\n")) 33 | if err != nil { 34 | return err 35 | } 36 | } 37 | for _, block := range payloads { 38 | _, err := out.Write(block.Payload) 39 | if err != nil { 40 | return err 41 | } 42 | } 43 | return nil 44 | } 45 | -------------------------------------------------------------------------------- /client/duneapi/batch_test.go: -------------------------------------------------------------------------------- 1 | package duneapi_test 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "encoding/json" 7 | "io" 8 | "testing" 9 | 10 | "github.com/duneanalytics/blockchain-ingester/client/duneapi" 11 | "github.com/duneanalytics/blockchain-ingester/models" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestWriteBlockBatch(t *testing.T) { 16 | tests := []struct { 17 | name string 18 | payloads []models.RPCBlock 19 | expected string 20 | }{ 21 | { 22 | name: "single payload", 23 | payloads: []models.RPCBlock{ 24 | {Payload: []byte(`{"block":1}`)}, 25 | }, 26 | expected: `{"block_sizes":[11]} 27 | {"block":1}`, 28 | }, 29 | { 30 | name: "multiple payloads, with new lines", 31 | payloads: []models.RPCBlock{ 32 | {Payload: []byte(`{"block":1}` + "\n")}, 33 | {Payload: []byte(`{"block":2}` + "\n")}, 34 | }, 35 | expected: `{"block_sizes":[12,12]} 36 | {"block":1} 37 | {"block":2} 38 | `, 39 | }, 40 | { 41 | name: "multiple payloads, no newlines", 42 | payloads: []models.RPCBlock{ 43 | {Payload: []byte(`{"block":1}`)}, 44 | {Payload: []byte(`{"block":2}`)}, 45 | }, 46 | expected: `{"block_sizes":[11,11]} 47 | {"block":1}{"block":2}`, 48 | }, 49 | { 50 | name: "empty payloads", 51 | payloads: []models.RPCBlock{}, 52 | expected: `{"block_sizes":[]} 53 | `, 54 | }, 55 | } 56 | 57 | for _, tt := range tests { 58 | t.Run(tt.name, func(t *testing.T) { 59 | var buf bytes.Buffer 60 | err := duneapi.WriteBlockBatch(&buf, tt.payloads, false) 61 | 
require.NoError(t, err) 62 | 63 | require.Equal(t, tt.expected, buf.String()) 64 | rebuilt, err := ReadBlockBatch(&buf) 65 | require.NoError(t, err) 66 | require.EqualValues(t, tt.payloads, rebuilt) 67 | }) 68 | } 69 | } 70 | 71 | func ReadBlockBatch(buf *bytes.Buffer) ([]models.RPCBlock, error) { 72 | reader := bufio.NewReader(buf) 73 | headerLine, err := reader.ReadString('\n') 74 | if err != nil { 75 | return nil, err 76 | } 77 | 78 | var header duneapi.BlockBatchHeader 79 | err = json.Unmarshal([]byte(headerLine), &header) 80 | if err != nil { 81 | return nil, err 82 | } 83 | 84 | payloads := make([]models.RPCBlock, len(header.BlockSizes)) 85 | for i, size := range header.BlockSizes { 86 | payload := make([]byte, size) 87 | _, err := io.ReadFull(reader, payload) 88 | if err != nil { 89 | return nil, err 90 | } 91 | payloads[i] = models.RPCBlock{Payload: payload} 92 | } 93 | 94 | return payloads, nil 95 | } 96 | -------------------------------------------------------------------------------- /client/duneapi/client.go: -------------------------------------------------------------------------------- 1 | package duneapi 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "log/slog" 10 | "net/http" 11 | "strings" 12 | "sync" 13 | "time" 14 | 15 | "github.com/duneanalytics/blockchain-ingester/models" 16 | "github.com/hashicorp/go-retryablehttp" 17 | "github.com/klauspost/compress/zstd" 18 | ) 19 | 20 | const ( 21 | MaxRetries = 20 // try really hard to send the block 22 | MinWaitDur = 100 * time.Millisecond 23 | MaxWaitDur = 5 * time.Second 24 | ) 25 | 26 | type BlockchainIngester interface { 27 | // SendBlock sends a batch of blocks to DuneAPI 28 | SendBlocks(ctx context.Context, payloads []models.RPCBlock) error 29 | 30 | // GetProgressReport gets a progress report from DuneAPI 31 | GetProgressReport(ctx context.Context) (*models.BlockchainIndexProgress, error) 32 | 33 | // PostProgressReport sends a progress report to DuneAPI 34 | 
PostProgressReport(ctx context.Context, progress models.BlockchainIndexProgress) error 35 | 36 | GetBlockGaps(ctx context.Context) (*models.BlockchainGaps, error) 37 | 38 | // - API to discover the latest block number ingested 39 | // this can also provide "next block ranges" to push to DuneAPI 40 | // - log/metrics on catching up/falling behind, distance from tip of chain 41 | } 42 | 43 | type client struct { 44 | log *slog.Logger 45 | httpClient *retryablehttp.Client 46 | cfg Config 47 | compressor *zstd.Encoder 48 | bufPool *sync.Pool 49 | } 50 | 51 | var _ BlockchainIngester = &client{} 52 | 53 | func New(log *slog.Logger, cfg Config) (*client, error) { // revive:disable-line:unexported-return 54 | comp, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedFastest)) 55 | if err != nil { 56 | return nil, err 57 | } 58 | httpClient := retryablehttp.NewClient() 59 | httpClient.RetryMax = MaxRetries 60 | httpClient.Logger = log 61 | checkRetry := func(ctx context.Context, resp *http.Response, err error) (bool, error) { 62 | yes, err2 := retryablehttp.DefaultRetryPolicy(ctx, resp, err) 63 | if yes { 64 | if resp == nil { 65 | log.Warn("Retrying request to Dune API", "error", err2) 66 | } else { 67 | log.Warn("Retrying request to Dune API", "statusCode", resp.Status, "error", err2) 68 | } 69 | } 70 | return yes, err2 71 | } 72 | 73 | httpClient.CheckRetry = checkRetry 74 | httpClient.Backoff = retryablehttp.LinearJitterBackoff 75 | httpClient.RetryWaitMin = MinWaitDur 76 | httpClient.RetryWaitMax = MaxWaitDur 77 | return &client{ 78 | log: log.With("module", "duneapi"), 79 | httpClient: httpClient, 80 | cfg: cfg, 81 | compressor: comp, 82 | bufPool: &sync.Pool{ 83 | New: func() interface{} { 84 | return new(bytes.Buffer) 85 | }, 86 | }, 87 | }, nil 88 | } 89 | 90 | // SendBlock sends a block to DuneAPI 91 | func (c *client) SendBlocks(ctx context.Context, payloads []models.RPCBlock) error { 92 | buffer := c.bufPool.Get().(*bytes.Buffer) 93 | defer 
c.bufPool.Put(buffer) 94 | 95 | request, err := c.buildRequest(payloads, buffer) 96 | if err != nil { 97 | return err 98 | } 99 | return c.sendRequest(ctx, *request) 100 | } 101 | 102 | func (c *client) buildRequest(payloads []models.RPCBlock, buffer *bytes.Buffer) (*BlockchainIngestRequest, error) { 103 | request := &BlockchainIngestRequest{} 104 | var err error 105 | 106 | buffer.Reset() 107 | // not thread safe, multiple calls to the compressor here 108 | if c.cfg.DisableCompression { 109 | err = WriteBlockBatch(buffer, payloads, c.cfg.DisableBatchHeader) 110 | if err != nil { 111 | return nil, err 112 | } 113 | } else { 114 | c.compressor.Reset(buffer) 115 | err = WriteBlockBatch(c.compressor, payloads, c.cfg.DisableBatchHeader) 116 | if err != nil { 117 | return nil, err 118 | } 119 | err := c.compressor.Close() 120 | if err != nil { 121 | return nil, err 122 | } 123 | request.ContentEncoding = "application/zstd" 124 | } 125 | request.Payload = buffer.Bytes() 126 | 127 | numbers := make([]string, len(payloads)) 128 | for i, payload := range payloads { 129 | numbers[i] = fmt.Sprintf("%d", payload.BlockNumber) 130 | } 131 | blockNumbers := strings.Join(numbers, ",") 132 | request.BlockNumbers = blockNumbers 133 | request.IdempotencyKey = c.idempotencyKey(*request) 134 | request.EVMStack = c.cfg.Stack.String() 135 | request.BatchSize = len(payloads) 136 | return request, nil 137 | } 138 | 139 | // We inject the commit hash here at build time, using the -X linker flag, so we can use it in the User-Agent header 140 | var ( 141 | commitHash string 142 | userAgent = fmt.Sprintf("node-indexer/%s", commitHash) 143 | ) 144 | 145 | func (c *client) sendRequest(ctx context.Context, request BlockchainIngestRequest) error { 146 | start := time.Now() 147 | var err error 148 | var response BlockchainIngestResponse 149 | var responseStatus string 150 | defer func() { 151 | if err != nil { 152 | c.log.Error("INGEST FAILED", 153 | "blockNumbers", request.BlockNumbers, 154 | 
"error", err, 155 | "statusCode", responseStatus, 156 | "payloadSize", len(request.Payload), 157 | "duration", time.Since(start), 158 | ) 159 | } else { 160 | c.log.Debug("INGEST SUCCESS", 161 | "blockNumbers", request.BlockNumbers, 162 | "response", response.String(), 163 | "payloadSize", len(request.Payload), 164 | "duration", time.Since(start), 165 | ) 166 | } 167 | }() 168 | 169 | url := fmt.Sprintf("%s/api/beta/blockchain/%s/ingest", c.cfg.URL, c.cfg.BlockchainName) 170 | if c.cfg.DryRun { 171 | url = fmt.Sprintf("%s/api/beta/blockchain/%s/ingest/dry-run", c.cfg.URL, c.cfg.BlockchainName) 172 | } 173 | c.log.Debug("Sending request", "url", url) 174 | req, err := retryablehttp.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(request.Payload)) 175 | if err != nil { 176 | return err 177 | } 178 | if request.ContentEncoding != "" { 179 | req.Header.Set("Content-Encoding", request.ContentEncoding) 180 | } 181 | req.Header.Set("Content-Type", "application/x-ndjson") 182 | req.Header.Set("User-Agent", userAgent) 183 | req.Header.Set("x-idempotency-key", request.IdempotencyKey) 184 | req.Header.Set("x-dune-evm-stack", request.EVMStack) 185 | req.Header.Set("x-dune-api-key", c.cfg.APIKey) 186 | req.Header.Set("x-dune-batch-size", fmt.Sprintf("%d", request.BatchSize)) 187 | req = req.WithContext(ctx) 188 | 189 | t0 := time.Now() 190 | resp, err := c.httpClient.Do(req) 191 | if err != nil { 192 | observeSendBlocksRequestErr(err, request.BatchSize, t0) 193 | return err 194 | } 195 | defer resp.Body.Close() 196 | observeSendBlocksRequestCode(resp.StatusCode, request.BatchSize, t0) 197 | 198 | responseStatus = resp.Status 199 | 200 | if resp.StatusCode != http.StatusOK { 201 | bs, err := io.ReadAll(resp.Body) 202 | responseBody := string(bs) 203 | if err != nil { 204 | return err 205 | } 206 | // We mutate the global err here because we have deferred a log message where we check for non-nil err 207 | err = fmt.Errorf("unexpected status code: %v, %v with body '%s'", 
resp.StatusCode, resp.Status, responseBody) 208 | return err 209 | } 210 | 211 | err = json.NewDecoder(resp.Body).Decode(&response) 212 | if err != nil { 213 | return err 214 | } 215 | 216 | return nil 217 | } 218 | 219 | func (c *client) idempotencyKey(r BlockchainIngestRequest) string { 220 | // for idempotency we use the block numbers in the request 221 | // (should we use also the date?, or a startup timestamp?) 222 | return r.BlockNumbers 223 | } 224 | 225 | func (c *client) Close() error { 226 | return c.compressor.Close() 227 | } 228 | 229 | func (c *client) PostProgressReport(ctx context.Context, progress models.BlockchainIndexProgress) error { 230 | if c.cfg.DryRun { 231 | return nil 232 | } 233 | 234 | var request PostBlockchainProgressRequest 235 | var err error 236 | var responseStatus string 237 | var responseBody string 238 | start := time.Now() 239 | 240 | // Log response 241 | defer func() { 242 | if err != nil { 243 | c.log.Error("Sending progress report failed", 244 | "lastIngestedBlockNumber", request.LastIngestedBlockNumber, 245 | "error", err, 246 | "statusCode", responseStatus, 247 | "duration", time.Since(start), 248 | "responseBody", responseBody, 249 | ) 250 | } else { 251 | c.log.Info("Sent progress report", 252 | "lastIngestedBlockNumber", request.LastIngestedBlockNumber, 253 | "latestBlockNumber", request.LatestBlockNumber, 254 | "errors", len(request.Errors), 255 | "duration", time.Since(start), 256 | ) 257 | } 258 | }() 259 | 260 | errors := make([]BlockchainError, len(progress.Errors)) 261 | for i, e := range progress.Errors { 262 | errors[i] = BlockchainError{ 263 | Timestamp: e.Timestamp, 264 | BlockNumbers: e.BlockNumbers, 265 | Error: e.Error, 266 | Source: e.Source, 267 | } 268 | } 269 | request = PostBlockchainProgressRequest{ 270 | LastIngestedBlockNumber: progress.LastIngestedBlockNumber, 271 | LatestBlockNumber: progress.LatestBlockNumber, 272 | Errors: errors, 273 | DuneErrorCounts: progress.DuneErrorCounts, 274 | 
RPCErrorCounts: progress.RPCErrorCounts, 275 | Since: progress.Since, 276 | } 277 | url := fmt.Sprintf("%s/api/beta/blockchain/%s/ingest/progress", c.cfg.URL, c.cfg.BlockchainName) 278 | payload, err := json.Marshal(request) 279 | if err != nil { 280 | return err 281 | } 282 | c.log.Debug("Sending request", "url", url, "payload", string(payload)) 283 | req, err := retryablehttp.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload)) 284 | if err != nil { 285 | return err 286 | } 287 | req.Header.Set("User-Agent", userAgent) 288 | req.Header.Set("Content-Type", "application/json") 289 | req.Header.Set("x-dune-api-key", c.cfg.APIKey) 290 | req = req.WithContext(ctx) 291 | resp, err := c.httpClient.Do(req) 292 | if err != nil { 293 | return err 294 | } 295 | defer resp.Body.Close() 296 | responseStatus = resp.Status 297 | 298 | if resp.StatusCode != http.StatusOK { 299 | bs, _ := io.ReadAll(resp.Body) 300 | responseBody := string(bs) 301 | // We mutate the global err here because we have deferred a log message where we check for non-nil err 302 | err = fmt.Errorf("unexpected status code: %v, %v with body '%s'", resp.StatusCode, resp.Status, responseBody) 303 | return err 304 | } 305 | 306 | return nil 307 | } 308 | 309 | func (c *client) GetProgressReport(ctx context.Context) (*models.BlockchainIndexProgress, error) { 310 | if c.cfg.DryRun { 311 | return &models.BlockchainIndexProgress{ 312 | BlockchainName: c.cfg.BlockchainName, 313 | EVMStack: c.cfg.Stack.String(), 314 | LastIngestedBlockNumber: -1, // no block ingested 315 | LatestBlockNumber: 0, 316 | }, nil 317 | } 318 | 319 | var response GetBlockchainProgressResponse 320 | var err error 321 | var responseStatus string 322 | start := time.Now() 323 | 324 | // Log response 325 | defer func() { 326 | if err != nil { 327 | c.log.Error("Getting progress report failed", 328 | "error", err, 329 | "statusCode", responseStatus, 330 | "duration", time.Since(start), 331 | ) 332 | } else { 333 | c.log.Info("Got 
progress report", 334 | "progress", response.String(), 335 | "duration", time.Since(start), 336 | ) 337 | } 338 | }() 339 | 340 | url := fmt.Sprintf("%s/api/beta/blockchain/%s/ingest/progress", c.cfg.URL, c.cfg.BlockchainName) 341 | c.log.Debug("Sending request", "url", url) 342 | req, err := retryablehttp.NewRequestWithContext(ctx, "GET", url, nil) // nil: empty body 343 | if err != nil { 344 | return nil, err 345 | } 346 | req.Header.Set("x-dune-api-key", c.cfg.APIKey) 347 | req = req.WithContext(ctx) 348 | resp, err := c.httpClient.Do(req) 349 | if err != nil { 350 | return nil, err 351 | } 352 | defer resp.Body.Close() 353 | 354 | responseBody, err := io.ReadAll(resp.Body) 355 | if err != nil { 356 | return nil, err 357 | } 358 | 359 | if resp.StatusCode == http.StatusNotFound { 360 | // no progress yet, first ingest for this chain 361 | return &models.BlockchainIndexProgress{ 362 | BlockchainName: c.cfg.BlockchainName, 363 | EVMStack: c.cfg.Stack.String(), 364 | LastIngestedBlockNumber: -1, // no block ingested 365 | LatestBlockNumber: 0, 366 | }, nil 367 | } 368 | if resp.StatusCode != http.StatusOK { 369 | bs, _ := io.ReadAll(resp.Body) 370 | responseBody := string(bs) 371 | // We mutate the global err here because we have deferred a log message where we check for non-nil err 372 | err = fmt.Errorf("unexpected status code: %v, %v with body '%s'", resp.StatusCode, resp.Status, responseBody) 373 | return nil, err 374 | } 375 | 376 | err = json.Unmarshal(responseBody, &response) 377 | if err != nil { 378 | return nil, err 379 | } 380 | 381 | progress := &models.BlockchainIndexProgress{ 382 | BlockchainName: c.cfg.BlockchainName, 383 | EVMStack: c.cfg.Stack.String(), 384 | LastIngestedBlockNumber: response.LastIngestedBlockNumber, 385 | LatestBlockNumber: response.LatestBlockNumber, 386 | } 387 | return progress, nil 388 | } 389 | 390 | func (c *client) GetBlockGaps(ctx context.Context) (*models.BlockchainGaps, error) { 391 | if c.cfg.DryRun { 392 | return 
&models.BlockchainGaps{}, nil 393 | } 394 | 395 | var response BlockchainGapsResponse 396 | var err error 397 | var responseStatus string 398 | start := time.Now() 399 | 400 | // Log response 401 | defer func() { 402 | if err != nil { 403 | c.log.Error("Getting block gaps failed", 404 | "error", err, 405 | "statusCode", responseStatus, 406 | "duration", time.Since(start), 407 | ) 408 | } else { 409 | c.log.Info("Got block gaps", 410 | "blockGaps", response.String(), 411 | "duration", time.Since(start), 412 | ) 413 | } 414 | }() 415 | 416 | url := fmt.Sprintf("%s/api/beta/blockchain/%s/gaps", c.cfg.URL, c.cfg.BlockchainName) 417 | c.log.Debug("Sending request", "url", url) 418 | req, err := retryablehttp.NewRequestWithContext(ctx, "GET", url, nil) // nil: empty body 419 | if err != nil { 420 | return nil, err 421 | } 422 | req.Header.Set("x-dune-api-key", c.cfg.APIKey) 423 | req = req.WithContext(ctx) 424 | resp, err := c.httpClient.Do(req) 425 | if err != nil { 426 | return nil, err 427 | } 428 | defer resp.Body.Close() 429 | 430 | responseBody, err := io.ReadAll(resp.Body) 431 | if err != nil { 432 | return nil, err 433 | } 434 | 435 | if resp.StatusCode != http.StatusOK { 436 | bs, _ := io.ReadAll(resp.Body) 437 | responseBody := string(bs) 438 | // We mutate the global err here because we have deferred a log message where we check for non-nil err 439 | err = fmt.Errorf("unexpected status code: %v, %v with body '%s'", resp.StatusCode, resp.Status, responseBody) 440 | return nil, err 441 | } 442 | 443 | err = json.Unmarshal(responseBody, &response) 444 | if err != nil { 445 | return nil, err 446 | } 447 | 448 | gaps := &models.BlockchainGaps{ 449 | Gaps: mapSlice(response.Gaps, func(gap BlockGap) models.BlockGap { 450 | return models.BlockGap{ 451 | FirstMissing: gap.FirstMissing, 452 | LastMissing: gap.LastMissing, 453 | } 454 | }), 455 | } 456 | return gaps, nil 457 | } 458 | 459 | func mapSlice[T any, U any](slice []T, mapper func(T) U) []U { 460 | result := 
make([]U, len(slice)) 461 | for i, v := range slice { 462 | result[i] = mapper(v) 463 | } 464 | return result 465 | } 466 | -------------------------------------------------------------------------------- /client/duneapi/metrics.go: -------------------------------------------------------------------------------- 1 | package duneapi 2 | 3 | import ( 4 | "errors" 5 | "net/url" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/prometheus/client_golang/prometheus" 10 | "github.com/prometheus/client_golang/prometheus/promauto" 11 | ) 12 | 13 | var metricSendBlockCount = promauto.NewCounterVec( 14 | prometheus.CounterOpts{ 15 | Namespace: "node_indexer", 16 | Subsystem: "dune_client", 17 | Name: "sent_block_total", 18 | Help: "Total number of blocks sent in requests", 19 | }, 20 | []string{"status"}, 21 | ) 22 | 23 | var metricSendBlockBatchSize = promauto.NewHistogramVec( 24 | prometheus.HistogramOpts{ 25 | Namespace: "node_indexer", 26 | Subsystem: "dune_client", 27 | Name: "block_per_batch", 28 | Help: "Number of blocks per batch", 29 | Buckets: []float64{1, 2, 4, 8, 16, 32, 64, 128, 256}, 30 | }, 31 | []string{"status"}, 32 | ) 33 | 34 | var metricSendRequestsCount = promauto.NewCounterVec( 35 | 36 | prometheus.CounterOpts{ 37 | Namespace: "node_indexer", 38 | Subsystem: "dune_client", 39 | Name: "send_requests_total", 40 | Help: "Number of send requests", 41 | }, 42 | []string{"status"}, 43 | ) 44 | 45 | var metricSendBlockBatchDurationMillis = promauto.NewHistogramVec( 46 | prometheus.HistogramOpts{ 47 | Namespace: "node_indexer", 48 | Subsystem: "dune_client", 49 | Name: "send_block_batch_duration_millis", 50 | Help: "Duration of a send block batch request in milliseconds", 51 | Buckets: []float64{10, 25, 50, 100, 250, 500, 1000, 2000, 4000}, 52 | }, 53 | []string{"status"}, 54 | ) 55 | 56 | func observeSendBlocksRequest(status string, numberOfBlocks int, t0 time.Time) { 57 | metricSendBlockCount.WithLabelValues(status).Inc() 58 | 
metricSendBlockBatchSize.WithLabelValues(status).Observe(float64(numberOfBlocks)) 59 | metricSendBlockBatchDurationMillis.WithLabelValues(status).Observe(float64(time.Since(t0).Milliseconds())) 60 | metricSendRequestsCount.WithLabelValues(status).Add(float64(numberOfBlocks)) 61 | } 62 | 63 | func observeSendBlocksRequestCode(statusCode int, numberOfBlocks int, t0 time.Time) { 64 | observeSendBlocksRequest(strconv.Itoa(statusCode), numberOfBlocks, t0) 65 | } 66 | 67 | func observeSendBlocksRequestErr(err error, numberOfBlocks int, t0 time.Time) { 68 | observeSendBlocksRequest(errorToStatus(err), numberOfBlocks, t0) 69 | } 70 | 71 | func errorToStatus(err error) string { 72 | status := "unknown_error" 73 | var urlErr *url.Error 74 | if errors.As(err, &urlErr) { 75 | if urlErr.Timeout() { 76 | status = "timeout" 77 | } else { 78 | status = "connection_refused" 79 | } 80 | } 81 | return status 82 | } 83 | -------------------------------------------------------------------------------- /client/duneapi/models.go: -------------------------------------------------------------------------------- 1 | package duneapi 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | "time" 8 | 9 | "github.com/duneanalytics/blockchain-ingester/models" 10 | ) 11 | 12 | type Config struct { 13 | APIKey string 14 | URL string 15 | 16 | // this is used by DuneAPI to determine the logic used to decode the EVM transactions 17 | Stack models.EVMStack 18 | // the name of this blockchain as it will be stored in DuneAPI 19 | BlockchainName string 20 | 21 | // RPCBlock json payloads can be very large, we default to compressing for better throughput 22 | // - lowers latency 23 | // - reduces bandwidth 24 | DisableCompression bool 25 | 26 | DryRun bool 27 | 28 | DisableBatchHeader bool // for testing/backwards compatibility 29 | } 30 | 31 | // The response from the DuneAPI ingest endpoint. 
// BlockchainIngestResponse is the response from the DuneAPI ingest endpoint.
type BlockchainIngestResponse struct {
	Tables []IngestedTableInfo `json:"tables"`
}

// IngestedTableInfo reports how many rows were written to one table.
type IngestedTableInfo struct {
	Name string `json:"name"`
	Rows int    `json:"rows"`
}

// String renders the ingested tables as "[name: rows, ...]", sorted by table
// name for deterministic output. NOTE: sorting mutates the receiver's slice.
func (b *BlockchainIngestResponse) String() string {
	sort.Slice(b.Tables, func(i, j int) bool {
		return b.Tables[i].Name < b.Tables[j].Name
	})
	parts := make([]string, 0, len(b.Tables))
	for _, t := range b.Tables {
		parts = append(parts, fmt.Sprintf("%s: %d", t.Name, t.Rows))
	}
	return "[" + strings.Join(parts, ", ") + "]"
}
`json:"first_missing"` 98 | LastMissing int64 `json:"last_missing"` 99 | } 100 | 101 | func (b *BlockchainGapsResponse) String() string { 102 | return fmt.Sprintf("%+v", *b) 103 | } 104 | -------------------------------------------------------------------------------- /client/jsonrpc/arbitrum_nitro.go: -------------------------------------------------------------------------------- 1 | package jsonrpc 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "time" 9 | 10 | "github.com/duneanalytics/blockchain-ingester/models" 11 | "golang.org/x/sync/errgroup" 12 | ) 13 | 14 | type ArbitrumNitroClient struct { 15 | rpcClient 16 | } 17 | 18 | var _ BlockchainClient = &ArbitrumNitroClient{} 19 | 20 | // BlockByNumber returns the block with the given blockNumber. 21 | // it uses 3 different methods to get the block: 22 | // 1. eth_getBlockByNumber 23 | // 2. debug_traceBlockByNumber with tracer "callTracer" 24 | // TODO: this method should be optional 25 | // 2. call to eth_getTransactionReceipt for each Tx present in the Block 26 | // 27 | // We encode the payload in NDJSON 28 | func (c *ArbitrumNitroClient) BlockByNumber(ctx context.Context, blockNumber int64) (models.RPCBlock, error) { 29 | tStart := time.Now() 30 | defer func() { 31 | c.log.Debug("BlockByNumber", "blockNumber", blockNumber, "duration", time.Since(tStart)) 32 | }() 33 | 34 | blockNumberHex := fmt.Sprintf("0x%x", blockNumber) 35 | 36 | results := make([]*bytes.Buffer, 0, 8) 37 | 38 | // eth_getBlockByNumber and extract the transaction hashes 39 | getBlockNumberResponse := c.bufPool.Get().(*bytes.Buffer) 40 | defer c.putBuffer(getBlockNumberResponse) 41 | results = append(results, getBlockNumberResponse) 42 | err := c.getResponseBody(ctx, "eth_getBlockByNumber", []any{blockNumberHex, true}, getBlockNumberResponse) 43 | if err != nil { 44 | c.log.Error("Failed to get response for jsonRPC", 45 | "blockNumber", blockNumber, 46 | "method", "eth_getBlockByNumber", 47 | "error", err, 48 
// transactionHash is the minimal transaction projection we need from an
// eth_getBlockByNumber response.
type transactionHash struct {
	Hash string `json:"hash"`
}

// getTransactionHashes minimally parses a raw eth_getBlockByNumber JSON-RPC
// response and returns the hashes of the transactions it contains.
func getTransactionHashes(blockResp []byte) ([]transactionHash, error) {
	// Decode only the fields we care about; everything else is ignored.
	var parsed struct {
		Result struct {
			Transactions []transactionHash `json:"transactions"`
		} `json:"result"`
	}
	if err := json.Unmarshal(blockResp, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse eth_getBlockByNumber response: %w", err)
	}
	return parsed.Result.Transactions, nil
}
"testing" 7 | 8 | "github.com/duneanalytics/blockchain-ingester/models" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestArbitrumNitroBasic(t *testing.T) { 13 | getBlockByNumberResponse := readFileForTest( 14 | "testdata/arbitrumnitro-DEGEN-block-0x16870e9-eth_getBlockByNumber.json") 15 | debugtraceBlockByNumberResponse := readFileForTest( 16 | "testdata/arbitrumnitro-DEGEN-block-0x16870e9-debug_traceBlockByNumber.json") 17 | tx0ReceiptResponse := readFileForTest( 18 | "testdata/arbitrumnitro-DEGEN-block-0x16870e9-eth_getTransactionReceipt-0x0.json") 19 | tx1ReceiptResponse := readFileForTest( 20 | "testdata/arbitrumnitro-DEGEN-block-0x16870e9-eth_getTransactionReceipt-0x1.json") 21 | 22 | var expectedPayload bytes.Buffer 23 | expectedPayload.Write(getBlockByNumberResponse.Bytes()) 24 | expectedPayload.Write(debugtraceBlockByNumberResponse.Bytes()) 25 | expectedPayload.Write(tx0ReceiptResponse.Bytes()) 26 | expectedPayload.Write(tx1ReceiptResponse.Bytes()) 27 | expectedPayloadBytes := expectedPayload.Bytes() 28 | 29 | tx0Hash := "0x19ee83020d4dad7e96dbb2c01ce2441e75717ee038a022fc6a3b61300b1b801c" 30 | tx1Hash := "0x4e805891b568698f8419f8e162d70ed9675e42a32e4972cbeb7f78d7fd51de76" 31 | blockNumberHex := "0x16870e9" 32 | blockNumber := int64(23621865) 33 | httpClientMock := MockHTTPRequests( 34 | []MockedRequest{ 35 | { 36 | Req: jsonRPCRequest{ 37 | Method: "eth_getBlockByNumber", 38 | Params: []interface{}{blockNumberHex, true}, 39 | }, 40 | Resp: jsonRPCResponse{ 41 | Body: getBlockByNumberResponse, 42 | }, 43 | }, 44 | { 45 | Req: jsonRPCRequest{ 46 | Method: "debug_traceBlockByNumber", 47 | Params: []interface{}{blockNumberHex, map[string]string{"tracer": "callTracer"}}, 48 | }, 49 | Resp: jsonRPCResponse{ 50 | Body: debugtraceBlockByNumberResponse, 51 | }, 52 | }, 53 | { 54 | Req: jsonRPCRequest{ 55 | Method: "eth_getTransactionReceipt", 56 | Params: []interface{}{tx0Hash}, 57 | }, 58 | Resp: jsonRPCResponse{ 59 | Body: tx0ReceiptResponse, 60 
| }, 61 | }, 62 | { 63 | Req: jsonRPCRequest{ 64 | Method: "eth_getTransactionReceipt", 65 | Params: []interface{}{tx1Hash}, 66 | }, 67 | Resp: jsonRPCResponse{ 68 | Body: tx1ReceiptResponse, 69 | }, 70 | }, 71 | }) 72 | 73 | opstack, err := NewTestRPCClient(httpClientMock, models.ArbitrumNitro) 74 | require.NoError(t, err) 75 | 76 | block, err := opstack.BlockByNumber(context.Background(), blockNumber) 77 | require.NoError(t, err) 78 | require.NotNil(t, block) 79 | require.Equal(t, blockNumber, block.BlockNumber) 80 | require.False(t, block.Errored()) 81 | require.Equal(t, expectedPayloadBytes, block.Payload) 82 | } 83 | -------------------------------------------------------------------------------- /client/jsonrpc/client.go: -------------------------------------------------------------------------------- 1 | package jsonrpc 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "log/slog" 9 | "net/http" 10 | "sync" 11 | "time" 12 | 13 | "github.com/duneanalytics/blockchain-ingester/lib/hexutils" 14 | "github.com/duneanalytics/blockchain-ingester/models" 15 | "github.com/hashicorp/go-retryablehttp" 16 | "github.com/panjf2000/ants/v2" 17 | "golang.org/x/sync/errgroup" 18 | ) 19 | 20 | type BlockchainClient interface { 21 | LatestBlockNumber() (int64, error) 22 | BlockByNumber(ctx context.Context, blockNumber int64) (models.RPCBlock, error) 23 | Close() error 24 | } 25 | 26 | const ( 27 | MaxRetries = 10 28 | DefaultRequestTimeout = 30 * time.Second 29 | DefaultMaxRPCConcurrency = 50 // safe default 30 | ) 31 | 32 | type rpcClient struct { 33 | bufPool *sync.Pool 34 | cfg Config 35 | client HTTPClient 36 | httpHeaders map[string]string 37 | log *slog.Logger 38 | wrkPool *ants.Pool 39 | } 40 | 41 | func NewClient(log *slog.Logger, cfg Config) (BlockchainClient, error) { 42 | // use the production http client w/ retries 43 | return NewRPCClient(log, NewHTTPClient(log), cfg) 44 | } 45 | 46 | func NewRPCClient(log *slog.Logger, client HTTPClient, 
cfg Config) (BlockchainClient, error) { 47 | rpcClient, err := newClient(log.With("module", "jsonrpc"), client, cfg) 48 | if err != nil { 49 | return nil, err 50 | } 51 | switch cfg.EVMStack { 52 | case models.OpStack: 53 | return &OpStackClient{*rpcClient}, nil 54 | case models.ArbitrumNitro: 55 | return &ArbitrumNitroClient{*rpcClient}, nil 56 | default: 57 | return nil, fmt.Errorf("unsupported EVM stack: %s", cfg.EVMStack) 58 | } 59 | } 60 | 61 | func newClient(log *slog.Logger, client HTTPClient, cfg Config, 62 | ) (*rpcClient, error) { // revive:disable-line:unexported-return 63 | if cfg.TotalRPCConcurrency == 0 { 64 | cfg.TotalRPCConcurrency = DefaultMaxRPCConcurrency 65 | } 66 | wkrPool, err := ants.NewPool(cfg.TotalRPCConcurrency) 67 | if err != nil { 68 | return nil, fmt.Errorf("failed to create worker pool: %w", err) 69 | } 70 | 71 | rpc := &rpcClient{ 72 | client: client, 73 | cfg: cfg, 74 | log: log, 75 | bufPool: &sync.Pool{ 76 | New: func() interface{} { 77 | return new(bytes.Buffer) 78 | }, 79 | }, 80 | httpHeaders: cfg.HTTPHeaders, 81 | wrkPool: wkrPool, 82 | } 83 | // Ensure RPC node is up & reachable 84 | _, err = rpc.LatestBlockNumber() 85 | if err != nil { 86 | return nil, fmt.Errorf("failed to connect to jsonrpc: %w", err) 87 | } 88 | log.Info("Initialized and Connected to node jsonRPC", "config", fmt.Sprintf("%+v", cfg)) 89 | return rpc, nil 90 | } 91 | 92 | func (c *rpcClient) LatestBlockNumber() (int64, error) { 93 | buf := c.bufPool.Get().(*bytes.Buffer) 94 | defer c.putBuffer(buf) 95 | 96 | err := c.getResponseBody(context.Background(), "eth_blockNumber", []any{}, buf) 97 | if err != nil { 98 | c.log.Error("Failed to get response for jsonRPC", 99 | "method", "eth_blockNumber", 100 | "error", err, 101 | ) 102 | return 0, err 103 | } 104 | resp := struct { 105 | Result string `json:"result"` 106 | }{} 107 | if err := json.NewDecoder(buf).Decode(&resp); err != nil { 108 | c.log.Error("Failed to decode response for jsonRPC", "error", err) 109 
| return 0, err 110 | } 111 | return hexutils.IntFromHex(resp.Result) 112 | } 113 | 114 | // GroupedJSONrpc is a helper function to spawn multiple calls belonging to the same group. 115 | // errors are propagated to the errgroup. 116 | // concurrency is managed by the worker pool. 117 | func (c *rpcClient) GroupedJSONrpc( 118 | ctx context.Context, 119 | group *errgroup.Group, 120 | method string, 121 | args []any, 122 | output *bytes.Buffer, 123 | debugBlockNumber int64, 124 | ) { 125 | c.execOnPoolInGroup(group, func() error { 126 | err := c.getResponseBody(ctx, method, args, output) 127 | if err != nil { 128 | c.log.Error("Failed to get response for jsonRPC", 129 | "blockNumber", debugBlockNumber, 130 | "method", method, 131 | "error", err, 132 | ) 133 | } 134 | return err 135 | }) 136 | } 137 | 138 | func (c *rpcClient) execOnPoolInGroup( 139 | group *errgroup.Group, 140 | function func() error, 141 | ) { 142 | group.Go(func() error { 143 | errCh := make(chan error, 1) 144 | c.wrkPool.Submit(func() { 145 | defer close(errCh) 146 | err := function() 147 | if err != nil { 148 | errCh <- err 149 | } else { 150 | errCh <- nil 151 | } 152 | }) 153 | return <-errCh 154 | }) 155 | } 156 | 157 | // getResponseBody sends a request to the server and returns the response body 158 | func (c *rpcClient) getResponseBody( 159 | ctx context.Context, method string, params []interface{}, output *bytes.Buffer, 160 | ) error { 161 | reqData := map[string]interface{}{ 162 | "jsonrpc": "2.0", 163 | "id": 1, 164 | "method": method, 165 | "params": params, 166 | } 167 | encoder := json.NewEncoder(output) 168 | if err := encoder.Encode(reqData); err != nil { 169 | return err 170 | } 171 | req, err := retryablehttp.NewRequestWithContext(ctx, http.MethodPost, c.cfg.URL, output) 172 | if err != nil { 173 | return err 174 | } 175 | if c.httpHeaders != nil { 176 | for k, v := range c.httpHeaders { 177 | req.Header.Set(k, v) 178 | } 179 | } 180 | 181 | t0 := time.Now() 182 | resp, err := 
c.client.Do(req) 183 | if err != nil { 184 | observeRPCRequestErr(err, method, t0) 185 | return fmt.Errorf("failed to send request for method %s: %w", method, err) 186 | } 187 | defer resp.Body.Close() 188 | observeRPCRequestCode(resp.StatusCode, method, t0) 189 | 190 | if resp.StatusCode != http.StatusOK { 191 | return fmt.Errorf("response for method %s has status code %d", method, resp.StatusCode) 192 | } 193 | 194 | output.Reset() 195 | if _, err := output.ReadFrom(resp.Body); err != nil { 196 | return fmt.Errorf("failed to read response body for method %s: %w", method, err) 197 | } 198 | return nil 199 | } 200 | 201 | func (c *rpcClient) Close() error { 202 | c.wrkPool.Release() 203 | return nil 204 | } 205 | 206 | func (c *rpcClient) buildRPCBlockResponse(number int64, results []*bytes.Buffer) (models.RPCBlock, error) { 207 | var buffer bytes.Buffer 208 | for _, res := range results { 209 | buffer.Grow(res.Len()) 210 | buffer.ReadFrom(res) 211 | } 212 | return models.RPCBlock{ 213 | BlockNumber: number, 214 | Payload: buffer.Bytes(), 215 | }, nil 216 | } 217 | 218 | func (c *rpcClient) putBuffer(buf *bytes.Buffer) { 219 | buf.Reset() 220 | c.bufPool.Put(buf) 221 | } 222 | -------------------------------------------------------------------------------- /client/jsonrpc/httpclient.go: -------------------------------------------------------------------------------- 1 | package jsonrpc 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "net/http" 7 | 8 | "github.com/hashicorp/go-retryablehttp" 9 | ) 10 | 11 | type HTTPClient interface { 12 | Do(req *retryablehttp.Request) (*http.Response, error) 13 | } 14 | 15 | func NewHTTPClient(log *slog.Logger) *retryablehttp.Client { 16 | client := retryablehttp.NewClient() 17 | client.RetryMax = MaxRetries 18 | client.Logger = log 19 | checkRetry := func(ctx context.Context, resp *http.Response, err error) (bool, error) { 20 | yes, err2 := retryablehttp.DefaultRetryPolicy(ctx, resp, err) 21 | if yes { 22 | if resp == nil { 23 
| log.Warn("Retrying request to RPC client", "error", err2) 24 | } else { 25 | log.Warn("Retrying request to RPC client", "statusCode", resp.Status, "error", err2) 26 | } 27 | } 28 | return yes, err2 29 | } 30 | client.CheckRetry = checkRetry 31 | client.Backoff = retryablehttp.LinearJitterBackoff 32 | client.HTTPClient.Timeout = DefaultRequestTimeout 33 | return client 34 | } 35 | -------------------------------------------------------------------------------- /client/jsonrpc/httpclient_test.go: -------------------------------------------------------------------------------- 1 | package jsonrpc_test 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "log" 9 | "log/slog" 10 | "net/http" 11 | "os" 12 | 13 | "github.com/duneanalytics/blockchain-ingester/client/jsonrpc" 14 | jsonrpc_mock "github.com/duneanalytics/blockchain-ingester/mocks/jsonrpc" 15 | "github.com/duneanalytics/blockchain-ingester/models" 16 | "github.com/hashicorp/go-retryablehttp" 17 | ) 18 | 19 | type jsonRPCRequest struct { 20 | Method string `json:"method"` 21 | Params []interface{} `json:"params"` 22 | HTTPHeaders http.Header 23 | } 24 | 25 | type jsonRPCResponse struct { 26 | Body io.Reader 27 | StatusCode int // optional, default to 200 28 | ContentType string // optional, default to "application/json" 29 | } 30 | 31 | type MockedRequest struct { 32 | Req jsonRPCRequest 33 | Resp jsonRPCResponse 34 | } 35 | 36 | func MockHTTPRequests(requests []MockedRequest) *jsonrpc_mock.HTTPClientMock { 37 | // helper function to setup a mock http client with recorded request responses 38 | // non-registered requests will return an error 39 | return &jsonrpc_mock.HTTPClientMock{ 40 | DoFunc: func(req *retryablehttp.Request) (*http.Response, error) { 41 | if req.Method != http.MethodPost { 42 | return nil, fmt.Errorf("expected POST method, got %s", req.Method) 43 | } 44 | // we use httpretryable.Client, so we can't use req.Body directly 45 | // we need to read the body and then reset it 
46 | 47 | body, err := req.BodyBytes() 48 | if err != nil { 49 | return nil, err 50 | } 51 | var jsonReq jsonRPCRequest 52 | if err := json.Unmarshal(body, &jsonReq); err != nil { 53 | return nil, err 54 | } 55 | jsonReqParams := fmt.Sprintf("%+v", jsonReq.Params) 56 | // looking for a matching request 57 | for _, r := range requests { 58 | if r.Req.Method == jsonReq.Method { 59 | // we do this because reflect.DeepEquals() Comparison fails on map[string]any != map[string]string 60 | if jsonReqParams != fmt.Sprintf("%+v", r.Req.Params) { 61 | continue 62 | } 63 | // this is a match, validate registered headers 64 | for k, v := range r.Req.HTTPHeaders { 65 | if req.Header.Get(k) != v[0] { 66 | return nil, fmt.Errorf("expected header %s to be %s, got %s", k, v[0], req.Header.Get(k)) 67 | } 68 | } 69 | // all headers match, return the response 70 | resp := &http.Response{ 71 | StatusCode: 200, 72 | Body: io.NopCloser(r.Resp.Body), 73 | Header: make(http.Header), 74 | } 75 | if r.Resp.StatusCode != 0 { 76 | resp.StatusCode = r.Resp.StatusCode 77 | } 78 | resp.Header.Set("Content-Type", "application/json") 79 | if r.Resp.ContentType != "" { 80 | resp.Header.Set("Content-Type", r.Resp.ContentType) 81 | } 82 | return resp, nil 83 | } 84 | } 85 | // for simplificy, we include a default response for eth_blockNumber with a valid response 86 | if jsonReq.Method == "eth_blockNumber" { 87 | resp := &http.Response{ 88 | StatusCode: 200, 89 | Body: io.NopCloser(bytes.NewReader([]byte(`{"jsonrpc":"2.0","id":1,"result":"0x7a549b"}`))), 90 | } 91 | return resp, nil 92 | } 93 | return nil, fmt.Errorf("no matching request found, req: %+v", jsonReq) 94 | }, 95 | } 96 | } 97 | 98 | func NewTestLogger() *slog.Logger { 99 | return slog.New(slog.NewTextHandler(io.Discard, nil)) 100 | } 101 | 102 | func NewTestRPCClient(httpClient jsonrpc.HTTPClient, stack models.EVMStack) (jsonrpc.BlockchainClient, error) { 103 | return jsonrpc.NewRPCClient(NewTestLogger(), httpClient, 
jsonrpc.Config{EVMStack: stack}) 104 | } 105 | 106 | func readFileForTest(filename string) *bytes.Buffer { 107 | data, err := os.ReadFile(filename) 108 | if err != nil { 109 | log.Panicf("Failed to read file: %v", err) 110 | } 111 | return bytes.NewBuffer(data) 112 | } 113 | -------------------------------------------------------------------------------- /client/jsonrpc/metrics.go: -------------------------------------------------------------------------------- 1 | package jsonrpc 2 | 3 | import ( 4 | "errors" 5 | "net/url" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/prometheus/client_golang/prometheus" 10 | "github.com/prometheus/client_golang/prometheus/promauto" 11 | ) 12 | 13 | var rpcRequestCount = promauto.NewCounterVec( 14 | prometheus.CounterOpts{ 15 | Namespace: "node_indexer", 16 | Subsystem: "rpc_client", 17 | Name: "request_total", 18 | Help: "Total number of RPC node requests", 19 | }, 20 | []string{"status", "method"}, 21 | ) 22 | 23 | var rpcRequestDurationMillis = promauto.NewHistogramVec( 24 | prometheus.HistogramOpts{ 25 | Namespace: "node_indexer", 26 | Subsystem: "rpc_client", 27 | Name: "request_duration_millis", 28 | Help: "Duration of RPC node requests in milliseconds", 29 | Buckets: []float64{10, 25, 50, 100, 250, 500, 1000, 2000, 4000}, 30 | }, 31 | []string{"status", "method"}, 32 | ) 33 | 34 | func observeRPCRequest(status string, method string, t0 time.Time) { 35 | rpcRequestCount.WithLabelValues(status, method).Inc() 36 | rpcRequestDurationMillis.WithLabelValues(status, method).Observe(float64(time.Since(t0).Milliseconds())) 37 | } 38 | 39 | func observeRPCRequestCode(statusCode int, method string, t0 time.Time) { 40 | observeRPCRequest(strconv.Itoa(statusCode), method, t0) 41 | } 42 | 43 | func observeRPCRequestErr(err error, method string, t0 time.Time) { 44 | observeRPCRequest(errorToStatus(err), method, t0) 45 | } 46 | 47 | func errorToStatus(err error) string { 48 | status := "unknown_error" 49 | var urlErr *url.Error 50 | if 
errors.As(err, &urlErr) { 51 | if urlErr.Timeout() { 52 | status = "timeout" 53 | } else { 54 | status = "connection_refused" 55 | } 56 | } 57 | return status 58 | } 59 | -------------------------------------------------------------------------------- /client/jsonrpc/models.go: -------------------------------------------------------------------------------- 1 | package jsonrpc 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/duneanalytics/blockchain-ingester/models" 7 | ) 8 | 9 | type Config struct { 10 | URL string 11 | PollInterval time.Duration 12 | HTTPHeaders map[string]string 13 | EVMStack models.EVMStack 14 | // rpcClient is used in parallel by the ingester to fetch blocks 15 | // but it also has internal request concurrency on handling each block 16 | // to avoid spawning too many http requests to the RPC node we set here an upper limit 17 | TotalRPCConcurrency int 18 | } 19 | -------------------------------------------------------------------------------- /client/jsonrpc/opstack.go: -------------------------------------------------------------------------------- 1 | package jsonrpc 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/duneanalytics/blockchain-ingester/models" 10 | "golang.org/x/sync/errgroup" 11 | ) 12 | 13 | type OpStackClient struct { 14 | rpcClient 15 | } 16 | 17 | var _ BlockchainClient = &OpStackClient{} 18 | 19 | // BlockByNumber returns the block with the given blockNumber. 20 | // it uses 3 different methods to get the block: 21 | // 1. eth_getBlockByNumber 22 | // 2. eth_getBlockReceipts 23 | // 3. debug_traceBlockByNumber with tracer "callTracer" 24 | // We encode the payload in NDJSON, in this order. 
25 | // TODO: debug_traceBlockByNumber should be optional 26 | // 27 | // we should handle the case where it is not available 28 | func (c *OpStackClient) BlockByNumber(ctx context.Context, blockNumber int64) (models.RPCBlock, error) { 29 | tStart := time.Now() 30 | defer func() { 31 | c.log.Debug("BlockByNumber", "blockNumber", blockNumber, "duration", time.Since(tStart)) 32 | }() 33 | blockNumberHex := fmt.Sprintf("0x%x", blockNumber) 34 | 35 | // TODO: split this into mandatory and optional methods 36 | methods := []string{ 37 | "eth_getBlockByNumber", 38 | "eth_getBlockReceipts", 39 | "debug_traceBlockByNumber", 40 | } 41 | methodArgs := map[string][]any{ 42 | "eth_getBlockByNumber": {blockNumberHex, true}, 43 | "eth_getBlockReceipts": {blockNumberHex}, 44 | "debug_traceBlockByNumber": {blockNumberHex, map[string]string{"tracer": "callTracer"}}, 45 | } 46 | group, ctx := errgroup.WithContext(ctx) 47 | results := make([]*bytes.Buffer, len(methods)) 48 | for i, method := range methods { 49 | results[i] = c.bufPool.Get().(*bytes.Buffer) 50 | defer c.putBuffer(results[i]) 51 | 52 | c.GroupedJSONrpc(ctx, group, method, methodArgs[method], results[i], blockNumber) 53 | } 54 | 55 | if err := group.Wait(); err != nil { 56 | return models.RPCBlock{}, err 57 | } 58 | 59 | return c.buildRPCBlockResponse(blockNumber, results) 60 | } 61 | -------------------------------------------------------------------------------- /client/jsonrpc/opstack_test.go: -------------------------------------------------------------------------------- 1 | package jsonrpc_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "testing" 7 | 8 | "github.com/duneanalytics/blockchain-ingester/models" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestOpStackBasic(t *testing.T) { 13 | getBlockByNumberResponse := readFileForTest( 14 | "testdata/opstack-MODE-block-0x7a549b-eth_getBlockByNumber.json") 15 | getBlockReceiptsResponse := readFileForTest( 16 | 
"testdata/opstack-MODE-block-0x7a549b-eth_getBlockReceipts.json") 17 | debugtraceBlockByNumberResponse := readFileForTest( 18 | "testdata/opstack-MODE-block-0x7a549b-debug_traceBlockByNumber.json") 19 | 20 | var expectedPayload bytes.Buffer 21 | expectedPayload.Write(getBlockByNumberResponse.Bytes()) 22 | expectedPayload.Write(getBlockReceiptsResponse.Bytes()) 23 | expectedPayload.Write(debugtraceBlockByNumberResponse.Bytes()) 24 | expectedPayloadBytes := expectedPayload.Bytes() 25 | 26 | blockNumberHex := "0x7a549b" 27 | blockNumber := int64(8017051) 28 | httpClientMock := MockHTTPRequests( 29 | []MockedRequest{ 30 | { 31 | Req: jsonRPCRequest{ 32 | Method: "eth_getBlockByNumber", 33 | Params: []interface{}{blockNumberHex, true}, 34 | }, 35 | Resp: jsonRPCResponse{ 36 | Body: getBlockByNumberResponse, 37 | }, 38 | }, 39 | { 40 | Req: jsonRPCRequest{ 41 | Method: "eth_getBlockReceipts", 42 | Params: []interface{}{blockNumberHex}, 43 | }, 44 | Resp: jsonRPCResponse{ 45 | Body: getBlockReceiptsResponse, 46 | }, 47 | }, 48 | { 49 | Req: jsonRPCRequest{ 50 | Method: "debug_traceBlockByNumber", 51 | Params: []interface{}{blockNumberHex, map[string]string{"tracer": "callTracer"}}, 52 | }, 53 | Resp: jsonRPCResponse{ 54 | Body: debugtraceBlockByNumberResponse, 55 | }, 56 | }, 57 | }) 58 | 59 | opstack, err := NewTestRPCClient(httpClientMock, models.OpStack) 60 | require.NoError(t, err) 61 | 62 | block, err := opstack.BlockByNumber(context.Background(), blockNumber) 63 | require.NoError(t, err) 64 | require.NotNil(t, block) 65 | require.Equal(t, blockNumber, block.BlockNumber) 66 | require.False(t, block.Errored()) 67 | require.Equal(t, expectedPayloadBytes, block.Payload) 68 | } 69 | -------------------------------------------------------------------------------- /client/jsonrpc/testdata/arbitrumnitro-DEGEN-block-0x16870e9-debug_traceBlockByNumber.json: -------------------------------------------------------------------------------- 1 | 
{"jsonrpc":"2.0","result":[{"result":{"afterEVMTransfers":[],"beforeEVMTransfers":[],"from":"0x00000000000000000000000000000000000a4b05","gas":"0x0","gasUsed":"0x0","input":"0x6bf6a42d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010a1d9000000000000000000000000000000000000000000000000000000000016870e90000000000000000000000000000000000000000000000000000000000000006","to":"0x00000000000000000000000000000000000a4b05","type":"CALL","value":"0x0"},"txHash":"0x19ee83020d4dad7e96dbb2c01ce2441e75717ee038a022fc6a3b61300b1b801c"},{"result":{"afterEVMTransfers":[{"from":null,"purpose":"gasRefund","to":"0xE68ca824c376eA70c439Fd7F3C978772903E7f9d","value":"0x0"},{"from":null,"purpose":"feeCollection","to":"0x6CBb552855CE5Eb70af49B76a8048be8E3799A05","value":"0x1f2d8b299eb000"},{"from":null,"purpose":"feeCollection","to":"0xa4B00000000000000000000000000000000000F6","value":"0x0"}],"beforeEVMTransfers":[{"from":"0xE68ca824c376eA70c439Fd7F3C978772903E7f9d","purpose":"feePayment","to":null,"value":"0x1f2d8b299eb000"}],"calls":[{"from":"0x831f011b38fd707229b2d1fcf3c8a1964200c9fe","gas":"0xe898","gasUsed":"0xe66d","input":"0x6a62784200000000000000000000000031613e09f7e42cf1130ee49dcafad8f72d13411a","to":"0x1f48d1d51d8530c51e052bb423b1ae46e18668c3","type":"CALL","value":"0x0"}],"from":"0xe68ca824c376ea70c439fd7f3c978772903e7f9d","gas":"0x156ce","gasUsed":"0x156ce","input":"0xae152cf30000000000000000000000001f48d1d51d8530c51e052bb423b1ae46e18668c300000000000000000000000031613e09f7e42cf1130ee49dcafad8f72d13411a","to":"0x831f011b38fd707229b2d1fcf3c8a1964200c9fe","type":"CALL","value":"0x0"},"txHash":"0x4e805891b568698f8419f8e162d70ed9675e42a32e4972cbeb7f78d7fd51de76"}],"id":1} 2 | -------------------------------------------------------------------------------- /client/jsonrpc/testdata/arbitrumnitro-DEGEN-block-0x16870e9-eth_getBlockByNumber.json: 
-------------------------------------------------------------------------------- 1 | {"jsonrpc":"2.0","result":{"baseFeePerGas":"0x174876e800","difficulty":"0x1","extraData":"0x1f843dfe91368c7b0fe8fc5b8fe4b9015064a5b391552dc3b655d8be7a67e447","gasLimit":"0x4000000000000","gasUsed":"0x156ce","hash":"0xd00591af64d4426dff9597120c1800de53d249f90f89732cb73387879daceb29","l1BlockNumber":"0x10a1d90","logsBloom":"0x00000000000000000000000000000000000000008010080000000000000000000000000000008000000000000000000000000000000000000000000000000000000000002000000000000008000000000000000000000000000000000000002010000000020000000000000000000800000000000000000000000010000000000000000000000000020000000000000000000000000000000000000000000000000000000100000000100000000000000000000004008000000000800000000000000013000000000000000000000000000000000000000000000000002020000000000000000000000000000000000004000000000000000000080000000000","miner":"0xa4b000000000000000000073657175656e636572","mixHash":"0x000000000000529c00000000010a1d9000000000000000140000000000000000","nonce":"0x0000000000021135","number":"0x16870e9","parentHash":"0x40221a13bf33965af67788ef34350eaa1e4217ce292bdda64c5c1a247fe026f2","receiptsRoot":"0xc7a9596b2cde78841ad26d844f022f1e8448884dfa94c031af2f21d5cfa946fb","sendCount":"0x529c","sendRoot":"0x1f843dfe91368c7b0fe8fc5b8fe4b9015064a5b391552dc3b655d8be7a67e447","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x37c","stateRoot":"0x604316caa3dc894abb531902a7b47e455a4a7dba004a51e4e59bb5be490a3518","timestamp":"0x669e9808","totalDifficulty":"0x16870ea","transactions":[{"blockHash":"0xd00591af64d4426dff9597120c1800de53d249f90f89732cb73387879daceb29","blockNumber":"0x16870e9","chainId":"0x27bc86aa","from":"0x00000000000000000000000000000000000a4b05","gas":"0x0","gasPrice":"0x0","hash":"0x19ee83020d4dad7e96dbb2c01ce2441e75717ee038a022fc6a3b61300b1b801c","input":"0x6bf6a42d0000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000010a1d9000000000000000000000000000000000000000000000000000000000016870e90000000000000000000000000000000000000000000000000000000000000006","nonce":"0x0","r":"0x0","s":"0x0","to":"0x00000000000000000000000000000000000a4b05","transactionIndex":"0x0","type":"0x6a","v":"0x0","value":"0x0"},{"accessList":[],"blockHash":"0xd00591af64d4426dff9597120c1800de53d249f90f89732cb73387879daceb29","blockNumber":"0x16870e9","chainId":"0x27bc86aa","from":"0xe68ca824c376ea70c439fd7f3c978772903e7f9d","gas":"0x156ce","gasPrice":"0x17a1df1700","hash":"0x4e805891b568698f8419f8e162d70ed9675e42a32e4972cbeb7f78d7fd51de76","input":"0xae152cf30000000000000000000000001f48d1d51d8530c51e052bb423b1ae46e18668c300000000000000000000000031613e09f7e42cf1130ee49dcafad8f72d13411a","maxFeePerGas":"0x2eea55ff00","maxPriorityFeePerGas":"0x59682f00","nonce":"0x8fed7","r":"0x4053346a2e1e4eee95bf434c77fe54638bce942fa0da17bbc9dd47a2615cc5c4","s":"0x14c3dbdcb2ae020e3b2dc7fb3dd825e3c61f53619de1a56c43cdf8bf1af9e099","to":"0x831f011b38fd707229b2d1fcf3c8a1964200c9fe","transactionIndex":"0x1","type":"0x2","v":"0x0","value":"0x0","yParity":"0x0"}],"transactionsRoot":"0x54421d2781309c17000f621600f43db048aa3d1205cf1bc08f65bc37e2e48ab0","uncles":[]},"id":1} 2 | -------------------------------------------------------------------------------- /client/jsonrpc/testdata/arbitrumnitro-DEGEN-block-0x16870e9-eth_getTransactionReceipt-0x0.json: -------------------------------------------------------------------------------- 1 | 
{"jsonrpc":"2.0","result":{"blockHash":"0xd00591af64d4426dff9597120c1800de53d249f90f89732cb73387879daceb29","blockNumber":"0x16870e9","contractAddress":null,"cumulativeGasUsed":"0x0","effectiveGasPrice":"0x174876e800","from":"0x00000000000000000000000000000000000a4b05","gasUsed":"0x0","gasUsedForL1":"0x0","l1BlockNumber":"0x10a1d90","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":"0x00000000000000000000000000000000000a4b05","transactionHash":"0x19ee83020d4dad7e96dbb2c01ce2441e75717ee038a022fc6a3b61300b1b801c","transactionIndex":"0x0","type":"0x6a"},"id":1} 2 | -------------------------------------------------------------------------------- /client/jsonrpc/testdata/arbitrumnitro-DEGEN-block-0x16870e9-eth_getTransactionReceipt-0x1.json: -------------------------------------------------------------------------------- 1 | 
{"jsonrpc":"2.0","result":{"blockHash":"0xd00591af64d4426dff9597120c1800de53d249f90f89732cb73387879daceb29","blockNumber":"0x16870e9","contractAddress":null,"cumulativeGasUsed":"0x156ce","effectiveGasPrice":"0x174876e800","from":"0xe68ca824c376ea70c439fd7f3c978772903e7f9d","gasUsed":"0x156ce","gasUsedForL1":"0x0","l1BlockNumber":"0x10a1d90","logs":[{"address":"0x1f48d1d51d8530c51e052bb423b1ae46e18668c3","blockHash":"0xd00591af64d4426dff9597120c1800de53d249f90f89732cb73387879daceb29","blockNumber":"0x16870e9","data":"0x","logIndex":"0x0","removed":false,"topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x0000000000000000000000000000000000000000000000000000000000000000","0x00000000000000000000000031613e09f7e42cf1130ee49dcafad8f72d13411a","0x000000000000000000000000000000000000000000000000000000000000001b"],"transactionHash":"0x4e805891b568698f8419f8e162d70ed9675e42a32e4972cbeb7f78d7fd51de76","transactionIndex":"0x1"},{"address":"0x831f011b38fd707229b2d1fcf3c8a1964200c9fe","blockHash":"0xd00591af64d4426dff9597120c1800de53d249f90f89732cb73387879daceb29","blockNumber":"0x16870e9","data":"0x00000000000000000000000031613e09f7e42cf1130ee49dcafad8f72d13411a","logIndex":"0x1","removed":false,"topics":["0x4765472e8d7a205f24a8863709e382af628b1267cefb31a8d59dfeec4c042433","0x0000000000000000000000001f48d1d51d8530c51e052bb423b1ae46e18668c3"],"transactionHash":"0x4e805891b568698f8419f8e162d70ed9675e42a32e4972cbeb7f78d7fd51de76","transactionIndex":"0x1"}],"logsBloom":"0x00000000000000000000000000000000000000008010080000000000000000000000000000008000000000000000000000000000000000000000000000000000000000002000000000000008000000000000000000000000000000000000002010000000020000000000000000000800000000000000000000000010000000000000000000000000020000000000000000000000000000000000000000000000000000000100000000100000000000000000000004008000000000800000000000000013000000000000000000000000000000000000000000000000002020000000000000000000000000000000000004000000000
000000000080000000000","status":"0x1","to":"0x831f011b38fd707229b2d1fcf3c8a1964200c9fe","transactionHash":"0x4e805891b568698f8419f8e162d70ed9675e42a32e4972cbeb7f78d7fd51de76","transactionIndex":"0x1","type":"0x2"},"id":1} 2 | -------------------------------------------------------------------------------- /client/jsonrpc/testdata/opstack-MODE-block-0x7a549b-eth_getBlockByNumber.json: -------------------------------------------------------------------------------- 1 | { 2 | "jsonrpc": "2.0", 3 | "result": { 4 | "baseFeePerGas": "0x13a2", 5 | "blobGasUsed": "0x0", 6 | "difficulty": "0x0", 7 | "excessBlobGas": "0x0", 8 | "extraData": "0x", 9 | "gasLimit": "0x1c9c380", 10 | "gasUsed": "0x442974", 11 | "hash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 12 | "logsBloom": "0x00000000000000000000000000000002000000000000000000008000000000000000000000000000000000004000000000000000000000000000000000000000000000000000100000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001020000000000000000000000000000000000000002000001000000000000000000100100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000", 13 | "miner": "0x4200000000000000000000000000000000000011", 14 | "mixHash": "0x134657ec80b014632c560af436524a999ec3e3961e9911d21fb413f050b12d2f", 15 | "nonce": "0x0000000000000000", 16 | "number": "0x7a549b", 17 | "parentBeaconBlockRoot": "0xd38373593b0d5e9eb20998264fde22b642f6658f7a2975897a7ade285e4b4daf", 18 | "parentHash": "0x98b4922cd437bbd00d09c72eaa4569de934918cb52ddfcf5c765928d86f96306", 19 | "receiptsRoot": "0xaec6d23829974cc7f55ab728fe1e87615d195fc5d0460fd5d42febfd892ba298", 20 | "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", 21 | "size": "0x5fe", 22 | "stateRoot": 
"0xed73bfa7d6bc0535f1b1318d1ae58983ade52d55c5005c3257fa66e5a930ce09", 23 | "timestamp": "0x664b28d5", 24 | "totalDifficulty": "0x0", 25 | "transactions": [ 26 | { 27 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 28 | "blockNumber": "0x7a549b", 29 | "depositReceiptVersion": "0x1", 30 | "from": "0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001", 31 | "gas": "0xf4240", 32 | "gasPrice": "0x0", 33 | "hash": "0x6711626fa36243fc18c0f8bac16ab9fcf017c35ec6e340a495ec16151e453757", 34 | "input": "0x440a5e2000004e2000095506000000000000000000000000664b289700000000012fcfc700000000000000000000000000000000000000000000000000000001d6fedf000000000000000000000000000000000000000000000000000000000000000001b7c4005bbc6eee3d76205edaacc42eca5c8fbac4b6b3e0affcd75a864b43bc8000000000000000000000000099199a22125034c808ff20f377d91187e8050f2e", 35 | "mint": "0x0", 36 | "nonce": "0x7a549b", 37 | "r": "0x0", 38 | "s": "0x0", 39 | "sourceHash": "0x5cde6945af90e967d107d56df1c22d0f2844047a38cf655bebfb4ed38e8832a0", 40 | "to": "0x4200000000000000000000000000000000000015", 41 | "transactionIndex": "0x0", 42 | "type": "0x7e", 43 | "v": "0x0", 44 | "value": "0x0" 45 | }, 46 | { 47 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 48 | "blockNumber": "0x7a549b", 49 | "chainId": "0x868b", 50 | "from": "0x3342ac381db93eb7768d093e2f1231a2da425d0d", 51 | "gas": "0x5eef", 52 | "gasPrice": "0x10de7b", 53 | "hash": "0x9e88acd51d0f1f2244d278026376105b6b6569b5395c48aa52f2030f866734ef", 54 | "input": "0x646174613a2c7b2270223a226d6f64652d3230222c226f70223a226d696e74222c227469636b223a226d6f646573222c22616d74223a2231227d", 55 | "nonce": "0x24b", 56 | "r": "0x90df8b4c39cc49b799e9eb1efaff48a3037b97cbda14553a221a02b991551c2a", 57 | "s": "0x43abc34cce779a1affc0e9bd5a5a4f213a0361dabaac299cd0668d6c7e9b5a57", 58 | "to": "0x3342ac381db93eb7768d093e2f1231a2da425d0d", 59 | "transactionIndex": "0x1", 60 | "type": "0x0", 61 | "v": "0x10d3a", 62 | "value": "0x0" 63 
| }, 64 | { 65 | "accessList": [], 66 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 67 | "blockNumber": "0x7a549b", 68 | "chainId": "0x868b", 69 | "from": "0x8677f549789e7981f76d4d80c7c54b2dfaa060af", 70 | "gas": "0x22047", 71 | "gasPrice": "0x19a42", 72 | "hash": "0x6aef02d81df05983cbd528a7e1d57696cef3f0f3f5f7a8c5a9b1f7d4b6a6d8bd", 73 | "input": "0x17835d1c000000000000000000000000000000000000000000000000002f61ab04005df300000000000000000000000000000000000000000000000000000000664b28cf", 74 | "maxFeePerGas": "0x5968564a", 75 | "maxPriorityFeePerGas": "0x186a0", 76 | "nonce": "0x12510", 77 | "r": "0x31ad32847e970beb40026e6f11ebcbc9eb40d0b8cf78976281400cf43c3bfb2b", 78 | "s": "0x153b2f2741f9afceb91b9fd45915673092f52c794a39f7ccc185915d0d76ba5d", 79 | "to": "0x3180341afdd106f14d224ec96c9a17420ab5f33d", 80 | "transactionIndex": "0x2", 81 | "type": "0x2", 82 | "v": "0x0", 83 | "value": "0x0", 84 | "yParity": "0x0" 85 | }, 86 | { 87 | "accessList": [], 88 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 89 | "blockNumber": "0x7a549b", 90 | "chainId": "0x868b", 91 | "from": "0x5a178ae7d1e3fc42a8425a0c455e3b6c3157ff0c", 92 | "gas": "0x1ab3f00", 93 | "gasPrice": "0x2710", 94 | "hash": "0x310b151c69b810e900cd72c6b0335d8e9dafb783e793eae48b72aa3b6314a704", 95 | "input": "0x00", 96 | "maxFeePerGas": "0x2710", 97 | "maxPriorityFeePerGas": "0x270f", 98 | "nonce": "0x1e59f", 99 | "r": "0x2a307446b54789023633695694108ff5d1c02dc37fa87dd31d7750a134353ee8", 100 | "s": "0x1e5a395c8e0f6943ff04f4d1db127887e6961aed6d8fce80d9a677a73fb065ba", 101 | "to": "0xe6dce473803a7fee2805f02c0140ce01bc738186", 102 | "transactionIndex": "0x3", 103 | "type": "0x2", 104 | "v": "0x0", 105 | "value": "0x0", 106 | "yParity": "0x0" 107 | }, 108 | { 109 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 110 | "blockNumber": "0x7a549b", 111 | "chainId": "0x868b", 112 | "from": 
"0x93df6a148a6dd5b162ae1f49d03a981e13afc8f5", 113 | "gas": "0x1312d00", 114 | "gasPrice": "0x2710", 115 | "hash": "0x27c04bd94b2404b0e9b898394c72cdf95471f2a4a57ca563be44a899d9b91a9a", 116 | "input": "0x00000000", 117 | "nonce": "0x29a2c", 118 | "r": "0xd58aa4ca52e46514b9ceb18502775ac0fb1653721b8fd0fc36cf52c0031b52b", 119 | "s": "0xdf6eea19e8925e5618509d8b1b68d58fb43c70c593a743f1ac712b2cbf9ec2c", 120 | "to": "0x4397da4e02198b79e6934ad10e395d43816b553d", 121 | "transactionIndex": "0x4", 122 | "type": "0x0", 123 | "v": "0x10d3a", 124 | "value": "0x0" 125 | }, 126 | { 127 | "accessList": [], 128 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 129 | "blockNumber": "0x7a549b", 130 | "chainId": "0x868b", 131 | "from": "0x1e8248888f342a4fdc91b4de3a883193dc8539bb", 132 | "gas": "0x12e1fc0", 133 | "gasPrice": "0x13a3", 134 | "hash": "0x42c16a4a0d2129017f90755da9727d1a77e2d4ae3289489c0ce356b414941746", 135 | "input": "0x4e71d92d", 136 | "maxFeePerGas": "0x5f6084a", 137 | "maxPriorityFeePerGas": "0x1", 138 | "nonce": "0x27320", 139 | "r": "0xce51449548a7d61c6db165e3d03ed629c21733971f1daae3d9f1ac9248733381", 140 | "s": "0x1b9b2dbe8ceed702f54b3c124dfb72405f6ea26357c58a056b983aee4c00a4d0", 141 | "to": "0x2d58e04e43411c58e781339d70f8ebe05aa17b68", 142 | "transactionIndex": "0x5", 143 | "type": "0x2", 144 | "v": "0x0", 145 | "value": "0x0", 146 | "yParity": "0x0" 147 | } 148 | ], 149 | "transactionsRoot": "0xb710d62aa870d3c6b90f1ef85e285ac91e23f3bcc61f1d181d22ee884f65bd98", 150 | "uncles": [], 151 | "withdrawals": [], 152 | "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" 153 | }, 154 | "id": 1 155 | } 156 | -------------------------------------------------------------------------------- /client/jsonrpc/testdata/opstack-MODE-block-0x7a549b-eth_getBlockReceipts.json: -------------------------------------------------------------------------------- 1 | { 2 | "jsonrpc": "2.0", 3 | "result": [ 4 | { 5 | 
"blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 6 | "blockNumber": "0x7a549b", 7 | "contractAddress": null, 8 | "cumulativeGasUsed": "0xcbf7", 9 | "depositNonce": "0x7a549b", 10 | "depositReceiptVersion": "0x1", 11 | "effectiveGasPrice": "0x0", 12 | "from": "0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001", 13 | "gasUsed": "0xcbf7", 14 | "logs": [], 15 | "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 16 | "status": "0x1", 17 | "to": "0x4200000000000000000000000000000000000015", 18 | "transactionHash": "0x6711626fa36243fc18c0f8bac16ab9fcf017c35ec6e340a495ec16151e453757", 19 | "transactionIndex": "0x0", 20 | "type": "0x7e" 21 | }, 22 | { 23 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 24 | "blockNumber": "0x7a549b", 25 | "contractAddress": null, 26 | "cumulativeGasUsed": "0x1219f", 27 | "effectiveGasPrice": "0x10de7b", 28 | "from": "0x3342ac381db93eb7768d093e2f1231a2da425d0d", 29 | "gasUsed": "0x55a8", 30 | "l1Fee": "0x608dd93c8d", 31 | "l1GasPrice": "0x1d6fedf00", 32 | "l1GasUsed": "0xa40", 33 | "logs": [], 34 | "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 35 | "status": "0x1", 36 | "to": "0x3342ac381db93eb7768d093e2f1231a2da425d0d", 37 | "transactionHash": "0x9e88acd51d0f1f2244d278026376105b6b6569b5395c48aa52f2030f866734ef", 38 | "transactionIndex": "0x1", 39 | "type": "0x0" 40 | }, 41 | { 42 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 43 | "blockNumber": "0x7a549b", 44 | "contractAddress": null, 45 | "cumulativeGasUsed": "0x33d7b", 46 | "effectiveGasPrice": "0x19a42", 47 | "from": "0x8677f549789e7981f76d4d80c7c54b2dfaa060af", 48 | "gasUsed": "0x21bdc", 49 | "l1Fee": "0x53e5661f8a", 50 | "l1GasPrice": "0x1d6fedf00", 51 | "l1GasUsed": "0x8e8", 52 | "logs": [ 53 | { 54 | "address": "0x3180341afdd106f14d224ec96c9a17420ab5f33d", 55 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 56 | "blockNumber": "0x7a549b", 57 | "data": "0x000000000000000000000000cdd475325d6f564d27247d1dddbb0dac6fa0a5cf00000000000000000000000000000000000cea5044471bf5a7169dace880000000000000000000000000000000000000000ced28894f0dac2b3cb76828000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008da", 58 | "logIndex": "0x0", 59 | "removed": false, 60 | "topics": [ 61 | "0x23b9387f81fca646aac1dc4487ede045c65f5f7445482906565f01e05afdb3a8" 62 | ], 63 | "transactionHash": "0x6aef02d81df05983cbd528a7e1d57696cef3f0f3f5f7a8c5a9b1f7d4b6a6d8bd", 64 | "transactionIndex": "0x2" 65 | }, 66 | { 67 | 
"address": "0x1d610fd6a8cb065658c7e6ce5ea268310dc8043e", 68 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 69 | "blockNumber": "0x7a549b", 70 | "data": "0x000000000000000000000000cdd475325d6f564d27247d1dddbb0dac6fa0a5cf00000000000000000000000000000000000cede883782b9b651fda18380000000000000000000000000000003180341afdd106f14d224ec96c9a17420ab5f33d", 71 | "logIndex": "0x1", 72 | "removed": false, 73 | "topics": [ 74 | "0xc37a77b91cc3fc2d0e4b43fd2f347ec67adda10e39215de4742836cc3e42c97a" 75 | ], 76 | "transactionHash": "0x6aef02d81df05983cbd528a7e1d57696cef3f0f3f5f7a8c5a9b1f7d4b6a6d8bd", 77 | "transactionIndex": "0x2" 78 | }, 79 | { 80 | "address": "0x3180341afdd106f14d224ec96c9a17420ab5f33d", 81 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 82 | "blockNumber": "0x7a549b", 83 | "data": "0x000000000000000000000000420000000000000000000000000000000000000600000000000000000000000000000000000099059e68e694a21e2c04d6800000000000000000000000000000000000000000990249e930e17af3272730000000000000000000000000000000000000000000000000000000000000000000054600000000000000000000000000000000000000000000000000000000000016b8", 84 | "logIndex": "0x2", 85 | "removed": false, 86 | "topics": [ 87 | "0x23b9387f81fca646aac1dc4487ede045c65f5f7445482906565f01e05afdb3a8" 88 | ], 89 | "transactionHash": "0x6aef02d81df05983cbd528a7e1d57696cef3f0f3f5f7a8c5a9b1f7d4b6a6d8bd", 90 | "transactionIndex": "0x2" 91 | }, 92 | { 93 | "address": "0x1d610fd6a8cb065658c7e6ce5ea268310dc8043e", 94 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 95 | "blockNumber": "0x7a549b", 96 | "data": "0x00000000000000000000000042000000000000000000000000000000000000060000000000000000000000000000000000009919122e32544c0b5496f80000000000000000000000000000003180341afdd106f14d224ec96c9a17420ab5f33d", 97 | "logIndex": "0x3", 98 | "removed": false, 99 | "topics": [ 100 | 
"0xc37a77b91cc3fc2d0e4b43fd2f347ec67adda10e39215de4742836cc3e42c97a" 101 | ], 102 | "transactionHash": "0x6aef02d81df05983cbd528a7e1d57696cef3f0f3f5f7a8c5a9b1f7d4b6a6d8bd", 103 | "transactionIndex": "0x2" 104 | } 105 | ], 106 | "logsBloom": "0x00000000000000000000000000000002000000000000000000008000000000000000000000000000000000004000000000000000000000000000000000000000000000000000100000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001020000000000000000000000000000000000000002000001000000000000000000100100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000", 107 | "status": "0x1", 108 | "to": "0x3180341afdd106f14d224ec96c9a17420ab5f33d", 109 | "transactionHash": "0x6aef02d81df05983cbd528a7e1d57696cef3f0f3f5f7a8c5a9b1f7d4b6a6d8bd", 110 | "transactionIndex": "0x2", 111 | "type": "0x2" 112 | }, 113 | { 114 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 115 | "blockNumber": "0x7a549b", 116 | "contractAddress": null, 117 | "cumulativeGasUsed": "0x78f1d", 118 | "effectiveGasPrice": "0x2710", 119 | "from": "0x5a178ae7d1e3fc42a8425a0c455e3b6c3157ff0c", 120 | "gasUsed": "0x451a2", 121 | "l1Fee": "0x410e677162", 122 | "l1GasPrice": "0x1d6fedf00", 123 | "l1GasUsed": "0x6e8", 124 | "logs": [], 125 | "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 126 | "status": "0x1", 127 | 
"to": "0xe6dce473803a7fee2805f02c0140ce01bc738186", 128 | "transactionHash": "0x310b151c69b810e900cd72c6b0335d8e9dafb783e793eae48b72aa3b6314a704", 129 | "transactionIndex": "0x3", 130 | "type": "0x2" 131 | }, 132 | { 133 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 134 | "blockNumber": "0x7a549b", 135 | "contractAddress": null, 136 | "cumulativeGasUsed": "0x2c6865", 137 | "effectiveGasPrice": "0x2710", 138 | "from": "0x93df6a148a6dd5b162ae1f49d03a981e13afc8f5", 139 | "gasUsed": "0x24d948", 140 | "l1Fee": "0x3eb3879b9d", 141 | "l1GasPrice": "0x1d6fedf00", 142 | "l1GasUsed": "0x6a8", 143 | "logs": [], 144 | "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 145 | "status": "0x1", 146 | "to": "0x4397da4e02198b79e6934ad10e395d43816b553d", 147 | "transactionHash": "0x27c04bd94b2404b0e9b898394c72cdf95471f2a4a57ca563be44a899d9b91a9a", 148 | "transactionIndex": "0x4", 149 | "type": "0x0" 150 | }, 151 | { 152 | "blockHash": "0x4dc3e5326b8d6e7eb4d0a9220381d911b09431a2b27ee6ab06a6249a5195f436", 153 | "blockNumber": "0x7a549b", 154 | "contractAddress": null, 155 | "cumulativeGasUsed": "0x442974", 156 | "effectiveGasPrice": "0x13a3", 157 | "from": "0x1e8248888f342a4fdc91b4de3a883193dc8539bb", 158 | "gasUsed": "0x17c10f", 159 | "l1Fee": "0x43da513f3c", 160 | "l1GasPrice": "0x1d6fedf00", 161 | "l1GasUsed": "0x734", 162 | "logs": [], 163 | "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 164 | "status": "0x1", 165 | "to": "0x2d58e04e43411c58e781339d70f8ebe05aa17b68", 166 | "transactionHash": "0x42c16a4a0d2129017f90755da9727d1a77e2d4ae3289489c0ce356b414941746", 167 | "transactionIndex": "0x5", 168 | "type": "0x2" 169 | } 170 | ], 171 | "id": 1 172 | } 173 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // ingester is a "synchronizer" that ingests into DuneAPI the blocks from the blockchain. 4 | // it has the ability to resume and catch up with the the head of the blockchain. 
5 | 6 | import ( 7 | "context" 8 | stdlog "log" 9 | "log/slog" 10 | "net/http" 11 | "os" 12 | "os/signal" 13 | "strings" 14 | stdsync "sync" 15 | "syscall" 16 | "time" 17 | 18 | "github.com/prometheus/client_golang/prometheus/promhttp" 19 | 20 | "github.com/duneanalytics/blockchain-ingester/client/duneapi" 21 | "github.com/duneanalytics/blockchain-ingester/client/jsonrpc" 22 | "github.com/duneanalytics/blockchain-ingester/config" 23 | "github.com/duneanalytics/blockchain-ingester/ingester" 24 | "github.com/duneanalytics/blockchain-ingester/lib/dlq" 25 | "github.com/duneanalytics/blockchain-ingester/models" 26 | ) 27 | 28 | func init() { 29 | // always use UTC 30 | time.Local = time.UTC 31 | } 32 | 33 | func main() { 34 | cfg, err := config.Parse() 35 | if err != nil { 36 | stdlog.Fatal(err) 37 | } 38 | logOptions := &slog.HandlerOptions{} 39 | switch cfg.LogLevel { 40 | case "debug": 41 | logOptions.Level = slog.LevelDebug 42 | case "info": 43 | logOptions.Level = slog.LevelInfo 44 | case "warn": 45 | logOptions.Level = slog.LevelWarn 46 | case "error": 47 | logOptions.Level = slog.LevelError 48 | default: 49 | stdlog.Fatalf("unsupported log level: '%s'", cfg.LogLevel) 50 | } 51 | logger := slog.New(slog.NewTextHandler(os.Stderr, logOptions)) 52 | slog.SetDefault(logger) 53 | 54 | duneClient, err := duneapi.New(logger, duneapi.Config{ 55 | APIKey: cfg.Dune.APIKey, 56 | URL: cfg.Dune.URL, 57 | BlockchainName: cfg.BlockchainName, 58 | Stack: cfg.RPCStack, 59 | DisableCompression: cfg.DisableCompression, 60 | DisableBatchHeader: cfg.Dune.DisableBatchHeader, 61 | DryRun: cfg.DryRun, 62 | }) 63 | if err != nil { 64 | stdlog.Fatal(err) 65 | } 66 | defer duneClient.Close() 67 | 68 | // Create an extra Dune API client for DLQ processing since it is not thread-safe yet 69 | duneClientDLQ, err := duneapi.New(logger, duneapi.Config{ 70 | APIKey: cfg.Dune.APIKey, 71 | URL: cfg.Dune.URL, 72 | BlockchainName: cfg.BlockchainName, 73 | Stack: cfg.RPCStack, 74 | DisableCompression: 
cfg.DisableCompression, 75 | DryRun: cfg.DryRun, 76 | }) 77 | if err != nil { 78 | stdlog.Fatal(err) 79 | } 80 | defer duneClientDLQ.Close() 81 | 82 | var wg stdsync.WaitGroup 83 | var rpcClient jsonrpc.BlockchainClient 84 | 85 | rpcHTTPHeaders := make(map[string]string) 86 | for _, header := range cfg.RPCNode.ExtraHTTPHeaders { 87 | pair := strings.Split(header, ":") 88 | // We've validated this list has two elements in `config.HasError()` 89 | key := strings.Trim(pair[0], " ") 90 | value := strings.Trim(pair[1], " ") 91 | logger.Info("Adding extra HTTP header to RPC requests", "key", key, "value", value) 92 | rpcHTTPHeaders[key] = value 93 | } 94 | rpcClient, err = jsonrpc.NewClient(logger, jsonrpc.Config{ 95 | URL: cfg.RPCNode.NodeURL, 96 | HTTPHeaders: rpcHTTPHeaders, 97 | EVMStack: cfg.RPCStack, 98 | // real max request concurrency to RPP node 99 | // each block requires multiple RPC requests 100 | TotalRPCConcurrency: cfg.RPCConcurrency, 101 | }) 102 | if err != nil { 103 | stdlog.Fatal(err) 104 | } 105 | 106 | ctx, cancel := context.WithCancel(context.Background()) 107 | 108 | go func() { 109 | http.Handle("/metrics", promhttp.Handler()) 110 | err = http.ListenAndServe(":2112", nil) 111 | if err != nil { 112 | cancel() 113 | stdlog.Fatal(err) 114 | } 115 | }() 116 | 117 | // Get stored progress unless config indicates we should start from 0 118 | var startBlockNumber int64 119 | // Default to -1 to start where the ingester left off 120 | var progress *models.BlockchainIndexProgress 121 | if cfg.BlockHeight == -1 { 122 | progress, err = duneClient.GetProgressReport(ctx) 123 | if err != nil { 124 | stdlog.Fatal(err) 125 | } else { 126 | startBlockNumber = progress.LastIngestedBlockNumber + 1 127 | } 128 | } else { 129 | startBlockNumber = cfg.BlockHeight 130 | } 131 | 132 | dlqBlockNumbers := dlq.NewDLQWithDelay[int64](dlq.RetryDelayLinear(cfg.DLQRetryInterval)) 133 | 134 | if !cfg.DisableGapsQuery { 135 | blockGaps, err := duneClient.GetBlockGaps(ctx) 136 | 
if err != nil { 137 | stdlog.Fatal(err) 138 | } else { 139 | ingester.AddBlockGaps(dlqBlockNumbers, blockGaps.Gaps) 140 | } 141 | } 142 | 143 | maxCount := int64(0) // 0 means ingest until cancelled 144 | ingester := ingester.New( 145 | logger, 146 | rpcClient, 147 | duneClient, 148 | duneClientDLQ, 149 | ingester.Config{ 150 | // OpStack does 3 requests per block, ArbitrumNova is variable 151 | // leave some room for other requests 152 | MaxConcurrentBlocks: cfg.RPCConcurrency / 4, 153 | DLQMaxConcurrentBlocks: cfg.DLQBlockConcurrency, 154 | MaxBatchSize: cfg.MaxBatchSize, 155 | ReportProgressInterval: cfg.ReportProgressInterval, 156 | PollInterval: cfg.PollInterval, 157 | PollDLQInterval: cfg.PollDLQInterval, 158 | Stack: cfg.RPCStack, 159 | BlockchainName: cfg.BlockchainName, 160 | BlockSubmitInterval: cfg.BlockSubmitInterval, 161 | SkipFailedBlocks: cfg.RPCNode.SkipFailedBlocks, 162 | DLQOnly: cfg.DLQOnly, 163 | }, 164 | progress, 165 | dlqBlockNumbers, 166 | ) 167 | 168 | wg.Add(1) 169 | go func() { 170 | defer wg.Done() 171 | err := ingester.Run(ctx, startBlockNumber, maxCount) 172 | logger.Info("Ingester finished", "err", err) 173 | cancel() 174 | }() 175 | 176 | defer ingester.Close() 177 | 178 | // TODO: add a metrics exporter or healthcheck http endpoint ? 
179 | 180 | quit := make(chan os.Signal, 1) 181 | // handle Interrupt (ctrl-c) Term, used by `kill` et al, HUP which is commonly used to reload configs 182 | signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) 183 | select { 184 | case <-ctx.Done(): 185 | logger.Warn("Context done") 186 | case s := <-quit: 187 | logger.Warn("Caught UNIX signal", "signal", s) 188 | cancel() 189 | } 190 | 191 | // wait for Run to finish 192 | wg.Wait() 193 | } 194 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "strings" 7 | "time" 8 | 9 | "github.com/duneanalytics/blockchain-ingester/models" 10 | flags "github.com/jessevdk/go-flags" 11 | ) 12 | 13 | type DuneClient struct { 14 | APIKey string `long:"dune-api-key" env:"DUNE_API_KEY" description:"API key for DuneAPI"` 15 | URL string `long:"dune-api-url" env:"DUNE_API_URL" description:"URL for DuneAPI" default:"https://api.dune.com"` // nolint:lll 16 | DisableBatchHeader bool `long:"duneapi-disable-batch-header" env:"DUNEAPI_DISABLE_BATCH_HEADERS" description:"Disable batch headers on DuneAPI request payload"` // nolint:lll 17 | } 18 | 19 | func (d DuneClient) HasError() error { 20 | if d.APIKey == "" { 21 | return errors.New("DuneAPI key is required") 22 | } 23 | return nil 24 | } 25 | 26 | type RPCClient struct { 27 | NodeURL string `long:"rpc-node-url" env:"RPC_NODE_URL" description:"URL for the blockchain node"` 28 | ExtraHTTPHeaders []string `long:"rpc-http-header" env:"RPC_HTTP_HEADERS" env-delim:"|" description:"Extra HTTP headers to send with RPC requests. Each header pair must be on the form 'key:value'"` // nolint:lll 29 | SkipFailedBlocks bool `long:"rpc-skip-failed-blocks" env:"RPC_SKIP_FAILED_BLOCKS" description:"Skip blocks that we fail to get from RPC. 
If false (default), we crash on RPC request failure"` // nolint:lll 30 | } 31 | 32 | func (r RPCClient) HasError() error { 33 | if r.NodeURL == "" { 34 | return errors.New("RPC node URL is required") 35 | } 36 | for _, header := range r.ExtraHTTPHeaders { 37 | pair := strings.Split(header, ":") 38 | if len(pair) != 2 { 39 | return fmt.Errorf("invalid rpc http headers: expected 'key:value', got '%s'", pair) 40 | } 41 | } 42 | return nil 43 | } 44 | 45 | type Config struct { 46 | BlockHeight int64 `long:"block-height" env:"BLOCK_HEIGHT" description:"block height to start from" default:"-1"` // nolint:lll 47 | BlockchainName string `long:"blockchain-name" env:"BLOCKCHAIN_NAME" description:"name of the blockchain" required:"true"` // nolint:lll 48 | DisableCompression bool `long:"disable-compression" env:"DISABLE_COMPRESSION" description:"disable compression when sending data to Dune"` // nolint:lll 49 | DisableGapsQuery bool `long:"disable-gaps-query" env:"DISABLE_GAPS_QUERY" description:"disable gaps query used to populate the initial DLQ"` // nolint:lll 50 | DLQOnly bool `long:"dlq-only" env:"DLQ_ONLY" description:"Runs just the DLQ processing on its own"` // nolint:lll 51 | DryRun bool `long:"dry-run" env:"DRY_RUN" description:"When enabled, data is sent to Dune for validation but is not written to Dune tables"` // nolint:lll 52 | Dune DuneClient 53 | PollInterval time.Duration `long:"rpc-poll-interval" env:"RPC_POLL_INTERVAL" description:"Interval to poll the blockchain node" default:"300ms"` // nolint:lll 54 | PollDLQInterval time.Duration `long:"dlq-poll-interval" env:"DLQ_POLL_INTERVAL" description:"Interval to poll the dlq" default:"300ms"` // nolint:lll 55 | DLQRetryInterval time.Duration `long:"dlq-retry-interval" env:"DLQ_RETRY_INTERVAL" description:"Interval for linear backoff in DLQ " default:"1m"` // nolint:lll 56 | ReportProgressInterval time.Duration `long:"report-progress-interval" env:"REPORT_PROGRESS_INTERVAL" description:"Interval to report 
progress" default:"30s"` // nolint:lll 57 | RPCNode RPCClient 58 | RPCStack models.EVMStack `long:"rpc-stack" env:"RPC_STACK" description:"Stack for the RPC client" default:"opstack"` // nolint:lll 59 | RPCConcurrency int `long:"rpc-concurrency" env:"RPC_CONCURRENCY" description:"Number of maximum concurrent jsonRPC requests to the RPC node" default:"80"` // nolint:lll 60 | DLQBlockConcurrency int `long:"dlq-concurrency" env:"DLQ_CONCURRENCY" description:"Number of concurrent block requests to the RPC node for DLQ processing" default:"2"` // nolint:lll 61 | BlockSubmitInterval time.Duration `long:"block-submit-interval" env:"BLOCK_SUBMIT_INTERVAL" description:"Interval at which to submit batched blocks to Dune" default:"500ms"` // nolint:lll 62 | LogLevel string `long:"log" env:"LOG" description:"Log level" choice:"info" choice:"debug" choice:"warn" choice:"error" default:"info"` // nolint:lll 63 | MaxBatchSize int `long:"max-batch-size" env:"MAX_BATCH_SIZE" description:"Max number of blocks to send per batch (max:256)" default:"128"` // nolint:lll 64 | } 65 | 66 | func (c Config) HasError() error { 67 | if err := c.Dune.HasError(); err != nil { 68 | return err 69 | } 70 | if err := c.RPCNode.HasError(); err != nil { 71 | return err 72 | } 73 | if c.BlockchainName == "" { 74 | return errors.New("blockchain name is required") 75 | } 76 | 77 | return nil 78 | } 79 | 80 | func Parse() (*Config, error) { 81 | var config Config 82 | parser := flags.NewParser(&config, flags.Default) 83 | _, err := parser.Parse() 84 | if err != nil { 85 | return nil, err 86 | } 87 | if err := config.HasError(); err != nil { 88 | return nil, err 89 | } 90 | return &config, nil 91 | } 92 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/duneanalytics/blockchain-ingester 2 | 3 | go 1.22.2 4 | 5 | require ( 6 | github.com/emirpasic/gods v1.18.1 7 | 
github.com/go-errors/errors v1.5.1 8 | github.com/hashicorp/go-retryablehttp v0.7.7 9 | github.com/jessevdk/go-flags v1.5.0 10 | github.com/klauspost/compress v1.17.8 11 | github.com/panjf2000/ants/v2 v2.10.0 12 | github.com/prometheus/client_golang v1.19.1 13 | github.com/stretchr/testify v1.9.0 14 | golang.org/x/sync v0.7.0 15 | ) 16 | 17 | require ( 18 | github.com/beorn7/perks v1.0.1 // indirect 19 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 20 | github.com/davecgh/go-spew v1.1.1 // indirect 21 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect 22 | github.com/kr/text v0.2.0 // indirect 23 | github.com/pmezard/go-difflib v1.0.0 // indirect 24 | github.com/prometheus/client_model v0.5.0 // indirect 25 | github.com/prometheus/common v0.48.0 // indirect 26 | github.com/prometheus/procfs v0.12.0 // indirect 27 | golang.org/x/sys v0.20.0 // indirect 28 | google.golang.org/protobuf v1.33.0 // indirect 29 | gopkg.in/yaml.v3 v3.0.1 // indirect 30 | ) 31 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 2 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 3 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 4 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 5 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 6 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 7 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 8 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= 10 | github.com/emirpasic/gods v1.18.1/go.mod 
h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= 11 | github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= 12 | github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= 13 | github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= 14 | github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= 15 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 16 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 17 | github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= 18 | github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= 19 | github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= 20 | github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 21 | github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= 22 | github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= 23 | github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= 24 | github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= 25 | github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= 26 | github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= 27 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 28 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 29 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 30 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 31 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 32 | 
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 33 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 34 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 35 | github.com/panjf2000/ants/v2 v2.10.0 h1:zhRg1pQUtkyRiOFo2Sbqwjp0GfBNo9cUY2/Grpx1p+8= 36 | github.com/panjf2000/ants/v2 v2.10.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= 37 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 38 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 39 | github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= 40 | github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= 41 | github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= 42 | github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= 43 | github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= 44 | github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= 45 | github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= 46 | github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= 47 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 48 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 49 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 50 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 51 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 52 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 53 | 
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 54 | github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 55 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 56 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 57 | golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= 58 | golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= 59 | golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 60 | golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 61 | golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= 62 | golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 63 | google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= 64 | google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= 65 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 66 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 67 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 68 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 69 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 70 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 71 | -------------------------------------------------------------------------------- /ingester/ingester.go: -------------------------------------------------------------------------------- 1 | package ingester 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "time" 7 | 8 | 
"github.com/duneanalytics/blockchain-ingester/lib/dlq" 9 | 10 | "github.com/duneanalytics/blockchain-ingester/client/duneapi" 11 | "github.com/duneanalytics/blockchain-ingester/client/jsonrpc" 12 | "github.com/duneanalytics/blockchain-ingester/models" 13 | ) 14 | 15 | type Ingester interface { 16 | // Run starts the ingester and blocks until the context is cancelled or maxCount blocks are ingested 17 | Run(ctx context.Context, startBlockNumber int64, maxCount int64) error 18 | 19 | // ProduceBlockNumbers sends block numbers from startBlockNumber to endBlockNumber to outChan, inclusive. 20 | // If endBlockNumber is -1, it sends blocks from startBlockNumber to the tip of the chain 21 | // it will run continuously until the context is cancelled 22 | ProduceBlockNumbers(ctx context.Context, outChan chan int64, startBlockNumber int64, endBlockNumber int64) error 23 | 24 | // FetchBlockLoop fetches blocks sent on the channel and sends them on the other channel. 25 | // It will run continuously until the context is cancelled, or the channel is closed. 26 | // It can safely be run concurrently. 27 | FetchBlockLoop(context.Context, chan int64, chan models.RPCBlock) error 28 | 29 | // SendBlocks consumes RPCBlocks from the channel, reorders them, and sends batches to DuneAPI in an endless loop 30 | // it will block until: 31 | // - the context is cancelled 32 | // - channel is closed 33 | // - a fatal error occurs 34 | SendBlocks(ctx context.Context, blocksCh <-chan models.RPCBlock, startFrom int64) error 35 | 36 | // ProduceBlockNumbersDLQ sends block numbers from the DLQ to outChan. 37 | // It will run continuously until the context is cancelled. 38 | // When the DLQ does not return an eligible next block, it waits for PollDLQInterval before trying again 39 | ProduceBlockNumbersDLQ(ctx context.Context, outChan chan dlq.Item[int64]) error 40 | 41 | // FetchBlockLoopDLQ fetches blocks sent on the channel and sends them on the other channel. 
42 | // It will run continuously until the context is cancelled, or the channel is closed. 43 | // It can safely be run concurrently. 44 | FetchBlockLoopDLQ(ctx context.Context, 45 | blockNumbers <-chan dlq.Item[int64], 46 | blocks chan<- dlq.Item[models.RPCBlock], 47 | ) error 48 | 49 | // SendBlocksDLQ pushes one RPCBlock at a time to DuneAPI in the order they are received in 50 | SendBlocksDLQ(ctx context.Context, blocks <-chan dlq.Item[models.RPCBlock]) error 51 | 52 | Close() error 53 | } 54 | 55 | const ( 56 | defaultReportProgressInterval = 30 * time.Second 57 | ) 58 | 59 | type Config struct { 60 | MaxConcurrentBlocks int 61 | DLQMaxConcurrentBlocks int 62 | PollInterval time.Duration 63 | PollDLQInterval time.Duration 64 | ReportProgressInterval time.Duration 65 | Stack models.EVMStack 66 | BlockchainName string 67 | BlockSubmitInterval time.Duration 68 | SkipFailedBlocks bool 69 | DLQOnly bool 70 | MaxBatchSize int 71 | } 72 | 73 | type ingester struct { 74 | log *slog.Logger 75 | node jsonrpc.BlockchainClient 76 | dune duneapi.BlockchainIngester 77 | duneDLQ duneapi.BlockchainIngester 78 | cfg Config 79 | info Info 80 | dlq *dlq.DLQ[int64] 81 | } 82 | 83 | func New( 84 | log *slog.Logger, 85 | node jsonrpc.BlockchainClient, 86 | dune duneapi.BlockchainIngester, 87 | duneDLQ duneapi.BlockchainIngester, 88 | cfg Config, 89 | progress *models.BlockchainIndexProgress, 90 | dlq *dlq.DLQ[int64], 91 | ) Ingester { 92 | info := NewInfo(cfg.BlockchainName, cfg.Stack.String()) 93 | if progress != nil { 94 | info.LatestBlockNumber = progress.LatestBlockNumber 95 | info.IngestedBlockNumber = progress.LastIngestedBlockNumber 96 | } 97 | ing := &ingester{ 98 | log: log.With("module", "ingester"), 99 | node: node, 100 | dune: dune, 101 | duneDLQ: duneDLQ, 102 | cfg: cfg, 103 | info: info, 104 | dlq: dlq, 105 | } 106 | if ing.cfg.ReportProgressInterval == 0 { 107 | ing.cfg.ReportProgressInterval = defaultReportProgressInterval 108 | } 109 | if ing.cfg.MaxBatchSize <= 0 
{ 110 | ing.cfg.MaxBatchSize = maxBatchSize 111 | } else if ing.cfg.MaxBatchSize > maxBatchSize { 112 | ing.cfg.MaxBatchSize = maxBatchSize 113 | } 114 | return ing 115 | } 116 | -------------------------------------------------------------------------------- /ingester/mainloop.go: -------------------------------------------------------------------------------- 1 | package ingester 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | "slices" 8 | "sync/atomic" 9 | "time" 10 | 11 | "github.com/duneanalytics/blockchain-ingester/lib/dlq" 12 | "github.com/emirpasic/gods/utils" 13 | 14 | "github.com/duneanalytics/blockchain-ingester/models" 15 | "github.com/go-errors/errors" 16 | "golang.org/x/sync/errgroup" 17 | ) 18 | 19 | // Run fetches blocks from a node RPC and sends them in order to the Dune API. 20 | // 21 | // ProduceBlockNumbers (blockNumbers channel) -> FetchBlockLoop (blocks channel) -> SendBlocks -> Dune 22 | // 23 | // We produce block numbers to fetch on an unbuffered channel (ProduceBlockNumbers), 24 | // and each concurrent FetchBlockLoop goroutine gets a block number from that channel. 25 | // The SendBlocks goroutine receives all blocks on an unbuffered channel, 26 | // but buffers them in a map until they can be sent in order. 
27 | func (i *ingester) Run(ctx context.Context, startBlockNumber int64, maxCount int64) error { 28 | registerIngesterMetrics(i) 29 | 30 | if i.cfg.DLQOnly { 31 | i.cfg.MaxConcurrentBlocks = 0 // if running DLQ Only mode, ignore the MaxConcurrentRequests and set this to 0 32 | } else { 33 | if i.cfg.MaxConcurrentBlocks <= 0 { 34 | return errors.Errorf("MaxConcurrentRequests must be > 0") 35 | } 36 | } 37 | if i.cfg.DLQMaxConcurrentBlocks <= 0 { 38 | return errors.Errorf("MaxConcurrentRequestsDLQ must be > 0") 39 | } 40 | 41 | ctx, cancel := context.WithCancel(ctx) 42 | defer cancel() 43 | errGroup, ctx := errgroup.WithContext(ctx) 44 | 45 | blockNumbers := make(chan int64) 46 | defer close(blockNumbers) 47 | 48 | // We buffer the block channel so that RPC requests can be made concurrently with sending blocks to Dune. 49 | // We limit the buffer size to the k * maxBatchSize, 50 | // so we exert some backpressure. 51 | // but having enough to feed full batches to DuneAPI 52 | blocks := make(chan models.RPCBlock, 2*maxBatchSize) 53 | defer close(blocks) 54 | 55 | // Start MaxConcurrentBlocks goroutines to consume blocks concurrently 56 | for range i.cfg.MaxConcurrentBlocks { 57 | errGroup.Go(func() error { 58 | return i.FetchBlockLoop(ctx, blockNumbers, blocks) 59 | }) 60 | } 61 | errGroup.Go(func() error { 62 | return i.ReportProgress(ctx) 63 | }) 64 | errGroup.Go(func() error { 65 | return i.SendBlocks(ctx, blocks, startBlockNumber) 66 | }) 67 | 68 | // Start DLQ processing 69 | 70 | blockNumbersDLQ := make(chan dlq.Item[int64]) 71 | defer close(blockNumbersDLQ) 72 | 73 | blocksDLQ := make(chan dlq.Item[models.RPCBlock], i.cfg.DLQMaxConcurrentBlocks+1) 74 | defer close(blocksDLQ) 75 | 76 | errGroup.Go(func() error { 77 | return i.SendBlocksDLQ(ctx, blocksDLQ) 78 | }) 79 | for range i.cfg.DLQMaxConcurrentBlocks { 80 | errGroup.Go(func() error { 81 | return i.FetchBlockLoopDLQ(ctx, blockNumbersDLQ, blocksDLQ) 82 | }) 83 | } 84 | errGroup.Go(func() error { 85 | return 
i.ProduceBlockNumbersDLQ(ctx, blockNumbersDLQ) 86 | }) 87 | 88 | // Ingest until endBlockNumber, inclusive. If maxCount is <= 0, we ingest forever 89 | endBlockNumber := startBlockNumber - 1 + maxCount 90 | i.log.Info("Starting ingester", 91 | "runForever", maxCount <= 0, 92 | "startBlockNumber", startBlockNumber, 93 | "endBlockNumber", endBlockNumber, 94 | "maxConcurrency", i.cfg.MaxConcurrentBlocks, 95 | ) 96 | 97 | // Produce block numbers in the main goroutine 98 | err := i.ProduceBlockNumbers(ctx, blockNumbers, startBlockNumber, endBlockNumber) 99 | i.log.Info("ProduceBlockNumbers is done", "error", err) 100 | i.log.Info("Cancelling context") 101 | cancel() 102 | 103 | return errGroup.Wait() 104 | } 105 | 106 | var ErrFinishedFetchBlockLoop = errors.New("finished FetchBlockLoop") 107 | 108 | // ProduceBlockNumbers to be consumed by multiple goroutines running FetchBlockLoop 109 | func (i *ingester) ProduceBlockNumbers( 110 | ctx context.Context, blockNumbers chan int64, startBlockNumber int64, endBlockNumber int64, 111 | ) error { 112 | latestBlockNumber := i.tryUpdateLatestBlockNumber() 113 | 114 | // Helper function 115 | waitForBlock := func(ctx context.Context, blockNumber int64, latestBlockNumber int64) int64 { 116 | for blockNumber > latestBlockNumber { 117 | select { 118 | case <-ctx.Done(): 119 | return latestBlockNumber 120 | case <-time.After(i.cfg.PollInterval): 121 | } 122 | i.log.Debug( 123 | "Waiting for block to be available", 124 | "waitTime", i.cfg.PollInterval.String(), 125 | "blockNumber", blockNumber, 126 | "latestBlockNumber", latestBlockNumber, 127 | ) 128 | latestBlockNumber = i.tryUpdateLatestBlockNumber() 129 | } 130 | return latestBlockNumber 131 | } 132 | 133 | // Consume blocks forever if end is before start. 
This happens if Run is called with a maxCount of <= 0 134 | dontStop := endBlockNumber < startBlockNumber 135 | i.log.Debug("Produce block numbers from", "startBlockNumber", startBlockNumber, "endBlockNumber", endBlockNumber) 136 | for blockNumber := startBlockNumber; dontStop || blockNumber <= endBlockNumber; blockNumber++ { 137 | latestBlockNumber = waitForBlock(ctx, blockNumber, latestBlockNumber) 138 | 139 | select { 140 | case <-ctx.Done(): 141 | i.log.Debug("ProduceBlockNumbers: Context canceled, stopping") 142 | return ctx.Err() 143 | case blockNumbers <- blockNumber: 144 | } 145 | } 146 | i.log.Debug("Finished producing block numbers") 147 | return ErrFinishedFetchBlockLoop 148 | } 149 | 150 | // FetchBlockLoop from the RPC node. This can be run in multiple goroutines to parallelize block fetching. 151 | func (i *ingester) FetchBlockLoop( 152 | ctx context.Context, blockNumbers chan int64, blocks chan models.RPCBlock, 153 | ) error { 154 | for { 155 | select { 156 | case <-ctx.Done(): 157 | i.log.Info("FetchBlockLoop: context is done") 158 | return ctx.Err() 159 | case blockNumber := <-blockNumbers: 160 | startTime := time.Now() 161 | 162 | block, err := i.node.BlockByNumber(ctx, blockNumber) 163 | if err != nil { 164 | if errors.Is(err, context.Canceled) { 165 | i.log.Error("FetchBlockLoop: Context canceled, stopping") 166 | return ctx.Err() 167 | } 168 | 169 | i.log.Error("Failed to get block by number", 170 | "blockNumber", blockNumber, 171 | "continuing", i.cfg.SkipFailedBlocks, 172 | "elapsed", time.Since(startTime), 173 | "error", err, 174 | ) 175 | if !i.cfg.SkipFailedBlocks { 176 | return err 177 | } 178 | select { 179 | case <-ctx.Done(): 180 | i.log.Debug("FetchBlockLoop: Channel is closed, not sending block to channel", "blockNumber", block.BlockNumber) 181 | return ctx.Err() 182 | case blocks <- models.RPCBlock{BlockNumber: blockNumber, Error: err}: 183 | } 184 | continue 185 | } 186 | 187 | atomic.StoreInt64(&i.info.ConsumedBlockNumber, 
block.BlockNumber) 188 | getBlockElapsed := time.Since(startTime) 189 | select { 190 | case <-ctx.Done(): 191 | i.log.Debug("FetchBlockLoop: Channel is closed, not sending block to channel", "blockNumber", block.BlockNumber) 192 | return ctx.Err() 193 | case blocks <- block: 194 | i.log.Debug( 195 | "FetchBlockLoop: Got and sent block", 196 | "blockNumber", blockNumber, 197 | "getBlockElapsed", getBlockElapsed, 198 | ) 199 | } 200 | } 201 | } 202 | } 203 | 204 | func (i *ingester) tryUpdateLatestBlockNumber() int64 { 205 | latest, err := i.node.LatestBlockNumber() 206 | if err != nil { 207 | i.log.Error("Failed to get latest block number, continuing..", "error", err) 208 | return atomic.LoadInt64(&i.info.LatestBlockNumber) 209 | } 210 | atomic.StoreInt64(&i.info.LatestBlockNumber, latest) 211 | return latest 212 | } 213 | 214 | func (i *ingester) ReportProgress(ctx context.Context) error { 215 | timer := time.NewTicker(i.cfg.ReportProgressInterval) 216 | defer timer.Stop() 217 | 218 | previousTime := time.Now() 219 | previousHoursToCatchUp := float64(0) 220 | previousIngested := atomic.LoadInt64(&i.info.IngestedBlockNumber) 221 | 222 | for { 223 | select { 224 | case <-ctx.Done(): 225 | return ctx.Err() 226 | case tNow := <-timer.C: 227 | latest := atomic.LoadInt64(&i.info.LatestBlockNumber) 228 | lastIngested := atomic.LoadInt64(&i.info.IngestedBlockNumber) 229 | 230 | blocksPerSec := float64(lastIngested-previousIngested) / tNow.Sub(previousTime).Seconds() 231 | newDistance := latest - lastIngested 232 | 233 | fields := []interface{}{ 234 | "blocksPerSec", fmt.Sprintf("%.2f", blocksPerSec), 235 | "latestBlockNumber", latest, 236 | "ingestedBlockNumber", lastIngested, 237 | } 238 | if newDistance > 1 { 239 | etaHours := time.Duration(float64(newDistance) / blocksPerSec * float64(time.Second)).Hours() 240 | fields = append(fields, "hoursToCatchUp", fmt.Sprintf("%.1f", etaHours)) 241 | if previousHoursToCatchUp < (0.8 * etaHours) { 242 | fields = append(fields, 
"fallingBehind", true) 243 | } 244 | previousHoursToCatchUp = etaHours 245 | } 246 | if i.info.Errors.RPCErrorCount > 0 { 247 | fields = append(fields, "rpcErrors", i.info.Errors.RPCErrorCount) 248 | } 249 | if i.info.Errors.DuneErrorCount > 0 { 250 | fields = append(fields, "duneErrors", i.info.Errors.DuneErrorCount) 251 | } 252 | 253 | i.log.Info("PROGRESS REPORT", fields...) 254 | previousIngested = lastIngested 255 | previousTime = tNow 256 | 257 | err := i.dune.PostProgressReport(ctx, i.info.ToProgressReport()) 258 | if err != nil { 259 | i.log.Error("Failed to post progress report", "error", err) 260 | } else { 261 | i.log.Debug("Posted progress report") 262 | i.info.ResetErrors() 263 | } 264 | } 265 | } 266 | } 267 | 268 | func (i *ingester) ProduceBlockNumbersDLQ(ctx context.Context, outChan chan dlq.Item[int64]) error { 269 | for { 270 | select { 271 | case <-ctx.Done(): 272 | i.log.Debug("ProduceBlockNumbersDLQ: Context canceled, stopping") 273 | return ctx.Err() 274 | default: 275 | block, ok := i.dlq.GetNextItem() 276 | 277 | if ok { 278 | if i.log.Enabled(ctx, slog.LevelDebug) { 279 | i.log.Debug("ProduceBlockNumbersDLQ: Reprocessing block", "block", block, 280 | "dlqSize", i.dlq.Size()) 281 | } 282 | select { 283 | case outChan <- *block: 284 | // Successfully sent the block to the out channel 285 | case <-ctx.Done(): 286 | i.log.Debug("ProduceBlockNumbersDLQ: Context canceled while sending block, stopping") 287 | return ctx.Err() 288 | } 289 | } else { 290 | if i.log.Enabled(ctx, slog.LevelDebug) { 291 | i.log.Debug("ProduceBlockNumbersDLQ: No eligible blocks in the DLQ so sleeping", 292 | "dlqSize", i.dlq.Size()) 293 | } 294 | select { 295 | case <-time.After(i.cfg.PollDLQInterval): // Polling interval when DLQ is empty 296 | case <-ctx.Done(): 297 | i.log.Debug("ProduceBlockNumbersDLQ: Context canceled while sleeping, stopping") 298 | return ctx.Err() 299 | } 300 | } 301 | } 302 | } 303 | } 304 | 305 | func (i *ingester) FetchBlockLoopDLQ(ctx 
context.Context, blockNumbers <-chan dlq.Item[int64], 306 | blocks chan<- dlq.Item[models.RPCBlock], 307 | ) error { 308 | for { 309 | select { 310 | case <-ctx.Done(): 311 | i.log.Info("FetchBlockLoopDLQ: context is done") 312 | return ctx.Err() 313 | case blockNumber := <-blockNumbers: 314 | startTime := time.Now() 315 | block, err := i.node.BlockByNumber(ctx, blockNumber.Value) 316 | if err != nil { 317 | if errors.Is(err, context.Canceled) { 318 | i.log.Error("FetchBlockLoopDLQ: Context canceled, stopping") 319 | return ctx.Err() 320 | } 321 | i.log.Error("FetchBlockLoopDLQ: Failed to get block by number", 322 | "blockNumber", blockNumber, 323 | "elapsed", time.Since(startTime), 324 | "error", err, 325 | ) 326 | select { 327 | case <-ctx.Done(): 328 | i.log.Debug("FetchBlockLoop: Channel is closed, not sending block to channel", "blockNumber", block.BlockNumber) 329 | return ctx.Err() 330 | case blocks <- dlq.MapItem(blockNumber, func(blockNumber int64) models.RPCBlock { 331 | return models.RPCBlock{BlockNumber: blockNumber, Error: err} 332 | }): 333 | } 334 | continue 335 | } 336 | getBlockElapsed := time.Since(startTime) 337 | select { 338 | case <-ctx.Done(): 339 | i.log.Debug("FetchBlockLoopDLQ: Channel is closed, not sending block to channel", "blockNumber", block.BlockNumber) 340 | return ctx.Err() 341 | case blocks <- dlq.MapItem(blockNumber, func(_ int64) models.RPCBlock { 342 | return block 343 | }): 344 | i.log.Debug( 345 | "FetchBlockLoopDLQ: Got and sent block", 346 | "blockNumber", blockNumber, 347 | "getBlockElapsed", getBlockElapsed, 348 | ) 349 | } 350 | } 351 | } 352 | } 353 | 354 | func (i *ingester) SendBlocksDLQ(ctx context.Context, blocks <-chan dlq.Item[models.RPCBlock]) error { 355 | i.log.Debug("SendBlocksDLQ: Starting to receive blocks") 356 | for { 357 | select { 358 | case <-ctx.Done(): 359 | i.log.Debug("SendBlocksDLQ: Context canceled, stopping") 360 | return ctx.Err() 361 | case block, ok := <-blocks: 362 | if !ok { 363 | 
i.log.Debug("SendBlocksDLQ: Channel is closed, returning") 364 | return nil 365 | } 366 | if block.Value.Errored() { 367 | i.dlq.AddItem(block.Value.BlockNumber, block.Retries) 368 | i.log.Error("Received FAILED block", "number", block.Value.BlockNumber) 369 | // TODO: report error once ErrorState struct is made thread-safe 370 | } else { 371 | i.log.Debug( 372 | "SendBlocksDLQ: Received block", 373 | "blockNumber", block.Value.BlockNumber, 374 | ) 375 | if err := i.duneDLQ.SendBlocks(ctx, []models.RPCBlock{block.Value}); err != nil { 376 | if errors.Is(err, context.Canceled) { 377 | i.log.Info("SendBlocksDLQ: Context canceled, stopping") 378 | return ctx.Err() 379 | } 380 | i.log.Error("SendBlocksDLQ: Failed to send block, requeueing...", "block", block.Value.BlockNumber, "error", err) 381 | i.dlq.AddItem(block.Value.BlockNumber, block.Retries) 382 | // TODO: report error once ErrorState struct is made thread-safe 383 | } 384 | } 385 | } 386 | } 387 | } 388 | 389 | func (i *ingester) Close() error { 390 | // Send a final progress report to flush progress 391 | ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) 392 | defer cancel() 393 | i.log.Info("Sending final progress report") 394 | err := i.dune.PostProgressReport(ctx, i.info.ToProgressReport()) 395 | i.log.Info("Closing node") 396 | if err != nil { 397 | _ = i.node.Close() 398 | return err 399 | } 400 | 401 | return i.node.Close() 402 | } 403 | 404 | func AddBlockGaps(dlq *dlq.DLQ[int64], gaps []models.BlockGap) { 405 | // queue these in reverse so that recent blocks are retried first 406 | slices.SortFunc(gaps, func(a, b models.BlockGap) int { 407 | return -utils.Int64Comparator(a.FirstMissing, b.FirstMissing) 408 | }) 409 | 410 | for _, gap := range gaps { 411 | for i := gap.FirstMissing; i <= gap.LastMissing; i++ { 412 | dlq.AddItem(i, 0) 413 | } 414 | } 415 | } 416 | -------------------------------------------------------------------------------- /ingester/mainloop_test.go: 
-------------------------------------------------------------------------------- 1 | package ingester_test 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "log/slog" 7 | "math/rand" 8 | "sync" 9 | "sync/atomic" 10 | "testing" 11 | "time" 12 | 13 | "github.com/prometheus/client_golang/prometheus" 14 | 15 | "github.com/duneanalytics/blockchain-ingester/lib/dlq" 16 | 17 | "github.com/duneanalytics/blockchain-ingester/ingester" 18 | duneapi_mock "github.com/duneanalytics/blockchain-ingester/mocks/duneapi" 19 | jsonrpc_mock "github.com/duneanalytics/blockchain-ingester/mocks/jsonrpc" 20 | "github.com/duneanalytics/blockchain-ingester/models" 21 | "github.com/go-errors/errors" 22 | "github.com/stretchr/testify/require" 23 | "golang.org/x/sync/errgroup" 24 | ) 25 | 26 | func TestRunUntilCancel(t *testing.T) { 27 | t.Cleanup(resetDefaultRegistry) 28 | ctx, cancel := context.WithCancel(context.Background()) 29 | maxBlockNumber := int64(10) 30 | sentBlockNumber := int64(0) 31 | producedBlockNumber := int64(0) 32 | duneapi := &duneapi_mock.BlockchainIngesterMock{ 33 | SendBlocksFunc: func(_ context.Context, blocks []models.RPCBlock) error { 34 | if len(blocks) == 0 { 35 | return nil 36 | } 37 | 38 | next := sentBlockNumber + 1 39 | for _, block := range blocks { 40 | // We cannot send blocks out of order to DuneAPI 41 | require.Equalf(t, next, block.BlockNumber, "expected block %d, got %d", next, block.BlockNumber) 42 | next++ 43 | } 44 | 45 | lastBlockNumber := blocks[len(blocks)-1].BlockNumber 46 | atomic.StoreInt64(&sentBlockNumber, lastBlockNumber) 47 | if lastBlockNumber >= maxBlockNumber { 48 | // cancel execution when we have sent the last block 49 | cancel() 50 | return context.Canceled 51 | } 52 | 53 | return nil 54 | }, 55 | PostProgressReportFunc: func(_ context.Context, _ models.BlockchainIndexProgress) error { 56 | return nil 57 | }, 58 | } 59 | rpcClient := &jsonrpc_mock.BlockchainClientMock{ 60 | LatestBlockNumberFunc: func() (int64, error) { 61 | return 
maxBlockNumber + 1, nil 62 | }, 63 | BlockByNumberFunc: func(_ context.Context, blockNumber int64) (models.RPCBlock, error) { 64 | atomic.StoreInt64(&producedBlockNumber, blockNumber) 65 | return models.RPCBlock{ 66 | BlockNumber: blockNumber, 67 | Payload: []byte(`block`), 68 | }, nil 69 | }, 70 | CloseFunc: func() error { 71 | return nil 72 | }, 73 | } 74 | // Swap these to see logs 75 | // logOutput := os.Stderr 76 | logOutput := io.Discard 77 | ing := ingester.New( 78 | slog.New(slog.NewTextHandler(logOutput, nil)), 79 | rpcClient, 80 | duneapi, 81 | duneapi, 82 | ingester.Config{ 83 | BlockSubmitInterval: time.Nanosecond, 84 | MaxConcurrentBlocks: 10, 85 | DLQMaxConcurrentBlocks: 2, 86 | SkipFailedBlocks: false, 87 | }, 88 | nil, // progress 89 | dlq.NewDLQ[int64](), 90 | ) 91 | 92 | err := ing.Run(ctx, 1, -1) // run until canceled 93 | require.ErrorIs(t, err, context.Canceled) // this is expected 94 | require.GreaterOrEqual(t, sentBlockNumber, maxBlockNumber) 95 | } 96 | 97 | func TestProduceBlockNumbers(t *testing.T) { 98 | t.Cleanup(resetDefaultRegistry) 99 | duneapi := &duneapi_mock.BlockchainIngesterMock{ 100 | PostProgressReportFunc: func(_ context.Context, _ models.BlockchainIndexProgress) error { 101 | return nil 102 | }, 103 | } 104 | rpcClient := &jsonrpc_mock.BlockchainClientMock{ 105 | LatestBlockNumberFunc: func() (int64, error) { 106 | return 100_000, nil 107 | }, 108 | BlockByNumberFunc: func(_ context.Context, blockNumber int64) (models.RPCBlock, error) { 109 | return models.RPCBlock{BlockNumber: blockNumber}, nil 110 | }, 111 | CloseFunc: func() error { 112 | return nil 113 | }, 114 | } 115 | // Swap these to see logs 116 | logOutput := io.Discard 117 | // logOutput := os.Stderr 118 | ing := ingester.New( 119 | slog.New(slog.NewTextHandler(logOutput, nil)), 120 | rpcClient, 121 | duneapi, 122 | duneapi, 123 | ingester.Config{ 124 | BlockSubmitInterval: time.Nanosecond, 125 | }, 126 | nil, // progress 127 | dlq.NewDLQ[int64](), 128 | ) 129 | 
blockNumbers := make(chan int64) 130 | var wg sync.WaitGroup 131 | wg.Add(1) 132 | go func() { 133 | defer wg.Done() 134 | ing.ProduceBlockNumbers(context.Background(), blockNumbers, 1, 100_000) 135 | }() 136 | for i := 1; i <= 100_000; i++ { 137 | require.Equal(t, int64(i), <-blockNumbers) 138 | } 139 | wg.Wait() 140 | } 141 | 142 | func TestSendBlocks(t *testing.T) { 143 | t.Cleanup(resetDefaultRegistry) 144 | sentBlockNumber := int64(0) 145 | duneapi := &duneapi_mock.BlockchainIngesterMock{ 146 | SendBlocksFunc: func(_ context.Context, blocks []models.RPCBlock) error { 147 | if len(blocks) == 0 { 148 | return nil 149 | } 150 | 151 | next := sentBlockNumber + 1 152 | for _, block := range blocks { 153 | // We cannot send blocks out of order to DuneAPI 154 | require.Equalf(t, next, block.BlockNumber, "expected block %d, got %d", next, block.BlockNumber) 155 | next++ 156 | } 157 | 158 | lastBlockNumber := blocks[len(blocks)-1].BlockNumber 159 | atomic.StoreInt64(&sentBlockNumber, lastBlockNumber) 160 | return nil 161 | }, 162 | } 163 | // Swap these to see logs 164 | // logOutput := os.Stderr 165 | logOutput := io.Discard 166 | ing := ingester.New( 167 | slog.New(slog.NewTextHandler(logOutput, nil)), 168 | nil, // node client isn't used in this unit test 169 | duneapi, 170 | duneapi, 171 | ingester.Config{ 172 | BlockSubmitInterval: time.Nanosecond, 173 | }, 174 | nil, // progress 175 | dlq.NewDLQ[int64](), 176 | ) 177 | 178 | blocks := make(chan models.RPCBlock) 179 | 180 | startFromBlock := 1 181 | 182 | group, ctx := errgroup.WithContext(context.Background()) 183 | group.Go(func() error { 184 | return ing.SendBlocks(context.Background(), blocks, int64(startFromBlock)) 185 | }) 186 | 187 | // Send blocks except the next block, ensure none are sent to the API 188 | for _, n := range []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 19} { 189 | select { 190 | case <-ctx.Done(): // if error group fails, its context is canceled 191 | require.Fail(t, "context was canceled") 192 | 
case blocks <- models.RPCBlock{BlockNumber: n, Payload: []byte("block")}: 193 | // Sent block 194 | } 195 | require.Equal(t, int64(0), sentBlockNumber) 196 | } 197 | // Now send the first block 198 | blocks <- models.RPCBlock{BlockNumber: 1, Payload: []byte("block")} 199 | time.Sleep(time.Millisecond) // Allow enough time for the tick before closing the channel 200 | close(blocks) 201 | require.NoError(t, group.Wait()) 202 | 203 | // Ensure the last correct block was sent 204 | require.Equal(t, int64(10), sentBlockNumber) 205 | } 206 | 207 | // TestRunBlocksOutOfOrder asserts that we can fetch blocks concurrently and that we ingest them in order 208 | // even if they are produced out of order. We ensure they are produced out of order by sleeping a random amount of time. 209 | func TestRunBlocksOutOfOrder(t *testing.T) { 210 | t.Cleanup(resetDefaultRegistry) 211 | ctx, cancel := context.WithCancel(context.Background()) 212 | maxBlockNumber := int64(1000) 213 | sentBlockNumber := int64(0) 214 | producedBlockNumber := int64(0) 215 | duneapi := &duneapi_mock.BlockchainIngesterMock{ 216 | SendBlocksFunc: func(_ context.Context, blocks []models.RPCBlock) error { 217 | if len(blocks) == 0 { 218 | return nil 219 | } 220 | 221 | next := sentBlockNumber + 1 222 | for _, block := range blocks { 223 | // We cannot send blocks out of order to DuneAPI 224 | require.Equalf(t, next, block.BlockNumber, "expected block %d, got %d", next, block.BlockNumber) 225 | next++ 226 | } 227 | 228 | lastBlockNumber := blocks[len(blocks)-1].BlockNumber 229 | atomic.StoreInt64(&sentBlockNumber, lastBlockNumber) 230 | if lastBlockNumber >= maxBlockNumber { 231 | // cancel execution when we have sent the last block 232 | cancel() 233 | return context.Canceled 234 | } 235 | 236 | return nil 237 | }, 238 | PostProgressReportFunc: func(_ context.Context, _ models.BlockchainIndexProgress) error { 239 | return nil 240 | }, 241 | } 242 | rpcClient := &jsonrpc_mock.BlockchainClientMock{ 243 | 
LatestBlockNumberFunc: func() (int64, error) { 244 | return maxBlockNumber + 1, nil 245 | }, 246 | BlockByNumberFunc: func(_ context.Context, blockNumber int64) (models.RPCBlock, error) { 247 | // Get blocks out of order by sleeping for a random amount of time 248 | time.Sleep(time.Duration(rand.Intn(10)) * time.Nanosecond) 249 | atomic.StoreInt64(&producedBlockNumber, blockNumber) 250 | return models.RPCBlock{BlockNumber: blockNumber, Payload: []byte("block")}, nil 251 | }, 252 | CloseFunc: func() error { 253 | return nil 254 | }, 255 | } 256 | // Swap these to see logs 257 | // logOutput := os.Stderr 258 | logOutput := io.Discard 259 | ing := ingester.New( 260 | slog.New(slog.NewTextHandler(logOutput, nil)), 261 | rpcClient, 262 | duneapi, 263 | duneapi, 264 | ingester.Config{ 265 | MaxConcurrentBlocks: 20, 266 | DLQMaxConcurrentBlocks: 2, // fetch blocks in multiple goroutines 267 | // big enough compared to the time spent in block by number to ensure batching. We panic 268 | // in the mocked Dune client if we don't get a batch of blocks (more than one block). 
269 | BlockSubmitInterval: 50 * time.Millisecond, 270 | SkipFailedBlocks: false, 271 | }, 272 | nil, // progress 273 | dlq.NewDLQ[int64](), 274 | ) 275 | 276 | err := ing.Run(ctx, 1, -1) // run until canceled 277 | require.ErrorIs(t, err, context.Canceled) // this is expected 278 | require.GreaterOrEqual(t, sentBlockNumber, maxBlockNumber) 279 | } 280 | 281 | // TestRunRPCNodeFails shows that we crash if the RPC client fails to fetch a block 282 | func TestRunRPCNodeFails(t *testing.T) { 283 | t.Cleanup(resetDefaultRegistry) 284 | ctx, cancel := context.WithCancel(context.Background()) 285 | defer cancel() 286 | maxBlockNumber := int64(1000) 287 | someRPCError := errors.Errorf("some RPC error") 288 | duneapi := &duneapi_mock.BlockchainIngesterMock{ 289 | SendBlocksFunc: func(_ context.Context, _ []models.RPCBlock) error { 290 | return nil 291 | }, 292 | PostProgressReportFunc: func(_ context.Context, _ models.BlockchainIndexProgress) error { 293 | return nil 294 | }, 295 | } 296 | rpcClient := &jsonrpc_mock.BlockchainClientMock{ 297 | LatestBlockNumberFunc: func() (int64, error) { 298 | return maxBlockNumber + 1, nil 299 | }, 300 | BlockByNumberFunc: func(_ context.Context, _ int64) (models.RPCBlock, error) { 301 | // Get blocks out of order by sleeping for a random amount of time 302 | time.Sleep(time.Duration(rand.Intn(10)) * time.Nanosecond) 303 | return models.RPCBlock{}, someRPCError 304 | }, 305 | CloseFunc: func() error { 306 | return nil 307 | }, 308 | } 309 | // Swap these to see logs 310 | // logOutput := os.Stderr 311 | logOutput := io.Discard 312 | ing := ingester.New( 313 | slog.New(slog.NewTextHandler(logOutput, nil)), 314 | rpcClient, 315 | duneapi, 316 | duneapi, 317 | ingester.Config{ 318 | MaxConcurrentBlocks: 10, 319 | DLQMaxConcurrentBlocks: 2, 320 | BlockSubmitInterval: time.Millisecond, 321 | SkipFailedBlocks: false, 322 | }, 323 | nil, // progress 324 | dlq.NewDLQ[int64](), 325 | ) 326 | 327 | err := ing.Run(ctx, 1, -1) // run until canceled 
328 | require.ErrorIs(t, err, someRPCError) 329 | } 330 | 331 | // TestRunRPCNodeFails shows that we crash if the RPC client fails to fetch a block 332 | func TestRunFailsIfNoConcurrentRequests(t *testing.T) { 333 | t.Cleanup(resetDefaultRegistry) 334 | logOutput := io.Discard 335 | ing := ingester.New( 336 | slog.New(slog.NewTextHandler(logOutput, nil)), 337 | nil, 338 | nil, 339 | nil, 340 | ingester.Config{ 341 | MaxConcurrentBlocks: 0, 342 | }, 343 | nil, // progress 344 | dlq.NewDLQ[int64](), 345 | ) 346 | 347 | err := ing.Run(context.Background(), 1, -1) // run until canceled 348 | require.ErrorContains(t, err, "MaxConcurrentRequests must be > 0") 349 | } 350 | 351 | func TestRunFailsIfNoConcurrentRequestsDLQ(t *testing.T) { 352 | t.Cleanup(resetDefaultRegistry) 353 | logOutput := io.Discard 354 | ing := ingester.New( 355 | slog.New(slog.NewTextHandler(logOutput, nil)), 356 | nil, 357 | nil, 358 | nil, 359 | ingester.Config{ 360 | MaxConcurrentBlocks: 10, 361 | DLQMaxConcurrentBlocks: 0, 362 | }, 363 | nil, // progress 364 | dlq.NewDLQ[int64](), 365 | ) 366 | 367 | err := ing.Run(context.Background(), 1, -1) // run until canceled 368 | require.ErrorContains(t, err, "MaxConcurrentRequestsDLQ must be > 0") 369 | } 370 | 371 | func TestRunWithDLQ(t *testing.T) { 372 | t.Cleanup(resetDefaultRegistry) 373 | ctx, cancel := context.WithCancel(context.Background()) 374 | maxBlockNumber := int64(1000) 375 | startBlockNumber := int64(10) 376 | 377 | // Initial DLQ 378 | dlqBlockNumbers := dlq.NewDLQWithDelay[int64](dlq.RetryDelayLinear(time.Duration(10) * time.Millisecond)) 379 | gaps := []models.BlockGap{ 380 | { 381 | FirstMissing: 9, 382 | LastMissing: 9, 383 | }, { 384 | FirstMissing: 3, 385 | LastMissing: 7, 386 | }, { 387 | FirstMissing: 0, 388 | LastMissing: 0, 389 | }, 390 | } 391 | ingester.AddBlockGaps(dlqBlockNumbers, gaps) 392 | 393 | // blockNumber int64 -> timesSubmitted int 394 | var blocksIndexed sync.Map 395 | // Prepopulate expected blocks 396 | for i 
:= int64(0); i < maxBlockNumber; i++ { 397 | blocksIndexed.Store(i, 0) 398 | } 399 | // Add those that aren't considered as previous gaps 400 | incrementAndGet(&blocksIndexed, int64(1)) 401 | incrementAndGet(&blocksIndexed, int64(2)) 402 | incrementAndGet(&blocksIndexed, int64(8)) 403 | 404 | // Dune API Mocking 405 | var sendBlocksRequests sync.Map 406 | duneapi := &duneapi_mock.BlockchainIngesterMock{ 407 | SendBlocksFunc: func(_ context.Context, blocks []models.RPCBlock) error { 408 | if len(blocks) == 0 { 409 | return nil 410 | } 411 | // Count Requests by block number 412 | for _, block := range blocks { 413 | incrementAndGet(&sendBlocksRequests, block.BlockNumber) 414 | } 415 | 416 | // Fail if this batch contains a block number that hasn't been requested at least twice before this call 417 | for _, block := range blocks { 418 | requests, _ := sendBlocksRequests.Load(block.BlockNumber) 419 | if requests.(int) <= 2 { 420 | return errors.Errorf("failing batch due to %v having only been requested %v times", 421 | block.BlockNumber, requests) 422 | } 423 | } 424 | 425 | // Count blocks as indexed by block number 426 | for _, block := range blocks { 427 | incrementAndGet(&blocksIndexed, block.BlockNumber) 428 | } 429 | 430 | // Look for gaps 431 | if !duneStoreContainsGaps(&blocksIndexed, maxBlockNumber) { 432 | // cancel execution when we have sent all blocks 433 | cancel() 434 | return context.Canceled 435 | } 436 | 437 | return nil 438 | }, 439 | PostProgressReportFunc: func(_ context.Context, _ models.BlockchainIndexProgress) error { 440 | return nil 441 | }, 442 | } 443 | 444 | // RPC Mocking 445 | var rpcBlocksRequests sync.Map 446 | rpcClient := &jsonrpc_mock.BlockchainClientMock{ 447 | LatestBlockNumberFunc: func() (int64, error) { 448 | return maxBlockNumber + 1, nil 449 | }, 450 | BlockByNumberFunc: func(_ context.Context, blockNumber int64) (models.RPCBlock, error) { 451 | incrementAndGet(&rpcBlocksRequests, blockNumber) 452 | 453 | // Fail every 10th 
block numbers the first 2 times 454 | if blockNumber%10 == 0 { 455 | requests, _ := rpcBlocksRequests.Load(blockNumber) 456 | if requests.(int) <= 2 { 457 | return models.RPCBlock{}, 458 | errors.Errorf("failing rpc request due to %v having only been requested %v times", 459 | blockNumber, requests) 460 | } 461 | } 462 | 463 | return models.RPCBlock{ 464 | BlockNumber: blockNumber, 465 | Payload: []byte(`block`), 466 | }, nil 467 | }, 468 | CloseFunc: func() error { 469 | return nil 470 | }, 471 | } 472 | // Swap these to see logs 473 | // logOutput := os.Stderr 474 | logOutput := io.Discard 475 | ing := ingester.New( 476 | slog.New(slog.NewTextHandler(logOutput, nil)), 477 | rpcClient, 478 | duneapi, 479 | duneapi, 480 | ingester.Config{ 481 | BlockSubmitInterval: time.Nanosecond, 482 | MaxConcurrentBlocks: 10, 483 | DLQMaxConcurrentBlocks: 1, 484 | DLQOnly: false, 485 | SkipFailedBlocks: true, 486 | }, 487 | nil, // progress 488 | dlqBlockNumbers, 489 | ) 490 | 491 | err := ing.Run(ctx, startBlockNumber, -1) // run until canceled 492 | require.False(t, duneStoreContainsGaps(&blocksIndexed, maxBlockNumber)) 493 | require.GreaterOrEqual(t, lenSyncMap(&blocksIndexed), int(maxBlockNumber)) 494 | require.ErrorIs(t, err, context.Canceled) // this is expected 495 | } 496 | 497 | func duneStoreContainsGaps(blocksIndexed *sync.Map, maxBlockNumber int64) bool { 498 | containsGap := false 499 | blocksIndexed.Range(func(key, value any) bool { 500 | blockNumber := key.(int64) 501 | count := value.(int) 502 | if blockNumber <= maxBlockNumber && count < 1 { 503 | containsGap = true 504 | return false 505 | } 506 | return true 507 | }) 508 | return containsGap 509 | } 510 | 511 | func incrementAndGet(m *sync.Map, key interface{}) int { 512 | for { 513 | // Load the current value associated with the key else initialise 514 | currentValue, _ := m.LoadOrStore(key, 0) 515 | 516 | // Increment the current value. 
newValue := currentValue.(int) + 1

		// Attempt to store the new value back into the sync.Map. Compare-and-swap (CAS) approach ensures atomicity.
		if m.CompareAndSwap(key, currentValue, newValue) {
			// If the swap succeeded, return the new value.
			return newValue
		}
		// If the swap failed, it means the value was updated by another goroutine. Retry the operation.
	}
}

// lenSyncMap counts the entries of a sync.Map via a full Range pass
// (sync.Map exposes no length accessor).
func lenSyncMap(m *sync.Map) int {
	length := 0
	m.Range(func(_, _ interface{}) bool {
		length++
		return true
	})
	return length
}

// resetDefaultRegistry resets the default Prometheus registry.
// NOTE(review): presumably needed between tests because the ingester registers
// gauges on the default registry each time it is constructed — confirm.
func resetDefaultRegistry() {
	registry := prometheus.NewRegistry()
	prometheus.DefaultRegisterer = registry
	prometheus.DefaultGatherer = registry
}
--------------------------------------------------------------------------------
/ingester/metrics.go:
package ingester

import (
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// registerIngesterMetrics exposes the ingester's progress counters and DLQ
// size as Prometheus gauges. The int64 counters are read atomically because
// they are written from other goroutines.
func registerIngesterMetrics(i *ingester) {
	registerLatestBlockNumberGauge(func() int64 {
		return atomic.LoadInt64(&i.info.LatestBlockNumber)
	})
	registerIngestedBlockNumberGauge(func() int64 {
		return atomic.LoadInt64(&i.info.IngestedBlockNumber)
	})
	registerDlqSizeGauge(func() int {
		return i.dlq.Size()
	})
}

// registerLatestBlockNumberGauge registers a gauge that reports the value
// returned by function on every scrape.
func registerLatestBlockNumberGauge(function func() int64) {
	promauto.NewGaugeFunc(prometheus.GaugeOpts{
		Namespace: "node_indexer",
		Name:      "latest_block_number",
		Help:      "The latest known block number for the chain",
	}, func() float64 {
		return float64(function())
	})
}

// registerIngestedBlockNumberGauge registers a gauge that reports the value
// returned by function on every scrape.
func registerIngestedBlockNumberGauge(function func() int64) {
	promauto.NewGaugeFunc(prometheus.GaugeOpts{
		Namespace: "node_indexer",
		Name:      "ingested_block_number",
		Help:      "The highest block number ingested so far",
	}, func() float64 {
		return float64(function())
	})
}

// registerDlqSizeGauge registers a gauge that reports the value returned by
// function on every scrape.
func registerDlqSizeGauge(function func() int) {
	promauto.NewGaugeFunc(prometheus.GaugeOpts{
		Namespace: "node_indexer",
		Name:      "dlq_size",
		Help:      "The number of blocks in the DLQ",
	}, func() float64 {
		return float64(function())
	})
}
--------------------------------------------------------------------------------
/ingester/models.go:
package ingester

import (
	"time"

	"github.com/duneanalytics/blockchain-ingester/models"
)

// Info tracks the ingester's progress (block counters) and a bounded record
// of recent RPC/Dune errors since the last reset.
type Info struct {
	BlockchainName      string
	Stack               string
	LatestBlockNumber   int64
	IngestedBlockNumber int64
	ConsumedBlockNumber int64
	Errors              ErrorState
	Since               time.Time
}

// NewInfo returns an Info for the given blockchain/stack with error slices
// pre-allocated to their fixed capacity of 100 (see ObserveRPCError).
func NewInfo(blockchain string, stack string) Info {
	return Info{
		BlockchainName: blockchain,
		Stack:          stack,
		Errors: ErrorState{
			RPCErrors:      make([]ErrorInfo, 0, 100),
			DuneErrors:     make([]ErrorInfo, 0, 100),
			RPCErrorCount:  0,
			DuneErrorCount: 0,
		},
		Since: time.Now(),
	}
}

// ToProgressReport converts the current state into the wire model sent to
// the Dune API progress endpoint.
func (info *Info) ToProgressReport() models.BlockchainIndexProgress {
	return models.BlockchainIndexProgress{
		BlockchainName:          info.BlockchainName,
		EVMStack:                info.Stack,
		LastIngestedBlockNumber: info.IngestedBlockNumber,
		LatestBlockNumber:       info.LatestBlockNumber,
		Errors:                  info.ProgressReportErrors(),
		DuneErrorCounts:         info.Errors.DuneErrorCount,
		RPCErrorCounts:          info.Errors.RPCErrorCount,
		Since:                   info.Since,
	}
}

// ResetErrors clears accumulated errors and restarts the reporting window.
func (info *Info) ResetErrors() {
	info.Since = time.Now()
	info.Errors.Reset()
}

// ErrorState keeps the most recent errors (bounded by slice capacity) plus
// total counts, for RPC and Dune requests separately.
type ErrorState struct {
RPCErrors      []ErrorInfo
	DuneErrors     []ErrorInfo
	RPCErrorCount  int
	DuneErrorCount int
}

// ProgressReportErrors returns a combined list of errors from RPC requests and Dune requests
func (info Info) ProgressReportErrors() []models.BlockchainIndexError {
	errors := make([]models.BlockchainIndexError, 0, len(info.Errors.RPCErrors)+len(info.Errors.DuneErrors))
	for _, e := range info.Errors.RPCErrors {
		errors = append(errors, models.BlockchainIndexError{
			Timestamp:    e.Timestamp,
			BlockNumbers: e.BlockNumbers,
			Error:        e.Error.Error(),
			Source:       "rpc",
		})
	}
	for _, e := range info.Errors.DuneErrors {
		errors = append(errors, models.BlockchainIndexError{
			Timestamp:    e.Timestamp,
			BlockNumbers: e.BlockNumbers,
			Error:        e.Error.Error(),
			Source:       "dune",
		})
	}
	return errors
}

// Reset truncates both error slices (keeping their backing capacity) and
// zeroes the counters.
func (es *ErrorState) Reset() {
	es.RPCErrors = es.RPCErrors[:0]
	es.DuneErrors = es.DuneErrors[:0]
	es.RPCErrorCount = 0
	es.DuneErrorCount = 0
}

// ObserveRPCError records an RPC error, stamping it with the current time.
// The slice acts as a bounded buffer: when it is at capacity the oldest entry
// is dropped (the remainder is copied into a fresh slice of the same cap).
func (es *ErrorState) ObserveRPCError(err ErrorInfo) {
	es.RPCErrorCount++
	err.Timestamp = time.Now()

	// If we have filled the slice, remove the oldest error
	if len(es.RPCErrors) == cap(es.RPCErrors) {
		tmp := make([]ErrorInfo, len(es.RPCErrors)-1, cap(es.RPCErrors))
		copy(tmp, es.RPCErrors[1:])
		es.RPCErrors = tmp
	}
	es.RPCErrors = append(es.RPCErrors, err)
}

// ObserveDuneError records a Dune API error; same bounded-buffer behavior as
// ObserveRPCError.
func (es *ErrorState) ObserveDuneError(err ErrorInfo) {
	es.DuneErrorCount++
	err.Timestamp = time.Now()

	// If we have filled the slice, remove the oldest error
	if len(es.DuneErrors) == cap(es.DuneErrors) {
		tmp := make([]ErrorInfo, len(es.DuneErrors)-1, cap(es.DuneErrors))
		copy(tmp, es.DuneErrors[1:])
		es.DuneErrors = tmp
	}
	es.DuneErrors = append(es.DuneErrors, err)
}

// ErrorInfo is one observed error; BlockNumbers is a comma-separated list of
// the block numbers involved (see send.go).
type ErrorInfo struct {
	Timestamp time.Time
BlockNumbers string
	Error        error
}
--------------------------------------------------------------------------------
/ingester/models_test.go:
package ingester_test

import (
	"testing"

	"github.com/duneanalytics/blockchain-ingester/ingester"
	"github.com/go-errors/errors"
	"github.com/stretchr/testify/require"
)

// TestInfoErrors ensures that we never allow the error slices to grow indefinitely
func TestInfoErrors(t *testing.T) {
	info := ingester.NewInfo("test", "test")
	// Two outer passes verify the capacity-100 bound holds again after a reset.
	for j := 0; j < 2; j++ {
		for i := 0; i < 200; i++ {
			require.Len(t, info.Errors.RPCErrors, min(i, 100))
			require.Len(t, info.Errors.DuneErrors, min(i, 100))
			info.Errors.ObserveDuneError(ingester.ErrorInfo{})
			info.Errors.ObserveRPCError(ingester.ErrorInfo{})
			require.Equal(t, 100, cap(info.Errors.RPCErrors))
			require.Equal(t, 100, cap(info.Errors.DuneErrors))
		}
		info.ResetErrors()
		require.Len(t, info.Errors.RPCErrors, 0)
		require.Len(t, info.Errors.DuneErrors, 0)
		require.Equal(t, 100, cap(info.Errors.RPCErrors))
		require.Equal(t, 100, cap(info.Errors.DuneErrors))
	}
}

// TestProgressReportErrors checks that RPC and Dune errors are merged into a
// single report list.
func TestProgressReportErrors(t *testing.T) {
	info := ingester.NewInfo("test", "test")
	info.Errors.ObserveDuneError(ingester.ErrorInfo{Error: errors.New("foo")})
	info.Errors.ObserveRPCError(ingester.ErrorInfo{Error: errors.New("bar")})
	errors := info.ProgressReportErrors()
	require.Len(t, errors, 2)
}

// TestInfoToProgressReport checks the field-by-field mapping from Info to the
// progress-report wire model.
func TestInfoToProgressReport(t *testing.T) {
	info := ingester.NewInfo("test", "test")
	info.IngestedBlockNumber = 1
	info.LatestBlockNumber = 2
	info.Errors.ObserveDuneError(ingester.ErrorInfo{Error: errors.New("foo")})
	report := info.ToProgressReport()
	require.Equal(t, "test", report.BlockchainName)
	require.Equal(t, "test", 
report.EVMStack)
	require.Equal(t, int64(1), report.LastIngestedBlockNumber)
	require.Equal(t, int64(2), report.LatestBlockNumber)
	require.Len(t, report.Errors, 1)
	require.Equal(t, 1, report.DuneErrorCounts)
	require.Equal(t, 0, report.RPCErrorCounts)
}
--------------------------------------------------------------------------------
/ingester/send.go:
package ingester

import (
	"context"
	"fmt"
	"strings"
	"sync/atomic"
	"time"

	"github.com/duneanalytics/blockchain-ingester/models"
	"github.com/go-errors/errors"
)

// NOTE(review): the batch-size actually used below is i.cfg.MaxBatchSize;
// confirm whether this constant is referenced elsewhere (e.g. as a default).
const maxBatchSize = 256

// SendBlocks to Dune. We receive blocks from the FetchBlockLoop goroutines, potentially out of order.
// We buffer the blocks in a map until we have no gaps, so that we can send them in order to Dune.
func (i *ingester) SendBlocks(ctx context.Context, blocks <-chan models.RPCBlock, startBlockNumber int64) error {
	// Buffer for temporarily storing blocks that have arrived out of order
	collectedBlocks := make(map[int64]models.RPCBlock, i.cfg.MaxBatchSize)
	nextNumberToSend := startBlockNumber
	batchTimer := time.NewTicker(i.cfg.BlockSubmitInterval)
	defer batchTimer.Stop()

	i.log.Debug("SendBlocks: Starting to receive blocks")
	for {
		// Either receive a block, send blocks, or shut down (if the context is done, or the channel is closed).
		select {
		case <-ctx.Done():
			i.log.Debug("SendBlocks: Context canceled, stopping")
			return ctx.Err()
		case block, ok := <-blocks:
			if !ok {
				i.log.Debug("SendBlocks: Channel is closed, returning")
				return nil
			}

			// A failed block is still buffered below so the ordered-send logic
			// can decide whether to skip it (cfg.SkipFailedBlocks) or halt.
			if block.Errored() {
				i.info.Errors.ObserveRPCError(ErrorInfo{
					BlockNumbers: fmt.Sprintf("%d", block.BlockNumber),
					Error:        block.Error,
				})

				i.log.Error("Received FAILED block", "number", block.BlockNumber)
			}

			collectedBlocks[block.BlockNumber] = block
			i.log.Debug(
				"SendBlocks: Received block",
				"blockNumber", block.BlockNumber,
				"bufferSize", len(collectedBlocks),
			)
		case <-batchTimer.C:
			// Periodically flush whatever contiguous run of blocks we have.
			var err error
			nextNumberToSend, err = i.trySendCompletedBlocks(ctx, collectedBlocks, nextNumberToSend)
			if err != nil {
				return errors.Errorf("send blocks: %w", err)
			}
		}
	}
}

// trySendCompletedBlocks sends all blocks that can be sent in order from the blockMap.
// Once we have sent all blocks, if any, we return with the nextNumberToSend.
// We return the next numberToSend such that the caller can continue from there.
func (i *ingester) trySendCompletedBlocks(
	ctx context.Context,
	collectedBlocks map[int64]models.RPCBlock,
	nextBlockToSend int64,
) (int64, error) {
	// Keep sending batches until a batch makes no progress (gap reached or
	// nothing buffered) or an error occurs.
	for {
		nextBlock, err := i.trySendBlockBatch(ctx, collectedBlocks, nextBlockToSend, i.cfg.MaxBatchSize)
		if err != nil || nextBlock == nextBlockToSend {
			return nextBlock, err
		}
		nextBlockToSend = nextBlock
	}
}

// trySendBlockBatch drains up to maxBatchSize consecutive blocks starting at
// nextBlockToSend from collectedBlocks and sends them to Dune in one call.
// It returns the new nextBlockToSend; on a Dune failure the batch is requeued
// onto the DLQ and, unless SkipFailedBlocks is set, the error is returned.
func (i *ingester) trySendBlockBatch(
	ctx context.Context,
	collectedBlocks map[int64]models.RPCBlock,
	nextBlockToSend int64,
	maxBatchSize int,
) (int64, error) {
	startTime := time.Now()

	// Collect a batch of blocks to send; only send those which are consecutive and in order
	blockBatch := make([]models.RPCBlock, 0, maxBatchSize)
	for block, ok := collectedBlocks[nextBlockToSend]; ok; block, ok = collectedBlocks[nextBlockToSend] {
		// Skip Failed block if we're configured to skip Failed blocks
		if i.cfg.SkipFailedBlocks && block.Errored() {
			i.log.Error("SendBlocks: RPCBlock has an error, requeueing...", "block", block.BlockNumber, "error", block.Error)
			i.dlq.AddItemHighPriority(block.BlockNumber)
			delete(collectedBlocks, nextBlockToSend)
			nextBlockToSend++
			continue
		}

		blockBatch = append(blockBatch, block)
		delete(collectedBlocks, nextBlockToSend)
		nextBlockToSend++

		if len(blockBatch) == maxBatchSize {
			break
		}
	}

	if len(blockBatch) == 0 {
		return nextBlockToSend, nil
	}

	// Send the batch
	lastBlockNumber := blockBatch[len(blockBatch)-1].BlockNumber
	// Invariant: without skipping, the batch must end exactly at the block
	// before nextBlockToSend — anything else is a programming error.
	if !i.cfg.SkipFailedBlocks && lastBlockNumber != nextBlockToSend-1 {
		panic("unexpected last block number")
	}
	if err := i.dune.SendBlocks(ctx, blockBatch); err != nil {
		if errors.Is(err, context.Canceled) {
			i.log.Info("SendBlocks: Context canceled, stopping")
			return nextBlockToSend, nil
		}

		// Requeue the whole batch on the DLQ at high priority and record the
		// failure for progress reporting.
		i.log.Error("SendBlocks: Failed to send batch, requeueing...",
			"firstBlockInBatch", blockBatch[0].BlockNumber,
			"lastBlockInBatch", lastBlockNumber, "error", err)
		blockNumbers := make([]string, len(blockBatch))
		for n, block := range blockBatch {
			i.dlq.AddItemHighPriority(block.BlockNumber)
			blockNumbers[n] = fmt.Sprintf("%d", block.BlockNumber)
		}

		i.info.Errors.ObserveDuneError(ErrorInfo{
			Error:        err,
			BlockNumbers: strings.Join(blockNumbers, ","),
		})

		if i.cfg.SkipFailedBlocks {
			i.log.Error("SendBlocks: Failed to send batch, continuing", "error", err)
			return nextBlockToSend, nil
		}

		err := errors.Errorf("failed to send batch: %w", err)
		i.log.Error("SendBlocks: Failed to send batch, exiting", "error", err)
		return nextBlockToSend, err
	}
	atomic.StoreInt64(&i.info.IngestedBlockNumber, lastBlockNumber)
	i.log.Info(
		"Sent blocks to DuneAPI",
		"batchSize", len(blockBatch),
		"nextBlockToSend", nextBlockToSend,
		"elapsed", time.Since(startTime),
	)
	return nextBlockToSend, nil
}
--------------------------------------------------------------------------------
/lib/dlq/dlq.go:
package dlq

import (
	"sync"
	"time"

	pq "github.com/emirpasic/gods/queues/priorityqueue"
	"github.com/emirpasic/gods/utils"
)

// DLQ is a mutex-guarded dead-letter queue of items ordered by the time they
// next become eligible for retry.
type DLQ[T any] struct {
	priorityQueue pq.Queue // structure not thread safe
	mutex         sync.Mutex
	retryDelay    RetryStrategy
}

// RetryStrategy takes the number of retries and returns a Duration.
// This allows the caller to implement any strategy they like, be it constant, linear, exponential, etc.
type RetryStrategy func(retries int) time.Duration

// Item This is generic so that metadata about retries can be maintained in an envelope during processing for when
// an item needs to make its way back onto the DLQ later
type Item[T any] struct {
	Value       T
	Retries     int
	nextRunTime time.Time
}

// MapItem converts an Item[T] into an Item[U], preserving the retry metadata.
func MapItem[T, U any](b Item[T], mapper func(T) U) Item[U] {
	return Item[U]{
		Value:       mapper(b.Value),
		Retries:     b.Retries,
		nextRunTime: b.nextRunTime,
	}
}

// NewDLQ returns a DLQ with a linear one-minute-per-retry delay.
func NewDLQ[T any]() *DLQ[T] {
	return NewDLQWithDelay[T](RetryDelayLinear(time.Minute))
}

// RetryDelayLinear returns a strategy whose delay grows linearly:
// retries * backoff.
func RetryDelayLinear(backoff time.Duration) RetryStrategy {
	return func(retries int) time.Duration {
		return time.Duration(retries) * backoff // retries must be converted to a Duration for multiplication
	}
}

// NewDLQWithDelay returns a DLQ using the given retry-delay strategy.
func NewDLQWithDelay[T any](retryDelay func(retries int) time.Duration) *DLQ[T] {
	return &DLQ[T]{priorityQueue: *pq.NewWith(byNextRunTime[T]), retryDelay: retryDelay}
}

// Comparator function (sort by nextRunTime in ascending order)
func byNextRunTime[T any](a, b interface{}) int {
	return utils.TimeComparator(a.(Item[T]).nextRunTime, b.(Item[T]).nextRunTime)
}

// AddItem enqueues item to run after the strategy's delay for the given retry
// count; the stored envelope carries retries+1 as its next retry count.
func (dlq *DLQ[T]) AddItem(item T, retries int) {
	nextRunTime := time.Now().Add(dlq.retryDelay(retries))

	dlq.mutex.Lock()
	defer dlq.mutex.Unlock()

	dlq.priorityQueue.Enqueue(Item[T]{Value: item, Retries: retries + 1, nextRunTime: nextRunTime})
}

// AddItemHighPriority enqueues item with the zero time, so it sorts ahead of
// all normally-scheduled items and is immediately eligible.
func (dlq *DLQ[T]) AddItemHighPriority(item T) {
	dlq.mutex.Lock()
	defer dlq.mutex.Unlock()

	dlq.priorityQueue.Enqueue(Item[T]{Value: item, Retries: 0, nextRunTime: time.Time{}})
}

// GetNextItem pops the earliest-scheduled item if it is due; otherwise it
// returns (nil, false) without modifying the queue.
func (dlq *DLQ[T]) GetNextItem() (value *Item[T], ok bool) {
	dlq.mutex.Lock()
	defer dlq.mutex.Unlock()

	peek, ok := dlq.priorityQueue.Peek()
	if !ok || peek.(Item[T]).nextRunTime.After(time.Now()) {
		return nil, false
	}

	item, ok := dlq.priorityQueue.Dequeue()
	if ok {
		itemCasted := item.(Item[T])
		return &itemCasted, ok
	}
	return nil, ok
}

// Size returns the number of queued items (due or not).
func (dlq *DLQ[T]) Size() int {
	dlq.mutex.Lock()
	defer dlq.mutex.Unlock()

	return dlq.priorityQueue.Size()
}
--------------------------------------------------------------------------------
/lib/hexutils/numbers.go:
package hexutils

import (
	"math/big"
	"strconv"

	"github.com/go-errors/errors"
)

// IntFromHex parses a "0x"-prefixed hex string into an int64.
// The empty string is treated as zero; a missing "0x" prefix is an error.
func IntFromHex(hexNumber string) (int64, error) {
	// Empty string is OK
	if len(hexNumber) == 0 {
		return 0, nil
	}
	if len(hexNumber) < 2 || hexNumber[:2] != "0x" {
		return 0, errors.Errorf("couldn't parse '%s' as number, must start with '0x'", hexNumber)
	}
	n, err := strconv.ParseInt(hexNumber[2:], 16, 64)
	if err != nil {
		return 0, errors.Errorf("failed to parse '%s' as int: %w", hexNumber, err)
	}
	return n, nil
}

// BigIntFromHex parses a "0x"-prefixed hex string of arbitrary size and
// returns its decimal (base-10) string representation.
// The empty string yields ""; a missing "0x" prefix is an error.
func BigIntFromHex(hexNumber string) (string, error) {
	// Empty string is OK
	if len(hexNumber) == 0 {
		return "", nil
	}
	if len(hexNumber) < 2 || hexNumber[:2] != "0x" {
		return "", errors.Errorf("couldn't parse '%s' as number, must start with '0x'", hexNumber)
	}
	n := &big.Int{}
	if _, ok := n.SetString(hexNumber[2:], 16); !ok {
		return "", errors.Errorf("failed to parse '%s' as number", hexNumber)
	}
	return n.Text(10), nil
}
--------------------------------------------------------------------------------
/mocks/duneapi/client.go:
// Code generated by moq; DO NOT EDIT.
2 | // github.com/matryer/moq 3 | 4 | package duneapi_mock 5 | 6 | import ( 7 | "context" 8 | "github.com/duneanalytics/blockchain-ingester/client/duneapi" 9 | "github.com/duneanalytics/blockchain-ingester/models" 10 | "sync" 11 | ) 12 | 13 | // Ensure, that BlockchainIngesterMock does implement duneapi.BlockchainIngester. 14 | // If this is not the case, regenerate this file with moq. 15 | var _ duneapi.BlockchainIngester = &BlockchainIngesterMock{} 16 | 17 | // BlockchainIngesterMock is a mock implementation of duneapi.BlockchainIngester. 18 | // 19 | // func TestSomethingThatUsesBlockchainIngester(t *testing.T) { 20 | // 21 | // // make and configure a mocked duneapi.BlockchainIngester 22 | // mockedBlockchainIngester := &BlockchainIngesterMock{ 23 | // GetBlockGapsFunc: func(ctx context.Context) (*models.BlockchainGaps, error) { 24 | // panic("mock out the GetBlockGaps method") 25 | // }, 26 | // GetProgressReportFunc: func(ctx context.Context) (*models.BlockchainIndexProgress, error) { 27 | // panic("mock out the GetProgressReport method") 28 | // }, 29 | // PostProgressReportFunc: func(ctx context.Context, progress models.BlockchainIndexProgress) error { 30 | // panic("mock out the PostProgressReport method") 31 | // }, 32 | // SendBlocksFunc: func(ctx context.Context, payloads []models.RPCBlock) error { 33 | // panic("mock out the SendBlocks method") 34 | // }, 35 | // } 36 | // 37 | // // use mockedBlockchainIngester in code that requires duneapi.BlockchainIngester 38 | // // and then make assertions. 39 | // 40 | // } 41 | type BlockchainIngesterMock struct { 42 | // GetBlockGapsFunc mocks the GetBlockGaps method. 43 | GetBlockGapsFunc func(ctx context.Context) (*models.BlockchainGaps, error) 44 | 45 | // GetProgressReportFunc mocks the GetProgressReport method. 46 | GetProgressReportFunc func(ctx context.Context) (*models.BlockchainIndexProgress, error) 47 | 48 | // PostProgressReportFunc mocks the PostProgressReport method. 
49 | PostProgressReportFunc func(ctx context.Context, progress models.BlockchainIndexProgress) error 50 | 51 | // SendBlocksFunc mocks the SendBlocks method. 52 | SendBlocksFunc func(ctx context.Context, payloads []models.RPCBlock) error 53 | 54 | // calls tracks calls to the methods. 55 | calls struct { 56 | // GetBlockGaps holds details about calls to the GetBlockGaps method. 57 | GetBlockGaps []struct { 58 | // Ctx is the ctx argument value. 59 | Ctx context.Context 60 | } 61 | // GetProgressReport holds details about calls to the GetProgressReport method. 62 | GetProgressReport []struct { 63 | // Ctx is the ctx argument value. 64 | Ctx context.Context 65 | } 66 | // PostProgressReport holds details about calls to the PostProgressReport method. 67 | PostProgressReport []struct { 68 | // Ctx is the ctx argument value. 69 | Ctx context.Context 70 | // Progress is the progress argument value. 71 | Progress models.BlockchainIndexProgress 72 | } 73 | // SendBlocks holds details about calls to the SendBlocks method. 74 | SendBlocks []struct { 75 | // Ctx is the ctx argument value. 76 | Ctx context.Context 77 | // Payloads is the payloads argument value. 78 | Payloads []models.RPCBlock 79 | } 80 | } 81 | lockGetBlockGaps sync.RWMutex 82 | lockGetProgressReport sync.RWMutex 83 | lockPostProgressReport sync.RWMutex 84 | lockSendBlocks sync.RWMutex 85 | } 86 | 87 | // GetBlockGaps calls GetBlockGapsFunc. 
88 | func (mock *BlockchainIngesterMock) GetBlockGaps(ctx context.Context) (*models.BlockchainGaps, error) { 89 | if mock.GetBlockGapsFunc == nil { 90 | panic("BlockchainIngesterMock.GetBlockGapsFunc: method is nil but BlockchainIngester.GetBlockGaps was just called") 91 | } 92 | callInfo := struct { 93 | Ctx context.Context 94 | }{ 95 | Ctx: ctx, 96 | } 97 | mock.lockGetBlockGaps.Lock() 98 | mock.calls.GetBlockGaps = append(mock.calls.GetBlockGaps, callInfo) 99 | mock.lockGetBlockGaps.Unlock() 100 | return mock.GetBlockGapsFunc(ctx) 101 | } 102 | 103 | // GetBlockGapsCalls gets all the calls that were made to GetBlockGaps. 104 | // Check the length with: 105 | // 106 | // len(mockedBlockchainIngester.GetBlockGapsCalls()) 107 | func (mock *BlockchainIngesterMock) GetBlockGapsCalls() []struct { 108 | Ctx context.Context 109 | } { 110 | var calls []struct { 111 | Ctx context.Context 112 | } 113 | mock.lockGetBlockGaps.RLock() 114 | calls = mock.calls.GetBlockGaps 115 | mock.lockGetBlockGaps.RUnlock() 116 | return calls 117 | } 118 | 119 | // GetProgressReport calls GetProgressReportFunc. 120 | func (mock *BlockchainIngesterMock) GetProgressReport(ctx context.Context) (*models.BlockchainIndexProgress, error) { 121 | if mock.GetProgressReportFunc == nil { 122 | panic("BlockchainIngesterMock.GetProgressReportFunc: method is nil but BlockchainIngester.GetProgressReport was just called") 123 | } 124 | callInfo := struct { 125 | Ctx context.Context 126 | }{ 127 | Ctx: ctx, 128 | } 129 | mock.lockGetProgressReport.Lock() 130 | mock.calls.GetProgressReport = append(mock.calls.GetProgressReport, callInfo) 131 | mock.lockGetProgressReport.Unlock() 132 | return mock.GetProgressReportFunc(ctx) 133 | } 134 | 135 | // GetProgressReportCalls gets all the calls that were made to GetProgressReport. 
136 | // Check the length with: 137 | // 138 | // len(mockedBlockchainIngester.GetProgressReportCalls()) 139 | func (mock *BlockchainIngesterMock) GetProgressReportCalls() []struct { 140 | Ctx context.Context 141 | } { 142 | var calls []struct { 143 | Ctx context.Context 144 | } 145 | mock.lockGetProgressReport.RLock() 146 | calls = mock.calls.GetProgressReport 147 | mock.lockGetProgressReport.RUnlock() 148 | return calls 149 | } 150 | 151 | // PostProgressReport calls PostProgressReportFunc. 152 | func (mock *BlockchainIngesterMock) PostProgressReport(ctx context.Context, progress models.BlockchainIndexProgress) error { 153 | if mock.PostProgressReportFunc == nil { 154 | panic("BlockchainIngesterMock.PostProgressReportFunc: method is nil but BlockchainIngester.PostProgressReport was just called") 155 | } 156 | callInfo := struct { 157 | Ctx context.Context 158 | Progress models.BlockchainIndexProgress 159 | }{ 160 | Ctx: ctx, 161 | Progress: progress, 162 | } 163 | mock.lockPostProgressReport.Lock() 164 | mock.calls.PostProgressReport = append(mock.calls.PostProgressReport, callInfo) 165 | mock.lockPostProgressReport.Unlock() 166 | return mock.PostProgressReportFunc(ctx, progress) 167 | } 168 | 169 | // PostProgressReportCalls gets all the calls that were made to PostProgressReport. 170 | // Check the length with: 171 | // 172 | // len(mockedBlockchainIngester.PostProgressReportCalls()) 173 | func (mock *BlockchainIngesterMock) PostProgressReportCalls() []struct { 174 | Ctx context.Context 175 | Progress models.BlockchainIndexProgress 176 | } { 177 | var calls []struct { 178 | Ctx context.Context 179 | Progress models.BlockchainIndexProgress 180 | } 181 | mock.lockPostProgressReport.RLock() 182 | calls = mock.calls.PostProgressReport 183 | mock.lockPostProgressReport.RUnlock() 184 | return calls 185 | } 186 | 187 | // SendBlocks calls SendBlocksFunc. 
188 | func (mock *BlockchainIngesterMock) SendBlocks(ctx context.Context, payloads []models.RPCBlock) error { 189 | if mock.SendBlocksFunc == nil { 190 | panic("BlockchainIngesterMock.SendBlocksFunc: method is nil but BlockchainIngester.SendBlocks was just called") 191 | } 192 | callInfo := struct { 193 | Ctx context.Context 194 | Payloads []models.RPCBlock 195 | }{ 196 | Ctx: ctx, 197 | Payloads: payloads, 198 | } 199 | mock.lockSendBlocks.Lock() 200 | mock.calls.SendBlocks = append(mock.calls.SendBlocks, callInfo) 201 | mock.lockSendBlocks.Unlock() 202 | return mock.SendBlocksFunc(ctx, payloads) 203 | } 204 | 205 | // SendBlocksCalls gets all the calls that were made to SendBlocks. 206 | // Check the length with: 207 | // 208 | // len(mockedBlockchainIngester.SendBlocksCalls()) 209 | func (mock *BlockchainIngesterMock) SendBlocksCalls() []struct { 210 | Ctx context.Context 211 | Payloads []models.RPCBlock 212 | } { 213 | var calls []struct { 214 | Ctx context.Context 215 | Payloads []models.RPCBlock 216 | } 217 | mock.lockSendBlocks.RLock() 218 | calls = mock.calls.SendBlocks 219 | mock.lockSendBlocks.RUnlock() 220 | return calls 221 | } 222 | -------------------------------------------------------------------------------- /mocks/jsonrpc/httpclient.go: -------------------------------------------------------------------------------- 1 | // Code generated by moq; DO NOT EDIT. 2 | // github.com/matryer/moq 3 | 4 | package jsonrpc_mock 5 | 6 | import ( 7 | "github.com/duneanalytics/blockchain-ingester/client/jsonrpc" 8 | "github.com/hashicorp/go-retryablehttp" 9 | "net/http" 10 | "sync" 11 | ) 12 | 13 | // Ensure, that HTTPClientMock does implement jsonrpc.HTTPClient. 14 | // If this is not the case, regenerate this file with moq. 15 | var _ jsonrpc.HTTPClient = &HTTPClientMock{} 16 | 17 | // HTTPClientMock is a mock implementation of jsonrpc.HTTPClient. 
18 | // 19 | // func TestSomethingThatUsesHTTPClient(t *testing.T) { 20 | // 21 | // // make and configure a mocked jsonrpc.HTTPClient 22 | // mockedHTTPClient := &HTTPClientMock{ 23 | // DoFunc: func(req *retryablehttp.Request) (*http.Response, error) { 24 | // panic("mock out the Do method") 25 | // }, 26 | // } 27 | // 28 | // // use mockedHTTPClient in code that requires jsonrpc.HTTPClient 29 | // // and then make assertions. 30 | // 31 | // } 32 | type HTTPClientMock struct { 33 | // DoFunc mocks the Do method. 34 | DoFunc func(req *retryablehttp.Request) (*http.Response, error) 35 | 36 | // calls tracks calls to the methods. 37 | calls struct { 38 | // Do holds details about calls to the Do method. 39 | Do []struct { 40 | // Req is the req argument value. 41 | Req *retryablehttp.Request 42 | } 43 | } 44 | lockDo sync.RWMutex 45 | } 46 | 47 | // Do calls DoFunc. 48 | func (mock *HTTPClientMock) Do(req *retryablehttp.Request) (*http.Response, error) { 49 | if mock.DoFunc == nil { 50 | panic("HTTPClientMock.DoFunc: method is nil but HTTPClient.Do was just called") 51 | } 52 | callInfo := struct { 53 | Req *retryablehttp.Request 54 | }{ 55 | Req: req, 56 | } 57 | mock.lockDo.Lock() 58 | mock.calls.Do = append(mock.calls.Do, callInfo) 59 | mock.lockDo.Unlock() 60 | return mock.DoFunc(req) 61 | } 62 | 63 | // DoCalls gets all the calls that were made to Do. 64 | // Check the length with: 65 | // 66 | // len(mockedHTTPClient.DoCalls()) 67 | func (mock *HTTPClientMock) DoCalls() []struct { 68 | Req *retryablehttp.Request 69 | } { 70 | var calls []struct { 71 | Req *retryablehttp.Request 72 | } 73 | mock.lockDo.RLock() 74 | calls = mock.calls.Do 75 | mock.lockDo.RUnlock() 76 | return calls 77 | } 78 | -------------------------------------------------------------------------------- /mocks/jsonrpc/rpcnode.go: -------------------------------------------------------------------------------- 1 | // Code generated by moq; DO NOT EDIT. 
2 | // github.com/matryer/moq 3 | 4 | package jsonrpc_mock 5 | 6 | import ( 7 | "context" 8 | "github.com/duneanalytics/blockchain-ingester/client/jsonrpc" 9 | "github.com/duneanalytics/blockchain-ingester/models" 10 | "sync" 11 | ) 12 | 13 | // Ensure, that BlockchainClientMock does implement jsonrpc.BlockchainClient. 14 | // If this is not the case, regenerate this file with moq. 15 | var _ jsonrpc.BlockchainClient = &BlockchainClientMock{} 16 | 17 | // BlockchainClientMock is a mock implementation of jsonrpc.BlockchainClient. 18 | // 19 | // func TestSomethingThatUsesBlockchainClient(t *testing.T) { 20 | // 21 | // // make and configure a mocked jsonrpc.BlockchainClient 22 | // mockedBlockchainClient := &BlockchainClientMock{ 23 | // BlockByNumberFunc: func(ctx context.Context, blockNumber int64) (models.RPCBlock, error) { 24 | // panic("mock out the BlockByNumber method") 25 | // }, 26 | // CloseFunc: func() error { 27 | // panic("mock out the Close method") 28 | // }, 29 | // LatestBlockNumberFunc: func() (int64, error) { 30 | // panic("mock out the LatestBlockNumber method") 31 | // }, 32 | // } 33 | // 34 | // // use mockedBlockchainClient in code that requires jsonrpc.BlockchainClient 35 | // // and then make assertions. 36 | // 37 | // } 38 | type BlockchainClientMock struct { 39 | // BlockByNumberFunc mocks the BlockByNumber method. 40 | BlockByNumberFunc func(ctx context.Context, blockNumber int64) (models.RPCBlock, error) 41 | 42 | // CloseFunc mocks the Close method. 43 | CloseFunc func() error 44 | 45 | // LatestBlockNumberFunc mocks the LatestBlockNumber method. 46 | LatestBlockNumberFunc func() (int64, error) 47 | 48 | // calls tracks calls to the methods. 49 | calls struct { 50 | // BlockByNumber holds details about calls to the BlockByNumber method. 51 | BlockByNumber []struct { 52 | // Ctx is the ctx argument value. 53 | Ctx context.Context 54 | // BlockNumber is the blockNumber argument value. 
55 | BlockNumber int64 56 | } 57 | // Close holds details about calls to the Close method. 58 | Close []struct { 59 | } 60 | // LatestBlockNumber holds details about calls to the LatestBlockNumber method. 61 | LatestBlockNumber []struct { 62 | } 63 | } 64 | lockBlockByNumber sync.RWMutex 65 | lockClose sync.RWMutex 66 | lockLatestBlockNumber sync.RWMutex 67 | } 68 | 69 | // BlockByNumber calls BlockByNumberFunc. 70 | func (mock *BlockchainClientMock) BlockByNumber(ctx context.Context, blockNumber int64) (models.RPCBlock, error) { 71 | if mock.BlockByNumberFunc == nil { 72 | panic("BlockchainClientMock.BlockByNumberFunc: method is nil but BlockchainClient.BlockByNumber was just called") 73 | } 74 | callInfo := struct { 75 | Ctx context.Context 76 | BlockNumber int64 77 | }{ 78 | Ctx: ctx, 79 | BlockNumber: blockNumber, 80 | } 81 | mock.lockBlockByNumber.Lock() 82 | mock.calls.BlockByNumber = append(mock.calls.BlockByNumber, callInfo) 83 | mock.lockBlockByNumber.Unlock() 84 | return mock.BlockByNumberFunc(ctx, blockNumber) 85 | } 86 | 87 | // BlockByNumberCalls gets all the calls that were made to BlockByNumber. 88 | // Check the length with: 89 | // 90 | // len(mockedBlockchainClient.BlockByNumberCalls()) 91 | func (mock *BlockchainClientMock) BlockByNumberCalls() []struct { 92 | Ctx context.Context 93 | BlockNumber int64 94 | } { 95 | var calls []struct { 96 | Ctx context.Context 97 | BlockNumber int64 98 | } 99 | mock.lockBlockByNumber.RLock() 100 | calls = mock.calls.BlockByNumber 101 | mock.lockBlockByNumber.RUnlock() 102 | return calls 103 | } 104 | 105 | // Close calls CloseFunc. 
106 | func (mock *BlockchainClientMock) Close() error { 107 | if mock.CloseFunc == nil { 108 | panic("BlockchainClientMock.CloseFunc: method is nil but BlockchainClient.Close was just called") 109 | } 110 | callInfo := struct { 111 | }{} 112 | mock.lockClose.Lock() 113 | mock.calls.Close = append(mock.calls.Close, callInfo) 114 | mock.lockClose.Unlock() 115 | return mock.CloseFunc() 116 | } 117 | 118 | // CloseCalls gets all the calls that were made to Close. 119 | // Check the length with: 120 | // 121 | // len(mockedBlockchainClient.CloseCalls()) 122 | func (mock *BlockchainClientMock) CloseCalls() []struct { 123 | } { 124 | var calls []struct { 125 | } 126 | mock.lockClose.RLock() 127 | calls = mock.calls.Close 128 | mock.lockClose.RUnlock() 129 | return calls 130 | } 131 | 132 | // LatestBlockNumber calls LatestBlockNumberFunc. 133 | func (mock *BlockchainClientMock) LatestBlockNumber() (int64, error) { 134 | if mock.LatestBlockNumberFunc == nil { 135 | panic("BlockchainClientMock.LatestBlockNumberFunc: method is nil but BlockchainClient.LatestBlockNumber was just called") 136 | } 137 | callInfo := struct { 138 | }{} 139 | mock.lockLatestBlockNumber.Lock() 140 | mock.calls.LatestBlockNumber = append(mock.calls.LatestBlockNumber, callInfo) 141 | mock.lockLatestBlockNumber.Unlock() 142 | return mock.LatestBlockNumberFunc() 143 | } 144 | 145 | // LatestBlockNumberCalls gets all the calls that were made to LatestBlockNumber. 
// Check the length with:
//
//	len(mockedBlockchainClient.LatestBlockNumberCalls())
func (mock *BlockchainClientMock) LatestBlockNumberCalls() []struct {
} {
	var calls []struct {
	}
	mock.lockLatestBlockNumber.RLock()
	calls = mock.calls.LatestBlockNumber
	mock.lockLatestBlockNumber.RUnlock()
	return calls
}
--------------------------------------------------------------------------------
/models/block.go:
--------------------------------------------------------------------------------
package models

// RPCBlock holds the raw data collected for one block, identified by its
// block number, or the error encountered while collecting it.
type RPCBlock struct {
	BlockNumber int64
	// agnostic blob of data that is the block
	Payload []byte
	// optional field, if we fail to collect the block data
	Error error
}

// Errored reports whether collecting this block's data failed (Error is set).
func (b RPCBlock) Errored() bool {
	return b.Error != nil
}
--------------------------------------------------------------------------------
/models/evm.go:
--------------------------------------------------------------------------------
package models

// EVMStack identifies a supported EVM node stack variant.
type EVMStack string

const (
	OpStack       EVMStack = "opstack"
	ArbitrumNitro EVMStack = "arbitrum-nitro"
)

// String returns the stack identifier as a plain string.
func (e EVMStack) String() string {
	return string(e)
}
--------------------------------------------------------------------------------
/models/gaps.go:
--------------------------------------------------------------------------------
package models

// BlockchainGaps lists ranges of block numbers that are missing from the
// ingested data (as reported by the Dune API's GetBlockGaps).
type BlockchainGaps struct {
	Gaps []BlockGap
}

// BlockGap describes a contiguous run of missing blocks, from FirstMissing
// to LastMissing. NOTE(review): bounds are presumably inclusive — confirm
// against the API that produces them.
type BlockGap struct {
	FirstMissing int64
	LastMissing  int64
}
--------------------------------------------------------------------------------
/models/progress.go:
--------------------------------------------------------------------------------
package models

import (
	"time"
)

// BlockchainIndexProgress is a progress report for one chain, exchanged with
// the Dune API (see BlockchainIngester.GetProgressReport/PostProgressReport).
type BlockchainIndexProgress struct {
	BlockchainName          string
	EVMStack                string
	LastIngestedBlockNumber int64
	LatestBlockNumber       int64
	Errors                  []BlockchainIndexError
	DuneErrorCounts         int
	RPCErrorCounts          int
	// Since marks the start of the period this report covers.
	Since time.Time
}

// BlockchainIndexError records a single indexing failure: when it happened,
// which block numbers were affected (serialized as a string), the error text,
// and its source.
type BlockchainIndexError struct {
	Timestamp    time.Time
	BlockNumbers string
	Error        string
	Source       string
}