├── testdata
│   ├── block-0000000-0009999.header
│   └── block-0010000-0019999.header
├── README.md
├── .gitignore
├── scripts
│   └── install_bitcoind.sh
├── .golangci.yml
├── LICENSE
├── Dockerfile
├── .github
│   └── workflows
│       ├── docker.yml
│       └── main.yml
├── height-to-hash-cache_test.go
├── models.go
├── Makefile
├── cfilter-files_test.go
├── height-to-hash-cache.go
├── models_test.go
├── header-files_test.go
├── server.go
├── main.go
├── cfilter-files.go
├── header-files.go
├── index.html
├── silent-payments-files_test.go
├── silent-payments-files.go
├── handlers.go
├── go.mod
└── main_test.go

/testdata/block-0000000-0009999.header:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guggero/block-dn/HEAD/testdata/block-0000000-0009999.header
--------------------------------------------------------------------------------
/testdata/block-0010000-0019999.header:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guggero/block-dn/HEAD/testdata/block-0010000-0019999.header
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # block-dn
2 | 
3 | Process BIP-157/158 cfilters and Bitcoin blocks for distribution over a CDN.
4 | 
5 | Read the API documentation at [block-dn.org](https://block-dn.org).
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 | 
8 | # Test binary, built with `go test -c`
9 | *.test
10 | 
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 | 
14 | .unit-test-logs
15 | 
16 | block-dn.log
17 | /block-dn
--------------------------------------------------------------------------------
/scripts/install_bitcoind.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -ev
4 | 
5 | BITCOIND_VERSION=$1
6 | 
7 | # Useful for testing RCs: e.g. TAG_SUFFIX=.0rc1, DIR_SUFFIX=.0rc1
8 | TAG_SUFFIX=
9 | DIR_SUFFIX=.0
10 | 
11 | # Useful for testing against an image pushed to a different Docker repo.
12 | REPO=lightninglabs/bitcoin-core
13 | 
14 | if [ -z "$BITCOIND_VERSION" ]; then
15 |   echo "Must specify a version of bitcoind to install."
16 | echo "Usage: install_bitcoind.sh " 17 | exit 1 18 | fi 19 | 20 | docker pull ${REPO}:${BITCOIND_VERSION}${TAG_SUFFIX} 21 | CONTAINER_ID=$(docker create ${REPO}:${BITCOIND_VERSION}${TAG_SUFFIX}) 22 | sudo docker cp $CONTAINER_ID:/opt/bitcoin-${BITCOIND_VERSION}${DIR_SUFFIX}/bin/bitcoind /usr/local/bin/bitcoind 23 | docker rm $CONTAINER_ID 24 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | run: 4 | timeout: 5m 5 | go: "1.24" 6 | 7 | linters: 8 | default: all 9 | disable: 10 | - gochecknoglobals 11 | - gosec 12 | - funlen 13 | - varnamelen 14 | - wrapcheck 15 | - testpackage 16 | - exhaustruct 17 | - forbidigo 18 | - gocognit 19 | - nestif 20 | - wsl 21 | - cyclop 22 | - gocyclo 23 | - nlreturn 24 | - paralleltest 25 | - ireturn 26 | - maintidx 27 | - noctx 28 | - exhaustive 29 | - depguard 30 | - err113 31 | - mnd 32 | - perfsprint 33 | - godox 34 | - gomoddirectives 35 | - wsl_v5 36 | - noinlineerr 37 | - revive 38 | - thelper 39 | - dupl 40 | - nolintlint 41 | 42 | settings: 43 | govet: 44 | disable: 45 | # Don't report about shadowed variables. 46 | - shadow 47 | whitespace: 48 | multi-func: true 49 | multi-if: true 50 | tagliatelle: 51 | case: 52 | rules: 53 | json: snake 54 | staticcheck: 55 | checks: [ "-SA1019" ] 56 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Oliver Gugger 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.24.10-alpine as builder 2 | 3 | # Force Go to use the cgo based DNS resolver. This is required to ensure DNS 4 | # queries required to connect to linked containers succeed. 5 | ENV GODEBUG netdns=cgo 6 | 7 | # Pass a tag, branch or a commit using build-arg. This allows a docker image to 8 | # be built from a specified Git state. The default image will use the Git tip of 9 | # master by default. 10 | ARG checkout="master" 11 | ARG git_url="https://github.com/guggero/block-dn" 12 | 13 | # Install dependencies and build the binaries. 
14 | RUN apk add --no-cache --update alpine-sdk \ 15 | git \ 16 | make \ 17 | && git clone $git_url /go/src/github.com/guggero/block-dn \ 18 | && cd /go/src/github.com/guggero/block-dn \ 19 | && git checkout $checkout \ 20 | && make install 21 | 22 | # Start a new, final image. 23 | FROM alpine as final 24 | 25 | # Define a root volume for data persistence. 26 | VOLUME /root/.block-dn 27 | 28 | # Add utilities for quality of life and SSL-related reasons. We also require 29 | # curl and gpg for the signature verification script. 30 | RUN apk --no-cache add \ 31 | bash \ 32 | jq \ 33 | ca-certificates \ 34 | gnupg \ 35 | curl 36 | 37 | # Copy the binaries from the builder image. 38 | COPY --from=builder /go/bin/block-dn /bin/ 39 | 40 | EXPOSE 8080 41 | 42 | # Specify the start command and entrypoint as the block-dn daemon. 43 | ENTRYPOINT ["block-dn"] -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Docker image build 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | defaults: 9 | run: 10 | shell: bash 11 | 12 | env: 13 | DOCKER_REPO: guggero 14 | DOCKER_IMAGE: block-dn 15 | 16 | jobs: 17 | main: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Set up QEMU 21 | uses: lightninglabs/gh-actions/setup-qemu-action@2021.01.25.00 22 | 23 | - name: Set up Docker Buildx 24 | uses: lightninglabs/gh-actions/setup-buildx-action@2021.01.25.00 25 | 26 | - name: Login to DockerHub 27 | uses: lightninglabs/gh-actions/login-action@2021.01.25.00 28 | with: 29 | username: ${{ secrets.DOCKER_USERNAME }} 30 | password: ${{ secrets.DOCKER_API_KEY }} 31 | 32 | - name: Set env 33 | run: | 34 | echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV 35 | echo "IMAGE_TAG=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV 36 | 37 | - name: Build and push 38 | id: docker_build 39 | uses: lightninglabs/gh-actions/build-push-action@2021.01.25.00 40 | with: 41 | push: true 42 | platforms: linux/amd64,linux/arm64 43 | tags: "${{ env.DOCKER_REPO }}/${{ env.DOCKER_IMAGE }}:${{ env.IMAGE_TAG }},${{ env.DOCKER_REPO }}/${{ env.DOCKER_IMAGE }}:latest" 44 | build-args: checkout=${{ env.RELEASE_VERSION }} 45 | 46 | - name: Image digest 47 | run: echo ${{ steps.docker_build.outputs.digest }} 48 | -------------------------------------------------------------------------------- /height-to-hash-cache_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | var ( 10 | expectedH2H = map[int32]string{ 11 | 0: "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3" + 12 | "f1b60a8ce26f", 13 | 9_999: "00000000fbc97cc6c599ce9c24dd4a2243e2bfd518eda56e1d5e" + 14 | "47d29e29c3a7", 15 | 10_000: "0000000099c744455f58e6c6e98b671e1bf7f37346bfd4cf5d02" + 16 | "74ad8ee660cb", 17 | 19_999: "00000000ba36eb929dc90170a96ee3efb76cbebee0e0e5c4da9e" + 18 | "b0b6e74d9124", 19 | 20_000: "", 20 | } 21 | ) 22 | 23 | func TestHeightToHashCache(t *testing.T) { 24 | testDir := t.TempDir() 25 | backend, _, _, cleanup := newBitcoind(t, testDir, []string{ 26 | "-regtest", 27 | "-disablewallet", 28 | }) 29 | 30 | t.Cleanup(func() { 31 | require.NoError(t, cleanup()) 32 | }) 33 | 34 | setupLogging(unitTestDir, "debug") 35 | c := newH2HCache(backend) 36 | 37 | bestHeight, err := c.loadFromHeaders("testdata") 38 | require.NoError(t, err) 39 | 40 | require.Equal(t, 19_999, 
int(bestHeight)) 41 | 42 | for height, expectedHashStr := range expectedH2H { 43 | hash, ok := c.heightToHash[height] 44 | 45 | if expectedHashStr == "" { 46 | require.False( 47 | t, ok, "expected no hash for height %d", height, 48 | ) 49 | 50 | _, err := c.getBlockHash(height) 51 | require.ErrorContains( 52 | t, err, "error fetching block hash", 53 | ) 54 | 55 | continue 56 | } 57 | 58 | require.Equal(t, expectedHashStr, hash.String()) 59 | 60 | h, err := c.getBlockHash(height) 61 | require.NoError(t, err) 62 | require.Equal(t, expectedHashStr, h.String()) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - "master" 7 | pull_request: 8 | branches: 9 | - "*" 10 | 11 | concurrency: 12 | # Cancel any previous workflows if they are from a PR or push. 13 | group: ${{ github.event.pull_request.number || github.ref }} 14 | cancel-in-progress: true 15 | 16 | defaults: 17 | run: 18 | shell: bash 19 | 20 | env: 21 | GO_VERSION: 1.24.10 22 | LINT_VERSION: v2.6.2 23 | BITCOIND_VERSION: "30" 24 | 25 | jobs: 26 | ######################## 27 | # lint code 28 | ######################## 29 | lint: 30 | name: lint code 31 | runs-on: ubuntu-latest 32 | steps: 33 | - name: git checkout 34 | uses: actions/checkout@v5 35 | with: 36 | fetch-depth: 0 37 | 38 | - name: setup go ${{ env.GO_VERSION }} 39 | uses: actions/setup-go@v5 40 | with: 41 | go-version: '${{ env.GO_VERSION }}' 42 | 43 | - name: check code format 44 | run: make fmt-check 45 | 46 | - name: golangci-lint 47 | uses: golangci/golangci-lint-action@v9 48 | with: 49 | version: '${{ env.LINT_VERSION }}' 50 | 51 | ######################## 52 | # run unit tests 53 | ######################## 54 | unit-test: 55 | name: run unit tests 56 | runs-on: ubuntu-latest 57 | steps: 58 | - name: git checkout 59 | uses: actions/checkout@v5 60 | 61 | - name: setup go ${{ env.GO_VERSION }} 62 | uses: actions/setup-go@v5 63 | with: 64 | go-version: '${{ env.GO_VERSION }}' 65 | 66 | - name: install bitcoin core 67 | run: ./scripts/install_bitcoind.sh ${{ env.BITCOIND_VERSION }} 68 | 69 | - name: run unit tests 70 | run: make unit 71 | -------------------------------------------------------------------------------- /models.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "fmt" 4 | 5 | type Status struct { 6 | ChainGenesisHash string `json:"chain_genesis_hash"` 7 | ChainName string `json:"chain_name"` 8 | BestBlockHeight int32 `json:"best_block_height"` 9 | BestBlockHash string `json:"best_block_hash"` 10 | BestFilterHeader string `json:"best_filter_header"` 11 | BestFilterHeight int32 `json:"best_filter_height"` 12 | BestSPTweakHeight int32 `json:"best_sptweak_height"` 13 | AllFilesSynced bool `json:"all_files_synced"` 14 | EntriesPerHeaderFile int32 `json:"entries_per_header_file"` 15 | EntriesPerFilterFile int32 `json:"entries_per_filter_file"` 16 | EntriesPerSPTweakFile int32 `json:"entries_per_sptweak_file"` 17 | } 18 | 19 | type SPTweakBlock map[int32]string 20 | 21 | type SPTweakFile struct { 22 | StartHeight int32 `json:"start_height"` 23 | NumBlocks int32 `json:"num_blocks"` 24 | Blocks []SPTweakBlock `json:"blocks"` 25 | } 26 | 27 | func (f *SPTweakFile) TweakAtHeight(height int32) (SPTweakBlock, error) { 28 | var empty SPTweakBlock 29 | if height < 0 { 30 | return empty, 
fmt.Errorf("height must be non-negative") 31 | } 32 | 33 | if height < f.StartHeight { 34 | return empty, fmt.Errorf("height %d out of range (%d to %d)", 35 | height, f.StartHeight, f.StartHeight+f.NumBlocks-1) 36 | } 37 | 38 | if height >= f.StartHeight+f.NumBlocks { 39 | return empty, fmt.Errorf("height %d out of range (%d to %d)", 40 | height, f.StartHeight, f.StartHeight+f.NumBlocks-1) 41 | } 42 | 43 | if int(f.NumBlocks) != len(f.Blocks) { 44 | return empty, fmt.Errorf("internal error: NumBlocks %d does "+ 45 | "not match length of Blocks slice %d", f.NumBlocks, 46 | len(f.Blocks)) 47 | } 48 | 49 | blockIndex := height - f.StartHeight 50 | return f.Blocks[blockIndex], nil 51 | } 52 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PKG := github.com/guggero/block-dn 2 | 3 | GOTEST := go test -v 4 | 5 | GO_BIN := ${GOPATH}/bin 6 | 7 | GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*") 8 | GOLIST := go list $(PKG)/... | grep -v '/vendor/' 9 | 10 | GOBUILD := go build -v 11 | GOINSTALL := go install -v 12 | GOTEST := go test -v 13 | XARGS := xargs -L 1 14 | 15 | VERSION_TAG = $(shell git describe --tags) 16 | VERSION_CHECK = @$(call print, "Building master with date version tag") 17 | 18 | BUILD_SYSTEM = darwin-amd64 \ 19 | linux-386 \ 20 | linux-amd64 \ 21 | linux-armv6 \ 22 | linux-armv7 \ 23 | linux-arm64 \ 24 | windows-386 \ 25 | windows-amd64 \ 26 | windows-arm 27 | 28 | # By default we will build all systems. But with the 'sys' tag, a specific 29 | # system can be specified. This is useful to release for a subset of 30 | # systems/architectures. 31 | ifneq ($(sys),) 32 | BUILD_SYSTEM = $(sys) 33 | endif 34 | 35 | TEST_TAGS := bitcoind integration dev 36 | TEST_FLAGS = -test.timeout=20m -tags="$(TEST_TAGS)" 37 | 38 | UNIT := $(GOLIST) | $(XARGS) env $(GOTEST) $(TEST_FLAGS) 39 | LDFLAGS := -X main.Commit=$(shell git describe --tags) 40 | RELEASE_LDFLAGS := -s -w -buildid= $(LDFLAGS) 41 | 42 | GREEN := "\\033[0;32m" 43 | NC := "\\033[0m" 44 | define print 45 | echo $(GREEN)$1$(NC) 46 | endef 47 | 48 | default: build 49 | 50 | unit: 51 | @$(call print, "Running unit tests.") 52 | $(UNIT) 53 | 54 | build: 55 | @$(call print, "Building block-dn.") 56 | $(GOBUILD) -ldflags "$(LDFLAGS)" ./... 57 | 58 | install: 59 | @$(call print, "Installing block-dn.") 60 | $(GOINSTALL) -ldflags "$(LDFLAGS)" ./... 
61 | 62 | fmt: 63 | @$(call print, "Fixing imports.") 64 | go tool gosimports -w $(GOFILES_NOVENDOR) 65 | @$(call print, "Formatting source.") 66 | gofmt -l -w -s $(GOFILES_NOVENDOR) 67 | 68 | fmt-check: fmt 69 | @$(call print, "Checking fmt results.") 70 | if test -n "$$(git status --porcelain)"; then echo "code not formatted correctly, please run `make fmt` again!"; git status; git diff; exit 1; fi 71 | 72 | lint: 73 | @$(call print, "Linting source.") 74 | go tool golangci-lint run -v $(LINT_WORKERS) 75 | 76 | docs: install 77 | @$(call print, "Rendering docs.") 78 | cfilter-cdn doc 79 | -------------------------------------------------------------------------------- /cfilter-files_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "sync" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | const ( 14 | filtersPerFile = 100 15 | emptyFilterSize int64 = 5 16 | 17 | emptyFilterFileSize int64 = headersPerFile * emptyFilterSize 18 | ) 19 | 20 | func TestCFilterFilesUpdate(t *testing.T) { 21 | testDir := ".unit-test-logs" 22 | miner, backend, _, _ := setupBackend(t, testDir) 23 | 24 | // Mine initial blocks. The miner starts with 200 blocks already mined. 25 | _ = miner.MineEmptyBlocks(initialBlocks - int(totalStartupBlocks)) 26 | 27 | // Wait until the backend is fully synced to the miner. 28 | waitBackendSync(t, backend, miner) 29 | 30 | // First run: start from scratch. 31 | dataDir := t.TempDir() 32 | quit := make(chan struct{}) 33 | h2hCache := newH2HCache(backend) 34 | hf := newCFilterFiles( 35 | filtersPerFile, backend, quit, dataDir, &testParams, h2hCache, 36 | ) 37 | 38 | var wg sync.WaitGroup 39 | 40 | // Wait for the initial blocks to be written. 41 | waitForTargetHeight(t, &wg, hf, initialBlocks) 42 | 43 | // Check files. 44 | filterDir := filepath.Join(dataDir, FilterFileDir) 45 | files, err := os.ReadDir(filterDir) 46 | require.NoError(t, err) 47 | require.Len(t, files, 4) 48 | 49 | // Check file names and sizes. 50 | checkFilterFiles(t, filterDir, 0, 99) 51 | checkFilterFiles(t, filterDir, 100, 199) 52 | checkFilterFiles(t, filterDir, 200, 299) 53 | checkFilterFiles(t, filterDir, 300, 399) 54 | 55 | // Stop the service. 56 | close(quit) 57 | wg.Wait() 58 | 59 | // Second run: restart and continue. 60 | const finalBlocks = 550 61 | _ = miner.MineEmptyBlocks(finalBlocks - initialBlocks) 62 | 63 | // Wait until the backend is fully synced to the miner. 64 | waitBackendSync(t, backend, miner) 65 | 66 | quit = make(chan struct{}) 67 | hf = newCFilterFiles( 68 | filtersPerFile, backend, quit, dataDir, &testParams, h2hCache, 69 | ) 70 | 71 | // Wait for the final blocks to be written. 72 | waitForTargetHeight(t, &wg, hf, finalBlocks) 73 | 74 | // Check files again. 75 | files, err = os.ReadDir(filterDir) 76 | require.NoError(t, err) 77 | require.Len(t, files, 5) 78 | 79 | // Check new file names and sizes. 80 | checkFilterFiles(t, filterDir, 400, 499) 81 | 82 | // Stop the service. 
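	// Closing quit makes updateFiles return errServerShutdown, and
	// wg.Wait blocks until the goroutine started in waitForTargetHeight
	// has exited.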
83 | 	close(quit)
84 | 	wg.Wait()
85 | }
86 | 
87 | func checkFilterFiles(t *testing.T, filterDir string, start, end int32) {
88 | 	checkFile(
89 | 		t, fmt.Sprintf(FilterFileNamePattern, filterDir, start, end),
90 | 		emptyFilterFileSize,
91 | 	)
92 | }
93 | 
--------------------------------------------------------------------------------
/height-to-hash-cache.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"fmt"
5 | 	"io"
6 | 	"os"
7 | 	"sync"
8 | 	"sync/atomic"
9 | 
10 | 	"github.com/btcsuite/btcd/chaincfg/chainhash"
11 | 	"github.com/btcsuite/btcd/rpcclient"
12 | 	"github.com/btcsuite/btcd/wire"
13 | )
14 | 
15 | // heightToHashCache is an in-memory cache of height to block hash.
16 | type heightToHashCache struct {
17 | 	sync.RWMutex
18 | 
19 | 	client       *rpcclient.Client
20 | 	heightToHash map[int32]chainhash.Hash
21 | 	bestHeight   atomic.Int32
22 | }
23 | 
24 | func newH2HCache(client *rpcclient.Client) *heightToHashCache {
25 | 	return &heightToHashCache{
26 | 		client: client,
27 | 		heightToHash: make(
28 | 			map[int32]chainhash.Hash, DefaultHeightToHashCacheSize,
29 | 		),
30 | 	}
31 | }
32 | 
33 | // loadFromHeaders loads the height to block hash mapping from the header
34 | // files stored in headerDir. It returns the highest height loaded.
35 | func (c *heightToHashCache) loadFromHeaders(headerDir string) (int32, error) {
36 | 	c.Lock()
37 | 	defer c.Unlock()
38 | 
39 | 	fileNames, err := listFiles(headerDir, HeaderFileSuffix)
40 | 	if err != nil {
41 | 		return 0, fmt.Errorf("unable to list header files: %w", err)
42 | 	}
43 | 
44 | 	var (
45 | 		height int32
46 | 		header wire.BlockHeader
47 | 	)
48 | 	for _, fileName := range fileNames {
49 | 		log.Debugf("Loading height to hash cache from header file: %s",
50 | 			fileName)
51 | 		file, err := os.Open(fileName)
52 | 		if err != nil {
53 | 			return 0, fmt.Errorf("unable to open header file %s: "+
54 | 				"%w", fileName, err)
55 | 		}
56 | 
57 | 	outer:
58 | 		for {
59 | 			err := header.Deserialize(file)
60 | 			switch err {
61 | 			// No error, we read a header successfully.
62 | 			case nil:
63 | 				c.heightToHash[height] = header.BlockHash()
64 | 				height++
65 | 
66 | 			// EOF means we reached the end of the file. Break and
67 | 			// continue with the next one.
68 | 			// nolint:errorlint
69 | 			case io.EOF:
70 | 				_ = file.Close()
71 | 				break outer
72 | 
73 | 			default:
74 | 				_ = file.Close()
75 | 				return 0, fmt.Errorf("unable to deserialize "+
76 | 					"header at height %d from file %s: %w",
77 | 					height, fileName, err)
78 | 			}
79 | 		}
80 | 	}
81 | 
82 | 	c.bestHeight.Store(height - 1)
83 | 	return c.bestHeight.Load(), nil
84 | }
85 | 
86 | func (c *heightToHashCache) getBlockHash(h int32) (*chainhash.Hash, error) {
87 | 	c.RLock()
88 | 	defer c.RUnlock()
89 | 
90 | 	hash, ok := c.heightToHash[h]
91 | 	if !ok {
92 | 		hash, err := c.client.GetBlockHash(int64(h))
93 | 		if err != nil {
94 | 			return nil, fmt.Errorf("error fetching block hash "+
95 | 				"for height %d from backend: %w", h, err)
96 | 		}
97 | 
98 | 		// Cache the fetched hash.
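		// NOTE: sync.RWMutex cannot atomically upgrade a read lock to
		// a write lock, so we briefly drop the read lock here; a
		// concurrent caller may store the same entry in the gap,
		// which is harmless since the value written is identical.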
99 | c.RUnlock() 100 | c.Lock() 101 | c.heightToHash[h] = *hash 102 | c.Unlock() 103 | c.RLock() 104 | 105 | return hash, nil 106 | } 107 | 108 | return &hash, nil 109 | } 110 | -------------------------------------------------------------------------------- /models_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | // TestSPTweakFileTweakAtHeight exercises happy and edge cases for SPTweakFile's 10 | // TweakAtHeight method. 11 | func TestSPTweakFileTweakAtHeight(t *testing.T) { 12 | tests := []struct { 13 | name string 14 | file SPTweakFile 15 | height int32 16 | want SPTweakBlock 17 | expectErr bool 18 | }{ 19 | { 20 | name: "negative height returns error", 21 | file: SPTweakFile{ 22 | StartHeight: 10, 23 | NumBlocks: 2, 24 | Blocks: []SPTweakBlock{ 25 | {10: "a"}, 26 | {11: "b"}, 27 | }, 28 | }, 29 | height: -1, 30 | expectErr: true, 31 | }, 32 | 33 | { 34 | name: "height before start errors", 35 | file: SPTweakFile{ 36 | StartHeight: 10, 37 | NumBlocks: 2, 38 | Blocks: []SPTweakBlock{ 39 | {10: "a"}, 40 | {11: "b"}, 41 | }, 42 | }, 43 | height: 9, 44 | expectErr: true, 45 | }, 46 | 47 | { 48 | name: "height equal start returns first block", 49 | file: SPTweakFile{ 50 | StartHeight: 10, 51 | NumBlocks: 3, 52 | Blocks: []SPTweakBlock{ 53 | {10: "aa"}, 54 | {11: "bb"}, 55 | {12: "cc"}, 56 | }, 57 | }, 58 | height: 10, 59 | want: SPTweakBlock{10: "aa"}, 60 | }, 61 | 62 | { 63 | name: "middle height returns middle block", 64 | file: SPTweakFile{ 65 | StartHeight: 10, 66 | NumBlocks: 3, 67 | Blocks: []SPTweakBlock{ 68 | {10: "aa"}, 69 | {11: "bb"}, 70 | {12: "cc"}, 71 | }, 72 | }, 73 | height: 11, 74 | want: SPTweakBlock{11: "bb"}, 75 | }, 76 | 77 | { 78 | name: "last valid height returns last block", 79 | file: SPTweakFile{ 80 | StartHeight: 100, 81 | NumBlocks: 2, 82 | Blocks: []SPTweakBlock{ 83 | {100: "x"}, 84 | {101: "y"}, 85 | }, 86 | }, 87 | height: 101, 88 | want: SPTweakBlock{101: "y"}, 89 | }, 90 | 91 | { 92 | name: "height equal start+num triggers out of range " + 93 | "error", 94 | file: SPTweakFile{ 95 | StartHeight: 50, 96 | NumBlocks: 2, 97 | Blocks: []SPTweakBlock{ 98 | {50: "m"}, 99 | {51: "n"}, 100 | }, 101 | }, 102 | height: 52, 103 | expectErr: true, 104 | }, 105 | 106 | { 107 | name: "blocks shorter than NumBlocks errors", 108 | file: SPTweakFile{ 109 | StartHeight: 0, 110 | NumBlocks: 2, 111 | Blocks: []SPTweakBlock{ 112 | {0: "only"}, 113 | }, 114 | }, 115 | height: 1, 116 | expectErr: true, 117 | }, 118 | } 119 | 120 | for _, tc := range tests { 121 | t.Run(tc.name, func(t *testing.T) { 122 | got, err := tc.file.TweakAtHeight(tc.height) 123 | if tc.expectErr { 124 | require.Error(t, err) 125 | 126 | return 127 | } 128 | 129 | require.NoError(t, err) 130 | require.Equal(t, tc.want, got) 131 | }) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /header-files_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "sync" 8 | "testing" 9 | 10 | "github.com/lightningnetwork/lnd/lntest/wait" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | const ( 15 | headersPerFile = 100 16 | initialBlocks = 450 17 | 18 | headerFileSize int64 = headersPerFile * headerSize 19 | filterFileSize int64 = headersPerFile * filterHeadersSize 20 | ) 21 | 22 | func 
TestHeaderFilesUpdate(t *testing.T) { 23 | miner, backend, _, _ := setupBackend(t, unitTestDir) 24 | 25 | // Mine initial blocks. The miner starts with 438 blocks already mined. 26 | _ = miner.MineEmptyBlocks(initialBlocks - int(totalStartupBlocks)) 27 | 28 | // Wait until the backend is fully synced to the miner. 29 | waitBackendSync(t, backend, miner) 30 | 31 | // First run: start from scratch. 32 | dataDir := t.TempDir() 33 | quit := make(chan struct{}) 34 | h2hCache := newH2HCache(backend) 35 | hf := newHeaderFiles( 36 | headersPerFile, backend, quit, dataDir, &testParams, h2hCache, 37 | ) 38 | 39 | var wg sync.WaitGroup 40 | 41 | // Wait for the initial blocks to be written. 42 | waitForTargetHeight(t, &wg, hf, initialBlocks) 43 | 44 | // Check files. 45 | headerDir := filepath.Join(dataDir, HeaderFileDir) 46 | files, err := os.ReadDir(headerDir) 47 | require.NoError(t, err) 48 | require.Len(t, files, 8) 49 | 50 | // Check file names and sizes. 51 | checkHeaderFiles(t, headerDir, 0, 99) 52 | checkHeaderFiles(t, headerDir, 100, 199) 53 | checkHeaderFiles(t, headerDir, 200, 299) 54 | checkHeaderFiles(t, headerDir, 300, 399) 55 | 56 | // Stop the service. 57 | close(quit) 58 | wg.Wait() 59 | 60 | // Second run: restart and continue. 61 | const finalBlocks = 550 62 | _ = miner.MineEmptyBlocks(finalBlocks - initialBlocks) 63 | 64 | // Wait until the backend is fully synced to the miner. 65 | waitBackendSync(t, backend, miner) 66 | 67 | quit = make(chan struct{}) 68 | hf = newHeaderFiles( 69 | headersPerFile, backend, quit, dataDir, &testParams, h2hCache, 70 | ) 71 | 72 | // Wait for the final blocks to be written. 73 | waitForTargetHeight(t, &wg, hf, finalBlocks) 74 | 75 | // Check files again. 76 | files, err = os.ReadDir(headerDir) 77 | require.NoError(t, err) 78 | require.Len(t, files, 10) 79 | 80 | // Check new file names and sizes. 81 | checkHeaderFiles(t, headerDir, 400, 499) 82 | 83 | // Stop the service. 84 | close(quit) 85 | wg.Wait() 86 | } 87 | 88 | type fileWriter interface { 89 | updateFiles(targetHeight int32) error 90 | getCurrentHeight() int32 91 | } 92 | 93 | func waitForTargetHeight(t *testing.T, wg *sync.WaitGroup, hf fileWriter, 94 | targetHeight int32) { 95 | 96 | t.Helper() 97 | 98 | wg.Add(1) 99 | go func() { 100 | defer wg.Done() 101 | err := hf.updateFiles(targetHeight) 102 | 103 | // nolint:testifylint 104 | require.ErrorIs(t, err, errServerShutdown) 105 | }() 106 | 107 | // Wait for sync. 
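	// wait.NoError polls the closure until it returns nil or until
	// syncTimeout elapses, whichever comes first.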
108 | err := wait.NoError(func() error { 109 | if hf.getCurrentHeight() != targetHeight { 110 | return fmt.Errorf("not synced yet, current "+ 111 | "height %d", hf.getCurrentHeight()) 112 | } 113 | return nil 114 | }, syncTimeout) 115 | require.NoError(t, err) 116 | } 117 | 118 | func checkHeaderFiles(t *testing.T, headerDir string, start, end int32) { 119 | checkFile( 120 | t, fmt.Sprintf(HeaderFileNamePattern, headerDir, start, end), 121 | headerFileSize, 122 | ) 123 | checkFile(t, fmt.Sprintf( 124 | FilterHeaderFileNamePattern, headerDir, start, end, 125 | ), filterFileSize) 126 | } 127 | 128 | func checkFile(t *testing.T, fileName string, expectedSize int64) { 129 | info, err := os.Stat(fileName) 130 | require.NoError(t, err) 131 | require.Equal(t, expectedSize, info.Size()) 132 | } 133 | -------------------------------------------------------------------------------- /server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "net/http" 8 | "path/filepath" 9 | "sync" 10 | "time" 11 | 12 | "github.com/btcsuite/btcd/chaincfg" 13 | "github.com/btcsuite/btcd/rpcclient" 14 | "github.com/gorilla/mux" 15 | "github.com/lightningnetwork/lnd/fn/v2" 16 | ) 17 | 18 | var ( 19 | defaultTimeout = 5 * time.Second 20 | blockPollInterval = time.Second 21 | 22 | errServerShutdown = errors.New("server shutting down") 23 | ) 24 | 25 | type server struct { 26 | lightMode bool 27 | indexSPTweakData bool 28 | baseDir string 29 | listenAddr string 30 | chainCfg *rpcclient.ConnConfig 31 | chainParams *chaincfg.Params 32 | reOrgSafeDepth uint32 33 | chain *rpcclient.Client 34 | router *mux.Router 35 | httpServer *http.Server 36 | 37 | headersPerFile int32 38 | filtersPerFile int32 39 | spTweaksPerFile int32 40 | 41 | h2hCache *heightToHashCache 42 | headerFiles *headerFiles 43 | cFilterFiles *cFilterFiles 44 | spTweakFiles *spTweakFiles 45 | 46 | wg sync.WaitGroup 47 | errs *fn.ConcurrentQueue[error] 48 | quit chan struct{} 49 | } 50 | 51 | func newServer(lightMode, indexSPTweakData bool, baseDir, listenAddr string, 52 | chainCfg *rpcclient.ConnConfig, chainParams *chaincfg.Params, 53 | reOrgSafeDepth uint32, headersPerFile, filtersPerFile, 54 | spTweaksPerFile int32) *server { 55 | 56 | s := &server{ 57 | lightMode: lightMode, 58 | indexSPTweakData: indexSPTweakData, 59 | baseDir: baseDir, 60 | listenAddr: listenAddr, 61 | chainCfg: chainCfg, 62 | chainParams: chainParams, 63 | reOrgSafeDepth: reOrgSafeDepth, 64 | 65 | headersPerFile: headersPerFile, 66 | filtersPerFile: filtersPerFile, 67 | spTweaksPerFile: spTweaksPerFile, 68 | 69 | errs: fn.NewConcurrentQueue[error](2), 70 | quit: make(chan struct{}), 71 | } 72 | 73 | s.router = s.createRouter() 74 | 75 | return s 76 | } 77 | 78 | func (s *server) start() error { 79 | client, err := rpcclient.New(s.chainCfg, nil) 80 | if err != nil { 81 | return fmt.Errorf("error connecting to bitcoind: %w", err) 82 | } 83 | s.chain = client 84 | 85 | s.h2hCache = newH2HCache(client) 86 | 87 | s.headerFiles = newHeaderFiles( 88 | s.headersPerFile, s.chain, s.quit, s.baseDir, 89 | s.chainParams, s.h2hCache, 90 | ) 91 | s.cFilterFiles = newCFilterFiles( 92 | s.filtersPerFile, s.chain, s.quit, s.baseDir, 93 | s.chainParams, s.h2hCache, 94 | ) 95 | 96 | // We preload all the headers into the height to hash cache on startup. 
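	// This keeps height-to-hash lookups in memory instead of hitting the
	// bitcoind RPC backend for every block processed below.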
97 | headersDir := filepath.Join(s.baseDir, HeaderFileDir) 98 | cacheBestHeight, err := s.h2hCache.loadFromHeaders(headersDir) 99 | if err != nil { 100 | return fmt.Errorf("error loading headers into cache: %w", err) 101 | } 102 | 103 | // We also want to verify the current height on startup, if there were 104 | // any headers loaded into the cache. 105 | if cacheBestHeight >= 0 { 106 | cacheBestBlock, err := s.h2hCache.getBlockHash(cacheBestHeight) 107 | if err != nil { 108 | return fmt.Errorf("error getting best block from "+ 109 | "cache: %w", err) 110 | } 111 | 112 | backendBlock, err := s.chain.GetBlockHash( 113 | int64(cacheBestHeight), 114 | ) 115 | if err != nil { 116 | return fmt.Errorf("error getting best block from "+ 117 | "backend: %w", err) 118 | } 119 | if *backendBlock != *cacheBestBlock { 120 | return fmt.Errorf("header mismatch at height %d: "+ 121 | "cache has %s, backend has %s", cacheBestHeight, 122 | cacheBestBlock, backendBlock.String()) 123 | } 124 | } 125 | 126 | info, err := s.chain.GetBlockChainInfo() 127 | if err != nil { 128 | return fmt.Errorf("error getting block chain info: %w", err) 129 | } 130 | 131 | log.Debugf("Backend best block hash: %s, height: %d", 132 | info.BestBlockHash, info.Blocks) 133 | 134 | s.httpServer = &http.Server{ 135 | Addr: s.listenAddr, 136 | Handler: s.router, 137 | WriteTimeout: defaultTimeout, 138 | ReadTimeout: defaultTimeout, 139 | } 140 | s.errs.Start() 141 | 142 | s.wg.Add(1) 143 | go func() { 144 | defer func() { 145 | s.wg.Done() 146 | log.Infof("Web server finished") 147 | }() 148 | 149 | log.Infof("Starting web server at %v", s.listenAddr) 150 | err := s.httpServer.ListenAndServe() 151 | if err != nil && !errors.Is(err, http.ErrServerClosed) { 152 | log.Errorf("Error starting server: %v", err) 153 | s.errs.ChanIn() <- err 154 | } 155 | }() 156 | 157 | // If we're running in light mode, we don't need to create any files, 158 | // so we can just return here. 159 | if s.lightMode { 160 | return nil 161 | } 162 | 163 | s.wg.Add(1) 164 | go func() { 165 | defer func() { 166 | s.wg.Done() 167 | log.Infof("Background header file update finished") 168 | }() 169 | 170 | log.Infof("Starting background header file update") 171 | err := s.headerFiles.updateFiles(info.Blocks) 172 | if err != nil && !errors.Is(err, errServerShutdown) { 173 | log.Errorf("Error updating header files: %v", err) 174 | s.errs.ChanIn() <- err 175 | } 176 | }() 177 | 178 | s.wg.Add(1) 179 | go func() { 180 | defer func() { 181 | s.wg.Done() 182 | log.Infof("Background filter file update finished") 183 | }() 184 | 185 | log.Infof("Starting background filter file update") 186 | err := s.cFilterFiles.updateFiles(info.Blocks) 187 | if err != nil && !errors.Is(err, errServerShutdown) { 188 | log.Errorf("Error updating filter files: %v", err) 189 | s.errs.ChanIn() <- err 190 | } 191 | }() 192 | 193 | // If we're not indexing SP tweak data, we can return here. 
194 | if !s.indexSPTweakData { 195 | return nil 196 | } 197 | 198 | s.spTweakFiles = newSPTweakFiles( 199 | s.spTweaksPerFile, s.chain, s.quit, s.baseDir, s.chainParams, 200 | s.h2hCache, 201 | ) 202 | 203 | s.wg.Add(1) 204 | go func() { 205 | defer func() { 206 | s.wg.Done() 207 | log.Infof("Background SP tweak data file update " + 208 | "finished") 209 | }() 210 | 211 | log.Infof("Starting background SP tweak data file update") 212 | err := s.spTweakFiles.updateFiles(info.Blocks) 213 | if err != nil && !errors.Is(err, errServerShutdown) { 214 | log.Errorf("Error updating SP tweak data file: %v", err) 215 | s.errs.ChanIn() <- err 216 | } 217 | }() 218 | 219 | return nil 220 | } 221 | 222 | func (s *server) stop() error { 223 | close(s.quit) 224 | 225 | log.Infof("Shutting down, waiting for background processes to finish") 226 | 227 | var stopErr error 228 | err := s.httpServer.Shutdown(context.Background()) 229 | if err != nil { 230 | log.Errorf("Error shutting down web server: %v", err) 231 | stopErr = fmt.Errorf("error shutting down web server: %w", err) 232 | } 233 | 234 | s.wg.Wait() 235 | s.errs.Stop() 236 | 237 | select { 238 | case err, ok := <-s.errs.ChanOut(): 239 | if ok { 240 | log.Errorf("Error shutting down: %v", err) 241 | stopErr = fmt.Errorf("error shutting down: %w", err) 242 | } 243 | 244 | default: 245 | } 246 | 247 | s.chain.Shutdown() 248 | 249 | log.Infof("Shutdown complete") 250 | 251 | return stopErr 252 | } 253 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/btcsuite/btcd/chaincfg" 9 | "github.com/btcsuite/btcd/rpcclient" 10 | "github.com/btcsuite/btclog/v2" 11 | "github.com/lightningnetwork/lnd/build" 12 | "github.com/lightningnetwork/lnd/signal" 13 | "github.com/spf13/cobra" 14 | ) 15 | 16 | const ( 17 | version = "1.2.3" 18 | 19 | defaultListenPort = 8080 20 | 21 | defaultMainnetReOrgSafeDepth = 6 22 | defaultTestnetReOrgSafeDepth = 100 23 | ) 24 | 25 | var ( 26 | // Commit will be injected at compile-time with the `-X` ldflag. 
27 | Commit = "" 28 | 29 | logMgr *build.SubLoggerManager 30 | log btclog.Logger 31 | ) 32 | 33 | type mainCommand struct { 34 | testnet bool 35 | testnet4 bool 36 | regtest bool 37 | signet bool 38 | 39 | lightMode bool 40 | indexSPTweakData bool 41 | 42 | baseDir string 43 | logDir string 44 | logLevel string 45 | indexPage string 46 | 47 | listenAddr string 48 | 49 | reOrgSafeDepth uint32 50 | 51 | bitcoindConfig *rpcclient.ConnConfig 52 | cmd *cobra.Command 53 | } 54 | 55 | func main() { 56 | workDir, err := os.Getwd() 57 | if err != nil { 58 | fmt.Printf("Error: %v", err) 59 | _, _ = fmt.Fprintln(os.Stderr, err) 60 | os.Exit(1) 61 | } 62 | 63 | cc := &mainCommand{ 64 | listenAddr: fmt.Sprintf("localhost:%d", defaultListenPort), 65 | bitcoindConfig: &rpcclient.ConnConfig{ 66 | DisableTLS: true, 67 | HTTPPostMode: true, 68 | }, 69 | } 70 | cc.cmd = &cobra.Command{ 71 | Use: "block-dn", 72 | Short: "block-dn creates static files for serving compact " + 73 | "filters and blocks over HTTP", 74 | Long: ``, 75 | Version: fmt.Sprintf("v%s, commit %s", version, Commit), 76 | Run: func(_ *cobra.Command, _ []string) { 77 | chainParams := &chaincfg.MainNetParams 78 | headersPerFile := int32(DefaultHeadersPerFile) 79 | filtersPerFile := int32(DefaultFiltersPerFile) 80 | spTweaksPerFile := int32(DefaultSPTweaksPerFile) 81 | 82 | switch { 83 | case cc.testnet: 84 | chainParams = &chaincfg.TestNet3Params 85 | 86 | // The test networks are more prone to longer 87 | // re-orgs. 88 | if cc.reOrgSafeDepth == 89 | defaultMainnetReOrgSafeDepth { 90 | 91 | cc.reOrgSafeDepth = 92 | defaultTestnetReOrgSafeDepth 93 | } 94 | 95 | case cc.testnet4: 96 | chainParams = &chaincfg.TestNet4Params 97 | 98 | // The test networks are more prone to longer 99 | // re-orgs. 100 | if cc.reOrgSafeDepth == 101 | defaultMainnetReOrgSafeDepth { 102 | 103 | cc.reOrgSafeDepth = 104 | defaultTestnetReOrgSafeDepth 105 | } 106 | 107 | case cc.signet: 108 | chainParams = &chaincfg.SigNetParams 109 | 110 | case cc.regtest: 111 | chainParams = &chaincfg.RegressionNetParams 112 | 113 | headersPerFile = DefaultRegtestHeadersPerFile 114 | filtersPerFile = DefaultRegtestFiltersPerFile 115 | spTweaksPerFile = DefaultRegtestSPTweaksPerFile 116 | } 117 | 118 | setupLogging(cc.logDir, cc.logLevel) 119 | log.Infof("block-dn version v%s commit %s", version, 120 | Commit) 121 | 122 | if !cc.lightMode && cc.baseDir == "" { 123 | log.Errorf("Base directory must be set if " + 124 | "not running in light mode") 125 | return 126 | } 127 | 128 | if cc.indexPage != "" { 129 | pageContent, err := os.ReadFile(cc.indexPage) 130 | if err != nil { 131 | log.Errorf("Error reading index page "+ 132 | "file '%s': %v", cc.indexPage, 133 | err) 134 | return 135 | } 136 | 137 | indexHTML = string(pageContent) 138 | } 139 | 140 | server := newServer( 141 | cc.lightMode, cc.indexSPTweakData, cc.baseDir, 142 | cc.listenAddr, cc.bitcoindConfig, chainParams, 143 | cc.reOrgSafeDepth, headersPerFile, 144 | filtersPerFile, spTweaksPerFile, 145 | ) 146 | err := server.start() 147 | if err != nil { 148 | log.Errorf("Error starting server: %v", err) 149 | return 150 | } 151 | 152 | interceptor, err := signal.Intercept() 153 | if err != nil { 154 | log.Errorf("Error intercepting signals: %v", 155 | err) 156 | return 157 | } 158 | 159 | select { 160 | case <-interceptor.ShutdownChannel(): 161 | log.Infof("Received shutdown signal") 162 | 163 | case err := <-server.errs.ChanOut(): 164 | log.Errorf("Error running server: %v", err) 165 | } 166 | 167 | err = server.stop() 168 | if 
err != nil { 169 | log.Errorf("Error stopping server: %v", err) 170 | } 171 | }, 172 | DisableAutoGenTag: true, 173 | } 174 | cc.cmd.PersistentFlags().BoolVar( 175 | &cc.testnet, "testnet", false, "Indicates if testnet "+ 176 | "parameters should be used", 177 | ) 178 | cc.cmd.PersistentFlags().BoolVar( 179 | &cc.testnet4, "testnet4", false, "Indicates if testnet4 "+ 180 | "parameters should be used", 181 | ) 182 | cc.cmd.PersistentFlags().BoolVar( 183 | &cc.regtest, "regtest", false, "Indicates if regtest "+ 184 | "parameters should be used", 185 | ) 186 | cc.cmd.PersistentFlags().BoolVar( 187 | &cc.signet, "signet", false, "Indicates if signet "+ 188 | "parameters should be used", 189 | ) 190 | cc.cmd.PersistentFlags().BoolVar( 191 | &cc.lightMode, "light-mode", false, "Indicates if the "+ 192 | "server should run in light mode which creates no "+ 193 | "files on disk and therefore requires zero disk "+ 194 | "space; but only the status and block endpoints are "+ 195 | "available in this mode", 196 | ) 197 | cc.cmd.PersistentFlags().BoolVar( 198 | &cc.indexSPTweakData, "index-sp-tweak-data", false, 199 | "Indicates if the server should index BIP-0352 Silent "+ 200 | "Payments tweak data that allows light clients to "+ 201 | "scan the chain for inbound SP more efficiently; "+ 202 | "this requires every block since the activation of "+ 203 | "Taproot to be indexed which may take a while", 204 | ) 205 | cc.cmd.PersistentFlags().StringVar( 206 | &cc.baseDir, "base-dir", "", "The base directory "+ 207 | "where the generated files will be stored", 208 | ) 209 | cc.cmd.PersistentFlags().StringVar( 210 | &cc.logDir, "log-dir", workDir, "The log directory where the "+ 211 | "log file will be written", 212 | ) 213 | cc.cmd.PersistentFlags().StringVar( 214 | &cc.logLevel, "log-level", "info", "The log level for the "+ 215 | "logger: debug, info, warn, error, critical", 216 | ) 217 | cc.cmd.PersistentFlags().StringVar( 218 | &cc.indexPage, "index-page", "", "Full path to the index.html "+ 219 | "that should be used instead of the default one that "+ 220 | "comes with the project", 221 | ) 222 | cc.cmd.PersistentFlags().StringVar( 223 | &cc.listenAddr, "listen-addr", cc.listenAddr, "The local "+ 224 | "host:port to listen on", 225 | ) 226 | cc.cmd.PersistentFlags().StringVar( 227 | &cc.bitcoindConfig.Host, "bitcoind-host", "localhost:8332", 228 | "The host:port of the bitcoind instance to connect to", 229 | ) 230 | cc.cmd.PersistentFlags().StringVar( 231 | &cc.bitcoindConfig.User, "bitcoind-user", "", 232 | "The RPC username of the bitcoind instance to connect to", 233 | ) 234 | cc.cmd.PersistentFlags().StringVar( 235 | &cc.bitcoindConfig.Pass, "bitcoind-pass", "", 236 | "The RPC password of the bitcoind instance to connect to", 237 | ) 238 | cc.cmd.PersistentFlags().Uint32VarP( 239 | &cc.reOrgSafeDepth, "reorg-safe-depth", "", 240 | defaultMainnetReOrgSafeDepth, 241 | "The number of blocks to wait before considering a block "+ 242 | "safe from re-orgs", 243 | ) 244 | 245 | if err := cc.cmd.Execute(); err != nil { 246 | fmt.Printf("Error: %v", err) 247 | _, _ = fmt.Fprintln(os.Stderr, err) 248 | os.Exit(1) 249 | } 250 | } 251 | 252 | func setupLogging(logDir, logLevel string) { 253 | logConfig := build.DefaultLogConfig() 254 | logWriter := build.NewRotatingLogWriter() 255 | logMgr = build.NewSubLoggerManager(build.NewDefaultLogHandlers( 256 | logConfig, logWriter, 257 | )...) 
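	// "BLDN" is the subsystem tag that btclog prefixes onto every log
	// line emitted by this daemon's logger.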
258 | log = build.NewSubLogger("BLDN", genSubLogger(logMgr)) 259 | 260 | setSubLogger("BLDN", log) 261 | err := logWriter.InitLogRotator( 262 | logConfig.File, filepath.Join(logDir, "block-dn.log"), 263 | ) 264 | if err != nil { 265 | panic(err) 266 | } 267 | err = build.ParseAndSetDebugLevels(logLevel, logMgr) 268 | if err != nil { 269 | panic(err) 270 | } 271 | } 272 | 273 | // genSubLogger creates a sub logger with an empty shutdown function. 274 | func genSubLogger(root *build.SubLoggerManager) func(string) btclog.Logger { 275 | return func(s string) btclog.Logger { 276 | return root.GenSubLogger(s, func() {}) 277 | } 278 | } 279 | 280 | // setSubLogger is a helper method to conveniently register the logger of a sub 281 | // system. 282 | func setSubLogger(subsystem string, logger btclog.Logger, 283 | useLoggers ...func(btclog.Logger)) { 284 | 285 | logMgr.RegisterSubLogger(subsystem, logger) 286 | for _, useLogger := range useLoggers { 287 | useLogger(logger) 288 | } 289 | } 290 | -------------------------------------------------------------------------------- /cfilter-files.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "io" 7 | "os" 8 | "path/filepath" 9 | "regexp" 10 | "sort" 11 | "strconv" 12 | "sync" 13 | "sync/atomic" 14 | "time" 15 | 16 | "github.com/btcsuite/btcd/btcjson" 17 | "github.com/btcsuite/btcd/chaincfg" 18 | "github.com/btcsuite/btcd/chaincfg/chainhash" 19 | "github.com/btcsuite/btcd/rpcclient" 20 | "github.com/btcsuite/btcd/wire" 21 | ) 22 | 23 | const ( 24 | DefaultHeightToHashCacheSize = 100_000 25 | DefaultHeadersPerFile = 100_000 26 | DefaultFiltersPerFile = 2_000 27 | 28 | DefaultRegtestHeadersPerFile = 2_000 29 | DefaultRegtestFiltersPerFile = 2_000 30 | 31 | HeaderFileDir = "headers" 32 | FilterFileDir = "filters" 33 | 34 | HeaderFileSuffix = ".header" 35 | HeaderFileNamePattern = "%s/block-%07d-%07d.header" 36 | HeaderFileNameExtractPattern = "block-[0-9]{7}-([0-9]{7})\\.header" 37 | FilterFileSuffix = ".cfilter" 38 | FilterFileNamePattern = "%s/block-%07d-%07d.cfilter" 39 | FilterFileNameExtractPattern = "block-[0-9]{7}-([0-9]{7})\\.cfilter" 40 | FilterHeaderFileNamePattern = "%s/block-%07d-%07d.cfheader" 41 | 42 | DirectoryMode = 0755 43 | ) 44 | 45 | var ( 46 | headerFileNameExtractRegex = regexp.MustCompile( 47 | HeaderFileNameExtractPattern, 48 | ) 49 | 50 | filterBasic = btcjson.FilterTypeBasic 51 | 52 | filterFileNameExtractRegex = regexp.MustCompile( 53 | FilterFileNameExtractPattern, 54 | ) 55 | ) 56 | 57 | type cFilterFiles struct { 58 | sync.RWMutex 59 | 60 | quit <-chan struct{} 61 | 62 | h2hCache *heightToHashCache 63 | 64 | filtersPerFile int32 65 | baseDir string 66 | chain *rpcclient.Client 67 | chainParams *chaincfg.Params 68 | 69 | startupComplete atomic.Bool 70 | currentHeight atomic.Int32 71 | 72 | filters map[chainhash.Hash][]byte 73 | } 74 | 75 | func newCFilterFiles(filtersPerFile int32, chain *rpcclient.Client, 76 | quit <-chan struct{}, baseDir string, chainParams *chaincfg.Params, 77 | h2hCache *heightToHashCache) *cFilterFiles { 78 | 79 | cf := &cFilterFiles{ 80 | quit: quit, 81 | h2hCache: h2hCache, 82 | filtersPerFile: filtersPerFile, 83 | baseDir: baseDir, 84 | chain: chain, 85 | chainParams: chainParams, 86 | } 87 | cf.clearData() 88 | 89 | return cf 90 | } 91 | 92 | func (c *cFilterFiles) isStartupComplete() bool { 93 | return c.startupComplete.Load() 94 | } 95 | 96 | func (c *cFilterFiles) getCurrentHeight() int32 { 97 | 
return c.currentHeight.Load() 98 | } 99 | 100 | func (c *cFilterFiles) clearData() { 101 | c.Lock() 102 | defer c.Unlock() 103 | 104 | c.filters = make(map[chainhash.Hash][]byte, c.filtersPerFile) 105 | } 106 | 107 | // updateFiles updates the header and filter files on disk. 108 | // 109 | // NOTE: Must be called as a goroutine. 110 | func (c *cFilterFiles) updateFiles(numBlocks int32) error { 111 | log.Debugf("Updating filter files in %s for network %s", c.baseDir, 112 | c.chainParams.Name) 113 | 114 | filterDir := filepath.Join(c.baseDir, FilterFileDir) 115 | err := os.MkdirAll(filterDir, DirectoryMode) 116 | if err != nil { 117 | return fmt.Errorf("error creating directory %s: %w", filterDir, 118 | err) 119 | } 120 | 121 | lastBlock, err := lastFile( 122 | filterDir, FilterFileSuffix, filterFileNameExtractRegex, 123 | ) 124 | if err != nil { 125 | return fmt.Errorf("error getting last filter file: %w", err) 126 | } 127 | 128 | // If we already had some blocks written, then we need to start from 129 | // the next block. 130 | startBlock := lastBlock 131 | if lastBlock > 0 { 132 | startBlock++ 133 | } 134 | 135 | log.Debugf("Writing filter files from block %d to block %d", startBlock, 136 | numBlocks) 137 | err = c.updateCacheAndFiles(startBlock, numBlocks) 138 | if err != nil { 139 | return fmt.Errorf("error updating blocks: %w", err) 140 | } 141 | 142 | // Allow serving requests now that we're caught up. 143 | c.startupComplete.Store(true) 144 | 145 | // Let's now go into the infinite loop of updating the filter files 146 | // whenever a new block is mined. 147 | log.Debugf("Caught up filters to best block %d, starting to poll for "+ 148 | "new blocks", numBlocks) 149 | for { 150 | select { 151 | case <-time.After(blockPollInterval): 152 | case <-c.quit: 153 | return errServerShutdown 154 | } 155 | 156 | height, err := c.chain.GetBlockCount() 157 | if err != nil { 158 | return fmt.Errorf("error getting best block: %w", err) 159 | } 160 | 161 | currentBlock := c.currentHeight.Load() 162 | if int32(height) == currentBlock { 163 | continue 164 | } 165 | 166 | log.Infof("Processing filters for new block mined at height %d", 167 | height) 168 | err = c.updateCacheAndFiles(currentBlock+1, int32(height)) 169 | if err != nil { 170 | return fmt.Errorf("error updating filters for blocks: "+ 171 | "%w", err) 172 | } 173 | } 174 | } 175 | 176 | func (c *cFilterFiles) updateCacheAndFiles(startBlock, endBlock int32) error { 177 | filterDir := filepath.Join(c.baseDir, FilterFileDir) 178 | 179 | for i := startBlock; i <= endBlock; i++ { 180 | // Were we interrupted? 
181 | select { 182 | case <-c.quit: 183 | return errServerShutdown 184 | default: 185 | } 186 | 187 | hash, err := c.h2hCache.getBlockHash(i) 188 | if err != nil { 189 | return fmt.Errorf("error getting block hash for "+ 190 | "height %d: %w", i, err) 191 | } 192 | 193 | filter, err := c.chain.GetBlockFilter(*hash, &filterBasic) 194 | if err != nil { 195 | return fmt.Errorf("error getting block filter for "+ 196 | "hash %s: %w", hash, err) 197 | } 198 | filterBytes, err := hex.DecodeString(filter.Filter) 199 | if err != nil { 200 | return fmt.Errorf("error parsing filter bytes for "+ 201 | "hash %s: %w", hash, err) 202 | } 203 | 204 | c.Lock() 205 | c.filters[*hash] = filterBytes 206 | c.Unlock() 207 | 208 | if (i+1)%c.filtersPerFile == 0 { 209 | fileStart := i - c.filtersPerFile + 1 210 | filterFileName := fmt.Sprintf( 211 | FilterFileNamePattern, filterDir, fileStart, i, 212 | ) 213 | 214 | log.Debugf("Reached filter height %d, writing file "+ 215 | "starting at %d, containing %d items to %s", i, 216 | fileStart, c.filtersPerFile, filterFileName) 217 | 218 | err = c.writeFilters(filterFileName, fileStart, i) 219 | if err != nil { 220 | return fmt.Errorf("error writing filters: %w", 221 | err) 222 | } 223 | 224 | // We don't need the filters anymore, so clear them out. 225 | c.clearData() 226 | } 227 | 228 | c.currentHeight.Store(i) 229 | } 230 | 231 | return nil 232 | } 233 | 234 | func (c *cFilterFiles) writeFilters(fileName string, startIndex, 235 | endIndex int32) error { 236 | 237 | c.RLock() 238 | defer c.RUnlock() 239 | 240 | log.Debugf("Writing filter file %s", fileName) 241 | file, err := os.Create(fileName) 242 | if err != nil { 243 | return fmt.Errorf("error creating file %s: %w", fileName, err) 244 | } 245 | 246 | err = c.serializeFilters(file, startIndex, endIndex) 247 | if err != nil { 248 | return fmt.Errorf("error writing filters to file %s: %w", 249 | fileName, err) 250 | } 251 | 252 | err = file.Close() 253 | if err != nil { 254 | return fmt.Errorf("error closing file %s: %w", fileName, err) 255 | } 256 | 257 | return nil 258 | } 259 | 260 | func (c *cFilterFiles) serializeFilters(w io.Writer, startIndex, 261 | endIndex int32) error { 262 | 263 | for j := startIndex; j <= endIndex; j++ { 264 | hash, err := c.h2hCache.getBlockHash(j) 265 | if err != nil { 266 | return fmt.Errorf("invalid height %d", j) 267 | } 268 | 269 | filter, ok := c.filters[*hash] 270 | if !ok { 271 | return fmt.Errorf("missing filter for hash %s (height "+ 272 | "%d)", hash.String(), j) 273 | } 274 | 275 | err = wire.WriteVarBytes(w, 0, filter) 276 | if err != nil { 277 | return fmt.Errorf("error writing filters: %w", err) 278 | } 279 | } 280 | 281 | return nil 282 | } 283 | 284 | func listFiles(fileDir, searchPattern string) ([]string, error) { 285 | globPattern := fmt.Sprintf("%s/*%s", fileDir, searchPattern) 286 | files, err := filepath.Glob(globPattern) 287 | if err != nil { 288 | return nil, fmt.Errorf("error listing files '%s' in %s: %w", 289 | globPattern, fileDir, err) 290 | } 291 | 292 | sort.Strings(files) 293 | 294 | return files, nil 295 | } 296 | 297 | func lastFile(fileDir, searchPattern string, 298 | extractPattern *regexp.Regexp) (int32, error) { 299 | 300 | files, err := listFiles(fileDir, searchPattern) 301 | if err != nil { 302 | return 0, err 303 | } 304 | 305 | if len(files) == 0 { 306 | return 0, nil 307 | } 308 | 309 | last := files[len(files)-1] 310 | matches := extractPattern.FindStringSubmatch(last) 311 | if len(matches) != 2 || matches[1] == "" { 312 | return 0, 
fmt.Errorf("error extracting number from file %s", 313 | last) 314 | } 315 | 316 | numUint, err := strconv.ParseInt(matches[1], 10, 32) 317 | if err != nil { 318 | return 0, fmt.Errorf("error parsing number '%s': %w", 319 | matches[1], err) 320 | } 321 | 322 | return int32(numUint), nil 323 | } 324 | -------------------------------------------------------------------------------- /header-files.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "path/filepath" 8 | "sync" 9 | "sync/atomic" 10 | "time" 11 | 12 | "github.com/btcsuite/btcd/chaincfg" 13 | "github.com/btcsuite/btcd/chaincfg/chainhash" 14 | "github.com/btcsuite/btcd/rpcclient" 15 | "github.com/btcsuite/btcd/wire" 16 | ) 17 | 18 | type headerFiles struct { 19 | sync.RWMutex 20 | 21 | quit <-chan struct{} 22 | 23 | h2hCache *heightToHashCache 24 | 25 | headersPerFile int32 26 | baseDir string 27 | chain *rpcclient.Client 28 | chainParams *chaincfg.Params 29 | 30 | startupComplete atomic.Bool 31 | currentHeight atomic.Int32 32 | 33 | headers map[chainhash.Hash]*wire.BlockHeader 34 | filterHeaders map[chainhash.Hash]*chainhash.Hash 35 | } 36 | 37 | func newHeaderFiles(headersPerFile int32, chain *rpcclient.Client, 38 | quit <-chan struct{}, baseDir string, chainParams *chaincfg.Params, 39 | h2hCache *heightToHashCache) *headerFiles { 40 | 41 | cf := &headerFiles{ 42 | quit: quit, 43 | h2hCache: h2hCache, 44 | headersPerFile: headersPerFile, 45 | baseDir: baseDir, 46 | chain: chain, 47 | chainParams: chainParams, 48 | } 49 | cf.clearData() 50 | 51 | return cf 52 | } 53 | 54 | func (h *headerFiles) isStartupComplete() bool { 55 | return h.startupComplete.Load() 56 | } 57 | 58 | func (h *headerFiles) getCurrentHeight() int32 { 59 | return h.currentHeight.Load() 60 | } 61 | 62 | func (h *headerFiles) clearData() { 63 | h.Lock() 64 | defer h.Unlock() 65 | 66 | h.headers = make(map[chainhash.Hash]*wire.BlockHeader, h.headersPerFile) 67 | h.filterHeaders = make( 68 | map[chainhash.Hash]*chainhash.Hash, h.headersPerFile, 69 | ) 70 | } 71 | 72 | // updateFiles updates the header and filter files on disk. 73 | // 74 | // NOTE: Must be called as a goroutine. 75 | func (h *headerFiles) updateFiles(numBlocks int32) error { 76 | log.Debugf("Updating header files in %s for network %s", h.baseDir, 77 | h.chainParams.Name) 78 | 79 | headerDir := filepath.Join(h.baseDir, HeaderFileDir) 80 | err := os.MkdirAll(headerDir, DirectoryMode) 81 | if err != nil { 82 | return fmt.Errorf("error creating directory %s: %w", headerDir, 83 | err) 84 | } 85 | 86 | lastBlock, err := lastFile( 87 | headerDir, HeaderFileSuffix, headerFileNameExtractRegex, 88 | ) 89 | if err != nil { 90 | return fmt.Errorf("error getting last header file: %w", err) 91 | } 92 | 93 | // If we already had some blocks written, then we need to start from 94 | // the next block. 95 | startBlock := lastBlock 96 | if lastBlock > 0 { 97 | startBlock++ 98 | } 99 | 100 | log.Debugf("Writing header files from block %d to block %d", startBlock, 101 | numBlocks) 102 | err = h.updateCacheAndFiles(startBlock, numBlocks) 103 | if err != nil { 104 | return fmt.Errorf("error updating blocks: %w", err) 105 | } 106 | 107 | // Allow serving requests now that we're caught up. 108 | h.startupComplete.Store(true) 109 | 110 | // Let's now go into the infinite loop of updating the filter files 111 | // whenever a new block is mined. 
112 | log.Debugf("Caught up headers to best block %d, starting to poll for "+ 113 | "new blocks", numBlocks) 114 | for { 115 | select { 116 | case <-time.After(blockPollInterval): 117 | case <-h.quit: 118 | return errServerShutdown 119 | } 120 | 121 | height, err := h.chain.GetBlockCount() 122 | if err != nil { 123 | return fmt.Errorf("error getting best block: %w", err) 124 | } 125 | 126 | currentBlock := h.currentHeight.Load() 127 | if int32(height) == currentBlock { 128 | continue 129 | } 130 | 131 | log.Infof("Processing headers for new block mined at height %d", 132 | height) 133 | err = h.updateCacheAndFiles(currentBlock+1, int32(height)) 134 | if err != nil { 135 | return fmt.Errorf("error updating headers for blocks: "+ 136 | "%w", err) 137 | } 138 | } 139 | } 140 | 141 | func (h *headerFiles) updateCacheAndFiles(startBlock, endBlock int32) error { 142 | headerDir := filepath.Join(h.baseDir, HeaderFileDir) 143 | 144 | for i := startBlock; i <= endBlock; i++ { 145 | // Were we interrupted? 146 | select { 147 | case <-h.quit: 148 | return errServerShutdown 149 | default: 150 | } 151 | 152 | hash, err := h.h2hCache.getBlockHash(i) 153 | if err != nil { 154 | return fmt.Errorf("error getting block hash for "+ 155 | "height %d: %w", i, err) 156 | } 157 | 158 | header, err := h.chain.GetBlockHeader(hash) 159 | if err != nil { 160 | return fmt.Errorf("error getting block header for "+ 161 | "hash %s: %w", hash, err) 162 | } 163 | 164 | filter, err := h.chain.GetBlockFilter(*hash, &filterBasic) 165 | if err != nil { 166 | return fmt.Errorf("error getting block filter for "+ 167 | "hash %s: %w", hash, err) 168 | } 169 | filterHeader, err := chainhash.NewHashFromStr(filter.Header) 170 | if err != nil { 171 | return fmt.Errorf("error parsing filter header for "+ 172 | "hash %s: %w", hash, err) 173 | } 174 | 175 | h.Lock() 176 | h.headers[*hash] = header 177 | h.filterHeaders[*hash] = filterHeader 178 | h.Unlock() 179 | 180 | if (i+1)%h.headersPerFile == 0 { 181 | fileStart := i - h.headersPerFile + 1 182 | headerFileName := fmt.Sprintf( 183 | HeaderFileNamePattern, headerDir, fileStart, i, 184 | ) 185 | filterHeaderFileName := fmt.Sprintf( 186 | FilterHeaderFileNamePattern, headerDir, 187 | fileStart, i, 188 | ) 189 | 190 | log.Debugf("Reached header height %d, writing file "+ 191 | "starting at %d, containing %d items to %s", i, 192 | fileStart, h.headersPerFile, headerFileName) 193 | 194 | err = h.writeHeaders(headerFileName, fileStart, i) 195 | if err != nil { 196 | return fmt.Errorf("error writing headers: %w", 197 | err) 198 | } 199 | 200 | log.Debugf("Reached filter header height %d, writing "+ 201 | "file starting at %d, containing %d items to "+ 202 | "%s", i, fileStart, h.headersPerFile, 203 | filterHeaderFileName) 204 | 205 | err = h.writeFilterHeaders( 206 | filterHeaderFileName, fileStart, i, 207 | ) 208 | if err != nil { 209 | return fmt.Errorf("error writing filter "+ 210 | "headers: %w", err) 211 | } 212 | 213 | // We don't need the headers or filters anymore, so 214 | // clear them out. 
215 | h.clearData() 216 | } 217 | 218 | h.currentHeight.Store(i) 219 | } 220 | 221 | return nil 222 | } 223 | 224 | func (h *headerFiles) writeHeaders(fileName string, startIndex, 225 | endIndex int32) error { 226 | 227 | h.RLock() 228 | defer h.RUnlock() 229 | 230 | log.Debugf("Writing header file %s", fileName) 231 | file, err := os.Create(fileName) 232 | if err != nil { 233 | return fmt.Errorf("error creating file %s: %w", fileName, err) 234 | } 235 | 236 | err = h.serializeHeaders(file, startIndex, endIndex) 237 | if err != nil { 238 | return fmt.Errorf("error writing headers to file %s: %w", 239 | fileName, err) 240 | } 241 | 242 | err = file.Close() 243 | if err != nil { 244 | return fmt.Errorf("error closing file %s: %w", fileName, err) 245 | } 246 | 247 | return nil 248 | } 249 | 250 | func (h *headerFiles) serializeHeaders(w io.Writer, startIndex, 251 | endIndex int32) error { 252 | 253 | for j := startIndex; j <= endIndex; j++ { 254 | hash, err := h.h2hCache.getBlockHash(j) 255 | if err != nil { 256 | return fmt.Errorf("invalid height %d", j) 257 | } 258 | 259 | header, ok := h.headers[*hash] 260 | if !ok { 261 | return fmt.Errorf("missing header for hash %s (height "+ 262 | "%d)", hash.String(), j) 263 | } 264 | 265 | err = header.Serialize(w) 266 | if err != nil { 267 | return fmt.Errorf("error writing headers: %w", err) 268 | } 269 | } 270 | 271 | return nil 272 | } 273 | 274 | func (h *headerFiles) writeFilterHeaders(fileName string, startIndex, 275 | endIndex int32) error { 276 | 277 | h.RLock() 278 | defer h.RUnlock() 279 | 280 | log.Debugf("Writing filter header file %s", fileName) 281 | file, err := os.Create(fileName) 282 | if err != nil { 283 | return fmt.Errorf("error creating file %s: %w", fileName, err) 284 | } 285 | 286 | err = h.serializeFilterHeaders(file, startIndex, endIndex) 287 | if err != nil { 288 | return fmt.Errorf("error writing filter headers to file %s: %w", 289 | fileName, err) 290 | } 291 | 292 | err = file.Close() 293 | if err != nil { 294 | return fmt.Errorf("error closing file %s: %w", fileName, err) 295 | } 296 | 297 | return nil 298 | } 299 | 300 | func (h *headerFiles) serializeFilterHeaders(w io.Writer, startIndex, 301 | endIndex int32) error { 302 | 303 | for j := startIndex; j <= endIndex; j++ { 304 | hash, err := h.h2hCache.getBlockHash(j) 305 | if err != nil { 306 | return fmt.Errorf("invalid height %d", j) 307 | } 308 | 309 | filterHeader, ok := h.filterHeaders[*hash] 310 | if !ok { 311 | return fmt.Errorf("missing filter header for hash %s "+ 312 | "(height %d)", hash.String(), j) 313 | } 314 | 315 | num, err := w.Write(filterHeader[:]) 316 | if err != nil { 317 | return fmt.Errorf("error writing filter header: %w", 318 | err) 319 | } 320 | if num != chainhash.HashSize { 321 | return fmt.Errorf("short write when writing filter " + 322 | "headers") 323 | } 324 | } 325 | 326 | return nil 327 | } 328 | -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Block Delivery Network 7 | 60 | 61 | 62 | 68 |
69 |

Block Delivery Network

70 |

71 | block-dn is a simple web server that connects to your 72 | existing Bitcoin full node and serves data from the time chain 73 | over HTTP(S). 74 |
75 | The goal is to serve the following data to Light Clients (e.g. mobile 76 | wallets): 77 |

78 | 87 |

88 | There are two ways to use block-dn: 89 |

90 |

Connect to your own node

91 |

92 | Install and run block-dn on your own device and configure 93 | its HTTP endpoint in your compatible wallet. Your own node's chain 94 | information is then used to sync the wallet state. 95 |

96 |

Use the Cloudflare CDN cached instance

97 |

98 | If you have a way to find out the most recent block hash from 99 | your peers or your own node, the rest of the chain data (e.g. block 100 | headers, blocks, compact filters) can be downloaded from an untrusted 101 | source. Such an untrusted source can be found at 102 | block-dn.org. This 103 | site runs an instance of block-dn and is behind 104 | Cloudflare's caching proxy service, effectively caching the data in 105 | geographically distributed sites around the world for maximum access 106 | speed and very low latency. 107 |
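To make that trust model concrete, here is a minimal client-side sketch in Go that downloads the first header file from the /headers endpoint described below and verifies that every header commits to the hash of its predecessor, so only the tip hash needs to come from a trusted source. It assumes the btcd wire package (which block-dn itself builds on); the URL and start height are illustrative only.

package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"

	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Fetch the first header file (illustrative endpoint usage).
	resp, err := http.Get("https://block-dn.org/headers/0")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The file is a plain concatenation of 80-byte block headers.
	var headers []wire.BlockHeader
	for {
		var hdr wire.BlockHeader
		if err := hdr.Deserialize(resp.Body); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			panic(err)
		}
		headers = append(headers, hdr)
	}

	// Verify that every header commits to the hash of its predecessor,
	// so an untrusted CDN cannot swap out individual headers undetected.
	// The tip hash would then be compared against the trusted hash
	// obtained from your own node or your peers.
	for i := 1; i < len(headers); i++ {
		prevHash := headers[i-1].BlockHash()
		if !headers[i].PrevBlock.IsEqual(&prevHash) {
			panic(fmt.Sprintf("chain broken at index %d", i))
		}
	}
	fmt.Printf("verified %d headers, tip hash: %v\n", len(headers),
		headers[len(headers)-1].BlockHash())
}

Compact filter headers form an analogous hash chain per BIP-157, so they can be verified with the same approach.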

108 | 109 |
110 |

Available REST endpoints

111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 122 | 125 | 126 | 127 | 128 | 131 | 136 | 137 | 138 | 139 | 143 | 148 | 149 | 150 | 151 | 162 | 167 | 168 | 169 | 170 | 175 | 180 | 181 | 182 | 183 | 195 | 200 | 201 | 202 | 203 | 210 | 215 | 216 | 217 | 218 | 238 | 243 | 244 | 245 | 246 | 252 | 257 | 258 | 259 | 260 | 264 | 269 | 270 |
EndpointDescriptionExample
/status 120 | Returns a JSON file with the latest block height and hash. 121 | 123 | block-dn.org/status 124 |
/block/<block_hash> 129 | Returns a single block, identified by its block hash. 130 | 132 | 133 | block-dn.org/block/000000000000000000030ee5.... 134 | 135 |
/headers/<start_block> 140 | Returns a binary file containing 100'000 block headers, 141 | serialized as 80 bytes per header (8 MB per file). 142 | 144 | 145 | block-dn.org/headers/0 146 | 147 |
/headers/import/<end_block> 152 | Returns a binary file containing end_block 153 | block headers, for import directly into a Neutrino database. 154 | The file starts with the 10 byte metadata header (4 bytes 155 | Bitcoin network identifier, 1 byte import format version, 156 | 1 byte header type, 4 bytes start header height), followed by 157 | the headers serialized as 80 bytes per header (see the parsing sketch after this endpoint list).
158 | end_block must be divisible by 100'000 and is 159 | non-inclusive, meaning all blocks up to but not including 160 | the end_block are returned! 161 |
163 | 164 | block-dn.org/headers/import/200000 165 | 166 |
/filter-headers/<start_block> 171 | Returns a binary file containing 100'000 compact filter 172 | headers, serialized as 32 bytes per filter header hash 173 | (3.2 MB per file). 174 | 176 | 177 | block-dn.org/filter-headers/100000 178 | 179 |
/filter-headers/import/<end_block> 184 | Returns a binary file containing end_block 185 | compact filter headers, for import directly into a Neutrino 186 | database. The file starts with the 10 byte metadata header 187 | (4 bytes Bitcoin network identifier, 1 byte import format 188 | version, 1 byte header type, 4 bytes start header height), 189 | followed by the filter header hashes serialized as 32 bytes 190 | per header hash.
191 | end_block must be divisible by 100'000 and is 192 | non-inclusive, meaning all blocks up to but not including 193 | the end_block are returned! 194 |
196 | 197 | block-dn.org/filter-headers/import/200000 198 | 199 |
/filters/<start_block> 204 | Returns a binary file containing 2'000 compact filters, 205 | serialized as variable length byte arrays: 206 | each filter starts with a VarInt specifying 207 | its length, followed by that many bytes for the 208 | actual filter (up to 58 MiB per file as of block 817'995; see the parsing sketch after this endpoint list). 209 |
/sp/tweak-data/<start_block> 219 | Returns a JSON file containing Silent Payment tweak data 220 | entries for transactions of 2'000 blocks, using the 221 | following format (see the decoding sketch after this endpoint list):
222 |
223 |     {
224 |       "start_height": <start_block>,
225 |       "num_blocks": 2000,
226 |       "blocks": [
227 |         {
228 |           "<tx_index 1>": "02a1633f5b1c4e8...",
229 |           "<tx_index x>": "03cdefa84923dc8..."
230 |         },
231 |         {
232 |           ...
233 |         }
234 |       ]
235 |     }
236 |                     
237 |
239 | 240 | block-dn.org/sp/tweak-data/802000 241 | 242 |
/tx/out-proof/<txid> 247 | Returns a binary merkle proof for the given transaction that 248 | proves it was included in the block. Returns an error if the 249 | transaction cannot be found or is not included in a block 250 | yet. 251 | 253 | 254 | block-dn.org/tx/out-proof/8f900c6414c5c27231080f46168105e9fec20f9fb2f11b25ee0312d89bc022c0 255 | 256 |
/tx/raw/<txid> 261 | Returns a binary response with the encoded transaction or 262 | an error if the transaction cannot be found. 263 | 265 | 266 | block-dn.org/tx/raw/8f900c6414c5c27231080f46168105e9fec20f9fb2f11b25ee0312d89bc022c0 267 | 268 |
271 |
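As a worked example of the import format described above, the following sketch fetches /headers/import/200000 and decodes the 10 byte metadata header (4 bytes little-endian network magic, 1 byte import format version, 1 byte header type, 4 bytes little-endian start height) before counting the 80-byte headers that follow. The endpoint and height are illustrative; the field layout is taken from the documentation above.

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"net/http"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	resp, err := http.Get("https://block-dn.org/headers/import/200000")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Read the 10 byte metadata header that precedes the payload.
	meta := make([]byte, 10)
	if _, err := io.ReadFull(resp.Body, meta); err != nil {
		panic(err)
	}

	net := wire.BitcoinNet(binary.LittleEndian.Uint32(meta[0:4]))
	version := meta[4]    // Import format version, currently 0.
	headerType := meta[5] // 0 = block headers, 1 = filter headers.
	startHeight := binary.LittleEndian.Uint32(meta[6:10])

	if net != chaincfg.MainNetParams.Net {
		panic(fmt.Sprintf("unexpected network %v", net))
	}
	fmt.Printf("version=%d, type=%d, start height=%d\n", version,
		headerType, startHeight)

	// The rest of the file holds end_block headers of 80 bytes each,
	// since the end height is non-inclusive.
	payload, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("payload contains %d headers\n", len(payload)/80)
}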
272 |
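The compact filter files use a length-prefixed layout. This sketch assumes the VarInt prefix is Bitcoin's CompactSize encoding as implemented by btcd's wire.ReadVarInt, which matches the wire package this project uses; the URL is illustrative.

package main

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"net/http"

	"github.com/btcsuite/btcd/wire"
)

func main() {
	resp, err := http.Get("https://block-dn.org/filters/802000")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	r := bufio.NewReader(resp.Body)
	var filters [][]byte
	for {
		// Each filter is prefixed with a VarInt holding its length
		// in bytes.
		size, err := wire.ReadVarInt(r, 0)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}

		filter := make([]byte, size)
		if _, err := io.ReadFull(r, filter); err != nil {
			panic(err)
		}
		filters = append(filters, filter)
	}
	fmt.Printf("read %d compact filters\n", len(filters))
}

To actually query one of the raw filters, the test suite in this repository turns the bytes into a GCS filter via gcs.FromNBytes(builder.DefaultP, builder.DefaultM, filterBytes).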
273 |
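The Silent Payments tweak data is plain JSON, so a client only needs a small struct mirroring the format shown above. The type and field names below are local assumptions for this sketch; only the wire format (snake_case keys, maps from transaction index to compressed tweak public key in hex) comes from the documentation.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// spTweakFile mirrors the documented JSON layout; the name is local to
// this sketch.
type spTweakFile struct {
	StartHeight int32               `json:"start_height"`
	NumBlocks   int32               `json:"num_blocks"`
	Blocks      []map[string]string `json:"blocks"`
}

func main() {
	resp, err := http.Get("https://block-dn.org/sp/tweak-data/802000")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var file spTweakFile
	if err := json.NewDecoder(resp.Body).Decode(&file); err != nil {
		panic(err)
	}

	// Each entry in Blocks corresponds to one block, starting at
	// StartHeight; an empty entry means the block contained no Silent
	// Payment eligible transactions. Map keys are transaction indexes
	// within the block.
	for i, block := range file.Blocks {
		for txIndex, tweak := range block {
			fmt.Printf("height %d, tx %s: %s\n",
				file.StartHeight+int32(i), txIndex, tweak)
		}
	}
}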

Test instances

274 |

275 | There are block-dn instances for all Bitcoin test networks 276 | available for testing as well: 277 |

278 | 298 |
299 |
300 |

Source code on GitHub

301 |

302 | Check out the source code and installation instructions for this 303 | project at 304 | github.com/guggero/block-dn. 305 |

306 |
307 |
308 | 309 | 310 | -------------------------------------------------------------------------------- /silent-payments-files_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "math" 7 | "os" 8 | "path/filepath" 9 | "sync" 10 | "testing" 11 | "time" 12 | 13 | "github.com/btcsuite/btcd/btcec/v2" 14 | "github.com/btcsuite/btcd/btcec/v2/schnorr" 15 | "github.com/btcsuite/btcd/btcutil/gcs" 16 | "github.com/btcsuite/btcd/btcutil/gcs/builder" 17 | "github.com/btcsuite/btcd/btcutil/psbt" 18 | sp "github.com/btcsuite/btcd/btcutil/silentpayments" 19 | "github.com/btcsuite/btcd/chaincfg" 20 | "github.com/btcsuite/btcd/txscript" 21 | "github.com/btcsuite/btcd/wire" 22 | "github.com/btcsuite/btclog/v2" 23 | "github.com/btcsuite/btcwallet/chain" 24 | "github.com/btcsuite/btcwallet/waddrmgr" 25 | basewallet "github.com/btcsuite/btcwallet/wallet" 26 | "github.com/btcsuite/btcwallet/walletdb" 27 | "github.com/lightningnetwork/lnd/blockcache" 28 | "github.com/lightningnetwork/lnd/lntest/wait" 29 | "github.com/lightningnetwork/lnd/lnwallet" 30 | "github.com/lightningnetwork/lnd/lnwallet/btcwallet" 31 | "github.com/lightningnetwork/lnd/lnwallet/chainfee" 32 | "github.com/stretchr/testify/require" 33 | ) 34 | 35 | const ( 36 | tweakBlocksPerFile = 100 37 | ) 38 | 39 | var ( 40 | pollInterval = 10 * time.Millisecond 41 | 42 | // seedBytes is the raw entropy of the aezeed: 43 | // able promote dizzy mixture sword myth share public find tattoo 44 | // catalog cousin bulb unfair machine alarm cool large promote kick 45 | // shop rug mean year 46 | // Which corresponds to the master root key: 47 | // xprv9s21ZrQH143K2KADjED57FvNbptdKLp4sqKzssegwEGKQMGoDkbyhUeCKe5m3A 48 | // MU44z4vqkmGswwQVKrv599nFG16PPZDEkNrogwoDGeCmZ 49 | seedBytes, _ = hex.DecodeString("4a7611b6979ba7c4bc5c5cd2239b2973") 50 | 51 | addrTypes = []lnwallet.AddressType{ 52 | lnwallet.WitnessPubKey, 53 | lnwallet.NestedWitnessPubKey, 54 | lnwallet.TaprootPubkey, 55 | } 56 | 57 | spKeyScope = waddrmgr.KeyScope{ 58 | Purpose: 352, 59 | Coin: testParams.HDCoinType, 60 | } 61 | spKeyScopeSchema = waddrmgr.ScopeAddrSchema{ 62 | ExternalAddrType: waddrmgr.TaprootPubKey, 63 | InternalAddrType: waddrmgr.TaprootPubKey, 64 | } 65 | 66 | // waddrmgrNamespaceKey is the namespace key under which the waddrmgr 67 | // state is stored within the top-level walletdb buckets of btcwallet. 68 | waddrmgrNamespaceKey = []byte("waddrmgr") 69 | ) 70 | 71 | func TestSPTweakDataFilesUpdate(t *testing.T) { 72 | // Activate Taproot for regtest. 73 | TaprootActivationHeights[chaincfg.RegressionNetParams.Net] = 1 74 | 75 | miner, backend, _, _ := setupBackend(t, unitTestDir) 76 | 77 | // Mine initial blocks. The miner starts with 200 blocks already mined. 78 | _ = miner.MineEmptyBlocks(initialBlocks - int(totalStartupBlocks)) 79 | 80 | // Wait until the backend is fully synced to the miner. 81 | waitBackendSync(t, backend, miner) 82 | 83 | // First run: start from scratch. 84 | dataDir := t.TempDir() 85 | quit := make(chan struct{}) 86 | h2hCache := newH2HCache(backend) 87 | hf := newSPTweakFiles( 88 | tweakBlocksPerFile, backend, quit, dataDir, &testParams, 89 | h2hCache, 90 | ) 91 | 92 | var wg sync.WaitGroup 93 | 94 | // Wait for the initial blocks to be written. 95 | waitForTargetHeight(t, &wg, hf, initialBlocks) 96 | 97 | // Check files.
98 | spDir := filepath.Join(dataDir, SPTweakFileDir) 99 | files, err := os.ReadDir(spDir) 100 | require.NoError(t, err) 101 | require.Len(t, files, 4) 102 | 103 | // Check file names and sizes. 104 | checkSPTweakDataFileFiles(t, spDir, 0, 99, 547) 105 | checkSPTweakDataFileFiles(t, spDir, 100, 199, 549) 106 | checkSPTweakDataFileFiles(t, spDir, 200, 299, 549) 107 | checkSPTweakDataFileFiles(t, spDir, 300, 399, 549) 108 | 109 | // Stop the service. 110 | close(quit) 111 | wg.Wait() 112 | 113 | // Second run: restart and continue. 114 | const finalBlocks = 550 115 | _ = miner.MineEmptyBlocks(finalBlocks - initialBlocks) 116 | 117 | // Wait until the backend is fully synced to the miner. 118 | waitBackendSync(t, backend, miner) 119 | 120 | quit = make(chan struct{}) 121 | hf = newSPTweakFiles( 122 | tweakBlocksPerFile, backend, quit, dataDir, &testParams, 123 | h2hCache, 124 | ) 125 | 126 | // Wait for the final blocks to be written. 127 | waitForTargetHeight(t, &wg, hf, finalBlocks) 128 | 129 | // Check files again. 130 | files, err = os.ReadDir(spDir) 131 | require.NoError(t, err) 132 | require.Len(t, files, 5) 133 | 134 | // Check new file names and sizes. 135 | checkSPTweakDataFileFiles(t, spDir, 400, 499, 549) 136 | 137 | // Stop the service. 138 | close(quit) 139 | wg.Wait() 140 | } 141 | 142 | func checkSPTweakDataFileFiles(t *testing.T, filterDir string, start, end int32, 143 | size int64) { 144 | 145 | checkFile( 146 | t, fmt.Sprintf(SPTweakFileNamePattern, filterDir, start, end), 147 | size, 148 | ) 149 | } 150 | 151 | func TestSilentPaymentsDetection(t *testing.T) { 152 | // Activate Taproot for regtest. 153 | TaprootActivationHeights[chaincfg.RegressionNetParams.Net] = 1 154 | 155 | miner, backend, _, bitcoindCfg := setupBackend(t, unitTestDir) 156 | wallet, scopeMgr := newTestWallet( 157 | t, &testParams, bitcoindCfg, seedBytes, 158 | ) 159 | 160 | // Mine initial blocks. The miner starts with 200 blocks already mined. 161 | _ = miner.MineEmptyBlocks(initialBlocks - int(totalStartupBlocks)) 162 | 163 | // Wait until the backend is fully synced to the miner. 164 | waitBackendSync(t, backend, miner) 165 | 166 | // First run: start from scratch. 167 | dataDir := t.TempDir() 168 | quit := make(chan struct{}) 169 | h2hCache := newH2HCache(backend) 170 | hf := newSPTweakFiles( 171 | tweakBlocksPerFile, backend, quit, dataDir, &testParams, 172 | h2hCache, 173 | ) 174 | 175 | // Wait for the initial blocks to be written. 176 | var wg sync.WaitGroup 177 | waitForTargetHeight(t, &wg, hf, initialBlocks) 178 | 179 | // Fund an address of each type. 180 | for _, addrType := range addrTypes { 181 | addr, err := wallet.NewAddress( 182 | addrType, false, lnwallet.DefaultAccountName, 183 | ) 184 | require.NoError(t, err) 185 | 186 | pkScript, err := txscript.PayToAddrScript(addr) 187 | require.NoError(t, err) 188 | 189 | t.Logf("Sending output %x (addr %s)", pkScript, addr.String()) 190 | 191 | miner.SendOutput(&wire.TxOut{ 192 | Value: 100_000, 193 | PkScript: pkScript, 194 | }, 2) 195 | } 196 | 197 | // Mine a block to confirm the funding transactions. 
198 | miner.MineBlocksAndAssertNumTxes(1, len(addrTypes)) 199 | waitBackendSync(t, backend, miner) 200 | waitForTargetHeight(t, &wg, hf, initialBlocks+1) 201 | 202 | var utxos []*lnwallet.Utxo 203 | err := wait.NoError(func() error { 204 | var err error 205 | utxos, err = wallet.ListUnspentWitness(1, math.MaxInt32, "") 206 | if err != nil { 207 | return fmt.Errorf("listing utxos: %w", err) 208 | } 209 | 210 | if len(utxos) != len(addrTypes) { 211 | return fmt.Errorf("expected %d utxos; got %d", 212 | len(addrTypes), len(utxos)) 213 | } 214 | 215 | return nil 216 | }, shortTimeout) 217 | require.NoError(t, err) 218 | 219 | scanKey, scanPrivKey := deriveNextSPKey(t, wallet, scopeMgr, false) 220 | spendKey, _ := deriveNextSPKey(t, wallet, scopeMgr, true) 221 | spAddr := sp.NewAddress(sp.TestNetHRP, *scanKey, *spendKey, nil) 222 | 223 | tx := wire.NewMsgTx(2) 224 | tx.TxOut = append(tx.TxOut, &wire.TxOut{ 225 | Value: 250_000, 226 | PkScript: psbt.SilentPaymentDummyP2TROutput, 227 | }) 228 | 229 | pkt, err := psbt.NewFromUnsignedTx(tx) 230 | require.NoError(t, err) 231 | 232 | pkt.Outputs[0].SilentPaymentInfo = &psbt.SilentPaymentInfo{ 233 | ScanKey: scanKey.SerializeCompressed(), 234 | SpendKey: spendKey.SerializeCompressed(), 235 | } 236 | 237 | changeIndex, err := wallet.FundPsbt( 238 | pkt, 0, chainfee.FeePerKwFloor, lnwallet.DefaultAccountName, 239 | nil, basewallet.CoinSelectionLargest, nil, 240 | ) 241 | require.NoError(t, err) 242 | 243 | _, err = wallet.SignPsbt(pkt) 244 | require.NoError(t, err) 245 | 246 | err = psbt.MaybeFinalizeAll(pkt) 247 | require.NoError(t, err) 248 | 249 | finalTx, err := psbt.Extract(pkt) 250 | require.NoError(t, err) 251 | 252 | err = wallet.PublishTransaction(finalTx, "silent payments test") 253 | require.NoError(t, err) 254 | 255 | // Mine a block to confirm the transaction. 256 | minedBlocks := miner.MineBlocksAndAssertNumTxes(1, 1) 257 | 258 | spHeight := int32(initialBlocks + 2) 259 | waitBackendSync(t, backend, miner) 260 | waitForTargetHeight(t, &wg, hf, spHeight) 261 | 262 | blockData := hf.tweakData[spHeight] 263 | 264 | // The tweak data for the block should have exactly one entry, since the 265 | // coinbase transaction isn't a silent payment. 
266 | require.Len(t, blockData, 1) 267 | txData, ok := blockData[1] 268 | require.True(t, ok, "expected tx index 1 in block data") 269 | 270 | spOutputKeys, err := sp.TransactionOutputKeysForFilter( 271 | *txData, []sp.ScanAddress{ 272 | sp.NewScanAddress(*spAddr, *scanPrivKey), 273 | }, 274 | ) 275 | require.NoError(t, err) 276 | 277 | require.Len(t, spOutputKeys, 1) 278 | spOutputKey := spOutputKeys[0] 279 | 280 | txOut := finalTx.TxOut[len(finalTx.TxOut)-int(changeIndex)-1] 281 | txOutputKey, err := schnorr.ParsePubKey(txOut.PkScript[2:34]) 282 | require.NoError(t, err) 283 | 284 | t.Logf("Derived output key: %x", spOutputKey.SerializeCompressed()) 285 | t.Logf("Transaction output key: %x", txOutputKey.SerializeCompressed()) 286 | 287 | require.Equal( 288 | t, schnorr.SerializePubKey(spOutputKey), 289 | schnorr.SerializePubKey(txOutputKey), 290 | ) 291 | 292 | spBlockHash := minedBlocks[0].BlockHash() 293 | filter, err := backend.GetBlockFilter(spBlockHash, &filterBasic) 294 | require.NoError(t, err) 295 | 296 | filterBytes, err := hex.DecodeString(filter.Filter) 297 | require.NoError(t, err) 298 | 299 | cFilter, err := gcs.FromNBytes( 300 | builder.DefaultP, builder.DefaultM, filterBytes, 301 | ) 302 | require.NoError(t, err) 303 | 304 | match, err := sp.MatchBlock(cFilter, &spBlockHash, spOutputKeys) 305 | require.NoError(t, err) 306 | require.True(t, match) 307 | } 308 | 309 | func newTestWallet(t *testing.T, netParams *chaincfg.Params, 310 | bitcoindConfig *chain.BitcoindConfig, 311 | seedBytes []byte) (*btcwallet.BtcWallet, *waddrmgr.ScopedKeyManager) { 312 | 313 | walletLogger := log.SubSystem("BTCW") 314 | walletLogger.SetLevel(btclog.LevelInfo) 315 | btcwallet.UseLogger(walletLogger) 316 | chain.UseLogger(walletLogger) 317 | basewallet.UseLogger(walletLogger) 318 | 319 | conn, err := chain.NewBitcoindConn(bitcoindConfig) 320 | require.NoError(t, err) 321 | 322 | err = conn.Start() 323 | require.NoError(t, err) 324 | 325 | loaderOpt := btcwallet.LoaderWithLocalWalletDB( 326 | t.TempDir(), false, time.Minute, 327 | ) 328 | config := btcwallet.Config{ 329 | PrivatePass: []byte("some-pass"), 330 | HdSeed: seedBytes, 331 | NetParams: netParams, 332 | CoinType: netParams.HDCoinType, 333 | ChainSource: conn.NewBitcoindClient(), 334 | LoaderOptions: []btcwallet.LoaderOption{loaderOpt}, 335 | } 336 | blockCache := blockcache.NewBlockCache(10000) 337 | w, err := btcwallet.New(config, blockCache) 338 | require.NoError(t, err) 339 | 340 | err = w.Start() 341 | require.NoError(t, err) 342 | 343 | t.Cleanup(func() { 344 | err := w.Stop() 345 | require.NoError(t, err) 346 | }) 347 | 348 | // Add the Silent Payments key scope to the wallet. 
349 | scopeMgr, err := w.InternalWallet().AddScopeManager( 350 | spKeyScope, spKeyScopeSchema, 351 | ) 352 | require.NoError(t, err) 353 | 354 | err = w.InternalWallet().InitAccounts(scopeMgr, false, 1) 355 | require.NoError(t, err) 356 | 357 | _, err = w.SubscribeTransactions() 358 | require.NoError(t, err) 359 | 360 | return w, scopeMgr 361 | } 362 | 363 | func deriveNextSPKey(t *testing.T, wallet *btcwallet.BtcWallet, 364 | scopeMgr *waddrmgr.ScopedKeyManager, spendKey bool) (*btcec.PublicKey, 365 | *btcec.PrivateKey) { 366 | 367 | var ( 368 | pubKey *btcec.PublicKey 369 | privKey *btcec.PrivateKey 370 | ) 371 | 372 | db := wallet.InternalWallet().Database() 373 | err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error { 374 | addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey) 375 | 376 | var ( 377 | addrs []waddrmgr.ManagedAddress 378 | err error 379 | ) 380 | 381 | if spendKey { 382 | addrs, err = scopeMgr.NextExternalAddresses( 383 | addrmgrNs, 0, 1, 384 | ) 385 | } else { 386 | addrs, err = scopeMgr.NextInternalAddresses( 387 | addrmgrNs, 0, 1, 388 | ) 389 | } 390 | if err != nil { 391 | return err 392 | } 393 | 394 | addr, ok := addrs[0].(waddrmgr.ManagedPubKeyAddress) 395 | if !ok { 396 | return fmt.Errorf("address is not a managed pubkey " + 397 | "addr") 398 | } 399 | 400 | pubKey = addr.PubKey() 401 | privKey, err = addr.PrivKey() 402 | return err 403 | }) 404 | require.NoError(t, err) 405 | 406 | return pubKey, privKey 407 | } 408 | -------------------------------------------------------------------------------- /silent-payments-files.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/hex" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "maps" 9 | "os" 10 | "path/filepath" 11 | "regexp" 12 | "slices" 13 | "sync" 14 | "sync/atomic" 15 | "time" 16 | 17 | "github.com/btcsuite/btcd/blockchain" 18 | "github.com/btcsuite/btcd/btcec/v2" 19 | sp "github.com/btcsuite/btcd/btcutil/silentpayments" 20 | "github.com/btcsuite/btcd/chaincfg" 21 | "github.com/btcsuite/btcd/rpcclient" 22 | "github.com/btcsuite/btcd/txscript" 23 | "github.com/btcsuite/btcd/wire" 24 | "github.com/lightningnetwork/lnd/lnutils" 25 | ) 26 | 27 | const ( 28 | DefaultSPTweaksPerFile = 2_000 29 | 30 | DefaultRegtestSPTweaksPerFile = 2_000 31 | 32 | SPTweakFileDir = "silentpayments" 33 | 34 | SPTweakFileSuffix = ".sptweak" 35 | SPTweakFileNamePattern = "%s/block-%07d-%07d.sptweak" 36 | SPTweakFileNameExtractPattern = "block-[0-9]{7}-([0-9]{7})\\.sptweak" 37 | 38 | BlockFilePattern = "%s/blk%05d.dat" 39 | ) 40 | 41 | var ( 42 | // TaprootActivationHeights maps each supported Bitcoin network to its 43 | // respective Taproot activation height. 
44 | TaprootActivationHeights = map[wire.BitcoinNet]int32{ 45 | chaincfg.MainNetParams.Net: 709_632, 46 | chaincfg.TestNet3Params.Net: 2_011_968, 47 | chaincfg.TestNet4Params.Net: 1, 48 | chaincfg.SigNetParams.Net: 1, 49 | } 50 | 51 | spTweakFileNameExtractRegex = regexp.MustCompile( 52 | SPTweakFileNameExtractPattern, 53 | ) 54 | ) 55 | 56 | type prevOutCache struct { 57 | numBlock int32 58 | numTx int32 59 | numTrTx int32 60 | numTrHit int32 61 | numTxFetch int32 62 | 63 | enabled bool 64 | 65 | lastReset time.Time 66 | 67 | chain *rpcclient.Client 68 | 69 | cache lnutils.SyncMap[wire.OutPoint, []byte] 70 | } 71 | 72 | func newPrevOutCache(chain *rpcclient.Client) *prevOutCache { 73 | return &prevOutCache{ 74 | enabled: true, 75 | chain: chain, 76 | lastReset: time.Now(), 77 | cache: lnutils.SyncMap[wire.OutPoint, []byte]{}, 78 | } 79 | } 80 | 81 | func (p *prevOutCache) LogAndReset() { 82 | since := time.Since(p.lastReset).Seconds() 83 | blockPerSecond := float64(p.numBlock) / since 84 | 85 | log.Tracef("SP PrevOutCache: fetched %d previous txs (%d cache hits) "+ 86 | "for %d txns (%d with taproot outputs), cache size is %d, "+ 87 | "%.2f blocks per second", p.numTxFetch, p.numTrHit, p.numTx, 88 | p.numTrTx, p.cache.Len(), blockPerSecond) 89 | 90 | // Reset stats. 91 | p.numBlock = 0 92 | p.numTx = 0 93 | p.numTrTx = 0 94 | p.numTrHit = 0 95 | p.numTxFetch = 0 96 | 97 | p.lastReset = time.Now() 98 | } 99 | 100 | func (p *prevOutCache) Disable() { 101 | p.enabled = false 102 | p.cache = lnutils.SyncMap[wire.OutPoint, []byte]{} 103 | p.LogAndReset() 104 | } 105 | 106 | // FetchPreviousOutputScript fetches the previous output's pkScript for the 107 | // given outpoint. 108 | func (p *prevOutCache) FetchPreviousOutputScript(op wire.OutPoint) ([]byte, 109 | error) { 110 | 111 | // We assume we only visit every transaction once, so each previous 112 | // output we fetch is only fetched a single time. Thus, we can delete it 113 | // from the cache after fetching it. 
114 | pkScript, ok := p.cache.LoadAndDelete(op) 115 | if ok { 116 | p.numTrHit++ 117 | 118 | return pkScript, nil 119 | } 120 | 121 | tx, err := p.chain.GetRawTransaction(&op.Hash) 122 | if err != nil { 123 | return nil, fmt.Errorf("error fetching previous transaction: "+ 124 | "%w", err) 125 | } 126 | 127 | p.numTxFetch++ 128 | 129 | if int(op.Index) >= len(tx.MsgTx().TxOut) { 130 | return nil, fmt.Errorf("output index %d out of range for "+ 131 | "transaction %s", op.Index, op.Hash.String()) 132 | } 133 | 134 | pkScript = tx.MsgTx().TxOut[op.Index].PkScript 135 | if p.enabled { 136 | p.cache.Store(op, pkScript) 137 | } 138 | 139 | return pkScript, nil 140 | } 141 | 142 | func (p *prevOutCache) AddOutputs(tx *wire.MsgTx) { 143 | if !p.enabled { 144 | return 145 | } 146 | 147 | txHash := tx.TxHash() 148 | for txIndex, txOut := range tx.TxOut { 149 | if !txscript.IsPayToTaproot(txOut.PkScript) { 150 | continue 151 | } 152 | 153 | p.cache.Store(wire.OutPoint{ 154 | Hash: txHash, 155 | Index: uint32(txIndex), 156 | }, txOut.PkScript) 157 | } 158 | } 159 | 160 | func (p *prevOutCache) RemoveInputs(tx *wire.MsgTx) { 161 | if !p.enabled { 162 | return 163 | } 164 | 165 | for _, txIn := range tx.TxIn { 166 | p.cache.Delete(txIn.PreviousOutPoint) 167 | } 168 | } 169 | 170 | type spTweakFiles struct { 171 | sync.RWMutex 172 | 173 | quit <-chan struct{} 174 | 175 | blocksPerFile int32 176 | baseDir string 177 | chain *rpcclient.Client 178 | chainParams *chaincfg.Params 179 | h2hCache *heightToHashCache 180 | 181 | startupComplete atomic.Bool 182 | currentHeight atomic.Int32 183 | 184 | tweakData map[int32]map[int32]*btcec.PublicKey 185 | 186 | prevOutCache *prevOutCache 187 | } 188 | 189 | func newSPTweakFiles(itemsPerFile int32, chain *rpcclient.Client, 190 | quit <-chan struct{}, baseDir string, chainParams *chaincfg.Params, 191 | h2hCache *heightToHashCache) *spTweakFiles { 192 | 193 | c := &spTweakFiles{ 194 | quit: quit, 195 | blocksPerFile: itemsPerFile, 196 | baseDir: baseDir, 197 | chain: chain, 198 | chainParams: chainParams, 199 | h2hCache: h2hCache, 200 | prevOutCache: newPrevOutCache(chain), 201 | } 202 | c.clearData() 203 | 204 | return c 205 | } 206 | 207 | func (s *spTweakFiles) isStartupComplete() bool { 208 | return s.startupComplete.Load() 209 | } 210 | 211 | func (s *spTweakFiles) getCurrentHeight() int32 { 212 | return s.currentHeight.Load() 213 | } 214 | 215 | func (s *spTweakFiles) clearData() { 216 | s.Lock() 217 | defer s.Unlock() 218 | 219 | s.tweakData = make( 220 | map[int32]map[int32]*btcec.PublicKey, s.blocksPerFile, 221 | ) 222 | } 223 | 224 | func (s *spTweakFiles) updateFiles(targetHeight int32) error { 225 | log.Debugf("Updating SP tweak data in %s for network %s", s.baseDir, 226 | s.chainParams.Name) 227 | 228 | spDir := filepath.Join(s.baseDir, SPTweakFileDir) 229 | err := os.MkdirAll(spDir, DirectoryMode) 230 | if err != nil { 231 | return fmt.Errorf("error creating directory %s: %w", spDir, err) 232 | } 233 | 234 | lastBlock, err := lastFile( 235 | spDir, SPTweakFileSuffix, spTweakFileNameExtractRegex, 236 | ) 237 | if err != nil { 238 | return fmt.Errorf("error getting last SP tweak data file: %w", 239 | err) 240 | } 241 | 242 | // If we already had some blocks written, then we need to start from 243 | // the next block. 
244 | startBlock := lastBlock 245 | if lastBlock > 0 { 246 | startBlock++ 247 | } 248 | 249 | log.Debugf("Writing SP tweak data files from block %d to block %d", 250 | startBlock, targetHeight) 251 | err = s.updateCacheAndFiles(startBlock, targetHeight) 252 | if err != nil { 253 | return fmt.Errorf("error updating blocks: %w", err) 254 | } 255 | 256 | // Allow serving requests now that we're caught up. 257 | s.startupComplete.Store(true) 258 | 259 | // We can disable the UTXO cache now to free up some memory. 260 | s.prevOutCache.Disable() 261 | 262 | // Let's now go into the infinite loop of updating the SP tweak data 263 | // files whenever a new block is mined. 264 | log.Debugf("Caught up SP tweak data to best block %d, starting to "+ 265 | "poll for new blocks", targetHeight) 266 | for { 267 | select { 268 | case <-time.After(blockPollInterval): 269 | case <-s.quit: 270 | return errServerShutdown 271 | } 272 | 273 | height, err := s.chain.GetBlockCount() 274 | if err != nil { 275 | return fmt.Errorf("error getting best block: %w", err) 276 | } 277 | 278 | currentBlock := s.currentHeight.Load() 279 | if int32(height) == currentBlock { 280 | continue 281 | } 282 | 283 | log.Infof("Processing SP tweak data for new block mined at "+ 284 | "height %d", height) 285 | err = s.updateCacheAndFiles(currentBlock+1, int32(height)) 286 | if err != nil { 287 | return fmt.Errorf("error updating SP tweak data for "+ 288 | "blocks: %w", err) 289 | } 290 | } 291 | } 292 | 293 | func (s *spTweakFiles) updateCacheAndFiles(startBlock, endBlock int32) error { 294 | spDir := filepath.Join(s.baseDir, SPTweakFileDir) 295 | net := s.chainParams.Net 296 | taprootStartHeight, taprootSupported := TaprootActivationHeights[net] 297 | 298 | if !taprootSupported { 299 | log.Warnf("Silent Payments tweak data indexing enabled, "+ 300 | "but Taproot is not supported on network %s", 301 | s.chainParams.Name) 302 | 303 | return nil 304 | } 305 | 306 | // Generate the silent payment tweak data as requested. 307 | for i := startBlock; i <= endBlock; i++ { 308 | // Were we interrupted? 309 | select { 310 | case <-s.quit: 311 | return errServerShutdown 312 | default: 313 | } 314 | 315 | s.Lock() 316 | s.tweakData[i] = make(map[int32]*btcec.PublicKey) 317 | s.Unlock() 318 | 319 | // We don't look at blocks before Taproot activation, as Silent 320 | // Payments require Taproot. 321 | if i >= taprootStartHeight { 322 | blockHash, err := s.h2hCache.getBlockHash(i) 323 | if err != nil { 324 | return fmt.Errorf("error getting block hash "+ 325 | "for height %d: %w", i, err) 326 | } 327 | 328 | block, err := s.chain.GetBlock(blockHash) 329 | if err != nil { 330 | return fmt.Errorf("error getting block for SP "+ 331 | "tweak data: %w", err) 332 | } 333 | s.prevOutCache.numBlock++ 334 | 335 | err = s.indexBlockSPTweakData(i, block) 336 | if err != nil { 337 | return fmt.Errorf("error indexing SP tweak "+ 338 | "data: %w", err) 339 | } 340 | } 341 | 342 | if (i+1)%s.blocksPerFile == 0 { 343 | fileStart := i - s.blocksPerFile + 1 344 | spTweakFileName := fmt.Sprintf( 345 | SPTweakFileNamePattern, spDir, fileStart, i, 346 | ) 347 | 348 | log.Debugf("Reached SP tweak data height %d, writing "+ 349 | "file starting at %d, containing %d items to "+ 350 | "%s", i, fileStart, s.blocksPerFile, 351 | spTweakFileName) 352 | 353 | err := s.writeSPTweaks(spTweakFileName, fileStart, i) 354 | if err != nil { 355 | return fmt.Errorf("error writing SP tweak "+ 356 | "data: %w", err) 357 | } 358 | 359 | s.prevOutCache.LogAndReset() 360 | s.clearData() 361 | } 362 | 363 | s.currentHeight.Store(i) 364 | } 365 | 366 | return nil 367 | } 368 | 369 | // indexBlockSPTweakData examines the given block for transactions that 370 | // contain Taproot outputs. For each such transaction, we compute the Silent 371 | // Payments tweak data and store it in the provided index map, keyed by the 372 | // transaction index within the block. 373 | func (s *spTweakFiles) indexBlockSPTweakData(height int32, 374 | block *wire.MsgBlock) error { 375 | 376 | for txIndex, tx := range block.Transactions { 377 | // Skip coinbase transactions, they can't contain Silent Payment 378 | // outputs. 379 | if blockchain.IsCoinBaseTx(tx) { 380 | continue 381 | } 382 | 383 | s.prevOutCache.AddOutputs(tx) 384 | s.prevOutCache.numTx++ 385 | 386 | // Only transactions with Taproot outputs can have Silent 387 | // Payments. 388 | if !sp.HasTaprootOutputs(tx) { 389 | s.prevOutCache.RemoveInputs(tx) 390 | 391 | continue 392 | } 393 | s.prevOutCache.numTrTx++ 394 | 395 | tweakPubKey, err := sp.TransactionTweakData( 396 | tx, s.prevOutCache.FetchPreviousOutputScript, log, 397 | ) 398 | if err != nil { 399 | s.prevOutCache.RemoveInputs(tx) 400 | 401 | return fmt.Errorf("error calculating SP tweak "+ 402 | "data for tx index %d in block at height "+ 403 | "%d: %w", txIndex, height, err) 404 | } 405 | 406 | s.prevOutCache.RemoveInputs(tx) 407 | if tweakPubKey == nil { 408 | continue 409 | } 410 | 411 | // Store the tweak data for this transaction index.
412 | s.Lock() 413 | s.tweakData[height][int32(txIndex)] = tweakPubKey 414 | s.Unlock() 415 | } 416 | 417 | return nil 418 | } 419 | 420 | func (s *spTweakFiles) writeSPTweaks(fileName string, startIndex, 421 | endIndex int32) error { 422 | 423 | s.RLock() 424 | defer s.RUnlock() 425 | 426 | log.Debugf("Writing SP tweak data file %s", fileName) 427 | file, err := os.Create(fileName) 428 | if err != nil { 429 | return fmt.Errorf("error creating file %s: %w", fileName, err) 430 | } 431 | 432 | defer func() { 433 | err = file.Close() 434 | if err != nil { 435 | log.Errorf("Error closing file %s: %w", fileName, err) 436 | } 437 | }() 438 | 439 | return s.serializeSPTweakData(file, startIndex, endIndex) 440 | } 441 | 442 | func (s *spTweakFiles) serializeSPTweakData(w io.Writer, startIndex, 443 | endIndex int32) error { 444 | 445 | // We need to add plus one here since the end index is inclusive in the 446 | // for loop below (j <= endIndex). 447 | numBlocks := endIndex - startIndex + 1 448 | 449 | spTweakFile := &SPTweakFile{ 450 | StartHeight: startIndex, 451 | NumBlocks: numBlocks, 452 | Blocks: make([]SPTweakBlock, 0, numBlocks), 453 | } 454 | for j := startIndex; j <= endIndex; j++ { 455 | transactionTweaks, ok := s.tweakData[j] 456 | if !ok { 457 | return fmt.Errorf("invalid height %d", j) 458 | } 459 | 460 | if len(transactionTweaks) == 0 { 461 | spTweakFile.Blocks = append(spTweakFile.Blocks, nil) 462 | 463 | continue 464 | } 465 | 466 | txIndexes := slices.Collect(maps.Keys(transactionTweaks)) 467 | slices.Sort(txIndexes) 468 | 469 | block := make(SPTweakBlock, len(transactionTweaks)) 470 | for _, txIndex := range txIndexes { 471 | pubKey := transactionTweaks[txIndex] 472 | if pubKey == nil { 473 | return fmt.Errorf("nil pubkey for height %d "+ 474 | "tx index %d", j, txIndex) 475 | } 476 | 477 | block[txIndex] = hex.EncodeToString( 478 | pubKey.SerializeCompressed(), 479 | ) 480 | } 481 | spTweakFile.Blocks = append(spTweakFile.Blocks, block) 482 | } 483 | 484 | err := json.NewEncoder(w).Encode(spTweakFile) 485 | if err != nil { 486 | return fmt.Errorf("error writing SP tweak data: %w", err) 487 | } 488 | 489 | return nil 490 | } 491 | -------------------------------------------------------------------------------- /handlers.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | _ "embed" 6 | "encoding/binary" 7 | "encoding/hex" 8 | "encoding/json" 9 | "errors" 10 | "fmt" 11 | "io" 12 | "net/http" 13 | "os" 14 | "path/filepath" 15 | "strconv" 16 | "time" 17 | 18 | "github.com/btcsuite/btcd/chaincfg/chainhash" 19 | "github.com/btcsuite/btcd/wire" 20 | "github.com/gorilla/mux" 21 | ) 22 | 23 | var ( 24 | maxAgeTemporary = time.Second 25 | maxAgeMemory = time.Minute 26 | maxAgeDisk = time.Hour * 24 * 365 27 | 28 | // importMetadataSize is the size of the metadata that is prepended to 29 | // each imported file. It consists of 4 bytes (uint32 little endian) for 30 | // the Bitcoin network identifier, 1 byte for the version, 1 byte for 31 | // the file type (0 for block headers, 1 for compact filter headers) and 32 | // 4 bytes (uint32 little endian) for the start header. 33 | importMetadataSize = 4 + 1 + 1 + 4 34 | 35 | // typeBlockHeader is the byte value used to indicate that the file 36 | // contains block headers. 37 | typeBlockHeader = byte(0) 38 | 39 | // typeFilterHeader is the byte value used to indicate that the file 40 | // contains compact filter headers. 
41 | typeFilterHeader = byte(1) 42 | 43 | // errUnavailableInLightMode is an error indicating that a certain HTTP 44 | // endpoint isn't available when running in light mode. 45 | errUnavailableInLightMode = errors.New( 46 | "endpoint not available in light mode", 47 | ) 48 | 49 | // errUnavailableSPTweakDataTurnedOff is an error indicating that the SP 50 | // tweak data indexing is turned off. 51 | errUnavailableSPTweakDataTurnedOff = errors.New( 52 | "SP tweak data indexing is turned off", 53 | ) 54 | 55 | // errStillStartingUp is an error indicating that the server is still 56 | // starting up and not ready to serve requests yet. 57 | errStillStartingUp = errors.New( 58 | "server still starting up, please try again later", 59 | ) 60 | 61 | // errInvalidSyncStatus is an error indicating that the sync status is 62 | // invalid, caused by a bad configuration or unexpected behavior of the 63 | // backend. 64 | errInvalidSyncStatus = errors.New("invalid sync status") 65 | 66 | // errInvalidHashLength is an error indicating that the provided hash 67 | // length is invalid. 68 | errInvalidHashLength = errors.New("invalid hash length") 69 | 70 | // errInvalidBlockHash is an error indicating that the provided block 71 | // hash is invalid. 72 | errInvalidBlockHash = errors.New("invalid block hash") 73 | 74 | // errInvalidTxHash is an error indicating that the provided transaction 75 | // hash is invalid. 76 | errInvalidTxHash = errors.New("invalid transaction hash") 77 | 78 | //go:embed index.html 79 | indexHTML string 80 | ) 81 | 82 | const ( 83 | // headerImportVersion is the current version of the import file format. 84 | headerImportVersion = 0 85 | 86 | HeaderCache = "Cache-Control" 87 | HeaderCORS = "Access-Control-Allow-Origin" 88 | HeaderCORSMethods = "Access-Control-Allow-Methods" 89 | 90 | status400 = http.StatusBadRequest 91 | status500 = http.StatusInternalServerError 92 | status503 = http.StatusServiceUnavailable 93 | ) 94 | 95 | type serializable interface { 96 | Serialize(w io.Writer) error 97 | } 98 | 99 | type blockProcessor interface { 100 | isStartupComplete() bool 101 | getCurrentHeight() int32 102 | } 103 | 104 | func (s *server) createRouter() *mux.Router { 105 | router := mux.NewRouter() 106 | router.HandleFunc("/", s.indexRequestHandler) 107 | router.HandleFunc("/index.html", s.indexRequestHandler) 108 | router.HandleFunc("/status", s.statusRequestHandler) 109 | router.HandleFunc("/headers/{height:[0-9]+}", s.headersRequestHandler) 110 | router.HandleFunc( 111 | "/headers/import/{height:[0-9]+}", 112 | s.headersImportRequestHandler, 113 | ) 114 | router.HandleFunc( 115 | "/filter-headers/{height:[0-9]+}", 116 | s.filterHeadersRequestHandler, 117 | ) 118 | router.HandleFunc( 119 | "/filter-headers/import/{height:[0-9]+}", 120 | s.filterHeadersImportRequestHandler, 121 | ) 122 | router.HandleFunc("/filters/{height:[0-9]+}", s.filtersRequestHandler) 123 | router.HandleFunc( 124 | "/sp/tweak-data/{height:[0-9]+}", 125 | s.spTweakDataRequestHandler, 126 | ) 127 | router.HandleFunc("/block/{hash:[0-9a-f]+}", s.blockRequestHandler) 128 | router.HandleFunc( 129 | "/tx/out-proof/{txid:[0-9a-f]+}", s.txOutProofRequestHandler, 130 | ) 131 | router.HandleFunc( 132 | "/tx/raw/{txid:[0-9a-f]+}", s.rawTxRequestHandler, 133 | ) 134 | 135 | return router 136 | } 137 | 138 | func (s *server) indexRequestHandler(w http.ResponseWriter, _ *http.Request) { 139 | addCorsHeaders(w) 140 | w.Header().Set("Content-Type", "text/html") 141 | w.WriteHeader(http.StatusOK) 142 | _, _ = 
w.Write([]byte(indexHTML)) 143 | } 144 | 145 | func (s *server) statusRequestHandler(w http.ResponseWriter, _ *http.Request) { 146 | s.h2hCache.RLock() 147 | defer s.h2hCache.RUnlock() 148 | 149 | bestHeight := s.headerFiles.getCurrentHeight() 150 | bestBlock, err := s.h2hCache.getBlockHash(bestHeight) 151 | if err != nil { 152 | sendError(w, status500, errInvalidSyncStatus) 153 | return 154 | } 155 | 156 | bestFilter, ok := s.headerFiles.filterHeaders[*bestBlock] 157 | if !ok { 158 | sendError(w, status500, errInvalidSyncStatus) 159 | return 160 | } 161 | 162 | var ( 163 | spHeight int32 164 | spSynced bool 165 | ) 166 | if s.spTweakFiles != nil { 167 | spHeight = s.spTweakFiles.currentHeight.Load() 168 | spSynced = bestHeight == spHeight 169 | } 170 | 171 | status := &Status{ 172 | ChainGenesisHash: s.chainParams.GenesisHash.String(), 173 | ChainName: s.chainParams.Name, 174 | BestBlockHeight: bestHeight, 175 | BestBlockHash: bestBlock.String(), 176 | BestFilterHeight: s.cFilterFiles.currentHeight.Load(), 177 | BestFilterHeader: bestFilter.String(), 178 | BestSPTweakHeight: spHeight, 179 | EntriesPerHeaderFile: s.headersPerFile, 180 | EntriesPerFilterFile: s.filtersPerFile, 181 | EntriesPerSPTweakFile: s.spTweaksPerFile, 182 | } 183 | 184 | // nolint:gocritic 185 | status.AllFilesSynced = bestHeight == status.BestFilterHeight && 186 | spSynced 187 | 188 | sendJSON(w, status, maxAgeMemory) 189 | } 190 | 191 | func (s *server) headersRequestHandler(w http.ResponseWriter, r *http.Request) { 192 | s.heightBasedRequestHandler( 193 | w, r, HeaderFileDir, HeaderFileNamePattern, 194 | int64(s.headersPerFile), s.headerFiles.serializeHeaders, 195 | s.headerFiles, 196 | ) 197 | } 198 | 199 | func (s *server) headersImportRequestHandler(w http.ResponseWriter, 200 | r *http.Request) { 201 | 202 | s.heightBasedImportRequestHandler( 203 | w, r, HeaderFileDir, HeaderFileNamePattern, s.headersPerFile, 204 | s.headerFiles.serializeHeaders, typeBlockHeader, 205 | ) 206 | } 207 | 208 | func (s *server) filterHeadersRequestHandler(w http.ResponseWriter, 209 | r *http.Request) { 210 | 211 | s.heightBasedRequestHandler( 212 | w, r, HeaderFileDir, FilterHeaderFileNamePattern, 213 | int64(s.headersPerFile), s.headerFiles.serializeFilterHeaders, 214 | s.headerFiles, 215 | ) 216 | } 217 | 218 | func (s *server) filterHeadersImportRequestHandler(w http.ResponseWriter, 219 | r *http.Request) { 220 | 221 | s.heightBasedImportRequestHandler( 222 | w, r, HeaderFileDir, FilterHeaderFileNamePattern, 223 | s.headersPerFile, s.headerFiles.serializeFilterHeaders, 224 | typeFilterHeader, 225 | ) 226 | } 227 | 228 | func (s *server) filtersRequestHandler(w http.ResponseWriter, r *http.Request) { 229 | s.heightBasedRequestHandler( 230 | w, r, FilterFileDir, FilterFileNamePattern, 231 | int64(s.filtersPerFile), s.cFilterFiles.serializeFilters, 232 | s.cFilterFiles, 233 | ) 234 | } 235 | 236 | func (s *server) spTweakDataRequestHandler(w http.ResponseWriter, 237 | r *http.Request) { 238 | 239 | if s.spTweakFiles == nil { 240 | sendError(w, status503, errUnavailableSPTweakDataTurnedOff) 241 | return 242 | } 243 | 244 | s.heightBasedRequestHandler( 245 | w, r, SPTweakFileDir, SPTweakFileNamePattern, 246 | int64(s.spTweaksPerFile), s.spTweakFiles.serializeSPTweakData, 247 | s.spTweakFiles, 248 | ) 249 | } 250 | 251 | func (s *server) heightBasedRequestHandler(w http.ResponseWriter, 252 | r *http.Request, subDir, fileNamePattern string, entriesPerFile int64, 253 | serializeCb func(w io.Writer, startIndex, endIndex int32) error, 254 
| processor blockProcessor) { 255 | 256 | // These kinds of requests aren't available in light mode. 257 | if s.lightMode { 258 | sendError(w, status503, errUnavailableInLightMode) 259 | return 260 | } 261 | 262 | if !processor.isStartupComplete() { 263 | sendError(w, status503, errStillStartingUp) 264 | return 265 | } 266 | 267 | startHeight, err := parseRequestParamInt64(r, "height") 268 | if err != nil { 269 | sendError(w, status400, err) 270 | return 271 | } 272 | 273 | err = s.checkStartHeight(processor, startHeight, int32(entriesPerFile)) 274 | if err != nil { 275 | sendError(w, status400, err) 276 | return 277 | } 278 | 279 | srcDir := filepath.Join(s.baseDir, subDir) 280 | fileName := fmt.Sprintf( 281 | fileNamePattern, srcDir, startHeight, 282 | startHeight+entriesPerFile-1, 283 | ) 284 | if fileExists(fileName) { 285 | addCorsHeaders(w) 286 | addCacheHeaders(w, maxAgeDisk) 287 | w.WriteHeader(http.StatusOK) 288 | if err := streamFile(w, fileName); err != nil { 289 | log.Errorf("Error while streaming file: %v", err) 290 | sendError(w, status500, err) 291 | } 292 | 293 | return 294 | } 295 | 296 | s.h2hCache.RLock() 297 | defer s.h2hCache.RUnlock() 298 | 299 | if _, err := s.h2hCache.getBlockHash(int32(startHeight)); err != nil { 300 | sendError(w, status400, fmt.Errorf("invalid height")) 301 | return 302 | } 303 | 304 | // The requested start height wasn't yet in a file, so we need to 305 | // stream the headers from memory. 306 | addCorsHeaders(w) 307 | addCacheHeaders(w, maxAgeMemory) 308 | w.WriteHeader(http.StatusOK) 309 | err = serializeCb(w, int32(startHeight), processor.getCurrentHeight()) 310 | if err != nil { 311 | log.Errorf("Error serializing: %v", err) 312 | } 313 | } 314 | 315 | func (s *server) heightBasedImportRequestHandler(w http.ResponseWriter, 316 | r *http.Request, subDir, fileNamePattern string, entriesPerFile int32, 317 | serializeCb func(w io.Writer, startIndex, endIndex int32) error, 318 | fileType byte) { 319 | 320 | // These kinds of requests aren't available in light mode. 321 | if s.lightMode { 322 | sendError(w, status503, errUnavailableInLightMode) 323 | return 324 | } 325 | 326 | if !s.headerFiles.startupComplete.Load() { 327 | sendError(w, status503, errStillStartingUp) 328 | return 329 | } 330 | 331 | endHeight, err := parseRequestParamInt64(r, "height") 332 | if err != nil { 333 | sendError(w, status400, err) 334 | return 335 | } 336 | 337 | err = s.checkEndHeight(s.headerFiles, endHeight, entriesPerFile) 338 | if err != nil { 339 | sendError(w, status400, err) 340 | return 341 | } 342 | 343 | // We allow the end height to be equal to the current height, in which 344 | // case we serve the content from memory. In that case we mark the 345 | // whole response as short-term cacheable only. 346 | cache := maxAgeMemory 347 | 348 | // We also check that we don't need to serve partial content from 349 | // files, as that would make things a bit more tricky. 350 | maxCacheFileEndHeight := int64( 351 | (s.headerFiles.currentHeight.Load() / entriesPerFile) * 352 | entriesPerFile, 353 | ) 354 | 355 | // We don't want to return partial files. So for any range that can be 356 | // served only from files (i.e. up to the last complete cache file), we 357 | // require the end height to be a multiple of the entries per file. 358 | if endHeight <= maxCacheFileEndHeight { 359 | // We're in the file-only range, so we can set the cache 360 | // duration to disk cache time. 361 | cache = maxAgeDisk 362 | 363 | // Make sure we'll be able to serve a full cache file.
364 | if endHeight%int64(entriesPerFile) != 0 { 365 | err = fmt.Errorf("invalid end height %d, must be a "+ 366 | "multiple of %d", endHeight, entriesPerFile) 367 | sendError(w, status400, err) 368 | return 369 | } 370 | } 371 | 372 | addCorsHeaders(w) 373 | addCacheHeaders(w, cache) 374 | w.WriteHeader(http.StatusOK) 375 | 376 | metadata := make([]byte, importMetadataSize) 377 | binary.LittleEndian.PutUint32(metadata[0:4], uint32(s.chainParams.Net)) 378 | metadata[4] = headerImportVersion 379 | metadata[5] = fileType 380 | 381 | // We always start at height 0 for the import. 382 | binary.LittleEndian.PutUint32(metadata[6:10], 0) 383 | 384 | if _, err := w.Write(metadata); err != nil { 385 | log.Errorf("Error writing metadata: %v", err) 386 | return 387 | } 388 | 389 | // lastHeight is the beginning block of each cache file. 390 | lastHeight := int64(0) 391 | for ; lastHeight <= endHeight; lastHeight += int64(entriesPerFile) { 392 | // We always start at 0, so we'll always have one entry less 393 | // in the files than the even height we require the user to 394 | // enter (non-inclusive, as described in the API docs). 395 | if lastHeight == endHeight { 396 | return 397 | } 398 | 399 | srcDir := filepath.Join(s.baseDir, subDir) 400 | fileName := fmt.Sprintf( 401 | fileNamePattern, srcDir, lastHeight, 402 | lastHeight+int64(entriesPerFile)-1, 403 | ) 404 | 405 | if !fileExists(fileName) { 406 | break 407 | } 408 | 409 | if err := streamFile(w, fileName); err != nil { 410 | log.Errorf("Error while streaming file: %v", err) 411 | sendError(w, status500, err) 412 | } 413 | } 414 | 415 | s.h2hCache.RLock() 416 | defer s.h2hCache.RUnlock() 417 | 418 | if _, err := s.h2hCache.getBlockHash(int32(lastHeight)); err != nil { 419 | sendError(w, status400, fmt.Errorf("invalid height")) 420 | return 421 | } 422 | 423 | // The requested end height goes over what's in files, so we need to 424 | // stream the remaining headers from memory. 
425 | err = serializeCb(w, int32(lastHeight), int32(endHeight)) 426 | if err != nil { 427 | log.Errorf("Error serializing: %v", err) 428 | } 429 | } 430 | 431 | func (s *server) blockRequestHandler(w http.ResponseWriter, r *http.Request) { 432 | blockHash, err := parseRequestParamChainHash(r, "hash") 433 | if err != nil { 434 | sendError(w, status400, fmt.Errorf("%w: %w", 435 | errInvalidBlockHash, err)) 436 | return 437 | } 438 | 439 | block, err := s.chain.GetBlock(blockHash) 440 | if err != nil { 441 | sendError(w, status500, err) 442 | return 443 | } 444 | 445 | sendBinary(w, block, maxAgeDisk) 446 | } 447 | 448 | func (s *server) txOutProofRequestHandler(w http.ResponseWriter, 449 | r *http.Request) { 450 | 451 | txHash, err := parseRequestParamChainHash(r, "txid") 452 | if err != nil { 453 | sendError(w, status400, fmt.Errorf("%w: %w", errInvalidTxHash, 454 | err)) 455 | return 456 | } 457 | 458 | merkleBlock, err := s.chain.GetTxOutProof( 459 | []string{txHash.String()}, nil, 460 | ) 461 | if err != nil { 462 | sendError(w, status500, err) 463 | return 464 | } 465 | 466 | blockHash := merkleBlock.Header.BlockHash() 467 | verboseHeader, err := s.chain.GetBlockHeaderVerbose(&blockHash) 468 | if err != nil { 469 | sendError(w, status500, err) 470 | return 471 | } 472 | 473 | var buf bytes.Buffer 474 | err = merkleBlock.BtcEncode( 475 | &buf, wire.ProtocolVersion, wire.WitnessEncoding, 476 | ) 477 | if err != nil { 478 | sendError(w, status500, err) 479 | return 480 | } 481 | 482 | maxAge := maxAgeDisk 483 | safeHeight := s.headerFiles.currentHeight.Load() - 484 | int32(s.reOrgSafeDepth) 485 | if verboseHeader.Height > safeHeight { 486 | maxAge = maxAgeTemporary 487 | } 488 | 489 | sendRawBytes(w, buf.Bytes(), maxAge) 490 | } 491 | 492 | func (s *server) rawTxRequestHandler(w http.ResponseWriter, r *http.Request) { 493 | txHash, err := parseRequestParamChainHash(r, "txid") 494 | if err != nil { 495 | sendError(w, status400, fmt.Errorf("%w: %w", errInvalidTxHash, 496 | err)) 497 | return 498 | } 499 | 500 | tx, err := s.chain.GetRawTransaction(txHash) 501 | if err != nil { 502 | sendError(w, status500, err) 503 | return 504 | } 505 | 506 | sendBinary(w, tx.MsgTx(), maxAgeDisk) 507 | } 508 | 509 | func (s *server) checkStartHeight(processor blockProcessor, height int64, 510 | entriesPerFile int32) error { 511 | 512 | if int32(height) > processor.getCurrentHeight() { 513 | return fmt.Errorf("start height %d is greater than current "+ 514 | "height %d", height, processor.getCurrentHeight()) 515 | } 516 | 517 | if height != 0 && height%int64(entriesPerFile) != 0 { 518 | return fmt.Errorf("invalid start height %d, must be zero or "+ 519 | "a multiple of %d", height, entriesPerFile) 520 | } 521 | 522 | return nil 523 | } 524 | 525 | func (s *server) checkEndHeight(processor blockProcessor, height int64, 526 | entriesPerFile int32) error { 527 | 528 | if int32(height) > processor.getCurrentHeight() { 529 | return fmt.Errorf("end height %d is greater than current "+ 530 | "height %d", height, processor.getCurrentHeight()) 531 | } 532 | 533 | if height == 0 { 534 | return fmt.Errorf("invalid end height %d, must be a multiple "+ 535 | "of %d", height, entriesPerFile) 536 | } 537 | 538 | return nil 539 | } 540 | 541 | func sendJSON(w http.ResponseWriter, v any, maxAge time.Duration) { 542 | addCacheHeaders(w, maxAge) 543 | addCorsHeaders(w) 544 | w.Header().Set("Content-Type", "application/json") 545 | w.WriteHeader(http.StatusOK) 546 | 547 | err := json.NewEncoder(w).Encode(v) 548 | if err != nil 
{ 549 | log.Errorf("Error serializing status: %v", err) 550 | } 551 | } 552 | 553 | func sendBinary(w http.ResponseWriter, v serializable, maxAge time.Duration) { 554 | addCacheHeaders(w, maxAge) 555 | addCorsHeaders(w) 556 | w.WriteHeader(http.StatusOK) 557 | err := v.Serialize(w) 558 | if err != nil { 559 | log.Errorf("Error serializing: %v", err) 560 | } 561 | } 562 | 563 | func sendRawBytes(w http.ResponseWriter, payload []byte, maxAge time.Duration) { 564 | addCacheHeaders(w, maxAge) 565 | addCorsHeaders(w) 566 | w.WriteHeader(http.StatusOK) 567 | _, err := w.Write(payload) 568 | if err != nil { 569 | log.Errorf("Error writing payload: %v", err) 570 | } 571 | } 572 | 573 | func addCacheHeaders(w http.ResponseWriter, maxAge time.Duration) { 574 | // A max-age of 0 means no caching at all, as something is not safe 575 | // to cache yet. 576 | if maxAge == 0 { 577 | w.Header().Add(HeaderCache, "no-cache") 578 | 579 | return 580 | } 581 | 582 | w.Header().Add( 583 | HeaderCache, fmt.Sprintf("max-age=%d", int64(maxAge.Seconds())), 584 | ) 585 | } 586 | 587 | // addCorsHeaders adds HTTP header fields that are required for Cross Origin 588 | // Resource Sharing. These header fields are needed to signal to the browser 589 | // that it's ok to allow requests to subdomains, even if the JS was served from 590 | // the top level domain. 591 | func addCorsHeaders(w http.ResponseWriter) { 592 | w.Header().Add(HeaderCORS, "*") 593 | w.Header().Add(HeaderCORSMethods, "GET, POST, OPTIONS") 594 | } 595 | 596 | func parseRequestParamInt64(r *http.Request, name string) (int64, error) { 597 | vars := mux.Vars(r) 598 | paramStr := vars[name] 599 | 600 | if len(paramStr) == 0 { 601 | return 0, fmt.Errorf("invalid value for parameter %s", name) 602 | } 603 | 604 | paramValue, err := strconv.ParseInt(paramStr, 10, 64) 605 | if err != nil { 606 | return 0, fmt.Errorf("invalid value for parameter %s", name) 607 | } 608 | 609 | return paramValue, nil 610 | } 611 | 612 | func parseRequestParamChainHash(r *http.Request, name string) (*chainhash.Hash, 613 | error) { 614 | 615 | vars := mux.Vars(r) 616 | blockHash := vars[name] 617 | 618 | if len(blockHash) != hex.EncodedLen(chainhash.HashSize) { 619 | return nil, errInvalidHashLength 620 | } 621 | 622 | hash, err := chainhash.NewHashFromStr(blockHash) 623 | if err != nil { 624 | return nil, err 625 | } 626 | 627 | return hash, nil 628 | } 629 | 630 | func sendError(w http.ResponseWriter, status int, err error) { 631 | // By default, we don't cache error responses. 632 | cache := time.Duration(0) 633 | 634 | // But if it's a user error (4xx), we can cache it for a short time. 635 | if status >= 400 && status < 500 { 636 | cache = maxAgeMemory 637 | } 638 | 639 | addCacheHeaders(w, cache) 640 | addCorsHeaders(w) 641 | w.WriteHeader(status) 642 | _, _ = w.Write([]byte(err.Error())) 643 | } 644 | 645 | // fileExists reports whether the named file or directory exists.
646 | func fileExists(name string) bool { 647 | if _, err := os.Stat(name); err != nil { 648 | if os.IsNotExist(err) { 649 | return false 650 | } 651 | } 652 | return true 653 | } 654 | 655 | func streamFile(w io.Writer, fileName string) error { 656 | f, err := os.Open(fileName) 657 | if err != nil { 658 | return err 659 | } 660 | 661 | _, err = io.Copy(w, f) 662 | return err 663 | } 664 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/guggero/block-dn 2 | 3 | go 1.24.9 4 | 5 | require ( 6 | github.com/btcsuite/btcd v0.24.3-0.20250407183411-cd05d9ad3d05 7 | github.com/btcsuite/btcd/btcec/v2 v2.3.6 8 | github.com/btcsuite/btcd/btcutil v1.1.5 9 | github.com/btcsuite/btcd/btcutil/psbt v1.1.8 10 | github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 11 | github.com/btcsuite/btclog/v2 v2.0.1-0.20250728225537-6090e87c6c5b 12 | github.com/btcsuite/btcwallet v0.16.17 13 | github.com/btcsuite/btcwallet/walletdb v1.5.1 14 | github.com/gorilla/mux v1.8.0 15 | github.com/lightningnetwork/lnd v0.19.3-beta 16 | github.com/lightningnetwork/lnd/fn/v2 v2.0.9 17 | github.com/spf13/cobra v1.10.1 18 | github.com/stretchr/testify v1.11.1 19 | ) 20 | 21 | require ( 22 | 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect 23 | 4d63.com/gochecknoglobals v0.2.2 // indirect 24 | codeberg.org/chavacava/garif v0.2.0 // indirect 25 | dario.cat/mergo v1.0.2 // indirect 26 | dev.gaijin.team/go/exhaustruct/v4 v4.0.0 // indirect 27 | dev.gaijin.team/go/golib v0.6.0 // indirect 28 | github.com/4meepo/tagalign v1.4.3 // indirect 29 | github.com/Abirdcfly/dupword v0.1.7 // indirect 30 | github.com/AdminBenni/iota-mixing v1.0.0 // indirect 31 | github.com/AlwxSin/noinlineerr v1.0.5 // indirect 32 | github.com/Antonboom/errname v1.1.1 // indirect 33 | github.com/Antonboom/nilnil v1.1.1 // indirect 34 | github.com/Antonboom/testifylint v1.6.4 // indirect 35 | github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect 36 | github.com/BurntSushi/toml v1.5.0 // indirect 37 | github.com/Djarvur/go-err113 v0.1.1 // indirect 38 | github.com/Masterminds/semver/v3 v3.4.0 // indirect 39 | github.com/Microsoft/go-winio v0.6.1 // indirect 40 | github.com/MirrexOne/unqueryvet v1.2.1 // indirect 41 | github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e // indirect 42 | github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82 // indirect 43 | github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect 44 | github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect 45 | github.com/Yawning/aez v0.0.0-20211027044916-e49e68abd344 // indirect 46 | github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect 47 | github.com/aead/siphash v1.0.1 // indirect 48 | github.com/alecthomas/chroma/v2 v2.20.0 // indirect 49 | github.com/alecthomas/go-check-sumtype v0.3.1 // indirect 50 | github.com/alexkohler/nakedret/v2 v2.0.6 // indirect 51 | github.com/alexkohler/prealloc v1.0.0 // indirect 52 | github.com/alfatraining/structtag v1.0.0 // indirect 53 | github.com/alingse/asasalint v0.0.11 // indirect 54 | github.com/alingse/nilnesserr v0.2.0 // indirect 55 | github.com/ashanbrown/forbidigo/v2 v2.3.0 // indirect 56 | github.com/ashanbrown/makezero/v2 v2.1.0 // indirect 57 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect 58 | github.com/beorn7/perks v1.0.1 // indirect 59 | github.com/bkielbasa/cyclop v1.2.3 // indirect 60 | 
github.com/blizzy78/varnamelen v0.8.0 // indirect 61 | github.com/bombsimon/wsl/v4 v4.7.0 // indirect 62 | github.com/bombsimon/wsl/v5 v5.3.0 // indirect 63 | github.com/breml/bidichk v0.3.3 // indirect 64 | github.com/breml/errchkjson v0.4.1 // indirect 65 | github.com/btcsuite/btcd/v2transport v1.0.1 // indirect 66 | github.com/btcsuite/btclog v1.0.0 // indirect 67 | github.com/btcsuite/btcwallet/wallet/txauthor v1.3.5 // indirect 68 | github.com/btcsuite/btcwallet/wallet/txrules v1.2.2 // indirect 69 | github.com/btcsuite/btcwallet/wallet/txsizes v1.2.5 // indirect 70 | github.com/btcsuite/btcwallet/wtxmgr v1.5.6 // indirect 71 | github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect 72 | github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect 73 | github.com/btcsuite/winsvc v1.0.0 // indirect 74 | github.com/butuzov/ireturn v0.4.0 // indirect 75 | github.com/butuzov/mirror v1.3.0 // indirect 76 | github.com/catenacyber/perfsprint v0.10.0 // indirect 77 | github.com/ccojocar/zxcvbn-go v1.0.4 // indirect 78 | github.com/cenkalti/backoff/v4 v4.2.1 // indirect 79 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 80 | github.com/charithe/durationcheck v0.0.11 // indirect 81 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect 82 | github.com/charmbracelet/lipgloss v1.1.0 // indirect 83 | github.com/charmbracelet/x/ansi v0.8.0 // indirect 84 | github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect 85 | github.com/charmbracelet/x/term v0.2.1 // indirect 86 | github.com/ckaznocha/intrange v0.3.1 // indirect 87 | github.com/containerd/continuity v0.3.0 // indirect 88 | github.com/coreos/go-semver v0.3.0 // indirect 89 | github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect 90 | github.com/coreos/go-systemd/v22 v22.3.2 // indirect 91 | github.com/curioswitch/go-reassign v0.3.0 // indirect 92 | github.com/daixiang0/gci v0.13.7 // indirect 93 | github.com/dave/dst v0.27.3 // indirect 94 | github.com/davecgh/go-spew v1.1.1 // indirect 95 | github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect 96 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect 97 | github.com/decred/dcrd/lru v1.1.2 // indirect 98 | github.com/denis-tingaikin/go-header v0.5.0 // indirect 99 | github.com/dlclark/regexp2 v1.11.5 // indirect 100 | github.com/docker/cli v28.1.1+incompatible // indirect 101 | github.com/docker/docker v28.1.1+incompatible // indirect 102 | github.com/docker/go-connections v0.4.0 // indirect 103 | github.com/docker/go-units v0.5.0 // indirect 104 | github.com/dustin/go-humanize v1.0.1 // indirect 105 | github.com/ettle/strcase v0.2.0 // indirect 106 | github.com/fatih/color v1.18.0 // indirect 107 | github.com/fatih/structtag v1.2.0 // indirect 108 | github.com/fergusstrange/embedded-postgres v1.25.0 // indirect 109 | github.com/firefart/nonamedreturns v1.0.6 // indirect 110 | github.com/fsnotify/fsnotify v1.5.4 // indirect 111 | github.com/fzipp/gocyclo v0.6.0 // indirect 112 | github.com/ghostiam/protogetter v0.3.17 // indirect 113 | github.com/go-critic/go-critic v0.14.2 // indirect 114 | github.com/go-logr/logr v1.4.3 // indirect 115 | github.com/go-logr/stdr v1.2.2 // indirect 116 | github.com/go-toolsmith/astcast v1.1.0 // indirect 117 | github.com/go-toolsmith/astcopy v1.1.0 // indirect 118 | github.com/go-toolsmith/astequal v1.2.0 // indirect 119 | github.com/go-toolsmith/astfmt v1.1.0 // indirect 120 | github.com/go-toolsmith/astp v1.1.0 // indirect 
121 | github.com/go-toolsmith/strparse v1.1.0 // indirect 122 | github.com/go-toolsmith/typep v1.1.0 // indirect 123 | github.com/go-viper/mapstructure/v2 v2.4.0 // indirect 124 | github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect 125 | github.com/gobwas/glob v0.2.3 // indirect 126 | github.com/godoc-lint/godoc-lint v0.10.1 // indirect 127 | github.com/gofrs/flock v0.13.0 // indirect 128 | github.com/gofrs/uuid v4.4.0+incompatible // indirect 129 | github.com/gogo/protobuf v1.3.2 // indirect 130 | github.com/golang-jwt/jwt/v4 v4.5.2 // indirect 131 | github.com/golang-migrate/migrate/v4 v4.17.0 // indirect 132 | github.com/golang/protobuf v1.5.4 // indirect 133 | github.com/golang/snappy v0.0.4 // indirect 134 | github.com/golangci/asciicheck v0.5.0 // indirect 135 | github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect 136 | github.com/golangci/go-printf-func-name v0.1.1 // indirect 137 | github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect 138 | github.com/golangci/golangci-lint/v2 v2.6.2 // indirect 139 | github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 // indirect 140 | github.com/golangci/misspell v0.7.0 // indirect 141 | github.com/golangci/plugin-module-register v0.1.2 // indirect 142 | github.com/golangci/revgrep v0.8.0 // indirect 143 | github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e // indirect 144 | github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e // indirect 145 | github.com/google/btree v1.0.1 // indirect 146 | github.com/google/go-cmp v0.7.0 // indirect 147 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect 148 | github.com/google/uuid v1.6.0 // indirect 149 | github.com/gordonklaus/ineffassign v0.2.0 // indirect 150 | github.com/gorilla/websocket v1.5.3 // indirect 151 | github.com/gostaticanalysis/analysisutil v0.7.1 // indirect 152 | github.com/gostaticanalysis/comment v1.5.0 // indirect 153 | github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect 154 | github.com/gostaticanalysis/nilerr v0.1.2 // indirect 155 | github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect 156 | github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect 157 | github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect 158 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect 159 | github.com/hashicorp/errwrap v1.1.0 // indirect 160 | github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect 161 | github.com/hashicorp/go-multierror v1.1.1 // indirect 162 | github.com/hashicorp/go-version v1.7.0 // indirect 163 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 164 | github.com/hashicorp/hcl v1.0.0 // indirect 165 | github.com/hexops/gotextdiff v1.0.3 // indirect 166 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 167 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect 168 | github.com/jackc/pgconn v1.14.3 // indirect 169 | github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 // indirect 170 | github.com/jackc/pgio v1.0.0 // indirect 171 | github.com/jackc/pgpassfile v1.0.0 // indirect 172 | github.com/jackc/pgproto3/v2 v2.3.3 // indirect 173 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 174 | github.com/jackc/pgtype v1.14.4 // indirect 175 | github.com/jackc/pgx/v4 v4.18.3 // indirect 176 | github.com/jackc/pgx/v5 v5.7.4 // indirect 177 | github.com/jackc/puddle v1.3.0 // indirect 178 | github.com/jackc/puddle/v2 v2.2.2 // indirect 179 | github.com/jackpal/gateway v1.0.5 // indirect 180 | 
github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad // indirect 181 | github.com/jessevdk/go-flags v1.4.0 // indirect 182 | github.com/jgautheron/goconst v1.8.2 // indirect 183 | github.com/jingyugao/rowserrcheck v1.1.1 // indirect 184 | github.com/jjti/go-spancheck v0.6.5 // indirect 185 | github.com/jonboulle/clockwork v0.2.2 // indirect 186 | github.com/jrick/logrotate v1.1.2 // indirect 187 | github.com/json-iterator/go v1.1.12 // indirect 188 | github.com/juju/clock v1.1.1 // indirect 189 | github.com/juju/loggo v1.0.0 // indirect 190 | github.com/juju/testing v1.0.2 // indirect 191 | github.com/juju/utils/v3 v3.2.3 // indirect 192 | github.com/julz/importas v0.2.0 // indirect 193 | github.com/karamaru-alpha/copyloopvar v1.2.2 // indirect 194 | github.com/kisielk/errcheck v1.9.0 // indirect 195 | github.com/kkHAIKE/contextcheck v1.1.6 // indirect 196 | github.com/kkdai/bstream v1.0.0 // indirect 197 | github.com/klauspost/compress v1.17.9 // indirect 198 | github.com/kulti/thelper v0.7.1 // indirect 199 | github.com/kunwardeep/paralleltest v1.0.15 // indirect 200 | github.com/lasiar/canonicalheader v1.1.2 // indirect 201 | github.com/ldez/exptostd v0.4.5 // indirect 202 | github.com/ldez/gomoddirectives v0.7.1 // indirect 203 | github.com/ldez/grignotin v0.10.1 // indirect 204 | github.com/ldez/tagliatelle v0.7.2 // indirect 205 | github.com/ldez/usetesting v0.5.0 // indirect 206 | github.com/leonklingele/grouper v1.1.2 // indirect 207 | github.com/lib/pq v1.10.9 // indirect 208 | github.com/lightninglabs/gozmq v0.0.0-20191113021534-d20a764486bf // indirect 209 | github.com/lightninglabs/neutrino v0.16.1 // indirect 210 | github.com/lightninglabs/neutrino/cache v1.1.2 // indirect 211 | github.com/lightningnetwork/lightning-onion v1.2.1-0.20240815225420-8b40adf04ab9 // indirect 212 | github.com/lightningnetwork/lnd/cert v1.2.2 // indirect 213 | github.com/lightningnetwork/lnd/clock v1.1.1 // indirect 214 | github.com/lightningnetwork/lnd/healthcheck v1.2.6 // indirect 215 | github.com/lightningnetwork/lnd/kvdb v1.4.16 // indirect 216 | github.com/lightningnetwork/lnd/queue v1.1.1 // indirect 217 | github.com/lightningnetwork/lnd/sqldb v1.0.11 // indirect 218 | github.com/lightningnetwork/lnd/ticker v1.1.1 // indirect 219 | github.com/lightningnetwork/lnd/tlv v1.3.2 // indirect 220 | github.com/lightningnetwork/lnd/tor v1.1.6 // indirect 221 | github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 // indirect 222 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 223 | github.com/macabu/inamedparam v0.2.0 // indirect 224 | github.com/magiconair/properties v1.8.6 // indirect 225 | github.com/manuelarte/embeddedstructfieldcheck v0.4.0 // indirect 226 | github.com/manuelarte/funcorder v0.5.0 // indirect 227 | github.com/maratori/testableexamples v1.0.1 // indirect 228 | github.com/maratori/testpackage v1.1.2 // indirect 229 | github.com/matoous/godox v1.1.0 // indirect 230 | github.com/mattn/go-colorable v0.1.14 // indirect 231 | github.com/mattn/go-isatty v0.0.20 // indirect 232 | github.com/mattn/go-runewidth v0.0.16 // indirect 233 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect 234 | github.com/mgechev/revive v1.12.0 // indirect 235 | github.com/miekg/dns v1.1.43 // indirect 236 | github.com/mitchellh/go-homedir v1.1.0 // indirect 237 | github.com/mitchellh/mapstructure v1.5.0 // indirect 238 | github.com/moby/docker-image-spec v1.3.1 // indirect 239 | github.com/moby/term v0.5.0 // indirect 240 | github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect 241 | github.com/modern-go/reflect2 v1.0.2 // indirect 242 | github.com/moricho/tparallel v0.3.2 // indirect 243 | github.com/muesli/termenv v0.16.0 // indirect 244 | github.com/nakabonne/nestif v0.3.1 // indirect 245 | github.com/ncruces/go-strftime v0.1.9 // indirect 246 | github.com/nishanths/exhaustive v0.12.0 // indirect 247 | github.com/nishanths/predeclared v0.2.2 // indirect 248 | github.com/nunnatsa/ginkgolinter v0.21.2 // indirect 249 | github.com/opencontainers/go-digest v1.0.0 // indirect 250 | github.com/opencontainers/image-spec v1.0.2 // indirect 251 | github.com/opencontainers/runc v1.1.14 // indirect 252 | github.com/ory/dockertest/v3 v3.10.0 // indirect 253 | github.com/pelletier/go-toml v1.9.5 // indirect 254 | github.com/pelletier/go-toml/v2 v2.2.4 // indirect 255 | github.com/pkg/errors v0.9.1 // indirect 256 | github.com/pmezard/go-difflib v1.0.0 // indirect 257 | github.com/polyfloyd/go-errorlint v1.8.0 // indirect 258 | github.com/prometheus/client_golang v1.12.1 // indirect 259 | github.com/prometheus/client_model v0.2.0 // indirect 260 | github.com/prometheus/common v0.32.1 // indirect 261 | github.com/prometheus/procfs v0.7.3 // indirect 262 | github.com/quasilyte/go-ruleguard v0.4.5 // indirect 263 | github.com/quasilyte/go-ruleguard/dsl v0.3.23 // indirect 264 | github.com/quasilyte/gogrep v0.5.0 // indirect 265 | github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect 266 | github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect 267 | github.com/raeperd/recvcheck v0.2.0 // indirect 268 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect 269 | github.com/rinchsan/gosimports v0.3.8 // indirect 270 | github.com/rivo/uniseg v0.4.7 // indirect 271 | github.com/rogpeppe/fastuuid v1.2.0 // indirect 272 | github.com/rogpeppe/go-internal v1.14.1 // indirect 273 | github.com/ryancurrah/gomodguard v1.4.1 // indirect 274 | github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect 275 | github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect 276 | github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect 277 | github.com/sashamelentyev/interfacebloat v1.1.0 // indirect 278 | github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect 279 | github.com/securego/gosec/v2 v2.22.10 // indirect 280 | github.com/sirupsen/logrus v1.9.3 // indirect 281 | github.com/sivchari/containedctx v1.0.3 // indirect 282 | github.com/soheilhy/cmux v0.1.5 // indirect 283 | github.com/sonatard/noctx v0.4.0 // indirect 284 | github.com/sourcegraph/go-diff v0.7.0 // indirect 285 | github.com/spf13/afero v1.14.0 // indirect 286 | github.com/spf13/cast v1.5.0 // indirect 287 | github.com/spf13/jwalterweatherman v1.1.0 // indirect 288 | github.com/spf13/pflag v1.0.10 // indirect 289 | github.com/spf13/viper v1.12.0 // indirect 290 | github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect 291 | github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect 292 | github.com/stretchr/objx v0.5.2 // indirect 293 | github.com/subosito/gotenv v1.4.1 // indirect 294 | github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect 295 | github.com/tetafro/godot v1.5.4 // indirect 296 | github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect 297 | github.com/timonwong/loggercheck v0.11.0 // indirect 298 | github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect 299 | github.com/tomarrell/wrapcheck/v2 v2.11.0 // indirect 300 | 
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect 301 | github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 // indirect 302 | github.com/ultraware/funlen v0.2.0 // indirect 303 | github.com/ultraware/whitespace v0.2.0 // indirect 304 | github.com/uudashr/gocognit v1.2.0 // indirect 305 | github.com/uudashr/iface v1.4.1 // indirect 306 | github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect 307 | github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect 308 | github.com/xeipuuv/gojsonschema v1.2.0 // indirect 309 | github.com/xen0n/gosmopolitan v1.3.0 // indirect 310 | github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect 311 | github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect 312 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect 313 | github.com/yagipy/maintidx v1.0.0 // indirect 314 | github.com/yeya24/promlinter v0.3.0 // indirect 315 | github.com/ykadowak/zerologlint v0.1.5 // indirect 316 | gitlab.com/bosi/decorder v0.4.2 // indirect 317 | gitlab.com/yawning/bsaes.git v0.0.0-20190805113838-0a714cd429ec // indirect 318 | go-simpler.org/musttag v0.14.0 // indirect 319 | go-simpler.org/sloglint v0.11.1 // indirect 320 | go.augendre.info/arangolint v0.3.1 // indirect 321 | go.augendre.info/fatcontext v0.9.0 // indirect 322 | go.etcd.io/bbolt v1.4.3 // indirect 323 | go.etcd.io/etcd/api/v3 v3.5.12 // indirect 324 | go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect 325 | go.etcd.io/etcd/client/v2 v2.305.12 // indirect 326 | go.etcd.io/etcd/client/v3 v3.5.12 // indirect 327 | go.etcd.io/etcd/pkg/v3 v3.5.12 // indirect 328 | go.etcd.io/etcd/raft/v3 v3.5.12 // indirect 329 | go.etcd.io/etcd/server/v3 v3.5.12 // indirect 330 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 331 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect 332 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect 333 | go.opentelemetry.io/otel v1.38.0 // indirect 334 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect 335 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect 336 | go.opentelemetry.io/otel/metric v1.38.0 // indirect 337 | go.opentelemetry.io/otel/sdk v1.38.0 // indirect 338 | go.opentelemetry.io/otel/trace v1.38.0 // indirect 339 | go.opentelemetry.io/proto/otlp v1.0.0 // indirect 340 | go.uber.org/atomic v1.7.0 // indirect 341 | go.uber.org/automaxprocs v1.6.0 // indirect 342 | go.uber.org/multierr v1.10.0 // indirect 343 | go.uber.org/zap v1.27.0 // indirect 344 | golang.org/x/crypto v0.43.0 // indirect 345 | golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect 346 | golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 // indirect 347 | golang.org/x/mod v0.29.0 // indirect 348 | golang.org/x/net v0.46.0 // indirect 349 | golang.org/x/sync v0.18.0 // indirect 350 | golang.org/x/sys v0.37.0 // indirect 351 | golang.org/x/term v0.36.0 // indirect 352 | golang.org/x/text v0.30.0 // indirect 353 | golang.org/x/time v0.12.0 // indirect 354 | golang.org/x/tools v0.38.0 // indirect 355 | google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect 356 | google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect 357 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect 358 | google.golang.org/grpc v1.75.0 // indirect 359 | google.golang.org/protobuf v1.36.8 // indirect 360 | 
gopkg.in/errgo.v1 v1.0.1 // indirect 361 | gopkg.in/ini.v1 v1.67.0 // indirect 362 | gopkg.in/macaroon-bakery.v2 v2.0.1 // indirect 363 | gopkg.in/macaroon.v2 v2.0.0 // indirect 364 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect 365 | gopkg.in/yaml.v2 v2.4.0 // indirect 366 | gopkg.in/yaml.v3 v3.0.1 // indirect 367 | honnef.co/go/tools v0.6.1 // indirect 368 | modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect 369 | modernc.org/libc v1.49.3 // indirect 370 | modernc.org/mathutil v1.6.0 // indirect 371 | modernc.org/memory v1.8.0 // indirect 372 | modernc.org/sqlite v1.29.10 // indirect 373 | modernc.org/strutil v1.2.0 // indirect 374 | modernc.org/token v1.1.0 // indirect 375 | mvdan.cc/gofumpt v0.9.2 // indirect 376 | mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 // indirect 377 | pgregory.net/rapid v1.2.0 // indirect 378 | sigs.k8s.io/yaml v1.2.0 // indirect 379 | ) 380 | 381 | tool ( 382 | github.com/golangci/golangci-lint/v2/cmd/golangci-lint 383 | github.com/rinchsan/gosimports/cmd/gosimports 384 | ) 385 | 386 | // Needed for the GetTxOutProof RPC call. 387 | replace github.com/btcsuite/btcd => github.com/guggero/btcd v0.20.1-beta.0.20251202195639-45154ad3fb60 388 | 389 | // Needed for the Silent Payments library. 390 | replace github.com/btcsuite/btcd/btcutil => github.com/guggero/btcd/btcutil v0.0.0-20251214193129-415da0a46ecb 391 | 392 | replace github.com/btcsuite/btcd/btcutil/psbt => github.com/guggero/btcd/btcutil/psbt v0.0.0-20251214193129-415da0a46ecb 393 | 394 | // Needed for the Silent Payments PSBT wallet support. 395 | replace github.com/lightningnetwork/lnd => github.com/guggero/lnd v0.11.0-beta.rc4.0.20251212135228-c2b293e689a9 396 | 397 | replace github.com/lightningnetwork/lnd/sqldb => github.com/guggero/lnd/sqldb v0.0.0-20251212135228-c2b293e689a9 398 | 399 | // We want to format raw bytes as hex instead of base64. The forked version 400 | // allows us to specify that as an option. 401 | replace google.golang.org/protobuf => github.com/lightninglabs/protobuf-go-hex-display v1.33.0-hex-display 402 | -------------------------------------------------------------------------------- /main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/hex" 7 | "encoding/json" 8 | "fmt" 9 | "net/http" 10 | "os" 11 | "os/exec" 12 | "path/filepath" 13 | "strconv" 14 | "strings" 15 | "testing" 16 | "time" 17 | 18 | "github.com/btcsuite/btcd/btcec/v2" 19 | "github.com/btcsuite/btcd/btcutil/psbt" 20 | sp "github.com/btcsuite/btcd/btcutil/silentpayments" 21 | "github.com/btcsuite/btcd/chaincfg" 22 | "github.com/btcsuite/btcd/chaincfg/chainhash" 23 | "github.com/btcsuite/btcd/rpcclient" 24 | "github.com/btcsuite/btcd/wire" 25 | "github.com/btcsuite/btcwallet/chain" 26 | lntestminer "github.com/lightningnetwork/lnd/lntest/miner" 27 | "github.com/lightningnetwork/lnd/lntest/port" 28 | "github.com/lightningnetwork/lnd/lntest/unittest" 29 | "github.com/lightningnetwork/lnd/lntest/wait" 30 | "github.com/stretchr/testify/require" 31 | ) 32 | 33 | const ( 34 | numStartupBlocks = 50 35 | 36 | // numInitialBlocks is the number of blocks we mine for testing. In 37 | // regtest mode, when using btcd as a miner, we can only mine blocks in 38 | // a 2-hour window and each block needs to have a timestamp at least 1 39 | // second greater than the previous block. 
Thus, we can only mine at 40 | // most 7200 blocks in a short period of time with the first 200 being 41 | // mined when the miner is created as part of its startup procedure. 42 | numInitialBlocks = 3000 43 | 44 | headerSize = 80 45 | filterHeadersSize = 32 46 | 47 | cacheTemporary = "max-age=1" 48 | cacheMemory = "max-age=60" 49 | cacheDisk = "max-age=31536000" 50 | 51 | unitTestDir = ".unit-test-logs" 52 | ) 53 | 54 | var ( 55 | syncTimeout = 2 * time.Minute 56 | testTimeout = 60 * time.Second 57 | shortTimeout = 5 * time.Second 58 | 59 | testParams = chaincfg.RegressionNetParams 60 | 61 | totalStartupBlocks = numStartupBlocks + 62 | uint32(testParams.CoinbaseMaturity) + 63 | testParams.MinerConfirmationWindow*2 64 | totalInitialBlocks = totalStartupBlocks + numInitialBlocks 65 | ) 66 | 67 | type testContext struct { 68 | miner *lntestminer.HarnessMiner 69 | backend *rpcclient.Client 70 | server *server 71 | } 72 | 73 | func (ctx *testContext) fetchJSON(t *testing.T, endpoint string, 74 | target any) http.Header { 75 | 76 | url := fmt.Sprintf("http://%s/%s", ctx.server.listenAddr, endpoint) 77 | resp, err := http.Get(url) 78 | require.NoError(t, err) 79 | 80 | defer func() { 81 | require.NoError(t, resp.Body.Close()) 82 | }() 83 | 84 | body := new(bytes.Buffer) 85 | _, err = body.ReadFrom(resp.Body) 86 | require.NoError(t, err) 87 | err = json.Unmarshal(body.Bytes(), target) 88 | if err != nil { 89 | require.NoError(t, err) 90 | } 91 | 92 | return resp.Header 93 | } 94 | 95 | func (ctx *testContext) fetchBinary(t *testing.T, endpoint string) ([]byte, 96 | http.Header) { 97 | 98 | data, header, _ := ctx.fetchBinaryWithStatus(t, endpoint) 99 | return data, header 100 | } 101 | 102 | func (ctx *testContext) fetchBinaryWithStatus(t *testing.T, 103 | endpoint string) ([]byte, http.Header, int) { 104 | 105 | url := fmt.Sprintf("http://%s/%s", ctx.server.listenAddr, endpoint) 106 | resp, err := http.Get(url) 107 | require.NoError(t, err) 108 | 109 | defer func() { 110 | require.NoError(t, resp.Body.Close()) 111 | }() 112 | 113 | body := new(bytes.Buffer) 114 | _, err = body.ReadFrom(resp.Body) 115 | require.NoError(t, err) 116 | 117 | return body.Bytes(), resp.Header, resp.StatusCode 118 | } 119 | 120 | func (ctx *testContext) bestBlock(t *testing.T) (int32, chainhash.Hash) { 121 | height, err := ctx.backend.GetBlockCount() 122 | require.NoError(t, err) 123 | 124 | blockHash, err := ctx.backend.GetBlockHash(height) 125 | require.NoError(t, err) 126 | 127 | return int32(height), *blockHash 128 | } 129 | 130 | func (ctx *testContext) blockAtHeight(t *testing.T, 131 | height int32) *wire.MsgBlock { 132 | 133 | hash, err := ctx.backend.GetBlockHash(int64(height)) 134 | require.NoError(t, err) 135 | 136 | block, err := ctx.backend.GetBlock(hash) 137 | require.NoError(t, err) 138 | 139 | return block 140 | } 141 | 142 | func (ctx *testContext) fetchPrevOutScript(op wire.OutPoint) ([]byte, error) { 143 | tx, err := ctx.backend.GetRawTransaction(&op.Hash) 144 | if err != nil { 145 | return nil, fmt.Errorf("error fetching previous "+ 146 | "transaction: %w", err) 147 | } 148 | 149 | if int(op.Index) >= len(tx.MsgTx().TxOut) { 150 | return nil, fmt.Errorf("output index %d out of range for "+ 151 | "transaction %s", op.Index, op.Hash.String()) 152 | } 153 | 154 | return tx.MsgTx().TxOut[op.Index].PkScript, nil 155 | } 156 | 157 | func (ctx *testContext) waitBackendSync(t *testing.T) { 158 | waitBackendSync(t, ctx.backend, ctx.miner) 159 | } 160 | 161 | func (ctx *testContext) waitFilesSync(t *testing.T) 
{ 162 | err := wait.NoError(func() error { 163 | headerHeight := ctx.server.headerFiles.getCurrentHeight() 164 | _, minerHeight, err := ctx.miner.Client.GetBestBlock() 165 | if err != nil { 166 | return fmt.Errorf("unable to get miner height: %w", err) 167 | } 168 | 169 | if minerHeight != headerHeight { 170 | return fmt.Errorf("expected height %d, got %d", 171 | minerHeight, headerHeight) 172 | } 173 | 174 | if headerHeight != ctx.server.cFilterFiles.getCurrentHeight() { 175 | return fmt.Errorf("cfilter height mismatch: %d vs %d", 176 | ctx.server.cFilterFiles.getCurrentHeight(), 177 | headerHeight) 178 | } 179 | 180 | if headerHeight != ctx.server.spTweakFiles.getCurrentHeight() { 181 | return fmt.Errorf("sp tweak data height mismatch: %d "+ 182 | "vs %d", 183 | ctx.server.spTweakFiles.getCurrentHeight(), 184 | headerHeight) 185 | } 186 | 187 | return nil 188 | }, syncTimeout) 189 | require.NoError(t, err) 190 | } 191 | 192 | type testFunc func(t *testing.T, ctx *testContext) 193 | 194 | var testCases = []struct { 195 | name string 196 | fn testFunc 197 | }{ 198 | { 199 | name: "errors", 200 | fn: testErrors, 201 | }, 202 | { 203 | name: "index", 204 | fn: testIndex, 205 | }, 206 | { 207 | name: "status", 208 | fn: testStatus, 209 | }, 210 | { 211 | name: "headers", 212 | fn: testHeaders, 213 | }, 214 | { 215 | name: "headers-import", 216 | fn: testHeadersImport, 217 | }, 218 | { 219 | name: "filter-headers", 220 | fn: testFilterHeaders, 221 | }, 222 | { 223 | name: "filter-headers-import", 224 | fn: testFilterHeadersImport, 225 | }, 226 | { 227 | name: "sp-tweak-data", 228 | fn: testSPTweakData, 229 | }, 230 | { 231 | name: "tx-out-proof", 232 | fn: testTxOutProof, 233 | }, 234 | { 235 | name: "tx-raw", 236 | fn: testTxRaw, 237 | }, 238 | } 239 | 240 | func testErrors(t *testing.T, ctx *testContext) { 241 | type errorResponse struct { 242 | status int 243 | error string 244 | } 245 | 246 | var ( 247 | badHash = strings.Repeat("k", 64) 248 | badInt64 = strings.Repeat("9", 20) 249 | badHeight = strconv.Itoa(int(totalInitialBlocks + 1)) 250 | respBadHeight = errorResponse{ 251 | status: 400, 252 | error: "invalid value for parameter height", 253 | } 254 | respBadStartHeight1 = errorResponse{ 255 | status: 400, 256 | error: fmt.Sprintf("invalid start height %d, must be "+ 257 | "zero or a multiple of %d", 1, 258 | DefaultRegtestHeadersPerFile), 259 | } 260 | respBadStartHeightLarge = errorResponse{ 261 | status: 400, 262 | error: fmt.Sprintf("start height %s is greater than "+ 263 | "current height %d", badHeight, 264 | totalInitialBlocks), 265 | } 266 | respBadEndHeight0 = errorResponse{ 267 | status: 400, 268 | error: fmt.Sprintf("invalid end height %d, must be "+ 269 | "a multiple of %d", 0, 270 | DefaultRegtestHeadersPerFile), 271 | } 272 | respBadEndHeightPartial = errorResponse{ 273 | status: 400, 274 | error: fmt.Sprintf("invalid end height %d, must be "+ 275 | "a multiple of %d", 1000, 276 | DefaultRegtestHeadersPerFile), 277 | } 278 | respBadEndHeightLarge = errorResponse{ 279 | status: 400, 280 | error: fmt.Sprintf("end height %s is greater than "+ 281 | "current height %d", badHeight, 282 | totalInitialBlocks), 283 | } 284 | respBadHashLength = errorResponse{ 285 | status: 400, 286 | error: errInvalidHashLength.Error(), 287 | } 288 | respNotFound = errorResponse{ 289 | status: 404, 290 | error: "404 page not found", 291 | } 292 | ) 293 | errorCases := map[string]errorResponse{ 294 | "foo": respNotFound, 295 | "headers": respNotFound, 296 | "headers/" + badInt64: respBadHeight, 
297 | "headers/1": respBadStartHeight1, 298 | "headers/" + badHeight: respBadStartHeightLarge, 299 | "headers/import": respNotFound, 300 | "headers/import/" + badInt64: respBadHeight, 301 | "headers/import/0": respBadEndHeight0, 302 | "headers/import/1000": respBadEndHeightPartial, 303 | "headers/import/" + badHeight: respBadEndHeightLarge, 304 | "filter-headers": respNotFound, 305 | "filter-headers/" + badInt64: respBadHeight, 306 | "filter-headers/1": respBadStartHeight1, 307 | "filter-headers/" + badHeight: respBadStartHeightLarge, 308 | "filter-headers/import": respNotFound, 309 | "filter-headers/import/" + badInt64: respBadHeight, 310 | "filter-headers/import/0": respBadEndHeight0, 311 | "filter-headers/import/1000": respBadEndHeightPartial, 312 | "filter-headers/import/" + badHeight: respBadEndHeightLarge, 313 | "filters": respNotFound, 314 | "filters/" + badInt64: respBadHeight, 315 | "filters/1": respBadStartHeight1, 316 | "filters/" + badHeight: respBadStartHeightLarge, 317 | "block": respNotFound, 318 | "block/aaaa": respBadHashLength, 319 | "block/" + badHash: respNotFound, 320 | "tx/out-proof": respNotFound, 321 | "tx/out-proof/aaaa": respBadHashLength, 322 | "tx/out-proof/" + badHash: respNotFound, 323 | "tx/raw": respNotFound, 324 | "tx/raw/aaaa": respBadHashLength, 325 | "tx/raw/" + badHash: respNotFound, 326 | } 327 | for endpoint, expected := range errorCases { 328 | body, headers, status := ctx.fetchBinaryWithStatus(t, endpoint) 329 | require.Equalf( 330 | t, expected.status, status, "endpoint: %s", endpoint, 331 | ) 332 | 333 | require.Containsf( 334 | t, string(body), expected.error, "endpoint: %s", 335 | endpoint, 336 | ) 337 | 338 | // If the endpoint isn't found, there are no cache or CORS 339 | // headers. 340 | if expected.status == http.StatusNotFound { 341 | continue 342 | } 343 | 344 | require.Equalf( 345 | t, "*", headers.Get(HeaderCORS), "endpoint: %s", 346 | endpoint, 347 | ) 348 | require.Equalf( 349 | t, cacheMemory, headers.Get(HeaderCache), 350 | "endpoint: %s", endpoint, 351 | ) 352 | } 353 | } 354 | 355 | func testIndex(t *testing.T, ctx *testContext) { 356 | data, headers := ctx.fetchBinary(t, "") 357 | require.Contains( 358 | t, string(data), "Block Delivery Network", 359 | ) 360 | require.Equal(t, "*", headers.Get(HeaderCORS)) 361 | } 362 | 363 | func testStatus(t *testing.T, ctx *testContext) { 364 | var status Status 365 | headers := ctx.fetchJSON(t, "status", &status) 366 | 367 | require.Equal(t, int32(totalInitialBlocks), status.BestBlockHeight) 368 | require.Equal(t, testParams.Name, status.ChainName) 369 | require.Equal( 370 | t, testParams.GenesisHash.String(), status.ChainGenesisHash, 371 | ) 372 | require.Equal( 373 | t, ctx.server.headersPerFile, status.EntriesPerHeaderFile, 374 | ) 375 | require.Equal( 376 | t, ctx.server.filtersPerFile, status.EntriesPerFilterFile, 377 | ) 378 | require.Equal( 379 | t, ctx.server.spTweaksPerFile, status.EntriesPerSPTweakFile, 380 | ) 381 | 382 | require.Contains(t, headers, HeaderCache) 383 | require.Equal(t, cacheMemory, headers.Get(HeaderCache)) 384 | require.Equal(t, "*", headers.Get(HeaderCORS)) 385 | 386 | height, blockHash := ctx.bestBlock(t) 387 | require.Equal(t, height, status.BestBlockHeight) 388 | require.Equal(t, height, status.BestFilterHeight) 389 | require.Equal(t, height, status.BestSPTweakHeight) 390 | require.Equal(t, blockHash.String(), status.BestBlockHash) 391 | 392 | filterHeader := ctx.server.headerFiles.filterHeaders[blockHash] 393 | require.Equal(t, filterHeader.String(), 
status.BestFilterHeader)
394 | }
395 | 
396 | func testHeaders(t *testing.T, ctx *testContext) {
397 | // We first query for a start block that can be served from files only.
398 | body, headers := ctx.fetchBinary(t, "headers/0")
399 | targetLen := DefaultRegtestHeadersPerFile * headerSize
400 | require.Lenf(t, body, targetLen, "body length should be %d but is %d",
401 | targetLen, len(body))
402 | 
403 | require.Contains(t, headers, HeaderCache)
404 | require.Equal(t, cacheDisk, headers.Get(HeaderCache))
405 | require.Equal(t, "*", headers.Get(HeaderCORS))
406 | 
407 | // And now we try to fetch all headers up to the current height, which
408 | // will require some of them to be served from memory.
409 | const startHeight = DefaultRegtestHeadersPerFile
410 | body, headers = ctx.fetchBinary(
411 | t, fmt.Sprintf("headers/%d", startHeight),
412 | )
413 | expectedBlocks := totalInitialBlocks - startHeight + 1
414 | targetLen = int(expectedBlocks) * headerSize
415 | require.Lenf(t, body, targetLen, "body length should be %d but is %d",
416 | targetLen, len(body))
417 | 
418 | require.Contains(t, headers, HeaderCache)
419 | require.Equal(t, cacheMemory, headers.Get(HeaderCache))
420 | require.Equal(t, "*", headers.Get(HeaderCORS))
421 | 
422 | // We make sure that the last 10 entries are actually correct.
423 | for index := expectedBlocks - 10; index <= expectedBlocks-1; index++ {
424 | start := int(index) * headerSize
425 | end := int(index+1) * headerSize
426 | headerBytes := body[start:end]
427 | 
428 | blockHash, err := ctx.backend.GetBlockHash(
429 | startHeight + int64(index),
430 | )
431 | require.NoError(t, err)
432 | 
433 | block, err := ctx.backend.GetBlock(blockHash)
434 | require.NoError(t, err)
435 | 
436 | var headerBuf bytes.Buffer
437 | err = block.Header.Serialize(&headerBuf)
438 | require.NoError(t, err)
439 | 
440 | require.Equalf(
441 | t, headerBuf.Bytes(), headerBytes,
442 | "header at height %d does not match", index,
443 | )
444 | }
445 | }
446 | 
447 | func testHeadersImport(t *testing.T, ctx *testContext) {
448 | // We first query for a block height that can be served from files only.
449 | body, headers := ctx.fetchBinary(
450 | t, fmt.Sprintf("headers/import/%d",
451 | DefaultRegtestHeadersPerFile),
452 | )
453 | targetLen := importMetadataSize +
454 | DefaultRegtestHeadersPerFile*headerSize
455 | require.Lenf(t, body, targetLen, "body length should be %d but is %d",
456 | targetLen, len(body))
457 | 
458 | require.Contains(t, headers, HeaderCache)
459 | require.Equal(t, cacheDisk, headers.Get(HeaderCache))
460 | require.Equal(t, "*", headers.Get(HeaderCORS))
461 | 
462 | // And now we try to fetch all headers up to the current height, which
463 | // will require some of them to be served from memory.
464 | body, headers = ctx.fetchBinary(
465 | t, fmt.Sprintf("headers/import/%d", totalInitialBlocks),
466 | )
467 | targetLen = importMetadataSize + int(totalInitialBlocks+1)*headerSize
468 | require.Lenf(t, body, targetLen, "body length should be %d but is %d",
469 | targetLen, len(body))
470 | 
471 | require.Contains(t, headers, HeaderCache)
472 | require.Equal(t, cacheMemory, headers.Get(HeaderCache))
473 | require.Equal(t, "*", headers.Get(HeaderCORS))
474 | 
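The length and offset arithmetic in this test (and in the verification loop that follows) relies on a fixed invariant: a serialized Bitcoin block header is always exactly 80 bytes, which is what the headerSize constant captures. A small standalone sketch of that invariant, using the wire package this file already imports:

// Illustrative only: every wire.BlockHeader serializes to 80 bytes
// (version 4 + prev hash 32 + merkle root 32 + timestamp 4 + bits 4 +
// nonce 4), so the entry for height h in an import file starts at byte
// offset importMetadataSize + h*headerSize.
func headerIsAlways80Bytes() bool {
	var buf bytes.Buffer
	hdr := wire.BlockHeader{}
	if err := hdr.Serialize(&buf); err != nil {
		return false
	}
	return buf.Len() == 80
}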
475 | // We make sure that the last 10 entries are actually correct.
476 | lastHeight := ctx.server.headerFiles.getCurrentHeight()
477 | require.Equal(t, int32(totalInitialBlocks), lastHeight)
478 | for height := lastHeight - 9; height <= lastHeight; height++ {
479 | start := importMetadataSize + int(height)*headerSize
480 | end := importMetadataSize + int(height+1)*headerSize
481 | headerBytes := body[start:end]
482 | 
483 | blockHash, err := ctx.backend.GetBlockHash(int64(height))
484 | require.NoError(t, err)
485 | 
486 | block, err := ctx.backend.GetBlock(blockHash)
487 | require.NoError(t, err)
488 | 
489 | var headerBuf bytes.Buffer
490 | err = block.Header.Serialize(&headerBuf)
491 | require.NoError(t, err)
492 | 
493 | require.Equalf(
494 | t, headerBuf.Bytes(), headerBytes,
495 | "header at height %d does not match", height,
496 | )
497 | }
498 | }
499 | 
500 | func testFilterHeaders(t *testing.T, ctx *testContext) {
501 | // We first query for a start block that can be served from files only.
502 | body, headers := ctx.fetchBinary(t, "filter-headers/0")
503 | targetLen := DefaultRegtestHeadersPerFile * filterHeadersSize
504 | require.Lenf(t, body, targetLen, "body length should be %d but is %d",
505 | targetLen, len(body))
506 | 
507 | require.Contains(t, headers, HeaderCache)
508 | require.Equal(t, cacheDisk, headers.Get(HeaderCache))
509 | require.Equal(t, "*", headers.Get(HeaderCORS))
510 | 
511 | // And now we try to fetch all headers up to the current height, which
512 | // will require some of them to be served from memory.
513 | const startHeight = DefaultRegtestHeadersPerFile
514 | body, headers = ctx.fetchBinary(
515 | t, fmt.Sprintf("filter-headers/%d", startHeight),
516 | )
517 | expectedBlocks := totalInitialBlocks - startHeight + 1
518 | targetLen = int(expectedBlocks) * filterHeadersSize
519 | require.Lenf(t, body, targetLen, "body length should be %d but is %d",
520 | targetLen, len(body))
521 | 
522 | require.Contains(t, headers, HeaderCache)
523 | require.Equal(t, cacheMemory, headers.Get(HeaderCache))
524 | require.Equal(t, "*", headers.Get(HeaderCORS))
525 | 
526 | // We make sure that the last 10 entries are actually correct.
527 | for index := expectedBlocks - 10; index <= expectedBlocks-1; index++ {
528 | start := int(index) * filterHeadersSize
529 | end := int(index+1) * filterHeadersSize
530 | headerBytes := body[start:end]
531 | 
532 | blockHash, err := ctx.backend.GetBlockHash(
533 | startHeight + int64(index),
534 | )
535 | require.NoError(t, err)
536 | 
537 | filter, err := ctx.backend.GetBlockFilter(
538 | *blockHash, &filterBasic,
539 | )
540 | require.NoError(t, err)
541 | 
542 | filterHeaderHash, err := chainhash.NewHashFromStr(filter.Header)
543 | require.NoError(t, err)
544 | 
545 | require.Equalf(
546 | t, filterHeaderHash[:], headerBytes,
547 | "filter header at height %d does not match", index,
548 | )
549 | }
550 | }
551 | 
552 | func testFilterHeadersImport(t *testing.T, ctx *testContext) {
553 | // We first query for a block height that can be served from files only.
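A note on what the 32-byte entries checked here actually are: per BIP-157, each filter header commits both to the current block's filter and to the previous filter header, forming a hash chain that lets a light client detect a single bad filter in a batch. A sketch of that chain rule, using the chainhash package this file imports (btcd's btcutil/gcs/builder package exposes MakeHeaderForFilter for the same computation on real filters); the fetch below then pulls a whole stretch of that chain in one request:

// header_n = double-SHA256(filter_hash_n || header_{n-1}), which is why
// filterHeadersSize equals chainhash.HashSize (32 bytes) per entry.
func nextFilterHeader(filterHash, prevHeader chainhash.Hash) chainhash.Hash {
	return chainhash.DoubleHashH(append(filterHash[:], prevHeader[:]...))
}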
554 | body, headers := ctx.fetchBinary( 555 | t, fmt.Sprintf("filter-headers/import/%d", 556 | DefaultRegtestHeadersPerFile), 557 | ) 558 | targetLen := importMetadataSize + 559 | DefaultRegtestHeadersPerFile*filterHeadersSize 560 | require.Lenf(t, body, targetLen, "body length should be %d but is %d", 561 | targetLen, len(body)) 562 | 563 | require.Contains(t, headers, HeaderCache) 564 | require.Equal(t, cacheDisk, headers.Get(HeaderCache)) 565 | require.Equal(t, "*", headers.Get(HeaderCORS)) 566 | 567 | // And now we try to fetch all headers up to the current height, which 568 | // will require some of them to be served from memory. 569 | body, headers = ctx.fetchBinary( 570 | t, fmt.Sprintf("filter-headers/import/%d", totalInitialBlocks), 571 | ) 572 | targetLen = importMetadataSize + 573 | int(totalInitialBlocks+1)*filterHeadersSize 574 | require.Lenf(t, body, targetLen, "body length should be %d but is %d", 575 | targetLen, len(body)) 576 | 577 | require.Contains(t, headers, HeaderCache) 578 | require.Equal(t, cacheMemory, headers.Get(HeaderCache)) 579 | require.Equal(t, "*", headers.Get(HeaderCORS)) 580 | 581 | // We make sure that the last 10 entries are actually correct. 582 | lastHeight := ctx.server.headerFiles.getCurrentHeight() 583 | require.Equal(t, int32(totalInitialBlocks), lastHeight) 584 | for height := lastHeight - 9; height <= lastHeight; height++ { 585 | start := importMetadataSize + int(height)*filterHeadersSize 586 | end := importMetadataSize + int(height+1)*filterHeadersSize 587 | headerBytes := body[start:end] 588 | 589 | blockHash, err := ctx.backend.GetBlockHash(int64(height)) 590 | require.NoError(t, err) 591 | 592 | filter, err := ctx.backend.GetBlockFilter( 593 | *blockHash, &filterBasic, 594 | ) 595 | require.NoError(t, err) 596 | 597 | filterHeaderHash, err := chainhash.NewHashFromStr(filter.Header) 598 | require.NoError(t, err) 599 | 600 | require.Equalf( 601 | t, filterHeaderHash[:], headerBytes, 602 | "filter header at height %d does not match", height, 603 | ) 604 | } 605 | } 606 | 607 | func testSPTweakData(t *testing.T, ctx *testContext) { 608 | var spTweakData SPTweakFile 609 | headers := ctx.fetchJSON(t, "sp/tweak-data/0", &spTweakData) 610 | require.Equal(t, int32(0), spTweakData.StartHeight) 611 | require.Equal( 612 | t, int32(DefaultRegtestSPTweaksPerFile), spTweakData.NumBlocks, 613 | ) 614 | require.Len(t, spTweakData.Blocks, int(spTweakData.NumBlocks)) 615 | 616 | require.Contains(t, headers, HeaderCache) 617 | require.Equal(t, cacheDisk, headers.Get(HeaderCache)) 618 | require.Equal(t, "*", headers.Get(HeaderCORS)) 619 | 620 | // And now we try to fetch all SP tweak data up to the current height, 621 | // which will require some of them to be served from memory. 622 | const startHeight = DefaultRegtestSPTweaksPerFile 623 | headers = ctx.fetchJSON( 624 | t, fmt.Sprintf("sp/tweak-data/%d", startHeight), &spTweakData, 625 | ) 626 | expectedBlocks := totalInitialBlocks - startHeight + 1 627 | require.Equal( 628 | t, int32(expectedBlocks), spTweakData.NumBlocks, 629 | ) 630 | require.Len(t, spTweakData.Blocks, int(expectedBlocks)) 631 | 632 | require.Contains(t, headers, HeaderCache) 633 | require.Equal(t, cacheMemory, headers.Get(HeaderCache)) 634 | require.Equal(t, "*", headers.Get(HeaderCORS)) 635 | 636 | // Now we mine some blocks with Taproot outputs to ensure we have 637 | // Taproot tweaks in the SP tweak data. 
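As the assertions further down spell out, each tweak entry is the hex encoding of a 33-byte compressed secp256k1 point, keyed by the transaction's index within its block. Before the mining loop below runs, here is a hedged sketch of how a consuming wallet might decode one such entry (illustrative only; a real BIP-352 scanner would go on to combine the decoded point with its scan key to detect incoming payments):

// parseTweak decodes one silent-payments tweak entry. The format (hex of
// a compressed public key) matches what the test below asserts via
// btcec.PubKeyBytesLenCompressed.
func parseTweak(tweakHex string) (*btcec.PublicKey, error) {
	raw, err := hex.DecodeString(tweakHex)
	if err != nil {
		return nil, err
	}
	return btcec.ParsePubKey(raw)
}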
638 | numTrBlocks := uint32(20) 639 | for range numTrBlocks { 640 | ctx.miner.SendOutput(&wire.TxOut{ 641 | Value: 5_000, 642 | PkScript: psbt.SilentPaymentDummyP2TROutput, 643 | }, 2) 644 | ctx.miner.SendOutput(&wire.TxOut{ 645 | Value: 5_000, 646 | PkScript: psbt.SilentPaymentDummyP2TROutput, 647 | }, 2) 648 | ctx.miner.MineBlocksAndAssertNumTxes(1, 2) 649 | } 650 | 651 | ctx.waitBackendSync(t) 652 | ctx.waitFilesSync(t) 653 | 654 | headers = ctx.fetchJSON( 655 | t, fmt.Sprintf("sp/tweak-data/%d", startHeight), &spTweakData, 656 | ) 657 | expectedHeight := totalInitialBlocks + numTrBlocks 658 | expectedBlocks = expectedHeight - DefaultRegtestSPTweaksPerFile + 1 659 | require.Equal( 660 | t, int32(expectedBlocks), spTweakData.NumBlocks, 661 | ) 662 | require.Len(t, spTweakData.Blocks, int(expectedBlocks)) 663 | 664 | require.Contains(t, headers, HeaderCache) 665 | require.Equal(t, cacheMemory, headers.Get(HeaderCache)) 666 | require.Equal(t, "*", headers.Get(HeaderCORS)) 667 | 668 | // We expect the last block before the Taproot blocks to not have any 669 | // Taproot tweaks. 670 | noTrHeight := expectedHeight - numTrBlocks 671 | noTrBlock, err := spTweakData.TweakAtHeight(int32(noTrHeight)) 672 | require.NoError(t, err) 673 | require.Empty(t, noTrBlock) 674 | 675 | // Check the actual tweaks in the last 20 blocks. 676 | loopStart := expectedHeight - numTrBlocks + 1 677 | for height := loopStart; height <= expectedHeight; height++ { 678 | trBlock, err := spTweakData.TweakAtHeight(int32(height)) 679 | require.NoError(t, err) 680 | 681 | require.Len(t, trBlock, 2) 682 | require.Lenf( 683 | t, trBlock[1], 684 | hex.EncodedLen(btcec.PubKeyBytesLenCompressed), 685 | "block %d, index 1", height, 686 | ) 687 | require.Lenf( 688 | t, trBlock[2], 689 | hex.EncodedLen(btcec.PubKeyBytesLenCompressed), 690 | "block %d, index 2", height, 691 | ) 692 | 693 | block := ctx.blockAtHeight(t, int32(height)) 694 | require.Len(t, block.Transactions, 3) 695 | 696 | key1, err := sp.TransactionTweakData( 697 | block.Transactions[1], ctx.fetchPrevOutScript, log, 698 | ) 699 | require.NoError(t, err) 700 | require.Equal( 701 | t, trBlock[1], 702 | hex.EncodeToString(key1.SerializeCompressed()), 703 | ) 704 | 705 | key2, err := sp.TransactionTweakData( 706 | block.Transactions[2], ctx.fetchPrevOutScript, log, 707 | ) 708 | require.NoError(t, err) 709 | require.Equal( 710 | t, trBlock[2], 711 | hex.EncodeToString(key2.SerializeCompressed()), 712 | ) 713 | } 714 | 715 | // We mine an empty block to ensure that the following tests can assume 716 | // empty blocks again. 717 | ctx.miner.MineEmptyBlocks(1) 718 | ctx.waitBackendSync(t) 719 | ctx.waitFilesSync(t) 720 | } 721 | 722 | func testTxOutProof(t *testing.T, ctx *testContext) { 723 | // We start with the latest block. 724 | bestHeight, bestHash := ctx.bestBlock(t) 725 | block, err := ctx.backend.GetBlock(&bestHash) 726 | require.NoError(t, err) 727 | 728 | require.Len(t, block.Transactions, 1) 729 | tx := block.Transactions[0] 730 | 731 | data, headers := ctx.fetchBinary( 732 | t, fmt.Sprintf("tx/out-proof/%s", tx.TxHash().String()), 733 | ) 734 | require.NotEmpty(t, data) 735 | 736 | require.Contains(t, headers, HeaderCache) 737 | require.Equal(t, cacheTemporary, headers.Get(HeaderCache)) 738 | require.Equal(t, "*", headers.Get(HeaderCORS)) 739 | 740 | // Then we verify that a sufficiently confirmed block has cache headers. 
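The assertions that follow encode a depth-based cache policy: a proof for a block near the tip may still be invalidated by a reorg, so it only earns the short "max-age=1" lifetime, while a proof buried deeper than the reorg-safe depth is effectively immutable and may be cached for a year. A hypothetical helper making that policy explicit (a sketch with assumed boundary handling, not the server's actual code):

// proofMaxAge mirrors the cacheTemporary ("max-age=1") and cacheDisk
// ("max-age=31536000") constants defined at the top of this file.
func proofMaxAge(tipHeight, blockHeight, safeDepth int32) time.Duration {
	if tipHeight-blockHeight <= safeDepth {
		return 1 * time.Second
	}
	return 365 * 24 * time.Hour
}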
741 | buriedHash, err := ctx.backend.GetBlockHash( 742 | int64(bestHeight) - defaultTestnetReOrgSafeDepth - 1, 743 | ) 744 | require.NoError(t, err) 745 | 746 | buriedBlock, err := ctx.backend.GetBlock(buriedHash) 747 | require.NoError(t, err) 748 | 749 | require.Len(t, buriedBlock.Transactions, 1) 750 | buriedTx := buriedBlock.Transactions[0] 751 | 752 | data, headers = ctx.fetchBinary( 753 | t, fmt.Sprintf("tx/out-proof/%s", buriedTx.TxHash().String()), 754 | ) 755 | require.NotEmpty(t, data) 756 | 757 | require.Contains(t, headers, HeaderCache) 758 | require.Equal(t, cacheDisk, headers.Get(HeaderCache)) 759 | require.Equal(t, "*", headers.Get(HeaderCORS)) 760 | } 761 | 762 | func testTxRaw(t *testing.T, ctx *testContext) { 763 | _, bestHash := ctx.bestBlock(t) 764 | block, err := ctx.backend.GetBlock(&bestHash) 765 | require.NoError(t, err) 766 | 767 | require.Len(t, block.Transactions, 1) 768 | tx := block.Transactions[0] 769 | 770 | data, headers := ctx.fetchBinary( 771 | t, fmt.Sprintf("tx/raw/%s", tx.TxHash().String()), 772 | ) 773 | require.NotEmpty(t, data) 774 | 775 | var txBuf bytes.Buffer 776 | require.NoError(t, tx.Serialize(&txBuf)) 777 | 778 | require.Equal(t, txBuf.Bytes(), data) 779 | 780 | require.Contains(t, headers, HeaderCache) 781 | require.Equal(t, cacheDisk, headers.Get(HeaderCache)) 782 | require.Equal(t, "*", headers.Get(HeaderCORS)) 783 | } 784 | 785 | func TestBlockDN(t *testing.T) { 786 | // Activate Taproot for regtest. 787 | TaprootActivationHeights[chaincfg.RegressionNetParams.Net] = 1 788 | 789 | miner, backend, backendCfg, _ := setupBackend(t, unitTestDir) 790 | 791 | dataDir := t.TempDir() 792 | listenAddr := fmt.Sprintf("127.0.0.1:%d", port.NextAvailablePort()) 793 | 794 | testServer := newServer( 795 | false, true, dataDir, listenAddr, &backendCfg, 796 | unittest.NetParams, 6, DefaultRegtestHeadersPerFile, 797 | DefaultRegtestFiltersPerFile, DefaultRegtestSPTweaksPerFile, 798 | ) 799 | ctx := &testContext{ 800 | miner: miner, 801 | backend: backend, 802 | server: testServer, 803 | } 804 | 805 | // Mine a couple blocks and wait for the backend to catch up. 806 | t.Logf("Mining %d blocks...", numInitialBlocks) 807 | _ = miner.MineEmptyBlocks(numInitialBlocks) 808 | 809 | // Wait until the backend is fully synced to the miner. 
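For a sense of the scale of the sync that follows: assuming btcd's regression-net parameters (CoinbaseMaturity = 100, MinerConfirmationWindow = 144), the heights defined at the top of this file work out as below.

// Back-of-the-envelope check, not repo code:
const (
	approxStartupBlocks = 50 + 100 + 2*144                // totalStartupBlocks: 438
	approxTotalBlocks   = approxStartupBlocks + 3000      // totalInitialBlocks: 3438
)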
810 | ctx.waitBackendSync(t)
811 | 
812 | t.Logf("Starting block-dn server at %s...", listenAddr)
813 | require.NoError(t, testServer.start())
814 | ctx.waitFilesSync(t)
815 | 
816 | for _, testCase := range testCases {
817 | success := t.Run(testCase.name, func(t *testing.T) {
818 | testCase.fn(t, ctx)
819 | })
820 | if !success {
821 | t.Fatalf("test case %s failed", testCase.name)
822 | }
823 | }
824 | }
825 | 
826 | func newBitcoind(t *testing.T, logdir string,
827 | extraArgs []string) (*rpcclient.Client, rpcclient.ConnConfig,
828 | *chain.BitcoindConfig, func() error) {
829 | 
830 | tempBitcoindDir := t.TempDir()
831 | 
832 | err := os.MkdirAll(logdir, 0700)
833 | require.NoError(t, err)
834 | 
835 | logFile, err := filepath.Abs(logdir + "/bitcoind.log")
836 | require.NoError(t, err)
837 | 
838 | zmqBlockAddr := fmt.Sprintf("tcp://127.0.0.1:%d",
839 | port.NextAvailablePort())
840 | zmqTxAddr := fmt.Sprintf("tcp://127.0.0.1:%d", port.NextAvailablePort())
841 | rpcPort := port.NextAvailablePort()
842 | p2pPort := port.NextAvailablePort()
843 | torBindPort := port.NextAvailablePort()
844 | 
845 | cmdArgs := []string{
846 | "-datadir=" + tempBitcoindDir,
847 | "-whitelist=127.0.0.1", // whitelist localhost to speed up relay
848 | "-rpcauth=weks:469e9bb14ab2360f8e226efed5ca6f" +
849 | "d$507c670e800a95284294edb5773b05544b" +
850 | "220110063096c221be9933c82d38e1",
851 | fmt.Sprintf("-rpcport=%d", rpcPort),
852 | fmt.Sprintf("-port=%d", p2pPort),
853 | fmt.Sprintf("-bind=127.0.0.1:%d=onion", torBindPort),
854 | "-zmqpubrawblock=" + zmqBlockAddr,
855 | "-zmqpubrawtx=" + zmqTxAddr,
856 | "-debuglogfile=" + logFile,
857 | }
858 | cmdArgs = append(cmdArgs, extraArgs...)
859 | bitcoind := exec.Command("bitcoind", cmdArgs...)
860 | 
861 | err = bitcoind.Start()
862 | if err != nil {
863 | _ = os.RemoveAll(tempBitcoindDir)
864 | require.NoError(t, err)
865 | }
866 | 
867 | cleanUp := func() error {
868 | _ = bitcoind.Process.Kill()
869 | _ = bitcoind.Wait()
870 | 
871 | return nil
872 | }
873 | 
874 | // Allow process to start.
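A side note on the -rpcauth value hard-coded above: bitcoind expects the form user:salt$hmac, where the final part is HMAC-SHA256, keyed with the salt, over the password, hex encoded (the format produced by bitcoind's share/rpcauth tooling). A generic sketch of that computation, needing only crypto/hmac, crypto/sha256 and encoding/hex; it pairs with the "weks" user and "weks" password used further down, though the literal digest above is not re-asserted here:

// rpcauthDigest reproduces the hash portion of a bitcoind -rpcauth entry.
func rpcauthDigest(salt, password string) string {
	mac := hmac.New(sha256.New, []byte(salt))
	mac.Write([]byte(password)) // hash writes never return an error
	return hex.EncodeToString(mac.Sum(nil))
}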
875 | time.Sleep(1 * time.Second)
876 | 
877 | rpcHost := fmt.Sprintf("127.0.0.1:%d", rpcPort)
878 | rpcUser := "weks"
879 | rpcPass := "weks"
880 | 
881 | rpcCfg := rpcclient.ConnConfig{
882 | Host: rpcHost,
883 | User: rpcUser,
884 | Pass: rpcPass,
885 | DisableConnectOnNew: true,
886 | DisableAutoReconnect: false,
887 | DisableTLS: true,
888 | HTTPPostMode: true,
889 | }
890 | 
891 | bitcoindCfg := &chain.BitcoindConfig{
892 | ChainParams: &testParams,
893 | Host: rpcHost,
894 | User: rpcUser,
895 | Pass: rpcPass,
896 | ZMQConfig: &chain.ZMQConfig{
897 | ZMQBlockHost: zmqBlockAddr,
898 | ZMQTxHost: zmqTxAddr,
899 | MempoolPollingInterval: pollInterval,
900 | RPCBatchInterval: pollInterval,
901 | RPCBatchSize: 1,
902 | ZMQReadDeadline: defaultTimeout,
903 | },
904 | }
905 | 
906 | client, err := rpcclient.New(&rpcCfg, nil)
907 | if err != nil {
908 | _ = cleanUp()
909 | require.NoError(t, err)
910 | }
911 | 
912 | return client, rpcCfg, bitcoindCfg, cleanUp
913 | }
914 | 
915 | // nolint:unparam
916 | func setupBackend(t *testing.T, testDir string) (*lntestminer.HarnessMiner,
917 | *rpcclient.Client, rpcclient.ConnConfig, *chain.BitcoindConfig) {
918 | 
919 | ctx := context.Background()
920 | setupLogging(testDir, "debug")
921 | 
922 | _ = os.RemoveAll(testDir)
923 | _ = os.MkdirAll(testDir, 0700)
924 | 
925 | miner := lntestminer.NewTempMiner(
926 | ctx, t, filepath.Join(testDir, "temp-miner"), "miner.log",
927 | )
928 | require.NoError(t, miner.SetUp(true, numStartupBlocks))
929 | 
930 | // Next, mine enough blocks for the segwit and CSV package soft forks
931 | // to activate on regtest.
932 | numBlocks := testParams.MinerConfirmationWindow * 2
933 | miner.GenerateBlocks(numBlocks)
934 | 
935 | t.Cleanup(miner.Stop)
936 | 
937 | backend, backendCfg, bitcoindCfg, cleanup := newBitcoind(
938 | t, testDir, []string{
939 | "-regtest",
940 | "-txindex",
941 | "-disablewallet",
942 | "-peerblockfilters=1",
943 | "-blockfilterindex=1",
944 | },
945 | )
946 | 
947 | t.Cleanup(func() {
948 | require.NoError(t, cleanup())
949 | })
950 | 
951 | err := wait.NoError(func() error {
952 | return backend.AddNode(miner.P2PAddress(), rpcclient.ANAdd)
953 | }, testTimeout)
954 | require.NoError(t, err)
955 | 
956 | return miner, backend, backendCfg, bitcoindCfg
957 | }
958 | 
959 | func waitBackendSync(t *testing.T, backend *rpcclient.Client,
960 | miner *lntestminer.HarnessMiner) {
961 | 
962 | t.Log("Waiting for bitcoind backend to sync to miner...")
963 | syncState := int32(0)
964 | err := wait.NoError(func() error {
965 | backendCount, err := backend.GetBlockCount()
966 | if err != nil {
967 | return fmt.Errorf("unable to get backend height: %w",
968 | err)
969 | }
970 | 
971 | backendHeight := int32(backendCount)
972 | 
973 | _, minerHeight, err := miner.Client.GetBestBlock()
974 | if err != nil {
975 | return fmt.Errorf("unable to get miner height: %w", err)
976 | }
977 | 
978 | if backendHeight > syncState+1000 {
979 | t.Logf("Backend height: %d, Miner height: %d",
980 | backendHeight, minerHeight)
981 | syncState = backendHeight
982 | }
983 | 
984 | if minerHeight != backendHeight {
985 | return fmt.Errorf("expected height %d, got %d",
986 | minerHeight, backendHeight)
987 | }
988 | 
989 | t.Logf("Synced backend to miner at height %d", backendHeight)
990 | 
991 | return nil
992 | }, syncTimeout)
993 | require.NoError(t, err)
994 | }
995 | 
--------------------------------------------------------------------------------
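As a closing illustration, here is a minimal standalone client for the status endpoint the tests above exercise. The snake_case JSON field names are an assumption based on the repo's tagliatelle (json: snake) lint rule rather than a verified wire format, and the address is a placeholder:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// statusResp models a subset of the Status payload the tests above decode.
type statusResp struct {
	BestBlockHeight int32  `json:"best_block_height"`
	BestBlockHash   string `json:"best_block_hash"`
	ChainName       string `json:"chain_name"`
}

func fetchStatus(baseURL string) (*statusResp, error) {
	resp, err := http.Get(baseURL + "/status")
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()

	var s statusResp
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		return nil, err
	}
	return &s, nil
}

func main() {
	s, err := fetchStatus("http://127.0.0.1:8080") // placeholder address
	if err != nil {
		panic(err)
	}
	fmt.Printf("height=%d hash=%s chain=%s\n",
		s.BestBlockHeight, s.BestBlockHash, s.ChainName)
}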