├── .gitignore ├── headers ├── test_fixtures │ └── headers │ │ └── 000002cf ├── config.go ├── handler.go ├── header_data.go ├── proof_of_work.go ├── splits.go ├── test_helpers.go ├── header_data_test.go ├── splits_test.go ├── proof_of_work_test.go ├── headers_test.go └── branches_test.go ├── internal └── platform │ └── tests │ ├── tests.go │ ├── test_mock_processor.go │ └── test_block_tx_processor.go ├── README.md ├── handlers_test.go ├── go.mod ├── Makefile ├── tx_manager_test.go ├── node_manager_test.go ├── interfaces.go ├── config.go ├── LICENSE.md ├── peers_test.go ├── bitcoin_node_test.go ├── test_helpers.go ├── cmd └── node │ └── main.go ├── seeds.go ├── peers.go ├── go.sum ├── test_nodes.go ├── tx_manager.go ├── block_manager.go ├── scaling_test.go ├── block_downloader.go ├── messages.go └── bitcoin_node.go /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | *.exe 3 | *.test 4 | *.prof 5 | *.log 6 | *.env 7 | tmp 8 | dist 9 | .vscode/ 10 | -------------------------------------------------------------------------------- /headers/test_fixtures/headers/000002cf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tokenized/bitcoin_reader/HEAD/headers/test_fixtures/headers/000002cf -------------------------------------------------------------------------------- /internal/platform/tests/tests.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/tokenized/logger" 7 | ) 8 | 9 | // Context creates a context for testing. 10 | func Context() context.Context { 11 | return logger.ContextWithLogConfig(context.Background(), logger.NewConfig(true, true, "")) 12 | } 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Bitcoin Reader 2 | 3 | Bitcoin reader finds peers in the Bitcoin P2P network and listens to them. It always collects all 4 | headers with valid proof of work. If a tx processor is provided then all txs are run through it as 5 | they are seen on the network. If a block manager is provided then all blocks are fed through it. 
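The sketch below is a minimal example of wiring the pieces together to follow headers only. It is distilled from `cmd/node/main.go` and `node_manager_test.go` in this repository; the user agent string, storage path, and run duration are placeholders, and the tx/block processing hookups shown in `cmd/node/main.go` are omitted.

```go
package main

import (
	"context"
	"time"

	"github.com/tokenized/bitcoin_reader"
	"github.com/tokenized/bitcoin_reader/headers"
	"github.com/tokenized/logger"
	"github.com/tokenized/pkg/bitcoin"
	"github.com/tokenized/pkg/storage"
	"github.com/tokenized/threads"
)

func main() {
	ctx := logger.ContextWithLogConfig(context.Background(), logger.NewConfig(true, false, ""))

	// Local storage for headers and peers.
	store, err := storage.CreateStorage("standalone", "./tmp/node", 5, 100)
	if err != nil {
		logger.Fatal(ctx, "Failed to create storage : %s", err)
	}

	headerRepo := headers.NewRepository(headers.DefaultConfig(), store)
	if err := headerRepo.Load(ctx); err != nil {
		logger.Fatal(ctx, "Failed to load headers : %s", err)
	}

	peers := bitcoin_reader.NewPeerRepository(store, "")
	if err := peers.Load(ctx); err != nil {
		logger.Fatal(ctx, "Failed to load peers : %s", err)
	}
	if peers.Count() == 0 {
		peers.LoadSeeds(ctx, bitcoin.MainNet)
	}

	// The node manager maintains peer connections and collects headers into the repository.
	manager := bitcoin_reader.NewNodeManager("/YourAgent:0.1/", bitcoin_reader.DefaultConfig(),
		headerRepo, peers)

	runThread := threads.NewInterruptableThread("Run", manager.Run)
	runComplete := runThread.GetCompleteChannel()
	runThread.Start(ctx)

	// Let it run for a while, then shut down and persist state.
	time.Sleep(time.Minute)
	headerRepo.Stop(ctx)
	runThread.Stop(ctx)
	<-runComplete

	if err := headerRepo.Save(ctx); err != nil {
		logger.Error(ctx, "Failed to save headers : %s", err)
	}
	if err := peers.Save(ctx); err != nil {
		logger.Error(ctx, "Failed to save peers : %s", err)
	}
}
```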
6 | -------------------------------------------------------------------------------- /handlers_test.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import "testing" 4 | 5 | func Test_sizeString(t *testing.T) { 6 | tests := []struct { 7 | size uint64 8 | want string 9 | }{ 10 | { 11 | size: 123, 12 | want: "123 B", 13 | }, 14 | { 15 | size: 1234, 16 | want: "1.23 KB", 17 | }, 18 | { 19 | size: 123456, 20 | want: "123.46 KB", 21 | }, 22 | { 23 | size: 1234567, 24 | want: "1.23 MB", 25 | }, 26 | { 27 | size: 12345678, 28 | want: "12.35 MB", 29 | }, 30 | } 31 | 32 | for _, tt := range tests { 33 | t.Run(tt.want, func(t *testing.T) { 34 | got := sizeString(tt.size) 35 | 36 | if got != tt.want { 37 | t.Errorf("Wrong size string : got %s, want %s", got, tt.want) 38 | } 39 | }) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tokenized/bitcoin_reader 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/google/uuid v1.3.0 7 | github.com/pkg/errors v0.9.1 8 | github.com/tokenized/config v0.2.2 9 | github.com/tokenized/logger v0.1.3 10 | github.com/tokenized/pkg v0.7.1-0.20230518151913-31bef1f54301 11 | github.com/tokenized/threads v0.1.2 12 | ) 13 | 14 | require ( 15 | github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e // indirect 16 | github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec // indirect 17 | github.com/aws/aws-sdk-go v1.35.3 // indirect 18 | github.com/btcsuite/btcd v0.20.1-beta // indirect 19 | github.com/btcsuite/btcutil v1.0.2 // indirect 20 | github.com/gomodule/redigo v1.8.2 // indirect 21 | github.com/jmespath/go-jmespath v0.4.0 // indirect 22 | github.com/kelseyhightower/envconfig v1.4.0 // indirect 23 | github.com/tyler-smith/go-bip32 v0.0.0-20170922074101-2c9cfd177564 // indirect 24 | golang.org/x/crypto v0.8.0 // indirect 25 | ) 26 | -------------------------------------------------------------------------------- /headers/config.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import "github.com/tokenized/pkg/bitcoin" 4 | 5 | type Config struct { 6 | Network bitcoin.Network `default:"mainnet" envconfig:"NETWORK" json:"network"` 7 | 8 | // MaxBranchDepth is the max depth, measured from the longest height, at which a new branch will be created. 9 | // This helps prevent "hidden mining" where a miner doesn't release their blocks until later. 10 | MaxBranchDepth int `default:"144" envconfig:"MAX_BRANCH_DEPTH" json:"max_branch_depth"` 11 | 12 | // InvalidHeaderHashes are header hashes that will not be accepted as valid. They can also be 13 | // added or removed via a function call and are retained in storage. Once added via the config 14 | // and the repo has been saved, they are retained even if later removed from the config; after 15 | // that they can only be removed with a function call. The BTC and BCH split headers are handled 16 | // by a separate "split" system for other chains.
17 | InvalidHeaderHashes []bitcoin.Hash32 `envconfig:"INVALID_HEADER_HASHES" json:"invalid_header_hashes"` 18 | } 19 | 20 | func DefaultConfig() *Config { 21 | return &Config{ 22 | Network: bitcoin.MainNet, 23 | MaxBranchDepth: 144, 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /headers/handler.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "time" 8 | 9 | "github.com/tokenized/logger" 10 | "github.com/tokenized/pkg/wire" 11 | 12 | "github.com/pkg/errors" 13 | ) 14 | 15 | func (repo *Repository) HandleHeadersMessage(ctx context.Context, header *wire.MessageHeader, 16 | r io.Reader) error { 17 | waitWarning := logger.NewWaitingWarning(ctx, 3*time.Second, "headers.HandleHeadersMessage") 18 | defer waitWarning.Cancel() 19 | 20 | count, err := wire.ReadVarInt(r, wire.ProtocolVersion) 21 | if err != nil { 22 | return errors.Wrap(err, "header count") 23 | } 24 | 25 | for i := uint64(0); i < count; i++ { 26 | blockHeader := &wire.BlockHeader{} 27 | if err := blockHeader.Deserialize(r); err != nil { 28 | return errors.Wrap(err, fmt.Sprintf("read header %d / %d", i, count)) 29 | } 30 | 31 | txCount, err := wire.ReadVarInt(r, wire.ProtocolVersion) 32 | if err != nil { 33 | return errors.Wrap(err, fmt.Sprintf("read tx count %d / %d", i, count)) 34 | } 35 | 36 | if txCount != 0 { 37 | return fmt.Errorf("Non-zero header tx count : %d", txCount) 38 | } 39 | 40 | if err := repo.ProcessHeader(ctx, blockHeader); err != nil { 41 | return errors.Wrap(err, "process") 42 | } 43 | } 44 | 45 | return nil 46 | } 47 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BUILD_DATE = `date +%FT%T%z` 2 | BUILD_USER = $(USER)@`hostname` 3 | VERSION = `git describe --tags` 4 | 5 | # command to build and run on the local OS. 6 | GO_BUILD = go build 7 | 8 | # command to compiling the distributable. Specify GOOS and GOARCH for the target OS. 9 | GO_DIST = CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GO_BUILD) -a -tags netgo -ldflags "-w -X main.buildVersion=$(VERSION) -X main.buildDate=$(BUILD_DATE) -X main.buildUser=$(BUILD_USER)" 10 | 11 | BINARY=bitcoin_reader 12 | 13 | .PHONY: build 14 | 15 | dist: 16 | $(GO_DIST) -o dist/$(BINARY) cmd/node/main.go 17 | 18 | run: 19 | go run cmd/node/main.go 20 | 21 | run-race: 22 | go run -race cmd/node/main.go 23 | 24 | deps: 25 | go get -t ./... 26 | 27 | test: 28 | mkdir -p tmp 29 | go test -coverprofile=tmp/coverage.out ./... 30 | 31 | test-race: 32 | go test -race ./... 33 | 34 | test-all: 35 | go clean -testcache 36 | go test ./... 37 | 38 | lint: golint vet goimports 39 | 40 | vet: 41 | ret=0 && test -z "$$(go vet ./... | tee /dev/stderr)" || ret=1 ; exit $$ret 42 | 43 | golint: 44 | ret=0 && test -z "$$(golint . | tee /dev/stderr)" || ret=1 ; exit $$ret 45 | 46 | goimports: 47 | ret=0 && test -z "$$(goimports -l . 
| tee /dev/stderr)" || ret=1 ; exit $$ret 48 | 49 | tools: 50 | [ -f $(GOPATH)/bin/goimports ] || go get golang.org/x/tools/cmd/goimports 51 | [ -f $(GOPATH)/bin/golint ] || go get github.com/golang/lint/golint 52 | 53 | clean: 54 | rm -rf dist 55 | go clean -testcache 56 | -------------------------------------------------------------------------------- /tx_manager_test.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "bytes" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | 9 | "github.com/tokenized/bitcoin_reader/internal/platform/tests" 10 | "github.com/tokenized/pkg/bitcoin" 11 | 12 | "github.com/google/uuid" 13 | ) 14 | 15 | func Test_TxManager_General(t *testing.T) { 16 | ctx := tests.Context() 17 | manager := NewTxManager(250 * time.Millisecond) 18 | node1 := uuid.New() 19 | node2 := uuid.New() 20 | 21 | txids := make([]bitcoin.Hash32, 10) 22 | for i := range txids { 23 | rand.Read(txids[i][:]) 24 | } 25 | 26 | needsRequest, err := manager.AddTxID(ctx, node1, txids[0]) 27 | if err != nil { 28 | t.Fatalf("Failed to add txid : %s", err) 29 | } 30 | if !needsRequest { 31 | t.Errorf("Tx should need requested") 32 | } 33 | 34 | needsRequest, err = manager.AddTxID(ctx, node2, txids[0]) 35 | if err != nil { 36 | t.Fatalf("Failed to add txid : %s", err) 37 | } 38 | if needsRequest { 39 | t.Errorf("Tx should not need requested") 40 | } 41 | 42 | time.Sleep(300 * time.Millisecond) 43 | 44 | requestTxIDs, err := manager.GetTxRequests(ctx, node2, 100) 45 | if err != nil { 46 | t.Fatalf("Failed to get tx requests : %s", err) 47 | } 48 | 49 | if len(requestTxIDs) != 1 { 50 | t.Fatalf("Wrong request count : got %d, want %d", len(requestTxIDs), 1) 51 | } 52 | if !bytes.Equal(requestTxIDs[0][:], txids[0][:]) { 53 | t.Errorf("Wrong request txid : \ngot : %s\nwant : %s", requestTxIDs[0], txids[0]) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /node_manager_test.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/tokenized/bitcoin_reader/internal/platform/tests" 8 | "github.com/tokenized/pkg/bitcoin" 9 | "github.com/tokenized/pkg/storage" 10 | "github.com/tokenized/threads" 11 | ) 12 | 13 | func Test_NodeManager(t *testing.T) { 14 | if !testing.Verbose() { 15 | t.Skip() // Don't want to redownload the block all the time 16 | } 17 | 18 | ctx := tests.Context() 19 | store := storage.NewMockStorage() 20 | peers := NewPeerRepository(store, "") 21 | headers := NewMockHeaderRepository() 22 | config := &Config{ 23 | Network: bitcoin.MainNet, 24 | DesiredNodeCount: 20, 25 | } 26 | 27 | peers.LoadSeeds(ctx, bitcoin.MainNet) 28 | 29 | manager := NewNodeManager("/Tokenized/Spynode:Test/", config, headers, peers) 30 | runThread := threads.NewInterruptableThread("Run", manager.Run) 31 | runComplete := runThread.GetCompleteChannel() 32 | runThread.Start(ctx) 33 | 34 | select { 35 | case <-runComplete: 36 | if err := runThread.Error(); err != nil { 37 | t.Errorf("Failed to run : %s", err) 38 | } 39 | t.Errorf("Completed without interrupt") 40 | 41 | case <-time.After(20 * time.Second): 42 | t.Logf("Shutting down") 43 | runThread.Stop(ctx) 44 | select { 45 | case <-runComplete: 46 | if err := runThread.Error(); err != nil { 47 | t.Errorf("Failed to run : %s", err) 48 | } 49 | 50 | case <-time.After(time.Second): 51 | t.Fatalf("Failed to shut down") 52 | } 53 | } 54 | 
} 55 | -------------------------------------------------------------------------------- /headers/header_data.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "io" 5 | "math/big" 6 | 7 | "github.com/tokenized/pkg/bitcoin" 8 | "github.com/tokenized/pkg/wire" 9 | 10 | "github.com/pkg/errors" 11 | ) 12 | 13 | const ( 14 | headerDataSerializeSize = 112 // bytes for each header data 15 | ) 16 | 17 | type HeaderData struct { 18 | Hash bitcoin.Hash32 19 | Header *wire.BlockHeader 20 | AccumulatedWork *big.Int 21 | } 22 | 23 | func (h HeaderData) Serialize(w io.Writer) error { 24 | if err := h.Header.Serialize(w); err != nil { 25 | return errors.Wrap(err, "header") 26 | } 27 | 28 | if err := serializeBigInt(h.AccumulatedWork, w); err != nil { 29 | return errors.Wrap(err, "work") 30 | } 31 | 32 | return nil 33 | } 34 | 35 | func (h *HeaderData) Deserialize(r io.Reader) error { 36 | h.Header = &wire.BlockHeader{} 37 | if err := h.Header.Deserialize(r); err != nil { 38 | return errors.Wrap(err, "header") 39 | } 40 | h.Hash = *h.Header.BlockHash() 41 | 42 | work, err := deserializeBigInt(r) 43 | if err != nil { 44 | return errors.Wrap(err, "work") 45 | } 46 | h.AccumulatedWork = work 47 | 48 | return nil 49 | } 50 | 51 | func serializeBigInt(value *big.Int, w io.Writer) error { 52 | b := value.Bytes() 53 | full := make([]byte, 32) 54 | copy(full[32-len(b):], b) 55 | 56 | if _, err := w.Write(full); err != nil { 57 | return errors.Wrap(err, "bytes") 58 | } 59 | 60 | return nil 61 | } 62 | 63 | func deserializeBigInt(r io.Reader) (*big.Int, error) { 64 | b := make([]byte, 32) 65 | if _, err := io.ReadFull(r, b); err != nil { 66 | return nil, errors.Wrap(err, "bytes") 67 | } 68 | 69 | result := &big.Int{} 70 | result.SetBytes(b) 71 | 72 | return result, nil 73 | } 74 | -------------------------------------------------------------------------------- /interfaces.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/tokenized/pkg/bitcoin" 7 | "github.com/tokenized/pkg/merkle_proof" 8 | "github.com/tokenized/pkg/wire" 9 | 10 | "github.com/google/uuid" 11 | ) 12 | 13 | type TxProcessor interface { 14 | // ProcessTx returns true if the tx is relevant. 15 | ProcessTx(ctx context.Context, tx *wire.MsgTx) (bool, error) 16 | 17 | // CancelTx specifies that a tx is no longer valid because a conflicting tx has been confirmed. 18 | CancelTx(ctx context.Context, txid bitcoin.Hash32) error 19 | 20 | // AddTxConflict specifies that there is an unconfirmed conflicting tx to a relevant tx. 21 | AddTxConflict(ctx context.Context, txid, conflictTxID bitcoin.Hash32) error 22 | 23 | ConfirmTx(ctx context.Context, txid bitcoin.Hash32, blockHeight int, 24 | merkleProof *merkle_proof.MerkleProof) error 25 | 26 | UpdateTxChainDepth(ctx context.Context, txid bitcoin.Hash32, chainDepth uint32) error 27 | 28 | ProcessCoinbaseTx(ctx context.Context, blockHash bitcoin.Hash32, tx *wire.MsgTx) error 29 | } 30 | 31 | type TxSaver interface { 32 | SaveTx(context.Context, *wire.MsgTx) error 33 | } 34 | 35 | // HandleBlock handles a block coming from a data source. 36 | type HandleBlock func(ctx context.Context, header *wire.BlockHeader, txCount uint64, 37 | txChannel <-chan *wire.MsgTx) error 38 | 39 | type OnStop func(context.Context) 40 | 41 | type BlockRequestor interface { 42 | // RequestBlock requests a block from a data source. 
43 | // "handler" handles the block data as it is provided. 44 | // "onStop" is a function that is called if the data source stops. It should abort the request 45 | // from the handler side. 46 | RequestBlock(ctx context.Context, hash bitcoin.Hash32, 47 | handler HandleBlock, onStop OnStop) (BlockRequestCanceller, error) 48 | } 49 | 50 | type BlockRequestCanceller interface { 51 | // ID returns the unique id of the block requestor. 52 | ID() uuid.UUID 53 | 54 | // CancelBlockRequest cancels a request for a block. It returns true if the block handler has 55 | // already been called. 56 | CancelBlockRequest(context.Context, bitcoin.Hash32) bool 57 | } 58 | 59 | type BlockTxManager interface { 60 | // FetchBlockTxIDs fetches the relevant txids for a block hash. Returns false if data doesn't 61 | // exist for the block hash, which means the block has not been processed yet. 62 | FetchBlockTxIDs(ctx context.Context, blockHash bitcoin.Hash32) ([]bitcoin.Hash32, bool, error) 63 | 64 | AppendBlockTxIDs(ctx context.Context, blockHash bitcoin.Hash32, txids []bitcoin.Hash32) error 65 | } 66 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/tokenized/config" 7 | "github.com/tokenized/pkg/bitcoin" 8 | ) 9 | 10 | type Config struct { 11 | Network bitcoin.Network `default:"mainnet" json:"network" envconfig:"NETWORK"` 12 | 13 | // Timeout is the amount of time a node will remain connected. This is to promote making new 14 | // connections to keep a diverse set of peers. 15 | Timeout config.Duration `default:"4h" json:"timeout" envconfig:"NODE_TIMEOUT"` 16 | 17 | // ScanCount is the number of peer addresses that will be scanned for valid peers when a scan is 18 | // performed. 19 | ScanCount int `default:"100" json:"scan_count" envconfig:"SCAN_COUNT"` 20 | 21 | // TxRequestCount is the maximum number of txs that will be requested from a node at one time. 22 | TxRequestCount int `default:"10000" json:"tx_request_count" envconfig:"TX_REQUEST_COUNT"` 23 | 24 | // StartupDelay is the delay after node manager startup before block processing begins. 25 | StartupDelay config.Duration `default:"1m" json:"startup_delay" envconfig:"STARTUP_DELAY"` 26 | 27 | // ConcurrentBlockRequests is the number of concurrent block requests that will be attempted in 28 | // case some of the requests are slow. 29 | ConcurrentBlockRequests int `default:"2" json:"concurrent_block_requests" envconfig:"CONCURRENT_BLOCK_REQUESTS"` 30 | 31 | // DesiredNodeCount is the number of node connections that should be maintained. 32 | DesiredNodeCount int `default:"50" json:"desired_node_count" envconfig:"DESIRED_NODE_COUNT"` 33 | 34 | // StartBlockHeight is the block height at which blocks should be downloaded and processed. 35 | // Only headers will be collected for blocks below that height. 36 | StartBlockHeight int `default:"700000" json:"start_block_height" envconfig:"START_BLOCK_HEIGHT"` 37 | 38 | // BlockRequestDelay is the delay between concurrent block requests. It should be long enough 39 | // that small blocks complete before the second request is made, so concurrent requests are 40 | // only used for slow or large blocks.
41 | BlockRequestDelay config.Duration `default:"30s" json:"block_request_delay" envconfig:"BLOCK_REQUEST_DELAY"` 42 | } 43 | 44 | func DefaultConfig() *Config { 45 | return &Config{ 46 | Network: bitcoin.MainNet, 47 | Timeout: config.NewDuration(time.Hour * 4), 48 | ScanCount: 1000, 49 | TxRequestCount: 10000, 50 | StartupDelay: config.NewDuration(time.Minute), 51 | ConcurrentBlockRequests: 2, 52 | DesiredNodeCount: 50, 53 | StartBlockHeight: 700000, 54 | BlockRequestDelay: config.NewDuration(time.Second * 5), 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /internal/platform/tests/test_mock_processor.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "context" 5 | "math/rand" 6 | "sync" 7 | "time" 8 | 9 | "github.com/tokenized/logger" 10 | "github.com/tokenized/pkg/bitcoin" 11 | "github.com/tokenized/pkg/merkle_proof" 12 | "github.com/tokenized/pkg/miner_id" 13 | "github.com/tokenized/pkg/wire" 14 | 15 | "github.com/pkg/errors" 16 | ) 17 | 18 | type MockDataProcessor struct { 19 | rand *rand.Rand 20 | relevantTxs map[bitcoin.Hash32]bool 21 | 22 | sync.Mutex 23 | } 24 | 25 | func NewMockDataProcessor() *MockDataProcessor { 26 | return &MockDataProcessor{ 27 | rand: rand.New(rand.NewSource(time.Now().UnixNano())), 28 | relevantTxs: make(map[bitcoin.Hash32]bool), 29 | } 30 | } 31 | 32 | func (m *MockDataProcessor) randomRelavent() bool { 33 | return m.rand.Intn(1000) == 1 34 | } 35 | 36 | func (m *MockDataProcessor) ProcessTx(ctx context.Context, tx *wire.MsgTx) (bool, error) { 37 | m.Lock() 38 | defer m.Unlock() 39 | 40 | txid := *tx.TxHash() 41 | _, isRelevant := m.relevantTxs[txid] 42 | if !isRelevant { 43 | isRelevant = m.randomRelavent() 44 | } 45 | 46 | return isRelevant, nil 47 | } 48 | 49 | func (m *MockDataProcessor) CancelTx(ctx context.Context, txid bitcoin.Hash32) error { 50 | m.Lock() 51 | defer m.Unlock() 52 | 53 | return nil 54 | } 55 | 56 | func (m *MockDataProcessor) AddTxConflict(ctx context.Context, 57 | txid, conflictTxID bitcoin.Hash32) error { 58 | m.Lock() 59 | defer m.Unlock() 60 | 61 | return nil 62 | } 63 | 64 | func (m *MockDataProcessor) ConfirmTx(ctx context.Context, txid bitcoin.Hash32, blockHeight int, 65 | merkleProof *merkle_proof.MerkleProof) error { 66 | m.Lock() 67 | defer m.Unlock() 68 | 69 | // js, _ := json.MarshalIndent(merkleProof, "", " ") 70 | // fmt.Printf("Confirmed tx : %s\n%s\n", txid, js) 71 | delete(m.relevantTxs, txid) 72 | 73 | return nil 74 | } 75 | 76 | func (m *MockDataProcessor) UpdateTxChainDepth(ctx context.Context, txid bitcoin.Hash32, 77 | chainDepth uint32) error { 78 | m.Lock() 79 | defer m.Unlock() 80 | 81 | return nil 82 | } 83 | 84 | func (m *MockDataProcessor) ProcessCoinbaseTx(ctx context.Context, blockHash bitcoin.Hash32, 85 | tx *wire.MsgTx) error { 86 | m.Lock() 87 | defer m.Unlock() 88 | 89 | for i, output := range tx.TxOut { 90 | minerID, err := miner_id.ParseMinerIDFromScript(output.LockingScript) 91 | if err != nil { 92 | if errors.Cause(err) != miner_id.ErrNotMinerID { 93 | logger.WarnWithFields(ctx, []logger.Field{ 94 | logger.Stringer("txid", tx.TxHash()), 95 | logger.Int("output", i), 96 | }, "Failed to parse miner id output script : %s", err) 97 | } 98 | continue 99 | } 100 | 101 | logger.InfoWithFields(ctx, []logger.Field{ 102 | logger.JSON("miner_id", minerID), 103 | }, "Found miner id") 104 | 105 | break 106 | } 107 | 108 | return nil 109 | } 110 | 
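The MockDataProcessor above is one implementation of the TxProcessor interface declared in interfaces.go. The fragment below is a hedged sketch of how such a processor might be attached to a node manager so that txs and blocks seen on the network are run through it; it mirrors the wiring in cmd/node/main.go. The helper name attachProcessing and the *bitcoin_reader.NodeManager parameter type are illustrative assumptions, and shutdown handling is omitted.

```go
package main

import (
	"context"
	"sync"
	"time"

	"github.com/tokenized/bitcoin_reader"
	"github.com/tokenized/bitcoin_reader/internal/platform/tests"
	"github.com/tokenized/pkg/storage"
	"github.com/tokenized/threads"
)

// attachProcessing (hypothetical helper) wires a TxProcessor into an existing node manager,
// following the same calls made in cmd/node/main.go.
func attachProcessing(ctx context.Context, manager *bitcoin_reader.NodeManager,
	store storage.Storage, wait *sync.WaitGroup) {

	processor := tests.NewMockDataProcessor() // any TxProcessor implementation works here

	// The tx manager de-duplicates txid announcements across nodes and feeds each tx to the
	// processor once it has been downloaded.
	txManager := bitcoin_reader.NewTxManager(2 * time.Second)
	txManager.SetTxProcessor(processor)
	manager.SetTxManager(txManager)

	// The block manager downloads blocks; the block tx manager persists the relevant txids for
	// each block hash so already processed blocks can be recognized.
	blockTxManager := tests.NewBlockTxManager(store)
	blockManager := bitcoin_reader.NewBlockManager(blockTxManager, manager, 5, time.Second*30)
	manager.SetBlockManager(blockTxManager, blockManager, processor)

	// Run both managers as threads alongside the node manager (completion channels ignored
	// here; see cmd/node/main.go for full shutdown handling).
	processTxThread, _ := threads.NewUninterruptableThreadComplete("Process Txs",
		txManager.Run, wait)
	processBlocksThread, _ := threads.NewInterruptableThreadComplete("Process Blocks",
		blockManager.Run, wait)

	processTxThread.Start(ctx)
	processBlocksThread.Start(ctx)
}
```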
-------------------------------------------------------------------------------- /headers/proof_of_work.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "context" 5 | "math/big" 6 | "sort" 7 | 8 | "github.com/tokenized/pkg/bitcoin" 9 | 10 | "github.com/pkg/errors" 11 | ) 12 | 13 | func (b Branch) Target(ctx context.Context, height int) (*big.Int, error) { 14 | 15 | // NOTE: Assume 2017 difficulty adjustment is active --ce 16 | 17 | // Get median time and work for 3 blocks at current height 18 | lastTime, lastWork, err := b.MedianTimeAndWork(ctx, height-1, 3) 19 | if err != nil { 20 | return nil, errors.Wrap(err, "last header stats") 21 | } 22 | 23 | // Get median time and work for 3 blocks for 144 blocks below current height 24 | firstTime, firstWork, err := b.MedianTimeAndWork(ctx, height-144-1, 3) 25 | if err != nil { 26 | return nil, errors.Wrap(err, "first header stats") 27 | } 28 | 29 | timeSpan := lastTime - firstTime 30 | 31 | // Apply time span limits 32 | if timeSpan < 72*600 { 33 | timeSpan = 72 * 600 34 | } 35 | if timeSpan > 288*600 { 36 | timeSpan = 288 * 600 37 | } 38 | 39 | // Work (W) is difference in median accumulated work 40 | work := &big.Int{} 41 | work.Sub(lastWork, firstWork) 42 | 43 | // Projected Work (PW) = (W * 600) / TS. 44 | projected := &big.Int{} 45 | projected.Mul(work, big.NewInt(600)) 46 | projected.Div(projected, big.NewInt(int64(timeSpan))) 47 | 48 | target := bitcoin.ConvertToWork(projected) 49 | 50 | if target.Cmp(bitcoin.MaxWork) > 0 { 51 | target.Set(bitcoin.MaxWork) 52 | } 53 | 54 | return target, nil 55 | } 56 | 57 | func (b Branch) TimeAndWork(ctx context.Context, height int) (uint32, *big.Int, error) { 58 | data := b.AtHeight(height) 59 | if data == nil { 60 | return 0, nil, ErrHeaderDataNotFound 61 | } 62 | 63 | return data.Header.Timestamp, data.AccumulatedWork, nil 64 | } 65 | 66 | func (b Branch) MedianTimeAndWork(ctx context.Context, 67 | height, count int) (uint32, *big.Int, error) { 68 | 69 | // Get time and accumulated work for 3 blocks ending at height 70 | list := make(timeAndWorkList, count) 71 | for i := 0; i < count; i++ { 72 | time, work, err := b.TimeAndWork(ctx, height) 73 | if err != nil { 74 | return 0, nil, errors.Wrapf(err, "get header stats : %d", height) 75 | } 76 | 77 | list[count-i-1] = &timeAndWork{ 78 | time: time, 79 | work: work, 80 | } 81 | 82 | height-- 83 | } 84 | 85 | // Sort by time 86 | sort.Sort(list) 87 | 88 | // Get values from the middle item in the list. 89 | result := list[count/2] 90 | return result.time, result.work, nil 91 | } 92 | 93 | type timeAndWork struct { 94 | time uint32 95 | work *big.Int 96 | } 97 | 98 | type timeAndWorkList []*timeAndWork 99 | 100 | // Len is part of sort.Interface. 101 | func (l timeAndWorkList) Len() int { 102 | return len(l) 103 | } 104 | 105 | // Swap is part of sort.Interface. 106 | func (l timeAndWorkList) Swap(i, j int) { 107 | l[i], l[j] = l[j], l[i] 108 | } 109 | 110 | // Less is part of sort.Interface. 
111 | func (l timeAndWorkList) Less(i, j int) bool { 112 | return l[i].time < l[j].time 113 | } 114 | -------------------------------------------------------------------------------- /headers/splits.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "github.com/tokenized/pkg/bitcoin" 5 | "github.com/tokenized/pkg/wire" 6 | ) 7 | 8 | var ( 9 | SplitNameBSV = "BSV" 10 | SplitNameBTC = "BTC" 11 | SplitNameBCH = "BCH" 12 | 13 | // Chain splits that need to be checked. 14 | mainNetSplits = []splitHex{ 15 | splitHex{ 16 | name: SplitNameBTC, 17 | before: "0000000000000000011865af4122fe3b144e2cbeea86142e8ff2fb4107352d43", 18 | after: "00000000000000000019f112ec0a9982926f1258cdcc558dd7c3b7e5dc7fa148", 19 | height: 478559, 20 | }, 21 | splitHex{ 22 | name: SplitNameBCH, 23 | before: "00000000000000000102d94fde9bd0807a2cc7582fe85dd6349b73ce4e8d9322", 24 | after: "0000000000000000004626ff6e3b936941d341c5932ece4357eeccac44e6d56c", 25 | height: 556767, 26 | }, 27 | } 28 | 29 | mainNetRequiredSplit = splitHex{ 30 | name: SplitNameBSV, 31 | before: "00000000000000000102d94fde9bd0807a2cc7582fe85dd6349b73ce4e8d9322", 32 | after: "000000000000000001d956714215d96ffc00e0afda4cd0a96c96f8d802b1662b", 33 | height: 556767, 34 | } 35 | 36 | // First block after the BCH/BSV split on the BSV chain. 37 | mainNetRequiredHeaderPrevBlock, _ = bitcoin.NewHash32FromStr("00000000000000000102d94fde9bd0807a2cc7582fe85dd6349b73ce4e8d9322") 38 | mainNetRequiredHeaderMerkleRoot, _ = bitcoin.NewHash32FromStr("da2b9eb7e8a3619734a17b55c47bdd6fd855b0afa9c7e14e3a164a279e51bba9") 39 | MainNetRequiredHeader = &wire.BlockHeader{ 40 | Version: 536870912, 41 | PrevBlock: *mainNetRequiredHeaderPrevBlock, 42 | MerkleRoot: *mainNetRequiredHeaderMerkleRoot, 43 | Timestamp: 1542305817, 44 | Bits: 0x18021fdb, 45 | Nonce: 1301274612, 46 | } 47 | ) 48 | 49 | type Split struct { 50 | Name string 51 | BeforeHash bitcoin.Hash32 // Header on both chains 52 | AfterHash bitcoin.Hash32 // Header only on that split 53 | Height int // Height of AfterHash 54 | } 55 | 56 | type Splits []Split 57 | 58 | // Len is part of sort.Interface. 59 | func (l Splits) Len() int { 60 | return len(l) 61 | } 62 | 63 | // Swap is part of sort.Interface. 64 | func (l Splits) Swap(i, j int) { 65 | l[i], l[j] = l[j], l[i] 66 | } 67 | 68 | // Less is part of sort.Interface. Sorts by highest height first. 69 | func (l Splits) Less(i, j int) bool { 70 | return l[i].Height > l[j].Height 71 | } 72 | 73 | type splitHex struct { 74 | name string 75 | before string // Header on both chains 76 | after string // Header only on that split 77 | height int 78 | } 79 | 80 | type HeightHash struct { 81 | Height int 82 | Hash bitcoin.Hash32 83 | } 84 | 85 | type HeightHashes []*HeightHash 86 | 87 | // Len is part of sort.Interface. 88 | func (l HeightHashes) Len() int { 89 | return len(l) 90 | } 91 | 92 | // Swap is part of sort.Interface. 93 | func (l HeightHashes) Swap(i, j int) { 94 | l[i], l[j] = l[j], l[i] 95 | } 96 | 97 | // Less is part of sort.Interface. Sorts by highest height first. 98 | func (l HeightHashes) Less(i, j int) bool { 99 | return l[i].Height > l[j].Height 100 | } 101 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | OPEN BITCOIN SV LICENSE FOR BSV SOLUTION 2 | 3 | Effective Date: June 1, 2022 4 | 5 | Copyright © 2022 Tokenized Group Pty Ltd. 
6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction other than the conditions set out below, including without limitation the rights to use, copy, modify, merge, publish, distribute, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 8 | 9 | 1. Blockchain Limitation. The grant to deal provided above is restricted to dealing in the Software only for purposes relating to the Bitcoin SV blockchain. The Bitcoin SV blockchain is defined, for purposes of this license, as the Bitcoin chain containing block height #556767 with this hash: 000000000000000001d956714215d96ffc00e0afda4cd0a96c96f8d802b1662b. 10 | 11 | 2. Redistributions of all copies or substantial portions of the source code of the Software must retain the above copyright notice and above permission notice (with blockchain limitation), this list of conditions and the following disclaimer. 12 | 13 | 3. Redistributions of all copies or substantial portions of the object code of the Software must reproduce the above copyright notice and above permission notice (with blockchain limitation), this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 14 | 15 | 4. Any work to the extent consisting of the Software or modifications made thereto, must be licensed under this license to anyone who comes into possession of a copy. regardless of how such work is packaged. Each time such a work is conveyed to a recipient, the recipient automatically receives this license from the original licensors, to run, modify and otherwise deal with that work. 16 | 17 | 5. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 18 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF OR OTHER DEALINGS IN THE SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 19 | 20 | LICENSE APPLICABILITY 21 | 22 | This license applies to any person or entity that downloads, reproduces or obtains the Software on or after the Effective Date stated above. 23 | 24 | For any person or entity that downloaded, reproduced or obtained the Software before the Effective Date under the prior applicable license entitled “BCH Solution Open Source License,” nChain Holdings Limited hereby designates the Bitcoin SV blockchain (as defined above) to be the only version or fork of the Bitcoin Cash (“BCH”) blockchain on which dealing in the Software is permitted, as set forth in paragraph 1 of that prior license. Therefore, the Software may be used only on the Bitcoin SV blockchain and may not be used on any other chain. 
25 | -------------------------------------------------------------------------------- /headers/test_helpers.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/big" 7 | "math/rand" 8 | 9 | "github.com/tokenized/pkg/bitcoin" 10 | "github.com/tokenized/pkg/wire" 11 | ) 12 | 13 | func MockHeaders(ctx context.Context, repo *Repository, afterHash bitcoin.Hash32, timestamp uint32, 14 | count int) []*wire.BlockHeader { 15 | 16 | var headers []*wire.BlockHeader 17 | previousHash := afterHash 18 | for i := 0; i < count; i++ { 19 | header := &wire.BlockHeader{ 20 | Version: 1, 21 | PrevBlock: previousHash, 22 | Timestamp: timestamp, 23 | Bits: 0x1d00ffff, 24 | Nonce: rand.Uint32(), 25 | } 26 | rand.Read(header.MerkleRoot[:]) 27 | 28 | if err := repo.ProcessHeader(ctx, header); err != nil { 29 | panic(fmt.Sprintf("add header %d: %s", i, err)) 30 | } 31 | 32 | headers = append(headers, header) 33 | previousHash = *header.BlockHash() 34 | timestamp += 600 35 | } 36 | 37 | return headers 38 | } 39 | 40 | func MockHeadersOnBranch(branch *Branch, count int) { 41 | previousHash := branch.Last().Hash 42 | timestamp := branch.Last().Header.Timestamp 43 | 44 | for i := 0; i < count; i++ { 45 | header := &wire.BlockHeader{ 46 | Version: 1, 47 | PrevBlock: previousHash, 48 | Timestamp: timestamp, 49 | Bits: 0x1d00ffff, 50 | Nonce: rand.Uint32(), 51 | } 52 | rand.Read(header.MerkleRoot[:]) 53 | 54 | if !branch.Add(header) { 55 | panic(fmt.Sprintf("add header %d", i)) 56 | } 57 | 58 | previousHash = *header.BlockHash() 59 | timestamp += 600 60 | } 61 | } 62 | 63 | // Initialize with genesis header with specified timestamp. 64 | func (repo *Repository) InitializeWithTimeStamp(timestamp uint32) error { 65 | repo.Lock() 66 | defer repo.Unlock() 67 | 68 | header := &wire.BlockHeader{ 69 | Version: 1, 70 | Timestamp: timestamp, 71 | Bits: 0x1d00ffff, 72 | Nonce: rand.Uint32(), 73 | } 74 | rand.Read(header.MerkleRoot[:]) 75 | 76 | repo.longest, _ = NewBranch(nil, -1, header) 77 | repo.branches = Branches{repo.longest} 78 | return nil 79 | } 80 | 81 | func (repo *Repository) DisableDifficulty() { 82 | repo.Lock() 83 | defer repo.Unlock() 84 | 85 | repo.disableDifficulty = true 86 | } 87 | 88 | func (repo *Repository) EnableDifficulty() { 89 | repo.Lock() 90 | defer repo.Unlock() 91 | 92 | repo.disableDifficulty = false 93 | } 94 | 95 | func (repo *Repository) DisableSplitProtection() { 96 | repo.Lock() 97 | defer repo.Unlock() 98 | 99 | repo.disableSplitProtection = true 100 | } 101 | 102 | func (repo *Repository) EnableSplitProtection() { 103 | repo.Lock() 104 | defer repo.Unlock() 105 | 106 | repo.disableSplitProtection = false 107 | } 108 | 109 | // MockLatest is used only during testing to set a header as the latest. 
110 | func (repo *Repository) MockLatest(ctx context.Context, header *wire.BlockHeader, 111 | height int, work *big.Int) error { 112 | repo.Lock() 113 | defer repo.Unlock() 114 | 115 | mockBranch, _ := NewBranch(nil, height-1, header) 116 | mockBranch.headers[0].AccumulatedWork = work 117 | repo.branches = Branches{mockBranch} 118 | repo.longest = mockBranch 119 | 120 | return nil 121 | } 122 | -------------------------------------------------------------------------------- /headers/header_data_test.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "math/big" 7 | "math/rand" 8 | "testing" 9 | "time" 10 | 11 | "github.com/tokenized/pkg/bitcoin" 12 | ) 13 | 14 | func Test_HeaderData_Serialize(t *testing.T) { 15 | genesis := genesisHeader(bitcoin.MainNet) 16 | data := &HeaderData{ 17 | Hash: *genesis.BlockHash(), 18 | Header: genesis, 19 | AccumulatedWork: bitcoin.ConvertToWork(bitcoin.ConvertToDifficulty(genesis.Bits)), 20 | } 21 | 22 | buf := &bytes.Buffer{} 23 | if err := data.Serialize(buf); err != nil { 24 | t.Fatalf("Failed to serialize header data : %s", err) 25 | } 26 | 27 | if buf.Len() != headerDataSerializeSize { 28 | t.Errorf("Wrong serialize size : got %d, want %d", buf.Len(), headerDataSerializeSize) 29 | } 30 | 31 | read := &HeaderData{} 32 | if err := read.Deserialize(buf); err != nil { 33 | t.Fatalf("Failed to deserialize header data : %s", err) 34 | } 35 | js, _ := json.MarshalIndent(read, "", " ") 36 | t.Logf("Read : %s", js) 37 | 38 | if !read.Hash.Equal(&data.Hash) { 39 | t.Errorf("Wrong hash : \ngot : %s\nwant : %s", read.Hash, data.Hash) 40 | } 41 | 42 | if !read.Header.BlockHash().Equal(genesis.BlockHash()) { 43 | t.Errorf("Wrong header : \ngot : %s\nwant : %s", read.Header.BlockHash(), 44 | genesis.BlockHash()) 45 | } 46 | 47 | if read.AccumulatedWork.Cmp(data.AccumulatedWork) != 0 { 48 | t.Errorf("Wrong hash : \ngot : %s\nwant : %s", read.AccumulatedWork.Text(16), 49 | data.AccumulatedWork.Text(16)) 50 | } 51 | } 52 | 53 | func Test_serializeBigInt(t *testing.T) { 54 | in := &big.Int{} 55 | buf := &bytes.Buffer{} 56 | 57 | if err := serializeBigInt(in, buf); err != nil { 58 | t.Fatalf("Failed to serialize big int : %s", err) 59 | } 60 | 61 | out, err := deserializeBigInt(buf) 62 | if err != nil { 63 | t.Fatalf("Failed to deserialize big int : %s", err) 64 | } 65 | t.Logf("Read : %s", out.Text(16)) 66 | 67 | if in.Cmp(out) != 0 { 68 | t.Errorf("Read value not equal : got %s, want %s", out.Text(16), in.Text(16)) 69 | } 70 | 71 | in.Add(in, big.NewInt(0x128379826753)) 72 | buf = &bytes.Buffer{} 73 | 74 | if err := serializeBigInt(in, buf); err != nil { 75 | t.Fatalf("Failed to serialize big int : %s", err) 76 | } 77 | 78 | out, err = deserializeBigInt(buf) 79 | if err != nil { 80 | t.Fatalf("Failed to deserialize big int : %s", err) 81 | } 82 | t.Logf("Read : %s", out.Text(16)) 83 | 84 | if in.Cmp(out) != 0 { 85 | t.Errorf("Read value not equal : got %s, want %s", out.Text(16), in.Text(16)) 86 | } 87 | 88 | max := &big.Int{} 89 | max.SetString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) 90 | in.Rand(rand.New(rand.NewSource(time.Now().UnixNano())), max) 91 | buf = &bytes.Buffer{} 92 | 93 | if err := serializeBigInt(in, buf); err != nil { 94 | t.Fatalf("Failed to serialize big int : %s", err) 95 | } 96 | 97 | out, err = deserializeBigInt(buf) 98 | if err != nil { 99 | t.Fatalf("Failed to deserialize big int : %s", err) 100 | } 101 | 
t.Logf("Read : %s", out.Text(16)) 102 | 103 | if in.Cmp(out) != 0 { 104 | t.Errorf("Read value not equal : got %s, want %s", out.Text(16), in.Text(16)) 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /peers_test.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/tokenized/bitcoin_reader/internal/platform/tests" 8 | "github.com/tokenized/pkg/bitcoin" 9 | "github.com/tokenized/pkg/storage" 10 | ) 11 | 12 | func Test_LoadSeeds(t *testing.T) { 13 | ctx := tests.Context() 14 | store := storage.NewMockStorage() 15 | peers := NewPeerRepository(store, "") 16 | 17 | peers.LoadSeeds(ctx, bitcoin.MainNet) 18 | } 19 | 20 | func TestPeers(t *testing.T) { 21 | addresses := []string{ 22 | "test 1", 23 | "test 2", 24 | "test 3", 25 | "test 4", 26 | "test 5", 27 | "test 6", 28 | } 29 | 30 | ctx := tests.Context() 31 | store := storage.NewMockStorage() 32 | repo := NewPeerRepository(store, "") 33 | 34 | // For logging to test from within functions 35 | ctx = context.WithValue(ctx, 999, t) 36 | // Use this to get the test value from within non-test code. 37 | // testValue := ctx.Value(999) 38 | // test, ok := testValue.(*testing.T) 39 | // if ok { 40 | // test.Logf("Test Debug Message") 41 | // } 42 | 43 | repo.Clear(ctx) 44 | 45 | // Load 46 | if err := repo.Load(ctx); err != nil { 47 | t.Errorf("Failed to load repo : %v", err) 48 | } 49 | 50 | // Add 51 | for _, address := range addresses { 52 | added, err := repo.Add(ctx, address) 53 | if err != nil { 54 | t.Errorf("Failed to add address : %v", err) 55 | } 56 | if !added { 57 | t.Errorf("Didn't add address : %s", address) 58 | } 59 | } 60 | 61 | // Get min score 0 62 | peers, err := repo.Get(ctx, 0, -1) 63 | if err != nil { 64 | t.Errorf("Failed to get addresses : %v", err) 65 | } 66 | 67 | for _, address := range addresses { 68 | found := false 69 | for _, peer := range peers { 70 | if peer.Address == address { 71 | t.Logf("Found address : %s", address) 72 | found = true 73 | break 74 | } 75 | } 76 | 77 | if !found { 78 | t.Errorf("Failed to find address : %s", address) 79 | } 80 | } 81 | 82 | // Get min score 0 83 | peers, err = repo.Get(ctx, 1, -1) 84 | if err != nil { 85 | t.Errorf("Failed to get addresses : %v", err) 86 | } 87 | 88 | if len(peers) > 0 { 89 | t.Errorf("Pulled high score peers") 90 | } 91 | 92 | // Save 93 | t.Logf("Saving") 94 | if err := repo.Save(ctx); err != nil { 95 | t.Errorf("Failed to save repo : %v", err) 96 | } 97 | 98 | // Load 99 | t.Logf("Reloading") 100 | if err := repo.Load(ctx); err != nil { 101 | t.Errorf("Failed to re-load repo : %v", err) 102 | } 103 | 104 | // Get min score 0 105 | peers, err = repo.Get(ctx, 0, -1) 106 | if err != nil { 107 | t.Errorf("Failed to get addresses : %v", err) 108 | } 109 | 110 | for _, address := range addresses { 111 | found := false 112 | for _, peer := range peers { 113 | if peer.Address == address { 114 | t.Logf("Found address : %s", address) 115 | found = true 116 | break 117 | } 118 | } 119 | 120 | if !found { 121 | t.Errorf("Failed to find address : %s", address) 122 | } 123 | } 124 | 125 | // Get min score 0 126 | peers, err = repo.Get(ctx, 1, -1) 127 | if err != nil { 128 | t.Errorf("Failed to get addresses : %v", err) 129 | } 130 | 131 | if len(peers) > 0 { 132 | t.Errorf("Pulled high score peers") 133 | } 134 | } 135 | -------------------------------------------------------------------------------- 
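Outside of the tests above, the same peer repository calls are used to seed, query, and persist peers (see also cmd/node/main.go). The following is an illustrative sketch: the helper name seedPeerAddresses is hypothetical, and the Get arguments simply mirror the tests (minimum score 0; -1 appears to mean no count limit).

```go
package main

import (
	"context"

	"github.com/tokenized/bitcoin_reader"
	"github.com/tokenized/pkg/bitcoin"
	"github.com/tokenized/pkg/storage"

	"github.com/pkg/errors"
)

// seedPeerAddresses (hypothetical helper) loads previously saved peers, falls back to the
// built-in seed list when the repository is empty, and returns the known peer addresses.
func seedPeerAddresses(ctx context.Context, store storage.Storage) ([]string, error) {
	peers := bitcoin_reader.NewPeerRepository(store, "")

	if err := peers.Load(ctx); err != nil {
		return nil, errors.Wrap(err, "load peers")
	}
	if peers.Count() == 0 {
		peers.LoadSeeds(ctx, bitcoin.MainNet)
	}

	// Minimum score 0; -1 mirrors the tests above.
	peerList, err := peers.Get(ctx, 0, -1)
	if err != nil {
		return nil, errors.Wrap(err, "get peers")
	}

	addresses := make([]string, 0, len(peerList))
	for _, peer := range peerList {
		addresses = append(addresses, peer.Address)
	}

	// Persist the repository so scores and newly discovered addresses survive restarts.
	if err := peers.Save(ctx); err != nil {
		return nil, errors.Wrap(err, "save peers")
	}

	return addresses, nil
}
```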
/internal/platform/tests/test_block_tx_processor.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/binary" 7 | "fmt" 8 | 9 | "github.com/tokenized/pkg/bitcoin" 10 | "github.com/tokenized/pkg/storage" 11 | "github.com/tokenized/pkg/wire" 12 | 13 | "github.com/pkg/errors" 14 | ) 15 | 16 | const ( 17 | blockTxsPath = "block_txids" 18 | blockTxsVersion = uint8(0) 19 | ) 20 | 21 | var ( 22 | endian = binary.LittleEndian 23 | ) 24 | 25 | type BlockTxManager struct { 26 | store storage.Storage 27 | } 28 | 29 | func NewBlockTxManager(store storage.Storage) *BlockTxManager { 30 | return &BlockTxManager{ 31 | store: store, 32 | } 33 | } 34 | 35 | func (m *BlockTxManager) FetchBlockTxIDs(ctx context.Context, 36 | blockHash bitcoin.Hash32) ([]bitcoin.Hash32, bool, error) { 37 | 38 | b, err := m.store.Read(ctx, blockTxIDsPath(blockHash)) 39 | if err != nil { 40 | if errors.Cause(err) == storage.ErrNotFound { 41 | return nil, false, nil 42 | } 43 | return nil, false, errors.Wrap(err, "read") 44 | } 45 | 46 | r := bytes.NewReader(b) 47 | 48 | var version uint8 49 | if err := binary.Read(r, endian, &version); err != nil { 50 | return nil, false, errors.Wrap(err, "version") 51 | } 52 | 53 | if version != 0 { 54 | return nil, false, errors.New("Unknown version") 55 | } 56 | 57 | count, err := wire.ReadVarInt(r, 0) 58 | if err != nil { 59 | return nil, false, errors.Wrap(err, "count") 60 | } 61 | 62 | txids := make([]bitcoin.Hash32, count) 63 | for i := range txids { 64 | if err := txids[i].Deserialize(r); err != nil { 65 | return nil, false, errors.Wrapf(err, "txid %d", i) 66 | } 67 | } 68 | 69 | return txids, true, nil 70 | } 71 | 72 | func (m *BlockTxManager) AppendBlockTxIDs(ctx context.Context, blockHash bitcoin.Hash32, 73 | txids []bitcoin.Hash32) error { 74 | 75 | existingTxIDs, exists, err := m.FetchBlockTxIDs(ctx, blockHash) 76 | if err != nil { 77 | return errors.Wrap(err, "fetch block txids") 78 | } else if !exists { 79 | return m.SaveBlockTxIDs(ctx, blockHash, txids) 80 | } 81 | 82 | for _, txid := range txids { 83 | existingTxIDs = appendHash32(existingTxIDs, txid) 84 | } 85 | 86 | return m.SaveBlockTxIDs(ctx, blockHash, existingTxIDs) 87 | } 88 | 89 | func appendHash32(hashes []bitcoin.Hash32, hash bitcoin.Hash32) []bitcoin.Hash32 { 90 | for _, h := range hashes { 91 | if h.Equal(&hash) { 92 | return hashes // already exists 93 | } 94 | } 95 | 96 | return append(hashes, hash) 97 | } 98 | 99 | func (m *BlockTxManager) SaveBlockTxIDs(ctx context.Context, blockHash bitcoin.Hash32, 100 | txids []bitcoin.Hash32) error { 101 | 102 | buf := &bytes.Buffer{} 103 | if err := binary.Write(buf, endian, blockTxsVersion); err != nil { 104 | return errors.Wrap(err, "version") 105 | } 106 | 107 | if err := wire.WriteVarInt(buf, 0, uint64(len(txids))); err != nil { 108 | return errors.Wrap(err, "count") 109 | } 110 | 111 | for i, txid := range txids { 112 | if err := txid.Serialize(buf); err != nil { 113 | return errors.Wrapf(err, "txid %d", i) 114 | } 115 | } 116 | 117 | if err := m.store.Write(ctx, blockTxIDsPath(blockHash), buf.Bytes(), nil); err != nil { 118 | return errors.Wrap(err, "write") 119 | } 120 | 121 | return nil 122 | } 123 | 124 | func blockTxIDsPath(hash bitcoin.Hash32) string { 125 | return fmt.Sprintf("%s/%s", blockTxsPath, hash) 126 | } 127 | -------------------------------------------------------------------------------- /bitcoin_node_test.go: 
-------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "testing" 7 | "time" 8 | 9 | "github.com/tokenized/bitcoin_reader/internal/platform/tests" 10 | "github.com/tokenized/pkg/bitcoin" 11 | "github.com/tokenized/pkg/storage" 12 | "github.com/tokenized/threads" 13 | ) 14 | 15 | func Test_Handshake(t *testing.T) { 16 | if !testing.Verbose() { 17 | t.Skip() // Don't want to redownload the block all the time 18 | } 19 | ctx := tests.Context() 20 | store := storage.NewMockStorage() 21 | 22 | config := &Config{ 23 | Network: bitcoin.MainNet, 24 | } 25 | 26 | headers := NewMockHeaderRepository() 27 | peers := NewPeerRepository(store, "") 28 | 29 | address := "bitcoind.shared.tokenized.com:8333" 30 | 31 | node := NewBitcoinNode(address, "/Tokenized/Spynode:Test/", config, headers, peers) 32 | node.SetVerifyOnly() 33 | 34 | runThread := threads.NewInterruptableThread("Run", node.Run) 35 | runComplete := runThread.GetCompleteChannel() 36 | runThread.Start(ctx) 37 | 38 | select { 39 | case <-runComplete: 40 | if err := runThread.Error(); err != nil { 41 | t.Errorf("Failed to run : %s", err) 42 | } 43 | 44 | case <-time.After(3 * time.Second): 45 | t.Logf("Shutting down") 46 | runThread.Stop(ctx) 47 | select { 48 | case <-runComplete: 49 | if err := runThread.Error(); err != nil { 50 | t.Errorf("Failed to run : %s", err) 51 | } 52 | 53 | case <-time.After(time.Second): 54 | t.Fatalf("Failed to shut down") 55 | } 56 | } 57 | 58 | if !node.Verified() { 59 | t.Errorf("Failed to verify node") 60 | } 61 | } 62 | 63 | func Test_FindPeers(t *testing.T) { 64 | if !testing.Verbose() { 65 | t.Skip() // Don't want to redownload the block all the time 66 | } 67 | ctx := tests.Context() 68 | store := storage.NewMockStorage() 69 | 70 | config := &Config{ 71 | Network: bitcoin.MainNet, 72 | } 73 | 74 | headers := NewMockHeaderRepository() 75 | peers := NewPeerRepository(store, "") 76 | // if err := peers.Load(ctx); err != nil { 77 | // t.Fatalf("Failed to load peers : %s", err) 78 | // } 79 | peers.LoadSeeds(ctx, config.Network) 80 | 81 | peerList, err := peers.Get(ctx, 0, -1) 82 | if err != nil { 83 | t.Fatalf("Failed to get peers : %s", err) 84 | } 85 | 86 | if len(peerList) == 0 { 87 | t.Fatalf("No peers returned") 88 | } 89 | 90 | var wait sync.WaitGroup 91 | var nodes []*BitcoinNode 92 | var stopper threads.StopCombiner 93 | for i, peer := range peerList { 94 | node := NewBitcoinNode(peer.Address, "/Tokenized/Spynode:Test/", config, headers, peers) 95 | node.SetVerifyOnly() 96 | nodes = append(nodes, node) 97 | 98 | thread := threads.NewInterruptableThread(fmt.Sprintf("Run (%d)", i), node.Run) 99 | thread.SetWait(&wait) 100 | stopper.Add(thread) 101 | thread.Start(ctx) 102 | 103 | if i%100 == 0 { 104 | time.Sleep(5 * time.Second) 105 | } 106 | 107 | // if i >= 100 { 108 | // break 109 | // } 110 | } 111 | 112 | time.Sleep(5 * time.Second) 113 | t.Logf("Stopping") 114 | stopper.Stop(ctx) 115 | wait.Wait() 116 | 117 | verifiedCount := 0 118 | for _, node := range nodes { 119 | if node.Verified() { 120 | verifiedCount++ 121 | 122 | // ip := node.IP.To16() 123 | // value := "\t{[]byte{" 124 | // for i, b := range ip { 125 | // if i != 0 { 126 | // value += ", " 127 | // } 128 | // value += fmt.Sprintf("0x%02x", b) 129 | // } 130 | // value += fmt.Sprintf("}, %d},", node.Port) 131 | 132 | // fmt.Printf("%s\n", value) 133 | } 134 | } 135 | 136 | t.Logf("Verified count : %d/%d", verifiedCount, len(nodes)) 137 | } 138 | 139 
| func Test_ChannelFlush(t *testing.T) { 140 | 141 | channel := make(chan int, 20) 142 | for i := 0; i < 10; i++ { 143 | channel <- i 144 | } 145 | 146 | for i := 0; i < 5; i++ { 147 | v := <-channel 148 | t.Logf("Received %d", v) 149 | } 150 | 151 | close(channel) 152 | for v := range channel { 153 | t.Logf("Received %d", v) 154 | } 155 | 156 | } 157 | -------------------------------------------------------------------------------- /test_helpers.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/tokenized/bitcoin_reader/headers" 8 | "github.com/tokenized/pkg/bitcoin" 9 | "github.com/tokenized/pkg/wire" 10 | 11 | "github.com/pkg/errors" 12 | ) 13 | 14 | type MockHeaderRepository struct { 15 | lastHeight int 16 | lastHash bitcoin.Hash32 17 | previousHashes []bitcoin.Hash32 18 | newHeadersAvailableChannel *chan *wire.BlockHeader 19 | } 20 | 21 | func NewMockHeaderRepository() *MockHeaderRepository { 22 | lastHeight := 725107 23 | lastHash, _ := bitcoin.NewHash32FromStr("00000000000000000749cd1e05f963bde1347295de3bea047842d6c7b6a45311") 24 | previousHash, _ := bitcoin.NewHash32FromStr("000000000000000007de78e08cc3d133f6a9cb2062f31764b7f64643b2575d90") 25 | 26 | return &MockHeaderRepository{ 27 | lastHeight: lastHeight, 28 | lastHash: *lastHash, 29 | previousHashes: []bitcoin.Hash32{*previousHash}, 30 | } 31 | } 32 | 33 | func (m *MockHeaderRepository) GetNewHeadersAvailableChannel() <-chan *wire.BlockHeader { 34 | result := make(chan *wire.BlockHeader, 1000) 35 | 36 | m.newHeadersAvailableChannel = &result 37 | return result 38 | } 39 | 40 | func (m *MockHeaderRepository) GetVerifyOnlyLocatorHashes(ctx context.Context) ([]bitcoin.Hash32, error) { 41 | return m.previousHashes, nil 42 | } 43 | 44 | func (m *MockHeaderRepository) GetLocatorHashes(ctx context.Context, 45 | max int) ([]bitcoin.Hash32, error) { 46 | return m.previousHashes, nil 47 | } 48 | 49 | func (m *MockHeaderRepository) Height() int { 50 | return m.lastHeight 51 | } 52 | 53 | func (m *MockHeaderRepository) Hash(ctx context.Context, height int) (*bitcoin.Hash32, error) { 54 | return nil, errors.New("Not implemented") 55 | } 56 | 57 | func (m *MockHeaderRepository) HashHeight(hash bitcoin.Hash32) int { 58 | if m.lastHash.Equal(&hash) { 59 | return m.lastHeight 60 | } 61 | 62 | return -1 63 | } 64 | 65 | func (m *MockHeaderRepository) LastHash() bitcoin.Hash32 { 66 | return m.lastHash 67 | } 68 | 69 | func (m *MockHeaderRepository) LastTime() uint32 { 70 | return 0 71 | } 72 | 73 | func (m *MockHeaderRepository) PreviousHash(hash bitcoin.Hash32) (*bitcoin.Hash32, int) { 74 | return nil, -1 75 | } 76 | 77 | func (m *MockHeaderRepository) VerifyHeader(ctx context.Context, header *wire.BlockHeader) error { 78 | hash := *header.BlockHash() 79 | if hash.Equal(&m.lastHash) { 80 | return nil 81 | } 82 | 83 | return headers.ErrUnknownHeader 84 | } 85 | 86 | func (m *MockHeaderRepository) ProcessHeader(ctx context.Context, header *wire.BlockHeader) error { 87 | hash := *header.BlockHash() 88 | if hash.Equal(&m.lastHash) { 89 | return nil 90 | } 91 | 92 | return headers.ErrUnknownHeader 93 | } 94 | 95 | func (m *MockHeaderRepository) Stop(ctx context.Context) { 96 | if m.newHeadersAvailableChannel != nil { 97 | close(*m.newHeadersAvailableChannel) 98 | m.newHeadersAvailableChannel = nil 99 | } 100 | } 101 | 102 | type MockBlockTxManager struct { 103 | blocks map[bitcoin.Hash32][]bitcoin.Hash32 104 | 105 | sync.Mutex 
106 | } 107 | 108 | func NewMockBlockTxManager() *MockBlockTxManager { 109 | return &MockBlockTxManager{ 110 | blocks: make(map[bitcoin.Hash32][]bitcoin.Hash32), 111 | } 112 | } 113 | 114 | func (m *MockBlockTxManager) FetchBlockTxIDs(ctx context.Context, 115 | blockHash bitcoin.Hash32) ([]bitcoin.Hash32, bool, error) { 116 | m.Lock() 117 | defer m.Unlock() 118 | 119 | txids, exists := m.blocks[blockHash] 120 | if !exists { 121 | return nil, false, nil 122 | } 123 | return txids, true, nil 124 | } 125 | 126 | func (m *MockBlockTxManager) AppendBlockTxIDs(ctx context.Context, blockHash bitcoin.Hash32, 127 | txids []bitcoin.Hash32) error { 128 | m.Lock() 129 | defer m.Unlock() 130 | 131 | existingTxids, exists := m.blocks[blockHash] 132 | if !exists { 133 | m.blocks[blockHash] = txids 134 | return nil 135 | } 136 | 137 | for _, txid := range txids { 138 | existingTxids = appendHash32(existingTxids, txid) 139 | } 140 | 141 | m.blocks[blockHash] = existingTxids 142 | return nil 143 | } 144 | 145 | func appendHash32(hashes []bitcoin.Hash32, hash bitcoin.Hash32) []bitcoin.Hash32 { 146 | for _, h := range hashes { 147 | if h.Equal(&hash) { 148 | return hashes // already exists 149 | } 150 | } 151 | 152 | return append(hashes, hash) 153 | } 154 | -------------------------------------------------------------------------------- /cmd/node/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "os/signal" 8 | "path" 9 | "sync" 10 | "syscall" 11 | "time" 12 | 13 | "github.com/tokenized/bitcoin_reader" 14 | "github.com/tokenized/bitcoin_reader/headers" 15 | "github.com/tokenized/bitcoin_reader/internal/platform/tests" 16 | "github.com/tokenized/config" 17 | "github.com/tokenized/logger" 18 | "github.com/tokenized/pkg/bitcoin" 19 | "github.com/tokenized/pkg/storage" 20 | "github.com/tokenized/threads" 21 | 22 | "github.com/pkg/errors" 23 | ) 24 | 25 | var ( 26 | buildVersion = "unknown" 27 | buildDate = "unknown" 28 | buildUser = "unknown" 29 | ) 30 | 31 | func main() { 32 | // --------------------------------------------------------------------------------------------- 33 | // Logging 34 | 35 | logPath := "./tmp/node/node.log" 36 | if len(logPath) > 0 { 37 | os.MkdirAll(path.Dir(logPath), os.ModePerm) 38 | } 39 | isDevelopment := true 40 | 41 | logConfig := logger.NewConfig(isDevelopment, false, logPath) 42 | 43 | ctx := logger.ContextWithLogConfig(context.Background(), logConfig) 44 | 45 | logger.Info(ctx, "Started : Application Initializing") 46 | defer logger.Info(ctx, "Completed") 47 | 48 | logger.Info(ctx, "Build %v (%v on %v)", buildVersion, buildUser, buildDate) 49 | 50 | // --------------------------------------------------------------------------------------------- 51 | // Storage 52 | 53 | store, err := storage.CreateStorage("standalone", "./tmp/node", 5, 100) 54 | if err != nil { 55 | logger.Fatal(ctx, "Failed to create storage : %s", err) 56 | } 57 | 58 | headers := headers.NewRepository(headers.DefaultConfig(), store) 59 | peers := bitcoin_reader.NewPeerRepository(store, "") 60 | 61 | if err := headers.Load(ctx); err != nil { 62 | logger.Fatal(ctx, "Failed to load headers : %s", err) 63 | } 64 | 65 | if err := peers.Load(ctx); err != nil { 66 | logger.Fatal(ctx, "Failed to load peers : %s", err) 67 | } 68 | 69 | if peers.Count() == 0 { 70 | peers.LoadSeeds(ctx, bitcoin.MainNet) 71 | } 72 | 73 | var managerWait, wait sync.WaitGroup 74 | 75 | // 
--------------------------------------------------------------------------------------------- 76 | // Node Manager (Bitcoin P2P) 77 | 78 | userAgent := fmt.Sprintf("/Tokenized/Spynode:Test-%s/", buildVersion) 79 | logger.Info(ctx, "User Agent : %s", userAgent) 80 | 81 | nodeConfig := &bitcoin_reader.Config{ 82 | Network: bitcoin.MainNet, 83 | Timeout: config.NewDuration(time.Hour), 84 | ScanCount: 500, 85 | StartupDelay: config.NewDuration(time.Second * 20), 86 | ConcurrentBlockRequests: 2, 87 | DesiredNodeCount: 50, 88 | BlockRequestDelay: config.NewDuration(time.Second * 5), 89 | } 90 | manager := bitcoin_reader.NewNodeManager(userAgent, nodeConfig, headers, peers) 91 | managerThread, managerComplete := threads.NewInterruptableThreadComplete("Node Manager", 92 | manager.Run, &managerWait) 93 | 94 | // --------------------------------------------------------------------------------------------- 95 | // Processing 96 | 97 | processor := tests.NewMockDataProcessor() 98 | 99 | txManager := bitcoin_reader.NewTxManager(2 * time.Second) 100 | txManager.SetTxProcessor(processor) 101 | manager.SetTxManager(txManager) 102 | 103 | blockTxManager := tests.NewBlockTxManager(store) 104 | 105 | processTxThread, processTxComplete := threads.NewUninterruptableThreadComplete("Process Txs", 106 | txManager.Run, &wait) 107 | 108 | blockManager := bitcoin_reader.NewBlockManager(blockTxManager, manager, 5, time.Second*30) 109 | manager.SetBlockManager(blockTxManager, blockManager, processor) 110 | 111 | processBlocksThread, processBlocksComplete := threads.NewInterruptableThreadComplete("Process Blocks", 112 | blockManager.Run, &wait) 113 | 114 | // --------------------------------------------------------------------------------------------- 115 | // Periodic 116 | 117 | saveThread, saveComplete := threads.NewPeriodicThreadComplete("Save", 118 | func(ctx context.Context) error { 119 | if err := headers.Clean(ctx); err != nil { 120 | return errors.Wrap(err, "clean headers") 121 | } 122 | if err := peers.Save(ctx); err != nil { 123 | return errors.Wrap(err, "save peers") 124 | } 125 | return nil 126 | }, 30*time.Minute, &wait) 127 | 128 | previousTime := time.Now() 129 | cleanTxsThread, cleanTxsComplete := threads.NewPeriodicThreadComplete("Clean Txs", 130 | func(ctx context.Context) error { 131 | if err := txManager.Clean(ctx, previousTime); err != nil { 132 | return errors.Wrap(err, "clean tx manager") 133 | } 134 | previousTime = time.Now() 135 | return nil 136 | }, 5*time.Minute, &wait) 137 | 138 | // --------------------------------------------------------------------------------------------- 139 | // Shutdown 140 | 141 | // Make a channel to listen for an interrupt or terminate signal from the OS. Use a buffered 142 | // channel because the signal package requires it. 143 | osSignals := make(chan os.Signal, 1) 144 | signal.Notify(osSignals, os.Interrupt, syscall.SIGTERM) 145 | 146 | managerThread.Start(ctx) 147 | processBlocksThread.Start(ctx) 148 | saveThread.Start(ctx) 149 | cleanTxsThread.Start(ctx) 150 | processTxThread.Start(ctx) 151 | 152 | // Blocking main and waiting for shutdown. 
153 | select { 154 | case <-managerComplete: 155 | logger.Warn(ctx, "Finished: Manager") 156 | 157 | case <-saveComplete: 158 | logger.Warn(ctx, "Finished: Save") 159 | 160 | case <-cleanTxsComplete: 161 | logger.Warn(ctx, "Finished: Clean Txs") 162 | 163 | case <-processTxComplete: 164 | logger.Warn(ctx, "Finished: Process Txs") 165 | 166 | case <-processBlocksComplete: 167 | logger.Warn(ctx, "Finished: Process Blocks") 168 | 169 | case <-osSignals: 170 | logger.Info(ctx, "Shutdown requested") 171 | } 172 | 173 | // Stop remaining threads 174 | headers.Stop(ctx) 175 | managerThread.Stop(ctx) 176 | 177 | // Block until goroutines finish 178 | waitWarning := logger.NewWaitingWarning(ctx, 3*time.Second, "Node Manager Shutdown") 179 | managerWait.Wait() 180 | waitWarning.Cancel() 181 | 182 | txManager.Stop(ctx) 183 | processBlocksThread.Stop(ctx) 184 | saveThread.Stop(ctx) 185 | cleanTxsThread.Stop(ctx) 186 | 187 | waitWarning = logger.NewWaitingWarning(ctx, 3*time.Second, "Tx Manager Shutdown") 188 | wait.Wait() 189 | waitWarning.Cancel() 190 | 191 | if err := headers.Save(ctx); err != nil { 192 | logger.Error(ctx, "Failed to save headers : %s", err) 193 | } 194 | if err := peers.Save(ctx); err != nil { 195 | logger.Error(ctx, "Failed to save peers : %s", err) 196 | } 197 | 198 | if err := threads.CombineErrors( 199 | managerThread.Error(), 200 | saveThread.Error(), 201 | cleanTxsThread.Error(), 202 | ); err != nil { 203 | logger.Error(ctx, "Failed : %s", err) 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /seeds.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | type Seed struct { 4 | Bytes []byte 5 | Port uint16 6 | } 7 | 8 | var MainNetSeeds = []Seed{ 9 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xae, 0x8a, 0x05, 0xfd}, 8333}, 10 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9d, 0xe6, 0x29, 0x80}, 8333}, 11 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x41, 0x98, 0xc8}, 8333}, 12 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, 0xf8, 0xf5, 0x52}, 8333}, 13 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9d, 0xe6, 0x1a, 0x4d}, 8333}, 14 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0xc0, 0xfd, 0x3b}, 8333}, 15 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x34, 0x25, 0x48, 0x1d}, 8333}, 16 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x7d, 0x23, 0x3c}, 8333}, 17 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x78, 0x1a, 0x67}, 8333}, 18 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0x9f, 0x18, 0x6a}, 8333}, 19 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x80, 0xe8, 0xbc}, 8333}, 20 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xce, 0xbd, 0x51, 0xe9}, 8333}, 21 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xd9, 0x4e, 0xa9}, 8333}, 22 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x3e, 0xcc, 0x70}, 8333}, 23 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, 0xb7, 0xcf, 0xf0}, 
8333}, 24 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x78, 0xaf, 0x85}, 8333}, 25 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xfb, 0x6e, 0x24}, 8333}, 26 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x34, 0x3a, 0x3b, 0x36}, 8333}, 27 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa7, 0x63, 0x5c, 0xba}, 8333}, 28 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x3b, 0x43, 0x12}, 8333}, 29 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x78, 0x4f, 0xc9, 0x6f}, 8333}, 30 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xce, 0xbd, 0x68, 0x62}, 8333}, 31 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xd9, 0x5d, 0x04}, 8333}, 32 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa8, 0x77, 0x56, 0x0c}, 8333}, 33 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, 0xf8, 0x1e, 0x3c}, 8333}, 34 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x3e, 0xc7, 0x71}, 8333}, 35 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, 0xb7, 0xd9, 0x2c}, 8333}, 36 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa7, 0x63, 0xbf, 0xd7}, 8333}, 37 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xfb, 0x6e, 0x2c}, 8333}, 38 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xf4, 0x84, 0x38}, 8333}, 39 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0x9f, 0xe8, 0xbc}, 8333}, 40 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa8, 0x77, 0x56, 0x04}, 8333}, 41 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xce, 0xbd, 0x80, 0xae}, 8333}, 42 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, 0xb7, 0x2a, 0x3f}, 8333}, 43 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xd9, 0x26, 0x5d}, 8333}, 44 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x09, 0x94, 0xa3}, 8333}, 45 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x80, 0xa8, 0xcf}, 8333}, 46 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x36, 0xf3, 0xd4, 0xdb}, 8333}, 47 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x5d, 0x99, 0x82}, 8333}, 48 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa7, 0x63, 0xdb, 0x0b}, 8333}, 49 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x80, 0x5a, 0xbc}, 8333}, 50 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x41, 0x15, 0x7a, 0x57}, 8333}, 51 | } 52 | 53 | var TestNetSeeds = []Seed{ 54 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x23, 0xb8, 0x23, 0x01}, 18333}, 55 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x23, 0xb8, 0x98, 0xad}, 18333}, 56 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x21, 0x02, 
0x26}, 18333}, 57 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2e, 0x65, 0xd1, 0xf2}, 18444}, 58 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x32, 0x03, 0xe9, 0x14}, 18333}, 59 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x58, 0xc6, 0x24, 0xe8}, 18333}, 60 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x5f, 0x64, 0x99}, 18333}, 61 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x62, 0x73, 0xfb, 0xd6}, 18333}, 62 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, 0x9a, 0x1b, 0x6a}, 18333}, 63 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x78, 0x4f, 0x9e, 0x92}, 18333}, 64 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x44, 0xe7, 0x7a}, 18333}, 65 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x90, 0xd9, 0x49, 0x56}, 18333}, 66 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x41, 0x3c, 0x36}, 19100}, 67 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x41, 0xa3, 0x0f}, 18333}, 68 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x09, 0x9a, 0x6e}, 18333}, 69 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x18, 0xcb, 0xc4}, 28333}, 70 | {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x9a, 0xb1, 0x31}, 18333}, 71 | } 72 | -------------------------------------------------------------------------------- /peers.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/binary" 7 | "fmt" 8 | "io" 9 | "math/rand" 10 | "net" 11 | "sync" 12 | "time" 13 | 14 | "github.com/tokenized/logger" 15 | "github.com/tokenized/pkg/bitcoin" 16 | "github.com/tokenized/pkg/storage" 17 | 18 | "github.com/pkg/errors" 19 | ) 20 | 21 | const ( 22 | peersDefaultPath = "peers" 23 | peersVersion = uint8(0) 24 | ) 25 | 26 | type Peer struct { 27 | Address string 28 | Score int32 29 | LastTime uint32 30 | } 31 | 32 | type StoragePeerRepository struct { 33 | store storage.Storage 34 | path string 35 | lookup map[string]*Peer 36 | list PeerList 37 | lastSaved time.Time 38 | 39 | lock sync.Mutex 40 | } 41 | 42 | type PeerList []*Peer 43 | 44 | func (l PeerList) Swap(i, j int) { 45 | l[i], l[j] = l[j], l[i] 46 | } 47 | 48 | func NewPeerRepository(store storage.Storage, path string) *StoragePeerRepository { 49 | if len(path) == 0 { 50 | path = peersDefaultPath 51 | } 52 | 53 | return &StoragePeerRepository{ 54 | store: store, 55 | path: path, 56 | lookup: make(map[string]*Peer), 57 | } 58 | } 59 | 60 | func (repo *StoragePeerRepository) Count() int { 61 | repo.lock.Lock() 62 | defer repo.lock.Unlock() 63 | 64 | return len(repo.list) 65 | } 66 | 67 | func (repo *StoragePeerRepository) Get(ctx context.Context, 68 | minScore, maxScore int32) (PeerList, error) { 69 | repo.lock.Lock() 70 | defer repo.lock.Unlock() 71 | 72 | result := make(PeerList, 0, 1000) 73 | for _, peer := range repo.list { 74 | if peer.Score >= minScore && (maxScore == -1 || peer.Score <= maxScore) { 75 | result = append(result, peer) 76 | } 77 | } 78 | 79 | // Shuffle the list 80 | rand.Seed(time.Now().UnixNano()) 81 | 
rand.Shuffle(len(result), result.Swap) 82 | 83 | return result, nil 84 | } 85 | 86 | func (repo *StoragePeerRepository) Add(ctx context.Context, address string) (bool, error) { 87 | repo.lock.Lock() 88 | defer repo.lock.Unlock() 89 | 90 | _, exists := repo.lookup[address] 91 | if exists { 92 | return false, nil 93 | } 94 | 95 | // Add peer 96 | peer := Peer{Address: address, Score: 0} 97 | repo.list = append(repo.list, &peer) 98 | repo.lookup[peer.Address] = &peer 99 | return true, nil 100 | } 101 | 102 | func (repo *StoragePeerRepository) UpdateScore(ctx context.Context, address string, 103 | delta int32) bool { 104 | repo.lock.Lock() 105 | defer repo.lock.Unlock() 106 | 107 | peer, exists := repo.lookup[address] 108 | if exists { 109 | now := time.Now() 110 | peer.LastTime = uint32(now.Unix()) 111 | peer.Score += delta 112 | return true 113 | } 114 | 115 | return false 116 | } 117 | 118 | func (repo *StoragePeerRepository) UpdateTime(ctx context.Context, address string) bool { 119 | repo.lock.Lock() 120 | defer repo.lock.Unlock() 121 | 122 | peer, exists := repo.lookup[address] 123 | if exists { 124 | now := time.Now() 125 | peer.LastTime = uint32(now.Unix()) 126 | return true 127 | } 128 | 129 | return false 130 | } 131 | 132 | func (repo *StoragePeerRepository) LoadSeeds(ctx context.Context, network bitcoin.Network) { 133 | logger.Info(ctx, "Loading %s peer seeds", network) 134 | 135 | var seeds []Seed 136 | if network == bitcoin.MainNet { 137 | seeds = MainNetSeeds 138 | } else { 139 | seeds = TestNetSeeds 140 | } 141 | 142 | for _, seed := range seeds { 143 | ip := net.IP(seed.Bytes) 144 | peer := &Peer{ 145 | Address: fmt.Sprintf("[%s]:%d", ip.To16().String(), seed.Port), 146 | } 147 | 148 | repo.list = append(repo.list, peer) 149 | repo.lookup[peer.Address] = peer 150 | } 151 | } 152 | 153 | // Loads peers from storage 154 | func (repo *StoragePeerRepository) Load(ctx context.Context) error { 155 | repo.lock.Lock() 156 | defer repo.lock.Unlock() 157 | 158 | // Clear 159 | repo.list = make(PeerList, 0) 160 | repo.lookup = make(map[string]*Peer) 161 | 162 | // Get current data 163 | data, err := repo.store.Read(ctx, repo.path) 164 | if err == storage.ErrNotFound { 165 | return nil // Leave empty 166 | } 167 | if err != nil { 168 | return err 169 | } 170 | 171 | // Parse peers 172 | buffer := bytes.NewBuffer(data) 173 | var version uint8 174 | if err := binary.Read(buffer, binary.LittleEndian, &version); err != nil { 175 | return errors.Wrap(err, "Failed to read peers version") 176 | } 177 | 178 | if version != 0 { 179 | return errors.New("Unknown Version") 180 | } 181 | 182 | var count int32 183 | if err := binary.Read(buffer, binary.LittleEndian, &count); err != nil { 184 | return errors.Wrap(err, "Failed to read peers count") 185 | } 186 | 187 | // Reset 188 | repo.list = make(PeerList, 0, count) 189 | 190 | // Parse peers 191 | for { 192 | peer, err := readPeer(buffer, version) 193 | if err != nil { 194 | break 195 | } 196 | 197 | // Add peer 198 | repo.list = append(repo.list, &peer) 199 | repo.lookup[peer.Address] = &peer 200 | } 201 | 202 | repo.lastSaved = time.Now() 203 | 204 | logger.Info(ctx, "Loaded %d peers", len(repo.list)) 205 | 206 | return nil 207 | } 208 | 209 | // Saves the peers to storage 210 | func (repo *StoragePeerRepository) Save(ctx context.Context) error { 211 | repo.lock.Lock() 212 | defer repo.lock.Unlock() 213 | start := time.Now() 214 | 215 | var buffer bytes.Buffer 216 | 217 | // Write version 218 | if err := binary.Write(&buffer, binary.LittleEndian, 
peersVersion); err != nil { 219 | return err 220 | } 221 | 222 | // Write count 223 | if err := binary.Write(&buffer, binary.LittleEndian, int32(len(repo.list))); err != nil { 224 | return err 225 | } 226 | 227 | // Write peers 228 | for _, peer := range repo.list { 229 | if err := peer.write(&buffer); err != nil { 230 | return err 231 | } 232 | } 233 | 234 | if err := repo.store.Write(ctx, repo.path, buffer.Bytes(), nil); err != nil { 235 | return err 236 | } 237 | 238 | repo.lastSaved = time.Now() 239 | 240 | logger.ElapsedWithFields(ctx, start, []logger.Field{ 241 | logger.Int("peer_count", len(repo.list)), 242 | }, "Saved peers") 243 | return nil 244 | } 245 | 246 | // Clears all peers from memory and storage 247 | func (repo *StoragePeerRepository) Clear(ctx context.Context) error { 248 | repo.lock.Lock() 249 | defer repo.lock.Unlock() 250 | 251 | repo.list = make(PeerList, 0) 252 | repo.lookup = make(map[string]*Peer) 253 | repo.lastSaved = time.Now() 254 | return repo.store.Remove(ctx, repo.path) 255 | } 256 | 257 | func readPeer(r io.Reader, version uint8) (Peer, error) { 258 | result := Peer{} 259 | 260 | // Read address 261 | var addressSize int32 262 | if err := binary.Read(r, binary.LittleEndian, &addressSize); err != nil { 263 | return result, err 264 | } 265 | 266 | addressData := make([]byte, addressSize) 267 | _, err := io.ReadFull(r, addressData) // Read exactly addressSize bytes 268 | if err != nil { 269 | return result, err 270 | } 271 | result.Address = string(addressData) 272 | 273 | // Read score 274 | if err := binary.Read(r, binary.LittleEndian, &result.Score); err != nil { 275 | return result, err 276 | } 277 | 278 | // Read time 279 | if err := binary.Read(r, binary.LittleEndian, &result.LastTime); err != nil { 280 | return result, err 281 | } 282 | 283 | return result, nil 284 | } 285 | 286 | func (peer *Peer) write(w io.Writer) error { 287 | // Write address 288 | err := binary.Write(w, binary.LittleEndian, int32(len(peer.Address))) 289 | if err != nil { 290 | return err 291 | } 292 | _, err = w.Write([]byte(peer.Address)) 293 | if err != nil { 294 | return err 295 | } 296 | 297 | // Write score 298 | err = binary.Write(w, binary.LittleEndian, peer.Score) 299 | if err != nil { 300 | return err 301 | } 302 | 303 | // Write time 304 | err = binary.Write(w, binary.LittleEndian, peer.LastTime) 305 | if err != nil { 306 | return err 307 | } 308 | 309 | return nil 310 | } 311 | -------------------------------------------------------------------------------- /headers/splits_test.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | 7 | "github.com/tokenized/bitcoin_reader/internal/platform/tests" 8 | "github.com/tokenized/pkg/bitcoin" 9 | "github.com/tokenized/pkg/storage" 10 | "github.com/tokenized/pkg/wire" 11 | ) 12 | 13 | func Test_Splits_init_Order(t *testing.T) { 14 | store := storage.NewMockStorage() 15 | repo := NewRepository(DefaultConfig(), store) 16 | 17 | // Check order is highest to lowest 18 | for i, split := range repo.splits { 19 | if i == 0 { 20 | continue 21 | } 22 | 23 | if repo.splits[i-1].Height < split.Height { 24 | t.Errorf("Split %d (height %d) is before split %d (height %d)", i-1, 25 | repo.splits[i-1].Height, i, split.Height) 26 | } 27 | } 28 | } 29 | 30 | func Test_Splits_init_Values(t *testing.T) { 31 | store := storage.NewMockStorage() 32 | repo := NewRepository(DefaultConfig(), store) 33 | 34 | // Verify correct hashes 35 | btcBefore, _ := 
bitcoin.NewHash32FromStr("0000000000000000011865af4122fe3b144e2cbeea86142e8ff2fb4107352d43") 36 | btcAfter, _ := bitcoin.NewHash32FromStr("00000000000000000019f112ec0a9982926f1258cdcc558dd7c3b7e5dc7fa148") 37 | btcHeight := 478559 38 | btcFound := false 39 | 40 | bchBefore, _ := bitcoin.NewHash32FromStr("00000000000000000102d94fde9bd0807a2cc7582fe85dd6349b73ce4e8d9322") 41 | bchAfter, _ := bitcoin.NewHash32FromStr("0000000000000000004626ff6e3b936941d341c5932ece4357eeccac44e6d56c") 42 | bchHeight := 556767 43 | bchFound := false 44 | 45 | for _, split := range repo.splits { 46 | t.Logf("Split %s height %d, before %s, after, %s", split.Name, split.Height, 47 | split.BeforeHash, split.AfterHash) 48 | 49 | if split.Name == SplitNameBTC { 50 | btcFound = true 51 | if split.Height != btcHeight { 52 | t.Errorf("Wrong %s split height : got %d, want %d", split.Name, split.Height, 53 | btcHeight) 54 | } 55 | 56 | if !split.BeforeHash.Equal(btcBefore) { 57 | t.Errorf("Wrong %s split before hash : got %s, want %s", split.Name, 58 | split.BeforeHash, btcBefore) 59 | } 60 | 61 | if !split.AfterHash.Equal(btcAfter) { 62 | t.Errorf("Wrong %s split after hash : got %s, want %s", split.Name, 63 | split.AfterHash, btcBefore) 64 | } 65 | } 66 | if split.Name == SplitNameBCH { 67 | bchFound = true 68 | if split.Height != bchHeight { 69 | t.Errorf("Wrong %s split height : got %d, want %d", split.Name, split.Height, 70 | bchHeight) 71 | } 72 | 73 | if !split.BeforeHash.Equal(bchBefore) { 74 | t.Errorf("Wrong %s split before hash : got %s, want %s", split.Name, 75 | split.BeforeHash, bchBefore) 76 | } 77 | 78 | if !split.AfterHash.Equal(bchAfter) { 79 | t.Errorf("Wrong %s split after hash : got %s, want %s", split.Name, 80 | split.AfterHash, bchBefore) 81 | } 82 | } 83 | } 84 | 85 | if !btcFound { 86 | t.Errorf("%s Split not found", SplitNameBTC) 87 | } 88 | if !bchFound { 89 | t.Errorf("%s Split not found", SplitNameBCH) 90 | } 91 | } 92 | 93 | func Test_Splits_init_GetLocatorHashes_AfterSplits(t *testing.T) { 94 | if !testing.Verbose() { 95 | t.Skip() // Don't want to redownload the block all the time 96 | } 97 | ctx := tests.Context() 98 | 99 | store := storage.NewMockStorage() 100 | repo := NewRepository(DefaultConfig(), store) 101 | repo.DisableDifficulty() 102 | repo.DisableSplitProtection() 103 | 104 | repo.InitializeWithGenesis() 105 | 106 | // Mock 575000 headers 107 | previous := repo.LastHash() 108 | timestamp := repo.longest.Last().Header.Timestamp 109 | for i := 0; i < 575000; i++ { 110 | header := &wire.BlockHeader{ 111 | Version: 1, 112 | PrevBlock: previous, 113 | Timestamp: timestamp, 114 | Bits: 0x1d00ffff, 115 | Nonce: rand.Uint32(), 116 | } 117 | rand.Read(header.MerkleRoot[:]) 118 | 119 | if err := repo.ProcessHeader(ctx, header); err != nil { 120 | t.Fatalf("Failed to add header : %s", err) 121 | } 122 | 123 | previous = *header.BlockHash() 124 | timestamp += 600 125 | } 126 | 127 | heightHashes := repo.longest.GetLocatorHashes(repo.splits, 100, 50) 128 | previousHeight := repo.Height() 129 | btcFound := false 130 | bchFound := false 131 | for i, heightHash := range heightHashes { 132 | height := repo.HashHeight(heightHash.Hash) 133 | if height != -1 { 134 | t.Logf("Hash %06d : %s", height, heightHash.Hash) 135 | if height > previousHeight { 136 | t.Errorf("Hash %d at offset after lower height : height %d, previous %d", i, height, 137 | previousHeight) 138 | } 139 | previousHeight = height 140 | continue 141 | } 142 | 143 | for _, split := range repo.splits { 144 | if 
!heightHash.Hash.Equal(&split.BeforeHash) { 145 | continue 146 | } 147 | 148 | t.Logf("Split %d : %s", split.Height, heightHash.Hash) 149 | 150 | if split.Name == SplitNameBCH { 151 | bchFound = true 152 | } else if split.Name == SplitNameBTC { 153 | btcFound = true 154 | } else { 155 | t.Logf("Other split found : %s", split.Name) 156 | } 157 | 158 | if split.Height > previousHeight { 159 | t.Errorf("%s split at offset after lower height : height %d, previous %d", 160 | split.Name, split.Height, previousHeight) 161 | } 162 | 163 | previousHeight = split.Height 164 | break 165 | } 166 | } 167 | 168 | if !btcFound { 169 | t.Errorf("%s Split not found", SplitNameBTC) 170 | } 171 | if !bchFound { 172 | t.Errorf("%s Split not found", SplitNameBCH) 173 | } 174 | } 175 | 176 | func Test_Splits_GetLocatorHashes_BeforeSplits(t *testing.T) { 177 | if !testing.Verbose() { 178 | t.Skip() // Don't want to redownload the block all the time 179 | } 180 | ctx := tests.Context() 181 | 182 | store := storage.NewMockStorage() 183 | repo := NewRepository(DefaultConfig(), store) 184 | repo.DisableDifficulty() 185 | 186 | repo.InitializeWithGenesis() 187 | 188 | // Mock 575000 headers 189 | previous := repo.LastHash() 190 | var merklehash bitcoin.Hash32 191 | timestamp := genesisHeader(bitcoin.MainNet).Timestamp 192 | var nonce uint32 193 | for i := 0; i < 300000; i++ { 194 | rand.Read(merklehash[:]) 195 | nonce = rand.Uint32() 196 | 197 | header := &wire.BlockHeader{ 198 | Version: 1, 199 | PrevBlock: previous, 200 | MerkleRoot: merklehash, 201 | Timestamp: timestamp, 202 | Bits: 0x1d00ffff, 203 | Nonce: nonce, 204 | } 205 | 206 | if err := repo.ProcessHeader(ctx, header); err != nil { 207 | t.Fatalf("Failed to add header : %s", err) 208 | } 209 | 210 | previous = *header.BlockHash() 211 | timestamp += 600 212 | } 213 | 214 | heightHashes := repo.longest.GetLocatorHashes(repo.splits, 100, 50) 215 | previousHeight := repo.Height() 216 | btcFound := false 217 | bchFound := false 218 | for i, heightHash := range heightHashes { 219 | height := repo.HashHeight(heightHash.Hash) 220 | if height != -1 { 221 | t.Logf("Hash %06d : %s", height, heightHash.Hash) 222 | if height > previousHeight { 223 | t.Errorf("Hash %d at offset after lower height : height %d, previous %d", i, height, 224 | previousHeight) 225 | } 226 | previousHeight = height 227 | continue 228 | } 229 | 230 | for _, split := range repo.splits { 231 | if !heightHash.Hash.Equal(&split.BeforeHash) { 232 | continue 233 | } 234 | 235 | t.Logf("Split Hash %d : %s", split.Height, heightHash.Hash) 236 | 237 | if split.Name == SplitNameBCH { 238 | bchFound = true 239 | } else if split.Name == SplitNameBTC { 240 | btcFound = true 241 | } else { 242 | t.Logf("Other split found : %s", split.Name) 243 | } 244 | 245 | if split.Height > previousHeight { 246 | t.Errorf("%s split at offset after lower height : height %d, previous %d", 247 | split.Name, split.Height, previousHeight) 248 | } 249 | 250 | previousHeight = split.Height 251 | break 252 | } 253 | } 254 | 255 | if btcFound { 256 | t.Errorf("%s Split found", SplitNameBTC) 257 | } 258 | if bchFound { 259 | t.Errorf("%s Split found", SplitNameBCH) 260 | } 261 | } 262 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e h1:ahyvB3q25YnZWly5Gq1ekg6jcmWaGj/vG/MhF4aisoc= 2 | github.com/FactomProject/basen 
v0.0.0-20150613233007-fe3947df716e/go.mod h1:kGUqhHd//musdITWjFvNTHn90WG9bMLBEPQZ17Cmlpw= 3 | github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec h1:1Qb69mGp/UtRPn422BH4/Y4Q3SLUrD9KHuDkm8iodFc= 4 | github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec/go.mod h1:CD8UlnlLDiqb36L110uqiP2iSflVjx9g/3U9hCI4q2U= 5 | github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= 6 | github.com/aws/aws-sdk-go v1.35.3 h1:r0puXncSaAfRt7Btml2swUo74Kao+vKhO3VLjwDjK54= 7 | github.com/aws/aws-sdk-go v1.35.3/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= 8 | github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= 9 | github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= 10 | github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= 11 | github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= 12 | github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= 13 | github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= 14 | github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= 15 | github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= 16 | github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= 17 | github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= 18 | github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= 19 | github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e h1:0XBUw73chJ1VYSsfvcPvVT7auykAJce9FpRr10L6Qhw= 20 | github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 21 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 22 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 23 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 24 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 25 | github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= 26 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 27 | github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= 28 | github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= 29 | github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 30 | github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 31 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 32 | github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 33 | github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 34 | github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 35 | github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 36 | github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod 
h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 37 | github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= 38 | github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= 39 | github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= 40 | github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= 41 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 42 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 43 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 44 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 45 | github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 46 | github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 47 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 48 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 49 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 50 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 51 | github.com/scottjbarr/redis v0.0.1 h1:cCXEzPXuHhDM0PEFwv7QKBiDF22S+9aivxvfags2kj0= 52 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 53 | github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= 54 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 55 | github.com/tokenized/config v0.2.2 h1:dGfFdv3P6JK9TcHvud34vozocm3Qq2Wxbc9PRctgDL8= 56 | github.com/tokenized/config v0.2.2/go.mod h1:CfxdNBZh5W1CCmE4bKHrSq7dgKQiNUxdrDtmvhemq2w= 57 | github.com/tokenized/logger v0.1.3 h1:Vq/2jEgqyDfjJPgoMo773/bn7ZUow8EWSAY8+gFdTQI= 58 | github.com/tokenized/logger v0.1.3/go.mod h1:bA5PfUEFwtUy0yEIIq4Q3ZwBk1kIZTkYIRASq3fPw+k= 59 | github.com/tokenized/pkg v0.7.1-0.20230518151913-31bef1f54301 h1:x6meCB+cO0ItyljfJd0wgXm5uQzhLj8cAD/8RIRLLPc= 60 | github.com/tokenized/pkg v0.7.1-0.20230518151913-31bef1f54301/go.mod h1:c1xkP+9ON6kwoMQB9LMpy34YcqUwqNXjPMlltYTYZIc= 61 | github.com/tokenized/threads v0.1.2 h1:olWOi2kZ4GLX4XmRbgHJzMbaTxp7CKMdiMbHJjKZaRQ= 62 | github.com/tokenized/threads v0.1.2/go.mod h1:+/44KAYA6wqU+AC743pLGBgzkD06IB9+piOOE8BkGK0= 63 | github.com/tyler-smith/go-bip32 v0.0.0-20170922074101-2c9cfd177564 h1:NXXyQVeRVLK8Xu27/hkkjwVOZLk5v4ZBEvvMtqMqznM= 64 | github.com/tyler-smith/go-bip32 v0.0.0-20170922074101-2c9cfd177564/go.mod h1:0/YuQQF676+d4CMNclTqGUam1EDwz0B8o03K9pQqA3c= 65 | golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 66 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 67 | golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 68 | golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= 69 | golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= 70 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 71 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 72 | golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 73 | golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= 74 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 75 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 76 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 77 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 78 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 79 | golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= 80 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 81 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 82 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 83 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 84 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 85 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 86 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 87 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 88 | launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= 89 | -------------------------------------------------------------------------------- /test_nodes.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "sync" 8 | "sync/atomic" 9 | "time" 10 | 11 | "github.com/tokenized/bitcoin_reader/headers" 12 | "github.com/tokenized/logger" 13 | "github.com/tokenized/pkg/bitcoin" 14 | "github.com/tokenized/pkg/wire" 15 | "github.com/tokenized/threads" 16 | 17 | "github.com/pkg/errors" 18 | ) 19 | 20 | const ( 21 | TestUserAgent = "TestAgent/1" 22 | TestNet = bitcoin.MainNet 23 | ) 24 | 25 | type MockNode struct { 26 | Name string 27 | InternalNode *BitcoinNode 28 | ExternalNode *MockExternalNode 29 | } 30 | 31 | func NewMockNode(name string, config *Config, headers *headers.Repository, 32 | peers PeerRepository, txManager *TxManager) *MockNode { 33 | result := &MockNode{ 34 | Name: name, 35 | InternalNode: NewBitcoinNode(name, TestUserAgent, config, headers, peers), 36 | ExternalNode: NewMockExternalNode(name, headers, 500), 37 | } 38 | 39 | result.InternalNode.SetTxManager(txManager) 40 | return result 41 | } 42 | 43 | func (n *MockNode) Run(ctx context.Context, interrupt <-chan interface{}) error { 44 | // Create mock connection 45 | incomingConn, externalConn := net.Pipe() 46 | 47 | n.InternalNode.mockConnect(ctx, incomingConn) 48 | n.ExternalNode.SetConnection(externalConn) 49 | 50 | var wait sync.WaitGroup 51 | 52 | internalThread, internalComplete := threads.NewInterruptableThreadComplete(fmt.Sprintf("%s: Internal", n.Name), 53 | n.InternalNode.run, &wait) 54 | externalThread, externalComplete := threads.NewInterruptableThreadComplete(fmt.Sprintf("%s: External", n.Name), 55 | n.ExternalNode.Run, &wait) 56 | 57 | internalThread.Start(ctx) 58 | externalThread.Start(ctx) 59 | 60 | select { 61 | case internalErr := <-internalComplete: 62 | logger.Fatal(ctx, "[%s] 
Internal node failed : %s", n.Name, internalErr) 63 | 64 | case externalErr := <-externalComplete: 65 | logger.Fatal(ctx, "[%s] External node failed : %s", n.Name, externalErr) 66 | 67 | case <-interrupt: 68 | } 69 | 70 | internalThread.Stop(ctx) 71 | externalThread.Stop(ctx) 72 | 73 | wait.Wait() 74 | return nil 75 | } 76 | 77 | type MockExternalNode struct { 78 | connection net.Conn 79 | isClosed atomic.Value 80 | 81 | Name string 82 | 83 | Txs []*wire.MsgTx 84 | TxRetentionCount int 85 | TxLock sync.Mutex 86 | 87 | headers *headers.Repository 88 | 89 | outgoingMsgChannel chan wire.Message 90 | } 91 | 92 | func NewMockExternalNode(name string, headers *headers.Repository, 93 | txRetentionCount int) *MockExternalNode { 94 | return &MockExternalNode{ 95 | Name: name, 96 | TxRetentionCount: txRetentionCount, 97 | headers: headers, 98 | outgoingMsgChannel: make(chan wire.Message, 100), 99 | } 100 | } 101 | 102 | func (n *MockExternalNode) ProvideTx(tx *wire.MsgTx) error { 103 | n.TxLock.Lock() 104 | n.Txs = append(n.Txs, tx) 105 | if len(n.Txs) > n.TxRetentionCount { 106 | overage := len(n.Txs) - n.TxRetentionCount 107 | n.Txs = n.Txs[overage:] 108 | } 109 | n.TxLock.Unlock() 110 | 111 | // TODO Remove transactions if they aren't immediately requested with a get data message. --ce 112 | 113 | // Send tx inventory. 114 | msg := &wire.MsgInv{ 115 | InvList: []*wire.InvVect{ 116 | { 117 | Type: wire.InvTypeTx, 118 | Hash: *tx.TxHash(), 119 | }, 120 | }, 121 | } 122 | 123 | if err := n.sendMessage(msg); err != nil { 124 | return errors.Wrap(err, "send message") 125 | } 126 | 127 | return nil 128 | } 129 | 130 | func (n *MockExternalNode) SetConnection(connection net.Conn) { 131 | n.connection = connection 132 | n.isClosed.Store(false) 133 | } 134 | 135 | func (n *MockExternalNode) Run(ctx context.Context, interrupt <-chan interface{}) error { 136 | var wait sync.WaitGroup 137 | 138 | // Listen for requests from connection and respond. 
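// Two goroutines cooperate here: handleMessages reads requests from the pipe and builds
// replies, while sendMessages drains outgoingMsgChannel and writes them back, so nothing is
// written to the connection from inside the read loop.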
139 | handleThread, handleComplete := threads.NewUninterruptableThreadComplete(fmt.Sprintf("%s: External: handle", n.Name), 140 | n.handleMessages, &wait) 141 | 142 | sendThread, sendComplete := threads.NewUninterruptableThreadComplete(fmt.Sprintf("%s: External: send", n.Name), 143 | func(ctx context.Context) error { 144 | return n.sendMessages(ctx) 145 | }, &wait) 146 | 147 | handleThread.Start(ctx) 148 | sendThread.Start(ctx) 149 | 150 | var resultErr error 151 | select { 152 | case err := <-sendComplete: 153 | resultErr = errors.Wrap(err, "send") 154 | case err := <-handleComplete: 155 | resultErr = errors.Wrap(err, "handle") 156 | } 157 | 158 | logger.Info(ctx, "Stopping External: %s", n.Name) 159 | n.isClosed.Store(true) 160 | n.connection.Close() 161 | close(n.outgoingMsgChannel) 162 | wait.Wait() 163 | 164 | return resultErr 165 | } 166 | 167 | func (n *MockExternalNode) handleMessages(ctx context.Context) error { 168 | version := buildVersionMsg("", TestUserAgent, n.headers.Height(), true) 169 | if err := n.sendMessage(version); err != nil { 170 | return errors.Wrap(err, "send version") 171 | } 172 | 173 | for { 174 | if n.isClosed.Load().(bool) { 175 | return nil 176 | } 177 | 178 | if msg, _, err := wire.ReadMessage(n.connection, wire.ProtocolVersion, 179 | wire.BitcoinNet(TestNet)); err != nil { 180 | 181 | if typeError, ok := errors.Cause(err).(*wire.MessageError); ok { 182 | if typeError.Type == wire.MessageErrorUnknownCommand { 183 | continue 184 | } 185 | 186 | if typeError.Type == wire.MessageErrorConnectionClosed { 187 | return nil 188 | } 189 | } 190 | 191 | return errors.Wrap(err, "read") 192 | } else { 193 | if err := n.handleMessage(ctx, msg); err != nil { 194 | return errors.Wrap(err, "handle") 195 | } 196 | } 197 | } 198 | } 199 | 200 | func (n *MockExternalNode) handleMessage(ctx context.Context, msg wire.Message) error { 201 | switch message := msg.(type) { 202 | case *wire.MsgVersion: 203 | if err := n.sendMessage(&wire.MsgVerAck{}); err != nil { 204 | return errors.Wrap(err, "send ver ack") 205 | } 206 | 207 | case *wire.MsgVerAck: 208 | 209 | case *wire.MsgGetHeaders: 210 | msgHeaders := &wire.MsgHeaders{} 211 | foundSplit := false 212 | for _, hash := range message.BlockLocatorHashes { 213 | if hash.Equal(&headers.MainNetRequiredHeader.PrevBlock) { 214 | foundSplit = true 215 | msgHeaders.AddBlockHeader(headers.MainNetRequiredHeader) 216 | } 217 | } 218 | 219 | if !foundSplit { 220 | // Find block height and send headers. 
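// The mock keys off the last locator hash in the request: it resolves that hash to a height
// in its own header repository and replies with up to 500 headers from that point, which is
// enough for the internal node's header sync to make progress in tests.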
221 | l := len(message.BlockLocatorHashes) 222 | if l == 0 { 223 | return errors.New("Empty block request") 224 | } 225 | 226 | lastHash := *message.BlockLocatorHashes[l-1] 227 | height := n.headers.HashHeight(lastHash) 228 | if height == -1 { 229 | return fmt.Errorf("Header not found: %s", lastHash) 230 | } 231 | 232 | headers, err := n.headers.GetHeaders(ctx, height, 500) 233 | if err != nil { 234 | return errors.Wrap(err, "get headers") 235 | } 236 | 237 | if len(headers) == 0 { 238 | return fmt.Errorf("No headers found: height %d", height) 239 | } 240 | 241 | for _, header := range headers { 242 | msgHeaders.AddBlockHeader(header) 243 | } 244 | } 245 | 246 | if err := n.sendMessage(msgHeaders); err != nil { 247 | return errors.Wrap(err, "send headers") 248 | } 249 | 250 | case *wire.MsgGetData: 251 | for _, inv := range message.InvList { 252 | if inv.Type != wire.InvTypeTx { 253 | continue 254 | } 255 | 256 | var tx *wire.MsgTx 257 | n.TxLock.Lock() 258 | for i, ltx := range n.Txs { 259 | if ltx.TxHash().Equal(&inv.Hash) { 260 | tx = ltx 261 | n.Txs = append(n.Txs[:i], n.Txs[i+1:]...) 262 | break 263 | } 264 | } 265 | n.TxLock.Unlock() 266 | 267 | if tx != nil { 268 | if err := n.sendMessage(tx); err != nil { 269 | return errors.Wrap(err, "send tx") 270 | } 271 | } else { 272 | logger.ErrorWithFields(ctx, []logger.Field{ 273 | logger.Stringer("txid", inv.Hash), 274 | }, "Tx not found for get data request") 275 | } 276 | } 277 | } 278 | 279 | return nil 280 | } 281 | 282 | func (n *MockExternalNode) sendMessage(msg wire.Message) error { 283 | select { 284 | case n.outgoingMsgChannel <- msg: 285 | return nil 286 | case <-time.After(time.Second * 10): 287 | return errors.New("Could not add message to channel") 288 | } 289 | } 290 | 291 | func (n *MockExternalNode) sendMessages(ctx context.Context) error { 292 | for msg := range n.outgoingMsgChannel { 293 | if n.isClosed.Load().(bool) { 294 | return nil 295 | } 296 | 297 | if _, err := wire.WriteMessageN(n.connection, msg, wire.ProtocolVersion, 298 | wire.BitcoinNet(TestNet)); err != nil { 299 | logger.VerboseWithFields(ctx, []logger.Field{ 300 | logger.String("command", msg.Command()), 301 | }, "Failed to send message : %s", err) 302 | for range n.outgoingMsgChannel { // flush channel 303 | } 304 | return errors.Wrap(err, "write message") 305 | } 306 | } 307 | 308 | return nil 309 | } 310 | -------------------------------------------------------------------------------- /tx_manager.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "math/rand" 7 | "sync" 8 | "sync/atomic" 9 | "time" 10 | 11 | "github.com/tokenized/logger" 12 | "github.com/tokenized/pkg/bitcoin" 13 | "github.com/tokenized/pkg/wire" 14 | 15 | "github.com/google/uuid" 16 | "github.com/pkg/errors" 17 | ) 18 | 19 | // TxManager manages tx inventories and determines when txs should be requested from peer nodes. 20 | // It creates 256 txid maps and uses the first byte of the txid to divide the txs. This allows 21 | // separate locks on each map and reduces the size of each map. 
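// Shard selection is simply an index by the txid's first byte; the lookup used throughout this
// file looks roughly like this (sketch with hypothetical local names):
//
//	m.RLock()
//	shard := m.txMaps[txid[0]] // one of 256 maps, each guarded by its own lock
//	m.RUnlock()
//
//	shard.Lock()
//	_, seen := shard.txs[txid]
//	shard.Unlock()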
22 | type TxManager struct { 23 | txMaps []*txMap 24 | requestTimeout atomic.Value 25 | 26 | txChannel chan *wire.MsgTx 27 | 28 | txProcessor TxProcessor 29 | txSaver TxSaver 30 | 31 | sync.RWMutex 32 | } 33 | 34 | type txMap struct { 35 | txs map[bitcoin.Hash32]*TxData 36 | 37 | sync.RWMutex 38 | } 39 | 40 | type TxData struct { 41 | FirstSeen time.Time // time the txid was first seen in an inventory message 42 | LastRequested time.Time // time of the last request for the full tx 43 | Received *time.Time // time the full tx was first seen. nil when not seen yet 44 | NodeIDs []uuid.UUID // the nodes that have announced this tx. not including nodes the tx has been requested from 45 | ReceivedFrom *uuid.UUID // the node that provided the full tx. possibly use to track bad nodes 46 | 47 | sync.RWMutex 48 | } 49 | 50 | func NewTxManager(requestTimeout time.Duration) *TxManager { 51 | result := &TxManager{ 52 | txMaps: make([]*txMap, 256), 53 | txChannel: make(chan *wire.MsgTx, 1000), 54 | } 55 | result.requestTimeout.Store(requestTimeout) 56 | 57 | for i := range result.txMaps { 58 | result.txMaps[i] = newTxMap() 59 | } 60 | 61 | return result 62 | } 63 | 64 | func (m *TxManager) SetTxProcessor(txProcessor TxProcessor) { 65 | m.Lock() 66 | m.txProcessor = txProcessor 67 | m.Unlock() 68 | } 69 | 70 | func (m *TxManager) SetTxSaver(txSaver TxSaver) { 71 | m.Lock() 72 | m.txSaver = txSaver 73 | m.Unlock() 74 | } 75 | 76 | func (m *TxManager) Run(ctx context.Context) error { 77 | m.Lock() 78 | txProcessor := m.txProcessor 79 | txSaver := m.txSaver 80 | m.Unlock() 81 | 82 | if txProcessor == nil { 83 | for range m.txChannel { // wait for channel to close 84 | } 85 | 86 | return nil 87 | } 88 | 89 | for tx := range m.txChannel { 90 | isRelevant, err := txProcessor.ProcessTx(ctx, tx) 91 | if err != nil { 92 | return errors.Wrap(err, "process tx") 93 | } 94 | 95 | if isRelevant { 96 | logger.InfoWithFields(ctx, []logger.Field{ 97 | logger.Stringer("txid", tx.TxHash()), 98 | logger.Float64("tx_size_kb", float64(tx.SerializeSize())/1e3), 99 | }, "Relevant Tx") 100 | 101 | if txSaver != nil { 102 | if err := txSaver.SaveTx(ctx, tx); err != nil { 103 | return errors.Wrap(err, "save tx") 104 | } 105 | } 106 | } 107 | } 108 | 109 | return nil 110 | } 111 | 112 | func (m *TxManager) Stop(ctx context.Context) { 113 | close(m.txChannel) 114 | } 115 | 116 | func newTxMap() *txMap { 117 | return &txMap{ 118 | txs: make(map[bitcoin.Hash32]*TxData), 119 | } 120 | } 121 | 122 | // AddTxID adds a txid to the tx manager and returns true if the tx should be requested. 
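// The txid is requested when it is unknown or its last request is older than requestTimeout;
// otherwise the announcing node's ID is only recorded so GetTxRequests can retry it later. A
// caller-side sketch (hypothetical handler; the message construction mirrors
// MockExternalNode.ProvideTx):
//
//	request, err := txManager.AddTxID(ctx, nodeID, txid)
//	if err == nil && request {
//	    msg := &wire.MsgGetData{InvList: []*wire.InvVect{{Type: wire.InvTypeTx, Hash: txid}}}
//	    // ... queue msg to be sent to the node that announced the txid
//	}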
123 | func (m *TxManager) AddTxID(ctx context.Context, nodeID uuid.UUID, 124 | txid bitcoin.Hash32) (bool, error) { 125 | 126 | requestTimeout := m.requestTimeout.Load().(time.Duration) 127 | 128 | m.RLock() 129 | txMap := m.txMaps[txid[0]] 130 | m.RUnlock() 131 | 132 | txMap.Lock() 133 | data, exists := txMap.txs[txid] 134 | if exists { 135 | txMap.Unlock() 136 | data.Lock() 137 | 138 | if data.Received != nil { 139 | data.Unlock() 140 | return false, nil // already received tx 141 | } 142 | 143 | if time.Since(data.LastRequested) < requestTimeout { 144 | // Save node id to request later if this request doesn't return 145 | data.NodeIDs = appendID(data.NodeIDs, nodeID) 146 | data.Unlock() 147 | return false, nil // requested recently 148 | } 149 | 150 | // Request timed out, so request again 151 | data.LastRequested = time.Now() 152 | 153 | // Make sure the node id isn't in the list since it is being requested 154 | data.NodeIDs = removeID(data.NodeIDs, nodeID) 155 | 156 | data.Unlock() 157 | return true, nil 158 | } 159 | 160 | now := time.Now() 161 | data = &TxData{ 162 | FirstSeen: now, 163 | LastRequested: now, 164 | NodeIDs: []uuid.UUID{}, // don't include this node id since it will be requested now 165 | } 166 | 167 | txMap.txs[txid] = data 168 | txMap.Unlock() 169 | 170 | return true, nil 171 | } 172 | 173 | func removeID(ids []uuid.UUID, newID uuid.UUID) []uuid.UUID { 174 | for i, id := range ids { 175 | if bytes.Equal(id[:], newID[:]) { 176 | return append(ids[:i], ids[i+1:]...) 177 | } 178 | } 179 | 180 | return ids // not in list 181 | } 182 | 183 | func appendID(ids []uuid.UUID, newID uuid.UUID) []uuid.UUID { 184 | for _, id := range ids { 185 | if bytes.Equal(id[:], newID[:]) { 186 | return ids // already in list 187 | } 188 | } 189 | 190 | return append(ids, newID) // append to list 191 | } 192 | 193 | // AddTx registers that we have received the full tx data. We want to keep the txid around for a 194 | // while to ensure we don't re-request it in case it is still propagating some of the nodes. 195 | func (m *TxManager) AddTx(ctx context.Context, interrupt <-chan interface{}, nodeID uuid.UUID, 196 | tx *wire.MsgTx) error { 197 | 198 | now := time.Now() 199 | txid := *tx.TxHash() 200 | 201 | m.RLock() 202 | txMap := m.txMaps[txid[0]] 203 | m.RUnlock() 204 | 205 | txMap.Lock() 206 | data, exists := txMap.txs[txid] 207 | if exists { 208 | txMap.Unlock() 209 | data.Lock() 210 | 211 | isNew := false 212 | if data.Received == nil { 213 | data.Received = &now 214 | data.ReceivedFrom = &nodeID 215 | isNew = true 216 | } 217 | data.Unlock() 218 | 219 | if isNew { 220 | m.sendTx(ctx, interrupt, tx) 221 | } 222 | 223 | return nil 224 | } 225 | 226 | // Add new item 227 | data = &TxData{ 228 | FirstSeen: now, 229 | LastRequested: now, 230 | Received: &now, 231 | ReceivedFrom: &nodeID, 232 | } 233 | 234 | txMap.txs[txid] = data 235 | txMap.Unlock() 236 | 237 | m.sendTx(ctx, interrupt, tx) 238 | return nil 239 | } 240 | 241 | // sendTx adds a tx to the tx channel if it is set. 
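// It blocks until the channel accepts the tx, logging a warning every three seconds while the
// consumer (Run) falls behind, and only gives up when the interrupt channel is closed.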
242 | func (m *TxManager) sendTx(ctx context.Context, interrupt <-chan interface{}, tx *wire.MsgTx) { 243 | start := time.Now() 244 | for { 245 | select { 246 | case m.txChannel <- tx: 247 | return 248 | case <-interrupt: 249 | logger.WarnWithFields(ctx, []logger.Field{ 250 | logger.Stringer("txid", tx.TxHash()), 251 | logger.MillisecondsFromNano("elapsed_ms", time.Since(start).Nanoseconds()), 252 | }, "Aborting add to tx manager channel") 253 | return 254 | case <-time.After(3 * time.Second): 255 | logger.WarnWithFields(ctx, []logger.Field{ 256 | logger.Stringer("txid", tx.TxHash()), 257 | logger.MillisecondsFromNano("elapsed_ms", time.Since(start).Nanoseconds()), 258 | }, "Waiting to add to tx manager channel") 259 | } 260 | } 261 | } 262 | 263 | type indexList []int 264 | 265 | func (l indexList) Swap(i, j int) { 266 | l[i], l[j] = l[j], l[i] 267 | } 268 | 269 | // GetTxRequests returns a set of txs that need to be requested again. 270 | func (m *TxManager) GetTxRequests(ctx context.Context, nodeID uuid.UUID, 271 | max int) ([]bitcoin.Hash32, error) { 272 | 273 | requestTimeout := m.requestTimeout.Load().(time.Duration) 274 | 275 | // Randomly sort indexes 276 | indexes := make(indexList, 256) 277 | for i := range indexes { 278 | indexes[i] = i 279 | } 280 | rand.Seed(time.Now().UnixNano()) 281 | rand.Shuffle(len(indexes), indexes.Swap) 282 | 283 | now := time.Now() 284 | var result []bitcoin.Hash32 285 | count := 0 286 | for _, index := range indexes { 287 | m.RLock() 288 | txMap := m.txMaps[index] 289 | m.RUnlock() 290 | 291 | txMap.RLock() 292 | for txid, data := range txMap.txs { 293 | data.Lock() 294 | if data.Received != nil { 295 | data.Unlock() 296 | continue // already received 297 | } 298 | 299 | if !contains(data.NodeIDs, nodeID) { 300 | data.Unlock() 301 | continue // not reported by this node 302 | } 303 | 304 | if time.Since(data.LastRequested) < requestTimeout { 305 | data.Unlock() 306 | continue // recently requested 307 | } 308 | 309 | data.LastRequested = now 310 | // Remove node id from the list since it is being requested 311 | data.NodeIDs = removeID(data.NodeIDs, nodeID) 312 | data.Unlock() 313 | 314 | result = append(result, txid) 315 | count++ 316 | } 317 | txMap.RUnlock() 318 | 319 | if count >= max { 320 | return result, nil 321 | } 322 | } 323 | 324 | return result, nil 325 | } 326 | 327 | func contains(ids []uuid.UUID, lookup uuid.UUID) bool { 328 | for _, id := range ids { 329 | if bytes.Equal(id[:], lookup[:]) { 330 | return true 331 | } 332 | } 333 | 334 | return false 335 | } 336 | 337 | // Clean removes all txs that are older than the specified time. Txs are retained for long enough to 338 | // prevent redownloading them while they continue to propagate, but are no longer needed after nodes 339 | // stop announcing them. 
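// In cmd/node/main.go this is driven by a periodic thread that passes in the time of the
// previous pass, roughly like this (sketch of that wiring):
//
//	previousTime := time.Now()
//	cleanThread, cleanComplete := threads.NewPeriodicThreadComplete("Clean Txs",
//	    func(ctx context.Context) error {
//	        if err := txManager.Clean(ctx, previousTime); err != nil {
//	            return errors.Wrap(err, "clean tx manager")
//	        }
//	        previousTime = time.Now()
//	        return nil
//	    }, 5*time.Minute, &wait)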
340 | func (m *TxManager) Clean(ctx context.Context, oldest time.Time) error { 341 | start := time.Now() 342 | 343 | removedCount := 0 344 | for i := 0; i < 256; i++ { 345 | m.RLock() 346 | txMap := m.txMaps[i] 347 | m.RUnlock() 348 | 349 | txMap.Lock() 350 | newTxs := make(map[bitcoin.Hash32]*TxData) 351 | for txid, data := range txMap.txs { 352 | if data.Latest().After(oldest) { 353 | newTxs[txid] = data 354 | } else { 355 | removedCount++ 356 | } 357 | } 358 | txMap.txs = newTxs 359 | txMap.Unlock() 360 | } 361 | 362 | logger.ElapsedWithFields(ctx, start, []logger.Field{ 363 | logger.Int("removed_count", removedCount), 364 | }, "Cleaned tx manager") 365 | return nil 366 | } 367 | 368 | func (tx *TxData) Latest() time.Time { 369 | tx.RLock() 370 | defer tx.RUnlock() 371 | 372 | if tx.Received != nil { 373 | return *tx.Received 374 | } 375 | 376 | return tx.LastRequested 377 | } 378 | -------------------------------------------------------------------------------- /headers/proof_of_work_test.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "encoding/json" 5 | "math/big" 6 | "os" 7 | "testing" 8 | 9 | "github.com/tokenized/bitcoin_reader/internal/platform/tests" 10 | "github.com/tokenized/pkg/bitcoin" 11 | "github.com/tokenized/pkg/storage" 12 | "github.com/tokenized/pkg/wire" 13 | 14 | "github.com/pkg/errors" 15 | ) 16 | 17 | func Test_CalculateTarget_725000(t *testing.T) { 18 | ctx := tests.Context() 19 | 20 | file, err := os.Open("test_fixtures/headers_725000.txt") 21 | if err != nil { 22 | t.Fatalf("Failed to open file : %s", err) 23 | } 24 | 25 | store := storage.NewMockStorage() 26 | headersRepo := NewRepository(DefaultConfig(), store) 27 | headersRepo.DisableDifficulty() 28 | height := 725000 29 | 30 | var headers []*wire.BlockHeader 31 | if err := json.NewDecoder(file).Decode(&headers); err != nil { 32 | t.Fatalf("Failed to read headers : %s", err) 33 | } 34 | 35 | work := &big.Int{} 36 | work.SetString("134b2eb2b14bbedbad9a14b", 16) // Chain work from 724999 37 | 38 | for i, header := range headers { 39 | if i == 150 { 40 | // Re-enable after there are enough previous headers to calculate it 41 | headersRepo.EnableDifficulty() 42 | } 43 | if i > 150 { 44 | target, err := headersRepo.branches[0].Target(ctx, height) 45 | if err != nil { 46 | t.Fatalf("Failed to calculate target : %s", err) 47 | } 48 | 49 | bits := bitcoin.ConvertToBits(target, bitcoin.MaxBits) 50 | if bits != header.Bits { 51 | t.Fatalf("Wrong calculated target bits for height %d : got 0x%08x, want 0x%08x", 52 | height, bits, header.Bits) 53 | } 54 | 55 | if !header.WorkIsValid() { 56 | t.Fatalf("Failed to verify header work at height %d", height) 57 | } 58 | 59 | // Verify work was done on header 60 | actualWork := header.BlockHash().Value() 61 | // t.Logf("Work : %64s", actualWork.Text(16)) 62 | // t.Logf("Target : %64s", target.Text(16)) 63 | 64 | if actualWork.Cmp(target) > 0 { 65 | t.Fatalf("Not enough work for height %d", height) 66 | } 67 | } 68 | 69 | if i == 0 { 70 | if err := headersRepo.MockLatest(ctx, header, height, work); err != nil { 71 | t.Fatalf("Failed to initialize headers repo : %s", err) 72 | } 73 | } else { 74 | if err := headersRepo.ProcessHeader(ctx, header); err != nil { 75 | t.Fatalf("Failed to add header : %s", err) 76 | } 77 | } 78 | 79 | blockWork := bitcoin.ConvertToWork(bitcoin.ConvertToDifficulty(header.Bits)) 80 | work.Add(work, blockWork) 81 | 82 | _, lastWork, err := 
headersRepo.branches[0].TimeAndWork(ctx, height) 83 | if err != nil { 84 | t.Fatalf("Failed to get header stats %d : %s", height, err) 85 | } 86 | 87 | if work.Cmp(lastWork) != 0 { 88 | t.Fatalf("Wrong result work for block %d : \ngot %s \nwant %s", height, 89 | work.Text(16), lastWork.Text(16)) 90 | } 91 | 92 | height++ 93 | } 94 | 95 | wantWork := &big.Int{} 96 | wantWork.SetString("134e46a6635192307dfa78b", 16) // Chain work from 725835 97 | if work.Cmp(wantWork) != 0 { 98 | t.Errorf("Wrong result work : \ngot %s \nwant %s", work.Text(16), wantWork.Text(16)) 99 | } 100 | } 101 | 102 | // Test_ChainSplit_556000_BSV ensure that the BSV split header is accepted. 103 | func Test_ChainSplit_556000_BSV(t *testing.T) { 104 | ctx := tests.Context() 105 | 106 | file, err := os.Open("test_fixtures/headers_556000.txt") 107 | if err != nil { 108 | t.Fatalf("Failed to open file : %s", err) 109 | } 110 | 111 | store := storage.NewMockStorage() 112 | headersRepo := NewRepository(DefaultConfig(), store) 113 | headersRepo.DisableDifficulty() 114 | height := 556000 115 | 116 | var headers []*wire.BlockHeader 117 | if err := json.NewDecoder(file).Decode(&headers); err != nil { 118 | t.Fatalf("Failed to read headers : %s", err) 119 | } 120 | 121 | work := &big.Int{} 122 | work.SetString("d167cf38dd7a9c078a40d5", 16) // Chain work from 555999 123 | 124 | for i, header := range headers { 125 | if i == 150 { 126 | // Re-enable after there are enough previous headers to calculate it 127 | headersRepo.EnableDifficulty() 128 | } 129 | if i > 150 { 130 | target, err := headersRepo.branches[0].Target(ctx, height) 131 | if err != nil { 132 | t.Fatalf("Failed to calculate target : %s", err) 133 | } 134 | 135 | bits := bitcoin.ConvertToBits(target, bitcoin.MaxBits) 136 | if bits != header.Bits { 137 | t.Fatalf("Wrong calculated target bits for height %d : got 0x%08x, want 0x%08x", 138 | height, bits, header.Bits) 139 | } 140 | 141 | if !header.WorkIsValid() { 142 | t.Fatalf("Failed to verify header work at height %d", height) 143 | } 144 | 145 | // Verify work was done on header 146 | actualWork := header.BlockHash().Value() 147 | // t.Logf("Work : %64s", actualWork.Text(16)) 148 | // t.Logf("Target : %64s", target.Text(16)) 149 | // t.Logf("Accumulated Work %d : %64s", height, work.Text(16)) 150 | 151 | if actualWork.Cmp(target) > 0 { 152 | t.Fatalf("Not enough work for height %d", height) 153 | } 154 | } 155 | 156 | if i == 0 { 157 | if err := headersRepo.MockLatest(ctx, header, height, work); err != nil { 158 | t.Fatalf("Failed to initialize headers repo : %s", err) 159 | } 160 | } else { 161 | if err := headersRepo.ProcessHeader(ctx, header); err != nil { 162 | t.Fatalf("Failed to add header : %s", err) 163 | } 164 | } 165 | 166 | blockWork := bitcoin.ConvertToWork(bitcoin.ConvertToDifficulty(header.Bits)) 167 | work.Add(work, blockWork) 168 | 169 | _, lastWork, err := headersRepo.branches[0].TimeAndWork(ctx, height) 170 | if err != nil { 171 | t.Fatalf("Failed to get header stats : %s", err) 172 | } 173 | 174 | if work.Cmp(lastWork) != 0 { 175 | t.Fatalf("Wrong result work for block %d : \ngot %s \nwant %s", height, 176 | work.Text(16), lastWork.Text(16)) 177 | } 178 | 179 | height++ 180 | } 181 | 182 | t.Logf("Added headers to height : %d", headersRepo.Height()) 183 | 184 | wantWork := &big.Int{} 185 | wantWork.SetString("d555fcdba6ad0ba9b95c36", 16) // Chain work from 557985 186 | if work.Cmp(wantWork) != 0 { 187 | t.Errorf("Wrong result work : \ngot %s \nwant %s", work.Text(16), wantWork.Text(16)) 188 | } 189 
| } 190 | 191 | // Test_ChainSplit_556000_BCH ensure that the BCH split header is rejected with ErrWrongChain. 192 | func Test_ChainSplit_556000_BCH(t *testing.T) { 193 | ctx := tests.Context() 194 | 195 | file, err := os.Open("test_fixtures/headers_556000.txt") 196 | if err != nil { 197 | t.Fatalf("Failed to open file : %s", err) 198 | } 199 | 200 | store := storage.NewMockStorage() 201 | headersRepo := NewRepository(DefaultConfig(), store) 202 | headersRepo.DisableDifficulty() 203 | height := 556000 204 | 205 | var headers []*wire.BlockHeader 206 | if err := json.NewDecoder(file).Decode(&headers); err != nil { 207 | t.Fatalf("Failed to read headers : %s", err) 208 | } 209 | 210 | if len(headers) < 1000 { 211 | t.Fatalf("Not enough headers read : %d", len(headers)) 212 | } 213 | 214 | splitHash, _ := bitcoin.NewHash32FromStr("000000000000000001d956714215d96ffc00e0afda4cd0a96c96f8d802b1662b") 215 | if !headers[767].BlockHash().Equal(splitHash) { 216 | t.Fatalf("Wrong header at height 556767 : \ngot %s \nwant %s", headers[767].BlockHash(), 217 | splitHash) 218 | } 219 | 220 | bchMerkleRoot, _ := bitcoin.NewHash32FromStr("1cf31105bd6b1b4dba9ae55290ec06fff15b4567ec62a6e3863409bb3efd1944") 221 | bchSplitHeader := &wire.BlockHeader{ 222 | Version: 0x20000000, 223 | PrevBlock: *headers[766].BlockHash(), 224 | MerkleRoot: *bchMerkleRoot, 225 | Timestamp: 1542304936, // Nov 15, 2018 6:02 PM 226 | Bits: 402792411, 227 | Nonce: 3911120513, 228 | } 229 | 230 | bchHash, _ := bitcoin.NewHash32FromStr("0000000000000000004626ff6e3b936941d341c5932ece4357eeccac44e6d56c") 231 | if !bchHash.Equal(bchSplitHeader.BlockHash()) { 232 | t.Errorf("Incorrect BCH header hash : %s", bchSplitHeader.BlockHash()) 233 | } 234 | 235 | headers = append(headers[:767], bchSplitHeader) 236 | 237 | work := &big.Int{} 238 | work.SetString("d167cf38dd7a9c078a40d5", 16) // Chain work from 555999 239 | 240 | for i, header := range headers { 241 | if i == 150 { 242 | // Re-enable after there are enough previous headers to calculate it 243 | headersRepo.EnableDifficulty() 244 | } 245 | if i > 150 { 246 | target, err := headersRepo.branches[0].Target(ctx, height) 247 | if err != nil { 248 | t.Fatalf("Failed to calculate target : %s", err) 249 | } 250 | 251 | bits := bitcoin.ConvertToBits(target, bitcoin.MaxBits) 252 | if bits != header.Bits { 253 | t.Fatalf("Wrong calculated target bits for height %d : got 0x%08x, want 0x%08x", 254 | height, bits, header.Bits) 255 | } 256 | 257 | if !header.WorkIsValid() { 258 | t.Fatalf("Failed to verify header work at height %d", height) 259 | } 260 | 261 | // Verify work was done on header 262 | actualWork := header.BlockHash().Value() 263 | // t.Logf("Work : %64s", actualWork.Text(16)) 264 | // t.Logf("Target : %64s", target.Text(16)) 265 | // t.Logf("Accumulated Work %d : %64s", height, work.Text(16)) 266 | 267 | if actualWork.Cmp(target) > 0 { 268 | t.Fatalf("Not enough work for height %d", height) 269 | } 270 | } 271 | 272 | if i == 0 { 273 | if err := headersRepo.MockLatest(ctx, header, height, work); err != nil { 274 | t.Fatalf("Failed to initialize headers repo : %s", err) 275 | } 276 | } else if height == 556767 { 277 | if err := headersRepo.ProcessHeader(ctx, header); err == nil { 278 | t.Fatalf("Added invalid header") 279 | } else if errors.Cause(err) != ErrWrongChain { 280 | t.Errorf("Wrong error : %s", err) 281 | } 282 | break 283 | } else { 284 | if err := headersRepo.ProcessHeader(ctx, header); err != nil { 285 | t.Fatalf("Failed to add header : %s", err) 286 | } 287 | } 288 | 289 | 
blockWork := bitcoin.ConvertToWork(bitcoin.ConvertToDifficulty(header.Bits)) 290 | work.Add(work, blockWork) 291 | 292 | _, lastWork, err := headersRepo.branches[0].TimeAndWork(ctx, height) 293 | if err != nil { 294 | t.Fatalf("Failed to get header stats : %s", err) 295 | } 296 | 297 | if work.Cmp(lastWork) != 0 { 298 | t.Fatalf("Wrong result work for block %d : \ngot %s \nwant %s", height, 299 | work.Text(16), lastWork.Text(16)) 300 | } 301 | 302 | height++ 303 | } 304 | 305 | t.Logf("Added headers to height : %d", headersRepo.Height()) 306 | 307 | wantWork := &big.Int{} 308 | wantWork.SetString("d3367b433e911be0f8dbb9", 16) // Chain work from 556766 309 | if work.Cmp(wantWork) != 0 { 310 | t.Errorf("Wrong result work : \ngot %s \nwant %s", work.Text(16), wantWork.Text(16)) 311 | } 312 | } 313 | -------------------------------------------------------------------------------- /block_manager.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "time" 8 | 9 | "github.com/tokenized/logger" 10 | "github.com/tokenized/pkg/bitcoin" 11 | "github.com/tokenized/threads" 12 | 13 | "github.com/pkg/errors" 14 | ) 15 | 16 | var ( 17 | ErrWrongBlock = errors.New("Wrong Block") 18 | 19 | BlockAborted = errors.New("Block Aborted") 20 | ) 21 | 22 | type BlockManager struct { 23 | blockTxManager BlockTxManager 24 | requestor BlockRequestor 25 | concurrentBlockRequests int 26 | blockRequestDelay time.Duration 27 | 28 | requests chan *downloadRequest 29 | requestsClosed bool 30 | requestLock sync.Mutex 31 | 32 | downloaders []*downloadThread 33 | downloaderLock sync.Mutex 34 | 35 | currentHash bitcoin.Hash32 36 | currentIsComplete bool 37 | currentComplete chan interface{} 38 | currentLock sync.Mutex 39 | 40 | sync.Mutex 41 | } 42 | 43 | type downloadThread struct { 44 | downloader *BlockDownloader 45 | thread *threads.InterruptableThread 46 | } 47 | 48 | type downloadRequest struct { 49 | hash bitcoin.Hash32 50 | height int 51 | processor TxProcessor 52 | complete chan error 53 | abort chan interface{} 54 | } 55 | 56 | func NewBlockManager(blockTxManager BlockTxManager, requestor BlockRequestor, 57 | concurrentBlockRequests int, blockRequestDelay time.Duration) *BlockManager { 58 | 59 | return &BlockManager{ 60 | blockTxManager: blockTxManager, 61 | requestor: requestor, 62 | concurrentBlockRequests: concurrentBlockRequests, 63 | blockRequestDelay: blockRequestDelay, 64 | requests: make(chan *downloadRequest, 10), 65 | } 66 | } 67 | 68 | func (m *BlockManager) AddRequest(ctx context.Context, hash bitcoin.Hash32, height int, 69 | processor TxProcessor) (<-chan error, chan<- interface{}) { 70 | 71 | request := &downloadRequest{ 72 | hash: hash, 73 | height: height, 74 | processor: processor, 75 | complete: make(chan error), 76 | abort: make(chan interface{}), 77 | } 78 | 79 | m.requestLock.Lock() 80 | if m.requestsClosed { 81 | m.requestLock.Unlock() 82 | return nil, nil 83 | } 84 | m.requests <- request 85 | m.requestLock.Unlock() 86 | 87 | return request.complete, request.abort 88 | } 89 | 90 | func (m *BlockManager) close(blockInterrupt chan<- interface{}) { 91 | close(blockInterrupt) 92 | 93 | m.requestLock.Lock() 94 | m.requestsClosed = true 95 | close(m.requests) 96 | m.requestLock.Unlock() 97 | } 98 | 99 | func (m *BlockManager) Run(ctx context.Context, interrupt <-chan interface{}) error { 100 | defer func() { 101 | m.Stop(ctx) 102 | m.shutdown(ctx) 103 | }() 104 | 105 | abortInterrupt := 
make(chan interface{}) 106 | blockInterrupt := make(chan interface{}) 107 | go func() { 108 | select { 109 | case <-abortInterrupt: 110 | m.close(blockInterrupt) 111 | case <-interrupt: 112 | m.close(blockInterrupt) 113 | } 114 | }() 115 | 116 | for request := range m.requests { 117 | if err := m.processRequest(ctx, request, blockInterrupt); err != nil { 118 | close(abortInterrupt) 119 | for range m.requests { // flush channel 120 | } 121 | 122 | if errors.Cause(err) == threads.Interrupted { 123 | return nil 124 | } 125 | 126 | logger.ErrorWithFields(ctx, []logger.Field{ 127 | logger.Stringer("block_hash", request.hash), 128 | logger.Int("block_height", request.height), 129 | }, "Failed to process request : %s", err) 130 | 131 | return errors.Wrap(err, "process request") 132 | } 133 | } 134 | 135 | return nil 136 | } 137 | 138 | func (m *BlockManager) Stop(ctx context.Context) { 139 | logger.InfoWithFields(ctx, []logger.Field{ 140 | logger.Int("downloader_count", len(m.downloaders)), 141 | }, "Stopping block manager") 142 | 143 | m.downloaderLock.Lock() 144 | downloaders := make([]*downloadThread, len(m.downloaders)) 145 | copy(downloaders, m.downloaders) 146 | m.downloaderLock.Unlock() 147 | 148 | for _, dt := range downloaders { 149 | dt.downloader.Cancel(ctx) 150 | dt.thread.Stop(ctx) 151 | } 152 | } 153 | 154 | func (m *BlockManager) shutdown(ctx context.Context) { 155 | start := time.Now() 156 | count := 0 157 | for { 158 | m.downloaderLock.Lock() 159 | activeCount := len(m.downloaders) 160 | if count >= 30 { 161 | for _, dt := range m.downloaders { 162 | logger.WarnWithFields(ctx, []logger.Field{ 163 | logger.Stringer("connection", dt.downloader.RequesterID()), 164 | logger.Stringer("block_hash", dt.downloader.Hash()), 165 | logger.Int("block_height", dt.downloader.Height()), 166 | }, "Waiting for: Block Downloader Shutdown") 167 | } 168 | } 169 | m.downloaderLock.Unlock() 170 | 171 | if activeCount == 0 { 172 | logger.Info(ctx, "Finished Block Manager") 173 | return 174 | } 175 | 176 | if count >= 30 { 177 | logger.WarnWithFields(ctx, []logger.Field{ 178 | logger.Timestamp("start", start.UnixNano()), 179 | logger.MillisecondsFromNano("elapsed_ms", time.Since(start).Nanoseconds()), 180 | logger.Int("active_downloaders", activeCount), 181 | }, "Waiting for: Block Manager Shutdown") 182 | count = 0 183 | } 184 | 185 | count++ 186 | time.Sleep(time.Millisecond * 100) 187 | } 188 | } 189 | 190 | func (m *BlockManager) processRequest(ctx context.Context, request *downloadRequest, 191 | interrupt <-chan interface{}) error { 192 | 193 | ctx = logger.ContextWithLogFields(ctx, logger.Stringer("block_hash", request.hash), 194 | logger.Int("block_height", request.height)) 195 | 196 | logger.Verbose(ctx, "Starting block request") 197 | 198 | m.currentLock.Lock() 199 | m.currentHash = request.hash 200 | m.currentIsComplete = false 201 | m.currentComplete = make(chan interface{}) 202 | m.currentLock.Unlock() 203 | 204 | // Send initial block request. 205 | countWithoutActiveDownload := 0 206 | if err := m.requestBlock(ctx, request.hash, request.height, request.processor); err != nil { 207 | logger.Warn(ctx, "Failed to request block : %s", err) 208 | } 209 | 210 | // Wait for a download to complete and send new block requests as necessary. 
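// The loop below re-evaluates the download on every blockRequestDelay tick: it logs the
// downloaders still working on this block, issues an additional request to another node while
// fewer than concurrentBlockRequests downloads are active, and gives up with ErrNodeNotAvailable
// after more than 20 consecutive ticks with no active downloader. The remaining select cases
// handle an external interrupt, an abort of this specific request, and completion, which cancels
// any redundant downloaders still running.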
211 | for { 212 | select { 213 | case <-time.After(m.blockRequestDelay): // most blocks finish within 5 seconds 214 | downloaders := m.Downloaders(request.hash) 215 | logger.VerboseWithFields(ctx, []logger.Field{ 216 | logger.Stringers("active_downloads", downloaders), 217 | }, "Active block downloads") 218 | activeDownloadCount := len(downloaders) 219 | if activeDownloadCount > 0 { 220 | countWithoutActiveDownload = 0 221 | } else { 222 | countWithoutActiveDownload++ 223 | } 224 | 225 | if activeDownloadCount < m.concurrentBlockRequests { 226 | if err := m.requestBlock(ctx, request.hash, request.height, 227 | request.processor); err != nil { 228 | logger.Warn(ctx, "Failed to request block : %s", err) 229 | } else { 230 | activeDownloadCount++ 231 | } 232 | } 233 | 234 | if countWithoutActiveDownload > 20 { 235 | return ErrNodeNotAvailable 236 | } 237 | 238 | case <-interrupt: 239 | return threads.Interrupted 240 | 241 | case <-request.abort: 242 | m.cancelDownloaders(ctx, request.hash) 243 | request.complete <- BlockAborted 244 | return nil 245 | 246 | case <-m.currentComplete: 247 | m.cancelDownloaders(ctx, request.hash) // stop any others still downloading 248 | close(request.complete) 249 | return nil 250 | } 251 | } 252 | } 253 | 254 | func (m *BlockManager) DownloaderCount(hash bitcoin.Hash32) int { 255 | result := 0 256 | m.downloaderLock.Lock() 257 | for _, dt := range m.downloaders { 258 | dh := dt.downloader.Hash() 259 | if hash.Equal(&dh) { 260 | result++ 261 | } 262 | } 263 | m.downloaderLock.Unlock() 264 | return result 265 | } 266 | 267 | func (m *BlockManager) Downloaders(hash bitcoin.Hash32) []fmt.Stringer { 268 | var result []fmt.Stringer 269 | m.downloaderLock.Lock() 270 | for _, dt := range m.downloaders { 271 | dh := dt.downloader.Hash() 272 | if hash.Equal(&dh) { 273 | result = append(result, dt.downloader.RequesterID()) 274 | } 275 | } 276 | m.downloaderLock.Unlock() 277 | return result 278 | } 279 | 280 | func (m *BlockManager) cancelDownloaders(ctx context.Context, hash bitcoin.Hash32) { 281 | m.downloaderLock.Lock() 282 | downloaders := make([]*downloadThread, len(m.downloaders)) 283 | copy(downloaders, m.downloaders) 284 | m.downloaderLock.Unlock() 285 | 286 | for _, dt := range downloaders { 287 | dh := dt.downloader.Hash() 288 | if hash.Equal(&dh) { 289 | logger.VerboseWithFields(ctx, []logger.Field{ 290 | logger.Stringer("connection", dt.downloader.RequesterID()), 291 | logger.Stringer("block_hash", dt.downloader.Hash()), 292 | logger.Int("block_height", dt.downloader.Height()), 293 | }, "Cancelling block downloader") 294 | dt.downloader.Cancel(ctx) 295 | dt.thread.Stop(ctx) 296 | } 297 | } 298 | } 299 | 300 | func (m *BlockManager) requestBlock(ctx context.Context, hash bitcoin.Hash32, height int, 301 | processor TxProcessor) error { 302 | 303 | logger.Verbose(ctx, "Creating block request") 304 | downloader := NewBlockDownloader(processor, m.blockTxManager, hash, height) 305 | 306 | node, err := m.requestor.RequestBlock(ctx, hash, downloader.HandleBlock, downloader.Stop) 307 | if err != nil { 308 | return err 309 | } 310 | 311 | nodeID := node.ID() 312 | downloader.SetCanceller(nodeID, node) 313 | 314 | dt := &downloadThread{ 315 | downloader: downloader, 316 | thread: threads.NewInterruptableThread(fmt.Sprintf("Download Block: %s", hash), 317 | downloader.Run), 318 | } 319 | df := &downloadFinisher{ 320 | manager: m, 321 | downloader: downloader, 322 | } 323 | 324 | onCompleteChannel := dt.thread.GetCompleteChannel() 325 | onCompleteThread := 
threads.NewUninterruptableThread(fmt.Sprintf("On Block Complete: %s", hash), 326 | func(ctx context.Context) error { 327 | err, ok := <-onCompleteChannel 328 | if ok { 329 | df.onDownloaderCompleted(ctx, err) 330 | } 331 | return nil 332 | }) 333 | 334 | m.downloaderLock.Lock() 335 | m.downloaders = append(m.downloaders, dt) 336 | m.downloaderLock.Unlock() 337 | 338 | // Start download thread 339 | ctx = logger.ContextWithLogFields(ctx, logger.Stringer("connection", nodeID)) 340 | dt.thread.Start(ctx) 341 | onCompleteThread.Start(ctx) 342 | return nil 343 | } 344 | 345 | func (m *BlockManager) removeDownloader(ctx context.Context, downloader *BlockDownloader) { 346 | m.downloaderLock.Lock() 347 | for i, dt := range m.downloaders { 348 | if dt.downloader == downloader { 349 | m.downloaders = append(m.downloaders[:i], m.downloaders[i+1:]...) 350 | m.downloaderLock.Unlock() 351 | return 352 | } 353 | } 354 | 355 | logger.Warn(ctx, "Block downloader not found to remove") 356 | m.downloaderLock.Unlock() 357 | } 358 | 359 | func (m *BlockManager) markBlockRequestComplete(ctx context.Context, hash bitcoin.Hash32) { 360 | // Update status of block request 361 | m.currentLock.Lock() 362 | defer m.currentLock.Unlock() 363 | 364 | if !hash.Equal(&m.currentHash) { 365 | logger.Verbose(ctx, "Block not currently active") 366 | return 367 | } 368 | 369 | if m.currentIsComplete { 370 | logger.Verbose(ctx, "Block already marked complete") 371 | return 372 | } 373 | 374 | m.currentIsComplete = true 375 | close(m.currentComplete) 376 | logger.Verbose(ctx, "Block marked complete") 377 | } 378 | 379 | type downloadFinisher struct { 380 | manager *BlockManager 381 | downloader *BlockDownloader 382 | } 383 | 384 | func (c *downloadFinisher) onDownloaderCompleted(ctx context.Context, err error) { 385 | hash := c.downloader.Hash() 386 | ctx = logger.ContextWithLogFields(ctx, 387 | logger.Stringer("connection", c.downloader.RequesterID()), 388 | logger.Stringer("block_hash", hash), logger.Int("block_height", c.downloader.Height())) 389 | logger.Verbose(ctx, "Finishing downloader : %s", err) 390 | 391 | c.manager.removeDownloader(ctx, c.downloader) 392 | 393 | if err == nil { 394 | c.manager.markBlockRequestComplete(ctx, hash) 395 | return 396 | } 397 | 398 | if errors.Cause(err) == threads.Interrupted { 399 | logger.Verbose(ctx, "Block download interrupted") 400 | return 401 | } 402 | 403 | if errors.Cause(err) == errBlockDownloadCancelled { 404 | logger.Verbose(ctx, "Block download cancelled") 405 | return 406 | } 407 | 408 | if IsCloseError(err) { 409 | logger.Verbose(ctx, "Block download aborted by remote node : %s", err) 410 | return 411 | } 412 | 413 | logger.Warn(ctx, "Block download failed : %s", err) 414 | } 415 | -------------------------------------------------------------------------------- /scaling_test.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | mathRand "math/rand" 7 | "reflect" 8 | "runtime" 9 | "sync" 10 | "sync/atomic" 11 | "testing" 12 | "time" 13 | 14 | "github.com/tokenized/bitcoin_reader/headers" 15 | "github.com/tokenized/config" 16 | "github.com/tokenized/logger" 17 | "github.com/tokenized/pkg/bitcoin" 18 | "github.com/tokenized/pkg/merkle_proof" 19 | "github.com/tokenized/pkg/storage" 20 | "github.com/tokenized/pkg/wire" 21 | "github.com/tokenized/threads" 22 | 23 | "github.com/pkg/errors" 24 | ) 25 | 26 | func Test_Memory(t *testing.T) { 27 | t.Skip("With processor count at 4 or 
more it doesn't seem to accumulate memory") 28 | 29 | Run_Test_Memory(t, 2, 50, 1000000, 10, 10, time.Microsecond*10) 30 | } 31 | 32 | // cpuCount is the GOMAXPROCS value the test runs with; nodeCount is how many mock node connections to use. 33 | // totalTxCount is the total number of transactions to generate and distribute. 34 | // distCount is how many nodes to send each tx to. 35 | // checkCount is how many times to sample memory stats; txFrequency is the delay between tx deliveries. 36 | func Run_Test_Memory(t *testing.T, cpuCount, nodeCount, totalTxCount, distCount, checkCount int, 37 | txFrequency time.Duration) { 38 | 39 | ctx := logger.ContextWithLogger(context.Background(), true, false, "") 40 | store := storage.NewMockStorage() 41 | 42 | headersRepo := headers.NewRepository(headers.DefaultConfig(), store) 43 | startTime := uint32(952644136) 44 | headersRepo.DisableDifficulty() 45 | headersRepo.InitializeWithTimeStamp(startTime) 46 | headers.MockHeaders(ctx, headersRepo, headersRepo.LastHash(), startTime, 1100) 47 | if err := headersRepo.Clean(ctx); err != nil { 48 | t.Fatalf("Failed to clean headers : %s", err) 49 | } 50 | 51 | peers := NewPeerRepository(store, "") 52 | 53 | txManager := NewTxManager(time.Second * 10) 54 | txProcessor := NewMockTxProcessor() 55 | txManager.SetTxProcessor(txProcessor) 56 | 57 | nodeConfig := &Config{ 58 | Network: bitcoin.MainNet, 59 | Timeout: config.NewDuration(time.Hour * 4), 60 | ScanCount: 1000, 61 | TxRequestCount: 10000, 62 | StartupDelay: config.NewDuration(time.Minute), 63 | ConcurrentBlockRequests: 2, 64 | DesiredNodeCount: 50, 65 | StartBlockHeight: 700000, 66 | BlockRequestDelay: config.NewDuration(time.Second * 5), 67 | } 68 | nodes := make([]*MockNode, nodeCount) 69 | for i := range nodes { 70 | nodes[i] = NewMockNode(fmt.Sprintf("Node %d", i), nodeConfig, headersRepo, peers, txManager) 71 | } 72 | 73 | txs := GenerateMockTxs(totalTxCount, runtime.GOMAXPROCS(0)/2) 74 | 75 | trailingTxCount := 1000 76 | txs2 := GenerateMockTxs(trailingTxCount, 1) 77 | 78 | // Start all the nodes. 
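// The completion handling below fans in signals with reflect.Select: one case per node thread,
// plus one for the tx distribution goroutine and one for the tx manager, so whichever component
// finishes or fails first unblocks the test and can be identified by its select index.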
79 | var wait sync.WaitGroup 80 | var stopper threads.StopCombiner 81 | selects := make([]reflect.SelectCase, nodeCount+2) 82 | nodeThreads := make([]*threads.InterruptableThread, nodeCount) 83 | for i, node := range nodes { 84 | thread, complete := threads.NewInterruptableThreadComplete(fmt.Sprintf("Node %d", i), 85 | node.Run, &wait) 86 | selects[i] = reflect.SelectCase{ 87 | Dir: reflect.SelectRecv, 88 | Chan: reflect.ValueOf(complete), 89 | } 90 | nodeThreads[i] = thread 91 | stopper.Add(thread) 92 | } 93 | 94 | distributeInterrupt := make(chan interface{}) 95 | distributeComplete := make(chan error) 96 | selects[nodeCount] = reflect.SelectCase{ 97 | Dir: reflect.SelectRecv, 98 | Chan: reflect.ValueOf(distributeComplete), 99 | } 100 | 101 | txManagerComplete := make(chan error, 1) 102 | selects[nodeCount+1] = reflect.SelectCase{ 103 | Dir: reflect.SelectRecv, 104 | Chan: reflect.ValueOf(txManagerComplete), 105 | } 106 | 107 | runtime.GC() 108 | time.Sleep(time.Second) 109 | runtime.GC() 110 | time.Sleep(time.Second) 111 | runtime.GC() 112 | time.Sleep(time.Second) 113 | runtime.GC() 114 | 115 | t.Logf("Set GOMAXPROCS=%d, previously %d", cpuCount, runtime.GOMAXPROCS(cpuCount)) 116 | 117 | wait.Add(1) 118 | go func() { 119 | txManagerComplete <- txManager.Run(ctx) 120 | wait.Done() 121 | }() 122 | 123 | for _, thread := range nodeThreads { 124 | thread.Start(ctx) 125 | } 126 | time.Sleep(time.Second) // give nodes time to shake hands 127 | 128 | wait.Add(1) 129 | go func() { 130 | var priorMemStats runtime.MemStats 131 | runtime.ReadMemStats(&priorMemStats) 132 | t.Logf("Prior Mem Stats : %s", formatMemStats(priorMemStats)) 133 | 134 | txChunkCount := totalTxCount / checkCount 135 | for i := 0; i < checkCount; i++ { 136 | if txChunkCount >= len(txs) { 137 | txChunkCount = len(txs) - 1 138 | } 139 | if err := DistributeTxs(t, nodes, txs[:txChunkCount], distCount, txFrequency, 140 | distributeInterrupt); err != nil { 141 | distributeComplete <- err 142 | return 143 | } 144 | 145 | var memStats runtime.MemStats 146 | runtime.ReadMemStats(&memStats) 147 | t.Logf("Intermediate Mem Stats Diff : %s", formatMemStatsDiff(memStats, priorMemStats)) 148 | txs = txs[txChunkCount:] 149 | } 150 | 151 | // Send a slow rate of trailing txs to see mem stats settle back down 152 | txChunkCount = trailingTxCount / checkCount 153 | for i := 0; i < checkCount; i++ { 154 | if txChunkCount >= len(txs2) { 155 | txChunkCount = len(txs2) - 1 156 | } 157 | if err := DistributeTxs(t, nodes, txs2[:txChunkCount], distCount, time.Millisecond*10, 158 | distributeInterrupt); err != nil { 159 | distributeComplete <- err 160 | return 161 | } 162 | 163 | var memStats runtime.MemStats 164 | runtime.ReadMemStats(&memStats) 165 | t.Logf("Trailing Mem Stats Diff : %s", formatMemStatsDiff(memStats, priorMemStats)) 166 | txs2 = txs2[txChunkCount:] 167 | } 168 | 169 | wait.Done() 170 | distributeComplete <- nil 171 | }() 172 | 173 | waitComplete := make(chan interface{}) 174 | go func() { 175 | wait.Wait() 176 | close(waitComplete) 177 | }() 178 | 179 | selectIndex, selectValue, valueReceived := reflect.Select(selects) 180 | var selectErr error 181 | if valueReceived { 182 | selectInterface := selectValue.Interface() 183 | if selectInterface != nil { 184 | err, ok := selectInterface.(error) 185 | if ok { 186 | selectErr = err 187 | } 188 | } 189 | } 190 | 191 | if selectIndex == len(nodes) { 192 | if selectErr != nil { 193 | t.Fatalf("Failed to distribute transactions : %s", selectErr) 194 | } 195 | } else if selectIndex == 
len(nodes)+1 { 196 | t.Fatalf("Failed to run tx manager : %s", selectErr) 197 | } else if selectErr != nil { 198 | t.Fatalf("Node %d failed : %s", selectIndex, selectErr) 199 | } 200 | 201 | t.Logf("Shutting down") 202 | close(distributeInterrupt) 203 | txManager.Stop(ctx) 204 | stopper.Stop(ctx) // Stop all nodes 205 | 206 | select { 207 | case <-waitComplete: 208 | t.Logf("Shutdown completed") 209 | case <-time.After(time.Second): 210 | t.Fatalf("Shutdown timed out") 211 | } 212 | 213 | t.Logf("GOMAXPROCS: %d", runtime.GOMAXPROCS(0)) 214 | } 215 | 216 | func formatMemStats(memStats runtime.MemStats) string { 217 | return fmt.Sprintf("TotalAlloc: %0.6f, HeapAlloc %0.6f", float64(memStats.TotalAlloc)/1e6, 218 | float64(memStats.HeapAlloc)/1e6) 219 | } 220 | 221 | func formatMemStatsDiff(memStats, prior runtime.MemStats) string { 222 | return fmt.Sprintf("TotalAlloc: %0.6f, HeapAlloc %0.6f", 223 | (float64(memStats.TotalAlloc)-float64(prior.TotalAlloc))/1e6, 224 | (float64(memStats.HeapAlloc)-float64(prior.HeapAlloc))/1e6) 225 | } 226 | 227 | func GenerateMockTxs(totalTxCount, threadCount int) []*wire.MsgTx { 228 | var txCount uint64 229 | atomic.StoreUint64(&txCount, 0) 230 | complete := make(chan error, threadCount) 231 | result := make([]*wire.MsgTx, totalTxCount) 232 | offset := 0 233 | chunkCount := (totalTxCount / threadCount) + 1 234 | for i := 0; i < threadCount; i++ { 235 | if offset+chunkCount >= totalTxCount { 236 | chunkCount = totalTxCount - offset 237 | } 238 | set := result[offset : offset+chunkCount] 239 | go func() { 240 | complete <- generateMockTxSet(set, &txCount) 241 | }() 242 | offset += chunkCount 243 | } 244 | 245 | finishLogging := make(chan interface{}) 246 | go func() { 247 | for { 248 | select { 249 | case <-time.After(time.Minute): 250 | currentCount := atomic.LoadUint64(&txCount) 251 | fmt.Printf("%d txs generated (%0.2f)", currentCount, 252 | (float64(currentCount)/float64(totalTxCount))*100.0) 253 | case <-finishLogging: 254 | return 255 | } 256 | } 257 | }() 258 | 259 | completeCount := 0 260 | for err := range complete { 261 | completeCount++ 262 | if err != nil { 263 | panic(fmt.Sprintf("Failed to generate txs : %s", err)) 264 | } 265 | if completeCount == threadCount { 266 | break 267 | } 268 | } 269 | 270 | close(finishLogging) 271 | fmt.Printf("%d txs generated", atomic.LoadUint64(&txCount)) 272 | 273 | return result 274 | } 275 | 276 | func generateMockTxSet(txs []*wire.MsgTx, txCount *uint64) error { 277 | for i := range txs { 278 | tx := wire.NewMsgTx(0) 279 | 280 | inputCount := mathRand.Intn(5) + 1 281 | inputValue := uint64(0) 282 | for j := 0; j < inputCount; j++ { 283 | value := uint64(mathRand.Intn(10000) + 1) 284 | if j == 0 { 285 | value += 40 286 | } 287 | inputValue += value 288 | 289 | unlockingScript := make(bitcoin.Script, 34+73+1) 290 | mathRand.Read(unlockingScript[:]) 291 | 292 | var randHash bitcoin.Hash32 293 | mathRand.Read(randHash[:]) 294 | tx.AddTxIn(wire.NewTxIn(wire.NewOutPoint(&randHash, uint32(mathRand.Intn(5))), 295 | unlockingScript)) 296 | } 297 | 298 | inputValue -= 20 // tx fee 299 | for inputValue > 0 { 300 | value := uint64(mathRand.Intn(10000) + 1) 301 | if value > inputValue { 302 | value = inputValue 303 | } 304 | inputValue -= value 305 | 306 | key, _ := bitcoin.GenerateKey(bitcoin.MainNet) 307 | lockingScript, _ := key.LockingScript() 308 | tx.AddTxOut(wire.NewTxOut(value, lockingScript)) 309 | } 310 | 311 | txs[i] = tx 312 | 313 | atomic.AddUint64(txCount, 1) 314 | } 315 | 316 | return nil 317 | } 318 | 319 | 
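// Illustrative sketch, not part of the original test flow: it exercises GenerateMockTxs on its
// own with a fixed worker count of two and verifies that the requested number of mock
// transactions is produced, which is what Run_Test_Memory relies on before distributing them to
// the mock nodes.
func Test_GenerateMockTxs_Sketch(t *testing.T) {
	const txCount = 1000

	txs := GenerateMockTxs(txCount, 2)

	if len(txs) != txCount {
		t.Fatalf("Wrong generated tx count : got %d, want %d", len(txs), txCount)
	}

	totalSize := 0
	for i, tx := range txs {
		if tx == nil {
			t.Fatalf("Nil tx generated at index %d", i)
		}
		totalSize += tx.SerializeSize()
	}

	t.Logf("Generated %d mock txs totalling %d bytes", len(txs), totalSize)
}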
func DistributeTxs(t *testing.T, nodes []*MockNode, txs []*wire.MsgTx, distCount int, 320 | txFrequency time.Duration, interrupt <-chan interface{}) error { 321 | 322 | start := time.Now() 323 | 324 | l := len(nodes) 325 | for _, tx := range txs { 326 | // t.Logf("Sending tx %d: %s", i, tx.TxHash()) 327 | 328 | // Pick some nodes and deliver tx 329 | count := mathRand.Intn(distCount) + 1 330 | for i := 0; i < count; i++ { 331 | nodeIndex := mathRand.Intn(l) 332 | if err := nodes[nodeIndex].ExternalNode.ProvideTx(tx); err != nil { 333 | return errors.Wrapf(err, "provide tx: %d", i) 334 | } 335 | } 336 | 337 | if txFrequency == 0 { 338 | select { 339 | default: 340 | case <-interrupt: 341 | return errors.New("Distribute interrupted") 342 | } 343 | } else { 344 | select { 345 | case <-time.After(txFrequency): // wait to deliver next tx 346 | case <-interrupt: 347 | return errors.New("Distribute interrupted") 348 | } 349 | } 350 | } 351 | 352 | elapsed := time.Since(start).Seconds() 353 | 354 | t.Logf("Finished distributing %d txs in %0.4f seconds (%0.4f/sec)", len(txs), elapsed, 355 | float64(len(txs))/elapsed) 356 | return nil 357 | } 358 | 359 | type MockTxProcessor struct { 360 | } 361 | 362 | func NewMockTxProcessor() *MockTxProcessor { 363 | return &MockTxProcessor{} 364 | } 365 | 366 | // ProcessTx returns true if the tx is relevant. 367 | func (p *MockTxProcessor) ProcessTx(ctx context.Context, tx *wire.MsgTx) (bool, error) { 368 | // time.Sleep(time.Microsecond * 10) 369 | for i := 0; i < 100000; i++ { 370 | 371 | } 372 | return false, nil 373 | } 374 | 375 | // CancelTx specifies that a tx is no longer valid because a conflicting tx has been confirmed. 376 | func (p *MockTxProcessor) CancelTx(ctx context.Context, txid bitcoin.Hash32) error { 377 | return nil 378 | } 379 | 380 | // AddTxConflict specifies that there is an unconfirmed conflicting tx to a relevant tx. 
381 | func (p *MockTxProcessor) AddTxConflict(ctx context.Context, 382 | txid, conflictTxID bitcoin.Hash32) error { 383 | return nil 384 | } 385 | 386 | func (p *MockTxProcessor) ConfirmTx(ctx context.Context, txid bitcoin.Hash32, blockHeight int, 387 | merkleProof *merkle_proof.MerkleProof) error { 388 | return nil 389 | } 390 | 391 | func (p *MockTxProcessor) UpdateTxChainDepth(ctx context.Context, txid bitcoin.Hash32, 392 | chainDepth uint32) error { 393 | return nil 394 | } 395 | 396 | func (p *MockTxProcessor) ProcessCoinbaseTx(ctx context.Context, blockHash bitcoin.Hash32, 397 | tx *wire.MsgTx) error { 398 | return nil 399 | } 400 | -------------------------------------------------------------------------------- /block_downloader.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "time" 8 | 9 | "github.com/tokenized/logger" 10 | "github.com/tokenized/pkg/bitcoin" 11 | "github.com/tokenized/pkg/merkle_proof" 12 | "github.com/tokenized/pkg/wire" 13 | "github.com/tokenized/threads" 14 | 15 | "github.com/google/uuid" 16 | "github.com/pkg/errors" 17 | ) 18 | 19 | var ( 20 | errBlockDownloadCancelled = errors.New("Block Download Cancelled") 21 | ) 22 | 23 | type OnComplete func(context.Context, *BlockDownloader, error) 24 | 25 | type BlockDownloader struct { 26 | id uuid.UUID 27 | requesterID uuid.UUID 28 | hash bitcoin.Hash32 29 | height int 30 | 31 | txProcessor TxProcessor 32 | blockTxManager BlockTxManager 33 | 34 | Started chan interface{} 35 | Complete chan error 36 | 37 | stateLock sync.Mutex 38 | isCancelled bool 39 | isStarted bool 40 | isComplete bool 41 | canceller BlockRequestCanceller 42 | 43 | err error 44 | 45 | sync.Mutex 46 | } 47 | 48 | func NewBlockDownloader(txProcessor TxProcessor, blockTxManager BlockTxManager, hash bitcoin.Hash32, 49 | height int) *BlockDownloader { 50 | 51 | return &BlockDownloader{ 52 | hash: hash, 53 | height: height, 54 | txProcessor: txProcessor, 55 | blockTxManager: blockTxManager, 56 | Started: make(chan interface{}, 2), 57 | Complete: make(chan error, 2), 58 | } 59 | } 60 | 61 | func (bd *BlockDownloader) SetCanceller(id uuid.UUID, canceller BlockRequestCanceller) { 62 | bd.stateLock.Lock() 63 | bd.requesterID = id 64 | bd.canceller = canceller 65 | bd.stateLock.Unlock() 66 | } 67 | 68 | func (bd *BlockDownloader) ID() uuid.UUID { 69 | bd.Lock() 70 | result := bd.id 71 | bd.Unlock() 72 | 73 | return result 74 | } 75 | 76 | func (bd *BlockDownloader) RequesterID() uuid.UUID { 77 | bd.Lock() 78 | result := bd.requesterID 79 | bd.Unlock() 80 | 81 | return result 82 | } 83 | 84 | func (bd *BlockDownloader) Hash() bitcoin.Hash32 { 85 | bd.Lock() 86 | result := bd.hash 87 | bd.Unlock() 88 | 89 | return result 90 | } 91 | 92 | func (bd *BlockDownloader) Height() int { 93 | bd.Lock() 94 | result := bd.height 95 | bd.Unlock() 96 | 97 | return result 98 | } 99 | 100 | func (bd *BlockDownloader) Error() error { 101 | bd.Lock() 102 | result := bd.err 103 | bd.Unlock() 104 | 105 | return result 106 | } 107 | 108 | func (bd *BlockDownloader) Run(ctx context.Context, interrupt <-chan interface{}) error { 109 | start := time.Now() 110 | hash := bd.Hash() 111 | height := bd.Height() 112 | ctx = logger.ContextWithLogFields(ctx, 113 | logger.Stringer("connection", bd.RequesterID()), 114 | logger.Stringer("block_hash", hash), 115 | logger.Int("block_height", height)) 116 | 117 | // Wait for download to start 118 | select { 119 | case <-interrupt: 
120 | bd.cancelAndWaitForComplete(ctx) 121 | return threads.Interrupted 122 | 123 | case <-bd.Started: 124 | bd.stateLock.Lock() 125 | bd.isStarted = true 126 | bd.stateLock.Unlock() 127 | 128 | // We must start receiving the block before this time, otherwise it is a slow node or the node 129 | // is ignoring our request. 130 | case <-time.After(2 * time.Minute): 131 | logger.WarnWithFields(ctx, []logger.Field{ 132 | logger.MillisecondsFromNano("elapsed_ms", time.Since(start).Nanoseconds()), 133 | }, "Block request timed out") 134 | bd.cancelAndWaitForComplete(ctx) 135 | return ErrTimeout 136 | 137 | case err := <-bd.Complete: 138 | bd.stateLock.Lock() 139 | bd.isComplete = true 140 | bd.stateLock.Unlock() 141 | if err != nil && errors.Cause(err) != errBlockDownloadCancelled { 142 | logger.Warn(ctx, "Block download failed : %s", err) 143 | } 144 | 145 | return err 146 | } 147 | 148 | // Wait for completion 149 | select { 150 | case <-interrupt: 151 | bd.cancelAndWaitForComplete(ctx) 152 | return threads.Interrupted 153 | 154 | case <-time.After(time.Hour): 155 | logger.WarnWithFields(ctx, []logger.Field{ 156 | logger.MillisecondsFromNano("elapsed_ms", time.Since(start).Nanoseconds()), 157 | }, "Block download timed out") 158 | return ErrTimeout 159 | 160 | case err := <-bd.Complete: 161 | bd.stateLock.Lock() 162 | bd.isComplete = true 163 | bd.stateLock.Unlock() 164 | if err != nil && !IsCloseError(err) && errors.Cause(err) != errBlockDownloadCancelled { 165 | logger.Warn(ctx, "Block download failed : %s", err) 166 | } 167 | 168 | return err 169 | } 170 | } 171 | 172 | func (bd *BlockDownloader) cancelAndWaitForComplete(ctx context.Context) { 173 | bd.Cancel(ctx) 174 | 175 | count := 0 176 | start := time.Now() 177 | for { 178 | select { 179 | case <-time.After(time.Second * 10): 180 | count++ 181 | 182 | if count >= 60 { 183 | logger.WarnWithFields(ctx, []logger.Field{ 184 | logger.MillisecondsFromNano("elapsed_ms", time.Since(start).Nanoseconds()), 185 | }, "Block cancel timed out") 186 | 187 | return 188 | } 189 | 190 | logger.WarnWithFields(ctx, []logger.Field{ 191 | logger.MillisecondsFromNano("elapsed_ms", time.Since(start).Nanoseconds()), 192 | }, "Waiting for block download cancel") 193 | 194 | case err := <-bd.Complete: 195 | bd.stateLock.Lock() 196 | bd.isComplete = true 197 | bd.stateLock.Unlock() 198 | if err != nil && errors.Cause(err) != errBlockDownloadCancelled { 199 | logger.Warn(ctx, "Block download failed : %s", err) 200 | } 201 | 202 | return 203 | } 204 | } 205 | } 206 | 207 | func (bd *BlockDownloader) Stop(ctx context.Context) { 208 | hash := bd.Hash() 209 | 210 | isStarted := true 211 | wasStopped := false 212 | bd.stateLock.Lock() 213 | if bd.isComplete { 214 | bd.stateLock.Unlock() 215 | logger.WarnWithFields(ctx, []logger.Field{ 216 | logger.Stringer("connection", bd.RequesterID()), 217 | logger.Stringer("block_hash", hash), 218 | logger.Int("block_height", bd.Height()), 219 | }, "Stopping block download that already completed") 220 | return 221 | } 222 | 223 | if !bd.isCancelled { 224 | isStarted = bd.isStarted 225 | wasStopped = true 226 | } 227 | bd.isCancelled = true 228 | bd.stateLock.Unlock() 229 | 230 | if !isStarted { 231 | logger.InfoWithFields(ctx, []logger.Field{ 232 | logger.Stringer("connection", bd.RequesterID()), 233 | logger.Stringer("block_hash", hash), 234 | logger.Int("block_height", bd.Height()), 235 | }, "Stopping block download that hasn't started") 236 | bd.Started <- true 237 | bd.Complete <- errBlockDownloadCancelled 238 | } 239 | 240 | 
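// The sends above cannot block: Started and Complete are created with a buffer of two in
// NewBlockDownloader, so Stop can wake a Run that is still waiting for the download to start
// even if nothing is receiving on those channels yet.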
if wasStopped { 241 | logger.InfoWithFields(ctx, []logger.Field{ 242 | logger.Stringer("connection", bd.RequesterID()), 243 | logger.Stringer("block_hash", hash), 244 | logger.Int("block_height", bd.Height()), 245 | }, "Stopped block download") 246 | } 247 | } 248 | 249 | func (bd *BlockDownloader) Cancel(ctx context.Context) { 250 | hash := bd.Hash() 251 | ctx = logger.ContextWithLogFields(ctx, 252 | logger.Stringer("connection", bd.RequesterID()), 253 | logger.Stringer("block_hash", hash), 254 | logger.Int("block_height", bd.Height())) 255 | 256 | sendComplete := false // default to not sending complete signal 257 | sendStarted := false // default to not sending started signal 258 | bd.stateLock.Lock() 259 | if bd.isComplete { 260 | bd.stateLock.Unlock() 261 | logger.Warn(ctx, "Attempted cancel of block download that was already complete") 262 | return 263 | } 264 | 265 | if !bd.isCancelled { 266 | if bd.canceller != nil { 267 | alreadyStarted := bd.canceller.CancelBlockRequest(ctx, hash) // Cancel at connection 268 | if !alreadyStarted { 269 | sendComplete = true 270 | } 271 | } 272 | if !bd.isStarted { 273 | sendStarted = true 274 | } 275 | logger.InfoWithFields(ctx, []logger.Field{ 276 | logger.Bool("send_complete", sendComplete), 277 | logger.Bool("send_started", sendStarted), 278 | logger.Bool("has_canceller", bd.canceller != nil), 279 | }, "Cancelled block download with canceller") 280 | } 281 | bd.isCancelled = true 282 | bd.stateLock.Unlock() 283 | 284 | if sendStarted { 285 | // Trigger the "Started" select in "Run". 286 | bd.Started <- true 287 | } 288 | if sendComplete { 289 | // The handler function doesn't need to be waited for so just trigger the "Complete" select 290 | // in "Run". 291 | bd.Complete <- errBlockDownloadCancelled 292 | } 293 | } 294 | 295 | func (bd *BlockDownloader) wasCancelled() bool { 296 | bd.stateLock.Lock() 297 | result := bd.isCancelled 298 | bd.stateLock.Unlock() 299 | return result 300 | } 301 | 302 | func (bd *BlockDownloader) HandleBlock(ctx context.Context, header *wire.BlockHeader, 303 | txCount uint64, txChannel <-chan *wire.MsgTx) error { 304 | 305 | hash := *header.BlockHash() 306 | bd.Started <- hash 307 | 308 | ctx = logger.ContextWithLogFields(ctx, 309 | logger.Stringer("connection", bd.RequesterID()), 310 | logger.Stringer("block_hash", bd.Hash()), 311 | logger.Int("block_height", bd.Height())) 312 | 313 | if bd.wasCancelled() { 314 | logger.Warn(ctx, "Block download handler called for cancelled block") 315 | bd.Complete <- errBlockDownloadCancelled 316 | return errBlockDownloadCancelled 317 | } 318 | 319 | requestedHash := bd.Hash() 320 | 321 | // Verify this is the correct block 322 | if !requestedHash.Equal(&hash) { 323 | logger.WarnWithFields(ctx, []logger.Field{ 324 | logger.Stringer("received_block_hash", hash), 325 | }, "Wrong block") 326 | bd.Complete <- errors.Wrap(ErrWrongBlock, hash.String()) 327 | return nil 328 | } 329 | 330 | err := bd.handleBlock(ctx, header, txCount, txChannel) 331 | if err != nil && errors.Cause(err) != errBlockDownloadCancelled { 332 | logger.Warn(ctx, "Failed to handle block : %s", err) 333 | } 334 | 335 | bd.Complete <- err 336 | return err // return error to node 337 | } 338 | 339 | func (bd *BlockDownloader) handleBlock(ctx context.Context, header *wire.BlockHeader, 340 | txCount uint64, txChannel <-chan *wire.MsgTx) error { 341 | start := time.Now() 342 | 343 | hash := bd.Hash() 344 | height := bd.Height() 345 | 346 | // Process block txs 347 | var blockTxIDs []bitcoin.Hash32 348 | 349 | merkleTree 
:= merkle_proof.NewMerkleTree(true) 350 | var coinbaseTx *wire.MsgTx 351 | txByteCount := 0 352 | i := 0 353 | for tx := range txChannel { 354 | txByteCount += tx.SerializeSize() 355 | txid := *tx.TxHash() 356 | 357 | if i == 0 { 358 | coinbaseTx = tx 359 | } 360 | 361 | isRelevant, err := bd.txProcessor.ProcessTx(ctx, tx) 362 | if err != nil { 363 | for range txChannel { // flush channel 364 | } 365 | return errors.Wrap(err, "process tx") 366 | } 367 | 368 | if isRelevant { 369 | blockTxIDs = append(blockTxIDs, txid) 370 | merkleTree.AddMerkleProof(txid) 371 | } 372 | 373 | merkleTree.AddHash(txid) 374 | 375 | if bd.wasCancelled() { 376 | for range txChannel { // flush channel 377 | } 378 | return errBlockDownloadCancelled 379 | } 380 | 381 | i++ 382 | } 383 | 384 | if uint64(i) != txCount { 385 | return errBlockDownloadCancelled 386 | } 387 | 388 | // Check merkle root hash 389 | merkleRootHash, merkleProofs := merkleTree.FinalizeMerkleProofs() 390 | if !merkleRootHash.Equal(&header.MerkleRoot) { 391 | logger.WarnWithFields(ctx, []logger.Field{ 392 | logger.Stringer("calculated", merkleRootHash), 393 | logger.Stringer("header", header.MerkleRoot), 394 | }, "Invalid merkle root") 395 | return merkle_proof.ErrWrongMerkleRoot 396 | } 397 | 398 | // Merkle proofs should be in same order as relevant txs and send updated status to server. 399 | if len(merkleProofs) != len(blockTxIDs) { 400 | return fmt.Errorf("Wrong merkle proof count : got %d, want %d", len(merkleProofs), 401 | len(blockTxIDs)) 402 | } 403 | 404 | if bd.wasCancelled() { 405 | return errBlockDownloadCancelled 406 | } 407 | 408 | // Don't process coinbase or confirms until after merkle root is verified, in case block is 409 | // malicious and the wrong txs are provided. 410 | if err := bd.txProcessor.ProcessCoinbaseTx(ctx, hash, coinbaseTx); err != nil { 411 | return errors.Wrap(err, "process coinbase tx") 412 | } 413 | 414 | for i, txid := range blockTxIDs { 415 | merkleProofs[i].BlockHeader = header 416 | merkleProofs[i].BlockHash = &hash 417 | 418 | if err := bd.txProcessor.ConfirmTx(ctx, txid, height, merkleProofs[i]); err != nil { 419 | return errors.Wrap(err, "confirm tx") 420 | } 421 | } 422 | 423 | if err := bd.blockTxManager.AppendBlockTxIDs(ctx, hash, blockTxIDs); err != nil { 424 | return errors.Wrap(err, "save block txids") 425 | } 426 | 427 | logger.ElapsedWithFields(ctx, start, []logger.Field{ 428 | logger.Uint64("tx_count", txCount), 429 | logger.Float64("block_size_mb", 430 | float64(txByteCount+80+wire.VarIntSerializeSize(txCount))/1e6), 431 | logger.Int("relevant_tx_count", len(blockTxIDs)), 432 | }, "Processed block") 433 | return nil 434 | } 435 | -------------------------------------------------------------------------------- /messages.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/rand" 7 | "crypto/sha256" 8 | "encoding/binary" 9 | "fmt" 10 | "hash" 11 | "io" 12 | "net" 13 | "strconv" 14 | "strings" 15 | "sync" 16 | "time" 17 | 18 | "github.com/tokenized/logger" 19 | "github.com/tokenized/pkg/bitcoin" 20 | "github.com/tokenized/pkg/wire" 21 | "github.com/tokenized/threads" 22 | 23 | "github.com/pkg/errors" 24 | ) 25 | 26 | var ( 27 | ErrInvalidMessage = errors.New("Invalid Message") 28 | ErrConnectionClosed = errors.New("Connection Closed") 29 | ErrWrongNetwork = errors.New("Wrong Network") 30 | ErrMessageTooLarge = errors.New("Message Too Large") 31 | ErrChannelClosed = 
errors.New("Channel closed") 32 | 33 | // The endian encoding of messages. 34 | endian = binary.LittleEndian 35 | ) 36 | 37 | type MessageHandlerFunction func(context.Context, *wire.MessageHeader, io.Reader) error 38 | type MessageHandlers map[string]MessageHandlerFunction 39 | 40 | type MessageChannel struct { 41 | Channel chan wire.Message 42 | lock sync.Mutex 43 | open bool 44 | } 45 | 46 | func IsCloseError(err error) bool { 47 | if err == nil { 48 | return false 49 | } 50 | return errors.Cause(err) == io.EOF || errors.Cause(err) == io.ErrUnexpectedEOF || 51 | strings.Contains(err.Error(), "Closed") || 52 | strings.Contains(err.Error(), "closed pipe") || 53 | strings.Contains(err.Error(), "use of closed network connection") || 54 | strings.Contains(err.Error(), "connection reset by peer") 55 | } 56 | 57 | // Read incoming messages. 58 | func (n *BitcoinNode) readIncoming(ctx context.Context) error { 59 | for { 60 | n.connectionLock.Lock() 61 | connection := n.connection 62 | connectionClosedLocally := n.connectionClosedLocally 63 | n.connectionLock.Unlock() 64 | 65 | if connection == nil { 66 | if !connectionClosedLocally { 67 | logger.Verbose(ctx, "Connection closed remotely") 68 | } 69 | return nil // disconnected 70 | } 71 | 72 | if err := n.handleMessage(ctx, connection); err != nil { 73 | if IsCloseError(err) { 74 | n.connectionLock.Lock() 75 | connectionClosedLocally := n.connectionClosedLocally 76 | n.connectionLock.Unlock() 77 | if !connectionClosedLocally { 78 | logger.Verbose(ctx, "Connection closed remotely : %s", err) 79 | } else { 80 | logger.Verbose(ctx, "Disconnected : %s", err) 81 | } 82 | return nil 83 | } else { 84 | return errors.Wrap(err, "handle message") 85 | } 86 | } 87 | } 88 | } 89 | 90 | func (n *BitcoinNode) sendMessage(ctx context.Context, msg wire.Message) error { 91 | warning := logger.NewWaitingWarning(ctx, 3*time.Second, "Add outgoing message %s", 92 | msg.Command()) 93 | defer warning.Cancel() 94 | return n.outgoingMsgChannel.Add(msg) 95 | } 96 | 97 | func (n *BitcoinNode) sendOutgoing(ctx context.Context) error { 98 | for msg := range n.outgoingMsgChannel.Channel { 99 | n.connectionLock.Lock() 100 | connection := n.connection 101 | n.connectionLock.Unlock() 102 | 103 | if connection == nil { 104 | for range n.outgoingMsgChannel.Channel { // flush channel 105 | } 106 | return nil // disconnected 107 | } 108 | 109 | // logger.InfoWithFields(ctx, []logger.Field{ 110 | // logger.String("command", msg.Command()), 111 | // }, "Sending message") 112 | 113 | if _, err := wire.WriteMessageN(connection, msg, wire.ProtocolVersion, 114 | wire.BitcoinNet(n.config.Network)); err != nil { 115 | logger.VerboseWithFields(ctx, []logger.Field{ 116 | logger.String("command", msg.Command()), 117 | }, "Failed to send message : %s", err) 118 | for range n.outgoingMsgChannel.Channel { // flush channel 119 | } 120 | return errors.Wrap(err, "write message") 121 | } 122 | } 123 | 124 | return nil 125 | } 126 | 127 | func (c *MessageChannel) Add(msg wire.Message) error { 128 | c.lock.Lock() 129 | defer c.lock.Unlock() 130 | 131 | if !c.open { 132 | return ErrChannelClosed 133 | } 134 | 135 | c.Channel <- msg 136 | return nil 137 | } 138 | 139 | func (c *MessageChannel) Open(count int) error { 140 | c.lock.Lock() 141 | defer c.lock.Unlock() 142 | 143 | c.Channel = make(chan wire.Message, count) 144 | c.open = true 145 | return nil 146 | } 147 | 148 | func (c *MessageChannel) Close() error { 149 | c.lock.Lock() 150 | defer c.lock.Unlock() 151 | 152 | if !c.open { 153 | return 
errors.New("Channel closed") 154 | } 155 | 156 | close(c.Channel) 157 | c.open = false 158 | return nil 159 | } 160 | 161 | func buildVersionMsg(address, userAgent string, blockHeight int, 162 | receiveTxInventories bool) *wire.MsgVersion { 163 | 164 | // my local. This doesn't matter, we don't accept inbound connections. 165 | local := wire.NewNetAddressIPPort(net.IPv4(127, 0, 0, 1), 9333, 0) 166 | 167 | // build the address of the remote 168 | ip, port := parseAddress(address) 169 | remote := wire.NewNetAddressIPPort(ip, port, 0) 170 | 171 | version := wire.NewMsgVersion(remote, local, nonce(), int32(blockHeight)) 172 | version.UserAgent = userAgent 173 | if receiveTxInventories { 174 | version.Services = ServiceFull // tells nodes to send tx inventory messages 175 | } 176 | 177 | return version 178 | } 179 | 180 | func parseAddress(address string) (net.IP, uint16) { 181 | parts := strings.Split(address, ":") 182 | var port uint16 183 | if len(parts) == 2 { 184 | p, err := strconv.Atoi(parts[1]) 185 | if err == nil { 186 | port = uint16(p) 187 | address = parts[0] 188 | } 189 | } 190 | 191 | if strings.HasPrefix(address, "[") && strings.HasSuffix(address, "]") { 192 | address = address[1 : len(address)-2] 193 | } 194 | 195 | if ip := net.ParseIP(address); ip != nil { 196 | return ip, port 197 | } 198 | 199 | if len(address) > 2 && address[0] == '[' { 200 | parts := strings.Split(address[1:], "]") 201 | if len(parts) == 2 { 202 | address = parts[0] // remove port "[IP]:Port" 203 | } 204 | } else { 205 | parts := strings.Split(address, ":") 206 | if len(parts) == 2 { 207 | address = parts[0] // remove port "IP:Port" 208 | p, err := strconv.Atoi(parts[1]) 209 | if err == nil { 210 | port = uint16(p) 211 | } 212 | } 213 | } 214 | 215 | if ip := net.ParseIP(address); ip != nil { 216 | return ip, port 217 | } 218 | 219 | return nil, 0 220 | } 221 | 222 | func buildAddressesMessage(ctx context.Context, peers PeerRepository) (*wire.MsgAddr, error) { 223 | peerList, err := peers.Get(ctx, 1, -1) 224 | if err != nil { 225 | return nil, errors.Wrap(err, "get peers") 226 | } 227 | 228 | result := wire.NewMsgAddr() 229 | for i, peer := range peerList { 230 | if i == wire.MaxAddrPerMsg { 231 | break 232 | } 233 | 234 | address, port := parseAddress(peer.Address) 235 | result.AddAddress(wire.NewNetAddressIPPort(address, port, wire.SFNodeNetwork)) 236 | } 237 | 238 | return result, nil 239 | } 240 | 241 | func nonce() uint64 { 242 | buf := make([]byte, 8) 243 | rand.Read(buf) 244 | return binary.LittleEndian.Uint64(buf) 245 | } 246 | 247 | func (n *BitcoinNode) sendVerifyHeaderRequest(ctx context.Context) error { 248 | locatorHeaderHashes, err := n.headers.GetVerifyOnlyLocatorHashes(ctx) 249 | if err != nil { 250 | return errors.Wrap(err, "get verify only locator hashes") 251 | } 252 | 253 | n.Lock() 254 | n.lastHeaderRequest = locatorHeaderHashes 255 | n.Unlock() 256 | 257 | getheaders := wire.NewMsgGetHeaders() 258 | getheaders.ProtocolVersion = wire.ProtocolVersion 259 | 260 | for i := 0; i < len(locatorHeaderHashes); i++ { 261 | getheaders.AddBlockLocatorHash(&locatorHeaderHashes[i]) 262 | } 263 | 264 | if err := n.sendMessage(ctx, getheaders); err != nil { 265 | return errors.Wrap(err, "send message") 266 | } 267 | 268 | return nil 269 | } 270 | 271 | func (n *BitcoinNode) sendInitialHeaderRequest(ctx context.Context) error { 272 | locatorHeaderHashes, err := n.headers.GetLocatorHashes(ctx, 10) 273 | if err != nil { 274 | return errors.Wrap(err, "get locator hashes") 275 | } 276 | 277 | n.Lock() 278 
| n.lastHeaderRequest = locatorHeaderHashes 279 | n.Unlock() 280 | 281 | getheaders := wire.NewMsgGetHeaders() 282 | getheaders.ProtocolVersion = wire.ProtocolVersion 283 | 284 | for i := 0; i < len(locatorHeaderHashes); i++ { 285 | getheaders.AddBlockLocatorHash(&locatorHeaderHashes[i]) 286 | } 287 | 288 | if err := n.sendMessage(ctx, getheaders); err != nil { 289 | return errors.Wrap(err, "send message") 290 | } 291 | 292 | return nil 293 | } 294 | 295 | func (n *BitcoinNode) sendHeaderRequest(ctx context.Context) error { 296 | locatorHeaderHashes, err := n.headers.GetLocatorHashes(ctx, 3) 297 | if err != nil { 298 | return errors.Wrap(err, "get locator hashes") 299 | } 300 | 301 | n.Lock() 302 | n.lastHeaderRequest = locatorHeaderHashes 303 | n.Unlock() 304 | 305 | getheaders := wire.NewMsgGetHeaders() 306 | getheaders.ProtocolVersion = wire.ProtocolVersion 307 | 308 | for i := 0; i < len(locatorHeaderHashes); i++ { 309 | getheaders.AddBlockLocatorHash(&locatorHeaderHashes[i]) 310 | } 311 | 312 | if err := n.sendMessage(ctx, getheaders); err != nil { 313 | return errors.Wrap(err, "send message") 314 | } 315 | 316 | return nil 317 | } 318 | 319 | // readHeader reads a bitcoin P2P message header. 320 | func readHeader(r io.Reader, network bitcoin.Network) (*wire.MessageHeader, error) { 321 | result := &wire.MessageHeader{} 322 | if err := binary.Read(r, endian, &result.Network); err != nil { 323 | return result, errors.Wrap(err, "network") 324 | } 325 | 326 | if result.Network != network { 327 | return result, errors.Wrap(ErrWrongNetwork, fmt.Sprintf("got %s (%08x), want %s (%08x)", 328 | result.Network, uint32(result.Network), network, uint32(network))) 329 | } 330 | 331 | if _, err := io.ReadFull(r, result.Command[:]); err != nil { 332 | return result, errors.Wrap(err, "command") 333 | } 334 | 335 | // Only read 32 bits, but convert to 64 bits in case it is updated by an extended message. 336 | var length32 uint32 337 | if err := binary.Read(r, endian, &length32); err != nil { 338 | return result, errors.Wrap(err, "length") 339 | } 340 | result.Length = uint64(length32) 341 | 342 | if _, err := io.ReadFull(r, result.Checksum[:]); err != nil { 343 | return result, errors.Wrap(err, "checksum") 344 | } 345 | 346 | return result, nil 347 | } 348 | 349 | func readMessage(r io.Reader, header *wire.MessageHeader, msg wire.Message) error { 350 | // Check for maximum length based on the message type as a malicious client 351 | // could otherwise create a well-formed header and set the length to max 352 | // numbers in order to exhaust the machine's memory. 353 | maxLength := msg.MaxPayloadLength(wire.ProtocolVersion) 354 | if header.Length > maxLength { 355 | DiscardInput(r, header.Length) 356 | return errors.Wrap(ErrMessageTooLarge, fmt.Sprintf("%s: %d b > %d", header.CommandString(), 357 | header.Length, maxLength)) 358 | } 359 | 360 | var checkSum hash.Hash 361 | var rc io.Reader 362 | 363 | // Tee data read into checksum to calculate checksum. Extended messages don't use a checksum. 364 | if header.CommandString() != wire.CmdExtended { 365 | checkSum = sha256.New() 366 | rc = io.TeeReader(r, checkSum) 367 | } else { 368 | rc = r 369 | } 370 | 371 | // Read payload. 372 | payload := make([]byte, header.Length) 373 | if _, err := io.ReadFull(rc, payload); err != nil { 374 | return errors.Wrap(err, "read") 375 | } 376 | 377 | // Extended messages don't use a checksum. 
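// For standard messages the checksum is the first four bytes of the double SHA-256 of the
// payload; the TeeReader above accumulated the payload into checkSum as it was read, so the
// digest is finalized here and compared against the value carried in the message header.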
378 | if checkSum != nil { 379 | // Check the checksum 380 | single := checkSum.Sum(nil) 381 | double := sha256.Sum256(single[:]) 382 | if !bytes.Equal(double[0:4], header.Checksum[:]) { 383 | return errors.Wrap(ErrInvalidMessage, "bad checksum") 384 | } 385 | } 386 | 387 | // Unmarshal message 388 | if err := msg.BtcDecode(bytes.NewBuffer(payload), wire.ProtocolVersion); err != nil { 389 | return errors.Wrap(err, "decode") 390 | } 391 | 392 | return nil 393 | } 394 | 395 | func readInvVect(ctx context.Context, r io.Reader) (wire.InvVect, error) { 396 | var result wire.InvVect 397 | if err := binary.Read(r, endian, &result.Type); err != nil { 398 | return result, errors.Wrap(err, "read type") 399 | } 400 | 401 | if err := result.Hash.Deserialize(r); err != nil { 402 | return result, errors.Wrap(err, "read hash") 403 | } 404 | 405 | return result, nil 406 | } 407 | 408 | // DiscardInput reads and disposes of the specified number of bytes. 409 | func DiscardInput(r io.Reader, n uint64) error { 410 | maxSize := uint64(1024) // 1k at a time 411 | numReads := n / maxSize 412 | bytesRemaining := n % maxSize 413 | if n > 0 { 414 | b := make([]byte, maxSize) 415 | for i := uint64(0); i < numReads; i++ { 416 | if _, err := io.ReadFull(r, b); err != nil { 417 | return err 418 | } 419 | } 420 | } 421 | if bytesRemaining > 0 { 422 | b := make([]byte, bytesRemaining) 423 | if _, err := io.ReadFull(r, b); err != nil { 424 | return err 425 | } 426 | } 427 | 428 | return nil 429 | } 430 | 431 | func DiscardInputWithCounter(r io.Reader, n uint64, counter *threads.WriteCounter) error { 432 | return DiscardInput(r, n-counter.Count()) 433 | } 434 | -------------------------------------------------------------------------------- /headers/headers_test.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/binary" 7 | "fmt" 8 | "testing" 9 | 10 | "github.com/tokenized/bitcoin_reader/internal/platform/tests" 11 | "github.com/tokenized/logger" 12 | "github.com/tokenized/pkg/bitcoin" 13 | "github.com/tokenized/pkg/storage" 14 | 15 | "github.com/pkg/errors" 16 | ) 17 | 18 | func Test_Headers_Clean(t *testing.T) { 19 | ctx := tests.Context() 20 | store := storage.NewMockStorage() 21 | repo := NewRepository(DefaultConfig(), store) 22 | repo.DisableDifficulty() 23 | startTime := uint32(952644136) 24 | repo.InitializeWithTimeStamp(startTime) 25 | 26 | MockHeaders(ctx, repo, repo.LastHash(), startTime, 1100) 27 | 28 | otherHashes := make([]bitcoin.Hash32, repo.Height()+1) 29 | for i := range otherHashes { 30 | otherHashes[i] = repo.longest.AtHeight(i).Hash 31 | } 32 | 33 | branchHash, _ := repo.Hash(ctx, 1090) 34 | t.Logf("Branching after hash %d %s", 1090, branchHash) 35 | MockHeaders(ctx, repo, *branchHash, startTime+(1090*600), 20) 36 | 37 | if len(repo.branches) != 2 { 38 | t.Fatalf("Wrong branch count : got %d, want %d", len(repo.branches), 2) 39 | } 40 | 41 | if repo.longest.Height() != 1110 { 42 | t.Errorf("Wrong longest branch height : got %d, want %d", repo.longest.Height(), 1110) 43 | } 44 | 45 | longestHashes := make([]bitcoin.Hash32, repo.Height()+1) 46 | for i := range longestHashes { 47 | longestHashes[i] = repo.longest.AtHeight(i).Hash 48 | } 49 | 50 | var otherBranch *Branch 51 | if repo.longest == repo.branches[0] { 52 | otherBranch = repo.branches[1] 53 | } else { 54 | otherBranch = repo.branches[0] 55 | } 56 | 57 | if otherBranch.Height() != 1100 { 58 | t.Errorf("Wrong longest branch height : got 
%d, want %d", otherBranch.Height(), 1100) 59 | } 60 | 61 | if repo.longest.PrunedLowestHeight() != 1091 { 62 | t.Errorf("Wrong longest branch pruned lowest height : got %d, want %d", 63 | repo.longest.PrunedLowestHeight(), 1091) 64 | } 65 | 66 | if otherBranch.PrunedLowestHeight() != 0 { 67 | t.Errorf("Wrong other branch pruned lowest height : got %d, want %d", 68 | otherBranch.PrunedLowestHeight(), 0) 69 | } 70 | 71 | if err := repo.consolidate(ctx); err != nil { 72 | t.Fatalf("Failed to consolidate : %s", err) 73 | } 74 | 75 | if repo.longest == repo.branches[0] { 76 | otherBranch = repo.branches[1] 77 | } else { 78 | otherBranch = repo.branches[0] 79 | } 80 | 81 | if repo.longest.PrunedLowestHeight() != 0 { 82 | t.Errorf("Wrong longest branch pruned lowest height : got %d, want %d", 83 | repo.longest.PrunedLowestHeight(), 0) 84 | } 85 | 86 | if otherBranch.PrunedLowestHeight() != 1091 { 87 | t.Errorf("Wrong other branch pruned lowest height : got %d, want %d", 88 | otherBranch.PrunedLowestHeight(), 1091) 89 | } 90 | 91 | if repo.longest.Height() != 1110 { 92 | t.Errorf("Wrong longest branch height : got %d, want %d", repo.longest.Height(), 1110) 93 | } 94 | 95 | for i := 0; i < 1110; i++ { 96 | if !repo.longest.AtHeight(i).Hash.Equal(&longestHashes[i]) { 97 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, 98 | repo.longest.AtHeight(i).Hash, longestHashes[i]) 99 | } 100 | } 101 | 102 | if otherBranch.Height() != 1100 { 103 | t.Errorf("Wrong other branch height : got %d, want %d", otherBranch.Height(), 1100) 104 | } 105 | 106 | for i := 0; i < 1100; i++ { 107 | if !otherBranch.AtHeight(i).Hash.Equal(&otherHashes[i]) { 108 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, 109 | otherBranch.AtHeight(i).Hash, otherHashes[i]) 110 | } 111 | } 112 | 113 | if err := repo.saveMainBranch(ctx); err != nil { 114 | t.Fatalf("Failed to save main branch : %s", err) 115 | } 116 | 117 | if err := repo.saveBranches(ctx); err != nil { 118 | t.Fatalf("Failed to save branches : %s", err) 119 | } 120 | 121 | headerFiles, err := store.List(ctx, headersPath) 122 | if err != nil { 123 | t.Fatalf("Failed to list files : %s", err) 124 | } 125 | 126 | if len(headerFiles) != 5 { 127 | t.Errorf("Wrong header file count : got %d, want %d", len(headerFiles), 5) 128 | } 129 | 130 | for _, headerFile := range headerFiles { 131 | t.Logf("Header file : %s", headerFile) 132 | } 133 | 134 | data, err := store.Read(ctx, headersFilePath(0)) 135 | if err != nil { 136 | t.Fatalf("Failed to read first headers file : %s", err) 137 | } 138 | buf := bytes.NewReader(data) 139 | var version uint8 140 | if err := binary.Read(buf, endian, &version); err != nil { 141 | t.Fatalf("Failed to read version : %s", err) 142 | } 143 | if version != headersVersion { 144 | t.Fatalf("Wrong version : got %d, want %d", version, headersVersion) 145 | } 146 | 147 | for i := 0; i < headersPerFile; i++ { 148 | headerData := &HeaderData{} 149 | if err := headerData.Deserialize(buf); err != nil { 150 | t.Fatalf("Failed to read header data %d : %s", i, err) 151 | } 152 | 153 | if !headerData.Hash.Equal(&longestHashes[i]) { 154 | t.Fatalf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, headerData.Hash, 155 | longestHashes[i]) 156 | } 157 | } 158 | 159 | data, err = store.Read(ctx, headersFilePath(1)) 160 | if err != nil { 161 | t.Fatalf("Failed to read second headers file : %s", err) 162 | } 163 | if data[0] != 1 { 164 | t.Fatalf("Wrong version %d", data[0]) 165 | } 166 | buf = bytes.NewReader(data[1:]) 167 | for i := 
1000; i < 1111; i++ { 168 | headerData := &HeaderData{} 169 | if err := headerData.Deserialize(buf); err != nil { 170 | t.Fatalf("Failed to read header data %d : %s", i, err) 171 | } 172 | 173 | if !headerData.Hash.Equal(&longestHashes[i]) { 174 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, headerData.Hash, 175 | longestHashes[i]) 176 | } 177 | } 178 | 179 | if buf.Len() != 0 { 180 | t.Errorf("Extra header file data left : %d", buf.Len()) 181 | } 182 | 183 | if err := repo.prune(ctx, 500); err != nil { 184 | t.Fatalf("Failed to prune : %s", err) 185 | } 186 | 187 | if repo.longest.AtHeight(500) != nil { 188 | t.Errorf("Height 500 not pruned") 189 | } 190 | 191 | data, err = store.Read(ctx, fmt.Sprintf("%s/index", branchPath)) 192 | if err != nil { 193 | t.Fatalf("Failed to read branch index file : %s", err) 194 | } 195 | buf = bytes.NewReader(data) 196 | var indexCount uint32 197 | if err := binary.Read(buf, endian, &indexCount); err != nil { 198 | t.Fatalf("Failed to read branch index count : %s", err) 199 | } 200 | 201 | if indexCount != 2 { 202 | t.Fatalf("Wrong branch index count : got %d, want %d", indexCount, 2) 203 | } 204 | 205 | firstBranchHash := &bitcoin.Hash32{} 206 | if err := firstBranchHash.Deserialize(buf); err != nil { 207 | t.Fatalf("Failed to read first branch hash : %s", err) 208 | } 209 | 210 | secondBranchHash := &bitcoin.Hash32{} 211 | if err := secondBranchHash.Deserialize(buf); err != nil { 212 | t.Fatalf("Failed to read second branch hash : %s", err) 213 | } 214 | 215 | if buf.Len() != 0 { 216 | t.Errorf("Extra branch index data left : %d", buf.Len()) 217 | } 218 | 219 | firstBranch, err := LoadBranch(ctx, store, *firstBranchHash) 220 | if err != nil { 221 | t.Fatalf("Failed to load first branch : %s", err) 222 | } 223 | 224 | t.Logf("\nFirst Branch :" + firstBranch.String(" ")) 225 | 226 | if firstBranch.PrunedLowestHeight() != 0 { 227 | t.Errorf("Wrong first branch pruned lowest height : got %d, want %d", 228 | firstBranch.PrunedLowestHeight(), 0) 229 | } 230 | 231 | if firstBranch.Height() != 1110 { 232 | t.Errorf("Wrong first branch height : got %d, want %d", firstBranch.Height(), 1110) 233 | } 234 | 235 | if !firstBranch.Last().Hash.Equal(&repo.longest.Last().Hash) { 236 | t.Errorf("Wrong first branch last hash : \ngot : %s\nwant : %s", firstBranch.Last().Hash, 237 | repo.longest.Last().Hash) 238 | } 239 | 240 | for i := 0; i < 1110; i++ { 241 | if !firstBranch.AtHeight(i).Hash.Equal(&longestHashes[i]) { 242 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, 243 | firstBranch.AtHeight(i).Hash, longestHashes[i]) 244 | } 245 | } 246 | 247 | secondBranch, err := LoadBranch(ctx, store, *secondBranchHash) 248 | if err != nil { 249 | t.Fatalf("Failed to load second branch : %s", err) 250 | } 251 | 252 | t.Logf("\nSecond Branch :" + secondBranch.String(" ")) 253 | 254 | if secondBranch.PrunedLowestHeight() != 1091 { 255 | t.Errorf("Wrong second branch pruned lowest height : got %d, want %d", 256 | secondBranch.PrunedLowestHeight(), 1091) 257 | } 258 | 259 | if secondBranch.Height() != 1100 { 260 | t.Errorf("Wrong second branch height : got %d, want %d", secondBranch.Height(), 1100) 261 | } 262 | 263 | for i := 1091; i < 1100; i++ { 264 | if !secondBranch.AtHeight(i).Hash.Equal(&otherHashes[i]) { 265 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, 266 | secondBranch.AtHeight(i).Hash, otherHashes[i]) 267 | } 268 | } 269 | 270 | if err := repo.Save(ctx); err != nil { 271 | t.Fatalf("Failed to save repo : %s", err) 272 | 
} 273 | 274 | loadedRepo := NewRepository(DefaultConfig(), store) 275 | if err := loadedRepo.load(ctx, 500); err != nil { 276 | t.Fatalf("Failed to load repo : %s", err) 277 | } 278 | 279 | if len(loadedRepo.branches) != 2 { 280 | t.Fatalf("Wrong loaded branch count : got %d, want %d", len(loadedRepo.branches), 2) 281 | } 282 | 283 | firstBranch = loadedRepo.branches[0] 284 | if firstBranch.PrunedLowestHeight() != 610 { 285 | t.Errorf("Wrong first branch pruned lowest height : got %d, want %d", 286 | firstBranch.PrunedLowestHeight(), 610) 287 | } 288 | 289 | if firstBranch.Height() != 1110 { 290 | t.Errorf("Wrong first branch height : got %d, want %d", firstBranch.Height(), 1110) 291 | } 292 | 293 | for i := firstBranch.PrunedLowestHeight(); i <= firstBranch.Height(); i++ { 294 | if !firstBranch.AtHeight(i).Hash.Equal(&longestHashes[i]) { 295 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, 296 | firstBranch.AtHeight(i).Hash, longestHashes[i]) 297 | } 298 | } 299 | 300 | secondBranch = loadedRepo.branches[1] 301 | if secondBranch.PrunedLowestHeight() != 1091 { 302 | t.Errorf("Wrong second branch pruned lowest height : got %d, want %d", 303 | secondBranch.PrunedLowestHeight(), 1091) 304 | } 305 | 306 | if secondBranch.Height() != 1100 { 307 | t.Errorf("Wrong second branch height : got %d, want %d", secondBranch.Height(), 1100) 308 | } 309 | 310 | for i := firstBranch.PrunedLowestHeight(); i <= secondBranch.Height(); i++ { 311 | if !secondBranch.AtHeight(i).Hash.Equal(&otherHashes[i]) { 312 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, 313 | secondBranch.AtHeight(i).Hash, otherHashes[i]) 314 | } 315 | } 316 | } 317 | 318 | func Test_genesisHeaders(t *testing.T) { 319 | main := genesisHeader(bitcoin.MainNet) 320 | const mainHash = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" 321 | if main.BlockHash().String() != mainHash { 322 | t.Errorf("Wrong mainnet genesis header hash : \ngot : %s\nwant : %s", main.BlockHash(), 323 | mainHash) 324 | } 325 | t.Logf("Main Genesis Hash : %s", main.BlockHash()) 326 | 327 | test := genesisHeader(bitcoin.TestNet) 328 | const testHash = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943" 329 | if test.BlockHash().String() != testHash { 330 | t.Errorf("Wrong testnet genesis header hash : \ngot : %s\nwant : %s", test.BlockHash(), 331 | testHash) 332 | } 333 | t.Logf("Test Genesis Hash : %s", test.BlockHash()) 334 | } 335 | 336 | func Test_Headers_Load_empty(t *testing.T) { 337 | ctx := tests.Context() 338 | store := storage.NewMockStorage() 339 | repo := NewRepository(DefaultConfig(), store) 340 | if err := repo.Load(ctx); err != nil { 341 | t.Fatalf("Failed to load repo : %s", err) 342 | } 343 | } 344 | 345 | func Test_Headers_migrate(t *testing.T) { 346 | ctx := tests.Context() 347 | store := storage.NewMockStorage() 348 | repo := NewRepository(DefaultConfig(), store) 349 | repo.DisableDifficulty() 350 | 351 | startTime := uint32(952644136) 352 | repo.InitializeWithTimeStamp(startTime) 353 | MockHeaders(ctx, repo, repo.LastHash(), repo.LastTime(), 5100) 354 | 355 | t.Logf("Last header %d : %s", repo.Height(), repo.LastHash()) 356 | 357 | for file := 0; file < 6; file++ { 358 | if err := saveOldFile(ctx, store, file, repo.longest); err != nil { 359 | t.Fatalf("Failed to save old file %d : %s", file, err) 360 | } 361 | } 362 | 363 | loadedRepo := NewRepository(DefaultConfig(), store) 364 | if err := loadedRepo.Load(ctx); err != nil { 365 | t.Fatalf("Failed to load repo : %s", err) 366 | } 367 
| 368 | if repo.Height() != loadedRepo.Height() { 369 | t.Errorf("Wrong loaded height : got %d, want %d", loadedRepo.Height(), repo.Height()) 370 | } 371 | 372 | lastHash := repo.LastHash() 373 | gotLastHash := loadedRepo.LastHash() 374 | if !lastHash.Equal(&gotLastHash) { 375 | t.Errorf("Wrong loaded last hash : got %s, want %s", loadedRepo.LastHash(), lastHash) 376 | } 377 | } 378 | 379 | func saveOldFile(ctx context.Context, store storage.Storage, file int, branch *Branch) error { 380 | fileHeight := file * headersPerFile 381 | nextFileHeight := fileHeight + headersPerFile 382 | path := headersFilePath(file) 383 | 384 | lastHeight := branch.Height() 385 | if lastHeight < nextFileHeight { 386 | nextFileHeight = lastHeight + 1 387 | } 388 | 389 | logger.InfoWithFields(ctx, []logger.Field{ 390 | logger.Int("file", file), 391 | logger.Int("last_height", nextFileHeight-1), 392 | }, "Saving old headers") 393 | 394 | buf := &bytes.Buffer{} 395 | if err := binary.Write(buf, endian, uint8(0)); err != nil { 396 | return errors.Wrap(err, "version") 397 | } 398 | 399 | for height := fileHeight; height < nextFileHeight; height++ { 400 | header := branch.AtHeight(height) 401 | if header == nil { 402 | return fmt.Errorf("Could not fetch header %d", height) 403 | } 404 | 405 | if err := header.Header.Serialize(buf); err != nil { 406 | return errors.Wrapf(err, "write header %d", height) 407 | } 408 | } 409 | 410 | logger.InfoWithFields(ctx, []logger.Field{ 411 | logger.Int("file", file), 412 | logger.Int("bytes", buf.Len()), 413 | }, "File bytes") 414 | 415 | if err := store.Write(ctx, path, buf.Bytes(), nil); err != nil { 416 | return errors.Wrapf(err, "write : %s", path) 417 | } 418 | 419 | return nil 420 | } 421 | 422 | func Test_Headers_getOldData(t *testing.T) { 423 | ctx := tests.Context() 424 | store := storage.NewFilesystemStorage(storage.Config{ 425 | Bucket: "", 426 | Root: "test_fixtures", 427 | MaxRetries: 2, 428 | RetryDelay: 1000, 429 | }) 430 | 431 | path := headersFilePath(719) 432 | t.Logf("Using path : %s", path) 433 | headers, err := getOldData(ctx, store, path) 434 | if err != nil { 435 | t.Fatalf("Failed to get old data : %s", err) 436 | } 437 | 438 | if len(headers) != 1000 { 439 | t.Errorf("Wrong headers count : got %d, want %d", len(headers), 1000) 440 | } 441 | 442 | t.Logf("Loaded %d headers", len(headers)) 443 | 444 | for i, header := range headers { 445 | if i%100 == 0 { 446 | t.Logf("Header %d : %s", i, header.BlockHash()) 447 | } 448 | } 449 | } 450 | 451 | func Test_Headers_loadHistoricalHashHeights(t *testing.T) { 452 | ctx := tests.Context() 453 | store := storage.NewMockStorage() 454 | repo := NewRepository(DefaultConfig(), store) 455 | repo.DisableDifficulty() 456 | 457 | startTime := uint32(952644136) 458 | repo.InitializeWithTimeStamp(startTime) 459 | MockHeaders(ctx, repo, repo.LastHash(), repo.LastTime(), 100) 460 | hash := repo.LastHash() 461 | height := repo.Height() 462 | 463 | MockHeaders(ctx, repo, repo.LastHash(), repo.LastTime(), 10001) 464 | hash2 := repo.LastHash() 465 | height2 := repo.Height() 466 | 467 | MockHeaders(ctx, repo, repo.LastHash(), repo.LastTime(), 50000) 468 | 469 | if err := repo.Save(ctx); err != nil { 470 | t.Fatalf("Failed to save repo : %s", err) 471 | } 472 | 473 | readRepo := NewRepository(DefaultConfig(), store) 474 | readRepo.DisableDifficulty() 475 | if err := readRepo.Load(ctx); err != nil { 476 | t.Fatalf("Failed to load repo : %s", err) 477 | } 478 | 479 | readHeight := readRepo.HashHeight(hash) 480 | t.Logf("Header height 1 : 
%d", readHeight) 481 | if readHeight != height { 482 | t.Errorf("Wrong read header height 1 : got %d, want %d", readHeight, height) 483 | } 484 | 485 | readHeight2 := readRepo.HashHeight(hash2) 486 | t.Logf("Header height 2 : %d", readHeight2) 487 | if readHeight2 != height2 { 488 | t.Errorf("Wrong read header height 2 : got %d, want %d", readHeight2, height2) 489 | } 490 | } 491 | 492 | func Test_Headers_Reorg_1(t *testing.T) { 493 | ctx := tests.Context() 494 | store := storage.NewMockStorage() 495 | repo := NewRepository(DefaultConfig(), store) 496 | repo.DisableDifficulty() 497 | 498 | startTime := uint32(952644136) 499 | repo.InitializeWithTimeStamp(startTime) 500 | MockHeaders(ctx, repo, repo.LastHash(), repo.LastTime(), 100) 501 | hash := repo.LastHash() 502 | height := repo.Height() 503 | t.Logf("Header height : %d : %s", height, hash) 504 | originalLatestHash := hash 505 | 506 | hashes := make([]bitcoin.Hash32, repo.longest.Height()+1) 507 | for i := range hashes { 508 | hashes[i] = repo.longest.AtHeight(i).Hash 509 | // t.Logf("Header : %d : %s", i, hashes[i]) 510 | } 511 | 512 | branchHash := hashes[height-1] 513 | t.Logf("Branch height : %d : %s", height-1, hashes[height-1]) 514 | 515 | MockHeaders(ctx, repo, branchHash, repo.LastTime(), 2) 516 | 517 | hash = repo.LastHash() 518 | height = repo.Height() 519 | t.Logf("Header height : %d : %s", height, hash) 520 | 521 | if height != 101 { 522 | t.Errorf("Wrong latest block height : got %d, want %d", height, 101) 523 | } 524 | 525 | if hash.Equal(&originalLatestHash) { 526 | t.Errorf("Wrong latest block hash : got %s, don't want %s", hash, originalLatestHash) 527 | } 528 | 529 | at100, err := repo.Hash(ctx, 100) 530 | if err != nil { 531 | t.Fatalf("Failed to get block at height 100 : %s", err) 532 | } 533 | 534 | if at100 == nil { 535 | t.Fatalf("Missing hash at 100") 536 | } 537 | 538 | t.Logf("Block hash at 100 : %s", at100) 539 | 540 | if at100.Equal(&originalLatestHash) { 541 | t.Errorf("Wrong block hash at height 100 : got %s, don't want %s", hash, originalLatestHash) 542 | } 543 | } 544 | -------------------------------------------------------------------------------- /bitcoin_node.go: -------------------------------------------------------------------------------- 1 | package bitcoin_reader 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "net" 7 | "sync" 8 | "sync/atomic" 9 | "time" 10 | 11 | "github.com/tokenized/logger" 12 | "github.com/tokenized/pkg/bitcoin" 13 | "github.com/tokenized/pkg/wire" 14 | "github.com/tokenized/threads" 15 | 16 | "github.com/google/uuid" 17 | "github.com/pkg/errors" 18 | ) 19 | 20 | const ( 21 | ServiceFull = 0x01 22 | ) 23 | 24 | var ( 25 | ErrTimeout = errors.New("Timeout") 26 | ErrBusy = errors.New("Busy") 27 | 28 | // ErrNotFullService node is not a full service node. 29 | ErrNotFullService = errors.New("Not Full Service") 30 | ) 31 | 32 | // BitcoinNode is a connection to a Bitcoin node in the peer to peer network that can be used to 33 | // send requests. 
34 | type BitcoinNode struct { 35 | id uuid.UUID 36 | address string 37 | userAgent string 38 | config *Config 39 | 40 | headers HeaderRepository 41 | peers PeerRepository 42 | 43 | connection net.Conn // Connection to trusted full node 44 | connectionClosedLocally bool 45 | connectionLock sync.Mutex 46 | // IP net.IP 47 | // Port uint16 48 | 49 | pingNonce uint64 50 | pingSent time.Time 51 | 52 | handlers MessageHandlers 53 | headerHandler MessageHandlerFunction 54 | lastHeaderHash *bitcoin.Hash32 // last header received from the node 55 | lastHeaderRequest []bitcoin.Hash32 56 | 57 | requestTime *time.Time 58 | blockRequest *bitcoin.Hash32 59 | blockHandler HandleBlock 60 | blockReader io.ReadCloser 61 | blockOnStop OnStop 62 | lastRequestedBlock *bitcoin.Hash32 63 | 64 | txManager *TxManager 65 | txReceivedCount uint64 66 | txReceivedSize uint64 67 | 68 | outgoingMsgChannel MessageChannel 69 | handshakeChannel chan wire.Message 70 | 71 | handshakeIsComplete atomic.Value 72 | isReady atomic.Value 73 | isStopped atomic.Value 74 | verified atomic.Value 75 | protoconfCount int 76 | 77 | isVerifyOnly bool // disconnect after chain verification 78 | 79 | interrupt <-chan interface{} 80 | 81 | sync.Mutex 82 | } 83 | 84 | type HeaderRepository interface { 85 | GetNewHeadersAvailableChannel() <-chan *wire.BlockHeader 86 | Height() int 87 | Hash(ctx context.Context, height int) (*bitcoin.Hash32, error) 88 | HashHeight(hash bitcoin.Hash32) int 89 | LastHash() bitcoin.Hash32 90 | LastTime() uint32 91 | PreviousHash(bitcoin.Hash32) (*bitcoin.Hash32, int) 92 | GetLocatorHashes(ctx context.Context, max int) ([]bitcoin.Hash32, error) 93 | GetVerifyOnlyLocatorHashes(ctx context.Context) ([]bitcoin.Hash32, error) 94 | VerifyHeader(ctx context.Context, header *wire.BlockHeader) error 95 | ProcessHeader(ctx context.Context, header *wire.BlockHeader) error 96 | Stop(ctx context.Context) 97 | } 98 | 99 | type PeerRepository interface { 100 | Add(ctx context.Context, address string) (bool, error) 101 | Get(ctx context.Context, minScore, maxScore int32) (PeerList, error) 102 | 103 | UpdateTime(ctx context.Context, address string) bool 104 | UpdateScore(ctx context.Context, address string, delta int32) bool 105 | } 106 | 107 | func NewBitcoinNode(address, userAgent string, config *Config, headers HeaderRepository, 108 | peers PeerRepository) *BitcoinNode { 109 | 110 | result := &BitcoinNode{ 111 | id: uuid.New(), 112 | address: address, 113 | userAgent: userAgent, 114 | config: config, 115 | headers: headers, 116 | peers: peers, 117 | handlers: make(MessageHandlers), 118 | handshakeChannel: make(chan wire.Message, 10), 119 | } 120 | 121 | result.handshakeIsComplete.Store(false) 122 | result.isReady.Store(false) 123 | result.isStopped.Store(false) 124 | result.verified.Store(false) 125 | 126 | // Only enable messages that are required for handshake and verification. 127 | result.handlers[wire.CmdVersion] = result.handleVersion 128 | result.handlers[wire.CmdVerAck] = result.handleVerack 129 | result.handlers[wire.CmdHeaders] = result.handleHeadersVerify 130 | result.handlers[wire.CmdProtoconf] = result.handleProtoconf 131 | result.handlers[wire.CmdPing] = result.handlePing 132 | result.handlers[wire.CmdReject] = result.handleReject 133 | 134 | // Extended messages must be handled to properly get the size of the message. The payload 135 | // message will still be ignored if the tx and block handlers aren't enabled. 
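	// (Reading note, inferred from the comment above and the wire package rather
	// than stated here: extended messages are the alternate framing used when a
	// payload is too large for the standard message header, so their handler must
	// always be registered to keep the stream in sync even when the payload itself
	// is discarded. See handleExtended for the authoritative behavior.)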
136 | result.handlers[wire.CmdExtended] = result.handleExtended 137 | 138 | return result 139 | } 140 | 141 | func (n *BitcoinNode) ID() uuid.UUID { 142 | n.Lock() 143 | defer n.Unlock() 144 | 145 | return n.id 146 | } 147 | 148 | // SetVerifyOnly sets the node to only verify the correct chain and then disconnect. 149 | func (n *BitcoinNode) SetVerifyOnly() { 150 | n.Lock() 151 | defer n.Unlock() 152 | 153 | n.isVerifyOnly = true 154 | } 155 | 156 | func (n *BitcoinNode) SetTxManager(txManager *TxManager) { 157 | n.Lock() 158 | defer n.Unlock() 159 | 160 | n.txManager = txManager 161 | } 162 | 163 | func (n *BitcoinNode) GetAndResetTxReceivedCount() (uint64, uint64) { 164 | n.Lock() 165 | defer n.Unlock() 166 | 167 | resultCount := n.txReceivedCount 168 | resultSize := n.txReceivedSize 169 | n.txReceivedCount = 0 170 | n.txReceivedSize = 0 171 | return resultCount, resultSize 172 | } 173 | 174 | func (n *BitcoinNode) IsBusy() bool { 175 | n.Lock() 176 | defer n.Unlock() 177 | 178 | return n.requestTime != nil 179 | } 180 | 181 | func (n *BitcoinNode) HasBlock(ctx context.Context, hash bitcoin.Hash32, height int) bool { 182 | n.Lock() 183 | id := n.id 184 | lastRequestedBlock := n.lastRequestedBlock 185 | lastHeaderHash := n.lastHeaderHash 186 | n.Unlock() 187 | 188 | ctx = logger.ContextWithLogFields(ctx, logger.Stringer("connection", id)) 189 | 190 | if lastRequestedBlock != nil && lastRequestedBlock.Equal(&hash) { 191 | return false // already requested this block and failed 192 | } 193 | 194 | if lastHeaderHash == nil { 195 | return false 196 | } 197 | 198 | if lastHeaderHash.Equal(&hash) { 199 | return true 200 | } 201 | 202 | lastHeight := n.headers.HashHeight(*lastHeaderHash) 203 | if lastHeight == -1 { 204 | logger.WarnWithFields(ctx, []logger.Field{ 205 | logger.Stringer("last_hash", lastHeaderHash), 206 | }, "Last header height not found") 207 | return false // node's last header isn't in our chain 208 | } 209 | 210 | return lastHeight >= height 211 | } 212 | 213 | func (n *BitcoinNode) RequestBlock(ctx context.Context, hash bitcoin.Hash32, handler HandleBlock, 214 | onStop OnStop) error { 215 | n.Lock() 216 | ctx = logger.ContextWithLogFields(ctx, logger.Stringer("connection", n.id)) 217 | 218 | if n.requestTime != nil { 219 | n.Unlock() 220 | return ErrBusy 221 | } 222 | 223 | now := time.Now() 224 | n.requestTime = &now 225 | n.blockRequest = &hash 226 | n.handlers[wire.CmdBlock] = n.handleBlock 227 | n.blockHandler = handler 228 | n.blockReader = nil 229 | n.lastRequestedBlock = &hash 230 | n.Unlock() 231 | 232 | logger.InfoWithFields(ctx, []logger.Field{ 233 | logger.Stringer("block_hash", hash), 234 | logger.Int("block_height", n.headers.HashHeight(hash)), 235 | }, "Requesting block") 236 | getBlocks := wire.NewMsgGetData() // Block request message 237 | getBlocks.AddInvVect(wire.NewInvVect(wire.InvTypeBlock, &hash)) 238 | if err := n.sendMessage(ctx, getBlocks); err != nil { 239 | return errors.Wrap(err, "send block request") 240 | } 241 | 242 | n.Lock() 243 | n.blockOnStop = onStop 244 | n.Unlock() 245 | 246 | return nil 247 | } 248 | 249 | // CancelBlockRequest cancels a request for a block. It returns true if the block handler has 250 | // already been called and started handling the block. 
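// A hedged caller-side sketch of interpreting the return value (the variables are
// illustrative placeholders, not part of this package):
//
//	if node.CancelBlockRequest(ctx, blockHash) {
//		// The block handler had already started; its reader was closed, so the
//		// in-progress download is interrupted and the handler will see a read error.
//	} else {
//		// No download had started (or no matching request existed), so the block
//		// can simply be requested from another node.
//	}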
251 | func (n *BitcoinNode) CancelBlockRequest(ctx context.Context, hash bitcoin.Hash32) bool { 252 | n.Lock() 253 | defer n.Unlock() 254 | 255 | if n.blockRequest == nil { 256 | logger.Warn(ctx, "Block request not found to cancel") 257 | return false 258 | } 259 | 260 | if !n.blockRequest.Equal(&hash) { 261 | logger.WarnWithFields(ctx, []logger.Field{ 262 | logger.Stringer("current_block_hash", n.blockRequest), 263 | }, "Wrong block request found to cancel") 264 | return false 265 | } 266 | 267 | if n.blockReader != nil { 268 | // Stop in progress handling of block 269 | n.blockReader.Close() 270 | n.blockReader = nil 271 | n.blockOnStop = nil 272 | n.blockHandler = nil 273 | logger.Info(ctx, "Cancelled in progress block") 274 | return true 275 | } 276 | 277 | // Stop handling a block before it happens 278 | n.blockOnStop = nil 279 | n.blockHandler = nil 280 | logger.Info(ctx, "Cancelled block request before download started") 281 | return false 282 | } 283 | 284 | func (n *BitcoinNode) RequestHeaders(ctx context.Context) error { 285 | n.Lock() 286 | ctx = logger.ContextWithLogFields(ctx, logger.Stringer("connection", n.id)) 287 | 288 | if n.requestTime != nil { 289 | n.Unlock() 290 | return ErrBusy 291 | } 292 | n.Unlock() 293 | 294 | logger.Verbose(ctx, "Requesting headers") 295 | if err := n.sendHeaderRequest(ctx); err != nil { 296 | return errors.Wrap(err, "send header request") 297 | } 298 | 299 | return nil 300 | } 301 | 302 | func (n *BitcoinNode) RequestTxs(ctx context.Context, txids []bitcoin.Hash32) error { 303 | ctx = logger.ContextWithLogFields(ctx, logger.Stringer("connection", n.ID())) 304 | logger.Info(ctx, "Requesting %d previous txs", len(txids)) 305 | 306 | invRequest := wire.NewMsgGetData() 307 | for _, txid := range txids { 308 | hash := txid 309 | item := wire.NewInvVect(wire.InvTypeTx, &hash) 310 | 311 | if err := invRequest.AddInvVect(item); err != nil { 312 | // Too many requests for one message, send it and start a new message. 
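			// (AddInvVect is expected to fail only when the message hits the wire
			// package's per-message inventory limit; flushing and starting a fresh
			// getdata message keeps each request within that limit.)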
313 | if err := n.sendMessage(ctx, invRequest); err != nil { 314 | return errors.Wrap(err, "send tx request") 315 | } 316 | 317 | invRequest = wire.NewMsgGetData() 318 | if err := invRequest.AddInvVect(item); err != nil { 319 | return errors.Wrap(err, "add tx to request") 320 | } 321 | } 322 | } 323 | 324 | if len(invRequest.InvList) > 0 { 325 | if err := n.sendMessage(ctx, invRequest); err != nil { 326 | return errors.Wrap(err, "send tx request") 327 | } 328 | } 329 | 330 | return nil 331 | } 332 | 333 | func (n *BitcoinNode) SetBlockHandler(handler MessageHandlerFunction) { 334 | n.Lock() 335 | defer n.Unlock() 336 | 337 | if handler == nil { 338 | delete(n.handlers, wire.CmdBlock) 339 | } else { 340 | n.handlers[wire.CmdBlock] = handler 341 | } 342 | } 343 | 344 | func (n *BitcoinNode) SetHeaderHandler(handler MessageHandlerFunction) { 345 | n.Lock() 346 | defer n.Unlock() 347 | 348 | n.headerHandler = handler 349 | } 350 | 351 | func (n *BitcoinNode) SetTxHandler(handler MessageHandlerFunction) { 352 | n.Lock() 353 | defer n.Unlock() 354 | 355 | if handler == nil { 356 | delete(n.handlers, wire.CmdTx) 357 | } else { 358 | n.handlers[wire.CmdTx] = handler 359 | } 360 | } 361 | 362 | func (n *BitcoinNode) Run(ctx context.Context, interrupt <-chan interface{}) error { 363 | logger.VerboseWithFields(ctx, []logger.Field{ 364 | logger.String("address", n.address), 365 | }, "Connecting to node") 366 | 367 | n.interrupt = interrupt 368 | 369 | if err := n.connect(ctx); err != nil { 370 | n.isReady.Store(false) 371 | n.isStopped.Store(true) 372 | logger.VerboseWithFields(ctx, []logger.Field{ 373 | logger.String("address", n.address), 374 | }, "Failed to connect to node : %s", err) 375 | return nil 376 | } 377 | 378 | return n.run(ctx, interrupt) 379 | } 380 | 381 | func (n *BitcoinNode) run(ctx context.Context, interrupt <-chan interface{}) error { 382 | n.interrupt = interrupt 383 | n.outgoingMsgChannel.Open(1000) 384 | 385 | var stopper threads.StopCombiner 386 | var wait sync.WaitGroup 387 | 388 | stopper.Add(n) // close connection and outgoing channel to stop incoming and outgoing threads 389 | 390 | readIncomingThread, readIncomingComplete := threads.NewUninterruptableThreadComplete("Read Incoming", 391 | n.readIncoming, &wait) 392 | 393 | sendOutgoingThread, sendOutgoingComplete := threads.NewUninterruptableThreadComplete("Send Outgoing", 394 | n.sendOutgoing, &wait) 395 | 396 | pingThread, pingComplete := threads.NewPeriodicThreadComplete("Ping", n.sendPing, 397 | 10*time.Minute, &wait) 398 | stopper.Add(pingThread) 399 | 400 | handshakeThread := threads.NewInterruptableThread("Handshake", n.handshake) 401 | handshakeThread.SetWait(&wait) 402 | stopper.Add(handshakeThread) 403 | 404 | // Start threads 405 | readIncomingThread.Start(ctx) 406 | sendOutgoingThread.Start(ctx) 407 | pingThread.Start(ctx) 408 | handshakeThread.Start(ctx) 409 | 410 | // Wait for a thread to complete 411 | select { 412 | case <-interrupt: 413 | case <-readIncomingComplete: 414 | case <-sendOutgoingComplete: 415 | case <-pingComplete: 416 | case <-time.After(n.config.Timeout.Duration): 417 | logger.Verbose(ctx, "Node reached timeout") 418 | } 419 | 420 | stopper.Stop(ctx) 421 | 422 | n.Lock() 423 | n.isReady.Store(false) 424 | blockOnStop := n.blockOnStop 425 | n.Unlock() 426 | 427 | if blockOnStop != nil { 428 | logger.Info(ctx, "Calling block request \"on stop\" function") 429 | waitWarning := logger.NewWaitingWarning(ctx, time.Second, "Call block \"on stop\"") 430 | blockOnStop(ctx) 431 | waitWarning.Cancel() 
432 | } 433 | 434 | waitWarning := logger.NewWaitingWarning(ctx, 3*time.Second, "Node Shutdown") 435 | wait.Wait() 436 | waitWarning.Cancel() 437 | 438 | n.isStopped.Store(true) 439 | 440 | return threads.CombineErrors( 441 | handshakeThread.Error(), 442 | readIncomingThread.Error(), 443 | sendOutgoingThread.Error(), 444 | ) 445 | } 446 | 447 | func (n *BitcoinNode) Stop(ctx context.Context) { 448 | logger.Info(ctx, "Stopping: %s", n.Address()) 449 | n.connectionLock.Lock() 450 | if n.connection != nil { 451 | n.connection.Close() 452 | n.connection = nil 453 | n.connectionClosedLocally = true 454 | } 455 | n.connectionLock.Unlock() 456 | 457 | n.outgoingMsgChannel.Close() 458 | } 459 | 460 | func (n *BitcoinNode) HandshakeIsComplete() bool { 461 | return n.handshakeIsComplete.Load().(bool) 462 | } 463 | 464 | func (n *BitcoinNode) IsReady() bool { 465 | return n.isReady.Load().(bool) 466 | } 467 | 468 | func (n *BitcoinNode) IsStopped() bool { 469 | return n.isStopped.Load().(bool) 470 | } 471 | 472 | func (n *BitcoinNode) Verified() bool { 473 | return n.verified.Load().(bool) 474 | } 475 | 476 | func (n *BitcoinNode) Address() string { 477 | n.Lock() 478 | defer n.Unlock() 479 | 480 | return n.address 481 | } 482 | 483 | // handshake performs the initial handshake with the node. 484 | func (n *BitcoinNode) handshake(ctx context.Context, interrupt <-chan interface{}) error { 485 | versionReceived := false 486 | verAckSent := false 487 | verAckReceived := false 488 | 489 | n.Lock() 490 | address := n.address 491 | userAgent := n.userAgent 492 | receiveTxs := n.txManager != nil 493 | n.Unlock() 494 | 495 | if err := n.sendMessage(ctx, buildVersionMsg(address, userAgent, n.headers.Height(), 496 | receiveTxs)); err != nil { 497 | return errors.Wrap(err, "send version") 498 | } 499 | 500 | for { 501 | select { 502 | case msg, ok := <-n.handshakeChannel: 503 | if !ok { 504 | return nil 505 | } 506 | 507 | switch message := msg.(type) { 508 | case *wire.MsgVersion: 509 | logger.VerboseWithFields(ctx, []logger.Field{ 510 | logger.String("address", address), 511 | logger.String("user_agent", message.UserAgent), 512 | logger.Int32("protocol", message.ProtocolVersion), 513 | logger.Formatter("services", "%016x", message.Services), 514 | logger.Int32("block_height", message.LastBlock), 515 | }, "Version") 516 | versionReceived = true 517 | 518 | if !verAckSent { 519 | if err := n.sendMessage(ctx, &wire.MsgVerAck{}); err != nil { 520 | return errors.Wrap(err, "send ver ack") 521 | } 522 | verAckSent = true 523 | } 524 | 525 | if verAckReceived { 526 | return n.sendVerifyInitiation(ctx) 527 | } 528 | 529 | case *wire.MsgVerAck: 530 | verAckReceived = true 531 | if versionReceived { 532 | return n.sendVerifyInitiation(ctx) 533 | } 534 | } 535 | 536 | case <-time.After(3 * time.Second): 537 | logger.Verbose(ctx, "Handshake timed out") 538 | n.Stop(ctx) 539 | return nil 540 | 541 | case <-interrupt: 542 | return nil 543 | } 544 | } 545 | } 546 | 547 | func (n *BitcoinNode) sendVerifyInitiation(ctx context.Context) error { 548 | n.handshakeIsComplete.Store(true) 549 | 550 | if err := n.sendMessage(ctx, wire.NewMsgProtoconf()); err != nil { 551 | return errors.Wrap(err, "send protoconf") 552 | } 553 | 554 | // Send header request to check the node is on the same chain 555 | if err := n.sendVerifyHeaderRequest(ctx); err != nil { 556 | return errors.Wrap(err, "send verify header request") 557 | } 558 | 559 | return nil 560 | } 561 | 562 | func (n *BitcoinNode) accept(ctx context.Context) error { 563 | 
n.Lock() 564 | 565 | // Switch headers handler to tracking mode. 566 | n.handlers[wire.CmdHeaders] = n.handleHeadersTrack 567 | 568 | // Enable more commands. These messages are ignored before this point. 569 | n.handlers[wire.CmdAddr] = n.handleAddress 570 | n.handlers[wire.CmdPong] = n.handlePong 571 | n.handlers[wire.CmdGetAddr] = n.handleGetAddresses 572 | 573 | // Enable tx handling 574 | if n.txManager != nil { 575 | n.handlers[wire.CmdInv] = n.handleInventory 576 | n.handlers[wire.CmdTx] = n.handleTx 577 | } 578 | 579 | isVerifyOnly := n.isVerifyOnly 580 | 581 | n.isReady.Store(true) 582 | n.verified.Store(true) 583 | n.Unlock() 584 | 585 | if isVerifyOnly { 586 | logger.Verbose(ctx, "Disconnecting after chain verification") 587 | n.Stop(ctx) 588 | return nil 589 | } 590 | 591 | if err := n.sendMessage(ctx, wire.NewMsgSendHeaders()); err != nil { 592 | return errors.Wrap(err, "send \"sendheaders\" request") 593 | } 594 | 595 | if err := n.sendMessage(ctx, wire.NewMsgGetAddr()); err != nil { 596 | return errors.Wrap(err, "send peer request") 597 | } 598 | 599 | // Send initial header request to get any new headers the node might have. 600 | if err := n.sendInitialHeaderRequest(ctx); err != nil { 601 | return errors.Wrap(err, "send initial header request") 602 | } 603 | 604 | addresses, err := buildAddressesMessage(ctx, n.peers) 605 | if err != nil { 606 | return errors.Wrap(err, "build addresses") 607 | } 608 | 609 | logger.Verbose(ctx, "Sending %d addresses", len(addresses.AddrList)) 610 | if err := n.sendMessage(ctx, addresses); err != nil { 611 | return errors.Wrap(err, "send addresses") 612 | } 613 | 614 | return nil 615 | } 616 | 617 | func (n *BitcoinNode) sendPing(ctx context.Context) error { 618 | n.Lock() 619 | defer n.Unlock() 620 | 621 | n.pingNonce = nonce() 622 | n.pingSent = time.Now() 623 | 624 | logger.Debug(ctx, "Sending ping 0x%16x", n.pingNonce) 625 | return n.sendMessage(ctx, wire.NewMsgPing(n.pingNonce)) 626 | } 627 | 628 | func (n *BitcoinNode) connect(ctx context.Context) error { 629 | connection, err := net.DialTimeout("tcp", n.address, 5*time.Second) 630 | if err != nil { 631 | return err 632 | } 633 | 634 | addr := connection.RemoteAddr() 635 | ip, port := parseAddress(addr.String()) 636 | if ip == nil { 637 | logger.Info(ctx, "Connected to unknown IP") 638 | } else { 639 | logger.Verbose(ctx, "Connected to %s:%d", ip.String(), port) 640 | } 641 | 642 | n.connectionLock.Lock() 643 | n.connection = connection 644 | n.connectionLock.Unlock() 645 | n.peers.UpdateTime(ctx, n.address) 646 | return nil 647 | } 648 | 649 | func (n *BitcoinNode) mockConnect(ctx context.Context, connection net.Conn) error { 650 | logger.Info(ctx, "Connected to mock connection") 651 | 652 | n.connectionLock.Lock() 653 | n.connection = connection 654 | n.connectionLock.Unlock() 655 | return nil 656 | } 657 | -------------------------------------------------------------------------------- /headers/branches_test.go: -------------------------------------------------------------------------------- 1 | package headers 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | 7 | "github.com/tokenized/bitcoin_reader/internal/platform/tests" 8 | "github.com/tokenized/pkg/bitcoin" 9 | "github.com/tokenized/pkg/storage" 10 | "github.com/tokenized/pkg/wire" 11 | ) 12 | 13 | func Test_Branches_General(t *testing.T) { 14 | genesis := genesisHeader(bitcoin.MainNet) 15 | 16 | initialBranch, _ := NewBranch(nil, -1, genesis) 17 | if initialBranch == nil { 18 | t.Fatalf("Failed to create initial branch") 19 | 
} 20 | 21 | // Add some headers 22 | previousHash := initialBranch.Last().Hash 23 | timestamp := initialBranch.Last().Header.Timestamp 24 | headers := make([]*wire.BlockHeader, 11) 25 | headers[0] = genesis 26 | for i := 1; i <= 10; i++ { 27 | header := &wire.BlockHeader{ 28 | Version: 1, 29 | PrevBlock: previousHash, 30 | Timestamp: timestamp, 31 | Bits: 0x1d00ffff, 32 | Nonce: rand.Uint32(), 33 | } 34 | rand.Read(header.MerkleRoot[:]) 35 | headers[i] = header 36 | t.Logf("Header at %02d : %s", i, header.BlockHash()) 37 | 38 | if !initialBranch.Add(header) { 39 | t.Fatalf("Failed to add header %d", i) 40 | } 41 | 42 | previousHash = *header.BlockHash() 43 | timestamp += 600 44 | } 45 | 46 | if !initialBranch.Last().Hash.Equal(&previousHash) { 47 | t.Fatalf("Wrong last hash : \ngot : %s\nwant : %s", initialBranch.Last().Hash, 48 | previousHash) 49 | } 50 | 51 | if initialBranch.Height() != 10 { 52 | t.Fatalf("Wrong initial branch height : got %d, want %d", initialBranch.Height(), 10) 53 | } 54 | 55 | at := initialBranch.AtHeight(5) 56 | if !at.Hash.Equal(headers[5].BlockHash()) { 57 | t.Fatalf("Wrong hash at 5 : \ngot : %s\nwant : %s", at.Hash, headers[5].BlockHash()) 58 | } 59 | 60 | branchHash := at.Hash 61 | 62 | branchHeader := &wire.BlockHeader{ 63 | Version: 1, 64 | PrevBlock: at.Hash, 65 | Timestamp: timestamp, 66 | Bits: 0x1d00ffff, 67 | Nonce: rand.Uint32(), 68 | } 69 | rand.Read(branchHeader.MerkleRoot[:]) 70 | timestamp += 600 71 | 72 | branches := Branches{initialBranch} 73 | foundBranch, foundHeight := branches.Find(branchHeader.PrevBlock) 74 | if foundBranch == nil { 75 | t.Fatalf("Failed to find branch") 76 | } 77 | if foundHeight != 5 { 78 | t.Fatalf("Wrong found height : got %d, want %d", foundHeight, 5) 79 | } 80 | 81 | branchAt5, err := NewBranch(initialBranch, 5, branchHeader) 82 | if err != nil { 83 | t.Fatalf("Failed to create branch at 5 : %s", err) 84 | } 85 | branches = append(branches, branchAt5) 86 | 87 | foundBranch, foundHeight = branches.Find(*branchHeader.BlockHash()) 88 | if foundBranch == nil { 89 | t.Fatalf("Failed to find branch") 90 | } 91 | if foundBranch != branchAt5 { 92 | t.Fatalf("Wrong branch found") 93 | } 94 | if foundHeight != 6 { 95 | t.Fatalf("Wrong found height : got %d, want %d", foundHeight, 6) 96 | } 97 | 98 | nextBranchHeader := &wire.BlockHeader{ 99 | Version: 1, 100 | PrevBlock: *branchHeader.BlockHash(), 101 | Timestamp: timestamp, 102 | Bits: 0x1d00ffff, 103 | Nonce: rand.Uint32(), 104 | } 105 | rand.Read(nextBranchHeader.MerkleRoot[:]) 106 | timestamp += 600 107 | 108 | if !branchAt5.Add(nextBranchHeader) { 109 | t.Fatalf("Failed to add header to branch at 5") 110 | } 111 | 112 | if availableHeight := initialBranch.AvailableHeight(); availableHeight != 0 { 113 | t.Errorf("Wrong available height for initial branch : got %d, want %d", availableHeight, 0) 114 | } 115 | 116 | if availableHeight := branchAt5.AvailableHeight(); availableHeight != 0 { 117 | t.Errorf("Wrong available height for branch at 5 : got %d, want %d", availableHeight, 0) 118 | } 119 | 120 | foundBranch, foundHeight = branches.Find(*nextBranchHeader.BlockHash()) 121 | if foundBranch == nil { 122 | t.Fatalf("Failed to find branch") 123 | } 124 | if foundBranch != branchAt5 { 125 | t.Fatalf("Wrong branch found") 126 | } 127 | if foundHeight != 7 { 128 | t.Fatalf("Wrong found height : got %d, want %d", foundHeight, 7) 129 | } 130 | 131 | getGenesis := branchAt5.AtHeight(0) 132 | if getGenesis == nil { 133 | t.Fatalf("Failed to get genesis from branch at 5") 134 | } 135 | 
if !getGenesis.Hash.Equal(genesis.BlockHash()) { 136 | t.Fatalf("Wrong genesis hash : \ngot : %s\nwant : %s", getGenesis.Hash, 137 | genesis.BlockHash()) 138 | } 139 | 140 | getFirst := branchAt5.AtHeight(1) 141 | if getFirst == nil { 142 | t.Fatalf("Failed to get genesis from branch at 5") 143 | } 144 | if !getFirst.Hash.Equal(headers[1].BlockHash()) { 145 | t.Fatalf("Wrong first hash : \ngot : %s\nwant : %s", getFirst.Hash, 146 | headers[1].BlockHash()) 147 | } 148 | 149 | intersectHash := initialBranch.IntersectHash(branchAt5) 150 | if !intersectHash.Equal(&branchHash) { 151 | t.Errorf("Wrong intersect hash : \ngot : %s\nwant : %s", intersectHash, branchHash) 152 | } 153 | 154 | intersectHash = branchAt5.IntersectHash(initialBranch) 155 | if !intersectHash.Equal(&branchHash) { 156 | t.Errorf("Wrong intersect hash : \ngot : %s\nwant : %s", intersectHash, branchHash) 157 | } 158 | } 159 | 160 | func Test_Branches_Prune(t *testing.T) { 161 | genesis := genesisHeader(bitcoin.MainNet) 162 | 163 | initialBranch, _ := NewBranch(nil, -1, genesis) 164 | if initialBranch == nil { 165 | t.Fatalf("Failed to create initial branch") 166 | } 167 | 168 | // Add some headers 169 | previousHash := initialBranch.Last().Hash 170 | timestamp := initialBranch.Last().Header.Timestamp 171 | headers := make([]*wire.BlockHeader, 11) 172 | headers[0] = genesis 173 | for i := 1; i <= 10; i++ { 174 | header := &wire.BlockHeader{ 175 | Version: 1, 176 | PrevBlock: previousHash, 177 | Timestamp: timestamp, 178 | Bits: 0x1d00ffff, 179 | Nonce: rand.Uint32(), 180 | } 181 | rand.Read(header.MerkleRoot[:]) 182 | headers[i] = header 183 | t.Logf("Header at %02d : %s", i, header.BlockHash()) 184 | 185 | if !initialBranch.Add(header) { 186 | t.Fatalf("Failed to add header %d", i) 187 | } 188 | 189 | previousHash = *header.BlockHash() 190 | timestamp += 600 191 | } 192 | 193 | if availableHeight := initialBranch.AvailableHeight(); availableHeight != 0 { 194 | t.Errorf("Wrong available height for initial branch : got %d, want %d", availableHeight, 0) 195 | } 196 | 197 | initialBranch.Prune(5) 198 | 199 | if height := initialBranch.Height(); height != 10 { 200 | t.Errorf("Wrong height : got %d, want %d", height, 10) 201 | } 202 | 203 | if prunedHeight := initialBranch.PrunedLowestHeight(); prunedHeight != 5 { 204 | t.Errorf("Wrong pruned lowest height : got %d, want %d", prunedHeight, 5) 205 | } 206 | 207 | if availableHeight := initialBranch.AvailableHeight(); availableHeight != 5 { 208 | t.Errorf("Wrong available height : got %d, want %d", availableHeight, 5) 209 | } 210 | 211 | for i := 0; i <= 4; i++ { 212 | if initialBranch.AtHeight(i) != nil { 213 | t.Errorf("%d should be pruned", i) 214 | } else { 215 | t.Logf("Verified height %d is pruned", i) 216 | } 217 | 218 | if initialBranch.Find(*headers[i].BlockHash()) != -1 { 219 | t.Errorf("Should not find pruned header %d", i) 220 | } else { 221 | t.Logf("Verified find %d is pruned", i) 222 | } 223 | } 224 | 225 | for i := 5; i <= 10; i++ { 226 | data := initialBranch.AtHeight(i) 227 | if data == nil { 228 | t.Errorf("Missing header at %d", i) 229 | continue 230 | } 231 | 232 | t.Logf("Header at %02d : %s", i, data.Hash) 233 | if !data.Hash.Equal(headers[i].BlockHash()) { 234 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, data.Hash, 235 | headers[i].BlockHash()) 236 | } 237 | 238 | if height := initialBranch.Find(*headers[i].BlockHash()); height != i { 239 | t.Errorf("Find header at wrong height : got %d, want %d", height, i) 240 | } 241 | } 242 | } 243 | 
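// Summary of the pruning behavior exercised above (stated as observed in the test,
// not as a spec): Prune(n) discards the in-memory headers below height n while
// Height() is unchanged; AtHeight returns nil and Find returns -1 for the discarded
// heights, and PrunedLowestHeight/AvailableHeight both report n afterwards. A
// caller-side guard might therefore look like (illustrative sketch):
//
//	if data := branch.AtHeight(h); data != nil {
//		// header still held in memory
//	} else if h < branch.PrunedLowestHeight() {
//		// header was pruned from memory; reload it from storage if it is needed
//	}
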
244 | func Test_Branches_Save(t *testing.T) { 245 | ctx := tests.Context() 246 | store := storage.NewMockStorage() 247 | genesis := genesisHeader(bitcoin.MainNet) 248 | 249 | initialBranch, _ := NewBranch(nil, -1, genesis) 250 | if initialBranch == nil { 251 | t.Fatalf("Failed to create initial branch") 252 | } 253 | 254 | // Add some headers 255 | previousHash := initialBranch.Last().Hash 256 | timestamp := initialBranch.Last().Header.Timestamp 257 | headers := make([]*wire.BlockHeader, 11) 258 | headers[0] = genesis 259 | for i := 1; i <= 10; i++ { 260 | header := &wire.BlockHeader{ 261 | Version: 1, 262 | PrevBlock: previousHash, 263 | Timestamp: timestamp, 264 | Bits: 0x1d00ffff, 265 | Nonce: rand.Uint32(), 266 | } 267 | rand.Read(header.MerkleRoot[:]) 268 | headers[i] = header 269 | t.Logf("Header at %02d : %s", i, header.BlockHash()) 270 | 271 | if !initialBranch.Add(header) { 272 | t.Fatalf("Failed to add header %d", i) 273 | } 274 | 275 | previousHash = *header.BlockHash() 276 | timestamp += 600 277 | } 278 | 279 | if err := initialBranch.Save(ctx, store); err != nil { 280 | t.Fatalf("Failed to save initial branch : %s", err) 281 | } 282 | 283 | initialBranch.Prune(5) 284 | 285 | for i := 11; i <= 15; i++ { 286 | header := &wire.BlockHeader{ 287 | Version: 1, 288 | PrevBlock: previousHash, 289 | Timestamp: timestamp, 290 | Bits: 0x1d00ffff, 291 | Nonce: rand.Uint32(), 292 | } 293 | rand.Read(header.MerkleRoot[:]) 294 | headers = append(headers, header) 295 | t.Logf("Header at %02d : %s", i, header.BlockHash()) 296 | 297 | if !initialBranch.Add(header) { 298 | t.Fatalf("Failed to add header %d", i) 299 | } 300 | 301 | previousHash = *header.BlockHash() 302 | timestamp += 600 303 | } 304 | 305 | if height := initialBranch.Height(); height != 15 { 306 | t.Errorf("Wrong height after prune and append : got %d, want %d", height, 15) 307 | } 308 | 309 | if err := initialBranch.Save(ctx, store); err != nil { 310 | t.Fatalf("Failed to save/append initial branch : %s", err) 311 | } 312 | 313 | readBranch, err := LoadBranch(ctx, store, *initialBranch.firstHeader.BlockHash()) 314 | if err != nil { 315 | t.Fatalf("Failed to load branch : %s", err) 316 | } 317 | 318 | if height := readBranch.Height(); height != initialBranch.Height() { 319 | t.Errorf("Wrong read branch height : got %d, want %d", height, initialBranch.Height()) 320 | } 321 | 322 | for i, header := range headers { 323 | data := readBranch.AtHeight(i) 324 | if data == nil { 325 | t.Errorf("Missing header at %d", i) 326 | continue 327 | } 328 | 329 | t.Logf("Header at %02d : %s", i, data.Hash) 330 | if !data.Hash.Equal(header.BlockHash()) { 331 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, data.Hash, 332 | header.BlockHash()) 333 | } 334 | 335 | if height := readBranch.Find(*header.BlockHash()); height != i { 336 | t.Errorf("Find header at wrong height : got %d, want %d", height, i) 337 | } 338 | } 339 | } 340 | 341 | func Test_Branches_Consolidate(t *testing.T) { 342 | ctx := tests.Context() 343 | store := storage.NewMockStorage() 344 | genesis := genesisHeader(bitcoin.MainNet) 345 | 346 | rand.Seed(100) 347 | 348 | initialBranch, _ := NewBranch(nil, -1, genesis) 349 | if initialBranch == nil { 350 | t.Fatalf("Failed to create initial branch") 351 | } 352 | 353 | MockHeadersOnBranch(initialBranch, 10) 354 | 355 | if err := initialBranch.Save(ctx, store); err != nil { 356 | t.Fatalf("Failed to save initial branch : %s", err) 357 | } 358 | 359 | t.Logf("Initial Branch :") 360 | t.Logf(initialBranch.String(" ")) 361 
| t.Logf(initialBranch.StringHeaderHashes(" ")) 362 | 363 | initialBranch.Prune(5) 364 | 365 | previousHeader := initialBranch.AtHeight(8) 366 | t.Logf("Linking new branch after %s", previousHeader.Hash) 367 | 368 | otherBranchHeader := &wire.BlockHeader{ 369 | Version: 1, 370 | PrevBlock: previousHeader.Hash, 371 | Timestamp: previousHeader.Header.Timestamp + 600, 372 | Bits: 0x1d00ffff, 373 | Nonce: rand.Uint32(), 374 | } 375 | rand.Read(otherBranchHeader.MerkleRoot[:]) 376 | 377 | otherBranch, err := NewBranch(initialBranch, 8, otherBranchHeader) 378 | if err != nil { 379 | t.Fatalf("Failed to create other branch : %s", err) 380 | } 381 | MockHeadersOnBranch(otherBranch, 11) 382 | 383 | t.Logf("Other Branch :") 384 | t.Logf(otherBranch.String(" ")) 385 | t.Logf(otherBranch.StringHeaderHashes(" ")) 386 | 387 | previousThirdHeader := initialBranch.AtHeight(6) 388 | thirdBranchHeader := &wire.BlockHeader{ 389 | Version: 1, 390 | PrevBlock: previousThirdHeader.Hash, 391 | Timestamp: previousThirdHeader.Header.Timestamp + 600, 392 | Bits: 0x1d00ffff, 393 | Nonce: rand.Uint32(), 394 | } 395 | rand.Read(thirdBranchHeader.MerkleRoot[:]) 396 | 397 | thirdBranch, err := NewBranch(initialBranch, 6, thirdBranchHeader) 398 | if err != nil { 399 | t.Fatalf("Failed to create third branch : %s", err) 400 | } 401 | MockHeadersOnBranch(thirdBranch, 5) 402 | 403 | t.Logf("Third Branch :") 404 | t.Logf(thirdBranch.String(" ")) 405 | t.Logf(thirdBranch.StringHeaderHashes(" ")) 406 | 407 | MockHeadersOnBranch(initialBranch, 8) 408 | 409 | consolidateBranch, linkHeight, err := otherBranch.Consolidate(ctx, store, initialBranch) 410 | if err != nil { 411 | t.Fatalf("Failed to consolidate branch : %s", err) 412 | } 413 | 414 | t.Logf("Consolidated :") 415 | t.Logf(consolidateBranch.String(" ")) 416 | t.Logf(consolidateBranch.StringHeaderHashes(" ")) 417 | 418 | if linkHeight != 8 { 419 | t.Errorf("Wrong parent link height : got %d, want %d", linkHeight, 8) 420 | } 421 | 422 | if consolidateBranch.Height() != otherBranch.Height() { 423 | t.Errorf("Wrong consolidated height : got %d, want %d", consolidateBranch.Height(), 424 | otherBranch.Height()) 425 | } 426 | 427 | if consolidateBranch.PrunedLowestHeight() != initialBranch.PrunedLowestHeight() { 428 | t.Errorf("Wrong consolidated pruned lowest height : got %d, want %d", 429 | consolidateBranch.PrunedLowestHeight(), initialBranch.PrunedLowestHeight()) 430 | } 431 | 432 | // Verify headers on consolidated branch 433 | for i := 6; i <= 8; i++ { 434 | want := initialBranch.AtHeight(i) 435 | got := consolidateBranch.AtHeight(i) 436 | 437 | if want == nil { 438 | t.Fatalf("Missing want header %d", i) 439 | } 440 | if got == nil { 441 | t.Fatalf("Missing got header %d", i) 442 | } 443 | 444 | if !want.Hash.Equal(&got.Hash) { 445 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, got.Hash, want.Hash) 446 | } 447 | } 448 | 449 | for i := 9; i <= 20; i++ { 450 | want := otherBranch.AtHeight(i) 451 | got := consolidateBranch.AtHeight(i) 452 | 453 | if want == nil { 454 | t.Fatalf("Missing want header %d", i) 455 | } 456 | if got == nil { 457 | t.Fatalf("Missing got header %d", i) 458 | } 459 | 460 | if !want.Hash.Equal(&got.Hash) { 461 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, got.Hash, want.Hash) 462 | } 463 | } 464 | 465 | truncateBranch, err := initialBranch.Truncate(ctx, store, consolidateBranch, linkHeight) 466 | if err != nil { 467 | t.Fatalf("Failed to truncate branch : %s", err) 468 | } 469 | 470 | t.Logf("Truncated :") 471 | 
t.Logf(truncateBranch.String(" ")) 472 | t.Logf(truncateBranch.StringHeaderHashes(" ")) 473 | 474 | if truncateBranch.Height() != initialBranch.Height() { 475 | t.Errorf("Wrong truncated height : got %d, want %d", truncateBranch.Height(), 476 | initialBranch.Height()) 477 | } 478 | 479 | if truncateBranch.PrunedLowestHeight() != linkHeight+1 { 480 | t.Errorf("Wrong consolidated pruned lowest height : got %d, want %d", 481 | truncateBranch.PrunedLowestHeight(), linkHeight+1) 482 | } 483 | 484 | if !truncateBranch.firstHeader.PrevBlock.Equal(&previousHeader.Hash) { 485 | t.Errorf("Wrong truncated previous hash : \ngot : %s\nwant : %s", 486 | truncateBranch.firstHeader.PrevBlock, previousHeader.Hash) 487 | } 488 | 489 | for i := 9; i <= 18; i++ { 490 | want := initialBranch.AtHeight(i) 491 | got := truncateBranch.AtHeight(i) 492 | 493 | if want == nil { 494 | t.Fatalf("Missing want header %d", i) 495 | } 496 | if got == nil { 497 | t.Fatalf("Missing got header %d", i) 498 | } 499 | 500 | if !want.Hash.Equal(&got.Hash) { 501 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, got.Hash, want.Hash) 502 | } 503 | } 504 | 505 | connectBranch, err := thirdBranch.Connect(ctx, store, 506 | Branches{consolidateBranch, truncateBranch}) 507 | if err != nil { 508 | t.Fatalf("Failed to connect branch : %s", err) 509 | } 510 | 511 | t.Logf("Connected :") 512 | t.Logf(connectBranch.String(" ")) 513 | t.Logf(connectBranch.StringHeaderHashes(" ")) 514 | 515 | if connectBranch.parent != consolidateBranch { 516 | t.Errorf("Wrong parent for connected branch") 517 | } 518 | 519 | for i := 6; i <= 6; i++ { 520 | want := consolidateBranch.AtHeight(i) 521 | got := connectBranch.AtHeight(i) 522 | 523 | if want == nil { 524 | t.Fatalf("Missing want header %d", i) 525 | } 526 | if got == nil { 527 | t.Fatalf("Missing got header %d", i) 528 | } 529 | 530 | if !want.Hash.Equal(&got.Hash) { 531 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, got.Hash, want.Hash) 532 | } 533 | } 534 | 535 | for i := 7; i <= 12; i++ { 536 | want := thirdBranch.AtHeight(i) 537 | got := connectBranch.AtHeight(i) 538 | 539 | if want == nil { 540 | t.Fatalf("Missing want header %d", i) 541 | } 542 | if got == nil { 543 | t.Fatalf("Missing got header %d", i) 544 | } 545 | 546 | if !want.Hash.Equal(&got.Hash) { 547 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, got.Hash, want.Hash) 548 | } 549 | } 550 | } 551 | 552 | func Test_Branches_Trim(t *testing.T) { 553 | genesis := genesisHeader(bitcoin.MainNet) 554 | 555 | rand.Seed(100) 556 | 557 | initialBranch, _ := NewBranch(nil, -1, genesis) 558 | if initialBranch == nil { 559 | t.Fatalf("Failed to create initial branch") 560 | } 561 | 562 | MockHeadersOnBranch(initialBranch, 10) 563 | 564 | hashes := make([]bitcoin.Hash32, initialBranch.Height()+1) 565 | for i := range hashes { 566 | hashes[i] = initialBranch.AtHeight(i).Hash 567 | } 568 | 569 | previousHeader := initialBranch.AtHeight(8) 570 | t.Logf("Linking new branch after %s", previousHeader.Hash) 571 | 572 | otherBranchHeader := &wire.BlockHeader{ 573 | Version: 1, 574 | PrevBlock: previousHeader.Hash, 575 | Timestamp: previousHeader.Header.Timestamp + 600, 576 | Bits: 0x1d00ffff, 577 | Nonce: rand.Uint32(), 578 | } 579 | rand.Read(otherBranchHeader.MerkleRoot[:]) 580 | 581 | otherBranch, err := NewBranch(initialBranch, 8, otherBranchHeader) 582 | if err != nil { 583 | t.Fatalf("Failed to create other branch : %s", err) 584 | } 585 | MockHeadersOnBranch(otherBranch, 11) 586 | 587 | branches := 
Branches{initialBranch, otherBranch} 588 | if len(branches) != 2 { 589 | t.Fatalf("Wrong branch count : got %d, want %d", len(branches), 2) 590 | } 591 | 592 | branches.Trim(initialBranch, 7) 593 | 594 | if len(branches) != 1 { 595 | t.Fatalf("Wrong branch count : got %d, want %d", len(branches), 1) 596 | } 597 | 598 | if initialBranch.Height() != 6 { 599 | t.Fatalf("Wrong branch height : got %d, want %d", initialBranch.Height(), 6) 600 | } 601 | 602 | for i := 0; i <= initialBranch.Height(); i++ { 603 | if !hashes[i].Equal(&initialBranch.AtHeight(i).Hash) { 604 | t.Errorf("Wrong hash at height %d : \ngot : %s\nwant : %s", i, hashes[i], 605 | initialBranch.AtHeight(i).Hash) 606 | } 607 | } 608 | } 609 | 610 | // Test_Branches_BranchOfBranch tests a consolidation when there is a branch of a branch. 611 | func Test_Branches_BranchOfBranch(t *testing.T) { 612 | ctx := tests.Context() 613 | firstBranchSize := 10 614 | 615 | for offset := 0; offset < firstBranchSize; offset++ { 616 | store := storage.NewMockStorage() 617 | repo := NewRepository(DefaultConfig(), store) 618 | repo.DisableDifficulty() 619 | 620 | startTime := uint32(952644136) 621 | repo.InitializeWithTimeStamp(startTime) 622 | initialHeaders := MockHeaders(ctx, repo, repo.LastHash(), repo.LastTime(), 15) 623 | 624 | for i, header := range initialHeaders { 625 | t.Logf("Header %d : %s", 1+i, header.BlockHash()) 626 | } 627 | 628 | firstBranchHeader := initialHeaders[9] 629 | firstBranchHash := *firstBranchHeader.BlockHash() 630 | 631 | firstBranchHeaders := MockHeaders(ctx, repo, firstBranchHash, firstBranchHeader.Timestamp, 632 | firstBranchSize) 633 | 634 | for i, header := range firstBranchHeaders { 635 | t.Logf("Header %d : %s", 11+i, header.BlockHash()) 636 | } 637 | 638 | secondBranchHeader := firstBranchHeaders[offset] 639 | secondBranchHash := *secondBranchHeader.BlockHash() 640 | 641 | secondBranchHeaders := MockHeaders(ctx, repo, secondBranchHash, 642 | secondBranchHeader.Timestamp, 8) 643 | 644 | for i, header := range secondBranchHeaders { 645 | t.Logf("Header %d : %s", 15+i, header.BlockHash()) 646 | } 647 | 648 | for i, branch := range repo.branches { 649 | t.Logf("Branch %d : %s", i, branch.String("")) 650 | t.Logf("Hashes : %s", branch.StringHeaderHashes(" ")) 651 | } 652 | 653 | if err := repo.saveMainBranch(ctx); err != nil { 654 | t.Fatalf("Failed to save main branch : %s", err) 655 | } 656 | 657 | if err := repo.prune(ctx, 8); err != nil { 658 | t.Fatalf("Failed to prune repo : %s", err) 659 | } 660 | 661 | t.Logf("After prune") 662 | 663 | for i, branch := range repo.branches { 664 | t.Logf("Branch %d : %s", i, branch.String("")) 665 | t.Logf("Hashes : %s", branch.StringHeaderHashes(" ")) 666 | } 667 | 668 | if err := repo.consolidate(ctx); err != nil { 669 | t.Fatalf("Failed to consolidate repo : %s", err) 670 | } 671 | 672 | t.Logf("After consolidate") 673 | 674 | for i, branch := range repo.branches { 675 | t.Logf("Branch %d : %s", i, branch.String("")) 676 | t.Logf("Hashes : %s", branch.StringHeaderHashes(" ")) 677 | } 678 | } 679 | } 680 | --------------------------------------------------------------------------------
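As a closing orientation note: the tests above only exercise the headers package against mock or fixture storage. A minimal sketch of standing the repository up outside the tests, using only constructors and methods that appear in the files above, might look like the following (the filesystem root path and log configuration are assumptions; wiring a BitcoinNode on top additionally requires the root package's Config and a PeerRepository, whose construction is not shown in this section):

package main

import (
	"context"

	"github.com/tokenized/bitcoin_reader/headers"

	"github.com/tokenized/logger"
	"github.com/tokenized/pkg/storage"
)

func main() {
	// Assumed log configuration, mirroring the test helper.
	ctx := logger.ContextWithLogConfig(context.Background(), logger.NewConfig(true, true, ""))

	// Filesystem-backed storage; the root path is an assumption for this sketch.
	store := storage.NewFilesystemStorage(storage.Config{
		Root:       "./header_data",
		MaxRetries: 2,
		RetryDelay: 1000,
	})

	repo := headers.NewRepository(headers.DefaultConfig(), store)
	if err := repo.Load(ctx); err != nil { // loads any previously saved headers
		panic(err)
	}

	logger.Info(ctx, "Loaded headers to height %d : %s", repo.Height(), repo.LastHash())

	// Headers received from peers would be fed through repo.ProcessHeader before
	// persisting the repository again.
	if err := repo.Save(ctx); err != nil {
		panic(err)
	}
}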