├── .github └── workflows │ ├── generated-pr.yml │ ├── go-check.yml │ ├── go-test.yml │ ├── release-check.yml │ ├── releaser.yml │ ├── stale.yml │ └── tagpush.yml ├── LICENSE ├── README.md ├── bucket.go ├── bucket_prefixmap.go ├── bucket_test.go ├── codecov.yml ├── generate └── main.go ├── go.mod ├── go.sum ├── keyspace ├── keyspace.go ├── xor.go └── xor_test.go ├── peerdiversity ├── filter.go └── filter_test.go ├── sorting.go ├── table.go ├── table_refresh.go ├── table_refresh_test.go ├── table_test.go ├── util.go ├── util_test.go └── version.json /.github/workflows/generated-pr.yml: -------------------------------------------------------------------------------- 1 | name: Close Generated PRs 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/go-check.yml: -------------------------------------------------------------------------------- 1 | name: Go Checks 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: ["master"] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: read 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | go-check: 18 | uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0 19 | -------------------------------------------------------------------------------- /.github/workflows/go-test.yml: -------------------------------------------------------------------------------- 1 | name: Go Test 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: ["master"] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: read 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | go-test: 18 | uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0 19 | secrets: 20 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 21 | -------------------------------------------------------------------------------- /.github/workflows/release-check.yml: -------------------------------------------------------------------------------- 1 | name: Release Checker 2 | 3 | on: 4 | pull_request_target: 5 | paths: [ 'version.json' ] 6 | types: [ opened, synchronize, reopened, labeled, unlabeled ] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: write 11 | pull-requests: write 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | release-check: 19 | uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0 20 | -------------------------------------------------------------------------------- /.github/workflows/releaser.yml: -------------------------------------------------------------------------------- 1 | name: Releaser 2 | 3 | on: 4 | push: 5 | paths: [ 'version.json' ] 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: write 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.sha }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | releaser: 17 | uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0 18 | 
-------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close Stale Issues 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/tagpush.yml: -------------------------------------------------------------------------------- 1 | name: Tag Push Checker 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | 8 | permissions: 9 | contents: read 10 | issues: write 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | releaser: 18 | uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Protocol Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # go-libp2p-kbucket 2 | 3 | [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) 4 | [![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/) 5 | [![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23libp2p) 6 | [![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io) 7 | 8 | > A kbucket implementation for use as a routing table in go-libp2p-kad-dht 9 | 10 | ## Documentation 11 | 12 | See https://godoc.org/github.com/libp2p/go-libp2p-kbucket. 13 | 14 | ## Contribute 15 | 16 | Feel free to join in. All welcome. Open an [issue](https://github.com/libp2p/go-libp2p-kbucket/issues)! 17 | 18 | This repository falls under the libp2p [Code of Conduct](https://github.com/libp2p/community/blob/master/code-of-conduct.md). 19 | 20 | ### Want to hack on libp2p?
21 | 22 | [![](https://cdn.rawgit.com/libp2p/community/master/img/contribute.gif)](https://github.com/libp2p/community/blob/master/CONTRIBUTE.md) 23 | 24 | ## License 25 | 26 | MIT 27 | 28 | --- 29 | 30 | The last gx published version of this module was: 2.2.23: QmSNE1XryoCMnZCbRaj1D23k6YKCaTQ386eJciu1pAfu8M 31 | -------------------------------------------------------------------------------- /bucket.go: -------------------------------------------------------------------------------- 1 | //go:generate go run ./generate 2 | 3 | package kbucket 4 | 5 | import ( 6 | "container/list" 7 | "time" 8 | 9 | "github.com/libp2p/go-libp2p/core/peer" 10 | ) 11 | 12 | // PeerInfo holds all related information for a peer in the K-Bucket. 13 | type PeerInfo struct { 14 | Id peer.ID 15 | 16 | // LastUsefulAt is the time instant at which the peer was last "useful" to us. 17 | // Please see the DHT docs for the definition of usefulness. 18 | LastUsefulAt time.Time 19 | 20 | // LastSuccessfulOutboundQueryAt is the time instant at which we last got a 21 | // successful query response from the peer. 22 | LastSuccessfulOutboundQueryAt time.Time 23 | 24 | // AddedAt is the time this peer was added to the routing table. 25 | AddedAt time.Time 26 | 27 | // Id of the peer in the DHT XOR keyspace 28 | dhtId ID 29 | 30 | // if a bucket is full, this peer can be replaced to make space for a new peer. 31 | replaceable bool 32 | } 33 | 34 | // bucket holds a list of peers. 35 | // we synchronize on the Routing Table lock for all access to the bucket 36 | // and so do not need any locks in the bucket. 37 | // if we want/need to avoid locking the table for accessing a bucket in the future, 38 | // it WILL be the caller's responsibility to synchronize all access to a bucket. 39 | type bucket struct { 40 | list *list.List 41 | } 42 | 43 | func newBucket() *bucket { 44 | b := new(bucket) 45 | b.list = list.New() 46 | return b 47 | } 48 | 49 | // returns all peers in the bucket 50 | // it is safe for the caller to modify the returned objects as it is a defensive copy 51 | func (b *bucket) peers() []PeerInfo { 52 | ps := make([]PeerInfo, 0, b.len()) 53 | for e := b.list.Front(); e != nil; e = e.Next() { 54 | p := e.Value.(*PeerInfo) 55 | ps = append(ps, *p) 56 | } 57 | return ps 58 | } 59 | 60 | // returns the "minimum" peer in the bucket based on the `lessThan` comparator passed to it. 61 | // It is NOT safe for the comparator to mutate the given `PeerInfo` 62 | // as we pass in a pointer to it. 63 | // It is NOT safe to modify the returned value. 64 | func (b *bucket) min(lessThan func(p1 *PeerInfo, p2 *PeerInfo) bool) *PeerInfo { 65 | if b.list.Len() == 0 { 66 | return nil 67 | } 68 | 69 | minVal := b.list.Front().Value.(*PeerInfo) 70 | 71 | for e := b.list.Front().Next(); e != nil; e = e.Next() { 72 | val := e.Value.(*PeerInfo) 73 | 74 | if lessThan(val, minVal) { 75 | minVal = val 76 | } 77 | } 78 | 79 | return minVal 80 | } 81 | 82 | // updateAllWith updates all the peers in the bucket by applying the given update function. 83 | func (b *bucket) updateAllWith(updateFnc func(p *PeerInfo)) { 84 | for e := b.list.Front(); e != nil; e = e.Next() { 85 | val := e.Value.(*PeerInfo) 86 | updateFnc(val) 87 | } 88 | } 89 | 90 | // return the Ids of all the peers in the bucket. 
91 | func (b *bucket) peerIds() []peer.ID { 92 | ps := make([]peer.ID, 0, b.list.Len()) 93 | for e := b.list.Front(); e != nil; e = e.Next() { 94 | p := e.Value.(*PeerInfo) 95 | ps = append(ps, p.Id) 96 | } 97 | return ps 98 | } 99 | 100 | // returns the peer with the given Id if it exists 101 | // returns nil if the peerId does not exist 102 | func (b *bucket) getPeer(p peer.ID) *PeerInfo { 103 | for e := b.list.Front(); e != nil; e = e.Next() { 104 | if e.Value.(*PeerInfo).Id == p { 105 | return e.Value.(*PeerInfo) 106 | } 107 | } 108 | return nil 109 | } 110 | 111 | // removes the peer with the given Id from the bucket. 112 | // returns true if successful, false otherwise. 113 | func (b *bucket) remove(id peer.ID) bool { 114 | for e := b.list.Front(); e != nil; e = e.Next() { 115 | if e.Value.(*PeerInfo).Id == id { 116 | b.list.Remove(e) 117 | return true 118 | } 119 | } 120 | return false 121 | } 122 | 123 | func (b *bucket) pushFront(p *PeerInfo) { 124 | b.list.PushFront(p) 125 | } 126 | 127 | func (b *bucket) len() int { 128 | return b.list.Len() 129 | } 130 | 131 | // splits a bucket's peers into two buckets: the method's receiver will have 132 | // peers with CPL equal to cpl, the returned bucket will have peers with CPL 133 | // greater than cpl (returned bucket has closer peers) 134 | func (b *bucket) split(cpl int, target ID) *bucket { 135 | out := list.New() 136 | newbuck := newBucket() 137 | newbuck.list = out 138 | e := b.list.Front() 139 | for e != nil { 140 | pDhtId := e.Value.(*PeerInfo).dhtId 141 | peerCPL := CommonPrefixLen(pDhtId, target) 142 | if peerCPL > cpl { 143 | cur := e 144 | out.PushBack(e.Value) 145 | e = e.Next() 146 | b.list.Remove(cur) 147 | continue 148 | } 149 | e = e.Next() 150 | } 151 | return newbuck 152 | } 153 | 154 | // maxCommonPrefix returns the maximum common prefix length between any peer in 155 | // the bucket and the target ID.
156 | func (b *bucket) maxCommonPrefix(target ID) uint { 157 | maxCpl := uint(0) 158 | for e := b.list.Front(); e != nil; e = e.Next() { 159 | cpl := uint(CommonPrefixLen(e.Value.(*PeerInfo).dhtId, target)) 160 | if cpl > maxCpl { 161 | maxCpl = cpl 162 | } 163 | } 164 | return maxCpl 165 | } 166 | -------------------------------------------------------------------------------- /bucket_test.go: -------------------------------------------------------------------------------- 1 | package kbucket 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/libp2p/go-libp2p/core/test" 8 | 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestBucketMinimum(t *testing.T) { 13 | t.Parallel() 14 | 15 | b := newBucket() 16 | require.Nil(t, b.min(func(p1 *PeerInfo, p2 *PeerInfo) bool { return true })) 17 | 18 | pid1 := test.RandPeerIDFatal(t) 19 | pid2 := test.RandPeerIDFatal(t) 20 | pid3 := test.RandPeerIDFatal(t) 21 | 22 | // first is min 23 | b.pushFront(&PeerInfo{Id: pid1, LastUsefulAt: time.Now()}) 24 | require.Equal(t, pid1, b.min(func(first *PeerInfo, second *PeerInfo) bool { 25 | return first.LastUsefulAt.Before(second.LastUsefulAt) 26 | }).Id) 27 | 28 | // first is still min 29 | b.pushFront(&PeerInfo{Id: pid2, LastUsefulAt: time.Now().AddDate(1, 0, 0)}) 30 | require.Equal(t, pid1, b.min(func(first *PeerInfo, second *PeerInfo) bool { 31 | return first.LastUsefulAt.Before(second.LastUsefulAt) 32 | }).Id) 33 | 34 | // second is the min 35 | b.pushFront(&PeerInfo{Id: pid3, LastUsefulAt: time.Now().AddDate(-1, 0, 0)}) 36 | require.Equal(t, pid3, b.min(func(first *PeerInfo, second *PeerInfo) bool { 37 | return first.LastUsefulAt.Before(second.LastUsefulAt) 38 | }).Id) 39 | } 40 | 41 | func TestUpdateAllWith(t *testing.T) { 42 | t.Parallel() 43 | 44 | b := newBucket() 45 | // dont crash 46 | b.updateAllWith(func(p *PeerInfo) {}) 47 | 48 | pid1 := test.RandPeerIDFatal(t) 49 | pid2 := test.RandPeerIDFatal(t) 50 | pid3 := test.RandPeerIDFatal(t) 51 | 52 | // peer1 53 | b.pushFront(&PeerInfo{Id: pid1, replaceable: false}) 54 | b.updateAllWith(func(p *PeerInfo) { 55 | p.replaceable = true 56 | }) 57 | require.True(t, b.getPeer(pid1).replaceable) 58 | 59 | // peer2 60 | b.pushFront(&PeerInfo{Id: pid2, replaceable: false}) 61 | b.updateAllWith(func(p *PeerInfo) { 62 | if p.Id == pid1 { 63 | p.replaceable = false 64 | } else { 65 | p.replaceable = true 66 | } 67 | }) 68 | require.True(t, b.getPeer(pid2).replaceable) 69 | require.False(t, b.getPeer(pid1).replaceable) 70 | 71 | // peer3 72 | b.pushFront(&PeerInfo{Id: pid3, replaceable: false}) 73 | require.False(t, b.getPeer(pid3).replaceable) 74 | b.updateAllWith(func(p *PeerInfo) { 75 | p.replaceable = true 76 | }) 77 | require.True(t, b.getPeer(pid1).replaceable) 78 | require.True(t, b.getPeer(pid2).replaceable) 79 | require.True(t, b.getPeer(pid3).replaceable) 80 | } 81 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | range: "50...100" 3 | comment: off 4 | -------------------------------------------------------------------------------- /generate/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/binary" 6 | "fmt" 7 | "os" 8 | "strings" 9 | 10 | mh "github.com/multiformats/go-multihash" 11 | ) 12 | 13 | const bits = 16 14 | const target = 1 << bits 15 | const idLen = 32 + 2 16 | 17 | func 
main() { 18 | pkg := os.Getenv("GOPACKAGE") 19 | file := os.Getenv("GOFILE") 20 | targetFile := strings.TrimSuffix(file, ".go") + "_prefixmap.go" 21 | 22 | ids := new([target]uint32) 23 | found := new([target]bool) 24 | count := int32(0) 25 | 26 | out := make([]byte, 32) 27 | inp := [idLen]byte{mh.SHA2_256, 32} 28 | hasher := sha256.New() 29 | 30 | for i := uint32(0); count < target; i++ { 31 | binary.BigEndian.PutUint32(inp[2:], i) 32 | 33 | hasher.Write(inp[:]) 34 | out = hasher.Sum(out[:0]) 35 | hasher.Reset() 36 | 37 | prefix := binary.BigEndian.Uint32(out) >> (32 - bits) 38 | if !found[prefix] { 39 | found[prefix] = true 40 | ids[prefix] = i 41 | count++ 42 | } 43 | } 44 | 45 | f, err := os.Create(targetFile) 46 | if err != nil { 47 | panic(err) 48 | } 49 | 50 | printf := func(s string, args ...interface{}) { 51 | _, err = fmt.Fprintf(f, s, args...) 52 | if err != nil { 53 | panic(err) 54 | } 55 | } 56 | 57 | printf("package %s\n\n", pkg) 58 | printf("// Code generated by generate/generate_map.go DO NOT EDIT\n") 59 | printf("var keyPrefixMap = [...]uint32{") 60 | for i, j := range ids[:] { 61 | if i%16 == 0 { 62 | printf("\n\t") 63 | } else { 64 | printf(" ") 65 | } 66 | printf("%d,", j) 67 | } 68 | printf("\n}") 69 | if err = f.Close(); err != nil { 70 | panic(err) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/libp2p/go-libp2p-kbucket 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/ipfs/boxo v0.29.1 7 | github.com/ipfs/go-log/v2 v2.5.1 8 | github.com/libp2p/go-cidranger v1.1.0 9 | github.com/libp2p/go-libp2p v0.41.1 10 | github.com/libp2p/go-libp2p-asn-util v0.4.1 11 | github.com/minio/sha256-simd v1.0.1 12 | github.com/multiformats/go-multiaddr v0.15.0 13 | github.com/multiformats/go-multihash v0.2.3 14 | github.com/stretchr/testify v1.10.0 15 | ) 16 | 17 | require ( 18 | github.com/davecgh/go-spew v1.1.1 // indirect 19 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect 20 | github.com/ipfs/go-cid v0.5.0 // indirect 21 | github.com/klauspost/cpuid/v2 v2.2.10 // indirect 22 | github.com/kr/pretty v0.3.1 // indirect 23 | github.com/libp2p/go-buffer-pool v0.1.0 // indirect 24 | github.com/mattn/go-isatty v0.0.20 // indirect 25 | github.com/mr-tron/base58 v1.2.0 // indirect 26 | github.com/multiformats/go-base32 v0.1.0 // indirect 27 | github.com/multiformats/go-base36 v0.2.0 // indirect 28 | github.com/multiformats/go-multibase v0.2.0 // indirect 29 | github.com/multiformats/go-multicodec v0.9.0 // indirect 30 | github.com/multiformats/go-multistream v0.6.0 // indirect 31 | github.com/multiformats/go-varint v0.0.7 // indirect 32 | github.com/pmezard/go-difflib v1.0.0 // indirect 33 | github.com/rogpeppe/go-internal v1.10.0 // indirect 34 | github.com/spaolacci/murmur3 v1.1.0 // indirect 35 | go.uber.org/multierr v1.11.0 // indirect 36 | go.uber.org/zap v1.27.0 // indirect 37 | golang.org/x/crypto v0.36.0 // indirect 38 | golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect 39 | golang.org/x/sys v0.31.0 // indirect 40 | google.golang.org/protobuf v1.36.6 // indirect 41 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 42 | gopkg.in/yaml.v3 v3.0.1 // indirect 43 | lukechampine.com/blake3 v1.4.0 // indirect 44 | ) 45 | -------------------------------------------------------------------------------- /go.sum: 
-------------------------------------------------------------------------------- 1 | github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 2 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 3 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 5 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= 7 | github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= 8 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= 9 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= 10 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 11 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 12 | github.com/ipfs/boxo v0.29.1 h1:z61ZT4YDfTHLjXTsu/+3wvJ8aJlExthDSOCpx6Nh8xc= 13 | github.com/ipfs/boxo v0.29.1/go.mod h1:MkDJStXiJS9U99cbAijHdcmwNfVn5DKYBmQCOgjY2NU= 14 | github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= 15 | github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= 16 | github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= 17 | github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= 18 | github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= 19 | github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= 20 | github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= 21 | github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= 22 | github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= 23 | github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= 24 | github.com/ipfs/go-test v0.2.1 h1:/D/a8xZ2JzkYqcVcV/7HYlCnc7bv/pKHQiX5TdClkPE= 25 | github.com/ipfs/go-test v0.2.1/go.mod h1:dzu+KB9cmWjuJnXFDYJwC25T3j1GcN57byN+ixmK39M= 26 | github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= 27 | github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= 28 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 29 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 30 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 31 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 32 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 33 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 34 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 35 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 36 | github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= 37 | github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= 38 | github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= 39 | 
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= 40 | github.com/libp2p/go-libp2p v0.41.1 h1:8ecNQVT5ev/jqALTvisSJeVNvXYJyK4NhQx1nNRXQZE= 41 | github.com/libp2p/go-libp2p v0.41.1/go.mod h1:DcGTovJzQl/I7HMrby5ZRjeD0kQkGiy+9w6aEkSZpRI= 42 | github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= 43 | github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= 44 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 45 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 46 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 47 | github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= 48 | github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= 49 | github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= 50 | github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= 51 | github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= 52 | github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= 53 | github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= 54 | github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= 55 | github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= 56 | github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= 57 | github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= 58 | github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= 59 | github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= 60 | github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= 61 | github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= 62 | github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= 63 | github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA= 64 | github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg= 65 | github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= 66 | github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= 67 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 68 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 69 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 70 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 71 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 72 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 73 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 74 | github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 75 | github.com/spaolacci/murmur3 v1.1.0/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 76 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 77 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 78 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 79 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 80 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 81 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 82 | github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 83 | go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 84 | go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= 85 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 86 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 87 | go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= 88 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 89 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 90 | go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= 91 | go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= 92 | go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 93 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 94 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 95 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= 96 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= 97 | golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= 98 | golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= 99 | golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 100 | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 101 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 102 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 103 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 104 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 105 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 106 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 107 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 108 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 109 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 110 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 111 | golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 112 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 113 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 114 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 115 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 116 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 117 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 118 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 119 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 120 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 121 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 122 | golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 123 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 124 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 125 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 126 | google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= 127 | google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= 128 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 129 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 130 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 131 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 132 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 133 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 134 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 135 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 136 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 137 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 138 | lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= 139 | lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= 140 | -------------------------------------------------------------------------------- /keyspace/keyspace.go: -------------------------------------------------------------------------------- 1 | package keyspace 2 | 3 | import ( 4 | "sort" 5 | 6 | "math/big" 7 | ) 8 | 9 | // Key represents an identifier in a KeySpace. It holds a reference to the 10 | // associated KeySpace, as well references to both the Original identifier, 11 | // as well as the new, KeySpace Bytes one. 12 | type Key struct { 13 | 14 | // Space is the KeySpace this Key is related to. 
15 | Space KeySpace 16 | 17 | // Original is the original value of the identifier 18 | Original []byte 19 | 20 | // Bytes is the new value of the identifier, in the KeySpace. 21 | Bytes []byte 22 | } 23 | 24 | // Equal returns whether this key is equal to another. 25 | func (k1 Key) Equal(k2 Key) bool { 26 | if k1.Space != k2.Space { 27 | panic("k1 and k2 not in same key space.") 28 | } 29 | return k1.Space.Equal(k1, k2) 30 | } 31 | 32 | // Less returns whether this key comes before another. 33 | func (k1 Key) Less(k2 Key) bool { 34 | if k1.Space != k2.Space { 35 | panic("k1 and k2 not in same key space.") 36 | } 37 | return k1.Space.Less(k1, k2) 38 | } 39 | 40 | // Distance returns this key's distance to another 41 | func (k1 Key) Distance(k2 Key) *big.Int { 42 | if k1.Space != k2.Space { 43 | panic("k1 and k2 not in same key space.") 44 | } 45 | return k1.Space.Distance(k1, k2) 46 | } 47 | 48 | // KeySpace is an object used to do math on identifiers. Each keyspace has its 49 | // own properties and rules. See XorKeySpace. 50 | type KeySpace interface { 51 | 52 | // Key converts an identifier into a Key in this space. 53 | Key([]byte) Key 54 | 55 | // Equal returns whether keys are equal in this key space 56 | Equal(Key, Key) bool 57 | 58 | // Distance returns the distance metric in this key space 59 | Distance(Key, Key) *big.Int 60 | 61 | // Less returns whether the first key is smaller than the second. 62 | Less(Key, Key) bool 63 | } 64 | 65 | // byDistanceToCenter is a type used to sort Keys by proximity to a center. 66 | type byDistanceToCenter struct { 67 | Center Key 68 | Keys []Key 69 | } 70 | 71 | func (s byDistanceToCenter) Len() int { 72 | return len(s.Keys) 73 | } 74 | 75 | func (s byDistanceToCenter) Swap(i, j int) { 76 | s.Keys[i], s.Keys[j] = s.Keys[j], s.Keys[i] 77 | } 78 | 79 | func (s byDistanceToCenter) Less(i, j int) bool { 80 | a := s.Center.Distance(s.Keys[i]) 81 | b := s.Center.Distance(s.Keys[j]) 82 | return a.Cmp(b) == -1 83 | } 84 | 85 | // SortByDistance takes a KeySpace, a center Key, and a list of Keys toSort. 86 | // It returns a new list, where the Keys toSort have been sorted by their 87 | // distance to the center Key. 88 | func SortByDistance(sp KeySpace, center Key, toSort []Key) []Key { 89 | toSortCopy := make([]Key, len(toSort)) 90 | copy(toSortCopy, toSort) 91 | bdtc := &byDistanceToCenter{ 92 | Center: center, 93 | Keys: toSortCopy, // copy 94 | } 95 | sort.Sort(bdtc) 96 | return bdtc.Keys 97 | } 98 | -------------------------------------------------------------------------------- /keyspace/xor.go: -------------------------------------------------------------------------------- 1 | package keyspace 2 | 3 | import ( 4 | "bytes" 5 | "math/big" 6 | "math/bits" 7 | 8 | u "github.com/ipfs/boxo/util" 9 | sha256 "github.com/minio/sha256-simd" 10 | ) 11 | 12 | // XORKeySpace is a KeySpace which: 13 | // - normalizes identifiers using a cryptographic hash (sha256) 14 | // - measures distance by XORing keys together 15 | var XORKeySpace = &xorKeySpace{} 16 | var _ KeySpace = XORKeySpace // ensure it conforms 17 | 18 | type xorKeySpace struct{} 19 | 20 | // Key converts an identifier into a Key in this space. 
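// For example, two identifiers can be normalized and compared like this
// (an illustrative sketch, not part of the package API):
//
//	a := XORKeySpace.Key([]byte("hello"))  // a.Bytes is sha256("hello")
//	b := XORKeySpace.Key([]byte("world"))  // b.Bytes is sha256("world")
//	d := a.Distance(b)                     // big.Int built from a.Bytes XOR b.Bytes
//	closer := a.Less(b)                    // byte-wise comparison of the hashed keys
//
// Keys may only be compared within the same KeySpace; the Key methods Equal,
// Less and Distance panic if the two keys belong to different spaces.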
21 | func (s *xorKeySpace) Key(id []byte) Key { 22 | hash := sha256.Sum256(id) 23 | key := hash[:] 24 | return Key{ 25 | Space: s, 26 | Original: id, 27 | Bytes: key, 28 | } 29 | } 30 | 31 | // Equal returns whether keys are equal in this key space 32 | func (s *xorKeySpace) Equal(k1, k2 Key) bool { 33 | return bytes.Equal(k1.Bytes, k2.Bytes) 34 | } 35 | 36 | // Distance returns the distance metric in this key space 37 | func (s *xorKeySpace) Distance(k1, k2 Key) *big.Int { 38 | // XOR the keys 39 | k3 := u.XOR(k1.Bytes, k2.Bytes) 40 | 41 | // interpret it as an integer 42 | dist := big.NewInt(0).SetBytes(k3) 43 | return dist 44 | } 45 | 46 | // Less returns whether the first key is smaller than the second. 47 | func (s *xorKeySpace) Less(k1, k2 Key) bool { 48 | return bytes.Compare(k1.Bytes, k2.Bytes) < 0 49 | } 50 | 51 | // ZeroPrefixLen returns the number of consecutive zeroes in a byte slice. 52 | func ZeroPrefixLen(id []byte) int { 53 | for i, b := range id { 54 | if b != 0 { 55 | return i*8 + bits.LeadingZeros8(uint8(b)) 56 | } 57 | } 58 | return len(id) * 8 59 | } 60 | -------------------------------------------------------------------------------- /keyspace/xor_test.go: -------------------------------------------------------------------------------- 1 | package keyspace 2 | 3 | import ( 4 | "bytes" 5 | "math/big" 6 | "testing" 7 | 8 | u "github.com/ipfs/boxo/util" 9 | ) 10 | 11 | func TestPrefixLen(t *testing.T) { 12 | cases := [][]byte{ 13 | {0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00}, 14 | {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 15 | {0x00, 0x58, 0xFF, 0x80, 0x00, 0x00, 0xF0}, 16 | } 17 | lens := []int{24, 56, 9} 18 | 19 | for i, c := range cases { 20 | r := ZeroPrefixLen(c) 21 | if r != lens[i] { 22 | t.Errorf("ZeroPrefixLen failed: %v != %v", r, lens[i]) 23 | } 24 | } 25 | 26 | } 27 | 28 | func TestXorKeySpace(t *testing.T) { 29 | 30 | ids := [][]byte{ 31 | {0xFF, 0xFF, 0xFF, 0xFF}, 32 | {0x00, 0x00, 0x00, 0x00}, 33 | {0xFF, 0xFF, 0xFF, 0xF0}, 34 | } 35 | 36 | ks := [][2]Key{ 37 | {XORKeySpace.Key(ids[0]), XORKeySpace.Key(ids[0])}, 38 | {XORKeySpace.Key(ids[1]), XORKeySpace.Key(ids[1])}, 39 | {XORKeySpace.Key(ids[2]), XORKeySpace.Key(ids[2])}, 40 | } 41 | 42 | for i, set := range ks { 43 | if !set[0].Equal(set[1]) { 44 | t.Errorf("Key not eq. %v != %v", set[0], set[1]) 45 | } 46 | 47 | if !bytes.Equal(set[0].Bytes, set[1].Bytes) { 48 | t.Errorf("Key gen failed. %v != %v", set[0].Bytes, set[1].Bytes) 49 | } 50 | 51 | if !bytes.Equal(set[0].Original, ids[i]) { 52 | t.Errorf("ptrs to original. %v != %v", set[0].Original, ids[i]) 53 | } 54 | 55 | if len(set[0].Bytes) != 32 { 56 | t.Errorf("key length incorrect. 32 != %d", len(set[0].Bytes)) 57 | } 58 | } 59 | 60 | for i := 1; i < len(ks); i++ { 61 | if ks[i][0].Less(ks[i-1][0]) == ks[i-1][0].Less(ks[i][0]) { 62 | t.Errorf("less should be different.") 63 | } 64 | 65 | if ks[i][0].Distance(ks[i-1][0]).Cmp(ks[i-1][0].Distance(ks[i][0])) != 0 { 66 | t.Errorf("distance should be the same.") 67 | } 68 | 69 | if ks[i][0].Equal(ks[i-1][0]) { 70 | t.Errorf("Keys should not be eq. 
%v != %v", ks[i][0], ks[i-1][0]) 71 | } 72 | } 73 | } 74 | 75 | func TestDistancesAndCenterSorting(t *testing.T) { 76 | 77 | adjs := [][]byte{ 78 | {173, 149, 19, 27, 192, 183, 153, 192, 177, 175, 71, 127, 177, 79, 207, 38, 166, 169, 247, 96, 121, 228, 139, 240, 144, 172, 183, 232, 54, 123, 253, 14}, 79 | {223, 63, 97, 152, 4, 169, 47, 219, 64, 87, 25, 45, 196, 61, 215, 72, 234, 119, 138, 220, 82, 188, 73, 140, 232, 5, 36, 192, 20, 184, 17, 25}, 80 | {73, 176, 221, 176, 149, 143, 22, 42, 129, 124, 213, 114, 232, 95, 189, 154, 18, 3, 122, 132, 32, 199, 53, 185, 58, 157, 117, 78, 52, 146, 157, 127}, 81 | {73, 176, 221, 176, 149, 143, 22, 42, 129, 124, 213, 114, 232, 95, 189, 154, 18, 3, 122, 132, 32, 199, 53, 185, 58, 157, 117, 78, 52, 146, 157, 127}, 82 | {73, 176, 221, 176, 149, 143, 22, 42, 129, 124, 213, 114, 232, 95, 189, 154, 18, 3, 122, 132, 32, 199, 53, 185, 58, 157, 117, 78, 52, 146, 157, 126}, 83 | {73, 0, 221, 176, 149, 143, 22, 42, 129, 124, 213, 114, 232, 95, 189, 154, 18, 3, 122, 132, 32, 199, 53, 185, 58, 157, 117, 78, 52, 146, 157, 127}, 84 | } 85 | 86 | keys := make([]Key, len(adjs)) 87 | for i, a := range adjs { 88 | keys[i] = Key{Space: XORKeySpace, Bytes: a} 89 | } 90 | 91 | cmp := func(a int64, b *big.Int) int { 92 | return big.NewInt(a).Cmp(b) 93 | } 94 | 95 | if cmp(0, keys[2].Distance(keys[3])) != 0 { 96 | t.Errorf("distance calculation wrong: %v", keys[2].Distance(keys[3])) 97 | } 98 | 99 | if cmp(1, keys[2].Distance(keys[4])) != 0 { 100 | t.Errorf("distance calculation wrong: %v", keys[2].Distance(keys[4])) 101 | } 102 | 103 | d1 := keys[2].Distance(keys[5]) 104 | d2 := u.XOR(keys[2].Bytes, keys[5].Bytes) 105 | d2 = d2[len(keys[2].Bytes)-len(d1.Bytes()):] // skip empty space for big 106 | if !bytes.Equal(d1.Bytes(), d2) { 107 | t.Errorf("bytes should be the same. %v == %v", d1.Bytes(), d2) 108 | } 109 | 110 | if cmp(2<<32, keys[2].Distance(keys[5])) != -1 { 111 | t.Errorf("2<<32 should be smaller") 112 | } 113 | 114 | keys2 := SortByDistance(XORKeySpace, keys[2], keys) 115 | order := []int{2, 3, 4, 5, 1, 0} 116 | for i, o := range order { 117 | if !bytes.Equal(keys[o].Bytes, keys2[i].Bytes) { 118 | t.Errorf("order is wrong. %d?? %v == %v", o, keys[o], keys2[i]) 119 | } 120 | } 121 | 122 | } 123 | -------------------------------------------------------------------------------- /peerdiversity/filter.go: -------------------------------------------------------------------------------- 1 | package peerdiversity 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net" 7 | "sort" 8 | "strconv" 9 | "sync" 10 | 11 | "github.com/libp2p/go-libp2p/core/peer" 12 | 13 | "github.com/libp2p/go-cidranger" 14 | asnutil "github.com/libp2p/go-libp2p-asn-util" 15 | 16 | logging "github.com/ipfs/go-log/v2" 17 | ma "github.com/multiformats/go-multiaddr" 18 | manet "github.com/multiformats/go-multiaddr/net" 19 | ) 20 | 21 | var dfLog = logging.Logger("diversityFilter") 22 | 23 | // PeerIPGroupKey is a unique key that represents ONE of the IP Groups the peer belongs to. 24 | // A peer has one PeerIPGroupKey per address. Thus, a peer can belong to MULTIPLE Groups if it has 25 | // multiple addresses. 26 | // For now, given a peer address, our grouping mechanism is as follows: 27 | // 1. For IPv6 addresses, we group by the ASN of the IP address. 28 | // 2. For IPv4 addresses, all addresses that belong to same legacy (Class A)/8 allocations 29 | // OR share the same /16 prefix are in the same group. 30 | type PeerIPGroupKey string 31 | 32 | // legacy IPv4 Class A networks. 
33 | var legacyCidrs cidranger.Ranger 34 | 35 | // https://en.wikipedia.org/wiki/List_of_assigned_/8_IPv4_address_blocks 36 | var legacyClassA = []string{"12.0.0.0/8", "17.0.0.0/8", "19.0.0.0/8", "38.0.0.0/8", "48.0.0.0/8", "56.0.0.0/8", "73.0.0.0/8", "53.0.0.0/8"} 37 | 38 | func init() { 39 | // Initialize the trie for legacy Class A networks 40 | legacyCidrs = cidranger.NewPCTrieRanger() 41 | for _, cidr := range legacyClassA { 42 | _, nn, err := net.ParseCIDR(cidr) 43 | if err != nil { 44 | panic(fmt.Errorf("failed to parse CIDR %s: %w", cidr, err)) 45 | } 46 | if err := legacyCidrs.Insert(cidranger.NewBasicRangerEntry(*nn)); err != nil { 47 | panic(fmt.Errorf("failed to insert CIDR %s: %w", cidr, err)) 48 | } 49 | } 50 | } 51 | 52 | // PeerGroupInfo represents the grouping info for a Peer. 53 | type PeerGroupInfo struct { 54 | Id peer.ID 55 | Cpl int 56 | IPGroupKey PeerIPGroupKey 57 | } 58 | 59 | // PeerIPGroupFilter is the interface that must be implemented by callers who want to 60 | // instantiate a `peerdiversity.Filter`. This interface provides the function hooks 61 | // that are used/called by the `peerdiversity.Filter`. 62 | type PeerIPGroupFilter interface { 63 | // Allow is called by the Filter to test if a peer with the given 64 | // grouping info should be allowed/rejected by the Filter. This will be called ONLY 65 | // AFTER the peer has successfully passed all of the Filter's internal checks. 66 | // Note: If the peer is whitelisted on the Filter, the peer will be allowed by the Filter without calling this function. 67 | Allow(PeerGroupInfo) (allow bool) 68 | 69 | // Increment is called by the Filter when a peer with the given Grouping Info. 70 | // is added to the Filter state. This will happen after the peer has passed 71 | // all of the Filter's internal checks and the Allow function defined above for all of it's Groups. 72 | Increment(PeerGroupInfo) 73 | 74 | // Decrement is called by the Filter when a peer with the given 75 | // Grouping Info is removed from the Filter. This will happen when the caller/user of the Filter 76 | // no longer wants the peer and the IP groups it belongs to to count towards the Filter state. 77 | Decrement(PeerGroupInfo) 78 | 79 | // PeerAddresses is called by the Filter to determine the addresses of the given peer 80 | // it should use to determine the IP groups it belongs to. 81 | PeerAddresses(peer.ID) []ma.Multiaddr 82 | } 83 | 84 | // Filter is a peer diversity filter that accepts or rejects peers based on the whitelisting rules configured 85 | // AND the diversity policies defined by the implementation of the PeerIPGroupFilter interface 86 | // passed to it. 87 | type Filter struct { 88 | mu sync.Mutex 89 | // An implementation of the `PeerIPGroupFilter` interface defined above. 90 | pgm PeerIPGroupFilter 91 | peerGroups map[peer.ID][]PeerGroupInfo 92 | 93 | // whitelisted peers 94 | wlpeers map[peer.ID]struct{} 95 | 96 | logKey string 97 | 98 | cplFnc func(peer.ID) int 99 | 100 | cplPeerGroups map[int]map[peer.ID][]PeerIPGroupKey 101 | } 102 | 103 | // NewFilter creates a Filter for Peer Diversity. 
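// A minimal usage sketch (illustrative only; `myGroupFilter` stands in for any
// caller-supplied PeerIPGroupFilter implementation and `cplOf` for a function
// computing a peer's common prefix length relative to the local DHT ID):
//
//	f, err := NewFilter(myGroupFilter, "appname", cplOf)
//	if err != nil {
//		return err
//	}
//	if f.TryAdd(p) {
//		// every address of p mapped to an allowed IP group; Increment was
//		// called for each of those groups.
//	}
//	f.Remove(p) // calls Decrement for each group previously recorded for p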
104 | func NewFilter(pgm PeerIPGroupFilter, logKey string, cplFnc func(peer.ID) int) (*Filter, error) { 105 | if pgm == nil { 106 | return nil, errors.New("peergroup implementation can not be nil") 107 | } 108 | 109 | return &Filter{ 110 | pgm: pgm, 111 | peerGroups: make(map[peer.ID][]PeerGroupInfo), 112 | wlpeers: make(map[peer.ID]struct{}), 113 | logKey: logKey, 114 | cplFnc: cplFnc, 115 | cplPeerGroups: make(map[int]map[peer.ID][]PeerIPGroupKey), 116 | }, nil 117 | } 118 | 119 | func (f *Filter) Remove(p peer.ID) { 120 | f.mu.Lock() 121 | defer f.mu.Unlock() 122 | 123 | cpl := f.cplFnc(p) 124 | 125 | for _, info := range f.peerGroups[p] { 126 | f.pgm.Decrement(info) 127 | } 128 | f.peerGroups[p] = nil 129 | delete(f.peerGroups, p) 130 | delete(f.cplPeerGroups[cpl], p) 131 | 132 | if len(f.cplPeerGroups[cpl]) == 0 { 133 | delete(f.cplPeerGroups, cpl) 134 | } 135 | } 136 | 137 | // TryAdd attempts to add the peer to the Filter state and returns true if it's successful, false otherwise. 138 | func (f *Filter) TryAdd(p peer.ID) bool { 139 | f.mu.Lock() 140 | defer f.mu.Unlock() 141 | 142 | if _, ok := f.wlpeers[p]; ok { 143 | return true 144 | } 145 | 146 | cpl := f.cplFnc(p) 147 | 148 | // don't allow peers for which we can't determine addresses. 149 | addrs := f.pgm.PeerAddresses(p) 150 | if len(addrs) == 0 { 151 | dfLog.Debugw("no addresses found for peer", "appKey", f.logKey, "peer", p) 152 | return false 153 | } 154 | 155 | peerGroups := make([]PeerGroupInfo, 0, len(addrs)) 156 | for _, a := range addrs { 157 | ip, err := manet.ToIP(a) 158 | if err != nil { 159 | dfLog.Errorw("failed to parse IP from multiaddr", "appKey", f.logKey, 160 | "multiaddr", a.String(), "err", err) 161 | return false 162 | } 163 | 164 | // reject the peer if we can't determine a grouping for one of its addresses. 165 | key := IPGroupKey(ip) 166 | if len(key) == 0 { 167 | dfLog.Errorw("group key is empty", "appKey", f.logKey, "ip", ip.String(), "peer", p) 168 | return false 169 | } 170 | group := PeerGroupInfo{Id: p, Cpl: cpl, IPGroupKey: key} 171 | 172 | if !f.pgm.Allow(group) { 173 | return false 174 | } 175 | 176 | peerGroups = append(peerGroups, group) 177 | } 178 | 179 | if _, ok := f.cplPeerGroups[cpl]; !ok { 180 | f.cplPeerGroups[cpl] = make(map[peer.ID][]PeerIPGroupKey) 181 | } 182 | 183 | for _, g := range peerGroups { 184 | f.pgm.Increment(g) 185 | 186 | f.peerGroups[p] = append(f.peerGroups[p], g) 187 | f.cplPeerGroups[cpl][p] = append(f.cplPeerGroups[cpl][p], g.IPGroupKey) 188 | } 189 | 190 | return true 191 | } 192 | 193 | // WhitelistPeers will always allow the given peers. 194 | func (f *Filter) WhitelistPeers(peers ...peer.ID) { 195 | f.mu.Lock() 196 | defer f.mu.Unlock() 197 | 198 | for _, p := range peers { 199 | f.wlpeers[p] = struct{}{} 200 | } 201 | } 202 | 203 | // returns the PeerIPGroupKey to which the given IP belongs.
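// For example (group keys as produced by the rules below; the concrete values
// match those asserted in TestIPGroupKey):
//
//	IPGroupKey(net.ParseIP("17.111.0.1"))  // "17.0.0.0"    – inside a legacy Class A /8 allocation
//	IPGroupKey(net.ParseIP("192.168.1.1")) // "192.168.0.0" – ordinary IPv4, grouped by /16 prefix
//
// IPv6 addresses are grouped by the ASN of the address; when the ASN cannot be
// determined, a fixed "unknown ASN" key is used as a fallback.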
204 | func IPGroupKey(ip net.IP) PeerIPGroupKey { 205 | switch bz := ip.To4(); bz { 206 | case nil: 207 | // IPv6 address -> get ASN 208 | s := asnutil.AsnForIPv6(ip) 209 | 210 | // if no ASN is found then fall back on using the /32 prefix 211 | if s == 0 { 212 | dfLog.Debugw("ASN not known", "ip", ip) 213 | return PeerIPGroupKey(fmt.Sprintf("unknown ASN: %s", net.CIDRMask(32, 128).String())) 214 | } 215 | 216 | return PeerIPGroupKey(strconv.FormatUint(uint64(s), 10)) 217 | default: 218 | // If it belongs to a legacy Class A /8 block, we return the /8 prefix as the key 219 | rs, _ := legacyCidrs.ContainingNetworks(ip) 220 | if len(rs) != 0 { 221 | key := ip.Mask(net.IPv4Mask(255, 0, 0, 0)).String() 222 | return PeerIPGroupKey(key) 223 | } 224 | 225 | // otherwise -> /16 prefix 226 | key := ip.Mask(net.IPv4Mask(255, 255, 0, 0)).String() 227 | return PeerIPGroupKey(key) 228 | } 229 | } 230 | 231 | // CplDiversityStats contains the peer diversity stats for a Cpl. 232 | type CplDiversityStats struct { 233 | Cpl int 234 | Peers map[peer.ID][]PeerIPGroupKey 235 | } 236 | 237 | // GetDiversityStats returns the diversity stats for each CPL and is sorted by the CPL. 238 | func (f *Filter) GetDiversityStats() []CplDiversityStats { 239 | f.mu.Lock() 240 | defer f.mu.Unlock() 241 | 242 | stats := make([]CplDiversityStats, 0, len(f.cplPeerGroups)) 243 | 244 | var sortedCpls []int 245 | for cpl := range f.cplPeerGroups { 246 | sortedCpls = append(sortedCpls, cpl) 247 | } 248 | sort.Ints(sortedCpls) 249 | 250 | for _, cpl := range sortedCpls { 251 | ps := make(map[peer.ID][]PeerIPGroupKey, len(f.cplPeerGroups[cpl])) 252 | cd := CplDiversityStats{cpl, ps} 253 | 254 | for p, groups := range f.cplPeerGroups[cpl] { 255 | ps[p] = groups 256 | } 257 | stats = append(stats, cd) 258 | } 259 | 260 | return stats 261 | } 262 | -------------------------------------------------------------------------------- /peerdiversity/filter_test.go: -------------------------------------------------------------------------------- 1 | package peerdiversity 2 | 3 | import ( 4 | "net" 5 | "strconv" 6 | "sync" 7 | "testing" 8 | 9 | asnutil "github.com/libp2p/go-libp2p-asn-util" 10 | "github.com/libp2p/go-libp2p/core/peer" 11 | 12 | ma "github.com/multiformats/go-multiaddr" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | var _ PeerIPGroupFilter = (*mockPeerGroupFilter)(nil) 17 | 18 | type mockPeerGroupFilter struct { 19 | mu sync.Mutex 20 | increments map[peer.ID]struct{} 21 | decrements map[peer.ID]struct{} 22 | 23 | peerAddressFunc func(p peer.ID) []ma.Multiaddr 24 | allowFnc func(g PeerGroupInfo) bool 25 | } 26 | 27 | func (m *mockPeerGroupFilter) Allow(g PeerGroupInfo) (allow bool) { 28 | return m.allowFnc(g) 29 | } 30 | 31 | func (m *mockPeerGroupFilter) PeerAddresses(p peer.ID) []ma.Multiaddr { 32 | return m.peerAddressFunc(p) 33 | } 34 | 35 | func (m *mockPeerGroupFilter) Increment(g PeerGroupInfo) { 36 | m.mu.Lock() 37 | defer m.mu.Unlock() 38 | 39 | m.increments[g.Id] = struct{}{} 40 | } 41 | 42 | func (m *mockPeerGroupFilter) Decrement(g PeerGroupInfo) { 43 | m.mu.Lock() 44 | defer m.mu.Unlock() 45 | 46 | m.decrements[g.Id] = struct{}{} 47 | } 48 | 49 | func newMockPeerGroupFilter() *mockPeerGroupFilter { 50 | m := &mockPeerGroupFilter{ 51 | increments: map[peer.ID]struct{}{}, 52 | decrements: map[peer.ID]struct{}{}, 53 | 54 | peerAddressFunc: func(p peer.ID) []ma.Multiaddr { 55 | return nil 56 | }, 57 | allowFnc: func(g PeerGroupInfo) bool { 58 | return false 59 | }, 60 | } 61 | 62 | return m 63 | } 64 | 65 | func
TestDiversityFilter(t *testing.T) { 66 | tcs := map[string]struct { 67 | peersForTest func() []peer.ID 68 | mFnc func(m *mockPeerGroupFilter) 69 | fFnc func(f *Filter) 70 | allowed map[peer.ID]bool 71 | isWhitelisted bool 72 | }{ 73 | "simple allow": { 74 | peersForTest: func() []peer.ID { 75 | return []peer.ID{"p1", "p2"} 76 | }, 77 | mFnc: func(m *mockPeerGroupFilter) { 78 | m.peerAddressFunc = func(id peer.ID) []ma.Multiaddr { 79 | return []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0")} 80 | } 81 | m.allowFnc = func(g PeerGroupInfo) bool { return g.Id == "p1" } 82 | }, 83 | allowed: map[peer.ID]bool{ 84 | "p1": true, 85 | "p2": false, 86 | }, 87 | fFnc: func(f *Filter) {}, 88 | }, 89 | 90 | "one address is allowed, one isn't": { 91 | peersForTest: func() []peer.ID { 92 | return []peer.ID{"p1", "p2"} 93 | }, 94 | mFnc: func(m *mockPeerGroupFilter) { 95 | m.peerAddressFunc = func(id peer.ID) []ma.Multiaddr { 96 | if id == "p1" { 97 | return []ma.Multiaddr{ 98 | ma.StringCast("/ip4/127.0.0.1/tcp/0"), 99 | ma.StringCast("/ip4/127.0.0.1/tcp/0"), 100 | } 101 | } 102 | return []ma.Multiaddr{ 103 | ma.StringCast("/ip4/127.0.0.1/tcp/0"), 104 | ma.StringCast("/ip4/192.168.1.1/tcp/0"), 105 | } 106 | } 107 | m.allowFnc = func(g PeerGroupInfo) bool { return g.IPGroupKey == "127.0.0.0" } 108 | }, 109 | allowed: map[peer.ID]bool{ 110 | "p1": true, 111 | "p2": false, 112 | }, 113 | fFnc: func(f *Filter) {}, 114 | }, 115 | 116 | "whitelisted peers": { 117 | peersForTest: func() []peer.ID { 118 | return []peer.ID{"p1", "p2"} 119 | }, 120 | mFnc: func(m *mockPeerGroupFilter) { 121 | m.peerAddressFunc = func(id peer.ID) []ma.Multiaddr { 122 | if id == "p1" { 123 | return []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0")} 124 | } else { 125 | return []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0")} 126 | } 127 | } 128 | 129 | m.allowFnc = func(g PeerGroupInfo) bool { 130 | return false 131 | } 132 | }, 133 | allowed: map[peer.ID]bool{ 134 | "p1": false, 135 | "p2": true, 136 | }, 137 | fFnc: func(f *Filter) { 138 | f.WhitelistPeers(peer.ID("p2")) 139 | }, 140 | isWhitelisted: true, 141 | }, 142 | "whitelist peers works even if peer has no addresses": { 143 | peersForTest: func() []peer.ID { 144 | return []peer.ID{"p1", "p2"} 145 | }, 146 | mFnc: func(m *mockPeerGroupFilter) { 147 | m.peerAddressFunc = func(id peer.ID) []ma.Multiaddr { 148 | if id == "p1" { 149 | return []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0")} 150 | } else { 151 | return nil 152 | } 153 | } 154 | 155 | m.allowFnc = func(g PeerGroupInfo) bool { 156 | return false 157 | } 158 | }, 159 | allowed: map[peer.ID]bool{ 160 | "p1": false, 161 | "p2": true, 162 | }, 163 | fFnc: func(f *Filter) { 164 | f.WhitelistPeers(peer.ID("p2")) 165 | }, 166 | isWhitelisted: true, 167 | }, 168 | 169 | "peer has no addresses": { 170 | peersForTest: func() []peer.ID { 171 | return []peer.ID{"p1"} 172 | }, 173 | mFnc: func(m *mockPeerGroupFilter) { 174 | m.peerAddressFunc = func(id peer.ID) []ma.Multiaddr { 175 | return nil 176 | } 177 | m.allowFnc = func(g PeerGroupInfo) bool { 178 | return true 179 | } 180 | }, 181 | allowed: map[peer.ID]bool{ 182 | "p1": false, 183 | }, 184 | fFnc: func(f *Filter) {}, 185 | }, 186 | } 187 | 188 | for name, tc := range tcs { 189 | t.Run(name, func(t *testing.T) { 190 | m := newMockPeerGroupFilter() 191 | tc.mFnc(m) 192 | f, err := NewFilter(m, "test", func(p peer.ID) int { return 1 }) 193 | require.NoError(t, err, name) 194 | tc.fFnc(f) 195 | 196 | for _, p := range tc.peersForTest() { 197 | b := 
f.TryAdd(p) 198 | v, ok := tc.allowed[p] 199 | require.True(t, ok, string(p)) 200 | require.Equal(t, v, b, string(p)) 201 | 202 | if v && !tc.isWhitelisted { 203 | m.mu.Lock() 204 | _, ok := m.increments[p] 205 | require.True(t, ok) 206 | m.mu.Unlock() 207 | 208 | f.Remove(p) 209 | 210 | m.mu.Lock() 211 | _, ok = m.decrements[p] 212 | require.True(t, ok) 213 | m.mu.Unlock() 214 | } else if v && tc.isWhitelisted { 215 | m.mu.Lock() 216 | _, ok := m.increments[p] 217 | require.False(t, ok) 218 | m.mu.Unlock() 219 | 220 | f.Remove(p) 221 | 222 | m.mu.Lock() 223 | _, ok = m.decrements[p] 224 | require.False(t, ok) 225 | m.mu.Unlock() 226 | } 227 | } 228 | }) 229 | } 230 | } 231 | 232 | func TestIPGroupKey(t *testing.T) { 233 | // case 1 legacy /8 234 | ip := net.ParseIP("17.111.0.1") 235 | require.NotNil(t, ip.To4()) 236 | g := IPGroupKey(ip) 237 | require.Equal(t, "17.0.0.0", string(g)) 238 | 239 | // case2 ip4 /16 240 | ip = net.ParseIP("192.168.1.1") 241 | require.NotNil(t, ip.To4()) 242 | g = IPGroupKey(ip) 243 | require.Equal(t, "192.168.0.0", string(g)) 244 | 245 | // case3 ipv6 246 | ip = net.ParseIP("2a03:2880:f003:c07:face:b00c::2") 247 | g = IPGroupKey(ip) 248 | require.Equal(t, strconv.FormatUint(uint64(asnutil.AsnForIPv6(ip)), 10), string(g)) 249 | } 250 | 251 | func TestGetDiversityStats(t *testing.T) { 252 | p1 := peer.ID("a") 253 | p2 := peer.ID("b") 254 | 255 | p3 := peer.ID("aa") 256 | p4 := peer.ID("bb") 257 | 258 | paddrs := map[peer.ID][]ma.Multiaddr{ 259 | p1: {ma.StringCast("/ip4/17.0.0.1/tcp/0"), ma.StringCast("/ip4/19.1.1.0")}, 260 | p2: {ma.StringCast("/ip4/18.1.0.1/tcp/0")}, 261 | p3: {ma.StringCast("/ip4/19.2.0.1/tcp/0")}, 262 | p4: {ma.StringCast("/ip4/20.3.0.1/tcp/0")}, 263 | } 264 | 265 | m := newMockPeerGroupFilter() 266 | m.peerAddressFunc = func(p peer.ID) []ma.Multiaddr { 267 | return paddrs[p] 268 | } 269 | m.allowFnc = func(g PeerGroupInfo) bool { 270 | return true 271 | } 272 | 273 | f, err := NewFilter(m, "test", func(p peer.ID) int { 274 | return len(string(p)) 275 | }) 276 | require.NoError(t, err) 277 | 278 | require.True(t, f.TryAdd(p1)) 279 | require.True(t, f.TryAdd(p2)) 280 | require.True(t, f.TryAdd(p3)) 281 | require.True(t, f.TryAdd(p4)) 282 | 283 | stats := f.GetDiversityStats() 284 | require.Len(t, stats, 2) 285 | require.Equal(t, stats[0].Cpl, 1) 286 | require.Len(t, stats[0].Peers[p1], 2) 287 | require.Len(t, stats[0].Peers[p2], 1) 288 | 289 | require.Equal(t, stats[1].Cpl, 2) 290 | require.Len(t, stats[1].Peers[p3], 1) 291 | require.Len(t, stats[1].Peers[p4], 1) 292 | } 293 | -------------------------------------------------------------------------------- /sorting.go: -------------------------------------------------------------------------------- 1 | package kbucket 2 | 3 | import ( 4 | "container/list" 5 | "sort" 6 | 7 | "github.com/libp2p/go-libp2p/core/peer" 8 | ) 9 | 10 | // A helper struct to sort peers by their distance to the local node 11 | type peerDistance struct { 12 | p peer.ID 13 | distance ID 14 | } 15 | 16 | // peerDistanceSorter implements sort.Interface to sort peers by xor distance 17 | type peerDistanceSorter struct { 18 | peers []peerDistance 19 | target ID 20 | } 21 | 22 | func (pds *peerDistanceSorter) Len() int { return len(pds.peers) } 23 | func (pds *peerDistanceSorter) Swap(a, b int) { 24 | pds.peers[a], pds.peers[b] = pds.peers[b], pds.peers[a] 25 | } 26 | func (pds *peerDistanceSorter) Less(a, b int) bool { 27 | return pds.peers[a].distance.less(pds.peers[b].distance) 28 | } 29 | 30 | // Append the peer.ID to the 
sorter's slice. It may no longer be sorted. 31 | func (pds *peerDistanceSorter) appendPeer(p peer.ID, pDhtId ID) { 32 | pds.peers = append(pds.peers, peerDistance{ 33 | p: p, 34 | distance: xor(pds.target, pDhtId), 35 | }) 36 | } 37 | 38 | // Append the peer.ID values in the list to the sorter's slice. It may no longer be sorted. 39 | func (pds *peerDistanceSorter) appendPeersFromList(l *list.List) { 40 | for e := l.Front(); e != nil; e = e.Next() { 41 | pds.appendPeer(e.Value.(*PeerInfo).Id, e.Value.(*PeerInfo).dhtId) 42 | } 43 | } 44 | 45 | func (pds *peerDistanceSorter) sort() { 46 | sort.Sort(pds) 47 | } 48 | 49 | // SortClosestPeers Sort the given peers by their ascending distance from the target. A new slice is returned. 50 | func SortClosestPeers(peers []peer.ID, target ID) []peer.ID { 51 | sorter := peerDistanceSorter{ 52 | peers: make([]peerDistance, 0, len(peers)), 53 | target: target, 54 | } 55 | for _, p := range peers { 56 | sorter.appendPeer(p, ConvertPeerID(p)) 57 | } 58 | sorter.sort() 59 | out := make([]peer.ID, 0, sorter.Len()) 60 | for _, p := range sorter.peers { 61 | out = append(out, p.p) 62 | } 63 | return out 64 | } 65 | -------------------------------------------------------------------------------- /table.go: -------------------------------------------------------------------------------- 1 | // Package kbucket implements a kademlia 'k-bucket' routing table. 2 | package kbucket 3 | 4 | import ( 5 | "context" 6 | "errors" 7 | "fmt" 8 | "sync" 9 | "time" 10 | 11 | "github.com/libp2p/go-libp2p/core/peer" 12 | "github.com/libp2p/go-libp2p/core/peerstore" 13 | 14 | "github.com/libp2p/go-libp2p-kbucket/peerdiversity" 15 | 16 | logging "github.com/ipfs/go-log/v2" 17 | ) 18 | 19 | var log = logging.Logger("table") 20 | 21 | var ( 22 | ErrPeerRejectedHighLatency = errors.New("peer rejected; latency too high") 23 | ErrPeerRejectedNoCapacity = errors.New("peer rejected; insufficient capacity") 24 | ) 25 | 26 | // RoutingTable defines the routing table. 27 | type RoutingTable struct { 28 | // the routing table context 29 | ctx context.Context 30 | // function to cancel the RT context 31 | ctxCancel context.CancelFunc 32 | 33 | // ID of the local peer 34 | local ID 35 | 36 | // Blanket lock, refine later for better performance 37 | tabLock sync.RWMutex 38 | 39 | // latency metrics 40 | metrics peerstore.Metrics 41 | 42 | // Maximum acceptable latency for peers in this cluster 43 | maxLatency time.Duration 44 | 45 | // kBuckets define all the fingers to other nodes. 46 | buckets []*bucket 47 | bucketsize int 48 | 49 | cplRefreshLk sync.RWMutex 50 | cplRefreshedAt map[uint]time.Time 51 | 52 | // notification functions 53 | PeerRemoved func(peer.ID) 54 | PeerAdded func(peer.ID) 55 | 56 | // usefulnessGracePeriod is the maximum grace period we will give to a 57 | // peer in the bucket to be useful to us, failing which, we will evict 58 | // it to make place for a new peer if the bucket is full 59 | usefulnessGracePeriod time.Duration 60 | 61 | df *peerdiversity.Filter 62 | } 63 | 64 | // NewRoutingTable creates a new routing table with a given bucketsize, local ID, and latency tolerance. 
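// An illustrative construction sketch (not part of the original source). The
// bucket size, latency bound and grace period below are assumed example values,
// `self` stands for the local peer.ID, and pstore is go-libp2p's peerstore
// package as imported in this repo's tests:
//
//	m := pstore.NewMetrics()
//	rt, err := NewRoutingTable(20, ConvertPeerID(self), 10*time.Second, m, time.Hour, nil)
//	if err != nil {
//		// handle the error
//	}
//	defer rt.Close()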
65 | func NewRoutingTable(bucketsize int, localID ID, latency time.Duration, m peerstore.Metrics, usefulnessGracePeriod time.Duration, 66 | df *peerdiversity.Filter, 67 | ) (*RoutingTable, error) { 68 | rt := &RoutingTable{ 69 | buckets: []*bucket{newBucket()}, 70 | bucketsize: bucketsize, 71 | local: localID, 72 | 73 | maxLatency: latency, 74 | metrics: m, 75 | 76 | cplRefreshedAt: make(map[uint]time.Time), 77 | 78 | PeerRemoved: func(p peer.ID) { 79 | log.Debugw("peer removed", "peer", p) 80 | }, 81 | PeerAdded: func(p peer.ID) { 82 | log.Debugw("peer added", "peer", p) 83 | }, 84 | 85 | usefulnessGracePeriod: usefulnessGracePeriod, 86 | 87 | df: df, 88 | } 89 | 90 | rt.ctx, rt.ctxCancel = context.WithCancel(context.Background()) 91 | 92 | return rt, nil 93 | } 94 | 95 | // Close shuts down the Routing Table & all associated processes. 96 | // It is safe to call this multiple times. 97 | func (rt *RoutingTable) Close() error { 98 | rt.ctxCancel() 99 | return nil 100 | } 101 | 102 | // NPeersForCpl returns the number of peers we have for a given Cpl 103 | func (rt *RoutingTable) NPeersForCpl(cpl uint) int { 104 | rt.tabLock.RLock() 105 | defer rt.tabLock.RUnlock() 106 | 107 | // it's in the last bucket 108 | if int(cpl) >= len(rt.buckets)-1 { 109 | count := 0 110 | b := rt.buckets[len(rt.buckets)-1] 111 | for _, p := range b.peers() { 112 | if CommonPrefixLen(rt.local, p.dhtId) == int(cpl) { 113 | count++ 114 | } 115 | } 116 | return count 117 | } else { 118 | return rt.buckets[cpl].len() 119 | } 120 | } 121 | 122 | // UsefulNewPeer verifies whether the given peer.ID would be a good fit for the 123 | // routing table. It returns true if the peer isn't in the routing table yet, if 124 | // the bucket corresponding to peer.ID isn't full, if it contains replaceable 125 | // peers or if it is the last bucket and adding a peer would unfold it. 126 | func (rt *RoutingTable) UsefulNewPeer(p peer.ID) bool { 127 | rt.tabLock.RLock() 128 | defer rt.tabLock.RUnlock() 129 | 130 | // bucket corresponding to p 131 | bucketID := rt.bucketIdForPeer(p) 132 | bucket := rt.buckets[bucketID] 133 | 134 | if bucket.getPeer(p) != nil { 135 | // peer already exists in the routing table, so it isn't useful 136 | return false 137 | } 138 | 139 | // bucket isn't full 140 | if bucket.len() < rt.bucketsize { 141 | return true 142 | } 143 | 144 | // bucket is full, check if it contains replaceable peers 145 | for e := bucket.list.Front(); e != nil; e = e.Next() { 146 | peer := e.Value.(*PeerInfo) 147 | if peer.replaceable { 148 | // at least 1 peer is replaceable 149 | return true 150 | } 151 | } 152 | 153 | // the last bucket potentially contains peer ids with different CPL, 154 | // and can be split in 2 buckets if needed 155 | if bucketID == len(rt.buckets)-1 { 156 | peers := bucket.peers() 157 | cpl := CommonPrefixLen(rt.local, ConvertPeerID(p)) 158 | for _, peer := range peers { 159 | // if at least 2 peers have a different CPL, the new peer is 160 | // useful and will trigger a bucket split 161 | if CommonPrefixLen(rt.local, peer.dhtId) != cpl { 162 | return true 163 | } 164 | } 165 | } 166 | 167 | // the appropriate bucket is full of non replaceable peers 168 | return false 169 | } 170 | 171 | // TryAddPeer tries to add a peer to the Routing table. 172 | // If the peer ALREADY exists in the Routing Table and has been queried before, this call is a no-op. 173 | // If the peer ALREADY exists in the Routing Table but hasn't been queried before, we set it's LastUsefulAt value to 174 | // the current time. 
This needs to be done because we don't mark peers as "Useful" (by setting the LastUsefulAt value)
175 | // when we first connect to them.
176 | //
177 | // If the peer is a queryPeer i.e. we queried it or it queried us, we set the LastSuccessfulOutboundQuery to the current time.
178 | // If the peer is just a peer that we connect to/it connected to us without any DHT query, we consider it as having
179 | // no LastSuccessfulOutboundQuery.
180 | //
181 | //
182 | // If the logical bucket to which the peer belongs is full and it's not the last bucket, we try to replace an existing peer
183 | // whose LastSuccessfulOutboundQuery is above the maximum allowed threshold in that bucket with the new peer.
184 | // If no such peer exists in that bucket, we do NOT add the peer to the Routing Table and return error "ErrPeerRejectedNoCapacity".
185 |
186 | // TryAddPeer returns a boolean value set to true if the peer was newly added to the Routing Table, false otherwise.
187 | // It also returns any error that occurred while adding the peer to the Routing Table. If the error is not nil,
188 | // the boolean value will ALWAYS be false, i.e. the peer won't be added to the Routing Table if it's not already there.
189 | //
190 | // A return value of false with error=nil indicates that the peer ALREADY exists in the Routing Table.
191 | func (rt *RoutingTable) TryAddPeer(p peer.ID, queryPeer bool, isReplaceable bool) (bool, error) {
192 | rt.tabLock.Lock()
193 | defer rt.tabLock.Unlock()
194 |
195 | return rt.addPeer(p, queryPeer, isReplaceable)
196 | }
197 |
198 | // locking is the responsibility of the caller
199 | func (rt *RoutingTable) addPeer(p peer.ID, queryPeer bool, isReplaceable bool) (bool, error) {
200 | bucketID := rt.bucketIdForPeer(p)
201 | bucket := rt.buckets[bucketID]
202 |
203 | now := time.Now()
204 | var lastUsefulAt time.Time
205 | if queryPeer {
206 | lastUsefulAt = now
207 | }
208 |
209 | // peer already exists in the Routing Table.
210 | if peerInfo := bucket.getPeer(p); peerInfo != nil {
211 | // if we're querying the peer for the first time after adding it, let's give it a
212 | // usefulness bump. This will ONLY happen once.
213 | if peerInfo.LastUsefulAt.IsZero() && queryPeer {
214 | peerInfo.LastUsefulAt = lastUsefulAt
215 | }
216 | return false, nil
217 | }
218 |
219 | // peer's latency threshold is NOT acceptable
220 | if rt.metrics.LatencyEWMA(p) > rt.maxLatency {
221 | // Connection doesn't meet requirements, skip!
222 | return false, ErrPeerRejectedHighLatency
223 | }
224 |
225 | // add it to the diversity filter for now.
226 | // if we aren't able to find a place for the peer in the table,
227 | // we will simply remove it from the Filter later.
228 | if rt.df != nil {
229 | if !rt.df.TryAdd(p) {
230 | return false, errors.New("peer rejected by the diversity filter")
231 | }
232 | }
233 |
234 | // We have enough space in the bucket (whether spawned or grouped).
235 | if bucket.len() < rt.bucketsize {
236 | bucket.pushFront(&PeerInfo{
237 | Id: p,
238 | LastUsefulAt: lastUsefulAt,
239 | LastSuccessfulOutboundQueryAt: now,
240 | AddedAt: now,
241 | dhtId: ConvertPeerID(p),
242 | replaceable: isReplaceable,
243 | })
244 | rt.PeerAdded(p)
245 | return true, nil
246 | }
247 |
248 | if bucketID == len(rt.buckets)-1 {
249 | // if the bucket is too large and this is the last bucket (i.e. wildcard), unfold it.
250 | rt.nextBucket()
251 | // the structure of the table has changed, so let's recheck if the peer now has a dedicated bucket.
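	// (Illustrative example of the recheck: if the local ID starts with 000...
	// and the last bucket, index 1, held peers with prefixes 01... (cpl 1) and
	// 001... (cpl 2 or more), the split moves the 001... peers into a new bucket,
	// and p is re-mapped below to whichever bucket now matches its cpl.)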
252 | bucketID = rt.bucketIdForPeer(p) 253 | bucket = rt.buckets[bucketID] 254 | 255 | // push the peer only if the bucket isn't overflowing after slitting 256 | if bucket.len() < rt.bucketsize { 257 | bucket.pushFront(&PeerInfo{ 258 | Id: p, 259 | LastUsefulAt: lastUsefulAt, 260 | LastSuccessfulOutboundQueryAt: now, 261 | AddedAt: now, 262 | dhtId: ConvertPeerID(p), 263 | replaceable: isReplaceable, 264 | }) 265 | rt.PeerAdded(p) 266 | return true, nil 267 | } 268 | } 269 | 270 | // the bucket to which the peer belongs is full. Let's try to find a peer 271 | // in that bucket which is replaceable. 272 | // we don't really need a stable sort here as it dosen't matter which peer we evict 273 | // as long as it's a replaceable peer. 274 | replaceablePeer := bucket.min(func(p1 *PeerInfo, p2 *PeerInfo) bool { 275 | return p1.replaceable 276 | }) 277 | 278 | if replaceablePeer != nil && replaceablePeer.replaceable { 279 | // we found a replaceable peer, let's replace it with the new peer. 280 | 281 | // add new peer to the bucket. needs to happen before we remove the replaceable peer 282 | // as if the bucket size is 1, we will end up removing the only peer, and deleting 283 | // the bucket. 284 | bucket.pushFront(&PeerInfo{ 285 | Id: p, 286 | LastUsefulAt: lastUsefulAt, 287 | LastSuccessfulOutboundQueryAt: now, 288 | AddedAt: now, 289 | dhtId: ConvertPeerID(p), 290 | replaceable: isReplaceable, 291 | }) 292 | rt.PeerAdded(p) 293 | 294 | // remove the replaceable peer 295 | rt.removePeer(replaceablePeer.Id) 296 | return true, nil 297 | } 298 | 299 | // we weren't able to find place for the peer, remove it from the filter state. 300 | if rt.df != nil { 301 | rt.df.Remove(p) 302 | } 303 | return false, ErrPeerRejectedNoCapacity 304 | } 305 | 306 | // MarkAllPeersIrreplaceable marks all peers in the routing table as irreplaceable 307 | // This means that we will never replace an existing peer in the table to make space for a new peer. 308 | // However, they can still be removed by calling the `RemovePeer` API. 309 | func (rt *RoutingTable) MarkAllPeersIrreplaceable() { 310 | rt.tabLock.Lock() 311 | defer rt.tabLock.Unlock() 312 | 313 | for i := range rt.buckets { 314 | b := rt.buckets[i] 315 | b.updateAllWith(func(p *PeerInfo) { 316 | p.replaceable = false 317 | }) 318 | } 319 | } 320 | 321 | // GetPeerInfos returns the peer information that we've stored in the buckets 322 | func (rt *RoutingTable) GetPeerInfos() []PeerInfo { 323 | rt.tabLock.RLock() 324 | defer rt.tabLock.RUnlock() 325 | 326 | var pis []PeerInfo 327 | for _, b := range rt.buckets { 328 | pis = append(pis, b.peers()...) 329 | } 330 | return pis 331 | } 332 | 333 | // UpdateLastSuccessfulOutboundQueryAt updates the LastSuccessfulOutboundQueryAt time of the peer. 334 | // Returns true if the update was successful, false otherwise. 335 | func (rt *RoutingTable) UpdateLastSuccessfulOutboundQueryAt(p peer.ID, t time.Time) bool { 336 | rt.tabLock.Lock() 337 | defer rt.tabLock.Unlock() 338 | 339 | bucketID := rt.bucketIdForPeer(p) 340 | bucket := rt.buckets[bucketID] 341 | 342 | if pc := bucket.getPeer(p); pc != nil { 343 | pc.LastSuccessfulOutboundQueryAt = t 344 | return true 345 | } 346 | return false 347 | } 348 | 349 | // UpdateLastUsefulAt updates the LastUsefulAt time of the peer. 350 | // Returns true if the update was successful, false otherwise. 
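// An illustrative call (not part of the original source), recording that peer p
// has just been useful to us:
//
//	rt.UpdateLastUsefulAt(p, time.Now())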
351 | func (rt *RoutingTable) UpdateLastUsefulAt(p peer.ID, t time.Time) bool { 352 | rt.tabLock.Lock() 353 | defer rt.tabLock.Unlock() 354 | 355 | bucketID := rt.bucketIdForPeer(p) 356 | bucket := rt.buckets[bucketID] 357 | 358 | if pc := bucket.getPeer(p); pc != nil { 359 | pc.LastUsefulAt = t 360 | return true 361 | } 362 | return false 363 | } 364 | 365 | // RemovePeer should be called when the caller is sure that a peer is not useful for queries. 366 | // For eg: the peer could have stopped supporting the DHT protocol. 367 | // It evicts the peer from the Routing Table. 368 | func (rt *RoutingTable) RemovePeer(p peer.ID) { 369 | rt.tabLock.Lock() 370 | defer rt.tabLock.Unlock() 371 | rt.removePeer(p) 372 | } 373 | 374 | // locking is the responsibility of the caller 375 | func (rt *RoutingTable) removePeer(p peer.ID) bool { 376 | bucketID := rt.bucketIdForPeer(p) 377 | bucket := rt.buckets[bucketID] 378 | if bucket.remove(p) { 379 | if rt.df != nil { 380 | rt.df.Remove(p) 381 | } 382 | for { 383 | lastBucketIndex := len(rt.buckets) - 1 384 | 385 | // remove the last bucket if it's empty and it isn't the only bucket we have 386 | if len(rt.buckets) > 1 && rt.buckets[lastBucketIndex].len() == 0 { 387 | rt.buckets[lastBucketIndex] = nil 388 | rt.buckets = rt.buckets[:lastBucketIndex] 389 | } else if len(rt.buckets) >= 2 && rt.buckets[lastBucketIndex-1].len() == 0 { 390 | // if the second last bucket just became empty, remove and replace it with the last bucket. 391 | rt.buckets[lastBucketIndex-1] = rt.buckets[lastBucketIndex] 392 | rt.buckets[lastBucketIndex] = nil 393 | rt.buckets = rt.buckets[:lastBucketIndex] 394 | } else { 395 | break 396 | } 397 | } 398 | 399 | // peer removed callback 400 | rt.PeerRemoved(p) 401 | return true 402 | } 403 | return false 404 | } 405 | 406 | func (rt *RoutingTable) nextBucket() { 407 | // This is the last bucket, which allegedly is a mixed bag containing peers not belonging in dedicated (unfolded) buckets. 408 | // _allegedly_ is used here to denote that *all* peers in the last bucket might feasibly belong to another bucket. 409 | // This could happen if e.g. we've unfolded 4 buckets, and all peers in folded bucket 5 really belong in bucket 8. 410 | bucket := rt.buckets[len(rt.buckets)-1] 411 | newBucket := bucket.split(len(rt.buckets)-1, rt.local) 412 | rt.buckets = append(rt.buckets, newBucket) 413 | 414 | // The newly formed bucket still contains too many peers. We probably just unfolded a empty bucket. 415 | if newBucket.len() >= rt.bucketsize { 416 | // Keep unfolding the table until the last bucket is not overflowing. 417 | rt.nextBucket() 418 | } 419 | } 420 | 421 | // Find a specific peer by ID or return nil 422 | func (rt *RoutingTable) Find(id peer.ID) peer.ID { 423 | srch := rt.NearestPeers(ConvertPeerID(id), 1) 424 | if len(srch) == 0 || srch[0] != id { 425 | return "" 426 | } 427 | return srch[0] 428 | } 429 | 430 | // NearestPeer returns a single peer that is nearest to the given ID 431 | func (rt *RoutingTable) NearestPeer(id ID) peer.ID { 432 | peers := rt.NearestPeers(id, 1) 433 | if len(peers) > 0 { 434 | return peers[0] 435 | } 436 | 437 | log.Debugf("NearestPeer: Returning nil, table size = %d", rt.Size()) 438 | return "" 439 | } 440 | 441 | // NearestPeers returns a list of the 'count' closest peers to the given ID 442 | func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.ID { 443 | // This is the number of bits _we_ share with the key. 
All peers in this 444 | // bucket share cpl bits with us and will therefore share at least cpl+1 445 | // bits with the given key. +1 because both the target and all peers in 446 | // this bucket differ from us in the cpl bit. 447 | cpl := CommonPrefixLen(id, rt.local) 448 | 449 | // It's assumed that this also protects the buckets. 450 | rt.tabLock.RLock() 451 | 452 | // Get bucket index or last bucket 453 | if cpl >= len(rt.buckets) { 454 | cpl = len(rt.buckets) - 1 455 | } 456 | 457 | pds := peerDistanceSorter{ 458 | peers: make([]peerDistance, 0, count+rt.bucketsize), 459 | target: id, 460 | } 461 | 462 | // Add peers from the target bucket (cpl+1 shared bits). 463 | pds.appendPeersFromList(rt.buckets[cpl].list) 464 | 465 | // If we're short, add peers from all buckets to the right. All buckets 466 | // to the right share exactly cpl bits (as opposed to the cpl+1 bits 467 | // shared by the peers in the cpl bucket). 468 | // 469 | // This is, unfortunately, less efficient than we'd like. We will switch 470 | // to a trie implementation eventually which will allow us to find the 471 | // closest N peers to any target key. 472 | 473 | if pds.Len() < count { 474 | for i := cpl + 1; i < len(rt.buckets); i++ { 475 | pds.appendPeersFromList(rt.buckets[i].list) 476 | } 477 | } 478 | 479 | // If we're still short, add in buckets that share _fewer_ bits. We can 480 | // do this bucket by bucket because each bucket will share 1 fewer bit 481 | // than the last. 482 | // 483 | // * bucket cpl-1: cpl-1 shared bits. 484 | // * bucket cpl-2: cpl-2 shared bits. 485 | // ... 486 | for i := cpl - 1; i >= 0 && pds.Len() < count; i-- { 487 | pds.appendPeersFromList(rt.buckets[i].list) 488 | } 489 | rt.tabLock.RUnlock() 490 | 491 | // Sort by distance to local peer 492 | pds.sort() 493 | 494 | if count < pds.Len() { 495 | pds.peers = pds.peers[:count] 496 | } 497 | 498 | out := make([]peer.ID, 0, pds.Len()) 499 | for _, p := range pds.peers { 500 | out = append(out, p.p) 501 | } 502 | 503 | return out 504 | } 505 | 506 | // Size returns the total number of peers in the routing table 507 | func (rt *RoutingTable) Size() int { 508 | var tot int 509 | rt.tabLock.RLock() 510 | for _, buck := range rt.buckets { 511 | tot += buck.len() 512 | } 513 | rt.tabLock.RUnlock() 514 | return tot 515 | } 516 | 517 | // ListPeers takes a RoutingTable and returns a list of all peers from all buckets in the table. 518 | func (rt *RoutingTable) ListPeers() []peer.ID { 519 | rt.tabLock.RLock() 520 | defer rt.tabLock.RUnlock() 521 | 522 | var peers []peer.ID 523 | for _, buck := range rt.buckets { 524 | peers = append(peers, buck.peerIds()...) 525 | } 526 | return peers 527 | } 528 | 529 | // Print prints a descriptive statement about the provided RoutingTable 530 | func (rt *RoutingTable) Print() { 531 | fmt.Printf("Routing Table, bs = %d, Max latency = %d\n", rt.bucketsize, rt.maxLatency) 532 | rt.tabLock.RLock() 533 | 534 | for i, b := range rt.buckets { 535 | fmt.Printf("\tbucket: %d\n", i) 536 | 537 | for e := b.list.Front(); e != nil; e = e.Next() { 538 | p := e.Value.(*PeerInfo).Id 539 | fmt.Printf("\t\t- %s %s\n", p.String(), rt.metrics.LatencyEWMA(p).String()) 540 | } 541 | } 542 | rt.tabLock.RUnlock() 543 | } 544 | 545 | // GetDiversityStats returns the diversity stats for the Routing Table if a diversity Filter 546 | // is configured. 
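// An illustrative read-out (not part of the original source); Cpl and Peers are
// the CplDiversityStats fields exercised in the peerdiversity tests:
//
//	for _, s := range rt.GetDiversityStats() {
//		fmt.Println("cpl:", s.Cpl, "distinct peers:", len(s.Peers))
//	}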
547 | func (rt *RoutingTable) GetDiversityStats() []peerdiversity.CplDiversityStats { 548 | if rt.df != nil { 549 | return rt.df.GetDiversityStats() 550 | } 551 | return nil 552 | } 553 | 554 | // the caller is responsible for the locking 555 | func (rt *RoutingTable) bucketIdForPeer(p peer.ID) int { 556 | peerID := ConvertPeerID(p) 557 | cpl := CommonPrefixLen(peerID, rt.local) 558 | bucketID := cpl 559 | if bucketID >= len(rt.buckets) { 560 | bucketID = len(rt.buckets) - 1 561 | } 562 | return bucketID 563 | } 564 | 565 | // maxCommonPrefix returns the maximum common prefix length between any peer in 566 | // the table and the current peer. 567 | func (rt *RoutingTable) maxCommonPrefix() uint { 568 | rt.tabLock.RLock() 569 | defer rt.tabLock.RUnlock() 570 | 571 | for i := len(rt.buckets) - 1; i >= 0; i-- { 572 | if rt.buckets[i].len() > 0 { 573 | return rt.buckets[i].maxCommonPrefix(rt.local) 574 | } 575 | } 576 | return 0 577 | } 578 | -------------------------------------------------------------------------------- /table_refresh.go: -------------------------------------------------------------------------------- 1 | package kbucket 2 | 3 | import ( 4 | "crypto/rand" 5 | "encoding/binary" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/libp2p/go-libp2p/core/peer" 10 | 11 | mh "github.com/multiformats/go-multihash" 12 | ) 13 | 14 | // maxCplForRefresh is the maximum cpl we support for refresh. 15 | // This limit exists because we can only generate 'maxCplForRefresh' bit prefixes for now. 16 | const maxCplForRefresh uint = 15 17 | 18 | // GetTrackedCplsForRefresh returns the Cpl's we are tracking for refresh. 19 | // Caller is free to modify the returned slice as it is a defensive copy. 20 | func (rt *RoutingTable) GetTrackedCplsForRefresh() []time.Time { 21 | maxCommonPrefix := rt.maxCommonPrefix() 22 | if maxCommonPrefix > maxCplForRefresh { 23 | maxCommonPrefix = maxCplForRefresh 24 | } 25 | 26 | rt.cplRefreshLk.RLock() 27 | defer rt.cplRefreshLk.RUnlock() 28 | 29 | cpls := make([]time.Time, maxCommonPrefix+1) 30 | for i := uint(0); i <= maxCommonPrefix; i++ { 31 | // defaults to the zero value if we haven't refreshed it yet. 32 | cpls[i] = rt.cplRefreshedAt[i] 33 | } 34 | return cpls 35 | } 36 | 37 | func randUint16() (uint16, error) { 38 | // Read a random prefix. 39 | var prefixBytes [2]byte 40 | _, err := rand.Read(prefixBytes[:]) 41 | return binary.BigEndian.Uint16(prefixBytes[:]), err 42 | } 43 | 44 | // GenRandPeerID generates a random peerID for a given Cpl 45 | func (rt *RoutingTable) GenRandPeerID(targetCpl uint) (peer.ID, error) { 46 | if targetCpl > maxCplForRefresh { 47 | return "", fmt.Errorf("cannot generate peer ID for Cpl greater than %d", maxCplForRefresh) 48 | } 49 | 50 | localPrefix := binary.BigEndian.Uint16(rt.local) 51 | 52 | // For host with ID `L`, an ID `K` belongs to a bucket with ID `B` ONLY IF CommonPrefixLen(L,K) is EXACTLY B. 53 | // Hence, to achieve a targetPrefix `T`, we must toggle the (T+1)th bit in L & then copy (T+1) bits from L 54 | // to our randomly generated prefix. 55 | toggledLocalPrefix := localPrefix ^ (uint16(0x8000) >> targetCpl) 56 | randPrefix, err := randUint16() 57 | if err != nil { 58 | return "", err 59 | } 60 | 61 | // Combine the toggled local prefix and the random bits at the correct offset 62 | // such that ONLY the first `targetCpl` bits match the local ID. 63 | mask := (^uint16(0)) << (16 - (targetCpl + 1)) 64 | targetPrefix := (toggledLocalPrefix & mask) | (randPrefix & ^mask) 65 | 66 | // Convert to a known peer ID. 
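	// (Worked example of the prefix math above, illustrative: for targetCpl=3
	// and a local prefix beginning 1011..., the toggled prefix begins 1010...,
	// the mask keeps the top targetCpl+1 = 4 bits, and randPrefix fills the
	// remaining 12 bits, so the result shares exactly the first 3 bits with the
	// local ID.)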
67 | key := keyPrefixMap[targetPrefix] 68 | id := [32 + 2]byte{mh.SHA2_256, 32} 69 | binary.BigEndian.PutUint32(id[2:], key) 70 | return peer.ID(id[:]), nil 71 | } 72 | 73 | // GenRandomKey generates a random key matching a provided Common Prefix Length (Cpl) 74 | // wrt. the local identity. The returned key matches the targetCpl first bits of the 75 | // local key, the following bit is the inverse of the local key's bit at position 76 | // targetCpl+1 and the remaining bits are randomly generated. 77 | func (rt *RoutingTable) GenRandomKey(targetCpl uint) (ID, error) { 78 | if int(targetCpl+1) >= len(rt.local)*8 { 79 | return nil, fmt.Errorf("cannot generate peer ID for Cpl greater than key length") 80 | } 81 | partialOffset := targetCpl / 8 82 | 83 | // output contains the first partialOffset bytes of the local key 84 | // and the remaining bytes are random 85 | output := make([]byte, len(rt.local)) 86 | copy(output, rt.local[:partialOffset]) 87 | _, err := rand.Read(output[partialOffset:]) 88 | if err != nil { 89 | return nil, err 90 | } 91 | 92 | remainingBits := 8 - targetCpl%8 93 | orig := rt.local[partialOffset] 94 | 95 | origMask := ^uint8(0) << remainingBits 96 | randMask := ^origMask >> 1 97 | flippedBitOffset := remainingBits - 1 98 | flippedBitMask := uint8(1) << flippedBitOffset 99 | 100 | // restore the remainingBits Most Significant Bits of orig 101 | // and flip the flippedBitOffset-th bit of orig 102 | output[partialOffset] = orig&origMask | (orig & flippedBitMask) ^ flippedBitMask | output[partialOffset]&randMask 103 | 104 | return ID(output), nil 105 | } 106 | 107 | // ResetCplRefreshedAtForID resets the refresh time for the Cpl of the given ID. 108 | func (rt *RoutingTable) ResetCplRefreshedAtForID(id ID, newTime time.Time) { 109 | cpl := CommonPrefixLen(id, rt.local) 110 | if uint(cpl) > maxCplForRefresh { 111 | return 112 | } 113 | 114 | rt.cplRefreshLk.Lock() 115 | defer rt.cplRefreshLk.Unlock() 116 | 117 | rt.cplRefreshedAt[uint(cpl)] = newTime 118 | } 119 | -------------------------------------------------------------------------------- /table_refresh_test.go: -------------------------------------------------------------------------------- 1 | package kbucket 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/libp2p/go-libp2p/core/peer" 8 | "github.com/libp2p/go-libp2p/core/test" 9 | 10 | pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore" 11 | 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestGenRandPeerID(t *testing.T) { 16 | t.Parallel() 17 | 18 | local := test.RandPeerIDFatal(t) 19 | m := pstore.NewMetrics() 20 | rt, err := NewRoutingTable(1, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 21 | require.NoError(t, err) 22 | 23 | // generate above maxCplForRefresh fails 24 | p, err := rt.GenRandPeerID(maxCplForRefresh + 1) 25 | require.Error(t, err) 26 | require.Empty(t, p) 27 | 28 | // test generate rand peer ID 29 | for cpl := uint(0); cpl <= maxCplForRefresh; cpl++ { 30 | peerID, err := rt.GenRandPeerID(cpl) 31 | require.NoError(t, err) 32 | 33 | require.True(t, uint(CommonPrefixLen(ConvertPeerID(peerID), rt.local)) == cpl, "failed for cpl=%d", cpl) 34 | } 35 | } 36 | 37 | func TestGenRandomKey(t *testing.T) { 38 | // test can be run in parallel 39 | t.Parallel() 40 | 41 | // run multiple occurences to make sure the test wasn't just lucky 42 | for i := 0; i < 100; i++ { 43 | // generate routing table with random local peer ID 44 | local := test.RandPeerIDFatal(t) 45 | m := pstore.NewMetrics() 46 | rt, err := 
NewRoutingTable(1, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 47 | require.NoError(t, err) 48 | 49 | // GenRandomKey fails for cpl >= 256 50 | _, err = rt.GenRandomKey(256) 51 | require.Error(t, err) 52 | _, err = rt.GenRandomKey(300) 53 | require.Error(t, err) 54 | 55 | // bitwise comparison legend: 56 | // O for same bit, X for different bit, ? for don't care 57 | 58 | // we compare the returned generated key with the local key 59 | // for CPL = X, the first X bits should be the same, bit X+1 should be 60 | // different, and the rest should be random / don't care 61 | 62 | // cpl = 0 should return a different first bit 63 | // X??????? ???... 64 | key0, err := rt.GenRandomKey(0) 65 | require.NoError(t, err) 66 | // most significant bit should be different 67 | require.NotEqual(t, key0[0]>>7, rt.local[0]>>7) 68 | 69 | // cpl = 1 should return a different second bit 70 | // OX?????? ???... 71 | key1, err := rt.GenRandomKey(1) 72 | require.NoError(t, err) 73 | // MSB should be equal, as cpl = 1 74 | require.Equal(t, key1[0]>>7, rt.local[0]>>7) 75 | // 2nd MSB should be different 76 | require.NotEqual(t, (key1[0]<<1)>>6, (rt.local[0]<<1)>>6) 77 | 78 | // cpl = 2 should return a different third bit 79 | // OOX????? ???... 80 | key2, err := rt.GenRandomKey(2) 81 | require.NoError(t, err) 82 | // 2 MSB should be equal, as cpl = 2 83 | require.Equal(t, key2[0]>>6, rt.local[0]>>6) 84 | // 3rd MSB should be different 85 | require.NotEqual(t, (key2[0]<<2)>>5, (rt.local[0]<<2)>>5) 86 | 87 | // cpl = 7 should return a different eighth bit 88 | // OOOOOOOX ???... 89 | key7, err := rt.GenRandomKey(7) 90 | require.NoError(t, err) 91 | // 7 MSB should be equal, as cpl = 7 92 | require.Equal(t, key7[0]>>1, rt.local[0]>>1) 93 | // 8th MSB should be different 94 | require.NotEqual(t, key7[0]<<7, rt.local[0]<<7) 95 | 96 | // cpl = 8 should return a different ninth bit 97 | // OOOOOOOO X???... 98 | key8, err := rt.GenRandomKey(8) 99 | require.NoError(t, err) 100 | // 8 MSB should be equal, as cpl = 8 101 | require.Equal(t, key8[0], rt.local[0]) 102 | // 9th MSB should be different 103 | require.NotEqual(t, key8[1]>>7, rt.local[1]>>7) 104 | 105 | // cpl = 53 should return a different 54th bit 106 | // OOOOOOOO OOOOOOOO OOOOOOOO OOOOOOOO OOOOOOOO OOOOOOOO OOOOOX?? ???... 107 | key53, err := rt.GenRandomKey(53) 108 | require.NoError(t, err) 109 | // 53 MSB should be equal, as cpl = 53 110 | require.Equal(t, key53[:6], rt.local[:6]) 111 | require.Equal(t, key53[6]>>3, rt.local[6]>>3) 112 | // 54th MSB should be different 113 | require.NotEqual(t, (key53[6]<<5)>>7, (rt.local[6]<<5)>>7) 114 | } 115 | } 116 | 117 | func TestRefreshAndGetTrackedCpls(t *testing.T) { 118 | t.Parallel() 119 | 120 | const ( 121 | minCpl = 8 122 | testCpl = 10 123 | maxCpl = 12 124 | ) 125 | 126 | local := test.RandPeerIDFatal(t) 127 | m := pstore.NewMetrics() 128 | rt, err := NewRoutingTable(2, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 129 | require.NoError(t, err) 130 | 131 | // fetch cpl's 132 | trackedCpls := rt.GetTrackedCplsForRefresh() 133 | // should have nothing. 134 | require.Len(t, trackedCpls, 1) 135 | 136 | var peerIDs []peer.ID 137 | for i := minCpl; i <= maxCpl; i++ { 138 | id, err := rt.GenRandPeerID(uint(i)) 139 | require.NoError(t, err) 140 | peerIDs = append(peerIDs, id) 141 | } 142 | 143 | // add peer IDs. 
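	// (one peer per cpl from minCpl to maxCpl; each successful add should extend
	// the tracked-cpl slice by one, which the Len assertion below verifies)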
144 | for i, id := range peerIDs { 145 | added, err := rt.TryAddPeer(id, true, false) 146 | require.NoError(t, err) 147 | require.True(t, added) 148 | require.Len(t, rt.GetTrackedCplsForRefresh(), minCpl+i+1) 149 | } 150 | 151 | // and remove down to the test CPL 152 | for i := maxCpl; i > testCpl; i-- { 153 | rt.RemovePeer(peerIDs[i-minCpl]) 154 | require.Len(t, rt.GetTrackedCplsForRefresh(), i) 155 | } 156 | 157 | // should be tracking testCpl 158 | trackedCpls = rt.GetTrackedCplsForRefresh() 159 | require.Len(t, trackedCpls, testCpl+1) 160 | // they should all be zero 161 | for _, refresh := range trackedCpls { 162 | require.True(t, refresh.IsZero(), "tracked cpl's should be zero") 163 | } 164 | 165 | // add our peer ID to max out the table 166 | added, err := rt.TryAddPeer(local, true, false) 167 | require.NoError(t, err) 168 | require.True(t, added) 169 | 170 | // should be tracking the max 171 | trackedCpls = rt.GetTrackedCplsForRefresh() 172 | require.Len(t, trackedCpls, int(maxCplForRefresh)+1) 173 | 174 | // and not refreshed 175 | for _, refresh := range trackedCpls { 176 | require.True(t, refresh.IsZero(), "tracked cpl's should be zero") 177 | } 178 | 179 | now := time.Now() 180 | // reset the test peer ID. 181 | rt.ResetCplRefreshedAtForID(ConvertPeerID(peerIDs[testCpl-minCpl]), now) 182 | 183 | // should still be tracking all buckets 184 | trackedCpls = rt.GetTrackedCplsForRefresh() 185 | require.Len(t, trackedCpls, int(maxCplForRefresh)+1) 186 | 187 | for i, refresh := range trackedCpls { 188 | if i == testCpl { 189 | require.True(t, now.Equal(refresh), "test cpl should have the correct refresh time") 190 | } else { 191 | require.True(t, refresh.IsZero(), "other cpl's should be 0") 192 | } 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /table_test.go: -------------------------------------------------------------------------------- 1 | package kbucket 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | "github.com/libp2p/go-libp2p/core/peer" 9 | "github.com/libp2p/go-libp2p/core/test" 10 | 11 | "github.com/libp2p/go-libp2p-kbucket/peerdiversity" 12 | pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore" 13 | 14 | ma "github.com/multiformats/go-multiaddr" 15 | "github.com/stretchr/testify/require" 16 | ) 17 | 18 | var NoOpThreshold = 100 * time.Hour 19 | 20 | func TestPrint(t *testing.T) { 21 | t.Parallel() 22 | local := test.RandPeerIDFatal(t) 23 | m := pstore.NewMetrics() 24 | rt, err := NewRoutingTable(1, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 25 | require.NoError(t, err) 26 | rt.Print() 27 | } 28 | 29 | // Test basic features of the bucket struct 30 | func TestBucket(t *testing.T) { 31 | t.Parallel() 32 | testTime1 := time.Now() 33 | testTime2 := time.Now().AddDate(1, 0, 0) 34 | 35 | b := newBucket() 36 | 37 | peers := make([]peer.ID, 100) 38 | for i := 0; i < 100; i++ { 39 | peers[i] = test.RandPeerIDFatal(t) 40 | b.pushFront(&PeerInfo{ 41 | Id: peers[i], 42 | LastUsefulAt: testTime1, 43 | LastSuccessfulOutboundQueryAt: testTime2, 44 | AddedAt: testTime1, 45 | dhtId: ConvertPeerID(peers[i]), 46 | }) 47 | } 48 | 49 | local := test.RandPeerIDFatal(t) 50 | localID := ConvertPeerID(local) 51 | 52 | infos := b.peers() 53 | require.Len(t, infos, 100) 54 | 55 | i := rand.Intn(len(peers)) 56 | p := b.getPeer(peers[i]) 57 | require.NotNil(t, p) 58 | require.Equal(t, peers[i], p.Id) 59 | require.Equal(t, ConvertPeerID(peers[i]), p.dhtId) 60 | require.EqualValues(t, testTime1, p.LastUsefulAt) 61 
| require.EqualValues(t, testTime2, p.LastSuccessfulOutboundQueryAt) 62 | 63 | t2 := time.Now().Add(1 * time.Hour) 64 | t3 := t2.Add(1 * time.Hour) 65 | p.LastSuccessfulOutboundQueryAt = t2 66 | p.LastUsefulAt = t3 67 | p = b.getPeer(peers[i]) 68 | require.NotNil(t, p) 69 | require.EqualValues(t, t2, p.LastSuccessfulOutboundQueryAt) 70 | require.EqualValues(t, t3, p.LastUsefulAt) 71 | 72 | spl := b.split(0, ConvertPeerID(local)) 73 | llist := b.list 74 | for e := llist.Front(); e != nil; e = e.Next() { 75 | p := ConvertPeerID(e.Value.(*PeerInfo).Id) 76 | cpl := CommonPrefixLen(p, localID) 77 | if cpl > 0 { 78 | t.Fatalf("split failed. found id with cpl > 0 in 0 bucket") 79 | } 80 | } 81 | 82 | rlist := spl.list 83 | for e := rlist.Front(); e != nil; e = e.Next() { 84 | p := ConvertPeerID(e.Value.(*PeerInfo).Id) 85 | cpl := CommonPrefixLen(p, localID) 86 | if cpl == 0 { 87 | t.Fatalf("split failed. found id with cpl == 0 in non 0 bucket") 88 | } 89 | } 90 | } 91 | 92 | func TestNPeersForCpl(t *testing.T) { 93 | t.Parallel() 94 | local := test.RandPeerIDFatal(t) 95 | m := pstore.NewMetrics() 96 | rt, err := NewRoutingTable(2, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 97 | require.NoError(t, err) 98 | 99 | require.Equal(t, 0, rt.NPeersForCpl(0)) 100 | require.Equal(t, 0, rt.NPeersForCpl(1)) 101 | 102 | // one peer with cpl 1 103 | p, _ := rt.GenRandPeerID(1) 104 | rt.TryAddPeer(p, true, false) 105 | require.Equal(t, 0, rt.NPeersForCpl(0)) 106 | require.Equal(t, 1, rt.NPeersForCpl(1)) 107 | require.Equal(t, 0, rt.NPeersForCpl(2)) 108 | 109 | // one peer with cpl 0 110 | p, _ = rt.GenRandPeerID(0) 111 | rt.TryAddPeer(p, true, false) 112 | require.Equal(t, 1, rt.NPeersForCpl(0)) 113 | require.Equal(t, 1, rt.NPeersForCpl(1)) 114 | require.Equal(t, 0, rt.NPeersForCpl(2)) 115 | 116 | // split the bucket with a peer with cpl 1 117 | p, _ = rt.GenRandPeerID(1) 118 | rt.TryAddPeer(p, true, false) 119 | require.Equal(t, 1, rt.NPeersForCpl(0)) 120 | require.Equal(t, 2, rt.NPeersForCpl(1)) 121 | require.Equal(t, 0, rt.NPeersForCpl(2)) 122 | 123 | p, _ = rt.GenRandPeerID(0) 124 | rt.TryAddPeer(p, true, false) 125 | require.Equal(t, 2, rt.NPeersForCpl(0)) 126 | } 127 | 128 | func TestUsefulNewPeer(t *testing.T) { 129 | t.Parallel() 130 | local := test.RandPeerIDFatal(t) 131 | m := pstore.NewMetrics() 132 | rt, err := NewRoutingTable(2, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 133 | require.NoError(t, err) 134 | 135 | generatedPeerIds := map[peer.ID]struct{}{} 136 | genNewPeerIdWithCpl := func(targetCpl uint) (peer.ID, error) { 137 | var p peer.ID 138 | var err error 139 | for { 140 | p, err = rt.GenRandPeerID(targetCpl) 141 | if err != nil { 142 | break 143 | } 144 | if _, exists := generatedPeerIds[p]; !exists { 145 | generatedPeerIds[p] = struct{}{} 146 | break 147 | } 148 | } 149 | return p, err 150 | } 151 | 152 | // add first peer to bucket 0 153 | p, _ := genNewPeerIdWithCpl(0) 154 | require.True(t, rt.UsefulNewPeer(p)) 155 | added, err := rt.TryAddPeer(p, true, false) 156 | require.NoError(t, err) 157 | require.True(t, added) 158 | // first peer shouldn't be useful, as it is already in the rt 159 | require.False(t, rt.UsefulNewPeer(p)) 160 | 161 | // add second peer to bucket 0 162 | p, _ = genNewPeerIdWithCpl(0) 163 | require.True(t, rt.UsefulNewPeer(p)) 164 | added, err = rt.TryAddPeer(p, true, false) 165 | require.NoError(t, err) 166 | require.True(t, added) 167 | 168 | // bucket 0 (also last bucket) full with non replaceable peers 169 | p, _ = genNewPeerIdWithCpl(0) 
170 | require.False(t, rt.UsefulNewPeer(p)) 171 | 172 | // bucket 0 is full, unfolding it 173 | // add first peer to bucket 1 174 | p, _ = genNewPeerIdWithCpl(1) 175 | require.True(t, rt.UsefulNewPeer(p)) 176 | added, err = rt.TryAddPeer(p, true, false) 177 | require.NoError(t, err) 178 | require.True(t, added) 179 | 180 | // add second peer to bucket 1 181 | // cpl is 2, but bucket 1 is last bucket 182 | p, _ = genNewPeerIdWithCpl(2) 183 | require.True(t, rt.UsefulNewPeer(p)) 184 | added, err = rt.TryAddPeer(p, true, false) 185 | require.NoError(t, err) 186 | require.True(t, added) 187 | 188 | // unfolding bucket 1 189 | // adding second peer to bucket 2 190 | p, _ = genNewPeerIdWithCpl(2) 191 | require.True(t, rt.UsefulNewPeer(p)) 192 | added, err = rt.TryAddPeer(p, true, false) 193 | require.NoError(t, err) 194 | require.True(t, added) 195 | 196 | // adding replaceable peer to bucket 1 197 | // bucket 1 size: 1 -> 2 198 | p, _ = genNewPeerIdWithCpl(1) 199 | require.True(t, rt.UsefulNewPeer(p)) 200 | added, err = rt.TryAddPeer(p, true, true) 201 | require.NoError(t, err) 202 | require.True(t, added) 203 | 204 | // adding replaceable peer to bucket 1 205 | // bucket 1 size: 2 -> 2 206 | p, _ = genNewPeerIdWithCpl(1) 207 | require.True(t, rt.UsefulNewPeer(p)) 208 | added, err = rt.TryAddPeer(p, true, true) 209 | require.NoError(t, err) 210 | require.True(t, added) 211 | 212 | // adding non replaceable peer to bucket 1 213 | // bucket 1 size: 2 -> 2 214 | p, _ = genNewPeerIdWithCpl(1) 215 | require.True(t, rt.UsefulNewPeer(p)) 216 | added, err = rt.TryAddPeer(p, true, false) 217 | require.NoError(t, err) 218 | require.True(t, added) 219 | 220 | // adding non replaceable peer to bucket 1 221 | // bucket 1 size: 2 -> 2 222 | p, _ = genNewPeerIdWithCpl(1) 223 | require.False(t, rt.UsefulNewPeer(p)) 224 | added, err = rt.TryAddPeer(p, true, false) 225 | require.Error(t, err) 226 | require.False(t, added) 227 | } 228 | 229 | func TestEmptyBucketCollapse(t *testing.T) { 230 | t.Parallel() 231 | local := test.RandPeerIDFatal(t) 232 | 233 | m := pstore.NewMetrics() 234 | rt, err := NewRoutingTable(1, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 235 | require.NoError(t, err) 236 | 237 | // generate peers with cpl 0,1,2 & 3 238 | p1, _ := rt.GenRandPeerID(0) 239 | p2, _ := rt.GenRandPeerID(1) 240 | p3, _ := rt.GenRandPeerID(2) 241 | p4, _ := rt.GenRandPeerID(3) 242 | 243 | // remove peer on an empty bucket should not panic. 244 | rt.RemovePeer(p1) 245 | 246 | // add peer with cpl 0 and remove it..bucket should still exist as it's the ONLY bucket we have 247 | b, err := rt.TryAddPeer(p1, true, false) 248 | require.True(t, b) 249 | require.NoError(t, err) 250 | rt.RemovePeer(p1) 251 | rt.tabLock.Lock() 252 | require.Len(t, rt.buckets, 1) 253 | rt.tabLock.Unlock() 254 | require.Empty(t, rt.ListPeers()) 255 | 256 | // add peer with cpl 0 and cpl 1 and verify we have two buckets. 257 | b, err = rt.TryAddPeer(p1, true, false) 258 | require.NoError(t, err) 259 | require.True(t, b) 260 | b, err = rt.TryAddPeer(p2, true, false) 261 | require.NoError(t, err) 262 | require.True(t, b) 263 | rt.tabLock.Lock() 264 | require.Len(t, rt.buckets, 2) 265 | rt.tabLock.Unlock() 266 | 267 | // removing a peer from the last bucket collapses it. 
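	// (p2 was the only peer with cpl >= 1, so bucket 1 becomes empty and is
	// dropped, leaving just bucket 0 with p1, as the assertions below check)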
268 | rt.RemovePeer(p2) 269 | rt.tabLock.Lock() 270 | require.Len(t, rt.buckets, 1) 271 | rt.tabLock.Unlock() 272 | require.Len(t, rt.ListPeers(), 1) 273 | require.Contains(t, rt.ListPeers(), p1) 274 | 275 | // add p2 again 276 | b, err = rt.TryAddPeer(p2, true, false) 277 | require.True(t, b) 278 | require.NoError(t, err) 279 | rt.tabLock.Lock() 280 | require.Len(t, rt.buckets, 2) 281 | rt.tabLock.Unlock() 282 | 283 | // now remove a peer from the second-last i.e. first bucket and ensure it collapses 284 | rt.RemovePeer(p1) 285 | rt.tabLock.Lock() 286 | require.Len(t, rt.buckets, 1) 287 | rt.tabLock.Unlock() 288 | require.Len(t, rt.ListPeers(), 1) 289 | require.Contains(t, rt.ListPeers(), p2) 290 | 291 | // let's have a total of 4 buckets now 292 | rt.TryAddPeer(p1, true, false) 293 | rt.TryAddPeer(p2, true, false) 294 | rt.TryAddPeer(p3, true, false) 295 | rt.TryAddPeer(p4, true, false) 296 | 297 | rt.tabLock.Lock() 298 | require.Len(t, rt.buckets, 4) 299 | rt.tabLock.Unlock() 300 | 301 | // removing from 2,3 and then 4 leaves us with ONLY one bucket 302 | rt.RemovePeer(p2) 303 | rt.RemovePeer(p3) 304 | rt.RemovePeer(p4) 305 | rt.tabLock.Lock() 306 | require.Len(t, rt.buckets, 1) 307 | rt.tabLock.Unlock() 308 | 309 | // an empty bucket in the middle DOES NOT collapse buckets 310 | rt.TryAddPeer(p1, true, false) 311 | rt.TryAddPeer(p2, true, false) 312 | rt.TryAddPeer(p3, true, false) 313 | rt.TryAddPeer(p4, true, false) 314 | 315 | rt.tabLock.Lock() 316 | require.Len(t, rt.buckets, 4) 317 | rt.tabLock.Unlock() 318 | 319 | rt.RemovePeer(p2) 320 | rt.tabLock.Lock() 321 | require.Len(t, rt.buckets, 4) 322 | rt.tabLock.Unlock() 323 | require.NotContains(t, rt.ListPeers(), p2) 324 | } 325 | 326 | func TestRemovePeer(t *testing.T) { 327 | t.Parallel() 328 | local := test.RandPeerIDFatal(t) 329 | 330 | m := pstore.NewMetrics() 331 | rt, err := NewRoutingTable(2, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 332 | require.NoError(t, err) 333 | 334 | p1, _ := rt.GenRandPeerID(0) 335 | p2, _ := rt.GenRandPeerID(0) 336 | b, err := rt.TryAddPeer(p1, true, false) 337 | require.True(t, b) 338 | require.NoError(t, err) 339 | b, err = rt.TryAddPeer(p2, true, false) 340 | require.True(t, b) 341 | require.NoError(t, err) 342 | 343 | // ensure p1 & p2 are in the RT 344 | require.Len(t, rt.ListPeers(), 2) 345 | require.Contains(t, rt.ListPeers(), p1) 346 | require.Contains(t, rt.ListPeers(), p2) 347 | 348 | // remove a peer and ensure it's not in the RT 349 | require.NotEmpty(t, rt.Find(p1)) 350 | rt.RemovePeer(p1) 351 | require.Empty(t, rt.Find(p1)) 352 | require.NotEmpty(t, rt.Find(p2)) 353 | } 354 | 355 | func TestTableCallbacks(t *testing.T) { 356 | t.Parallel() 357 | 358 | local := test.RandPeerIDFatal(t) 359 | m := pstore.NewMetrics() 360 | rt, err := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 361 | require.NoError(t, err) 362 | 363 | peers := make([]peer.ID, 100) 364 | for i := 0; i < 100; i++ { 365 | peers[i] = test.RandPeerIDFatal(t) 366 | } 367 | 368 | pset := make(map[peer.ID]struct{}) 369 | rt.PeerAdded = func(p peer.ID) { 370 | pset[p] = struct{}{} 371 | } 372 | rt.PeerRemoved = func(p peer.ID) { 373 | delete(pset, p) 374 | } 375 | 376 | rt.TryAddPeer(peers[0], true, false) 377 | if _, ok := pset[peers[0]]; !ok { 378 | t.Fatal("should have this peer") 379 | } 380 | 381 | rt.RemovePeer(peers[0]) 382 | if _, ok := pset[peers[0]]; ok { 383 | t.Fatal("should not have this peer") 384 | } 385 | 386 | for _, p := range peers { 387 | rt.TryAddPeer(p, true, 
false) 388 | } 389 | 390 | out := rt.ListPeers() 391 | for _, outp := range out { 392 | if _, ok := pset[outp]; !ok { 393 | t.Fatal("should have peer in the peerset") 394 | } 395 | delete(pset, outp) 396 | } 397 | 398 | if len(pset) > 0 { 399 | t.Fatal("have peers in peerset that were not in the table", len(pset)) 400 | } 401 | } 402 | 403 | // Right now, this just makes sure that it doesnt hang or crash 404 | func TestTryAddPeerLoad(t *testing.T) { 405 | t.Parallel() 406 | 407 | local := test.RandPeerIDFatal(t) 408 | m := pstore.NewMetrics() 409 | rt, err := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 410 | require.NoError(t, err) 411 | 412 | peers := make([]peer.ID, 100) 413 | for i := 0; i < 100; i++ { 414 | peers[i] = test.RandPeerIDFatal(t) 415 | } 416 | 417 | for i := 0; i < 10000; i++ { 418 | rt.TryAddPeer(peers[rand.Intn(len(peers))], true, false) 419 | } 420 | 421 | for i := 0; i < 100; i++ { 422 | id := ConvertPeerID(test.RandPeerIDFatal(t)) 423 | ret := rt.NearestPeers(id, 5) 424 | if len(ret) == 0 { 425 | t.Fatal("Failed to find node near ID.") 426 | } 427 | } 428 | } 429 | 430 | func TestTableFind(t *testing.T) { 431 | t.Parallel() 432 | 433 | local := test.RandPeerIDFatal(t) 434 | m := pstore.NewMetrics() 435 | rt, err := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 436 | require.NoError(t, err) 437 | 438 | peers := make([]peer.ID, 100) 439 | for i := 0; i < 5; i++ { 440 | peers[i] = test.RandPeerIDFatal(t) 441 | rt.TryAddPeer(peers[i], true, false) 442 | } 443 | 444 | t.Logf("Searching for peer: '%s'", peers[2]) 445 | found := rt.NearestPeer(ConvertPeerID(peers[2])) 446 | if !(found == peers[2]) { 447 | t.Fatalf("Failed to lookup known node...") 448 | } 449 | } 450 | 451 | func TestUpdateLastSuccessfulOutboundQueryAt(t *testing.T) { 452 | local := test.RandPeerIDFatal(t) 453 | m := pstore.NewMetrics() 454 | rt, err := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 455 | require.NoError(t, err) 456 | 457 | p := test.RandPeerIDFatal(t) 458 | b, err := rt.TryAddPeer(p, true, false) 459 | require.True(t, b) 460 | require.NoError(t, err) 461 | 462 | // increment and assert 463 | t2 := time.Now().Add(1 * time.Hour) 464 | rt.UpdateLastSuccessfulOutboundQueryAt(p, t2) 465 | rt.tabLock.Lock() 466 | pi := rt.buckets[0].getPeer(p) 467 | require.NotNil(t, pi) 468 | require.EqualValues(t, t2, pi.LastSuccessfulOutboundQueryAt) 469 | rt.tabLock.Unlock() 470 | } 471 | 472 | func TestUpdateLastUsefulAt(t *testing.T) { 473 | local := test.RandPeerIDFatal(t) 474 | m := pstore.NewMetrics() 475 | rt, err := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 476 | require.NoError(t, err) 477 | 478 | p := test.RandPeerIDFatal(t) 479 | b, err := rt.TryAddPeer(p, true, false) 480 | require.True(t, b) 481 | require.NoError(t, err) 482 | 483 | // increment and assert 484 | t2 := time.Now().Add(1 * time.Hour) 485 | rt.UpdateLastUsefulAt(p, t2) 486 | rt.tabLock.Lock() 487 | pi := rt.buckets[0].getPeer(p) 488 | require.NotNil(t, pi) 489 | require.EqualValues(t, t2, pi.LastUsefulAt) 490 | rt.tabLock.Unlock() 491 | } 492 | 493 | func TestTryAddPeer(t *testing.T) { 494 | t.Parallel() 495 | 496 | local := test.RandPeerIDFatal(t) 497 | m := pstore.NewMetrics() 498 | rt, err := NewRoutingTable(2, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 499 | require.NoError(t, err) 500 | 501 | // generate 2 peers to saturate the first bucket for cpl=0 502 | p1, _ := rt.GenRandPeerID(0) 503 | b, 
err := rt.TryAddPeer(p1, true, false) 504 | require.NoError(t, err) 505 | require.True(t, b) 506 | p2, _ := rt.GenRandPeerID(0) 507 | b, err = rt.TryAddPeer(p2, true, true) 508 | require.NoError(t, err) 509 | require.True(t, b) 510 | require.Equal(t, p1, rt.Find(p1)) 511 | require.Equal(t, p2, rt.Find(p2)) 512 | 513 | // trying to add a peer with cpl=0 works as p2 is replacable 514 | p3, _ := rt.GenRandPeerID(0) 515 | b, err = rt.TryAddPeer(p3, true, false) 516 | require.NoError(t, err) 517 | require.True(t, b) 518 | require.Equal(t, p3, rt.Find(p3)) 519 | // p2 has been removed 520 | require.Empty(t, rt.Find(p2)) 521 | 522 | // however adding peer fails as there are no more replacable peers. 523 | p5, err := rt.GenRandPeerID(0) 524 | require.NoError(t, err) 525 | b, err = rt.TryAddPeer(p5, true, false) 526 | require.Error(t, err) 527 | require.False(t, b) 528 | 529 | // however, trying to add peer with cpl=1 works 530 | p4, _ := rt.GenRandPeerID(1) 531 | b, err = rt.TryAddPeer(p4, true, false) 532 | require.NoError(t, err) 533 | require.True(t, b) 534 | require.Equal(t, p4, rt.Find(p4)) 535 | 536 | // adding non query peer 537 | p6, err := rt.GenRandPeerID(3) 538 | require.NoError(t, err) 539 | b, err = rt.TryAddPeer(p6, false, false) 540 | require.NoError(t, err) 541 | require.True(t, b) 542 | rt.tabLock.Lock() 543 | pi := rt.buckets[rt.bucketIdForPeer(p6)].getPeer(p6) 544 | require.NotNil(t, p6) 545 | require.True(t, pi.LastUsefulAt.IsZero()) 546 | rt.tabLock.Unlock() 547 | } 548 | 549 | func TestReplacePeerWithBucketSize1(t *testing.T) { 550 | localID := test.RandPeerIDFatal(t) 551 | rt, err := NewRoutingTable(1, ConvertPeerID(localID), time.Hour, pstore.NewMetrics(), NoOpThreshold, nil) 552 | require.NoError(t, err) 553 | p1, _ := rt.GenRandPeerID(1) // for any targetCpl > 0 554 | p2, _ := rt.GenRandPeerID(1) 555 | 556 | rt.TryAddPeer(p1, true, true) 557 | success, err := rt.TryAddPeer(p2, true, true) 558 | 559 | require.NoError(t, err) 560 | require.True(t, success) 561 | 562 | require.Equal(t, peer.ID(""), rt.Find(p1)) 563 | require.Equal(t, p2, rt.Find(p2)) 564 | require.Equal(t, rt.Size(), 1) 565 | } 566 | 567 | func TestMarkAllPeersIrreplaceable(t *testing.T) { 568 | t.Parallel() 569 | 570 | local := test.RandPeerIDFatal(t) 571 | m := pstore.NewMetrics() 572 | rt, err := NewRoutingTable(2, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 573 | require.NoError(t, err) 574 | 575 | // generate 2 peers 576 | p1, _ := rt.GenRandPeerID(0) 577 | b, err := rt.TryAddPeer(p1, true, true) 578 | require.NoError(t, err) 579 | require.True(t, b) 580 | p2, _ := rt.GenRandPeerID(0) 581 | b, err = rt.TryAddPeer(p2, true, true) 582 | require.NoError(t, err) 583 | require.True(t, b) 584 | require.Equal(t, p1, rt.Find(p1)) 585 | require.Equal(t, p2, rt.Find(p2)) 586 | 587 | rt.MarkAllPeersIrreplaceable() 588 | ps := rt.GetPeerInfos() 589 | for i := range ps { 590 | require.False(t, ps[i].replaceable) 591 | } 592 | } 593 | 594 | func TestTableFindMultiple(t *testing.T) { 595 | t.Parallel() 596 | 597 | local := test.RandPeerIDFatal(t) 598 | m := pstore.NewMetrics() 599 | rt, err := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 600 | require.NoError(t, err) 601 | 602 | peers := make([]peer.ID, 100) 603 | for i := 0; i < 18; i++ { 604 | peers[i] = test.RandPeerIDFatal(t) 605 | rt.TryAddPeer(peers[i], true, false) 606 | } 607 | 608 | t.Logf("Searching for peer: '%s'", peers[2]) 609 | found := rt.NearestPeers(ConvertPeerID(peers[2]), 15) 610 | if len(found) != 15 { 611 
| t.Fatalf("Got back different number of peers than we expected.") 612 | } 613 | } 614 | 615 | func TestTableFindMultipleBuckets(t *testing.T) { 616 | t.Parallel() 617 | 618 | local := test.RandPeerIDFatal(t) 619 | m := pstore.NewMetrics() 620 | 621 | rt, err := NewRoutingTable(5, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 622 | require.NoError(t, err) 623 | 624 | generatedPeerCount := 100 625 | peers := make([]peer.ID, generatedPeerCount) 626 | for i := 0; i < generatedPeerCount; i++ { 627 | peers[i] = test.RandPeerIDFatal(t) 628 | rt.TryAddPeer(peers[i], true, false) 629 | } 630 | 631 | closest := SortClosestPeers(rt.ListPeers(), ConvertPeerID(peers[2])) 632 | 633 | t.Logf("Searching for peer: '%s'", peers[2]) 634 | 635 | // should be able to find at least 30 636 | // ~31 (logtwo(100) * 5) 637 | targetNumberOfPeers := 20 638 | found := rt.NearestPeers(ConvertPeerID(peers[2]), targetNumberOfPeers) 639 | if len(found) != min(targetNumberOfPeers, rt.Size()) { 640 | rt.Print() 641 | t.Fatalf("asked for %d peers, got %d, rt size %d", targetNumberOfPeers, len(found), rt.Size()) 642 | } 643 | for i, p := range found { 644 | if p != closest[i] { 645 | t.Fatalf("unexpected peer %d", i) 646 | } 647 | } 648 | 649 | // Ok, now let's try finding all of them. 650 | found = rt.NearestPeers(ConvertPeerID(peers[2]), generatedPeerCount) 651 | if len(found) != rt.Size() { 652 | t.Fatalf("asked for %d peers, got %d", rt.Size(), len(found)) 653 | } 654 | 655 | for i, p := range found { 656 | if p != closest[i] { 657 | t.Fatalf("unexpected peer %d", i) 658 | } 659 | } 660 | } 661 | 662 | // Looks for race conditions in table operations. For a more 'certain' 663 | // test, increase the loop counter from 1000 to a much higher number 664 | // and set GOMAXPROCS above 1 665 | func TestTableMultithreaded(t *testing.T) { 666 | t.Parallel() 667 | 668 | local := peer.ID("localPeer") 669 | m := pstore.NewMetrics() 670 | tab, err := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 671 | require.NoError(t, err) 672 | var peers []peer.ID 673 | for i := 0; i < 500; i++ { 674 | peers = append(peers, test.RandPeerIDFatal(t)) 675 | } 676 | 677 | done := make(chan struct{}) 678 | go func() { 679 | for i := 0; i < 1000; i++ { 680 | n := rand.Intn(len(peers)) 681 | tab.TryAddPeer(peers[n], true, false) 682 | } 683 | done <- struct{}{} 684 | }() 685 | 686 | go func() { 687 | for i := 0; i < 1000; i++ { 688 | n := rand.Intn(len(peers)) 689 | tab.TryAddPeer(peers[n], true, false) 690 | } 691 | done <- struct{}{} 692 | }() 693 | 694 | go func() { 695 | for i := 0; i < 1000; i++ { 696 | n := rand.Intn(len(peers)) 697 | tab.Find(peers[n]) 698 | } 699 | done <- struct{}{} 700 | }() 701 | <-done 702 | <-done 703 | <-done 704 | } 705 | 706 | type mockPeerGroupFilter struct { 707 | peerAddressFunc func(p peer.ID) []ma.Multiaddr 708 | allowFnc func(g peerdiversity.PeerGroupInfo) bool 709 | 710 | incrementFnc func(g peerdiversity.PeerGroupInfo) 711 | decrementFnc func(p peerdiversity.PeerGroupInfo) 712 | } 713 | 714 | func (m *mockPeerGroupFilter) Allow(g peerdiversity.PeerGroupInfo) (allow bool) { 715 | return m.allowFnc(g) 716 | } 717 | 718 | func (m *mockPeerGroupFilter) PeerAddresses(p peer.ID) []ma.Multiaddr { 719 | return m.peerAddressFunc(p) 720 | } 721 | 722 | func (m *mockPeerGroupFilter) Increment(g peerdiversity.PeerGroupInfo) { 723 | if m.incrementFnc != nil { 724 | m.incrementFnc(g) 725 | } 726 | } 727 | 728 | func (m *mockPeerGroupFilter) Decrement(g peerdiversity.PeerGroupInfo) { 729 | 
if m.decrementFnc != nil { 730 | m.decrementFnc(g) 731 | } 732 | } 733 | 734 | func TestDiversityFiltering(t *testing.T) { 735 | local := test.RandPeerIDFatal(t) 736 | cplCount := make(map[int]int) 737 | mg := &mockPeerGroupFilter{} 738 | mg.peerAddressFunc = func(p peer.ID) []ma.Multiaddr { 739 | return []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0")} 740 | } 741 | mg.allowFnc = func(g peerdiversity.PeerGroupInfo) bool { 742 | return cplCount[g.Cpl] < 1 743 | } 744 | 745 | mg.incrementFnc = func(g peerdiversity.PeerGroupInfo) { 746 | cplCount[g.Cpl] = cplCount[g.Cpl] + 1 747 | } 748 | 749 | mg.decrementFnc = func(g peerdiversity.PeerGroupInfo) { 750 | cplCount[g.Cpl] = cplCount[g.Cpl] - 1 751 | } 752 | 753 | df, err := peerdiversity.NewFilter(mg, "appname", func(p peer.ID) int { 754 | return CommonPrefixLen(ConvertPeerID(local), ConvertPeerID(p)) 755 | }) 756 | require.NoError(t, err) 757 | 758 | rt, err := NewRoutingTable(10, ConvertPeerID(local), time.Hour, pstore.NewMetrics(), NoOpThreshold, df) 759 | require.NoError(t, err) 760 | p, _ := rt.GenRandPeerID(2) 761 | b, err := rt.TryAddPeer(p, true, false) 762 | require.NoError(t, err) 763 | require.True(t, b) 764 | 765 | p2, _ := rt.GenRandPeerID(2) 766 | b, err = rt.TryAddPeer(p2, true, false) 767 | require.Error(t, err) 768 | require.False(t, b) 769 | 770 | rt.RemovePeer(p) 771 | b, err = rt.TryAddPeer(p2, true, false) 772 | require.NoError(t, err) 773 | require.True(t, b) 774 | } 775 | 776 | func TestGetPeerInfos(t *testing.T) { 777 | local := test.RandPeerIDFatal(t) 778 | m := pstore.NewMetrics() 779 | rt, err := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 780 | require.NoError(t, err) 781 | 782 | require.Empty(t, rt.GetPeerInfos()) 783 | 784 | p1 := test.RandPeerIDFatal(t) 785 | p2 := test.RandPeerIDFatal(t) 786 | 787 | b, err := rt.TryAddPeer(p1, false, false) 788 | require.True(t, b) 789 | require.NoError(t, err) 790 | b, err = rt.TryAddPeer(p2, true, false) 791 | require.True(t, b) 792 | require.NoError(t, err) 793 | 794 | ps := rt.GetPeerInfos() 795 | require.Len(t, ps, 2) 796 | ms := make(map[peer.ID]PeerInfo) 797 | for _, p := range ps { 798 | ms[p.Id] = p 799 | } 800 | 801 | require.Equal(t, p1, ms[p1].Id) 802 | require.True(t, ms[p1].LastUsefulAt.IsZero()) 803 | require.Equal(t, p2, ms[p2].Id) 804 | require.False(t, ms[p2].LastUsefulAt.IsZero()) 805 | } 806 | 807 | func TestPeerRemovedNotificationWhenPeerIsEvicted(t *testing.T) { 808 | t.Parallel() 809 | 810 | local := test.RandPeerIDFatal(t) 811 | m := pstore.NewMetrics() 812 | rt, err := NewRoutingTable(1, ConvertPeerID(local), time.Hour, m, NoOpThreshold, nil) 813 | require.NoError(t, err) 814 | pset := make(map[peer.ID]struct{}) 815 | rt.PeerAdded = func(p peer.ID) { 816 | pset[p] = struct{}{} 817 | } 818 | rt.PeerRemoved = func(p peer.ID) { 819 | delete(pset, p) 820 | } 821 | 822 | p1, _ := rt.GenRandPeerID(0) 823 | p2, _ := rt.GenRandPeerID(0) 824 | 825 | // first peer works 826 | b, err := rt.TryAddPeer(p1, true, false) 827 | require.NoError(t, err) 828 | require.True(t, b) 829 | 830 | // second is rejected because of capacity 831 | b, err = rt.TryAddPeer(p2, true, false) 832 | require.False(t, b) 833 | require.Error(t, err) 834 | 835 | // pset has first peer 836 | require.Contains(t, pset, p1) 837 | require.NotContains(t, pset, p2) 838 | 839 | // mark peers as replacable so we can evict. 
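	// (the test flips the unexported replaceable flag directly on the bucket
	// entry, since the exported API only sets replaceability at add time via
	// TryAddPeer or clears it table-wide via MarkAllPeersIrreplaceable)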
840 | i := rt.bucketIdForPeer(p1) 841 | rt.tabLock.Lock() 842 | bucket := rt.buckets[i] 843 | rt.tabLock.Unlock() 844 | bucket.getPeer(p1).replaceable = true 845 | 846 | b, err = rt.TryAddPeer(p2, true, false) 847 | require.NoError(t, err) 848 | require.True(t, b) 849 | require.Contains(t, pset, p2) 850 | require.NotContains(t, pset, p1) 851 | } 852 | 853 | func BenchmarkAddPeer(b *testing.B) { 854 | b.StopTimer() 855 | local := ConvertKey("localKey") 856 | m := pstore.NewMetrics() 857 | tab, err := NewRoutingTable(20, local, time.Hour, m, NoOpThreshold, nil) 858 | require.NoError(b, err) 859 | 860 | var peers []peer.ID 861 | for i := 0; i < b.N; i++ { 862 | peers = append(peers, test.RandPeerIDFatal(b)) 863 | } 864 | 865 | b.StartTimer() 866 | for i := 0; i < b.N; i++ { 867 | tab.TryAddPeer(peers[i], true, false) 868 | } 869 | } 870 | 871 | func BenchmarkFinds(b *testing.B) { 872 | b.StopTimer() 873 | local := ConvertKey("localKey") 874 | m := pstore.NewMetrics() 875 | tab, err := NewRoutingTable(20, local, time.Hour, m, NoOpThreshold, nil) 876 | require.NoError(b, err) 877 | 878 | var peers []peer.ID 879 | for i := 0; i < b.N; i++ { 880 | peers = append(peers, test.RandPeerIDFatal(b)) 881 | tab.TryAddPeer(peers[i], true, false) 882 | } 883 | 884 | b.StartTimer() 885 | for i := 0; i < b.N; i++ { 886 | tab.Find(peers[i]) 887 | } 888 | } 889 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package kbucket 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/minio/sha256-simd" 7 | 8 | ks "github.com/libp2p/go-libp2p-kbucket/keyspace" 9 | "github.com/libp2p/go-libp2p/core/peer" 10 | 11 | u "github.com/ipfs/boxo/util" 12 | ) 13 | 14 | // ErrLookupFailure is returned if a routing table query returns no results. This is NOT expected 15 | // behaviour 16 | var ErrLookupFailure = errors.New("failed to find any peer in table") 17 | 18 | // ID for IpfsDHT is in the XORKeySpace 19 | // 20 | // The type dht.ID signifies that its contents have been hashed from either a 21 | // peer.ID or a util.Key. 
This unifies the keyspace 22 | type ID []byte 23 | 24 | func (id ID) less(other ID) bool { 25 | a := ks.Key{Space: ks.XORKeySpace, Bytes: id} 26 | b := ks.Key{Space: ks.XORKeySpace, Bytes: other} 27 | return a.Less(b) 28 | } 29 | 30 | func xor(a, b ID) ID { 31 | return ID(u.XOR(a, b)) 32 | } 33 | 34 | func CommonPrefixLen(a, b ID) int { 35 | return ks.ZeroPrefixLen(u.XOR(a, b)) 36 | } 37 | 38 | // ConvertPeerID creates a DHT ID by hashing a Peer ID (Multihash) 39 | func ConvertPeerID(id peer.ID) ID { 40 | hash := sha256.Sum256([]byte(id)) 41 | return hash[:] 42 | } 43 | 44 | // ConvertKey creates a DHT ID by hashing a local key (String) 45 | func ConvertKey(id string) ID { 46 | hash := sha256.Sum256([]byte(id)) 47 | return hash[:] 48 | } 49 | 50 | // Closer returns true if a is closer to key than b is 51 | func Closer(a, b peer.ID, key string) bool { 52 | aid := ConvertPeerID(a) 53 | bid := ConvertPeerID(b) 54 | tgt := ConvertKey(key) 55 | adist := xor(aid, tgt) 56 | bdist := xor(bid, tgt) 57 | 58 | return adist.less(bdist) 59 | } 60 | -------------------------------------------------------------------------------- /util_test.go: -------------------------------------------------------------------------------- 1 | package kbucket 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/libp2p/go-libp2p/core/test" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestCloser(t *testing.T) { 11 | Pa := test.RandPeerIDFatal(t) 12 | Pb := test.RandPeerIDFatal(t) 13 | var X string 14 | 15 | // returns true if d(Pa, X) < d(Pb, X) 16 | for { 17 | X = string(test.RandPeerIDFatal(t)) 18 | if xor(ConvertPeerID(Pa), ConvertKey(X)).less(xor(ConvertPeerID(Pb), ConvertKey(X))) { 19 | break 20 | } 21 | } 22 | 23 | require.True(t, Closer(Pa, Pb, X)) 24 | 25 | // returns false if d(Pa,X) > d(Pb, X) 26 | for { 27 | X = string(test.RandPeerIDFatal(t)) 28 | if xor(ConvertPeerID(Pb), ConvertKey(X)).less(xor(ConvertPeerID(Pa), ConvertKey(X))) { 29 | break 30 | } 31 | 32 | } 33 | require.False(t, Closer(Pa, Pb, X)) 34 | } 35 | -------------------------------------------------------------------------------- /version.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "v0.7.0" 3 | } 4 | --------------------------------------------------------------------------------
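
For orientation, the helpers in util.go above are the glue between peer IDs and the XOR keyspace that the routing table sorts by. Below is a minimal sketch of how they compose, assuming it sits in a `_test.go` file inside package kbucket (the test name is invented for illustration; every other identifier appears in the listings above, and the unexported xor/less helpers are only reachable from within the package):

```go
package kbucket

import (
	"testing"

	"github.com/libp2p/go-libp2p/core/test"
	"github.com/stretchr/testify/require"
)

// TestXORKeyspaceHelpersSketch is an illustrative, hypothetical test showing
// how ConvertPeerID, ConvertKey, CommonPrefixLen and Closer relate.
func TestXORKeyspaceHelpersSketch(t *testing.T) {
	a := test.RandPeerIDFatal(t)
	b := test.RandPeerIDFatal(t)
	key := "some-application-key"

	// ConvertPeerID and ConvertKey both SHA-256 their input, so peer IDs and
	// arbitrary keys land in the same 256-bit XOR keyspace.
	ida, idb := ConvertPeerID(a), ConvertPeerID(b)
	target := ConvertKey(key)

	// CommonPrefixLen counts shared leading bits: an ID shares all 256 bits
	// with itself, and two random peers almost surely share fewer.
	require.Equal(t, 256, CommonPrefixLen(ida, ida))
	require.Less(t, CommonPrefixLen(ida, idb), 256)

	// Closer(a, b, key) is shorthand for comparing the XOR distances of the
	// hashed peer IDs to the hashed key, exactly as util_test.go spells out.
	require.Equal(t, Closer(a, b, key), xor(ida, target).less(xor(idb, target)))
}
```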