├── .github
│   └── workflows
│       ├── generated-pr.yml
│       ├── go-check.yml
│       ├── go-test.yml
│       ├── release-check.yml
│       ├── releaser.yml
│       ├── stale.yml
│       └── tagpush.yml
├── LICENSE
├── README.md
├── bootstrap.go
├── bootstrap_test.go
├── compconfig.go
├── composed.go
├── compparallel.go
├── compparallel_test.go
├── compsequential.go
├── compsequential_test.go
├── dummy_test.go
├── go.mod
├── go.sum
├── limited.go
├── limited_test.go
├── nothing_test.go
├── null.go
├── null_test.go
├── parallel.go
├── parallel_test.go
├── pubkey_test.go
├── tiered.go
├── tiered_test.go
├── tracing
│   └── tracing.go
└── version.json

--------------------------------------------------------------------------------
/.github/workflows/generated-pr.yml:
--------------------------------------------------------------------------------
1 | name: Close Generated PRs
2 | 
3 | on:
4 |   schedule:
5 |     - cron: '0 0 * * *'
6 |   workflow_dispatch:
7 | 
8 | permissions:
9 |   issues: write
10 |   pull-requests: write
11 | 
12 | jobs:
13 |   stale:
14 |     uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1
15 | 
--------------------------------------------------------------------------------
/.github/workflows/go-check.yml:
--------------------------------------------------------------------------------
1 | name: Go Checks
2 | 
3 | on:
4 |   pull_request:
5 |   push:
6 |     branches: ["master"]
7 |   workflow_dispatch:
8 | 
9 | permissions:
10 |   contents: read
11 | 
12 | concurrency:
13 |   group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
14 |   cancel-in-progress: true
15 | 
16 | jobs:
17 |   go-check:
18 |     uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0
19 | 
--------------------------------------------------------------------------------
/.github/workflows/go-test.yml:
--------------------------------------------------------------------------------
1 | name: Go Test
2 | 
3 | on:
4 |   pull_request:
5 |   push:
6 |     branches: ["master"]
7 |   workflow_dispatch:
8 | 
9 | permissions:
10 |   contents: read
11 | 
12 | concurrency:
13 |   group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
14 |   cancel-in-progress: true
15 | 
16 | jobs:
17 |   go-test:
18 |     uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0
19 |     secrets:
20 |       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
21 | 
--------------------------------------------------------------------------------
/.github/workflows/release-check.yml:
--------------------------------------------------------------------------------
1 | name: Release Checker
2 | 
3 | on:
4 |   pull_request_target:
5 |     paths: [ 'version.json' ]
6 |     types: [ opened, synchronize, reopened, labeled, unlabeled ]
7 |   workflow_dispatch:
8 | 
9 | permissions:
10 |   contents: write
11 |   pull-requests: write
12 | 
13 | concurrency:
14 |   group: ${{ github.workflow }}-${{ github.ref }}
15 |   cancel-in-progress: true
16 | 
17 | jobs:
18 |   release-check:
19 |     uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0
20 | 
--------------------------------------------------------------------------------
/.github/workflows/releaser.yml:
--------------------------------------------------------------------------------
1 | name: Releaser
2 | 
3 | on:
4 |   push:
5 |     paths: [ 'version.json' ]
6 |   workflow_dispatch:
7 | 
8 | permissions:
9 |   contents: write
10 | 
11 | concurrency:
12 |   group: ${{ github.workflow }}-${{ github.sha }}
13 |   cancel-in-progress: true
14 | 
15 | jobs:
16 |   releaser:
17 |     uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0
18 | 
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: Close Stale Issues
2 | 
3 | on:
4 |   schedule:
5 |     - cron: '0 0 * * *'
6 |   workflow_dispatch:
7 | 
8 | permissions:
9 |   issues: write
10 |   pull-requests: write
11 | 
12 | jobs:
13 |   stale:
14 |     uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1
15 | 
--------------------------------------------------------------------------------
/.github/workflows/tagpush.yml:
--------------------------------------------------------------------------------
1 | name: Tag Push Checker
2 | 
3 | on:
4 |   push:
5 |     tags:
6 |       - v*
7 | 
8 | permissions:
9 |   contents: read
10 |   issues: write
11 | 
12 | concurrency:
13 |   group: ${{ github.workflow }}-${{ github.ref }}
14 |   cancel-in-progress: true
15 | 
16 | jobs:
17 |   releaser:
18 |     uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0
19 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | 
3 | Copyright (c) 2018 Protocol Labs
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # go-libp2p-routing-helpers
2 | 
3 | [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
4 | [![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/)
5 | [![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23libp2p)
6 | [![GoDoc](https://godoc.org/github.com/libp2p/go-libp2p-routing-helpers?status.svg)](https://godoc.org/github.com/libp2p/go-libp2p-routing-helpers)
7 | [![Coverage Status](https://img.shields.io/codecov/c/github/libp2p/go-libp2p-routing-helpers.svg?style=flat-square&branch=master)](https://codecov.io/github/libp2p/go-libp2p-routing-helpers?branch=master)
8 | [![Build Status](https://travis-ci.org/libp2p/go-libp2p-routing-helpers.svg?branch=master)](https://travis-ci.org/libp2p/go-libp2p-routing-helpers)
9 | [![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)
10 | 
11 | > A collection of helper types for composing different types of routers.
12 | 
13 | ## Documentation
14 | 
15 | See https://godoc.org/github.com/libp2p/go-libp2p-routing-helpers.
16 | 
17 | ## Contribute
18 | 
19 | Feel free to join in. All welcome. Open an [issue](https://github.com/libp2p/go-libp2p-routing-helpers/issues)!
20 | 
21 | This repository falls under the libp2p [Code of Conduct](https://github.com/libp2p/community/blob/master/code-of-conduct.md).
22 | 
23 | ### Want to hack on libp2p?
24 | 
25 | [![](https://cdn.rawgit.com/libp2p/community/master/img/contribute.gif)](https://github.com/libp2p/community/blob/master/CONTRIBUTE.md)
26 | 
27 | ## License
28 | 
29 | MIT
30 | 
31 | ---
32 | 
33 | The last gx published version of this module was: 0.4.0: QmXwV9RskR8vpoYWu9bvKAeAWaBKyxEsEiM9yy6ezbpNBm
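
A quick usage sketch (illustrative only; `dhtRouter` and `backupRouter` are placeholders for real `routing.Routing` implementations, wired to `Null` here so the snippet stays self-contained):

```go
package main

import (
	"context"
	"time"

	routinghelpers "github.com/libp2p/go-libp2p-routing-helpers"
	"github.com/libp2p/go-libp2p/core/routing"
)

func main() {
	// Placeholders for real routers (e.g. a DHT client, a delegated router).
	var dhtRouter, backupRouter routing.Routing = routinghelpers.Null{}, routinghelpers.Null{}

	// Query both routers in parallel; the backup starts after a delay and
	// its errors never fail the composed call.
	router := routinghelpers.NewComposableParallel([]*routinghelpers.ParallelRouter{
		{
			Router:  dhtRouter,
			Timeout: 5 * time.Minute, // 0 would disable the per-router timeout
		},
		{
			Router:       backupRouter,
			ExecuteAfter: time.Second,
			IgnoreError:  true,
		},
	})

	// The composed value implements routing.Routing.
	_, _ = router.GetValue(context.Background(), "/ipns/example")
}
```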
--------------------------------------------------------------------------------
/bootstrap.go:
--------------------------------------------------------------------------------
1 | package routinghelpers
2 | 
3 | import (
4 | 	"context"
5 | )
6 | 
7 | // TODO: Consider moving this to the routing package?
8 | 
9 | // Bootstrap is an interface that should be implemented by any routers wishing
10 | // to be bootstrapped.
11 | type Bootstrap interface {
12 | 	// Bootstrap bootstraps the router.
13 | 	Bootstrap(ctx context.Context) error
14 | }
15 | 
--------------------------------------------------------------------------------
/bootstrap_test.go:
--------------------------------------------------------------------------------
1 | package routinghelpers
2 | 
3 | import (
4 | 	"context"
5 | 	"errors"
6 | 	"testing"
7 | 
8 | 	"github.com/libp2p/go-libp2p/core/routing"
9 | )
10 | 
11 | type bootstrapRouter struct {
12 | 	Null
13 | 	bs func() error
14 | }
15 | 
16 | func (bs *bootstrapRouter) Bootstrap(ctx context.Context) error {
17 | 	return bs.bs()
18 | }
19 | 
20 | func TestBootstrap(t *testing.T) {
21 | 	t.Parallel()
22 | 
23 | 	pings := make([]bool, 6)
24 | 	d := Parallel{
25 | 		Routers: []routing.Routing{
26 | 			Tiered{
27 | 				Routers: []routing.Routing{
28 | 					&bootstrapRouter{
29 | 						bs: func() error {
30 | 							pings[0] = true
31 | 							return nil
32 | 						},
33 | 					},
34 | 				},
35 | 			},
36 | 			Tiered{
37 | 				Routers: []routing.Routing{
38 | 					&bootstrapRouter{
39 | 						bs: func() error {
40 | 							pings[1] = true
41 | 							return nil
42 | 						},
43 | 					},
44 | 					&bootstrapRouter{
45 | 						bs: func() error {
46 | 							pings[2] = true
47 | 							return nil
48 | 						},
49 | 					},
50 | 				},
51 | 			},
52 | 			&Compose{
53 | 				ValueStore: &LimitedValueStore{
54 | 					ValueStore: &bootstrapRouter{
55 | 						bs: func() error {
56 | 							pings[3] = true
57 | 							return nil
58 | 						},
59 | 					},
60 | 					Namespaces: []string{"allow1", "allow2", "notsupported", "error"},
61 | 				},
62 | 			},
63 | 			&Compose{
64 | 				ValueStore: &LimitedValueStore{
65 | 					ValueStore: &dummyValueStore{},
66 | 				},
67 | 			},
68 | 			Null{},
69 | 			&Compose{},
70 | 			&Compose{
71 | 				ContentRouting: &bootstrapRouter{
72 | 					bs: func() error {
73 | 						pings[4] = true
74 | 						return nil
75 | 					},
76 | 				},
77 | 				PeerRouting: &bootstrapRouter{
78 | 					bs: func() error {
79 | 						pings[5] = true
80 | 						return nil
81 | 					},
82 | 				},
83 | 			},
84 | 		},
85 | 	}
86 | 	ctx := context.Background()
87 | 	if err := d.Bootstrap(ctx); err != nil {
88 | 		t.Fatal(err)
89 | 	}
90 | 	for i, p := range pings {
91 | 		if !p {
92 | 			t.Errorf("pings %d not seen", i)
93 | 		}
94 | 	}
95 | 
96 | }
97 | func TestBootstrapErr(t *testing.T) {
98 | 	t.Parallel()
99 | 
100 | 	d := Parallel{
101 | 		Routers: []routing.Routing{
102 | 			Tiered{
103 | 				Routers: []routing.Routing{
104 | 					&bootstrapRouter{
105 | 						bs: func() error {
106 | 							return errors.New("err1")
107 | 						},
108 | 					},
109 | 				},
110 | 			},
111 | 			Tiered{
112 | 				Routers: []routing.Routing{
113 | 					&bootstrapRouter{
114 | 						bs: func() error {
115 | 							return nil
116 | 						},
117 | 					},
118 | 					&bootstrapRouter{
119 | 						bs: func() error {
120 | 							return nil
121 | 						},
122 | 					},
123 | 				},
124 | 			},
125 | 			&Compose{
126 | 				ValueStore: &LimitedValueStore{
127 | 					ValueStore: &bootstrapRouter{
128 | 						bs: func() error {
129 | 							return errors.New("err2")
130 | 						},
131 | 					},
132 | 					Namespaces: []string{"allow1", "allow2", "notsupported", "error"},
133 | 				},
134 | 			},
135 | 			&Compose{
136 | 				ValueStore: &bootstrapRouter{
137 | 					bs: func() error {
138 | 						return errors.New("err3")
139 | 					},
140 | 				},
141 | 				ContentRouting: &bootstrapRouter{
142 | 					bs: func() error {
143 | 						return errors.New("err4")
144 | 					},
145 | 				},
146 | 			},
147 | 			Null{},
148 | 		},
149 | 	}
150 | 	ctx := context.Background()
151 | 	err := d.Bootstrap(ctx)
152 | 	t.Log(err)
153 | 	for _, s := range []string{"err1", "err2", "err3", "err4"} {
154 | 		if !errContains(err, s) {
155 | 			t.Errorf("expecting error to contain '%s'", s)
156 | 		}
157 | 	}
158 | }
--------------------------------------------------------------------------------
/compconfig.go:
--------------------------------------------------------------------------------
1 | package routinghelpers
2 | 
3 | import (
4 | 
"context" 5 | "time" 6 | 7 | "github.com/libp2p/go-libp2p-routing-helpers/tracing" 8 | "github.com/libp2p/go-libp2p/core/routing" 9 | "github.com/multiformats/go-multihash" 10 | ) 11 | 12 | const tracer = tracing.Tracer("go-libp2p-routing-helpers") 13 | 14 | type ParallelRouter struct { 15 | Timeout time.Duration 16 | Router routing.Routing 17 | ExecuteAfter time.Duration 18 | // DoNotWaitForSearchValue is experimental while we wait for a better solution. 19 | DoNotWaitForSearchValue bool 20 | IgnoreError bool 21 | } 22 | 23 | type SequentialRouter struct { 24 | Timeout time.Duration 25 | IgnoreError bool 26 | Router routing.Routing 27 | } 28 | 29 | type ProvideManyRouter interface { 30 | ProvideMany(ctx context.Context, keys []multihash.Multihash) error 31 | } 32 | 33 | type ReadyAbleRouter interface { 34 | Ready() bool 35 | } 36 | 37 | type ComposableRouter interface { 38 | Routers() []routing.Routing 39 | } 40 | -------------------------------------------------------------------------------- /composed.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ipfs/go-cid" 7 | ci "github.com/libp2p/go-libp2p/core/crypto" 8 | "github.com/libp2p/go-libp2p/core/peer" 9 | "github.com/libp2p/go-libp2p/core/routing" 10 | "go.uber.org/multierr" 11 | ) 12 | 13 | // Compose composes the components into a single router. Not specifying a 14 | // component (leaving it nil) is equivalent to specifying the Null router. 15 | // 16 | // It also implements Bootstrap. All *distinct* components implementing 17 | // Bootstrap will be bootstrapped in parallel. Identical components will not be 18 | // bootstrapped twice. 19 | type Compose struct { 20 | ValueStore routing.ValueStore 21 | PeerRouting routing.PeerRouting 22 | ContentRouting routing.ContentRouting 23 | } 24 | 25 | const composeName = "Compose" 26 | 27 | // note: we implement these methods explicitly to avoid having to manually 28 | // specify the Null router everywhere we don't want to implement some 29 | // functionality. 30 | 31 | // PutValue adds value corresponding to given Key. 32 | func (cr *Compose) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) (err error) { 33 | ctx, end := tracer.PutValue(composeName, ctx, key, value, opts...) 34 | defer func() { end(err) }() 35 | 36 | if cr.ValueStore == nil { 37 | return routing.ErrNotSupported 38 | } 39 | return cr.ValueStore.PutValue(ctx, key, value, opts...) 40 | } 41 | 42 | // GetValue searches for the value corresponding to given Key. 43 | func (cr *Compose) GetValue(ctx context.Context, key string, opts ...routing.Option) (value []byte, err error) { 44 | ctx, end := tracer.GetValue(composeName, ctx, key, opts...) 45 | defer func() { end(value, err) }() 46 | 47 | if cr.ValueStore == nil { 48 | return nil, routing.ErrNotFound 49 | } 50 | return cr.ValueStore.GetValue(ctx, key, opts...) 51 | } 52 | 53 | // SearchValue searches for the value corresponding to given Key. 54 | func (cr *Compose) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) { 55 | ctx, wrapper := tracer.SearchValue(composeName, ctx, key, opts...) 56 | defer func() { ch, err = wrapper(ch, err) }() 57 | 58 | if cr.ValueStore == nil { 59 | out := make(chan []byte) 60 | close(out) 61 | return out, nil 62 | } 63 | return cr.ValueStore.SearchValue(ctx, key, opts...) 64 | } 65 | 66 | // Provide adds the given cid to the content routing system. 
If 'true' is 67 | // passed, it also announces it, otherwise it is just kept in the local 68 | // accounting of which objects are being provided. 69 | func (cr *Compose) Provide(ctx context.Context, c cid.Cid, local bool) (err error) { 70 | ctx, end := tracer.Provide(composeName, ctx, c, local) 71 | defer func() { end(err) }() 72 | 73 | if cr.ContentRouting == nil { 74 | return routing.ErrNotSupported 75 | } 76 | return cr.ContentRouting.Provide(ctx, c, local) 77 | } 78 | 79 | // FindProvidersAsync searches for peers who are able to provide a given key. 80 | // 81 | // If count > 0, it returns at most count providers. If count == 0, it returns 82 | // an unbounded number of providers. 83 | func (cr *Compose) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { 84 | ctx, wrapper := tracer.FindProvidersAsync(composeName, ctx, c, count) 85 | 86 | if cr.ContentRouting == nil { 87 | ch := make(chan peer.AddrInfo) 88 | close(ch) 89 | return wrapper(ch, routing.ErrNotFound) 90 | } 91 | return wrapper(cr.ContentRouting.FindProvidersAsync(ctx, c, count), nil) 92 | } 93 | 94 | // FindPeer searches for a peer with given ID, returns a peer.AddrInfo 95 | // with relevant addresses. 96 | func (cr *Compose) FindPeer(ctx context.Context, p peer.ID) (info peer.AddrInfo, err error) { 97 | ctx, end := tracer.FindPeer(composeName, ctx, p) 98 | defer func() { end(info, err) }() 99 | 100 | if cr.PeerRouting == nil { 101 | return peer.AddrInfo{}, routing.ErrNotFound 102 | } 103 | return cr.PeerRouting.FindPeer(ctx, p) 104 | } 105 | 106 | // GetPublicKey returns the public key for the given peer. 107 | func (cr *Compose) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, error) { 108 | if cr.ValueStore == nil { 109 | return nil, routing.ErrNotFound 110 | } 111 | return routing.GetPublicKey(cr.ValueStore, ctx, p) 112 | } 113 | 114 | // Bootstrap the router. 115 | func (cr *Compose) Bootstrap(ctx context.Context) (err error) { 116 | ctx, end := tracer.Bootstrap(composeName, ctx) 117 | defer func() { end(err) }() 118 | 119 | // Deduplicate. Technically, calling bootstrap multiple times shouldn't 120 | // be an issue but using the same router for multiple fields of Compose 121 | // is common. 
122 | 	routers := make(map[Bootstrap]struct{}, 3)
123 | 	for _, value := range [...]interface{}{
124 | 		cr.ValueStore,
125 | 		cr.ContentRouting,
126 | 		cr.PeerRouting,
127 | 	} {
128 | 		switch b := value.(type) {
129 | 		case nil:
130 | 		case Null:
131 | 		case Bootstrap:
132 | 			routers[b] = struct{}{}
133 | 		}
134 | 	}
135 | 
136 | 	var errs error
137 | 	for b := range routers {
138 | 		if err := b.Bootstrap(ctx); err != nil {
139 | 			errs = multierr.Append(errs, err)
140 | 		}
141 | 	}
142 | 	return errs
143 | }
144 | 
145 | var _ routing.Routing = (*Compose)(nil)
146 | var _ routing.PubKeyFetcher = (*Compose)(nil)
--------------------------------------------------------------------------------
/compparallel.go:
--------------------------------------------------------------------------------
1 | package routinghelpers
2 | 
3 | import (
4 | 	"context"
5 | 	"errors"
6 | 	"reflect"
7 | 	"sync"
8 | 	"sync/atomic"
9 | 	"time"
10 | 
11 | 	"github.com/Jorropo/jsync"
12 | 	"github.com/ipfs/go-cid"
13 | 	logging "github.com/ipfs/go-log/v2"
14 | 	"github.com/libp2p/go-libp2p/core/peer"
15 | 	"github.com/libp2p/go-libp2p/core/routing"
16 | 	"github.com/multiformats/go-multihash"
17 | 	"go.opentelemetry.io/otel/attribute"
18 | 	"go.opentelemetry.io/otel/codes"
19 | 	"go.uber.org/multierr"
20 | )
21 | 
22 | var log = logging.Logger("routing/composable")
23 | 
24 | var _ routing.Routing = (*composableParallel)(nil)
25 | var _ ProvideManyRouter = (*composableParallel)(nil)
26 | var _ ReadyAbleRouter = (*composableParallel)(nil)
27 | var _ ComposableRouter = (*composableParallel)(nil)
28 | 
29 | const nameParallel = "ComposableParallel"
30 | 
31 | type composableParallel struct {
32 | 	routers []*ParallelRouter
33 | }
34 | 
35 | // NewComposableParallel creates a Router that will execute methods from provided Routers in parallel.
36 | // On all methods, if the IgnoreError flag is set, that Router will not stop the entire execution.
37 | // On all methods, if ExecuteAfter is set, that Router will be executed after the timer.
38 | // A Router-specific timeout will start counting AFTER the ExecuteAfter timer.
39 | func NewComposableParallel(routers []*ParallelRouter) *composableParallel {
40 | 	return &composableParallel{
41 | 		routers: routers,
42 | 	}
43 | }
44 | 
45 | func (r *composableParallel) Routers() []routing.Routing {
46 | 	var routers []routing.Routing
47 | 	for _, pr := range r.routers {
48 | 		routers = append(routers, pr.Router)
49 | 	}
50 | 
51 | 	return routers
52 | }
53 | 
54 | // Provide will call all Routers in parallel.
55 | func (r *composableParallel) Provide(ctx context.Context, cid cid.Cid, provide bool) (err error) {
56 | 	ctx, end := tracer.Provide(nameParallel, ctx, cid, provide)
57 | 	defer func() { end(err) }()
58 | 
59 | 	return executeParallel(ctx, r.routers,
60 | 		func(ctx context.Context, r routing.Routing) error {
61 | 			return r.Provide(ctx, cid, provide)
62 | 		},
63 | 	)
64 | }
65 | 
66 | // ProvideMany will call all Routers in parallel, falling back to iterative
67 | // single Provide call for routers which do not support [ProvideManyRouter].
68 | func (r *composableParallel) ProvideMany(ctx context.Context, keys []multihash.Multihash) (err error) { 69 | ctx, end := tracer.ProvideMany(nameParallel, ctx, keys) 70 | defer func() { end(err) }() 71 | 72 | return executeParallel(ctx, r.routers, 73 | func(ctx context.Context, r routing.Routing) error { 74 | if pm, ok := r.(ProvideManyRouter); ok { 75 | return pm.ProvideMany(ctx, keys) 76 | } 77 | 78 | for _, k := range keys { 79 | if err := r.Provide(ctx, cid.NewCidV1(cid.Raw, k), true); err != nil { 80 | return err 81 | } 82 | } 83 | return nil 84 | }, 85 | ) 86 | } 87 | 88 | // Ready will call all supported [ReadyAbleRouter] Routers SEQUENTIALLY. 89 | // If some of them are not ready, this method will return false. 90 | func (r *composableParallel) Ready() bool { 91 | for _, ro := range r.routers { 92 | pm, ok := ro.Router.(ReadyAbleRouter) 93 | if !ok { 94 | continue 95 | } 96 | 97 | if !pm.Ready() { 98 | return false 99 | } 100 | } 101 | 102 | return true 103 | } 104 | 105 | // FindProvidersAsync will execute all Routers in parallel, iterating results from them in unspecified order. 106 | // If count is set, only that amount of elements will be returned without any specification about from what router is obtained. 107 | // To gather providers from a set of Routers first, you can use the ExecuteAfter timer to delay some Router execution. 108 | func (r *composableParallel) FindProvidersAsync(ctx context.Context, cid cid.Cid, count int) <-chan peer.AddrInfo { 109 | ctx, chWrapper := tracer.FindProvidersAsync(nameParallel, ctx, cid, count) 110 | 111 | var totalCount int64 112 | ch, err := getChannelOrErrorParallel( 113 | ctx, 114 | r.routers, 115 | func(ctx context.Context, r routing.Routing) (<-chan peer.AddrInfo, error) { 116 | return r.FindProvidersAsync(ctx, cid, count), nil 117 | }, 118 | func() bool { 119 | if count <= 0 { 120 | return false 121 | } 122 | return atomic.AddInt64(&totalCount, 1) >= int64(count) 123 | }, false, 124 | ) 125 | 126 | if err != nil { 127 | ch = make(chan peer.AddrInfo) 128 | close(ch) 129 | } 130 | 131 | return chWrapper(ch, err) 132 | } 133 | 134 | // FindPeer will execute all Routers in parallel, getting the first AddrInfo found and cancelling all other Router calls. 135 | func (r *composableParallel) FindPeer(ctx context.Context, id peer.ID) (p peer.AddrInfo, err error) { 136 | ctx, end := tracer.FindPeer(nameParallel, ctx, id) 137 | defer func() { end(p, err) }() 138 | 139 | return getValueOrErrorParallel(ctx, r.routers, 140 | func(ctx context.Context, r routing.Routing) (peer.AddrInfo, bool, error) { 141 | addr, err := r.FindPeer(ctx, id) 142 | return addr, addr.ID == "", err 143 | }, 144 | ) 145 | } 146 | 147 | // PutValue will execute all Routers in parallel. If a Router fails and IgnoreError flag is not set, the whole execution will fail. 148 | // Some Puts before the failure might be successful, even if we return an error. 149 | func (r *composableParallel) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) (err error) { 150 | ctx, end := tracer.PutValue(nameParallel, ctx, key, val, opts...) 151 | defer func() { end(err) }() 152 | 153 | return executeParallel(ctx, r.routers, 154 | func(ctx context.Context, r routing.Routing) error { 155 | return r.PutValue(ctx, key, val, opts...) 156 | }, 157 | ) 158 | } 159 | 160 | // GetValue will execute all Routers in parallel. The first value found will be returned, cancelling all other executions. 
161 | func (r *composableParallel) GetValue(ctx context.Context, key string, opts ...routing.Option) (val []byte, err error) { 162 | ctx, end := tracer.GetValue(nameParallel, ctx, key, opts...) 163 | defer func() { end(val, err) }() 164 | 165 | return getValueOrErrorParallel(ctx, r.routers, 166 | func(ctx context.Context, r routing.Routing) ([]byte, bool, error) { 167 | val, err := r.GetValue(ctx, key, opts...) 168 | return val, len(val) == 0, err 169 | }) 170 | } 171 | 172 | func (r *composableParallel) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { 173 | ctx, wrapper := tracer.SearchValue(nameParallel, ctx, key, opts...) 174 | 175 | return wrapper(getChannelOrErrorParallel( 176 | ctx, 177 | r.routers, 178 | func(ctx context.Context, r routing.Routing) (<-chan []byte, error) { 179 | return r.SearchValue(ctx, key, opts...) 180 | }, 181 | func() bool { return false }, true, 182 | )) 183 | } 184 | 185 | func (r *composableParallel) Bootstrap(ctx context.Context) (err error) { 186 | ctx, end := tracer.Bootstrap(nameParallel, ctx) 187 | defer end(err) 188 | 189 | return executeParallel(ctx, r.routers, 190 | func(ctx context.Context, r routing.Routing) error { 191 | return r.Bootstrap(ctx) 192 | }) 193 | } 194 | 195 | func withCancelAndOptionalTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { 196 | if timeout != 0 { 197 | return context.WithTimeout(ctx, timeout) 198 | } 199 | return context.WithCancel(ctx) 200 | } 201 | 202 | func getValueOrErrorParallel[T any]( 203 | ctx context.Context, 204 | routers []*ParallelRouter, 205 | f func(context.Context, routing.Routing) (T, bool, error), 206 | ) (value T, err error) { 207 | outCh := make(chan T) 208 | errCh := make(chan error) 209 | 210 | // global cancel context to stop early other router's execution. 
211 | ctx, cancelAll := context.WithCancel(ctx) 212 | defer cancelAll() 213 | fwg := jsync.NewFWaitGroup(func() { 214 | close(outCh) 215 | close(errCh) 216 | log.Debug("getValueOrErrorParallel: finished executing all routers ", len(routers)) 217 | }, uint64(len(routers))) 218 | for _, r := range routers { 219 | go func(r *ParallelRouter) { 220 | defer fwg.Done() 221 | log.Debug("getValueOrErrorParallel: starting execution for router ", r.Router, 222 | " with timeout ", r.Timeout, 223 | " and ignore errors ", r.IgnoreError, 224 | ) 225 | tim := time.NewTimer(r.ExecuteAfter) 226 | defer tim.Stop() 227 | select { 228 | case <-ctx.Done(): 229 | case <-tim.C: 230 | ctx, cancel := withCancelAndOptionalTimeout(ctx, r.Timeout) 231 | defer cancel() 232 | value, empty, err := f(ctx, r.Router) 233 | if err != nil { 234 | if r.IgnoreError || errors.Is(err, routing.ErrNotFound) { 235 | log.Debug("getValueOrErrorParallel: not found or ignorable error for router ", r.Router, 236 | " with timeout ", r.Timeout, 237 | " and ignore errors ", r.IgnoreError, 238 | ) 239 | return 240 | } 241 | log.Debug("getValueOrErrorParallel: error calling router function for router ", r.Router, 242 | " with timeout ", r.Timeout, 243 | " and ignore errors ", r.IgnoreError, 244 | " with error ", err, 245 | ) 246 | select { 247 | case <-ctx.Done(): 248 | case errCh <- err: 249 | } 250 | return 251 | } 252 | if empty { 253 | log.Debug("getValueOrErrorParallel: empty flag for router ", r.Router, 254 | " with timeout ", r.Timeout, 255 | " and ignore errors ", r.IgnoreError, 256 | ) 257 | return 258 | } 259 | select { 260 | case <-ctx.Done(): 261 | return 262 | case outCh <- value: 263 | } 264 | } 265 | }(r) 266 | } 267 | 268 | select { 269 | case out, ok := <-outCh: 270 | if !ok { 271 | return value, routing.ErrNotFound 272 | } 273 | 274 | log.Debug("getValueOrErrorParallel: value returned by channel") 275 | 276 | return out, nil 277 | case err, ok := <-errCh: 278 | if !ok { 279 | return value, routing.ErrNotFound 280 | } 281 | 282 | log.Debug("getValueOrErrorParallel: error returned by channel:", err) 283 | 284 | return value, err 285 | case <-ctx.Done(): 286 | err := ctx.Err() 287 | log.Debug("getValueOrErrorParallel: error on context done:", err) 288 | return value, err 289 | } 290 | } 291 | 292 | func executeParallel( 293 | ctx context.Context, 294 | routers []*ParallelRouter, 295 | f func(context.Context, routing.Routing) error, 296 | ) error { 297 | var errsLk sync.Mutex 298 | var errs []error 299 | var wg sync.WaitGroup 300 | wg.Add(len(routers)) 301 | 302 | for _, r := range routers { 303 | go func(r *ParallelRouter, ctx context.Context) { 304 | defer wg.Done() 305 | 306 | if err := func() error { 307 | log.Debug("executeParallel: starting execution for router ", r.Router, 308 | " with timeout ", r.Timeout, 309 | " and ignore errors ", r.IgnoreError, 310 | ) 311 | tim := time.NewTimer(r.ExecuteAfter) 312 | defer tim.Stop() 313 | select { 314 | case <-ctx.Done(): 315 | if !r.IgnoreError { 316 | log.Debug("executeParallel: stopping execution on router on context done for router ", r.Router, 317 | " with timeout ", r.Timeout, 318 | " and ignore errors ", r.IgnoreError, 319 | ) 320 | return ctx.Err() 321 | } 322 | case <-tim.C: 323 | ctx, cancel := withCancelAndOptionalTimeout(ctx, r.Timeout) 324 | defer cancel() 325 | 326 | log.Debug("executeParallel: calling router function for router ", r.Router, 327 | " with timeout ", r.Timeout, 328 | " and ignore errors ", r.IgnoreError, 329 | ) 330 | if err := f(ctx, r.Router); err != 
nil && !r.IgnoreError {
331 | 						log.Debug("executeParallel: error calling router function for router ", r.Router,
332 | 							" with timeout ", r.Timeout,
333 | 							" and ignore errors ", r.IgnoreError,
334 | 							" with error ", err,
335 | 						)
336 | 						return err
337 | 					}
338 | 				}
339 | 
340 | 				return nil
341 | 			}(); err != nil {
342 | 				errsLk.Lock()
343 | 				errs = append(errs, err)
344 | 				errsLk.Unlock()
345 | 			}
346 | 		}(r, ctx)
347 | 	}
348 | 
349 | 	wg.Wait()
350 | 	errOut := multierr.Combine(errs...)
351 | 
352 | 	if errOut != nil {
353 | 		log.Debug("executeParallel: finished executing all routers with error: ", errOut)
354 | 	}
355 | 
356 | 	return errOut
357 | }
358 | 
359 | func getChannelOrErrorParallel[T any](
360 | 	ctx context.Context,
361 | 	routers []*ParallelRouter,
362 | 	f func(context.Context, routing.Routing) (<-chan T, error),
363 | 	shouldStop func() bool, isSearchValue bool,
364 | ) (chan T, error) {
365 | 	// ready is a mutex that starts locked; it stays locked until at least one of the channels has produced an item,
366 | 	// which makes us return only when ready.
367 | 	var ready sync.Mutex
368 | 	ready.Lock()
369 | 	var resultsLk sync.Mutex
370 | 	var outCh chan T
371 | 	// nil errors indicate success
372 | 	errors := []error{}
373 | 
374 | 	ctx, cancelAll := context.WithCancel(ctx)
375 | 	fwg := jsync.NewFWaitGroup(func() {
376 | 		if outCh != nil {
377 | 			close(outCh)
378 | 		} else {
379 | 			ready.Unlock()
380 | 		}
381 | 
382 | 		cancelAll()
383 | 	}, uint64(len(routers)))
384 | 
385 | 	var blocking atomic.Uint64
386 | 	blocking.Add(1) // start at one so we don't cancel while dispatching
387 | 	var sent atomic.Bool
388 | 
389 | 	for i, r := range routers {
390 | 		ctx, span := tracer.StartSpan(ctx, nameParallel+".worker")
391 | 		isBlocking := !isSearchValue || !r.DoNotWaitForSearchValue
392 | 		if isBlocking {
393 | 			blocking.Add(1)
394 | 		}
395 | 		isRecording := span.IsRecording()
396 | 		if isRecording {
397 | 			span.SetAttributes(
398 | 				attribute.Bool("blocking", isBlocking),
399 | 				attribute.Stringer("type", reflect.TypeOf(r.Router)),
400 | 				attribute.Int("routingNumber", i),
401 | 			)
402 | 		}
403 | 
404 | 		go func(r *ParallelRouter) {
405 | 			defer span.End()
406 | 			defer fwg.Done()
407 | 			defer func() {
408 | 				var remainingBlockers uint64
409 | 				if isSearchValue && r.DoNotWaitForSearchValue {
410 | 					remainingBlockers = blocking.Load()
411 | 				} else {
412 | 					var minusOne uint64
413 | 					minusOne--
414 | 					remainingBlockers = blocking.Add(minusOne)
415 | 				}
416 | 
417 | 				if remainingBlockers == 0 && sent.Load() {
418 | 					cancelAll()
419 | 				}
420 | 			}()
421 | 
422 | 			if r.ExecuteAfter != 0 {
423 | 				tim := time.NewTimer(r.ExecuteAfter)
424 | 				defer tim.Stop()
425 | 				select {
426 | 				case <-ctx.Done():
427 | 					return
428 | 				case <-tim.C:
429 | 					// ready
430 | 				}
431 | 			}
432 | 
433 | 			ctx, cancel := withCancelAndOptionalTimeout(ctx, r.Timeout)
434 | 			defer cancel()
435 | 
436 | 			valueChan, err := f(ctx, r.Router)
437 | 			if err != nil {
438 | 				if isRecording {
439 | 					span.SetStatus(codes.Error, err.Error())
440 | 				}
441 | 
442 | 				if r.IgnoreError {
443 | 					return
444 | 				}
445 | 
446 | 				resultsLk.Lock()
447 | 				defer resultsLk.Unlock()
448 | 				if errors == nil {
449 | 					return
450 | 				}
451 | 				errors = append(errors, err)
452 | 
453 | 				return
454 | 			}
455 | 			if isRecording {
456 | 				span.AddEvent("started streaming")
457 | 			}
458 | 
459 | 			for first := true; true; first = false {
460 | 				select {
461 | 				case <-ctx.Done():
462 | 					return
463 | 				case val, ok := <-valueChan:
464 | 					if !ok {
465 | 						return
466 | 					}
467 | 					if isRecording {
468 | 						span.AddEvent("got result")
469 | 					}
470 | 
471 | 					if first {
472 | 						resultsLk.Lock()
473 | 						if outCh == nil {
474 | 							outCh = make(chan T)
475 | 							errors = nil
476 | 							ready.Unlock()
477 | 						}
478 | 						resultsLk.Unlock()
479 | 					}
480 | 
481 | 					select {
482 | 					case <-ctx.Done():
483 | 						return
484 | 					case outCh <- val:
485 | 						sent.Store(true)
486 | 					}
487 | 
488 | 					if shouldStop() {
489 | 						cancelAll()
490 | 						return
491 | 					}
492 | 				}
493 | 			}
494 | 		}(r)
495 | 	}
496 | 
497 | 	// remove the dispatch count and check if we should cancel
498 | 	var minusOne uint64
499 | 	minusOne--
500 | 	if blocking.Add(minusOne) == 0 && sent.Load() {
501 | 		cancelAll()
502 | 	}
503 | 
504 | 	ready.Lock()
505 | 	if outCh != nil {
506 | 		return outCh, nil
507 | 	} else if len(errors) == 0 {
508 | 		// found nothing
509 | 		ch := make(chan T)
510 | 		close(ch)
511 | 		return ch, nil
512 | 	} else {
513 | 		return nil, multierr.Combine(errors...)
514 | 	}
515 | }
516 | 
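One detail of getChannelOrErrorParallel above is worth calling out: it blocks its caller with a sync.Mutex used as a one-shot latch. The mutex is locked at creation, then unlocked either by the first worker that produces a result or, via the wait-group callback, by the final worker exiting empty-handed. A minimal, self-contained sketch of that latch pattern (illustrative only, not part of this package):

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		var ready sync.Mutex
		ready.Lock() // the latch starts closed

		out := make(chan string, 1)
		go func() {
			out <- "first result"
			ready.Unlock() // open the latch once the first item is in
		}()

		ready.Lock() // blocks here until the goroutine opens the latch
		fmt.Println(<-out)
	}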
--------------------------------------------------------------------------------
/compparallel_test.go:
--------------------------------------------------------------------------------
1 | package routinghelpers
2 | 
3 | import (
4 | 	"context"
5 | 	"errors"
6 | 	"testing"
7 | 	"time"
8 | 
9 | 	"github.com/ipfs/go-cid"
10 | 	"github.com/libp2p/go-libp2p/core/peer"
11 | 	"github.com/libp2p/go-libp2p/core/routing"
12 | 	mh "github.com/multiformats/go-multihash"
13 | 	"github.com/stretchr/testify/require"
14 | )
15 | 
16 | func TestNoResults(t *testing.T) {
17 | 	t.Parallel()
18 | 
19 | 	require := require.New(t)
20 | 	rs := []*ParallelRouter{
21 | 		{
22 | 			Timeout:     time.Second,
23 | 			IgnoreError: true,
24 | 			Router:      Null{},
25 | 		},
26 | 		{
27 | 			Timeout:     time.Second,
28 | 			IgnoreError: true,
29 | 			Router: &Compose{
30 | 				ValueStore:     newDummyValueStore(t, []string{"a"}, []string{"av"}),
31 | 				PeerRouting:    Null{},
32 | 				ContentRouting: Null{},
33 | 			},
34 | 		},
35 | 	}
36 | 
37 | 	cp := NewComposableParallel(rs)
38 | 
39 | 	v, err := cp.GetValue(context.Background(), "a")
40 | 	require.NoError(err)
41 | 	require.Equal("av", string(v))
42 | 
43 | 	require.Equal(2, len(cp.Routers()))
44 | }
45 | 
46 | type getValueFixture struct {
47 | 	err            error
48 | 	key            string
49 | 	value          string
50 | 	searchValCount int
51 | }
52 | 
53 | type putValueFixture struct {
54 | 	err   error
55 | 	key   string
56 | 	value string
57 | }
58 | type provideFixture struct {
59 | 	err error
60 | }
61 | type findPeerFixture struct {
62 | 	peerID string
63 | 	err    error
64 | }
65 | type searchValueFixture struct {
66 | 	ctx  context.Context
67 | 	err  error
68 | 	key  string
69 | 	vals []string
70 | }
71 | 
72 | func TestComposableParallelFixtures(t *testing.T) {
73 | 	t.Parallel()
74 | 
75 | 	fixtures := []struct {
76 | 		Name        string
77 | 		routers     []*ParallelRouter
78 | 		GetValue    []getValueFixture
79 | 		PutValue    []putValueFixture
80 | 		Provide     []provideFixture
81 | 		FindPeer    []findPeerFixture
82 | 		SearchValue []searchValueFixture
83 | 	}{
84 | 		{
85 | 			Name: "simple two routers, one with delay",
86 | 			routers: []*ParallelRouter{
87 | 				{
88 | 					Timeout:     time.Second,
89 | 					IgnoreError: false,
90 | 					Router: &Compose{
91 | 						ValueStore:     newDummyValueStore(t, []string{"a", "b", "c"}, []string{"av", "bv", "cv"}),
92 | 						PeerRouting:    newDummyPeerRouting(t, []peer.ID{"pid1", "pid2"}),
93 | 						ContentRouting: Null{},
94 | 					},
95 | 				},
96 | 				{
97 | 					Timeout:      time.Minute,
98 | 					IgnoreError:  false,
99 | 					ExecuteAfter: time.Second,
100 | 					Router: &Compose{
101 | 						ValueStore:     newDummyValueStore(t, []string{"a", "d"}, []string{"av2", "dv"}),
102 | 						PeerRouting:    newDummyPeerRouting(t, []peer.ID{"pid1", "pid3"}),
103 | 						ContentRouting: Null{},
104 | 					},
105 | 				},
106 | 			},
107 | 			GetValue: []getValueFixture{
108 | 				{key: "d", value: "dv", searchValCount: 
1}, 109 | {key: "a", value: "av", searchValCount: 2}, 110 | }, 111 | PutValue: []putValueFixture{ 112 | {err: errors.New("a; a"), key: "/error/a", value: "a"}, 113 | {key: "a", value: "a"}, 114 | }, 115 | Provide: []provideFixture{{ 116 | err: errors.New("routing: operation or key not supported; routing: operation or key not supported"), 117 | }}, 118 | FindPeer: []findPeerFixture{{peerID: "pid1"}, {peerID: "pid3"}}, 119 | SearchValue: []searchValueFixture{{key: "a", vals: []string{"a", "a"}}}, 120 | }, 121 | { 122 | Name: "simple two routers, one with delay, plus a third nothing DoNotWaitForStreamingResponses router", 123 | routers: []*ParallelRouter{ 124 | { 125 | Timeout: time.Second, 126 | IgnoreError: false, 127 | Router: &Compose{ 128 | ValueStore: newDummyValueStore(t, []string{"a", "b", "c"}, []string{"av", "bv", "cv"}), 129 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid2"}), 130 | ContentRouting: Null{}, 131 | }, 132 | }, 133 | { 134 | Timeout: time.Minute, 135 | IgnoreError: false, 136 | ExecuteAfter: time.Second, 137 | Router: &Compose{ 138 | ValueStore: newDummyValueStore(t, []string{"a", "d"}, []string{"av2", "dv"}), 139 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid3"}), 140 | ContentRouting: Null{}, 141 | }, 142 | }, 143 | { 144 | DoNotWaitForSearchValue: true, 145 | IgnoreError: true, 146 | Router: nothing{}, 147 | }, 148 | }, 149 | GetValue: []getValueFixture{ 150 | {key: "d", value: "dv", searchValCount: 1}, 151 | {key: "a", value: "av", searchValCount: 2}, 152 | }, 153 | PutValue: []putValueFixture{ 154 | {err: errors.New("a; a"), key: "/error/a", value: "a"}, 155 | {key: "a", value: "a"}, 156 | }, 157 | Provide: []provideFixture{{ 158 | err: errors.New("routing: operation or key not supported; routing: operation or key not supported"), 159 | }}, 160 | FindPeer: []findPeerFixture{{peerID: "pid1"}, {peerID: "pid3"}}, 161 | SearchValue: []searchValueFixture{{key: "a", vals: []string{"a", "a"}}}, 162 | }, 163 | { 164 | Name: "two routers with ignore errors", 165 | routers: []*ParallelRouter{ 166 | { 167 | Timeout: time.Second, 168 | IgnoreError: true, 169 | Router: &Compose{ 170 | ValueStore: newDummyValueStore(t, []string{}, []string{}), 171 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid2"}), 172 | ContentRouting: Null{}, 173 | }, 174 | }, 175 | { 176 | Timeout: time.Minute, 177 | IgnoreError: true, 178 | ExecuteAfter: time.Second, 179 | Router: &Compose{ 180 | ValueStore: newDummyValueStore(t, []string{"d"}, []string{"dv"}), 181 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid3"}), 182 | ContentRouting: Null{}, 183 | }, 184 | }, 185 | }, 186 | GetValue: []getValueFixture{ 187 | {key: "d", value: "dv", searchValCount: 1}, 188 | {err: routing.ErrNotFound, key: "a"}, // even ignoring errors, if the value is not found we return not found 189 | }, 190 | PutValue: []putValueFixture{{key: "/error/x", value: "xv"}, {key: "/error/y", value: "yv"}}, 191 | FindPeer: []findPeerFixture{ 192 | {peerID: "pid1"}, 193 | {err: routing.ErrNotFound, peerID: "pid4"}, // even ignoring errors, if the value is not found we return not found 194 | }, 195 | SearchValue: []searchValueFixture{{key: "a", vals: nil}}, 196 | }, 197 | { 198 | Name: "two routers with ignore errors no delay", 199 | routers: []*ParallelRouter{ 200 | { 201 | Timeout: time.Second, 202 | IgnoreError: true, 203 | Router: &Compose{ 204 | ValueStore: newDummyValueStore(t, []string{"a"}, []string{"av"}), 205 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid2"}), 206 
| ContentRouting: Null{}, 207 | }, 208 | }, 209 | { 210 | Timeout: time.Minute, 211 | IgnoreError: true, 212 | Router: &Compose{ 213 | ValueStore: newDummyValueStore(t, []string{"d", "e"}, []string{"dv", "ev"}), 214 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid3"}), 215 | ContentRouting: Null{}, 216 | }, 217 | }, 218 | }, 219 | GetValue: []getValueFixture{ 220 | {key: "d", value: "dv", searchValCount: 1}, 221 | {key: "a", value: "av", searchValCount: 1}, 222 | {err: routing.ErrNotFound, key: "/error/z"}, 223 | {err: routing.ErrNotFound, key: "/error/y"}, 224 | }, 225 | PutValue: []putValueFixture{ 226 | {key: "/error/x", value: "xv"}, 227 | {key: "/error/y", value: "yv"}, 228 | }, 229 | FindPeer: []findPeerFixture{ 230 | {peerID: "pid1"}, 231 | {peerID: "pid4", err: routing.ErrNotFound}, 232 | }, 233 | }, 234 | { 235 | Name: "two routers one value store failing always", 236 | routers: []*ParallelRouter{ 237 | { 238 | Timeout: time.Second, 239 | IgnoreError: false, 240 | Router: &Compose{ 241 | ValueStore: failValueStore{}, 242 | PeerRouting: Null{}, 243 | ContentRouting: Null{}, 244 | }, 245 | }, 246 | { 247 | Timeout: time.Minute, 248 | IgnoreError: false, 249 | ExecuteAfter: time.Minute, 250 | Router: &Compose{ 251 | ValueStore: newDummyValueStore(t, []string{"d", "e"}, []string{"dv", "ev"}), 252 | PeerRouting: Null{}, 253 | ContentRouting: Null{}, 254 | }, 255 | }, 256 | }, 257 | GetValue: []getValueFixture{ 258 | {err: errFailValue, key: "d", value: "dv"}, 259 | {err: errFailValue, key: "a", value: "av"}, 260 | }, 261 | }, 262 | { 263 | Name: "two routers one value store failing always but ignored", 264 | routers: []*ParallelRouter{ 265 | { 266 | Timeout: time.Second, 267 | IgnoreError: true, 268 | Router: &Compose{ 269 | ValueStore: failValueStore{}, 270 | PeerRouting: Null{}, 271 | ContentRouting: Null{}, 272 | }, 273 | }, 274 | { 275 | Timeout: time.Second, 276 | IgnoreError: false, 277 | Router: &Compose{ 278 | ValueStore: newDummyValueStore(t, []string{"d", "e"}, []string{"dv", "ev"}), 279 | PeerRouting: Null{}, 280 | ContentRouting: Null{}, 281 | }, 282 | }, 283 | }, 284 | GetValue: []getValueFixture{ 285 | {key: "d", value: "dv", searchValCount: 1}, 286 | {err: routing.ErrNotFound, key: "a", value: "av"}, 287 | }, 288 | }, 289 | { 290 | Name: "timeout=0 should disable the timeout, two routers with one disabled timeout should timeout on the other router", 291 | routers: []*ParallelRouter{ 292 | { 293 | Timeout: 0, 294 | IgnoreError: false, 295 | Router: &Compose{ 296 | ValueStore: newDummyValueStore(t, nil, nil), 297 | }, 298 | }, 299 | { 300 | Timeout: time.Second, 301 | IgnoreError: false, 302 | Router: &Compose{ 303 | ValueStore: newDummyValueStore(t, []string{"a"}, []string{"av"}), 304 | }, 305 | }, 306 | }, 307 | GetValue: []getValueFixture{ 308 | {key: "/wait/100ms/a", value: "av", searchValCount: 1}, 309 | }, 310 | }, 311 | { 312 | Name: "Return an error even if routers return data alongside the error", 313 | routers: []*ParallelRouter{ 314 | { 315 | Timeout: 0, 316 | IgnoreError: true, 317 | Router: &Compose{ 318 | PeerRouting: peerRoutingDataWithError{}, 319 | }, 320 | }, 321 | { 322 | Timeout: time.Second, 323 | IgnoreError: true, 324 | Router: &Compose{ 325 | PeerRouting: peerRoutingDataWithError{}, 326 | }, 327 | }, 328 | }, 329 | FindPeer: []findPeerFixture{ 330 | {peerID: "pid1", err: routing.ErrNotFound}, 331 | }, 332 | }, 333 | } 334 | 335 | for _, f := range fixtures { 336 | f := f 337 | t.Run(f.Name, func(t *testing.T) { 338 | t.Parallel() 339 | 
340 | require := require.New(t) 341 | cpr := NewComposableParallel(f.routers) 342 | for _, gvf := range f.GetValue { 343 | val, err := cpr.GetValue(context.Background(), gvf.key) 344 | if gvf.err != nil { 345 | require.ErrorContains(err, gvf.err.Error()) 346 | continue 347 | } 348 | require.NoError(err) 349 | require.Equal(gvf.value, string(val)) 350 | 351 | vals, err := cpr.SearchValue(context.Background(), gvf.key) 352 | if gvf.err != nil { 353 | require.ErrorContains(err, gvf.err.Error()) 354 | continue 355 | } 356 | require.NoError(err) 357 | 358 | count := 0 359 | for range vals { 360 | count++ 361 | } 362 | 363 | require.Equal(gvf.searchValCount, count) 364 | } 365 | 366 | for _, pvf := range f.PutValue { 367 | err := cpr.PutValue(context.Background(), pvf.key, []byte(pvf.value)) 368 | if pvf.err != nil { 369 | require.ErrorContains(err, pvf.err.Error()) 370 | continue 371 | } 372 | require.NoError(err) 373 | } 374 | 375 | for _, pf := range f.Provide { 376 | err := cpr.Provide(context.Background(), cid.Cid{}, true) 377 | if pf.err != nil { 378 | require.ErrorContains(err, pf.err.Error()) 379 | continue 380 | } 381 | require.NoError(err) 382 | } 383 | 384 | for _, fpf := range f.FindPeer { 385 | addr, err := cpr.FindPeer(context.Background(), peer.ID(fpf.peerID)) 386 | if fpf.err != nil { 387 | require.ErrorContains(err, fpf.err.Error()) 388 | continue 389 | } 390 | require.NoError(err) 391 | require.Equal(fpf.peerID, string(addr.ID)) 392 | } 393 | 394 | for _, svf := range f.SearchValue { 395 | ctx := context.Background() 396 | if svf.ctx != nil { 397 | ctx = svf.ctx 398 | } 399 | res, err := cpr.SearchValue(ctx, svf.key) 400 | if svf.err != nil { 401 | require.ErrorContains(err, svf.err.Error()) 402 | 403 | // check that the result channel is responsive 404 | // ex if we accidentally return nil then result chan will hang when reading 405 | // it should either be closed or produce results 406 | timer := time.NewTimer(1 * time.Second) 407 | select { 408 | case <-res: 409 | case <-timer.C: 410 | t.Fatalf("result channel was unresponsive after an error occurred") 411 | } 412 | 413 | continue 414 | } 415 | require.NoError(err) 416 | 417 | var vals []string 418 | for v := range res { 419 | vals = append(vals, string(v)) 420 | } 421 | require.Equal(svf.vals, vals) 422 | } 423 | }) 424 | } 425 | } 426 | 427 | type peerRoutingDataWithError struct{} 428 | 429 | func (r peerRoutingDataWithError) FindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { 430 | return peer.AddrInfo{ID: p}, routing.ErrNotFound 431 | } 432 | 433 | func newDummyPeerRouting(t testing.TB, ids []peer.ID) routing.PeerRouting { 434 | pr := dummyPeerRouter{} 435 | for _, id := range ids { 436 | pr[id] = struct{}{} 437 | } 438 | 439 | return pr 440 | } 441 | 442 | func newDummyValueStore(t testing.TB, keys []string, values []string) routing.ValueStore { 443 | t.Helper() 444 | 445 | if len(keys) != len(values) { 446 | t.Fatal("keys and values must be the same amount") 447 | } 448 | 449 | dvs := &dummyValueStore{} 450 | for i, k := range keys { 451 | v := values[i] 452 | err := dvs.PutValue(context.TODO(), k, []byte(v)) 453 | if err != nil { 454 | t.Fatal(err) 455 | } 456 | } 457 | 458 | return dvs 459 | } 460 | 461 | type nothingContentRouter struct{} 462 | 463 | func (nothingContentRouter) FindProvidersAsync(ctx context.Context, _ cid.Cid, _ int) <-chan peer.AddrInfo { 464 | c := make(chan peer.AddrInfo) 465 | go func() { 466 | <-ctx.Done() 467 | close(c) 468 | }() 469 | return c 470 | } 471 | 472 | func 
(nothingContentRouter) Provide(context.Context, cid.Cid, bool) error {
473 | 	return nil
474 | }
475 | 
476 | func TestFindProvsAboveCount(t *testing.T) {
477 | 	t.Parallel()
478 | 
479 | 	prefix := cid.NewPrefixV1(cid.Raw, mh.SHA2_256)
480 | 	c, err := prefix.Sum([]byte("foo"))
481 | 	require.NoError(t, err)
482 | 
483 | 	d := NewComposableParallel([]*ParallelRouter{
484 | 		{
485 | 			Router: &Compose{
486 | 				ValueStore:     Null{},
487 | 				PeerRouting:    Null{},
488 | 				ContentRouting: dummyProvider{c: {"1", "2", "3"}}},
489 | 		},
490 | 		{
491 | 			Router: &Compose{
492 | 				ValueStore:     Null{},
493 | 				PeerRouting:    Null{},
494 | 				ContentRouting: nothingContentRouter{}}},
495 | 		},
496 | 	})
497 | 	ch := d.FindProvidersAsync(context.Background(), c, 2)
498 | 	var count int
499 | 	for ; ; count++ {
500 | 		v, ok := <-ch
501 | 		if !ok {
502 | 			break
503 | 		}
504 | 		t.Log(v.ID)
505 | 	}
506 | 	require.Equal(t, 2, count)
507 | }
508 | 
--------------------------------------------------------------------------------
/compsequential.go:
--------------------------------------------------------------------------------
1 | package routinghelpers
2 | 
3 | import (
4 | 	"context"
5 | 	"errors"
6 | 	"sync/atomic"
7 | 
8 | 	"github.com/ipfs/go-cid"
9 | 	"github.com/libp2p/go-libp2p/core/peer"
10 | 	"github.com/libp2p/go-libp2p/core/routing"
11 | 	"github.com/multiformats/go-multihash"
12 | )
13 | 
14 | var _ routing.Routing = (*composableSequential)(nil)
15 | var _ ProvideManyRouter = (*composableSequential)(nil)
16 | var _ ReadyAbleRouter = (*composableSequential)(nil)
17 | var _ ComposableRouter = (*composableSequential)(nil)
18 | 
19 | const sequentialName = "ComposableSequential"
20 | 
21 | type composableSequential struct {
22 | 	routers []*SequentialRouter
23 | }
24 | 
25 | func NewComposableSequential(routers []*SequentialRouter) *composableSequential {
26 | 	return &composableSequential{
27 | 		routers: routers,
28 | 	}
29 | }
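// Usage sketch (illustrative only; a and b are hypothetical sub-routers):
// routers are consulted strictly in slice order, so the cheapest or most
// authoritative router should come first.
//
//	seq := NewComposableSequential([]*SequentialRouter{
//		{Router: a, Timeout: 5 * time.Second, IgnoreError: true},
//		{Router: b}, // Timeout 0 means no per-router deadline
//	})
//	val, err := seq.GetValue(ctx, "some-key")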
30 | 
31 | func (r *composableSequential) Routers() []routing.Routing {
32 | 	var routers []routing.Routing
33 | 	for _, sr := range r.routers {
34 | 		routers = append(routers, sr.Router)
35 | 	}
36 | 
37 | 	return routers
38 | }
39 | 
40 | // Provide calls the Provide method on each router sequentially.
41 | // If some router fails and the IgnoreError flag is true, we continue to the next router.
42 | // A context timeout error will also be ignored if the flag is set.
43 | func (r *composableSequential) Provide(ctx context.Context, cid cid.Cid, provide bool) (err error) {
44 | 	ctx, end := tracer.Provide(sequentialName, ctx, cid, provide)
45 | 	defer func() { end(err) }()
46 | 
47 | 	return executeSequential(ctx, r.routers,
48 | 		func(ctx context.Context, r routing.Routing) error {
49 | 			return r.Provide(ctx, cid, provide)
50 | 		})
51 | }
52 | 
53 | // ProvideMany will call all supported Routers sequentially, falling back to iterative
54 | // single Provide call for routers which do not support [ProvideManyRouter].
55 | func (r *composableSequential) ProvideMany(ctx context.Context, keys []multihash.Multihash) (err error) {
56 | 	ctx, end := tracer.ProvideMany(sequentialName, ctx, keys)
57 | 	defer func() { end(err) }()
58 | 
59 | 	return executeSequential(ctx, r.routers,
60 | 		func(ctx context.Context, r routing.Routing) error {
61 | 			if pm, ok := r.(ProvideManyRouter); ok {
62 | 				return pm.ProvideMany(ctx, keys)
63 | 			}
64 | 
65 | 			for _, k := range keys {
66 | 				if err := r.Provide(ctx, cid.NewCidV1(cid.Raw, k), true); err != nil {
67 | 					return err
68 | 				}
69 | 			}
70 | 			return nil
71 | 		},
72 | 	)
73 | }
74 | 
75 | // Ready will call all supported [ReadyAbleRouter] sequentially.
76 | // If some of them are not ready, this method will return false.
77 | func (r *composableSequential) Ready() bool {
78 | 	for _, ro := range r.routers {
79 | 		pm, ok := ro.Router.(ReadyAbleRouter)
80 | 		if !ok {
81 | 			continue
82 | 		}
83 | 
84 | 		if !pm.Ready() {
85 | 			return false
86 | 		}
87 | 	}
88 | 
89 | 	return true
90 | }
91 | 
92 | // FindProvidersAsync calls FindProvidersAsync on each router sequentially.
93 | // If some router fails and the IgnoreError flag is true, we continue to the next router.
94 | // A context timeout error will also be ignored if the flag is set.
95 | // If count is set, the channel will return up to count results, stopping the router iteration.
96 | func (r *composableSequential) FindProvidersAsync(ctx context.Context, cid cid.Cid, count int) <-chan peer.AddrInfo {
97 | 	ctx, wrapper := tracer.FindProvidersAsync(sequentialName, ctx, cid, count)
98 | 
99 | 	var totalCount int64
100 | 	return wrapper(getChannelOrErrorSequential(ctx, r.routers,
101 | 		func(ctx context.Context, r routing.Routing) (<-chan peer.AddrInfo, error) {
102 | 			return r.FindProvidersAsync(ctx, cid, count), nil
103 | 		},
104 | 		func() bool {
105 | 			return atomic.AddInt64(&totalCount, 1) > int64(count) && count != 0
106 | 		},
107 | 	), nil)
108 | }
109 | 
110 | // FindPeer calls FindPeer on each router sequentially.
111 | // If some router fails and the IgnoreError flag is true, we continue to the next router.
112 | // A context timeout error will also be ignored if the flag is set.
113 | func (r *composableSequential) FindPeer(ctx context.Context, pid peer.ID) (p peer.AddrInfo, err error) {
114 | 	ctx, end := tracer.FindPeer(sequentialName, ctx, pid)
115 | 	defer func() { end(p, err) }()
116 | 
117 | 	return getValueOrErrorSequential(ctx, r.routers,
118 | 		func(ctx context.Context, r routing.Routing) (peer.AddrInfo, bool, error) {
119 | 			addr, err := r.FindPeer(ctx, pid)
120 | 			return addr, addr.ID == "", err
121 | 		},
122 | 	)
123 | }
124 | 
125 | // If some router fails and the IgnoreError flag is true, we continue to the next router.
126 | // A context timeout error will also be ignored if the flag is set.
127 | func (r *composableSequential) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) (err error) {
128 | 	ctx, end := tracer.PutValue(sequentialName, ctx, key, val, opts...)
129 | 	defer func() { end(err) }()
130 | 
131 | 	return executeSequential(ctx, r.routers,
132 | 		func(ctx context.Context, r routing.Routing) error {
133 | 			return r.PutValue(ctx, key, val, opts...)
134 | 		})
135 | }
136 | 
137 | // If some router fails and the IgnoreError flag is true, we continue to the next router.
138 | // A context timeout error will also be ignored if the flag is set.
139 | func (r *composableSequential) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) {
140 | 	return getValueOrErrorSequential(ctx, r.routers,
141 | 		func(ctx context.Context, r routing.Routing) ([]byte, bool, error) {
142 | 			val, err := r.GetValue(ctx, key, opts...)
143 | 			return val, len(val) == 0, err
144 | 		},
145 | 	)
146 | }
147 | 
148 | // If some router fails and the IgnoreError flag is true, we continue to the next router.
149 | // A context timeout error will also be ignored if the flag is set.
150 | func (r *composableSequential) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) {
151 | 	ctx, wrapper := tracer.SearchValue(sequentialName, ctx, key, opts...)
152 | 
153 | 	return wrapper(getChannelOrErrorSequential(ctx, r.routers,
154 | 		func(ctx context.Context, r routing.Routing) (<-chan []byte, error) {
155 | 			return r.SearchValue(ctx, key, opts...)
156 | 		},
157 | 		func() bool { return false },
158 | 	), nil)
159 | 
160 | }
161 | 
162 | // If some router fails and the IgnoreError flag is true, we continue to the next router.
163 | // A context timeout error will also be ignored if the flag is set.
164 | func (r *composableSequential) Bootstrap(ctx context.Context) (err error) {
165 | 	ctx, end := tracer.Bootstrap(sequentialName, ctx)
166 | 	defer func() { end(err) }()
167 | 
168 | 	return executeSequential(ctx, r.routers,
169 | 		func(ctx context.Context, r routing.Routing) error {
170 | 			return r.Bootstrap(ctx)
171 | 		},
172 | 	)
173 | }
174 | 
175 | func getValueOrErrorSequential[T any](
176 | 	ctx context.Context,
177 | 	routers []*SequentialRouter,
178 | 	f func(context.Context, routing.Routing) (T, bool, error),
179 | ) (value T, err error) {
180 | 	for _, router := range routers {
181 | 		if ctxErr := ctx.Err(); ctxErr != nil {
182 | 			return value, ctxErr
183 | 		}
184 | 
185 | 		ctx, cancel := withCancelAndOptionalTimeout(ctx, router.Timeout)
186 | 		defer cancel()
187 | 
188 | 		value, empty, err := f(ctx, router.Router)
189 | 		if err != nil &&
190 | 			!errors.Is(err, routing.ErrNotFound) &&
191 | 			!router.IgnoreError {
192 | 			return value, err
193 | 		}
194 | 
195 | 		if empty {
196 | 			continue
197 | 		}
198 | 
199 | 		return value, nil
200 | 	}
201 | 
202 | 	return value, routing.ErrNotFound
203 | }
204 | 
205 | func executeSequential(
206 | 	ctx context.Context,
207 | 	routers []*SequentialRouter,
208 | 	f func(context.Context, routing.Routing,
209 | 	) error) error {
210 | 	for _, router := range routers {
211 | 		if ctxErr := ctx.Err(); ctxErr != nil {
212 | 			return ctxErr
213 | 		}
214 | 
215 | 		ctx, cancel := withCancelAndOptionalTimeout(ctx, router.Timeout)
216 | 		defer cancel()
217 | 
218 | 		if err := f(ctx, router.Router); err != nil &&
219 | 			!errors.Is(err, routing.ErrNotFound) &&
220 | 			!router.IgnoreError {
221 | 			return err
222 | 		}
223 | 	}
224 | 
225 | 	return nil
226 | }
227 | 
228 | func getChannelOrErrorSequential[T any](
229 | 	ctx context.Context,
230 | 	routers []*SequentialRouter,
231 | 	f func(context.Context, routing.Routing) (<-chan T, error),
232 | 	shouldStop func() bool,
233 | ) chan T {
234 | 	chanOut := make(chan T)
235 | 
236 | 	go func() {
237 | 		for _, router := range routers {
238 | 			if ctxErr := ctx.Err(); ctxErr != nil {
239 | 				close(chanOut)
240 | 				return
241 | 			}
242 | 			ctx, cancel := withCancelAndOptionalTimeout(ctx, router.Timeout)
243 | 			defer cancel()
244 | 			rch, err := f(ctx, router.Router)
245 | 			if err != nil &&
246 | 				!errors.Is(err, routing.ErrNotFound) &&
247 | 				!router.IgnoreError {
248 | 				break
249 | 			}
250 | 
251 | 		f:
252 | 			for {
253 | 				select {
254 | 
case <-ctx.Done(): 255 | break f 256 | case v, ok := <-rch: 257 | if !ok { 258 | break f 259 | } 260 | select { 261 | case <-ctx.Done(): 262 | break f 263 | case chanOut <- v: 264 | } 265 | 266 | } 267 | } 268 | } 269 | 270 | close(chanOut) 271 | }() 272 | 273 | return chanOut 274 | } 275 | -------------------------------------------------------------------------------- /compsequential_test.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | "time" 8 | 9 | "github.com/ipfs/go-cid" 10 | "github.com/libp2p/go-libp2p/core/peer" 11 | "github.com/libp2p/go-libp2p/core/routing" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestNoResultsSequential(t *testing.T) { 16 | t.Parallel() 17 | 18 | require := require.New(t) 19 | rs := []*SequentialRouter{ 20 | { 21 | Timeout: time.Second, 22 | IgnoreError: true, 23 | Router: Null{}, 24 | }, 25 | { 26 | Timeout: time.Second, 27 | IgnoreError: true, 28 | Router: &Compose{ 29 | ValueStore: newDummyValueStore(t, []string{"a"}, []string{"av"}), 30 | PeerRouting: Null{}, 31 | ContentRouting: Null{}, 32 | }, 33 | }, 34 | } 35 | 36 | cs := NewComposableSequential(rs) 37 | 38 | v, err := cs.GetValue(context.Background(), "a") 39 | require.NoError(err) 40 | require.Equal("av", string(v)) 41 | 42 | require.Equal(2, len(cs.Routers())) 43 | } 44 | 45 | func TestComposableSequentialFixtures(t *testing.T) { 46 | t.Parallel() 47 | 48 | type getValueFixture struct { 49 | err error 50 | key string 51 | value string 52 | searchValCount int 53 | } 54 | type putValueFixture struct { 55 | err error 56 | key string 57 | value string 58 | } 59 | type provideFixture struct { 60 | err error 61 | } 62 | type findPeerFixture struct { 63 | peerID string 64 | err error 65 | } 66 | fixtures := []struct { 67 | Name string 68 | routers []*SequentialRouter 69 | GetValueFixtures []getValueFixture 70 | PutValueFixtures []putValueFixture 71 | ProvideFixtures []provideFixture 72 | FindPeerFixtures []findPeerFixture 73 | }{ 74 | { 75 | Name: "simple two routers", 76 | routers: []*SequentialRouter{ 77 | { 78 | Timeout: time.Second, 79 | IgnoreError: false, 80 | Router: &Compose{ 81 | ValueStore: newDummyValueStore(t, []string{"a", "b", "c"}, []string{"av", "bv", "cv"}), 82 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid2"}), 83 | ContentRouting: Null{}, 84 | }, 85 | }, 86 | { 87 | Timeout: time.Minute, 88 | IgnoreError: false, 89 | Router: &Compose{ 90 | ValueStore: newDummyValueStore(t, []string{"a", "d"}, []string{"av2", "dv"}), 91 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid3"}), 92 | ContentRouting: Null{}, 93 | }, 94 | }, 95 | }, 96 | GetValueFixtures: []getValueFixture{ 97 | { 98 | key: "d", 99 | value: "dv", 100 | searchValCount: 1, 101 | }, 102 | { 103 | key: "a", 104 | value: "av", 105 | searchValCount: 2, 106 | }, 107 | }, 108 | PutValueFixtures: []putValueFixture{ 109 | { 110 | err: errors.New("a"), 111 | key: "/error/a", 112 | value: "a", 113 | }, 114 | { 115 | key: "a", 116 | value: "a", 117 | }, 118 | }, 119 | ProvideFixtures: []provideFixture{ 120 | { 121 | err: routing.ErrNotSupported, 122 | }, 123 | }, 124 | FindPeerFixtures: []findPeerFixture{ 125 | { 126 | peerID: "pid1", 127 | }, 128 | { 129 | peerID: "pid3", 130 | }, 131 | }, 132 | }, 133 | { 134 | Name: "two routers with ignore errors", 135 | routers: []*SequentialRouter{ 136 | { 137 | Timeout: time.Second, 138 | IgnoreError: true, 139 | Router: &Compose{ 
140 | ValueStore: newDummyValueStore(t, []string{}, []string{}), 141 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid2"}), 142 | ContentRouting: Null{}, 143 | }, 144 | }, 145 | { 146 | Timeout: time.Minute, 147 | IgnoreError: true, 148 | Router: &Compose{ 149 | ValueStore: newDummyValueStore(t, []string{"d"}, []string{"dv"}), 150 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid3"}), 151 | ContentRouting: Null{}, 152 | }, 153 | }, 154 | }, 155 | GetValueFixtures: []getValueFixture{ 156 | { 157 | key: "d", 158 | value: "dv", 159 | searchValCount: 1, 160 | }, 161 | { 162 | err: routing.ErrNotFound, // even ignoring errors, if the value is not found we return not found 163 | key: "a", 164 | }, 165 | }, 166 | PutValueFixtures: []putValueFixture{ 167 | { 168 | key: "/error/x", 169 | value: "xv", 170 | }, 171 | { 172 | key: "/error/y", 173 | value: "yv", 174 | }, 175 | }, 176 | FindPeerFixtures: []findPeerFixture{ 177 | { 178 | peerID: "pid1", 179 | }, 180 | { 181 | err: routing.ErrNotFound, // even ignoring errors, if the value is not found we return not found 182 | peerID: "pid4", 183 | }, 184 | }, 185 | }, 186 | { 187 | Name: "two routers with ignore errors no delay", 188 | routers: []*SequentialRouter{ 189 | { 190 | Timeout: time.Second, 191 | IgnoreError: true, 192 | Router: &Compose{ 193 | ValueStore: newDummyValueStore(t, []string{"a"}, []string{"av"}), 194 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid2"}), 195 | ContentRouting: Null{}, 196 | }, 197 | }, 198 | { 199 | Timeout: time.Minute, 200 | IgnoreError: true, 201 | Router: &Compose{ 202 | ValueStore: newDummyValueStore(t, []string{"d", "e"}, []string{"dv", "ev"}), 203 | PeerRouting: newDummyPeerRouting(t, []peer.ID{"pid1", "pid3"}), 204 | ContentRouting: Null{}, 205 | }, 206 | }, 207 | }, 208 | GetValueFixtures: []getValueFixture{ 209 | { 210 | key: "d", 211 | value: "dv", 212 | searchValCount: 1, 213 | }, 214 | { 215 | key: "a", 216 | value: "av", 217 | searchValCount: 1, 218 | }, 219 | { 220 | err: routing.ErrNotFound, 221 | key: "/error/z", 222 | }, 223 | { 224 | err: routing.ErrNotFound, 225 | key: "/error/y", 226 | }, 227 | }, 228 | PutValueFixtures: []putValueFixture{ 229 | { 230 | key: "/error/x", 231 | value: "xv", 232 | }, 233 | { 234 | key: "/error/y", 235 | value: "yv", 236 | }, 237 | }, 238 | FindPeerFixtures: []findPeerFixture{ 239 | { 240 | peerID: "pid1", 241 | }, 242 | { 243 | peerID: "pid4", 244 | err: routing.ErrNotFound, 245 | }, 246 | }, 247 | }, 248 | { 249 | Name: "two routers one value store failing always", 250 | routers: []*SequentialRouter{ 251 | { 252 | Timeout: time.Second, 253 | IgnoreError: false, 254 | Router: &Compose{ 255 | ValueStore: failValueStore{}, 256 | PeerRouting: Null{}, 257 | ContentRouting: Null{}, 258 | }, 259 | }, 260 | { 261 | Timeout: time.Minute, 262 | IgnoreError: false, 263 | Router: &Compose{ 264 | ValueStore: newDummyValueStore(t, []string{"d", "e"}, []string{"dv", "ev"}), 265 | PeerRouting: Null{}, 266 | ContentRouting: Null{}, 267 | }, 268 | }, 269 | }, 270 | GetValueFixtures: []getValueFixture{ 271 | { 272 | err: errFailValue, 273 | key: "d", 274 | value: "dv", 275 | }, 276 | { 277 | err: errFailValue, 278 | key: "a", 279 | value: "av", 280 | }, 281 | }, 282 | }, 283 | { 284 | Name: "two routers one value store failing always but ignored", 285 | routers: []*SequentialRouter{ 286 | { 287 | Timeout: time.Second, 288 | IgnoreError: true, 289 | Router: &Compose{ 290 | ValueStore: failValueStore{}, 291 | PeerRouting: Null{}, 292 | ContentRouting: 
Null{}, 293 | }, 294 | }, 295 | { 296 | Timeout: time.Second, 297 | IgnoreError: false, 298 | Router: &Compose{ 299 | ValueStore: newDummyValueStore(t, []string{"d", "e"}, []string{"dv", "ev"}), 300 | PeerRouting: Null{}, 301 | ContentRouting: Null{}, 302 | }, 303 | }, 304 | }, 305 | GetValueFixtures: []getValueFixture{ 306 | { 307 | key: "d", 308 | value: "dv", 309 | searchValCount: 1, 310 | }, 311 | { 312 | err: routing.ErrNotFound, 313 | key: "a", 314 | value: "av", 315 | }, 316 | }, 317 | }, 318 | { 319 | Name: "timeout=0 should disable the timeout, two routers with one disabled timeout should timeout on the other router", 320 | routers: []*SequentialRouter{ 321 | { 322 | Timeout: 0, 323 | IgnoreError: false, 324 | Router: &Compose{ 325 | ValueStore: newDummyValueStore(t, nil, nil), 326 | }, 327 | }, 328 | { 329 | Timeout: time.Minute, 330 | IgnoreError: false, 331 | Router: &Compose{ 332 | ValueStore: newDummyValueStore(t, []string{"a"}, []string{"av"}), 333 | }, 334 | }, 335 | }, 336 | GetValueFixtures: []getValueFixture{{key: "/wait/100ms/a", value: "av", searchValCount: 1}}, 337 | }, 338 | } 339 | 340 | for _, f := range fixtures { 341 | f := f 342 | t.Run(f.Name, func(t *testing.T) { 343 | t.Parallel() 344 | 345 | require := require.New(t) 346 | cpr := NewComposableSequential(f.routers) 347 | for _, gvf := range f.GetValueFixtures { 348 | val, err := cpr.GetValue(context.Background(), gvf.key) 349 | if gvf.err != nil { 350 | require.ErrorContains(err, gvf.err.Error()) 351 | continue 352 | } else { 353 | require.NoError(err) 354 | } 355 | 356 | require.Equal(gvf.value, string(val)) 357 | 358 | vals, err := cpr.SearchValue(context.Background(), gvf.key) 359 | if gvf.err != nil { 360 | require.ErrorContains(err, gvf.err.Error()) 361 | continue 362 | } else { 363 | require.NoError(err) 364 | } 365 | 366 | count := 0 367 | for range vals { 368 | count++ 369 | } 370 | 371 | require.Equal(gvf.searchValCount, count) 372 | } 373 | 374 | for _, pvf := range f.PutValueFixtures { 375 | err := cpr.PutValue(context.Background(), pvf.key, []byte(pvf.value)) 376 | if pvf.err != nil { 377 | require.ErrorContains(err, pvf.err.Error()) 378 | continue 379 | } else { 380 | require.NoError(err) 381 | } 382 | } 383 | 384 | for _, pf := range f.ProvideFixtures { 385 | err := cpr.Provide(context.Background(), cid.Cid{}, true) 386 | if pf.err != nil { 387 | require.ErrorContains(err, pf.err.Error()) 388 | continue 389 | } else { 390 | require.NoError(err) 391 | } 392 | } 393 | 394 | for _, fpf := range f.FindPeerFixtures { 395 | addr, err := cpr.FindPeer(context.Background(), peer.ID(fpf.peerID)) 396 | if fpf.err != nil { 397 | require.ErrorContains(err, fpf.err.Error()) 398 | continue 399 | } else { 400 | require.NoError(err) 401 | } 402 | 403 | require.Equal(fpf.peerID, string(addr.ID)) 404 | } 405 | }) 406 | } 407 | } 408 | -------------------------------------------------------------------------------- /dummy_test.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | "github.com/ipfs/go-cid" 12 | "github.com/libp2p/go-libp2p/core/peer" 13 | "github.com/libp2p/go-libp2p/core/routing" 14 | ) 15 | 16 | type testCloser struct { 17 | closed int 18 | } 19 | 20 | func (closer *testCloser) Close() error { 21 | closer.closed++ 22 | return nil 23 | } 24 | 25 | type failValueStore struct{} 26 | 27 | var errFailValue = errors.New("fail value-store error") 28 
| 29 | func (f failValueStore) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) error { 30 | return errFailValue 31 | } 32 | func (f failValueStore) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) { 33 | return nil, errFailValue 34 | } 35 | 36 | func (f failValueStore) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { 37 | return nil, errFailValue 38 | } 39 | 40 | type dummyValueStore sync.Map 41 | 42 | func (d *dummyValueStore) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) error { 43 | if strings.HasPrefix(key, "/notsupported/") { 44 | return routing.ErrNotSupported 45 | } 46 | if strings.HasPrefix(key, "/error/") { 47 | return errors.New(key[len("/error/"):]) 48 | } 49 | if strings.HasPrefix(key, "/stall/") { 50 | <-ctx.Done() 51 | return ctx.Err() 52 | } 53 | (*sync.Map)(d).Store(key, value) 54 | return nil 55 | } 56 | 57 | func (d *dummyValueStore) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) { 58 | if strings.HasPrefix(key, "/error/") { 59 | return nil, errors.New(key[len("/error/"):]) 60 | } 61 | if strings.HasPrefix(key, "/stall/") { 62 | <-ctx.Done() 63 | return nil, ctx.Err() 64 | } 65 | // format: /wait/10s/key 66 | // this will wait for the given duration and then perform the lookup normally on key, 67 | // short circuiting if the context closes 68 | if strings.HasPrefix(key, "/wait/") { 69 | durationAndKey := strings.TrimPrefix(key, "/wait/") 70 | split := strings.Split(durationAndKey, "/") 71 | durationStr, key := split[0], split[1] 72 | duration, err := time.ParseDuration(durationStr) 73 | if err != nil { 74 | return nil, fmt.Errorf("parsing wait duration: %w", err) 75 | } 76 | timer := time.NewTimer(duration) 77 | defer timer.Stop() 78 | select { 79 | case <-timer.C: 80 | return d.GetValue(ctx, key, opts...) 81 | case <-ctx.Done(): 82 | return nil, ctx.Err() 83 | } 84 | } 85 | if v, ok := (*sync.Map)(d).Load(key); ok { 86 | return v.([]byte), nil 87 | } 88 | return nil, routing.ErrNotFound 89 | } 90 | 91 | func (d *dummyValueStore) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { 92 | out := make(chan []byte) 93 | if strings.HasPrefix(key, "/error/") { 94 | return nil, errors.New(key[len("/error/"):]) 95 | } 96 | 97 | go func() { 98 | defer close(out) 99 | v, err := d.GetValue(ctx, key, opts...) 
100 | if err == nil { 101 | select { 102 | case out <- v: 103 | case <-ctx.Done(): 104 | } 105 | } 106 | }() 107 | return out, nil 108 | } 109 | 110 | type dummyProvider map[cid.Cid][]peer.ID 111 | 112 | func (d dummyProvider) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { 113 | peers := d[c] 114 | if count > 0 && len(peers) > count { 115 | peers = peers[:count] 116 | } 117 | out := make(chan peer.AddrInfo) 118 | go func() { 119 | defer close(out) 120 | for _, p := range peers { 121 | if p == "stall" { 122 | <-ctx.Done() 123 | return 124 | } 125 | select { 126 | case out <- peer.AddrInfo{ID: p}: 127 | case <-ctx.Done(): 128 | return 129 | } 130 | } 131 | }() 132 | return out 133 | } 134 | 135 | func (d dummyProvider) Provide(ctx context.Context, c cid.Cid, local bool) error { 136 | return routing.ErrNotSupported 137 | } 138 | 139 | type cbProvider func(c cid.Cid, local bool) error 140 | 141 | func (d cbProvider) Provide(ctx context.Context, c cid.Cid, local bool) error { 142 | return d(c, local) 143 | } 144 | 145 | func (d cbProvider) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { 146 | ch := make(chan peer.AddrInfo) 147 | close(ch) 148 | return ch 149 | } 150 | 151 | type dummyPeerRouter map[peer.ID]struct{} 152 | 153 | func (d dummyPeerRouter) FindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { 154 | if _, ok := d[p]; ok { 155 | return peer.AddrInfo{ID: p}, nil 156 | } 157 | return peer.AddrInfo{}, routing.ErrNotFound 158 | } 159 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/libp2p/go-libp2p-routing-helpers 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/Jorropo/jsync v1.0.1 7 | github.com/ipfs/go-cid v0.5.0 8 | github.com/ipfs/go-log/v2 v2.5.1 9 | github.com/libp2p/go-libp2p v0.41.0 10 | github.com/libp2p/go-libp2p-record v0.3.1 11 | github.com/multiformats/go-multibase v0.2.0 12 | github.com/multiformats/go-multihash v0.2.3 13 | github.com/stretchr/testify v1.10.0 14 | go.opentelemetry.io/otel v1.16.0 15 | go.opentelemetry.io/otel/trace v1.16.0 16 | go.uber.org/multierr v1.11.0 17 | ) 18 | 19 | require ( 20 | github.com/davecgh/go-spew v1.1.1 // indirect 21 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect 22 | github.com/go-logr/logr v1.4.2 // indirect 23 | github.com/go-logr/stdr v1.2.2 // indirect 24 | github.com/klauspost/cpuid/v2 v2.2.10 // indirect 25 | github.com/kr/pretty v0.3.1 // indirect 26 | github.com/libp2p/go-buffer-pool v0.1.0 // indirect 27 | github.com/mattn/go-isatty v0.0.20 // indirect 28 | github.com/minio/sha256-simd v1.0.1 // indirect 29 | github.com/mr-tron/base58 v1.2.0 // indirect 30 | github.com/multiformats/go-base32 v0.1.0 // indirect 31 | github.com/multiformats/go-base36 v0.2.0 // indirect 32 | github.com/multiformats/go-multiaddr v0.15.0 // indirect 33 | github.com/multiformats/go-multicodec v0.9.0 // indirect 34 | github.com/multiformats/go-varint v0.0.7 // indirect 35 | github.com/pmezard/go-difflib v1.0.0 // indirect 36 | github.com/rogpeppe/go-internal v1.10.0 // indirect 37 | github.com/spaolacci/murmur3 v1.1.0 // indirect 38 | go.opentelemetry.io/otel/metric v1.16.0 // indirect 39 | go.uber.org/zap v1.27.0 // indirect 40 | golang.org/x/crypto v0.35.0 // indirect 41 | golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa // indirect 42 | golang.org/x/sys v0.30.0 // indirect 43 | 
google.golang.org/protobuf v1.36.5 // indirect 44 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 45 | gopkg.in/yaml.v3 v3.0.1 // indirect 46 | lukechampine.com/blake3 v1.4.0 // indirect 47 | ) 48 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= 2 | github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= 3 | github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 4 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 7 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 8 | github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= 9 | github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= 10 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= 11 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= 12 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 13 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 14 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 15 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 16 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 17 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 18 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 19 | github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= 20 | github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= 21 | github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= 22 | github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= 23 | github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= 24 | github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= 25 | github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= 26 | github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= 27 | github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= 28 | github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= 29 | github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= 30 | github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= 31 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 32 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 33 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 34 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 35 | github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 36 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 37 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 38 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 39 | github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= 40 | github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= 41 | github.com/libp2p/go-libp2p v0.41.0 h1:JRaD39dqf/tBBGapJ0T38N73vOaDCsWgcx3mE6HgXWk= 42 | github.com/libp2p/go-libp2p v0.41.0/go.mod h1:Be8QYqC4JW6Xq8buukNeoZJjyT1XUDcGoIooCHm1ye4= 43 | github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= 44 | github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= 45 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 46 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 47 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 48 | github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= 49 | github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= 50 | github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= 51 | github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= 52 | github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= 53 | github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= 54 | github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= 55 | github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= 56 | github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= 57 | github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= 58 | github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= 59 | github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= 60 | github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= 61 | github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= 62 | github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= 63 | github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= 64 | github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= 65 | github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= 66 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 67 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 68 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 69 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 70 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 71 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 72 | github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 73 | github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 74 | github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 75 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 76 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 77 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 78 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 79 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 80 | github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 81 | go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= 82 | go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= 83 | go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= 84 | go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= 85 | go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= 86 | go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= 87 | go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 88 | go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= 89 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 90 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 91 | go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= 92 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 93 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 94 | go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= 95 | go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= 96 | go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 97 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 98 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 99 | golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= 100 | golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= 101 | golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4= 102 | golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk= 103 | golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 104 | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 105 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 106 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 107 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 108 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 109 | golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 110 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 111 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 112 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 113 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 114 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 115 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 116 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 117 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 118 | golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= 119 | golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 120 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 121 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 122 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 123 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 124 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 125 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 126 | golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 127 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 128 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 129 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 130 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 131 | google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 132 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 133 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 134 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 135 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 136 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 137 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 138 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 139 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 140 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 141 | lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= 142 | lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= 143 | 
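Before the listing of limited.go, here is a minimal usage sketch of the LimitedValueStore it defines: a namespace gate placed in front of an inner routing.ValueStore. This is an illustrative example rather than repository code; it relies only on the exported API shown in the file below, and uses Null (from null.go) as a stand-in inner store.

package main

import (
	"context"
	"errors"
	"fmt"

	routinghelpers "github.com/libp2p/go-libp2p-routing-helpers"
	"github.com/libp2p/go-libp2p/core/routing"
)

func main() {
	// Only keys under /allow/... are forwarded to the inner store.
	lvs := &routinghelpers.LimitedValueStore{
		ValueStore: routinghelpers.Null{}, // any routing.ValueStore works here
		Namespaces: []string{"allow"},
	}

	fmt.Println(lvs.KeySupported("/allow/x")) // true
	fmt.Println(lvs.KeySupported("/deny/x"))  // false

	// Keys outside the allowed namespaces are rejected before the inner
	// store is ever consulted.
	err := lvs.PutValue(context.Background(), "/deny/x", []byte("v"))
	fmt.Println(errors.Is(err, routing.ErrNotSupported)) // true
}

Note that KeySupported requires a leading slash and a complete namespace segment, so a key like "/allowother/x" does not match the "allow" namespace.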
-------------------------------------------------------------------------------- /limited.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "strings" 7 | 8 | ci "github.com/libp2p/go-libp2p/core/crypto" 9 | "github.com/libp2p/go-libp2p/core/peer" 10 | "github.com/libp2p/go-libp2p/core/routing" 11 | ) 12 | 13 | // LimitedValueStore limits the internal value store to the given namespaces. 14 | type LimitedValueStore struct { 15 | routing.ValueStore 16 | Namespaces []string 17 | } 18 | 19 | // GetPublicKey returns the public key for the given peer, if and only if this 20 | // router supports the /pk namespace. Otherwise, it returns routing.ErrNotFound. 21 | func (lvs *LimitedValueStore) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, error) { 22 | for _, ns := range lvs.Namespaces { 23 | if ns == "pk" { 24 | return routing.GetPublicKey(lvs.ValueStore, ctx, p) 25 | } 26 | } 27 | return nil, routing.ErrNotFound 28 | } 29 | 30 | // PutValue puts the given key/value pair into the underlying value store if the namespace 31 | // is supported. Otherwise, it returns routing.ErrNotSupported. 32 | func (lvs *LimitedValueStore) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) error { 33 | if !lvs.KeySupported(key) { 34 | return routing.ErrNotSupported 35 | } 36 | return lvs.ValueStore.PutValue(ctx, key, value, opts...) 37 | } 38 | 39 | // KeySupported returns true if the passed key is supported by this value store. 40 | func (lvs *LimitedValueStore) KeySupported(key string) bool { 41 | if len(key) < 3 { 42 | return false 43 | } 44 | if key[0] != '/' { 45 | return false 46 | } 47 | key = key[1:] 48 | for _, ns := range lvs.Namespaces { 49 | if len(ns) < len(key) && strings.HasPrefix(key, ns) && key[len(ns)] == '/' { 50 | return true 51 | } 52 | } 53 | return false 54 | } 55 | 56 | // GetValue retrieves the given key from the underlying value store if the namespace 57 | // is supported. Otherwise, it returns routing.ErrNotFound. 58 | func (lvs *LimitedValueStore) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) { 59 | if !lvs.KeySupported(key) { 60 | return nil, routing.ErrNotFound 61 | } 62 | return lvs.ValueStore.GetValue(ctx, key, opts...) 63 | } 64 | 65 | // SearchValue searches the underlying value store for the given key if the 66 | // namespace is supported, returning results in monotonically increasing 67 | // "freshness". Otherwise, it returns an empty, closed channel to indicate that 68 | // the value wasn't found. 69 | func (lvs *LimitedValueStore) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { 70 | if !lvs.KeySupported(key) { 71 | out := make(chan []byte) 72 | close(out) 73 | return out, nil 74 | } 75 | return lvs.ValueStore.SearchValue(ctx, key, opts...) 76 | } 77 | 78 | // Bootstrap signals the underlying value store to get into the "bootstrapped" 79 | // state, if it implements the Bootstrap interface. 80 | func (lvs *LimitedValueStore) Bootstrap(ctx context.Context) error { 81 | if bs, ok := lvs.ValueStore.(Bootstrap); ok { 82 | return bs.Bootstrap(ctx) 83 | } 84 | return nil 85 | } 86 | 87 | // Close closes the underlying value store if it implements the io.Closer 88 | // interface. 
89 | func (lvs *LimitedValueStore) Close() error { 90 | if closer, ok := lvs.ValueStore.(io.Closer); ok { 91 | return closer.Close() 92 | } 93 | return nil 94 | } 95 | 96 | var _ routing.PubKeyFetcher = (*LimitedValueStore)(nil) 97 | var _ routing.ValueStore = (*LimitedValueStore)(nil) 98 | var _ Bootstrap = (*LimitedValueStore)(nil) 99 | -------------------------------------------------------------------------------- /limited_test.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/libp2p/go-libp2p/core/routing" 8 | ) 9 | 10 | func TestLimitedValueStore(t *testing.T) { 11 | t.Parallel() 12 | 13 | d := LimitedValueStore{ 14 | ValueStore: new(dummyValueStore), 15 | Namespaces: []string{"allow"}, 16 | } 17 | 18 | ctx := context.Background() 19 | 20 | for i, k := range []string{ 21 | "/allow/hello", 22 | "/allow/foo", 23 | "/allow/foo/bar", 24 | } { 25 | if err := d.PutValue(ctx, k, []byte{byte(i)}); err != nil { 26 | t.Fatal(err) 27 | } 28 | v, err := d.GetValue(ctx, k) 29 | if err != nil { 30 | t.Fatal(err) 31 | } 32 | if len(v) != 1 || v[0] != byte(i) { 33 | t.Fatalf("expected value [%d], got %v", i, v) 34 | } 35 | } 36 | for i, k := range []string{ 37 | "/deny/hello", 38 | "/allow", 39 | "allow", 40 | "deny", 41 | "", 42 | "/", 43 | "//", 44 | "///", 45 | "//allow", 46 | } { 47 | if err := d.PutValue(ctx, k, []byte{byte(i)}); err != routing.ErrNotSupported { 48 | t.Fatalf("expected put with key %s to fail", k) 49 | } 50 | _, err := d.GetValue(ctx, k) 51 | if err != routing.ErrNotFound { 52 | t.Fatalf("expected get with key %s to fail", k) 53 | } 54 | _, err = d.ValueStore.GetValue(ctx, k) 55 | if err != routing.ErrNotFound { 56 | t.Fatalf("expected get with key %s to fail", k) 57 | } 58 | err = d.ValueStore.PutValue(ctx, k, []byte{byte(i)}) 59 | if err != nil { 60 | t.Fatal(err) 61 | } 62 | _, err = d.GetValue(ctx, k) 63 | if err == nil { 64 | t.Fatalf("expected get with key %s to fail", k) 65 | } 66 | } 67 | } 68 | 69 | func TestLimitedClose(t *testing.T) { 70 | t.Parallel() 71 | 72 | closer := new(testCloser) 73 | d := LimitedValueStore{ 74 | ValueStore: struct { 75 | *testCloser 76 | routing.Routing 77 | }{closer, Null{}}, 78 | } 79 | d.Close() 80 | if closer.closed != 1 { 81 | t.Fatal("expected one close") 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /nothing_test.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/libp2p/go-libp2p/core/routing" 7 | ) 8 | 9 | // nothing is like [Null], but it never reaches quorum for SearchValue. 10 | type nothing struct { 11 | Null 12 | } 13 | 14 | // SearchValue returns a channel that never yields a value and only closes when the context is done. 15 | func (nr nothing) SearchValue(ctx context.Context, _ string, _ ...routing.Option) (<-chan []byte, error) { 16 | ch := make(chan []byte) 17 | go func() { 18 | <-ctx.Done() 19 | close(ch) 20 | }() 21 | return ch, nil 22 | } 23 | -------------------------------------------------------------------------------- /null.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ipfs/go-cid" 7 | "github.com/libp2p/go-libp2p/core/peer" 8 | "github.com/libp2p/go-libp2p/core/routing" 9 | ) 10 | 11 | // Null is a router that doesn't do anything. 
12 | type Null struct{} 13 | 14 | // PutValue always returns ErrNotSupported 15 | func (nr Null) PutValue(context.Context, string, []byte, ...routing.Option) error { 16 | return routing.ErrNotSupported 17 | } 18 | 19 | // GetValue always returns ErrNotFound 20 | func (nr Null) GetValue(context.Context, string, ...routing.Option) ([]byte, error) { 21 | return nil, routing.ErrNotFound 22 | } 23 | 24 | // SearchValue always returns ErrNotFound 25 | func (nr Null) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { 26 | return nil, routing.ErrNotFound 27 | } 28 | 29 | // Provide always returns ErrNotSupported 30 | func (nr Null) Provide(context.Context, cid.Cid, bool) error { 31 | return routing.ErrNotSupported 32 | } 33 | 34 | // FindProvidersAsync always returns a closed channel 35 | func (nr Null) FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.AddrInfo { 36 | ch := make(chan peer.AddrInfo) 37 | close(ch) 38 | return ch 39 | } 40 | 41 | // FindPeer always returns ErrNotFound 42 | func (nr Null) FindPeer(context.Context, peer.ID) (peer.AddrInfo, error) { 43 | return peer.AddrInfo{}, routing.ErrNotFound 44 | } 45 | 46 | // Bootstrap always succeeds instantly 47 | func (nr Null) Bootstrap(context.Context) error { 48 | return nil 49 | } 50 | 51 | // Close always succeeds instantly 52 | func (nr Null) Close() error { 53 | return nil 54 | } 55 | 56 | var _ routing.Routing = Null{} 57 | -------------------------------------------------------------------------------- /null_test.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/ipfs/go-cid" 8 | "github.com/libp2p/go-libp2p/core/peer" 9 | "github.com/libp2p/go-libp2p/core/routing" 10 | ) 11 | 12 | func TestNull(t *testing.T) { 13 | t.Parallel() 14 | 15 | var n Null 16 | ctx := context.Background() 17 | if err := n.PutValue(ctx, "anything", nil); err != routing.ErrNotSupported { 18 | t.Fatal(err) 19 | } 20 | if _, err := n.GetValue(ctx, "anything", nil); err != routing.ErrNotFound { 21 | t.Fatal(err) 22 | } 23 | if err := n.Provide(ctx, cid.Cid{}, false); err != routing.ErrNotSupported { 24 | t.Fatal(err) 25 | } 26 | if _, ok := <-n.FindProvidersAsync(ctx, cid.Cid{}, 10); ok { 27 | t.Fatal("expected no values") 28 | } 29 | if _, err := n.FindPeer(ctx, peer.ID("thing")); err != routing.ErrNotFound { 30 | t.Fatal(err) 31 | } 32 | if err := n.Close(); err != nil { 33 | t.Fatal(err) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /parallel.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io" 7 | "reflect" 8 | "sync" 9 | 10 | "github.com/Jorropo/jsync" 11 | "github.com/ipfs/go-cid" 12 | record "github.com/libp2p/go-libp2p-record" 13 | ci "github.com/libp2p/go-libp2p/core/crypto" 14 | "github.com/libp2p/go-libp2p/core/peer" 15 | "github.com/libp2p/go-libp2p/core/routing" 16 | "go.uber.org/multierr" 17 | ) 18 | 19 | // Parallel operates on the slice of routers in parallel. 20 | type Parallel struct { 21 | Routers []routing.Routing 22 | Validator record.Validator 23 | } 24 | 25 | // Helper function that sees through router composition to avoid unnecessary 26 | // goroutines. 
27 | func supportsKey(vs routing.ValueStore, key string) bool { 28 | switch vs := vs.(type) { 29 | case Null: 30 | return false 31 | case *Compose: 32 | return vs.ValueStore != nil && supportsKey(vs.ValueStore, key) 33 | case Parallel: 34 | for _, ri := range vs.Routers { 35 | if supportsKey(ri, key) { 36 | return true 37 | } 38 | } 39 | return false 40 | case Tiered: 41 | for _, ri := range vs.Routers { 42 | if supportsKey(ri, key) { 43 | return true 44 | } 45 | } 46 | return false 47 | case *LimitedValueStore: 48 | return vs.KeySupported(key) && supportsKey(vs.ValueStore, key) 49 | default: 50 | return true 51 | } 52 | } 53 | 54 | func supportsPeer(vs routing.PeerRouting) bool { 55 | switch vs := vs.(type) { 56 | case Null: 57 | return false 58 | case *Compose: 59 | return vs.PeerRouting != nil && supportsPeer(vs.PeerRouting) 60 | case Parallel: 61 | for _, ri := range vs.Routers { 62 | if supportsPeer(ri) { 63 | return true 64 | } 65 | } 66 | return false 67 | case Tiered: 68 | for _, ri := range vs.Routers { 69 | if supportsPeer(ri) { 70 | return true 71 | } 72 | } 73 | return false 74 | default: 75 | return true 76 | } 77 | } 78 | 79 | func supportsContent(vs routing.ContentRouting) bool { 80 | switch vs := vs.(type) { 81 | case Null: 82 | return false 83 | case *Compose: 84 | return vs.ContentRouting != nil && supportsContent(vs.ContentRouting) 85 | case Parallel: 86 | for _, ri := range vs.Routers { 87 | if supportsContent(ri) { 88 | return true 89 | } 90 | } 91 | return false 92 | case Tiered: 93 | for _, ri := range vs.Routers { 94 | if supportsContent(ri) { 95 | return true 96 | } 97 | } 98 | return false 99 | default: 100 | return true 101 | } 102 | } 103 | 104 | func (r Parallel) filter(filter func(routing.Routing) bool) Parallel { 105 | cpy := make([]routing.Routing, 0, len(r.Routers)) 106 | for _, ri := range r.Routers { 107 | if filter(ri) { 108 | cpy = append(cpy, ri) 109 | } 110 | } 111 | return Parallel{Routers: cpy, Validator: r.Validator} 112 | } 113 | 114 | func (r Parallel) put(do func(routing.Routing) error) error { 115 | switch len(r.Routers) { 116 | case 0: 117 | return routing.ErrNotSupported 118 | case 1: 119 | return do(r.Routers[0]) 120 | } 121 | 122 | var wg sync.WaitGroup 123 | results := make([]error, len(r.Routers)) 124 | wg.Add(len(r.Routers)) 125 | for i, ri := range r.Routers { 126 | go func(ri routing.Routing, i int) { 127 | results[i] = do(ri) 128 | wg.Done() 129 | }(ri, i) 130 | } 131 | wg.Wait() 132 | 133 | var ( 134 | errs []error 135 | success bool 136 | ) 137 | for _, err := range results { 138 | switch err { 139 | case nil: 140 | // at least one router supports this. 141 | success = true 142 | case routing.ErrNotSupported: 143 | default: 144 | errs = append(errs, err) 145 | } 146 | } 147 | 148 | switch len(errs) { 149 | case 0: 150 | if success { 151 | // No errors and at least one router succeeded. 152 | return nil 153 | } 154 | // No routers supported this operation. 155 | return routing.ErrNotSupported 156 | case 1: 157 | return errs[0] 158 | default: 159 | return multierr.Combine(errs...) 
160 | } 161 | } 162 | 163 | func (r Parallel) search(ctx context.Context, do func(routing.Routing) (<-chan []byte, error)) (<-chan []byte, error) { 164 | switch len(r.Routers) { 165 | case 0: 166 | return nil, routing.ErrNotFound 167 | case 1: 168 | return do(r.Routers[0]) 169 | } 170 | 171 | ctx, cancel := context.WithCancel(ctx) 172 | 173 | out := make(chan []byte) 174 | 175 | fwg := jsync.NewFWaitGroup(func() { 176 | close(out) 177 | cancel() 178 | }, 1) 179 | for _, ri := range r.Routers { 180 | vchan, err := do(ri) 181 | if err != nil { 182 | continue 183 | } 184 | 185 | fwg.Add() 186 | go func() { 187 | var sent int 188 | defer fwg.Done() 189 | 190 | for { 191 | select { 192 | case v, ok := <-vchan: 193 | if !ok { 194 | if sent > 0 { 195 | cancel() 196 | } 197 | return 198 | } 199 | 200 | select { 201 | case out <- v: 202 | sent++ 203 | case <-ctx.Done(): 204 | return 205 | } 206 | case <-ctx.Done(): 207 | return 208 | } 209 | } 210 | }() 211 | } 212 | 213 | fwg.Done() 214 | 215 | return out, nil 216 | } 217 | 218 | func (r Parallel) get(ctx context.Context, do func(routing.Routing) (interface{}, error)) (interface{}, error) { 219 | switch len(r.Routers) { 220 | case 0: 221 | return nil, routing.ErrNotFound 222 | case 1: 223 | return do(r.Routers[0]) 224 | } 225 | 226 | ctx, cancel := context.WithCancel(ctx) 227 | defer cancel() 228 | 229 | results := make(chan struct { 230 | val interface{} 231 | err error 232 | }) 233 | for _, ri := range r.Routers { 234 | go func(ri routing.Routing) { 235 | value, err := do(ri) 236 | select { 237 | case results <- struct { 238 | val interface{} 239 | err error 240 | }{ 241 | val: value, 242 | err: err, 243 | }: 244 | case <-ctx.Done(): 245 | } 246 | }(ri) 247 | } 248 | 249 | var errs []error 250 | for range r.Routers { 251 | select { 252 | case res := <-results: 253 | switch res.err { 254 | case nil: 255 | return res.val, nil 256 | case routing.ErrNotFound, routing.ErrNotSupported: 257 | continue 258 | } 259 | // If the context has expired, just return that error 260 | // and ignore the other errors. 261 | if ctx.Err() != nil { 262 | return nil, ctx.Err() 263 | } 264 | errs = append(errs, res.err) 265 | case <-ctx.Done(): 266 | return nil, ctx.Err() 267 | } 268 | } 269 | 270 | switch len(errs) { 271 | case 0: 272 | return nil, routing.ErrNotFound 273 | case 1: 274 | return nil, errs[0] 275 | default: 276 | return nil, multierr.Combine(errs...) 277 | } 278 | } 279 | 280 | func (r Parallel) forKey(key string) Parallel { 281 | return r.filter(func(ri routing.Routing) bool { 282 | return supportsKey(ri, key) 283 | }) 284 | } 285 | 286 | // mergeQueryEvents limits `routing.QueryError` events to only be sent on the context in case all parallel 287 | // routers fail. 288 | func (r Parallel) mergeQueryEvents(ctx context.Context) (context.Context, context.CancelFunc) { 289 | subCtx, cancel := context.WithCancel(ctx) 290 | if !routing.SubscribesToQueryEvents(ctx) { 291 | return subCtx, cancel 292 | } 293 | 294 | subCtx, evCh := routing.RegisterForQueryEvents(subCtx) 295 | go func() { 296 | var errEvt *routing.QueryEvent 297 | successfulEvent := false 298 | for { 299 | select { 300 | // Note: this is the outer context 301 | // An error event may be dropped in this case, but closing due to 302 | // timeout is inherently racy in that regard. 303 | case <-ctx.Done(): 304 | return 305 | // evCh will be closed when subCtx is canceled. 
306 | case ev, ok := <-evCh: 307 | if !ok { 308 | if errEvt != nil && !successfulEvent { 309 | routing.PublishQueryEvent(ctx, errEvt) 310 | } 311 | return 312 | } 313 | if ev == nil { 314 | continue 315 | } 316 | if ev.Type == routing.QueryError { 317 | errEvt = ev 318 | continue 319 | } 320 | successfulEvent = true 321 | routing.PublishQueryEvent(ctx, ev) 322 | } 323 | } 324 | }() 325 | return subCtx, cancel 326 | } 327 | 328 | // PutValue puts the given key to all sub-routers in parallel. It succeeds as 329 | // long as putting to at least one sub-router succeeds, but it waits for all 330 | // puts to terminate. 331 | func (r Parallel) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) error { 332 | reqCtx, cancel := r.mergeQueryEvents(ctx) 333 | defer cancel() 334 | err := r.forKey(key).put(func(ri routing.Routing) error { 335 | return ri.PutValue(reqCtx, key, value, opts...) 336 | }) 337 | return err 338 | } 339 | 340 | // GetValue searches all sub-routers for the given key, returning the result 341 | // from the first sub-router to complete the query. 342 | func (r Parallel) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) { 343 | reqCtx, cancel := r.mergeQueryEvents(ctx) 344 | defer cancel() 345 | vInt, err := r.forKey(key).get(reqCtx, func(ri routing.Routing) (interface{}, error) { 346 | return ri.GetValue(reqCtx, key, opts...) 347 | }) 348 | val, _ := vInt.([]byte) 349 | return val, err 350 | } 351 | 352 | // SearchValue searches all sub-routers for the given key in parallel, 353 | // returning results in monotonically increasing "freshness" from all 354 | // sub-routers. 355 | func (r Parallel) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { 356 | reqCtx, cancel := r.mergeQueryEvents(ctx) 357 | resCh, err := r.forKey(key).search(reqCtx, func(ri routing.Routing) (<-chan []byte, error) { 358 | return ri.SearchValue(reqCtx, key, opts...) 359 | }) 360 | if err != nil { 361 | cancel() 362 | return nil, err 363 | } 364 | 365 | valid := make(chan []byte) 366 | var best []byte 367 | go func() { 368 | defer close(valid) 369 | defer cancel() 370 | 371 | for v := range resCh { 372 | if best != nil { 373 | n, err := r.Validator.Select(key, [][]byte{best, v}) 374 | if err != nil { 375 | continue 376 | } 377 | if n != 1 { 378 | continue 379 | } 380 | } 381 | if bytes.Equal(best, v) && len(v) != 0 { 382 | continue 383 | } 384 | 385 | best = v 386 | select { 387 | case valid <- v: 388 | case <-ctx.Done(): 389 | return 390 | } 391 | } 392 | }() 393 | 394 | return valid, err 395 | } 396 | 397 | // GetPublicKey retrieves the public key from all sub-routers in parallel, 398 | // returning the first result. 399 | func (r Parallel) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, error) { 400 | vInt, err := r. 401 | forKey(routing.KeyForPublicKey(p)). 402 | get(ctx, func(ri routing.Routing) (interface{}, error) { 403 | return routing.GetPublicKey(ri, ctx, p) 404 | }) 405 | val, _ := vInt.(ci.PubKey) 406 | return val, err 407 | } 408 | 409 | // FindPeer finds the given peer in all sub-routers in parallel, returning the 410 | // first result. 
411 | func (r Parallel) FindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { 412 | reqCtx, cancel := r.mergeQueryEvents(ctx) 413 | defer cancel() 414 | vInt, err := r.filter(func(ri routing.Routing) bool { 415 | return supportsPeer(ri) 416 | }).get(ctx, func(ri routing.Routing) (interface{}, error) { 417 | return ri.FindPeer(reqCtx, p) 418 | }) 419 | pi, _ := vInt.(peer.AddrInfo) 420 | return pi, err 421 | } 422 | 423 | // Provide announces that this peer provides the content in question to all 424 | // sub-routers in parallel. Provide returns success as long as a single 425 | // sub-router succeeds, but still waits for all sub-routers to finish before 426 | // returning. 427 | // 428 | // Sub-routers that do not support content routing (as determined by 429 | // supportsContent) are skipped entirely. 430 | func (r Parallel) Provide(ctx context.Context, c cid.Cid, local bool) error { 431 | return r.filter(func(ri routing.Routing) bool { 432 | return supportsContent(ri) 433 | }).put(func(ri routing.Routing) error { 434 | return ri.Provide(ctx, c, local) 435 | }) 436 | } 437 | 438 | // FindProvidersAsync searches all sub-routers in parallel for peers who are 439 | // able to provide a given key. 440 | // 441 | // If count > 0, it returns at most count providers. If count == 0, it returns 442 | // an unbounded number of providers. 443 | func (r Parallel) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { 444 | routers := r.filter(func(ri routing.Routing) bool { 445 | return supportsContent(ri) 446 | }) 447 | 448 | switch len(routers.Routers) { 449 | case 0: 450 | ch := make(chan peer.AddrInfo) 451 | close(ch) 452 | return ch 453 | case 1: 454 | return routers.Routers[0].FindProvidersAsync(ctx, c, count) 455 | } 456 | 457 | out := make(chan peer.AddrInfo) 458 | 459 | reqCtx, cancel := r.mergeQueryEvents(ctx) 460 | 461 | providers := make([]<-chan peer.AddrInfo, len(routers.Routers)) 462 | for i, ri := range routers.Routers { 463 | providers[i] = ri.FindProvidersAsync(reqCtx, c, count) 464 | } 465 | 466 | go func() { 467 | defer cancel() 468 | defer close(out) 469 | if len(providers) > 8 { 470 | manyProviders(reqCtx, out, providers, count) 471 | } else { 472 | fewProviders(reqCtx, out, providers, count) 473 | } 474 | }() 475 | return out 476 | } 477 | 478 | // Unoptimized many provider case. Doing this with reflection is a bit slow but 479 | // definitely simpler. If we start having more than 8 peer routers running in 480 | // parallel, we can revisit this. 481 | func manyProviders(ctx context.Context, out chan<- peer.AddrInfo, in []<-chan peer.AddrInfo, count int) { 482 | found := make(map[peer.ID]struct{}, count) 483 | 484 | selectCases := make([]reflect.SelectCase, len(in)) 485 | for i, ch := range in { 486 | selectCases[i] = reflect.SelectCase{ 487 | Dir: reflect.SelectRecv, 488 | Chan: reflect.ValueOf(ch), 489 | } 490 | } 491 | 492 | // If we ask for 0 providers, that means fetch _all_ providers. 
493 | if count == 0 { 494 | count = -1 495 | } 496 | 497 | for count != 0 && len(selectCases) > 0 { 498 | chosen, val, ok := reflect.Select(selectCases) 499 | if !ok { 500 | // Remove the channel 501 | selectCases[chosen] = selectCases[len(selectCases)-1] 502 | selectCases = selectCases[:len(selectCases)-1] 503 | continue 504 | } 505 | 506 | pi := val.Interface().(peer.AddrInfo) 507 | if _, ok := found[pi.ID]; ok { 508 | continue 509 | } 510 | 511 | select { 512 | case out <- pi: 513 | found[pi.ID] = struct{}{} 514 | count-- 515 | case <-ctx.Done(): 516 | return 517 | } 518 | } 519 | } 520 | 521 | // Optimization for few providers (<=8). 522 | func fewProviders(ctx context.Context, out chan<- peer.AddrInfo, in []<-chan peer.AddrInfo, count int) { 523 | if len(in) > 8 { 524 | panic("case only valid for combining at most 8 channels") 525 | } 526 | 527 | found := make(map[peer.ID]struct{}, count) 528 | 529 | cases := make([]<-chan peer.AddrInfo, 8) 530 | copy(cases, in) 531 | 532 | // If we ask for 0 providers, that means fetch _all_ providers. 533 | if count == 0 { 534 | count = -1 535 | } 536 | 537 | // Oh go, what would we do without you! 538 | nch := len(in) 539 | var pi peer.AddrInfo 540 | for nch > 0 && count != 0 { 541 | var ok bool 542 | var selected int 543 | select { 544 | case pi, ok = <-cases[0]: 545 | selected = 0 546 | case pi, ok = <-cases[1]: 547 | selected = 1 548 | case pi, ok = <-cases[2]: 549 | selected = 2 550 | case pi, ok = <-cases[3]: 551 | selected = 3 552 | case pi, ok = <-cases[4]: 553 | selected = 4 554 | case pi, ok = <-cases[5]: 555 | selected = 5 556 | case pi, ok = <-cases[6]: 557 | selected = 6 558 | case pi, ok = <-cases[7]: 559 | selected = 7 560 | } 561 | if !ok { 562 | cases[selected] = nil 563 | nch-- 564 | continue 565 | } 566 | if _, ok = found[pi.ID]; ok { 567 | continue 568 | } 569 | 570 | select { 571 | case out <- pi: 572 | found[pi.ID] = struct{}{} 573 | count-- 574 | case <-ctx.Done(): 575 | return 576 | } 577 | } 578 | } 579 | 580 | // Bootstrap signals all the sub-routers to bootstrap. 581 | func (r Parallel) Bootstrap(ctx context.Context) error { 582 | var errs error 583 | for _, b := range r.Routers { 584 | if err := b.Bootstrap(ctx); err != nil { 585 | errs = multierr.Append(errs, err) 586 | } 587 | } 588 | return errs 589 | } 590 | 591 | // Close closes all sub-routers that implement the io.Closer interface. 592 | func (r Parallel) Close() error { 593 | var errs error 594 | for _, router := range r.Routers { 595 | if closer, ok := router.(io.Closer); ok { 596 | if err := closer.Close(); err != nil { 597 | errs = multierr.Append(errs, err) 598 | } 599 | } 600 | } 601 | return errs 602 | } 603 | 604 | var _ routing.Routing = Parallel{} 605 | -------------------------------------------------------------------------------- /parallel_test.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "strings" 7 | "testing" 8 | "time" 9 | 10 | "github.com/ipfs/go-cid" 11 | "github.com/libp2p/go-libp2p/core/peer" 12 | "github.com/libp2p/go-libp2p/core/routing" 13 | mh "github.com/multiformats/go-multihash" 14 | "go.uber.org/multierr" 15 | ) 16 | 17 | // NOTE: While this test is primarily testing the Parallel combinator, it also 18 | // mixes and matches other combiners for better coverage. Please don't simplify. 
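Before the tests, a minimal sketch of how Parallel is typically assembled and queried. The mapStore type below is a hypothetical in-memory routing.ValueStore written only for this example; Parallel and Null are the types defined above, and Compose is the wrapper used throughout the tests that follow, delegating value operations to its ValueStore field.

package main

import (
	"context"
	"fmt"

	routinghelpers "github.com/libp2p/go-libp2p-routing-helpers"
	"github.com/libp2p/go-libp2p/core/routing"
)

// mapStore is a toy routing.ValueStore backed by a plain map
// (not safe for concurrent writes; illustration only).
type mapStore map[string][]byte

func (m mapStore) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) error {
	m[key] = val
	return nil
}

func (m mapStore) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) {
	if v, ok := m[key]; ok {
		return v, nil
	}
	return nil, routing.ErrNotFound
}

func (m mapStore) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) {
	ch := make(chan []byte, 1)
	if v, ok := m[key]; ok {
		ch <- v
	}
	close(ch)
	return ch, nil
}

func main() {
	// Null is filtered out up front by supportsKey; the remaining
	// sub-router answers, and GetValue returns the first successful result.
	d := routinghelpers.Parallel{
		Routers: []routing.Routing{
			routinghelpers.Null{},
			&routinghelpers.Compose{ValueStore: mapStore{"/ns/key": []byte("value")}},
		},
	}

	v, err := d.GetValue(context.Background(), "/ns/key")
	fmt.Println(string(v), err) // value <nil>
}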
19 | 20 | func TestParallelPutGet(t *testing.T) { 21 | d := Parallel{ 22 | Routers: []routing.Routing{ 23 | Parallel{ 24 | Routers: []routing.Routing{ 25 | &Compose{ 26 | ValueStore: &LimitedValueStore{ 27 | ValueStore: new(dummyValueStore), 28 | Namespaces: []string{"allow1", "allow2", "notsupported"}, 29 | }, 30 | }, 31 | }, 32 | }, 33 | Tiered{ 34 | Routers: []routing.Routing{ 35 | &Compose{ 36 | ValueStore: &LimitedValueStore{ 37 | ValueStore: new(dummyValueStore), 38 | Namespaces: []string{"allow1", "allow2", "notsupported", "error"}, 39 | }, 40 | }, 41 | }, 42 | }, 43 | &Compose{ 44 | ValueStore: &LimitedValueStore{ 45 | ValueStore: new(dummyValueStore), 46 | Namespaces: []string{"allow1", "error", "solo", "stall"}, 47 | }, 48 | }, 49 | Parallel{ 50 | Routers: []routing.Routing{&struct{ Compose }{}}, 51 | }, 52 | Tiered{ 53 | Routers: []routing.Routing{ 54 | &struct{ Compose }{}, 55 | }, 56 | }, 57 | &struct{ Parallel }{}, 58 | &struct{ Tiered }{}, 59 | }, 60 | } 61 | 62 | ctx, cancel := context.WithCancel(context.Background()) 63 | defer cancel() 64 | 65 | if err := d.PutValue(ctx, "/allow1/hello", []byte("world")); err != nil { 66 | t.Fatal(err) 67 | } 68 | for _, di := range append([]routing.Routing{d}, d.Routers[:3]...) { 69 | v, err := di.GetValue(ctx, "/allow1/hello") 70 | if err != nil { 71 | t.Fatal(err) 72 | } 73 | if string(v) != "world" { 74 | t.Fatal("got the wrong value") 75 | } 76 | } 77 | 78 | if err := d.PutValue(ctx, "/allow2/hello", []byte("world2")); err != nil { 79 | t.Fatal(err) 80 | } 81 | for _, di := range append([]routing.Routing{d}, d.Routers[:1]...) { 82 | v, err := di.GetValue(ctx, "/allow2/hello") 83 | if err != nil { 84 | t.Fatal(err) 85 | } 86 | if string(v) != "world2" { 87 | t.Fatal("got the wrong value") 88 | } 89 | } 90 | if err := d.PutValue(ctx, "/forbidden/hello", []byte("world")); err != routing.ErrNotSupported { 91 | t.Fatalf("expected ErrNotSupported, got: %s", err) 92 | } 93 | for _, di := range append([]routing.Routing{d}, d.Routers...) { 94 | _, err := di.GetValue(ctx, "/forbidden/hello") 95 | if err != routing.ErrNotFound { 96 | t.Fatalf("expected ErrNotFound, got: %s", err) 97 | } 98 | } 99 | // Bypass the LimitedValueStore. 
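// The "notsupported" namespace is present in the allow lists above, so the
// ErrNotSupported expected here must come from the wrapped value store
// itself rather than from the namespace filter.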
100 | if err := d.PutValue(ctx, "/notsupported/hello", []byte("world")); err != routing.ErrNotSupported { 101 | t.Fatalf("expected ErrNotSupported, got: %s", err) 102 | } 103 | if err := d.PutValue(ctx, "/error/myErr", []byte("world")); !errContains(err, "myErr") { 104 | t.Fatalf("expected error to contain myErr, got: %s", err) 105 | } 106 | if _, err := d.GetValue(ctx, "/error/myErr"); !errContains(err, "myErr") { 107 | t.Fatalf("expected error to contain myErr, got: %s", err) 108 | } 109 | if err := d.PutValue(ctx, "/solo/thing", []byte("value")); err != nil { 110 | t.Fatal(err) 111 | } 112 | v, err := d.GetValue(ctx, "/solo/thing") 113 | if err != nil { 114 | t.Fatal(err) 115 | } 116 | if string(v) != "value" { 117 | t.Fatalf("expected 'value', got '%s'", string(v)) 118 | } 119 | 120 | ctxt, cancel := context.WithTimeout(ctx, 10*time.Millisecond) 121 | if _, err := d.GetValue(ctxt, "/stall/bla"); err != context.DeadlineExceeded { 122 | t.Error(err) 123 | } 124 | cancel() 125 | 126 | ctxt, cancel = context.WithTimeout(ctx, 10*time.Millisecond) 127 | if err := d.PutValue(ctxt, "/stall/bla", []byte("bla")); err != context.DeadlineExceeded { 128 | t.Error(err) 129 | } 130 | cancel() 131 | } 132 | 133 | func errContains(err error, substr string) bool { 134 | for _, e := range multierr.Errors(err) { 135 | if strings.Contains(e.Error(), substr) { 136 | return true 137 | } 138 | } 139 | return false 140 | } 141 | 142 | func TestParallelPutFailure(t *testing.T) { 143 | ctx := context.Background() 144 | router := Parallel{ 145 | Routers: []routing.Routing{ 146 | &Compose{ 147 | ValueStore: new(failValueStore), 148 | }, 149 | &Compose{ 150 | ValueStore: new(dummyValueStore), 151 | }, 152 | }, 153 | } 154 | err := router.PutValue(ctx, "/some/thing", []byte("thing")) 155 | if err != errFailValue { 156 | t.Fatalf("expected put to fail with %q, got %q", errFailValue, err) 157 | } 158 | } 159 | 160 | func TestBasicParallelFindProviders(t *testing.T) { 161 | prefix := cid.NewPrefixV1(cid.Raw, mh.SHA2_256) 162 | c, _ := prefix.Sum([]byte("foo")) 163 | 164 | ctx := context.Background() 165 | 166 | d := Parallel{} 167 | if _, ok := <-d.FindProvidersAsync(ctx, c, 10); ok { 168 | t.Fatal("expected no results") 169 | } 170 | d = Parallel{ 171 | Routers: []routing.Routing{ 172 | &Compose{ 173 | ContentRouting: &dummyProvider{}, 174 | }, 175 | }, 176 | } 177 | if _, ok := <-d.FindProvidersAsync(ctx, c, 10); ok { 178 | t.Fatal("expected no results") 179 | } 180 | } 181 | 182 | func TestParallelFindProviders(t *testing.T) { 183 | prefix := cid.NewPrefixV1(cid.Raw, mh.SHA2_256) 184 | 185 | cid1, _ := prefix.Sum([]byte("foo")) 186 | cid2, _ := prefix.Sum([]byte("bar")) 187 | cid3, _ := prefix.Sum([]byte("baz")) 188 | cid4, _ := prefix.Sum([]byte("none")) 189 | cid5, _ := prefix.Sum([]byte("stall")) 190 | 191 | d := Parallel{ 192 | Routers: []routing.Routing{ 193 | Parallel{ 194 | Routers: []routing.Routing{ 195 | &Compose{}, 196 | }, 197 | }, 198 | Tiered{ 199 | Routers: []routing.Routing{ 200 | &Compose{}, 201 | &struct{ Compose }{}, 202 | }, 203 | }, 204 | &struct{ Compose }{}, 205 | Null{}, 206 | Tiered{ 207 | Routers: []routing.Routing{ 208 | &Compose{ 209 | ContentRouting: dummyProvider{ 210 | cid1: []peer.ID{ 211 | "first", 212 | "second", 213 | "third", 214 | "fourth", 215 | "fifth", 216 | "sixth", 217 | }, 218 | cid2: []peer.ID{ 219 | "fourth", 220 | "fifth", 221 | "sixth", 222 | }, 223 | cid5: []peer.ID{ 224 | "before", 225 | "stall", 226 | "after", 227 | }, 228 | }, 229 | }, 230 | }, 231 | }, 232 | 
Parallel{ 233 | Routers: []routing.Routing{ 234 | Null{}, 235 | &Compose{ 236 | ContentRouting: dummyProvider{ 237 | cid1: []peer.ID{ 238 | "first", 239 | "second", 240 | "fifth", 241 | "sixth", 242 | }, 243 | cid2: []peer.ID{ 244 | "second", 245 | "fourth", 246 | "fifth", 247 | }, 248 | }, 249 | }, 250 | }, 251 | }, 252 | &Compose{ 253 | ValueStore: &LimitedValueStore{ 254 | ValueStore: new(dummyValueStore), 255 | Namespaces: []string{"allow1"}, 256 | }, 257 | ContentRouting: dummyProvider{ 258 | cid2: []peer.ID{ 259 | "first", 260 | }, 261 | cid3: []peer.ID{ 262 | "second", 263 | "fourth", 264 | "fifth", 265 | "sixth", 266 | }, 267 | }, 268 | }, 269 | }, 270 | } 271 | 272 | ctx := context.Background() 273 | 274 | for i := 0; i < 2; i++ { 275 | 276 | for i, tc := range []struct { 277 | cid cid.Cid 278 | providers []peer.ID 279 | }{ 280 | { 281 | cid: cid1, 282 | providers: []peer.ID{"first", "second", "third", "fourth", "fifth", "sixth"}, 283 | }, 284 | { 285 | cid: cid2, 286 | providers: []peer.ID{"first", "second", "fourth", "fifth", "sixth"}, 287 | }, 288 | { 289 | cid: cid3, 290 | providers: []peer.ID{"second", "fourth", "fifth", "sixth"}, 291 | }, 292 | } { 293 | expecting := make(map[peer.ID]struct{}, len(tc.providers)) 294 | for _, p := range tc.providers { 295 | expecting[p] = struct{}{} 296 | } 297 | for p := range d.FindProvidersAsync(ctx, tc.cid, 10) { 298 | if _, ok := expecting[p.ID]; !ok { 299 | t.Errorf("not expecting provider %s for test case %d", string(p.ID), i) 300 | } 301 | delete(expecting, p.ID) 302 | } 303 | for p := range expecting { 304 | t.Errorf("failed to find expected provider %s for test case %d", string(p), i) 305 | } 306 | } 307 | expecting := []peer.ID{"second", "fourth", "fifth"} 308 | for p := range d.FindProvidersAsync(ctx, cid3, 3) { 309 | if len(expecting) == 0 { 310 | t.Errorf("not expecting any more providers, got %s", string(p.ID)) 311 | continue 312 | } 313 | if expecting[0] != p.ID { 314 | t.Errorf("expecting peer %s, got peer %s", string(expecting[0]), string(p.ID)) 315 | } 316 | expecting = expecting[1:] 317 | } 318 | for _, e := range expecting { 319 | t.Errorf("didn't find expected peer: %s", string(e)) 320 | } 321 | if _, ok := <-d.FindProvidersAsync(ctx, cid4, 3); ok { 322 | t.Fatalf("shouldn't have found this CID") 323 | } 324 | count := 0 325 | for range d.FindProvidersAsync(ctx, cid1, 0) { 326 | count++ 327 | } 328 | if count != 6 { 329 | t.Fatalf("should have found 6 peers, found %d", count) 330 | } 331 | 332 | ctxt, cancel := context.WithTimeout(ctx, 10*time.Millisecond) 333 | stallCh := d.FindProvidersAsync(ctxt, cid5, 5) 334 | if v := <-stallCh; v.ID != "before" { 335 | t.Fatal("expected peer 'before'") 336 | } 337 | if _, ok := <-stallCh; ok { 338 | t.Fatal("expected stall and close") 339 | } 340 | cancel() 341 | 342 | ctxt, cancel = context.WithTimeout(ctx, 10*time.Millisecond) 343 | stallCh = d.FindProvidersAsync(ctxt, cid1, 10) 344 | time.Sleep(100 * time.Millisecond) 345 | if _, ok := <-stallCh; ok { 346 | t.Fatal("expected channel to have been closed") 347 | } 348 | cancel() 349 | 350 | // Now to test many content routers 351 | for i := 0; i < 30; i++ { 352 | d.Routers = append(d.Routers, &Compose{ 353 | ContentRouting: &dummyProvider{}, 354 | }) 355 | } 356 | } 357 | } 358 | 359 | func TestParallelFindPeer(t *testing.T) { 360 | d := Parallel{ 361 | Routers: []routing.Routing{ 362 | Null{}, 363 | Parallel{ 364 | Routers: []routing.Routing{ 365 | Null{}, 366 | Null{}, 367 | }, 368 | }, 369 | Tiered{ 370 | Routers: 
[]routing.Routing{ 371 | Null{}, 372 | Null{}, 373 | }, 374 | }, 375 | &struct{ Compose }{}, 376 | Parallel{ 377 | Routers: []routing.Routing{ 378 | &Compose{ 379 | PeerRouting: dummyPeerRouter{ 380 | "first": struct{}{}, 381 | "second": struct{}{}, 382 | }, 383 | }, 384 | }, 385 | }, 386 | Tiered{ 387 | Routers: []routing.Routing{ 388 | &Compose{ 389 | PeerRouting: dummyPeerRouter{ 390 | "first": struct{}{}, 391 | "third": struct{}{}, 392 | }, 393 | }, 394 | }, 395 | }, 396 | &Compose{ 397 | PeerRouting: dummyPeerRouter{ 398 | "first": struct{}{}, 399 | "fifth": struct{}{}, 400 | }, 401 | }, 402 | }, 403 | } 404 | 405 | ctx := context.Background() 406 | 407 | for _, di := range append([]routing.Routing{d}, d.Routers[4:]...) { 408 | if _, err := di.FindPeer(ctx, "first"); err != nil { 409 | t.Fatal(err) 410 | } 411 | } 412 | 413 | for _, p := range []peer.ID{ 414 | "first", 415 | "second", 416 | "third", 417 | "fifth", 418 | } { 419 | if _, err := d.FindPeer(ctx, p); err != nil { 420 | t.Fatal(err) 421 | } 422 | } 423 | 424 | if _, err := d.FindPeer(ctx, "fourth"); err != routing.ErrNotFound { 425 | t.Fatal(err) 426 | } 427 | } 428 | 429 | func TestParallelProvide(t *testing.T) { 430 | prefix := cid.NewPrefixV1(cid.Raw, mh.SHA2_256) 431 | 432 | d := Parallel{ 433 | Routers: []routing.Routing{ 434 | Parallel{ 435 | Routers: []routing.Routing{ 436 | &Compose{ 437 | ContentRouting: cbProvider(func(c cid.Cid, local bool) error { 438 | return routing.ErrNotSupported 439 | }), 440 | }, 441 | &Compose{ 442 | ContentRouting: cbProvider(func(c cid.Cid, local bool) error { 443 | return errors.New(c.String()) 444 | }), 445 | }, 446 | }, 447 | }, 448 | Tiered{ 449 | Routers: []routing.Routing{ 450 | &struct{ Compose }{}, 451 | &Compose{}, 452 | &Compose{}, 453 | }, 454 | }, 455 | }, 456 | } 457 | 458 | ctx := context.Background() 459 | 460 | cid1, _ := prefix.Sum([]byte("foo")) 461 | 462 | if err := d.Provide(ctx, cid1, false); err.Error() != cid1.String() { 463 | t.Fatal(err) 464 | } 465 | } 466 | 467 | func TestParallelClose(t *testing.T) { 468 | closer := new(testCloser) 469 | d := Parallel{ 470 | Routers: []routing.Routing{ 471 | struct { 472 | *testCloser 473 | routing.Routing 474 | }{closer, Null{}}, 475 | }, 476 | } 477 | d.Close() 478 | if closer.closed != 1 { 479 | t.Fatalf("expected one close, got %d", closer.closed) 480 | } 481 | } 482 | -------------------------------------------------------------------------------- /pubkey_test.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/libp2p/go-libp2p/core/routing" 8 | "github.com/libp2p/go-libp2p/core/test" 9 | ) 10 | 11 | func TestGetPublicKey(t *testing.T) { 12 | t.Parallel() 13 | 14 | d := Parallel{ 15 | Routers: []routing.Routing{ 16 | Parallel{ 17 | Routers: []routing.Routing{ 18 | &Compose{ 19 | ValueStore: &LimitedValueStore{ 20 | ValueStore: new(dummyValueStore), 21 | Namespaces: []string{"other"}, 22 | }, 23 | }, 24 | }, 25 | }, 26 | Tiered{ 27 | Routers: []routing.Routing{ 28 | &Compose{ 29 | ValueStore: &LimitedValueStore{ 30 | ValueStore: new(dummyValueStore), 31 | Namespaces: []string{"pk"}, 32 | }, 33 | }, 34 | }, 35 | }, 36 | &Compose{ 37 | ValueStore: &LimitedValueStore{ 38 | ValueStore: new(dummyValueStore), 39 | Namespaces: []string{"other", "pk"}, 40 | }, 41 | }, 42 | &struct{ Compose }{Compose{ValueStore: &LimitedValueStore{ValueStore: Null{}}}}, 43 | &struct{ Compose }{}, 44 | }, 45 | } 46 | 47 | 
pid, _ := test.RandPeerID() 48 | 49 | ctx := context.Background() 50 | if _, err := d.GetPublicKey(ctx, pid); err != routing.ErrNotFound { 51 | t.Fatal(err) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /tiered.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "context" 5 | "io" 6 | 7 | "github.com/ipfs/go-cid" 8 | record "github.com/libp2p/go-libp2p-record" 9 | ci "github.com/libp2p/go-libp2p/core/crypto" 10 | "github.com/libp2p/go-libp2p/core/peer" 11 | "github.com/libp2p/go-libp2p/core/routing" 12 | "go.uber.org/multierr" 13 | ) 14 | 15 | // Tiered is like Parallel, except that GetValue and FindPeer 16 | // are called in series. 17 | type Tiered struct { 18 | Routers []routing.Routing 19 | Validator record.Validator 20 | } 21 | 22 | // PutValue puts the given key to all sub-routers in parallel. It succeeds as 23 | // long as putting to at least one sub-router succeeds, but it waits for all 24 | // puts to terminate. 25 | func (r Tiered) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) error { 26 | return Parallel{Routers: r.Routers}.PutValue(ctx, key, value, opts...) 27 | } 28 | 29 | func (r Tiered) get(ctx context.Context, do func(routing.Routing) (interface{}, error)) (interface{}, error) { 30 | var errs []error 31 | for _, ri := range r.Routers { 32 | val, err := do(ri) 33 | switch err { 34 | case nil: 35 | return val, nil 36 | case routing.ErrNotFound, routing.ErrNotSupported: 37 | continue 38 | } 39 | if ctx.Err() != nil { 40 | return nil, ctx.Err() 41 | } 42 | errs = append(errs, err) 43 | } 44 | switch len(errs) { 45 | case 0: 46 | return nil, routing.ErrNotFound 47 | case 1: 48 | return nil, errs[0] 49 | default: 50 | return nil, multierr.Combine(errs...) 51 | } 52 | } 53 | 54 | // GetValue sequentially searches each sub-router for the given key, returning 55 | // the value from the first sub-router to complete the query. 56 | func (r Tiered) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) { 57 | valInt, err := r.get(ctx, func(ri routing.Routing) (interface{}, error) { 58 | return ri.GetValue(ctx, key, opts...) 59 | }) 60 | val, _ := valInt.([]byte) 61 | return val, err 62 | } 63 | 64 | // SearchValue searches all sub-routers for the given key in parallel, 65 | // returning results in monotonically increasing "freshness" from all 66 | // sub-routers. 67 | func (r Tiered) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) { 68 | return Parallel(r).SearchValue(ctx, key, opts...) 69 | } 70 | 71 | // GetPublicKey sequentially searches each sub-router for the public key, 72 | // returning the first result. 73 | func (r Tiered) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, error) { 74 | vInt, err := r.get(ctx, func(ri routing.Routing) (interface{}, error) { 75 | return routing.GetPublicKey(ri, ctx, p) 76 | }) 77 | val, _ := vInt.(ci.PubKey) 78 | return val, err 79 | } 80 | 81 | // Provide announces that this peer provides the content in question to all 82 | // sub-routers in parallel. Provide returns success as long as a single 83 | // sub-router succeeds, but still waits for all sub-routers to finish before 84 | // returning.
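//
// Only the sequential lookups (GetValue, GetPublicKey and FindPeer) are what
// make Tiered tiered; write-style operations such as this one simply delegate
// to the Parallel combinator.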
85 | func (r Tiered) Provide(ctx context.Context, c cid.Cid, local bool) error { 86 | return Parallel{Routers: r.Routers}.Provide(ctx, c, local) 87 | } 88 | 89 | // FindProvidersAsync searches all sub-routers in parallel for peers who are 90 | // able to provide a given key. 91 | // 92 | // If count > 0, it returns at most count providers. If count == 0, it returns 93 | // an unbounded number of providers. 94 | func (r Tiered) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { 95 | return Parallel{Routers: r.Routers}.FindProvidersAsync(ctx, c, count) 96 | } 97 | 98 | // FindPeer sequentially searches for given peer using each sub-router, 99 | // returning the first result. 100 | func (r Tiered) FindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { 101 | valInt, err := r.get(ctx, func(ri routing.Routing) (interface{}, error) { 102 | return ri.FindPeer(ctx, p) 103 | }) 104 | val, _ := valInt.(peer.AddrInfo) 105 | return val, err 106 | } 107 | 108 | // Bootstrap signals all the sub-routers to bootstrap. 109 | func (r Tiered) Bootstrap(ctx context.Context) error { 110 | return Parallel{Routers: r.Routers}.Bootstrap(ctx) 111 | } 112 | 113 | // Close closes all sub-routers that implement the io.Closer interface. 114 | func (r Tiered) Close() error { 115 | var errs error 116 | for _, router := range r.Routers { 117 | if closer, ok := router.(io.Closer); ok { 118 | if err := closer.Close(); err != nil { 119 | errs = multierr.Append(errs, err) 120 | } 121 | } 122 | } 123 | return errs 124 | } 125 | 126 | var _ routing.Routing = Tiered{} 127 | -------------------------------------------------------------------------------- /tiered_test.go: -------------------------------------------------------------------------------- 1 | package routinghelpers 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "testing" 7 | 8 | "github.com/ipfs/go-cid" 9 | record "github.com/libp2p/go-libp2p-record" 10 | "github.com/libp2p/go-libp2p/core/routing" 11 | ) 12 | 13 | type testValidator struct{} 14 | 15 | func (testValidator) Validate(key string, value []byte) error { 16 | ns, k, err := record.SplitKey(key) 17 | if err != nil { 18 | return err 19 | } 20 | if ns != "namespace" { 21 | return record.ErrInvalidRecordType 22 | } 23 | if !bytes.Contains(value, []byte(k)) { 24 | return record.ErrInvalidRecordType 25 | } 26 | if bytes.Contains(value, []byte("invalid")) { 27 | return record.ErrInvalidRecordType 28 | } 29 | return nil 30 | 31 | } 32 | 33 | func (testValidator) Select(key string, vals [][]byte) (int, error) { 34 | if len(vals) == 0 { 35 | panic("selector with no values") 36 | } 37 | var best []byte 38 | idx := 0 39 | for i, val := range vals { 40 | if bytes.Compare(best, val) < 0 { 41 | best = val 42 | idx = i 43 | } 44 | } 45 | return idx, nil 46 | } 47 | 48 | func TestTieredSearch(t *testing.T) { 49 | t.Parallel() 50 | 51 | d := Tiered{ 52 | Validator: testValidator{}, 53 | Routers: []routing.Routing{ 54 | Null{}, 55 | &Compose{ 56 | ValueStore: new(dummyValueStore), 57 | ContentRouting: Null{}, 58 | PeerRouting: Null{}, 59 | }, 60 | &Compose{ 61 | ValueStore: new(dummyValueStore), 62 | ContentRouting: Null{}, 63 | PeerRouting: Null{}, 64 | }, 65 | Null{}, 66 | &Compose{}, 67 | }, 68 | } 69 | 70 | ctx, cancel := context.WithCancel(context.Background()) 71 | defer cancel() 72 | 73 | if err := d.Routers[1].PutValue(ctx, "/namespace/v1", []byte("v1 - 1")); err != nil { 74 | t.Fatal(err) 75 | } 76 | 77 | valch, err := d.SearchValue(ctx, "/namespace/v1") 78 | if err != nil { 
79 | t.Fatal(err) 80 | } 81 | 82 | v, ok := <-valch 83 | if !ok { 84 | t.Fatal("expected to get a value") 85 | } 86 | if string(v) != "v1 - 1" { 87 | t.Fatalf("unexpected value: %s", string(v)) 88 | } 89 | _, ok = <-valch 90 | if ok { 91 | t.Fatal("didn't expect a value") 92 | } 93 | 94 | if err := d.Routers[2].PutValue(ctx, "/namespace/v1", []byte("v1 - 2")); err != nil { 95 | t.Fatal(err) 96 | } 97 | valch, err = d.SearchValue(ctx, "/namespace/v1") 98 | if err != nil { 99 | t.Fatal(err) 100 | } 101 | 102 | if _, ok := <-valch; !ok { 103 | t.Fatal("expected to get a value") 104 | } 105 | 106 | v, ok = <-valch 107 | if ok { 108 | if string(v) != "v1 - 2" { 109 | t.Fatalf("unexpected value: %s", string(v)) 110 | } 111 | 112 | _, ok = <-valch 113 | if ok { 114 | t.Fatal("didn't expect a value") 115 | } 116 | } 117 | 118 | } 119 | 120 | func TestTieredGet(t *testing.T) { 121 | t.Parallel() 122 | 123 | d := Tiered{ 124 | Routers: []routing.Routing{ 125 | Null{}, 126 | &Compose{ 127 | ValueStore: new(dummyValueStore), 128 | ContentRouting: Null{}, 129 | PeerRouting: Null{}, 130 | }, 131 | &Compose{ 132 | ValueStore: new(dummyValueStore), 133 | ContentRouting: Null{}, 134 | PeerRouting: Null{}, 135 | }, 136 | &Compose{ 137 | ValueStore: new(dummyValueStore), 138 | ContentRouting: Null{}, 139 | PeerRouting: Null{}, 140 | }, 141 | Null{}, 142 | &Compose{}, 143 | }, 144 | } 145 | ctx := context.Background() 146 | if err := d.Routers[1].PutValue(ctx, "k1", []byte("v1")); err != nil { 147 | t.Fatal(err) 148 | } 149 | if err := d.Routers[2].PutValue(ctx, "k2", []byte("v2")); err != nil { 150 | t.Fatal(err) 151 | } 152 | if err := d.Routers[2].PutValue(ctx, "k1", []byte("v1shadow")); err != nil { 153 | t.Fatal(err) 154 | } 155 | if err := d.Routers[3].PutValue(ctx, "k3", []byte("v3")); err != nil { 156 | t.Fatal(err) 157 | } 158 | 159 | for k, v := range map[string]string{ 160 | "k1": "v1", 161 | "k2": "v2", 162 | "k3": "v3", 163 | } { 164 | actual, err := d.GetValue(ctx, k) 165 | if err != nil { 166 | t.Fatal(err) 167 | } 168 | if string(actual) != v { 169 | t.Errorf("expected %s, got %s", v, string(actual)) 170 | } 171 | } 172 | if _, err := d.GetValue(ctx, "missing"); err != routing.ErrNotFound { 173 | t.Fatal("wrong error: ", err) 174 | } 175 | 176 | if err := d.PutValue(ctx, "key", []byte("value")); err != nil { 177 | t.Fatal(err) 178 | } 179 | 180 | if _, err := d.GetValue(ctx, "/error/myErr"); !errContains(err, "myErr") { 181 | t.Fatalf("expected error to contain myErr, got: %s", err) 182 | } 183 | 184 | if _, err := (Tiered{Routers: []routing.Routing{d.Routers[1]}}).GetValue(ctx, "/error/myErr"); !errContains(err, "myErr") { 185 | t.Fatalf("expected error to contain myErr, got: %s", err) 186 | } 187 | 188 | for _, di := range append([]routing.Routing{d}, d.Routers[1:len(d.Routers)-2]...) 
{ 189 | v, err := di.GetValue(ctx, "key") 190 | if err != nil { 191 | t.Fatal(err) 192 | } 193 | if string(v) != "value" { 194 | t.Errorf("expected value, got %s", string(v)) 195 | } 196 | } 197 | } 198 | 199 | func TestTieredNoSupport(t *testing.T) { 200 | t.Parallel() 201 | 202 | d := Tiered{Routers: []routing.Routing{Tiered{Routers: []routing.Routing{Null{}}}}} 203 | if _, ok := <-d.FindProvidersAsync(context.Background(), cid.Cid{}, 0); ok { 204 | t.Fatal("shouldn't have found a provider") 205 | } 206 | } 207 | 208 | func TestTieredClose(t *testing.T) { 209 | t.Parallel() 210 | 211 | closer := new(testCloser) 212 | d := Tiered{ 213 | Routers: []routing.Routing{ 214 | struct { 215 | *testCloser 216 | routing.Routing 217 | }{closer, Null{}}, 218 | }, 219 | } 220 | d.Close() 221 | if closer.closed != 1 { 222 | t.Fatal("expected one close") 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /tracing/tracing.go: -------------------------------------------------------------------------------- 1 | // Package tracing provides high-level method tracing for the [routing.Routing] API. 2 | // Each method of the API has a corresponding method on [Tracer] which returns either a deferred wrapping callback or just a deferred callback. 3 | package tracing 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | 9 | "github.com/ipfs/go-cid" 10 | "github.com/libp2p/go-libp2p/core/peer" 11 | "github.com/libp2p/go-libp2p/core/routing" 12 | "github.com/multiformats/go-multibase" 13 | "github.com/multiformats/go-multihash" 14 | "go.opentelemetry.io/otel" 15 | "go.opentelemetry.io/otel/attribute" 16 | "go.opentelemetry.io/otel/codes" 17 | "go.opentelemetry.io/otel/trace" 18 | ) 19 | 20 | // Tracer is the library name that will be passed to [otel.Tracer]. 21 | type Tracer string 22 | 23 | func (t Tracer) StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { 24 | return otel.Tracer(string(t)).Start(ctx, name, opts...) 25 | } 26 | 27 | const base = multibase.Base64url 28 | 29 | func bytesAsMultibase(b []byte) string { 30 | r, err := multibase.Encode(base, b) 31 | if err != nil { 32 | panic(fmt.Errorf("unreachable: %w", err)) 33 | } 34 | return r 35 | } 36 | 37 | // keysAsMultibase avoids returning non-UTF-8 strings, which otel does not like.
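// For instance, raw multihash bytes become a string like "uEiB..." under
// multibase.Base64url (the leading "u" is the multibase prefix), which is
// safe to attach as a span attribute.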
38 | func keysAsMultibase(name string, keys []multihash.Multihash) attribute.KeyValue { 39 | keysStr := make([]string, len(keys)) 40 | for i, k := range keys { 41 | keysStr[i] = bytesAsMultibase(k) 42 | } 43 | return attribute.StringSlice(name, keysStr) 44 | } 45 | 46 | func (t Tracer) Provide(routerName string, ctx context.Context, key cid.Cid, announce bool) (_ context.Context, end func(error)) { 47 | // outline so the concatenation can be folded at compile-time 48 | return t.provide(routerName+".Provide", ctx, key, announce) 49 | } 50 | 51 | func (t Tracer) provide(traceName string, ctx context.Context, key cid.Cid, announce bool) (_ context.Context, end func(error)) { 52 | ctx, span := t.StartSpan(ctx, traceName) 53 | if !span.IsRecording() { 54 | span.End() 55 | return ctx, func(error) {} 56 | } 57 | 58 | span.SetAttributes( 59 | attribute.Stringer("key", key), 60 | attribute.Bool("announce", announce), 61 | ) 62 | 63 | return ctx, func(err error) { 64 | if err != nil { 65 | span.SetStatus(codes.Error, err.Error()) 66 | } 67 | span.End() 68 | } 69 | } 70 | 71 | func (t Tracer) ProvideMany(routerName string, ctx context.Context, keys []multihash.Multihash) (_ context.Context, end func(error)) { 72 | // outline so the concatenation can be folded at compile-time 73 | return t.provideMany(routerName+".ProvideMany", ctx, keys) 74 | } 75 | 76 | func (t Tracer) provideMany(traceName string, ctx context.Context, keys []multihash.Multihash) (_ context.Context, end func(error)) { 77 | ctx, span := t.StartSpan(ctx, traceName) 78 | if !span.IsRecording() { 79 | span.End() 80 | return ctx, func(error) {} 81 | } 82 | 83 | span.SetAttributes(keysAsMultibase("keys", keys)) 84 | 85 | return ctx, func(err error) { 86 | if err != nil { 87 | span.SetStatus(codes.Error, err.Error()) 88 | } 89 | span.End() 90 | } 91 | } 92 | 93 | func peerInfoToAttributes(p peer.AddrInfo) []attribute.KeyValue { 94 | strs := make([]string, len(p.Addrs)) 95 | for i, v := range p.Addrs { 96 | strs[i] = v.String() 97 | } 98 | 99 | return []attribute.KeyValue{ 100 | attribute.Stringer("id", p.ID), 101 | attribute.StringSlice("addrs", strs), 102 | } 103 | } 104 | 105 | func (t Tracer) FindProvidersAsync(routerName string, ctx context.Context, key cid.Cid, count int) (_ context.Context, passthrough func(<-chan peer.AddrInfo, error) <-chan peer.AddrInfo) { 106 | // outline so the concatenation can be folded at compile-time 107 | return t.findProvidersAsync(routerName+".FindProvidersAsync", ctx, key, count) 108 | } 109 | 110 | func (t Tracer) findProvidersAsync(traceName string, ctx context.Context, key cid.Cid, count int) (_ context.Context, passthrough func(<-chan peer.AddrInfo, error) <-chan peer.AddrInfo) { 111 | ctx, span := t.StartSpan(ctx, traceName) 112 | if !span.IsRecording() { 113 | span.End() 114 | return ctx, func(c <-chan peer.AddrInfo, _ error) <-chan peer.AddrInfo { return c } 115 | } 116 | 117 | span.SetAttributes( 118 | attribute.Stringer("key", key), 119 | attribute.Int("count", count), 120 | ) 121 | 122 | return ctx, func(in <-chan peer.AddrInfo, err error) <-chan peer.AddrInfo { 123 | if err != nil { 124 | span.SetStatus(codes.Error, err.Error()) 125 | span.End() 126 | return in 127 | } 128 | 129 | span.AddEvent("started streaming") 130 | 131 | out := make(chan peer.AddrInfo) 132 | go func() { 133 | defer span.End() 134 | defer close(out) 135 | 136 | for v := range in { 137 | span.AddEvent("found provider", trace.WithAttributes(peerInfoToAttributes(v)...)) 138 | select { 139 | case out <- v: 140 | case 
<-ctx.Done(): 141 | span.SetStatus(codes.Error, ctx.Err().Error()) 142 | } 143 | } 144 | }() 145 | 146 | return out 147 | } 148 | } 149 | 150 | func (t Tracer) FindPeer(routerName string, ctx context.Context, id peer.ID) (_ context.Context, end func(peer.AddrInfo, error)) { 151 | // outline so the concatenation can be folded at compile-time 152 | return t.findPeer(routerName+".FindPeer", ctx, id) 153 | } 154 | 155 | func (t Tracer) findPeer(traceName string, ctx context.Context, id peer.ID) (_ context.Context, end func(peer.AddrInfo, error)) { 156 | ctx, span := t.StartSpan(ctx, traceName) 157 | if !span.IsRecording() { 158 | span.End() 159 | return ctx, func(peer.AddrInfo, error) {} 160 | } 161 | 162 | span.SetAttributes(attribute.Stringer("key", id)) 163 | 164 | return ctx, func(p peer.AddrInfo, err error) { 165 | defer span.End() 166 | 167 | if err != nil { 168 | span.SetStatus(codes.Error, err.Error()) 169 | return 170 | } 171 | 172 | span.AddEvent("found peer", trace.WithAttributes(peerInfoToAttributes(p)...)) 173 | } 174 | } 175 | 176 | func (t Tracer) PutValue(routerName string, ctx context.Context, key string, val []byte, opts ...routing.Option) (_ context.Context, end func(error)) { 177 | // outline so the concatenation can be folded at compile-time 178 | return t.putValue(routerName+".PutValue", ctx, key, val, opts...) 179 | } 180 | 181 | func (t Tracer) putValue(traceName string, ctx context.Context, key string, val []byte, opts ...routing.Option) (_ context.Context, end func(error)) { 182 | ctx, span := t.StartSpan(ctx, traceName) 183 | if !span.IsRecording() { 184 | span.End() 185 | return ctx, func(error) {} 186 | } 187 | 188 | span.SetAttributes( 189 | attribute.String("key", bytesAsMultibase([]byte(key))), 190 | attribute.String("value", bytesAsMultibase(val)), 191 | attribute.Int("len(opts)", len(opts)), 192 | ) 193 | 194 | return ctx, func(err error) { 195 | if err != nil { 196 | span.SetStatus(codes.Error, err.Error()) 197 | } 198 | span.End() 199 | } 200 | } 201 | 202 | func (t Tracer) GetValue(routerName string, ctx context.Context, key string, opts ...routing.Option) (_ context.Context, end func([]byte, error)) { 203 | // outline so the concatenation can be folded at compile-time 204 | return t.getValue(routerName+".GetValue", ctx, key, opts...) 205 | } 206 | 207 | func (t Tracer) getValue(traceName string, ctx context.Context, key string, opts ...routing.Option) (_ context.Context, end func([]byte, error)) { 208 | ctx, span := t.StartSpan(ctx, traceName) 209 | if !span.IsRecording() { 210 | span.End() 211 | return ctx, func([]byte, error) {} 212 | } 213 | 214 | span.SetAttributes( 215 | attribute.String("key", bytesAsMultibase([]byte(key))), 216 | attribute.Int("len(opts)", len(opts)), 217 | ) 218 | 219 | return ctx, func(val []byte, err error) { 220 | defer span.End() 221 | 222 | if err != nil { 223 | span.SetStatus(codes.Error, err.Error()) 224 | return 225 | } 226 | 227 | span.AddEvent("found value", trace.WithAttributes( 228 | attribute.String("value", bytesAsMultibase(val)))) 229 | } 230 | } 231 | 232 | func (t Tracer) SearchValue(routerName string, ctx context.Context, key string, opts ...routing.Option) (_ context.Context, passthrough func(<-chan []byte, error) (<-chan []byte, error)) { 233 | // outline so the concatenation can be folded at compile-time 234 | return t.searchValue(routerName+".SearchValue", ctx, key, opts...) 
235 | } 236 | 237 | func (t Tracer) searchValue(traceName string, ctx context.Context, key string, opts ...routing.Option) (_ context.Context, passthrough func(<-chan []byte, error) (<-chan []byte, error)) { 238 | ctx, span := t.StartSpan(ctx, traceName) 239 | if !span.IsRecording() { 240 | span.End() 241 | return ctx, func(c <-chan []byte, err error) (<-chan []byte, error) { return c, err } 242 | } 243 | 244 | span.SetAttributes( 245 | attribute.String("key", bytesAsMultibase([]byte(key))), 246 | attribute.Int("len(opts)", len(opts)), 247 | ) 248 | 249 | return ctx, func(in <-chan []byte, err error) (<-chan []byte, error) { 250 | if err != nil { 251 | span.SetStatus(codes.Error, err.Error()) 252 | span.End() 253 | return in, err 254 | } 255 | 256 | span.AddEvent("started streaming") 257 | 258 | out := make(chan []byte) 259 | go func() { 260 | defer span.End() 261 | defer close(out) 262 | 263 | for v := range in { 264 | span.AddEvent("found value", trace.WithAttributes( 265 | attribute.String("value", bytesAsMultibase(v))), 266 | ) 267 | select { 268 | case out <- v: 269 | case <-ctx.Done(): 270 | span.SetStatus(codes.Error, ctx.Err().Error()) 271 | } 272 | } 273 | }() 274 | 275 | return out, nil 276 | } 277 | } 278 | 279 | func (t Tracer) Bootstrap(routerName string, ctx context.Context) (_ context.Context, end func(error)) { 280 | // outline so the concatenation can be folded at compile-time 281 | return t.bootstrap(routerName+".Bootstrap", ctx) 282 | } 283 | 284 | func (t Tracer) bootstrap(traceName string, ctx context.Context) (_ context.Context, end func(error)) { 285 | ctx, span := t.StartSpan(ctx, traceName) 286 | if !span.IsRecording() { 287 | span.End() 288 | return ctx, func(error) {} 289 | } 290 | 291 | return ctx, func(err error) { 292 | if err != nil { 293 | span.SetStatus(codes.Error, err.Error()) 294 | } 295 | span.End() 296 | } 297 | } 298 | -------------------------------------------------------------------------------- /version.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "v0.7.5" 3 | } 4 | --------------------------------------------------------------------------------