├── .github ├── ISSUE_TEMPLATE │ ├── config.yml │ └── open_an_issue.md ├── actions │ └── go-test-setup │ │ └── action.yml ├── config.yml ├── dependabot.yml └── workflows │ ├── generated-pr.yml │ ├── go.yml │ └── stale.yml ├── .gitignore ├── COPYRIGHT ├── LICENSE ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── crdt.go ├── crdt_norace_test.go ├── crdt_test.go ├── examples └── globaldb │ ├── .gitignore │ ├── README.md │ ├── globaldb.go │ ├── go.mod │ └── go.sum ├── go.mod ├── go.sum ├── heads.go ├── heads_test.go ├── ipld.go ├── migrations.go ├── package.json ├── pb ├── bcast.pb.go ├── bcast.proto ├── delta.pb.go ├── delta.proto └── generate.go ├── pubsub_broadcaster.go └── set.go /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Getting Help on IPFS 4 | url: https://ipfs.io/help 5 | about: All information about how and where to get help on IPFS. 6 | - name: IPFS Official Forum 7 | url: https://discuss.ipfs.io 8 | about: Please post general questions, support requests, and discussions here. 9 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/open_an_issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Open an issue 3 | about: Only for actionable issues relevant to this repository. 4 | title: '' 5 | labels: need/triage 6 | assignees: '' 7 | 8 | --- 9 | 20 | -------------------------------------------------------------------------------- /.github/actions/go-test-setup/action.yml: -------------------------------------------------------------------------------- 1 | name: extend-tests-timeout 2 | description: add -timeout 20m flag to GOFLAGS to extend timeout for tests 3 | 4 | runs: 5 | using: "composite" 6 | steps: 7 | - name: Extend timeout for tests 8 | shell: bash 9 | run: echo "GOFLAGS=$GOFLAGS -timeout=20m" >> $GITHUB_ENV 10 | -------------------------------------------------------------------------------- /.github/config.yml: -------------------------------------------------------------------------------- 1 | # Configuration for welcome - https://github.com/behaviorbot/welcome 2 | 3 | # Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome 4 | # Comment to be posted to on first time issues 5 | newIssueWelcomeComment: > 6 | Thank you for submitting your first issue to this repository! A maintainer 7 | will be here shortly to triage and review. 8 | 9 | In the meantime, please double-check that you have provided all the 10 | necessary information to make this process easy! Any information that can 11 | help save additional round trips is useful! We currently aim to give 12 | initial feedback within **two business days**. If this does not happen, feel 13 | free to leave a comment. 14 | 15 | Please keep an eye on how this issue will be labeled, as labels give an 16 | overview of priorities, assignments and additional actions requested by the 17 | maintainers: 18 | 19 | - "Priority" labels will show how urgent this is for the team. 20 | - "Status" labels will show if this is ready to be worked on, blocked, or in progress. 21 | - "Need" labels will indicate if additional input or analysis is required. 22 | 23 | Finally, remember to use https://discuss.ipfs.io if you just need general 24 | support. 
25 | 26 | # Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome 27 | # Comment to be posted to on PRs from first time contributors in your repository 28 | newPRWelcomeComment: > 29 | Thank you for submitting this PR! 30 | 31 | A maintainer will be here shortly to review it. 32 | 33 | We are super grateful, but we are also overloaded! Help us by making sure 34 | that: 35 | 36 | * The context for this PR is clear, with relevant discussion, decisions 37 | and stakeholders linked/mentioned. 38 | 39 | * Your contribution itself is clear (code comments, self-review for the 40 | rest) and in its best form. Follow the [code contribution 41 | guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md#code-contribution-guidelines) 42 | if they apply. 43 | 44 | Getting other community members to do a review would be great help too on 45 | complex PRs (you can ask in the chats/forums). If you are unsure about 46 | something, just leave us a comment. 47 | 48 | Next steps: 49 | 50 | * A maintainer will triage and assign priority to this PR, commenting on 51 | any missing things and potentially assigning a reviewer for high 52 | priority items. 53 | 54 | * The PR gets reviews, discussed and approvals as needed. 55 | 56 | * The PR is merged by maintainers when it has been approved and comments addressed. 57 | 58 | We currently aim to provide initial feedback/triaging within **two business 59 | days**. Please keep an eye on any labelling actions, as these will indicate 60 | priorities and status of your contribution. 61 | 62 | We are very grateful for your contribution! 63 | 64 | 65 | # Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge 66 | # Comment to be posted to on pull requests merged by a first time user 67 | # Currently disabled 68 | #firstPRMergeComment: "" 69 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gomod 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | time: "11:00" 8 | open-pull-requests-limit: 10 9 | target-branch: dependency-upgrades 10 | ignore: 11 | - dependency-name: github.com/golang/protobuf 12 | versions: 13 | - 1.5.1 14 | - dependency-name: github.com/ipfs/go-log/v2 15 | versions: 16 | - 2.1.2 17 | - package-ecosystem: "github-actions" 18 | directory: "/" 19 | schedule: 20 | interval: "weekly" 21 | -------------------------------------------------------------------------------- /.github/workflows/generated-pr.yml: -------------------------------------------------------------------------------- 1 | name: Close Generated PRs 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: [ push, pull_request ] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | env: 9 | GO111MODULE: on 10 | steps: 11 | - uses: actions/checkout@v4 12 | 13 | - name: Set up Go 14 | uses: actions/setup-go@v5 15 | with: 16 | go-version: "1.24" 17 | 18 | - name: Install depedencies 19 | run: go install 
honnef.co/go/tools/cmd/staticcheck@latest 20 | 21 | - name: Build 22 | run: go build -v ./... 23 | 24 | - name: Test 25 | run: go test -timeout 20m -v -run "TestCRDT" -race -coverprofile=coverage.txt -covermode=atomic 26 | 27 | - name: Staticcheck 28 | run: staticcheck ./... 29 | 30 | - name: Send coverage 31 | run: bash <(curl -s https://codecov.io/bash) 32 | 33 | suite: 34 | runs-on: ubuntu-latest 35 | env: 36 | GO111MODULE: on 37 | steps: 38 | - uses: actions/checkout@v4 39 | 40 | - name: Set up Go 41 | uses: actions/setup-go@v5 42 | with: 43 | go-version: "1.24" 44 | 45 | - name: Test 46 | run: go test -timeout 30m -v -run "TestDatastoreSuite" . 47 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close Stale Issues 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | -------------------------------------------------------------------------------- /COPYRIGHT: -------------------------------------------------------------------------------- 1 | Copyright 2019. Protocol Labs, Inc. 2 | 3 | This library is dual-licensed under Apache 2.0 and MIT terms. 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Dual-licensed under MIT and ASLv2, by way of the [Permissive License 2 | Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/). 3 | 4 | Apache-2.0: https://www.apache.org/licenses/license-2.0 5 | MIT: https://www.opensource.org/licenses/mit 6 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Copyright 2020. Protocol Labs, Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright 2020. Protocol Labs, Inc. 
2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # go-ds-crdt 2 | 3 | > A distributed [go-datastore](https://github.com/ipfs/go-datastore) 4 | > implementation using Merkle-CRDTs. 5 | 6 | `go-ds-crdt` is a key-value store implementation using Merkle CRDTs, as 7 | described in 8 | [the paper by Héctor Sanjuán, Samuli Pöyhtäri and Pedro Teixeira](https://arxiv.org/abs/2004.00107). 9 | It satisfies the 10 | [`Datastore`](https://pkg.go.dev/github.com/ipfs/go-datastore#Datastore) 11 | and [`Batching`](https://pkg.go.dev/github.com/ipfs/go-datastore#Batching) 12 | interfaces from `go-datastore`. 13 | 14 | This means that you can create a network of nodes that use this datastore, and 15 | that each key-value pair written to it will automatically replicate to every 16 | other node. Updates can be published by any node. Network messages can be dropped, 17 | reordered, corrupted or duplicated. It is not necessary to know beforehand 18 | the number of replicas participating in the system. Replicas can join and leave 19 | at will, without informing any other replica. There can be network partitions 20 | but they are resolved as soon as connectivity is re-established between replicas. 21 | 22 | Internally it uses a delta-CRDT Add-Wins Observed-Removed set. The current 23 | value for a key is the one with highest priority. Priorities are defined as 24 | the height of the Merkle-CRDT node in which the key was introduced. 25 | 26 | Implementation is independent from Broadcaster and DAG syncer layers, although the 27 | easiest is to use out of the box components from the IPFS stack (see below). 28 | 29 | ## Performance 30 | 31 | Using batching, Any `go-ds-crdt` replica can easily process and sync 400 keys/s at least. The largest known deployment has 100M keys. 32 | 33 | `go-ds-crdt` is used in production as state-synchronization layer for [IPFS Clusters](https://ipfscluster.io). 34 | 35 | ## Usage 36 | 37 | `go-ds-crdt` needs: 38 | * A user-provided, thread-safe, 39 | [`go-datastore`](https://github.com/ipfs/go-datastore) implementation to 40 | be used as permanent storage. We recommend using the 41 | [Badger implementation](https://pkg.go.dev/github.com/ipfs/go-ds-badger). 42 | * A user-defined `Broadcaster` component to broadcast and receive updates 43 | from a set of replicas. 
If your application uses 44 | [libp2p](https://libp2p.io), you can use 45 | [libp2p PubSub](https://pkg.go.dev/github.com/libp2p/go-libp2p-pubsub) and 46 | the provided 47 | [`PubsubBroadcaster`](https://pkg.go.dev/github.com/ipfs/go-ds-crdt?utm_source=godoc#PubSubBroadcaster). 48 | * A user-defined "DAG syncer" component ([`ipld.DAGService`](https://pkg.go.dev/github.com/ipfs/go-ipld-format?utm_source=godoc#DAGService)) to publish and 49 | retrieve Merkle DAGs to the network. For example, you can use 50 | [IPFS-Lite](https://github.com/hsanjuan/ipfs-lite) which casually 51 | satisfies this interface. 52 | 53 | The permanent storage layout is optimized for KV stores with fast indexes and 54 | key-prefix support. 55 | 56 | See https://pkg.go.dev/github.com/ipfs/go-ds-crdt for more information. 57 | 58 | ## Captain 59 | 60 | This project is captained by @hsanjuan. 61 | 62 | ## License 63 | 64 | This library is dual-licensed under Apache 2.0 and MIT terms. 65 | 66 | Copyright 2019. Protocol Labs, Inc. 67 | 68 | -------------------------------------------------------------------------------- /crdt.go: -------------------------------------------------------------------------------- 1 | // Package crdt provides a replicated go-datastore (key-value store) 2 | // implementation using Merkle-CRDTs built with IPLD nodes. 3 | // 4 | // This Datastore is agnostic to how new MerkleDAG roots are broadcasted to 5 | // the rest of replicas (`Broadcaster` component) and to how the IPLD nodes 6 | // are made discoverable and retrievable to by other replicas (`DAGSyncer` 7 | // component). 8 | // 9 | // The implementation is based on the "Merkle-CRDTs: Merkle-DAGs meet CRDTs" 10 | // paper by Héctor Sanjuán, Samuli Pöyhtäri and Pedro Teixeira. 11 | // 12 | // Note that, in the absence of compaction (which must be performed manually), 13 | // a crdt.Datastore will only grow in size even when keys are deleted. 14 | // 15 | // The time to be fully synced for new Datastore replicas will depend on how 16 | // fast they can retrieve the DAGs announced by the other replicas, but newer 17 | // values will be available before older ones. 18 | package crdt 19 | 20 | import ( 21 | "context" 22 | "errors" 23 | "fmt" 24 | "io" 25 | "math/rand" 26 | "sync" 27 | "sync/atomic" 28 | "time" 29 | 30 | dshelp "github.com/ipfs/boxo/datastore/dshelp" 31 | pb "github.com/ipfs/go-ds-crdt/pb" 32 | "go.uber.org/multierr" 33 | "google.golang.org/protobuf/proto" 34 | 35 | cid "github.com/ipfs/go-cid" 36 | ds "github.com/ipfs/go-datastore" 37 | query "github.com/ipfs/go-datastore/query" 38 | ipld "github.com/ipfs/go-ipld-format" 39 | logging "github.com/ipfs/go-log/v2" 40 | ) 41 | 42 | var _ ds.Datastore = (*Datastore)(nil) 43 | var _ ds.Batching = (*Datastore)(nil) 44 | 45 | // datastore namespace keys. Short keys save space and memory. 46 | const ( 47 | headsNs = "h" // heads 48 | setNs = "s" // set 49 | processedBlocksNs = "b" // blocks 50 | dirtyBitKey = "d" // dirty 51 | versionKey = "crdt_version" 52 | ) 53 | 54 | // Common errors. 55 | var ( 56 | ErrNoMoreBroadcast = errors.New("receiving blocks aborted since no new blocks will be broadcasted") 57 | ) 58 | 59 | // A Broadcaster provides a way to send (notify) an opaque payload to 60 | // all replicas and to retrieve payloads broadcasted. 61 | type Broadcaster interface { 62 | // Send payload to other replicas. 63 | Broadcast(context.Context, []byte) error 64 | // Obtain the next payload received from the network. 
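	// Next typically blocks until a payload is available or the given
	// context is cancelled. Returning ErrNoMoreBroadcast signals that no
	// further payloads will arrive (e.g. on shutdown), letting the
	// Datastore end its receive loop cleanly.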
65 | Next(context.Context) ([]byte, error) 66 | } 67 | 68 | // A SessionDAGService is a Sessions-enabled DAGService. This type of DAG-Service 69 | // provides an optimized NodeGetter to make multiple related requests. The 70 | // same session-enabled NodeGetter is used to download DAG branches when 71 | // the DAGSyncer supports it. 72 | type SessionDAGService interface { 73 | ipld.DAGService 74 | Session(context.Context) ipld.NodeGetter 75 | } 76 | 77 | // Options holds configurable values for Datastore. 78 | type Options struct { 79 | Logger logging.StandardLogger 80 | RebroadcastInterval time.Duration 81 | // The PutHook function is triggered whenever an element 82 | // is successfully added to the datastore (either by a local 83 | // or remote update), and only when that addition is considered the 84 | // prevalent value. 85 | PutHook func(k ds.Key, v []byte) 86 | // The DeleteHook function is triggered whenever a version of an 87 | // element is successfully removed from the datastore (either by a 88 | // local or remote update). Unordered and concurrent updates may 89 | // result in the DeleteHook being triggered even though the element is 90 | // still present in the datastore because it was re-added or not fully 91 | // tombstoned. If that is relevant, use Has() to check if the removed 92 | // element is still part of the datastore. 93 | DeleteHook func(k ds.Key) 94 | // NumWorkers specifies the number of workers ready to walk DAGs 95 | NumWorkers int 96 | // DAGSyncerTimeout specifies how long to wait for a DAGSyncer. 97 | // Set to 0 to disable. 98 | DAGSyncerTimeout time.Duration 99 | // MaxBatchDeltaSize will automatically commit any batches whose 100 | // delta size gets too big. This helps keep DAG nodes small 101 | // enough that they will be transferred by the network. 102 | MaxBatchDeltaSize int 103 | // RepairInterval specifies how often to walk the full DAG until 104 | // the root(s) if it has been marked dirty. 0 to disable. 105 | RepairInterval time.Duration 106 | // MultiHeadProcessing lets several new heads to be processed in 107 | // parallel. This results in more branching in general. More 108 | // branching is not necessarily a bad thing and may improve 109 | // throughput, but everything depends on usage. 110 | MultiHeadProcessing bool 111 | } 112 | 113 | func (opts *Options) verify() error { 114 | if opts == nil { 115 | return errors.New("options cannot be nil") 116 | } 117 | 118 | if opts.RebroadcastInterval <= 0 { 119 | return errors.New("invalid RebroadcastInterval") 120 | } 121 | 122 | if opts.Logger == nil { 123 | return errors.New("the Logger is undefined") 124 | } 125 | 126 | if opts.NumWorkers <= 0 { 127 | return errors.New("bad number of NumWorkers") 128 | } 129 | 130 | if opts.DAGSyncerTimeout < 0 { 131 | return errors.New("invalid DAGSyncerTimeout") 132 | } 133 | 134 | if opts.MaxBatchDeltaSize <= 0 { 135 | return errors.New("invalid MaxBatchDeltaSize") 136 | } 137 | 138 | if opts.RepairInterval < 0 { 139 | return errors.New("invalid RepairInterval") 140 | } 141 | 142 | return nil 143 | } 144 | 145 | // DefaultOptions initializes an Options object with sensible defaults. 
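//
// A common pattern (sketch only; the field values below are illustrative,
// not recommendations) is to start from the defaults and override
// individual fields before calling New:
//
//	opts := DefaultOptions()
//	opts.RebroadcastInterval = 10 * time.Second
//	opts.PutHook = func(k ds.Key, v []byte) {
//		fmt.Printf("added: [%s] -> %s\n", k, string(v))
//	}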
146 | func DefaultOptions() *Options { 147 | return &Options{ 148 | Logger: logging.Logger("crdt"), 149 | RebroadcastInterval: time.Minute, 150 | PutHook: nil, 151 | DeleteHook: nil, 152 | NumWorkers: 5, 153 | DAGSyncerTimeout: 5 * time.Minute, 154 | // always keeping 155 | // https://github.com/libp2p/go-libp2p-core/blob/master/network/network.go#L23 156 | // in sight 157 | MaxBatchDeltaSize: 1 * 1024 * 1024, // 1MB, 158 | RepairInterval: time.Hour, 159 | MultiHeadProcessing: false, 160 | } 161 | } 162 | 163 | // Datastore makes a go-datastore a distributed Key-Value store using 164 | // Merkle-CRDTs and IPLD. 165 | type Datastore struct { 166 | ctx context.Context 167 | cancel context.CancelFunc 168 | 169 | opts *Options 170 | logger logging.StandardLogger 171 | 172 | // permanent storage 173 | store ds.Datastore 174 | namespace ds.Key 175 | set *set 176 | heads *heads 177 | 178 | dagService ipld.DAGService 179 | broadcaster Broadcaster 180 | 181 | seenHeadsMux sync.RWMutex 182 | seenHeads map[cid.Cid]struct{} 183 | 184 | curDeltaMux sync.Mutex 185 | curDelta *pb.Delta // current, unpublished delta 186 | 187 | wg sync.WaitGroup 188 | 189 | jobQueue chan *dagJob 190 | sendJobs chan *dagJob 191 | // keep track of children to be fetched so only one job does every 192 | // child 193 | queuedChildren *cidSafeSet 194 | } 195 | 196 | type dagJob struct { 197 | ctx context.Context // A job context for tracing 198 | session *sync.WaitGroup // A waitgroup to wait for all related jobs to conclude 199 | nodeGetter *crdtNodeGetter // a node getter to use 200 | root cid.Cid // the root of the branch we are walking down 201 | rootPrio uint64 // the priority of the root delta 202 | delta *pb.Delta // the current delta 203 | node ipld.Node // the current ipld Node 204 | 205 | } 206 | 207 | // New returns a Merkle-CRDT-based Datastore using the given one to persist 208 | // all the necessary data under the given namespace. It needs a DAG-Service 209 | // component for IPLD nodes and a Broadcaster component to distribute and 210 | // receive information to and from the rest of replicas. Actual implementation 211 | // of these must be provided by the user, but it normally means using 212 | // ipfs-lite (https://github.com/hsanjuan/ipfs-lite) as a DAG Service and the 213 | // included libp2p PubSubBroadcaster as a Broadcaster. 214 | // 215 | // The given Datastore is used to back all CRDT-datastore contents and 216 | // accounting information. When using an asynchronous datastore, the user is 217 | // in charge of calling Sync() regularly. Sync() will persist paths related to 218 | // the given prefix, but note that if other replicas are modifying the 219 | // datastore, the prefixes that will need syncing are not only those modified 220 | // by the local replica. Therefore the user should consider calling Sync("/"), 221 | // with an empty prefix, in that case, or use a synchronous underlying 222 | // datastore that persists things directly on write. 223 | // 224 | // The CRDT-Datastore should call Close() before the given store is closed. 
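//
// A minimal wiring sketch (assuming the PubSubBroadcaster shipped with this
// package; "store", "lite" and "psub" are placeholders for a go-datastore,
// an ipfs-lite peer and a libp2p PubSub instance):
//
//	bcast, err := NewPubSubBroadcaster(ctx, psub, "my-crdt-topic")
//	if err != nil {
//		return err
//	}
//	d, err := New(store, ds.NewKey("/crdt"), lite, bcast, DefaultOptions())
//	if err != nil {
//		return err
//	}
//	defer d.Close()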
225 | func New( 226 | store ds.Datastore, 227 | namespace ds.Key, 228 | dagSyncer ipld.DAGService, 229 | bcast Broadcaster, 230 | opts *Options, 231 | ) (*Datastore, error) { 232 | if opts == nil { 233 | opts = DefaultOptions() 234 | } 235 | 236 | if err := opts.verify(); err != nil { 237 | return nil, err 238 | } 239 | 240 | // /set 241 | fullSetNs := namespace.ChildString(setNs) 242 | // /heads 243 | fullHeadsNs := namespace.ChildString(headsNs) 244 | 245 | setPutHook := func(k string, v []byte) { 246 | if opts.PutHook == nil { 247 | return 248 | } 249 | dsk := ds.NewKey(k) 250 | opts.PutHook(dsk, v) 251 | } 252 | 253 | setDeleteHook := func(k string) { 254 | if opts.DeleteHook == nil { 255 | return 256 | } 257 | dsk := ds.NewKey(k) 258 | opts.DeleteHook(dsk) 259 | } 260 | 261 | ctx, cancel := context.WithCancel(context.Background()) 262 | set, err := newCRDTSet(ctx, store, fullSetNs, dagSyncer, opts.Logger, setPutHook, setDeleteHook) 263 | if err != nil { 264 | cancel() 265 | return nil, fmt.Errorf("error setting up crdt set: %w", err) 266 | } 267 | heads, err := newHeads(ctx, store, fullHeadsNs, opts.Logger) 268 | if err != nil { 269 | cancel() 270 | return nil, fmt.Errorf("error building heads: %w", err) 271 | } 272 | 273 | dstore := &Datastore{ 274 | ctx: ctx, 275 | cancel: cancel, 276 | opts: opts, 277 | logger: opts.Logger, 278 | store: store, 279 | namespace: namespace, 280 | set: set, 281 | heads: heads, 282 | dagService: dagSyncer, 283 | broadcaster: bcast, 284 | seenHeads: make(map[cid.Cid]struct{}), 285 | jobQueue: make(chan *dagJob, opts.NumWorkers), 286 | sendJobs: make(chan *dagJob), 287 | queuedChildren: newCidSafeSet(), 288 | } 289 | 290 | err = dstore.applyMigrations(ctx) 291 | if err != nil { 292 | cancel() 293 | return nil, err 294 | } 295 | 296 | headList, maxHeight, err := dstore.heads.List(ctx) 297 | if err != nil { 298 | cancel() 299 | return nil, err 300 | } 301 | dstore.logger.Infof( 302 | "crdt Datastore created. Number of heads: %d. Current max-height: %d. 
Dirty: %t", 303 | len(headList), 304 | maxHeight, 305 | dstore.IsDirty(ctx), 306 | ) 307 | 308 | // sendJobWorker + NumWorkers 309 | dstore.wg.Add(1 + dstore.opts.NumWorkers) 310 | go func() { 311 | defer dstore.wg.Done() 312 | dstore.sendJobWorker(ctx) 313 | }() 314 | for i := 0; i < dstore.opts.NumWorkers; i++ { 315 | go func() { 316 | defer dstore.wg.Done() 317 | dstore.dagWorker() 318 | }() 319 | } 320 | dstore.wg.Add(4) 321 | go func() { 322 | defer dstore.wg.Done() 323 | dstore.handleNext(ctx) 324 | }() 325 | go func() { 326 | defer dstore.wg.Done() 327 | dstore.rebroadcast(ctx) 328 | }() 329 | 330 | go func() { 331 | defer dstore.wg.Done() 332 | dstore.repair(ctx) 333 | }() 334 | 335 | go func() { 336 | defer dstore.wg.Done() 337 | dstore.logStats(ctx) 338 | }() 339 | 340 | return dstore, nil 341 | } 342 | 343 | func (store *Datastore) handleNext(ctx context.Context) { 344 | if store.broadcaster == nil { // offline 345 | return 346 | } 347 | for { 348 | select { 349 | case <-ctx.Done(): 350 | return 351 | default: 352 | } 353 | 354 | data, err := store.broadcaster.Next(ctx) 355 | if err != nil { 356 | if err == ErrNoMoreBroadcast || ctx.Err() != nil { 357 | return 358 | } 359 | store.logger.Error(err) 360 | continue 361 | } 362 | 363 | bCastHeads, err := store.decodeBroadcast(ctx, data) 364 | if err != nil { 365 | store.logger.Error(err) 366 | continue 367 | } 368 | 369 | processHead := func(ctx context.Context, c cid.Cid) { 370 | err = store.handleBlock(ctx, c) //handleBlock blocks 371 | if err != nil { 372 | store.logger.Errorf("error processing new head: %s", err) 373 | // For posterity: do not mark the store as 374 | // Dirty if we could not handle a block. If an 375 | // error happens here, it means the node could 376 | // not be fetched, thus it could not be 377 | // processed, thus it did not leave a branch 378 | // half-processed and there's nothign to 379 | // recover. 380 | // disabled: store.MarkDirty() 381 | } 382 | } 383 | 384 | // if we have no heads, make seen-heads heads immediately. On 385 | // a fresh start, this allows us to start building on top of 386 | // recent heads, even if we have not fully synced rather than 387 | // creating new orphan branches. 388 | curHeadCount, err := store.heads.Len(ctx) 389 | if err != nil { 390 | store.logger.Error(err) 391 | continue 392 | } 393 | if curHeadCount == 0 { 394 | dg := &crdtNodeGetter{NodeGetter: store.dagService} 395 | for _, head := range bCastHeads { 396 | prio, err := dg.GetPriority(ctx, head) 397 | if err != nil { 398 | store.logger.Error(err) 399 | continue 400 | } 401 | err = store.heads.Add(ctx, head, prio) 402 | if err != nil { 403 | store.logger.Error(err) 404 | } 405 | } 406 | } 407 | 408 | // For each head, we process it. 409 | for _, head := range bCastHeads { 410 | // A thing to try here would be to process heads in 411 | // the same broadcast in parallel, but do not process 412 | // heads from multiple broadcasts in parallel. 413 | if store.opts.MultiHeadProcessing { 414 | go processHead(ctx, head) 415 | } else { 416 | processHead(ctx, head) 417 | } 418 | store.seenHeadsMux.Lock() 419 | store.seenHeads[head] = struct{}{} 420 | store.seenHeadsMux.Unlock() 421 | } 422 | 423 | // TODO: We should store trusted-peer signatures associated to 424 | // each head in a timecache. When we broadcast, attach the 425 | // signatures (along with our own) to the broadcast. 426 | // Other peers can use the signatures to verify that the 427 | // received CIDs have been issued by a trusted peer. 
428 | } 429 | } 430 | 431 | func (store *Datastore) decodeBroadcast(ctx context.Context, data []byte) ([]cid.Cid, error) { 432 | // Make a list of heads we received 433 | bcastData := pb.CRDTBroadcast{} 434 | err := proto.Unmarshal(data, &bcastData) 435 | if err != nil { 436 | return nil, err 437 | } 438 | 439 | // Compatibility: before we were publishing CIDs directly 440 | msgReflect := bcastData.ProtoReflect() 441 | if len(msgReflect.GetUnknown()) > 0 { 442 | // Backwards compatibility 443 | c, err := cid.Cast(msgReflect.GetUnknown()) 444 | if err != nil { 445 | return nil, err 446 | } 447 | store.logger.Debugf("a legacy CID broadcast was received for: %s", c) 448 | return []cid.Cid{c}, nil 449 | } 450 | 451 | bCastHeads := make([]cid.Cid, len(bcastData.Heads)) 452 | for i, protoHead := range bcastData.Heads { 453 | c, err := cid.Cast(protoHead.Cid) 454 | if err != nil { 455 | return bCastHeads, err 456 | } 457 | bCastHeads[i] = c 458 | } 459 | return bCastHeads, nil 460 | } 461 | 462 | func (store *Datastore) encodeBroadcast(ctx context.Context, heads []cid.Cid) ([]byte, error) { 463 | bcastData := pb.CRDTBroadcast{} 464 | for _, c := range heads { 465 | bcastData.Heads = append(bcastData.Heads, &pb.Head{Cid: c.Bytes()}) 466 | } 467 | 468 | return proto.Marshal(&bcastData) 469 | } 470 | 471 | func randomizeInterval(d time.Duration) time.Duration { 472 | // 30% of the configured interval 473 | leeway := (d * 30 / 100) 474 | // A random number between -leeway|+leeway 475 | randGen := rand.New(rand.NewSource(time.Now().UnixNano())) 476 | randomInterval := time.Duration(randGen.Int63n(int64(leeway*2))) - leeway 477 | return d + randomInterval 478 | } 479 | 480 | func (store *Datastore) rebroadcast(ctx context.Context) { 481 | timer := time.NewTimer(randomizeInterval(store.opts.RebroadcastInterval)) 482 | 483 | for { 484 | select { 485 | case <-ctx.Done(): 486 | if !timer.Stop() { 487 | <-timer.C 488 | } 489 | return 490 | case <-timer.C: 491 | store.rebroadcastHeads(ctx) 492 | timer.Reset(randomizeInterval(store.opts.RebroadcastInterval)) 493 | } 494 | } 495 | } 496 | 497 | func (store *Datastore) repair(ctx context.Context) { 498 | if store.opts.RepairInterval == 0 { 499 | return 500 | } 501 | timer := time.NewTimer(0) // fire immediately on start 502 | for { 503 | select { 504 | case <-ctx.Done(): 505 | if !timer.Stop() { 506 | <-timer.C 507 | } 508 | return 509 | case <-timer.C: 510 | if !store.IsDirty(ctx) { 511 | store.logger.Info("store is marked clean. No need to repair") 512 | } else { 513 | store.logger.Warn("store is marked dirty. 
Starting DAG repair operation") 514 | err := store.repairDAG(ctx) 515 | if err != nil { 516 | store.logger.Error(err) 517 | } 518 | } 519 | timer.Reset(store.opts.RepairInterval) 520 | } 521 | } 522 | } 523 | 524 | // regularly send out a list of heads that we have not recently seen 525 | func (store *Datastore) rebroadcastHeads(ctx context.Context) { 526 | // Get our current list of heads 527 | heads, _, err := store.heads.List(ctx) 528 | if err != nil { 529 | store.logger.Error(err) 530 | return 531 | } 532 | 533 | var headsToBroadcast []cid.Cid 534 | store.seenHeadsMux.RLock() 535 | { 536 | headsToBroadcast = make([]cid.Cid, 0, len(store.seenHeads)) 537 | for _, h := range heads { 538 | if _, ok := store.seenHeads[h]; !ok { 539 | headsToBroadcast = append(headsToBroadcast, h) 540 | } 541 | } 542 | } 543 | store.seenHeadsMux.RUnlock() 544 | 545 | // Send them out 546 | err = store.broadcast(ctx, headsToBroadcast) 547 | if err != nil { 548 | store.logger.Warn("broadcast failed: %v", err) 549 | } 550 | 551 | // Reset the map 552 | store.seenHeadsMux.Lock() 553 | store.seenHeads = make(map[cid.Cid]struct{}) 554 | store.seenHeadsMux.Unlock() 555 | } 556 | 557 | // Log some stats every 5 minutes. 558 | func (store *Datastore) logStats(ctx context.Context) { 559 | ticker := time.NewTicker(5 * time.Minute) 560 | for { 561 | select { 562 | case <-ticker.C: 563 | heads, height, err := store.heads.List(ctx) 564 | if err != nil { 565 | store.logger.Errorf("error listing heads: %s", err) 566 | } 567 | 568 | store.logger.Infof( 569 | "Number of heads: %d. Current max height: %d. Queued jobs: %d. Dirty: %t", 570 | len(heads), 571 | height, 572 | len(store.jobQueue), 573 | store.IsDirty(ctx), 574 | ) 575 | case <-ctx.Done(): 576 | ticker.Stop() 577 | return 578 | } 579 | } 580 | } 581 | 582 | // handleBlock takes care of vetting, retrieving and applying 583 | // CRDT blocks to the Datastore. 584 | func (store *Datastore) handleBlock(ctx context.Context, c cid.Cid) error { 585 | // Ignore already processed blocks. 586 | // This includes the case when the block is a current 587 | // head. 588 | isProcessed, err := store.isProcessed(ctx, c) 589 | if err != nil { 590 | return fmt.Errorf("error checking for known block %s: %w", c, err) 591 | } 592 | if isProcessed { 593 | store.logger.Debugf("%s is known. Skip walking tree", c) 594 | return nil 595 | } 596 | 597 | return store.handleBranch(ctx, c, c) 598 | } 599 | 600 | // send job starting at the given CID in a branch headed by a given head. 601 | // this can be used to continue branch processing from a certain point. 602 | func (store *Datastore) handleBranch(ctx context.Context, head, c cid.Cid) error { 603 | // Walk down from this block 604 | cctx, cancel := context.WithCancel(ctx) 605 | defer cancel() 606 | 607 | dg := &crdtNodeGetter{NodeGetter: store.dagService} 608 | if sessionMaker, ok := store.dagService.(SessionDAGService); ok { 609 | dg = &crdtNodeGetter{NodeGetter: sessionMaker.Session(cctx)} 610 | } 611 | 612 | var session sync.WaitGroup 613 | err := store.sendNewJobs(ctx, &session, dg, head, 0, []cid.Cid{c}) 614 | session.Wait() 615 | return err 616 | } 617 | 618 | // dagWorker should run in its own goroutine. Workers are launched during 619 | // initialization in New(). 
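//
// Job flow, for reference: sendNewJobs() pushes work onto store.sendJobs;
// sendJobWorker() forwards it to store.jobQueue; each dagWorker() merges the
// job's delta via processNode() and queues follow-up jobs for any children
// that still need processing.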
620 | func (store *Datastore) dagWorker() { 621 | for job := range store.jobQueue { 622 | ctx := job.ctx 623 | select { 624 | case <-ctx.Done(): 625 | // drain jobs from queue when we are done 626 | job.session.Done() 627 | continue 628 | default: 629 | } 630 | 631 | children, err := store.processNode( 632 | ctx, 633 | job.nodeGetter, 634 | job.root, 635 | job.rootPrio, 636 | job.delta, 637 | job.node, 638 | ) 639 | 640 | if err != nil { 641 | store.logger.Error(err) 642 | store.MarkDirty(ctx) 643 | job.session.Done() 644 | continue 645 | } 646 | go func(j *dagJob) { 647 | err := store.sendNewJobs(ctx, j.session, j.nodeGetter, j.root, j.rootPrio, children) 648 | if err != nil { 649 | store.logger.Error(err) 650 | store.MarkDirty(ctx) 651 | } 652 | j.session.Done() 653 | }(job) 654 | } 655 | } 656 | 657 | // sendNewJobs calls getDeltas (GetMany) on the crdtNodeGetter with the given 658 | // children and sends each response to the workers. It will block until all 659 | // jobs have been queued. 660 | func (store *Datastore) sendNewJobs(ctx context.Context, session *sync.WaitGroup, ng *crdtNodeGetter, root cid.Cid, rootPrio uint64, children []cid.Cid) error { 661 | if len(children) == 0 { 662 | return nil 663 | } 664 | 665 | cctx, cancel := context.WithTimeout(ctx, store.opts.DAGSyncerTimeout) 666 | defer cancel() 667 | 668 | // Special case for root 669 | if rootPrio == 0 { 670 | prio, err := ng.GetPriority(cctx, children[0]) 671 | if err != nil { 672 | return fmt.Errorf("error getting root delta priority: %w", err) 673 | } 674 | rootPrio = prio 675 | } 676 | 677 | goodDeltas := make(map[cid.Cid]struct{}) 678 | 679 | var err error 680 | loop: 681 | for deltaOpt := range ng.GetDeltas(cctx, children) { 682 | // we abort whenever we a delta comes back in error. 683 | if deltaOpt.err != nil { 684 | err = fmt.Errorf("error getting delta: %w", deltaOpt.err) 685 | break 686 | } 687 | goodDeltas[deltaOpt.node.Cid()] = struct{}{} 688 | 689 | session.Add(1) 690 | job := &dagJob{ 691 | ctx: ctx, 692 | session: session, 693 | nodeGetter: ng, 694 | root: root, 695 | delta: deltaOpt.delta, 696 | node: deltaOpt.node, 697 | rootPrio: rootPrio, 698 | } 699 | select { 700 | case store.sendJobs <- job: 701 | case <-ctx.Done(): 702 | // the job was never sent, so it cannot complete. 703 | session.Done() 704 | // We are in the middle of sending jobs, thus we left 705 | // something unprocessed. 706 | err = ctx.Err() 707 | break loop 708 | } 709 | } 710 | 711 | // This is a safe-guard in case GetDeltas() returns less deltas than 712 | // asked for. It clears up any children that could not be fetched from 713 | // the queue. The rest will remove themselves in processNode(). 714 | // Hector: as far as I know, this should not execute unless errors 715 | // happened. 716 | for _, child := range children { 717 | if _, ok := goodDeltas[child]; !ok { 718 | store.logger.Warn("GetDeltas did not include all children") 719 | store.queuedChildren.Remove(child) 720 | } 721 | } 722 | return err 723 | } 724 | 725 | // the only purpose of this worker is to be able to orderly shut-down job 726 | // workers without races by becoming the only sender for the store.jobQueue 727 | // channel. 
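// With a single sender, jobQueue can be closed safely on shutdown without
// risking a send on a closed channel from concurrent producers.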
728 | func (store *Datastore) sendJobWorker(ctx context.Context) { 729 | for { 730 | select { 731 | case <-ctx.Done(): 732 | if len(store.sendJobs) > 0 { 733 | // we left something in the queue 734 | store.MarkDirty(ctx) 735 | } 736 | close(store.jobQueue) 737 | return 738 | case j := <-store.sendJobs: 739 | store.jobQueue <- j 740 | } 741 | } 742 | } 743 | 744 | func (store *Datastore) processedBlockKey(c cid.Cid) ds.Key { 745 | return store.namespace.ChildString(processedBlocksNs).ChildString(dshelp.MultihashToDsKey(c.Hash()).String()) 746 | } 747 | 748 | func (store *Datastore) isProcessed(ctx context.Context, c cid.Cid) (bool, error) { 749 | return store.store.Has(ctx, store.processedBlockKey(c)) 750 | } 751 | 752 | func (store *Datastore) markProcessed(ctx context.Context, c cid.Cid) error { 753 | return store.store.Put(ctx, store.processedBlockKey(c), nil) 754 | } 755 | 756 | func (store *Datastore) dirtyKey() ds.Key { 757 | return store.namespace.ChildString(dirtyBitKey) 758 | } 759 | 760 | // MarkDirty marks the Datastore as dirty. 761 | func (store *Datastore) MarkDirty(ctx context.Context) { 762 | store.logger.Warn("marking datastore as dirty") 763 | err := store.store.Put(ctx, store.dirtyKey(), nil) 764 | if err != nil { 765 | store.logger.Errorf("error setting dirty bit: %s", err) 766 | } 767 | } 768 | 769 | // IsDirty returns whether the datastore is marked dirty. 770 | func (store *Datastore) IsDirty(ctx context.Context) bool { 771 | ok, err := store.store.Has(ctx, store.dirtyKey()) 772 | if err != nil { 773 | store.logger.Errorf("error checking dirty bit: %s", err) 774 | } 775 | return ok 776 | } 777 | 778 | // MarkClean removes the dirty mark from the datastore. 779 | func (store *Datastore) MarkClean(ctx context.Context) { 780 | store.logger.Info("marking datastore as clean") 781 | err := store.store.Delete(ctx, store.dirtyKey()) 782 | if err != nil { 783 | store.logger.Errorf("error clearing dirty bit: %s", err) 784 | } 785 | } 786 | 787 | // processNode merges the delta in a node and has the logic about what to do 788 | // then. 789 | func (store *Datastore) processNode(ctx context.Context, ng *crdtNodeGetter, root cid.Cid, rootPrio uint64, delta *pb.Delta, node ipld.Node) ([]cid.Cid, error) { 790 | // First, merge the delta in this node. 791 | current := node.Cid() 792 | blockKey := dshelp.MultihashToDsKey(current.Hash()).String() 793 | err := store.set.Merge(ctx, delta, blockKey) 794 | if err != nil { 795 | return nil, fmt.Errorf("error merging delta from %s: %w", current, err) 796 | } 797 | 798 | // Record that we have processed the node so that any other worker 799 | // can skip it. 800 | err = store.markProcessed(ctx, current) 801 | if err != nil { 802 | return nil, fmt.Errorf("error recording %s as processed: %w", current, err) 803 | } 804 | 805 | // Remove from the set that has the children which are queued for 806 | // processing. 807 | store.queuedChildren.Remove(node.Cid()) 808 | 809 | // Some informative logging 810 | if prio := delta.GetPriority(); prio%50 == 0 { 811 | store.logger.Infof("merged delta from node %s (priority: %d)", current, prio) 812 | } else { 813 | store.logger.Debugf("merged delta from node %s (priority: %d)", current, prio) 814 | } 815 | 816 | links := node.Links() 817 | children := []cid.Cid{} 818 | 819 | // We reached the bottom. Our head must become a new head. 
820 | if len(links) == 0 { 821 | err := store.heads.Add(ctx, root, rootPrio) 822 | if err != nil { 823 | return nil, fmt.Errorf("error adding head %s: %w", root, err) 824 | } 825 | } 826 | 827 | // Return children that: 828 | // a) Are not processed 829 | // b) Are not going to be processed by someone else. 830 | // 831 | // For every other child, add our node as Head. 832 | 833 | addedAsHead := false // small optimization to avoid adding as head multiple times. 834 | for _, l := range links { 835 | child := l.Cid 836 | 837 | isHead, _, err := store.heads.IsHead(ctx, child) 838 | if err != nil { 839 | return nil, fmt.Errorf("error checking if %s is head: %w", child, err) 840 | } 841 | 842 | isProcessed, err := store.isProcessed(ctx, child) 843 | if err != nil { 844 | return nil, fmt.Errorf("error checking for known block %s: %w", child, err) 845 | } 846 | 847 | if isHead { 848 | // reached one of the current heads. Replace it with 849 | // the tip of this branch 850 | err := store.heads.Replace(ctx, child, root, rootPrio) 851 | if err != nil { 852 | return nil, fmt.Errorf("error replacing head: %s->%s: %w", child, root, err) 853 | } 854 | addedAsHead = true 855 | 856 | // If this head was already processed, continue this 857 | // protects the case when something is a head but was 858 | // not processed (potentially could happen during 859 | // first sync when heads are set before processing, a 860 | // both a node and its child are heads - which I'm not 861 | // sure if it can happen at all, but good to safeguard 862 | // for it). 863 | if isProcessed { 864 | continue 865 | } 866 | } 867 | 868 | // If the child has already been processed or someone else has 869 | // reserved it for processing, then we can make ourselves a 870 | // head right away because we are not meant to replace an 871 | // existing head. Otherwise, mark it for processing and 872 | // keep going down this branch. 873 | if isProcessed || !store.queuedChildren.Visit(child) { 874 | if !addedAsHead { 875 | err = store.heads.Add(ctx, root, rootPrio) 876 | if err != nil { 877 | // Don't let this failure prevent us 878 | // from processing the other links. 879 | store.logger.Error(fmt.Errorf("error adding head %s: %w", root, err)) 880 | } 881 | } 882 | addedAsHead = true 883 | continue 884 | } 885 | 886 | // We can return this child because it is not processed and we 887 | // reserved it in the queue. 888 | children = append(children, child) 889 | 890 | } 891 | 892 | return children, nil 893 | } 894 | 895 | // RepairDAG is used to walk down the chain until a non-processed node is 896 | // found and at that moment, queues it for processing. 897 | func (store *Datastore) repairDAG(ctx context.Context) error { 898 | start := time.Now() 899 | defer func() { 900 | store.logger.Infof("DAG repair finished. 
Took %s", time.Since(start).Truncate(time.Second)) 901 | }() 902 | 903 | getter := &crdtNodeGetter{store.dagService} 904 | 905 | heads, _, err := store.heads.List(ctx) 906 | if err != nil { 907 | return fmt.Errorf("error listing heads: %w", err) 908 | } 909 | 910 | type nodeHead struct { 911 | head cid.Cid 912 | node cid.Cid 913 | } 914 | 915 | var nodes []nodeHead 916 | queued := cid.NewSet() 917 | for _, h := range heads { 918 | nodes = append(nodes, nodeHead{head: h, node: h}) 919 | queued.Add(h) 920 | } 921 | 922 | // For logging 923 | var visitedNodes uint64 924 | var lastPriority uint64 925 | var queuedNodes uint64 926 | 927 | exitLogging := make(chan struct{}) 928 | defer close(exitLogging) 929 | go func() { 930 | ticker := time.NewTicker(5 * time.Minute) 931 | for { 932 | select { 933 | case <-exitLogging: 934 | ticker.Stop() 935 | return 936 | case <-ticker.C: 937 | store.logger.Infof( 938 | "DAG repair in progress. Visited nodes: %d. Last priority: %d. Queued nodes: %d", 939 | atomic.LoadUint64(&visitedNodes), 940 | atomic.LoadUint64(&lastPriority), 941 | atomic.LoadUint64(&queuedNodes), 942 | ) 943 | } 944 | } 945 | }() 946 | 947 | for { 948 | // GetDelta does not seem to respond well to context 949 | // cancellations (probably this goes down to the Blockstore 950 | // still working with a cancelled context). So we need to put 951 | // this here. 952 | select { 953 | case <-ctx.Done(): 954 | return nil 955 | default: 956 | } 957 | 958 | if len(nodes) == 0 { 959 | break 960 | } 961 | nh := nodes[0] 962 | nodes = nodes[1:] 963 | cur := nh.node 964 | head := nh.head 965 | 966 | cctx, cancel := context.WithTimeout(ctx, store.opts.DAGSyncerTimeout) 967 | n, delta, err := getter.GetDelta(cctx, cur) 968 | if err != nil { 969 | cancel() 970 | return fmt.Errorf("error getting node for reprocessing %s: %w", cur, err) 971 | } 972 | cancel() 973 | 974 | isProcessed, err := store.isProcessed(ctx, cur) 975 | if err != nil { 976 | return fmt.Errorf("error checking for reprocessed block %s: %w", cur, err) 977 | } 978 | if !isProcessed { 979 | store.logger.Debugf("reprocessing %s / %d", cur, delta.Priority) 980 | // start syncing from here. 981 | // do not add children to our queue. 982 | err = store.handleBranch(ctx, head, cur) 983 | if err != nil { 984 | return fmt.Errorf("error reprocessing block %s: %w", cur, err) 985 | } 986 | } 987 | links := n.Links() 988 | for _, l := range links { 989 | if queued.Visit(l.Cid) { 990 | nodes = append(nodes, (nodeHead{head: head, node: l.Cid})) 991 | } 992 | } 993 | 994 | atomic.StoreUint64(&queuedNodes, uint64(len(nodes))) 995 | atomic.AddUint64(&visitedNodes, 1) 996 | atomic.StoreUint64(&lastPriority, delta.Priority) 997 | } 998 | 999 | // If we are here we have successfully reprocessed the chain until the 1000 | // bottom. 1001 | store.MarkClean(ctx) 1002 | return nil 1003 | } 1004 | 1005 | // Repair triggers a DAG-repair, which tries to re-walk the CRDT-DAG from the 1006 | // current heads until the roots, processing currently unprocessed branches. 1007 | // 1008 | // Calling Repair will walk the full DAG even if the dirty bit is unset, but 1009 | // will mark the store as clean unpon successful completion. 1010 | func (store *Datastore) Repair(ctx context.Context) error { 1011 | return store.repairDAG(ctx) 1012 | } 1013 | 1014 | // Get retrieves the object `value` named by `key`. 1015 | // Get will return ErrNotFound if the key is not mapped to a value. 
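//
// The value returned is the one held by the highest-priority surviving
// element for this key in the underlying add-wins OR-Set.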
1016 | func (store *Datastore) Get(ctx context.Context, key ds.Key) (value []byte, err error) { 1017 | return store.set.Element(ctx, key.String()) 1018 | } 1019 | 1020 | // Has returns whether the `key` is mapped to a `value`. 1021 | // In some contexts, it may be much cheaper only to check for existence of 1022 | // a value, rather than retrieving the value itself. (e.g. HTTP HEAD). 1023 | // The default implementation is found in `GetBackedHas`. 1024 | func (store *Datastore) Has(ctx context.Context, key ds.Key) (exists bool, err error) { 1025 | return store.set.InSet(ctx, key.String()) 1026 | } 1027 | 1028 | // GetSize returns the size of the `value` named by `key`. 1029 | // In some contexts, it may be much cheaper to only get the size of the 1030 | // value rather than retrieving the value itself. 1031 | func (store *Datastore) GetSize(ctx context.Context, key ds.Key) (size int, err error) { 1032 | return ds.GetBackedSize(ctx, store, key) 1033 | } 1034 | 1035 | // Query searches the datastore and returns a query result. This function 1036 | // may return before the query actually runs. To wait for the query: 1037 | // 1038 | // result, _ := ds.Query(q) 1039 | // 1040 | // // use the channel interface; result may come in at different times 1041 | // for entry := range result.Next() { ... } 1042 | // 1043 | // // or wait for the query to be completely done 1044 | // entries, _ := result.Rest() 1045 | // for entry := range entries { ... } 1046 | func (store *Datastore) Query(ctx context.Context, q query.Query) (query.Results, error) { 1047 | qr, err := store.set.Elements(ctx, q) 1048 | if err != nil { 1049 | return nil, err 1050 | } 1051 | return query.NaiveQueryApply(q, qr), nil 1052 | } 1053 | 1054 | // Put stores the object `value` named by `key`. 1055 | func (store *Datastore) Put(ctx context.Context, key ds.Key, value []byte) error { 1056 | delta := store.set.Add(ctx, key.String(), value) 1057 | return store.publish(ctx, delta) 1058 | } 1059 | 1060 | // Delete removes the value for given `key`. 1061 | func (store *Datastore) Delete(ctx context.Context, key ds.Key) error { 1062 | delta, err := store.set.Rmv(ctx, key.String()) 1063 | if err != nil { 1064 | return err 1065 | } 1066 | 1067 | if len(delta.Tombstones) == 0 { 1068 | return nil 1069 | } 1070 | return store.publish(ctx, delta) 1071 | } 1072 | 1073 | // Sync ensures that all the data under the given prefix is flushed to disk in 1074 | // the underlying datastore. 1075 | func (store *Datastore) Sync(ctx context.Context, prefix ds.Key) error { 1076 | // This is a quick write up of the internals from the time when 1077 | // I was thinking many underlying datastore entries are affected when 1078 | // an add operation happens: 1079 | // 1080 | // When a key is added: 1081 | // - a new delta is made 1082 | // - Delta is marshalled and a DAG-node is created with the bytes, 1083 | // pointing to previous heads. DAG-node is added to DAGService. 1084 | // - Heads are replaced with new CID. 1085 | // - New CID is broadcasted to everyone 1086 | // - The new CID is processed (up until now the delta had not 1087 | // taken effect). Implementation detail: it is processed before 1088 | // broadcast actually. 
1089 | // - processNode() starts processing that branch from that CID 1090 | // - it calls set.Merge() 1091 | // - that calls putElems() and putTombs() 1092 | // - that may make a batch for all the elems which is later committed 1093 | // - each element has a datastore entry /setNamespace/elemsNamespace// 1094 | // - each tomb has a datastore entry /setNamespace/tombsNamespace// 1095 | // - each value has a datastore entry /setNamespace/keysNamespace//valueSuffix 1096 | // - each value has an additional priority entry /setNamespace/keysNamespace//prioritySuffix 1097 | // - the last two are only written if the added entry has more priority than any the existing 1098 | // - For a value to not be lost, those entries should be fully synced. 1099 | // - In order to check if a value is in the set: 1100 | // - List all elements on /setNamespace/elemsNamespace/ (will return several block_ids) 1101 | // - If we find an element which is not tombstoned, then value is in the set 1102 | // - In order to retrieve an element's value: 1103 | // - Check that it is in the set 1104 | // - Read the value entry from the /setNamespace/keysNamespace//valueSuffix path 1105 | 1106 | // Be safe and just sync everything in our namespace 1107 | if prefix.String() == "/" { 1108 | return store.store.Sync(ctx, store.namespace) 1109 | } 1110 | 1111 | // attempt to be intelligent and sync only all heads and the 1112 | // set entries related to the given prefix. 1113 | err := store.set.datastoreSync(ctx, prefix) 1114 | err2 := store.store.Sync(ctx, store.heads.namespace) 1115 | return multierr.Combine(err, err2) 1116 | } 1117 | 1118 | // Close shuts down the CRDT datastore. It should not be used afterwards. 1119 | func (store *Datastore) Close() error { 1120 | store.cancel() 1121 | store.wg.Wait() 1122 | if store.IsDirty(store.ctx) { 1123 | store.logger.Warn("datastore is being closed marked as dirty") 1124 | } 1125 | return nil 1126 | } 1127 | 1128 | // Batch implements batching for writes by accumulating 1129 | // Put and Delete in the same CRDT-delta and only applying it and 1130 | // broadcasting it on Commit(). 1131 | func (store *Datastore) Batch(ctx context.Context) (ds.Batch, error) { 1132 | return &batch{ctx: ctx, store: store}, nil 1133 | } 1134 | 1135 | func deltaMerge(d1, d2 *pb.Delta) *pb.Delta { 1136 | result := &pb.Delta{ 1137 | Elements: append(d1.GetElements(), d2.GetElements()...), 1138 | Tombstones: append(d1.GetTombstones(), d2.GetTombstones()...), 1139 | Priority: d1.GetPriority(), 1140 | } 1141 | if h := d2.GetPriority(); h > result.Priority { 1142 | result.Priority = h 1143 | } 1144 | return result 1145 | } 1146 | 1147 | // returns delta size and error 1148 | func (store *Datastore) addToDelta(ctx context.Context, key string, value []byte) (int, error) { 1149 | return store.updateDelta(store.set.Add(ctx, key, value)), nil 1150 | 1151 | } 1152 | 1153 | // returns delta size and error 1154 | func (store *Datastore) rmvToDelta(ctx context.Context, key string) (int, error) { 1155 | delta, err := store.set.Rmv(ctx, key) 1156 | if err != nil { 1157 | return 0, err 1158 | } 1159 | 1160 | return store.updateDeltaWithRemove(key, delta), nil 1161 | } 1162 | 1163 | // to satisfy datastore semantics, we need to remove elements from the current 1164 | // batch if they were added. 
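// For example, Put("/k") followed by Delete("/k") inside the same batch must
// not leave "/k" present after Commit(): the pending element for the key is
// dropped from the delta before the tombstones from the removal are merged.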
1165 | func (store *Datastore) updateDeltaWithRemove(key string, newDelta *pb.Delta) int { 1166 | var size int 1167 | store.curDeltaMux.Lock() 1168 | { 1169 | elems := make([]*pb.Element, 0) 1170 | for _, e := range store.curDelta.GetElements() { 1171 | if e.GetKey() != key { 1172 | elems = append(elems, e) 1173 | } 1174 | } 1175 | store.curDelta = &pb.Delta{ 1176 | Elements: elems, 1177 | Tombstones: store.curDelta.GetTombstones(), 1178 | Priority: store.curDelta.GetPriority(), 1179 | } 1180 | store.curDelta = deltaMerge(store.curDelta, newDelta) 1181 | size = proto.Size(store.curDelta) 1182 | } 1183 | store.curDeltaMux.Unlock() 1184 | return size 1185 | } 1186 | 1187 | func (store *Datastore) updateDelta(newDelta *pb.Delta) int { 1188 | var size int 1189 | store.curDeltaMux.Lock() 1190 | { 1191 | store.curDelta = deltaMerge(store.curDelta, newDelta) 1192 | size = proto.Size(store.curDelta) 1193 | } 1194 | store.curDeltaMux.Unlock() 1195 | return size 1196 | } 1197 | 1198 | func (store *Datastore) publishDelta(ctx context.Context) error { 1199 | store.curDeltaMux.Lock() 1200 | defer store.curDeltaMux.Unlock() 1201 | err := store.publish(ctx, store.curDelta) 1202 | if err != nil { 1203 | return err 1204 | } 1205 | store.curDelta = nil 1206 | return nil 1207 | } 1208 | 1209 | func (store *Datastore) putBlock(ctx context.Context, heads []cid.Cid, height uint64, delta *pb.Delta) (ipld.Node, error) { 1210 | if delta != nil { 1211 | delta.Priority = height 1212 | } 1213 | node, err := makeNode(delta, heads) 1214 | if err != nil { 1215 | return nil, fmt.Errorf("error creating new block: %w", err) 1216 | } 1217 | 1218 | cctx, cancel := context.WithTimeout(ctx, store.opts.DAGSyncerTimeout) 1219 | defer cancel() 1220 | err = store.dagService.Add(cctx, node) 1221 | if err != nil { 1222 | return nil, fmt.Errorf("error writing new block %s: %w", node.Cid(), err) 1223 | } 1224 | 1225 | return node, nil 1226 | } 1227 | 1228 | func (store *Datastore) publish(ctx context.Context, delta *pb.Delta) error { 1229 | // curDelta might be nil if nothing has been added to it 1230 | if delta == nil { 1231 | return nil 1232 | } 1233 | c, err := store.addDAGNode(ctx, delta) 1234 | if err != nil { 1235 | return err 1236 | } 1237 | return store.broadcast(ctx, []cid.Cid{c}) 1238 | } 1239 | 1240 | func (store *Datastore) addDAGNode(ctx context.Context, delta *pb.Delta) (cid.Cid, error) { 1241 | heads, height, err := store.heads.List(ctx) 1242 | if err != nil { 1243 | return cid.Undef, fmt.Errorf("error listing heads: %w", err) 1244 | } 1245 | height = height + 1 // This implies our minimum height is 1 1246 | 1247 | delta.Priority = height 1248 | 1249 | // for _, e := range delta.GetElements() { 1250 | // e.Value = append(e.GetValue(), []byte(fmt.Sprintf(" height: %d", height))...) 1251 | // } 1252 | 1253 | nd, err := store.putBlock(ctx, heads, height, delta) 1254 | if err != nil { 1255 | return cid.Undef, err 1256 | } 1257 | 1258 | // Process new block. This makes that every operation applied 1259 | // to this store take effect (delta is merged) before 1260 | // returning. Since our block references current heads, children 1261 | // should be empty 1262 | store.logger.Debugf("processing generated block %s", nd.Cid()) 1263 | children, err := store.processNode( 1264 | ctx, 1265 | &crdtNodeGetter{store.dagService}, 1266 | nd.Cid(), 1267 | height, 1268 | delta, 1269 | nd, 1270 | ) 1271 | if err != nil { 1272 | store.MarkDirty(ctx) // not sure if this will fix much if this happens. 
1273 | return cid.Undef, fmt.Errorf("error processing new block: %w", err) 1274 | } 1275 | if len(children) != 0 { 1276 | store.logger.Warnf("bug: created a block to unknown children") 1277 | } 1278 | 1279 | return nd.Cid(), nil 1280 | } 1281 | 1282 | func (store *Datastore) broadcast(ctx context.Context, cids []cid.Cid) error { 1283 | if store.broadcaster == nil { // offline 1284 | return nil 1285 | } 1286 | 1287 | if len(cids) == 0 { // nothing to rebroadcast 1288 | return nil 1289 | } 1290 | 1291 | select { 1292 | case <-ctx.Done(): 1293 | store.logger.Debugf("skipping broadcast: %s", ctx.Err()) 1294 | default: 1295 | } 1296 | 1297 | store.logger.Debugf("broadcasting %s", cids) 1298 | 1299 | bcastBytes, err := store.encodeBroadcast(ctx, cids) 1300 | if err != nil { 1301 | return err 1302 | } 1303 | 1304 | err = store.broadcaster.Broadcast(ctx, bcastBytes) 1305 | if err != nil { 1306 | return fmt.Errorf("error broadcasting %s: %w", cids, err) 1307 | } 1308 | return nil 1309 | } 1310 | 1311 | type batch struct { 1312 | ctx context.Context 1313 | store *Datastore 1314 | } 1315 | 1316 | func (b *batch) Put(ctx context.Context, key ds.Key, value []byte) error { 1317 | size, err := b.store.addToDelta(ctx, key.String(), value) 1318 | if err != nil { 1319 | return err 1320 | } 1321 | if size > b.store.opts.MaxBatchDeltaSize { 1322 | b.store.logger.Warn("delta size over MaxBatchDeltaSize. Commiting.") 1323 | return b.Commit(ctx) 1324 | } 1325 | return nil 1326 | } 1327 | 1328 | func (b *batch) Delete(ctx context.Context, key ds.Key) error { 1329 | size, err := b.store.rmvToDelta(ctx, key.String()) 1330 | if err != nil { 1331 | return err 1332 | } 1333 | if size > b.store.opts.MaxBatchDeltaSize { 1334 | b.store.logger.Warn("delta size over MaxBatchDeltaSize. Commiting.") 1335 | return b.Commit(ctx) 1336 | } 1337 | return nil 1338 | } 1339 | 1340 | // Commit writes the current delta as a new DAG node and publishes the new 1341 | // head. The publish step is skipped if the context is cancelled. 1342 | func (b *batch) Commit(ctx context.Context) error { 1343 | return b.store.publishDelta(ctx) 1344 | } 1345 | 1346 | // PrintDAG pretty prints the current Merkle-DAG to stdout in a pretty 1347 | // fashion. Only use for small DAGs. DotDAG is an alternative for larger DAGs. 1348 | func (store *Datastore) PrintDAG(ctx context.Context) error { 1349 | heads, _, err := store.heads.List(ctx) 1350 | if err != nil { 1351 | return err 1352 | } 1353 | 1354 | ng := &crdtNodeGetter{NodeGetter: store.dagService} 1355 | 1356 | set := cid.NewSet() 1357 | 1358 | for _, h := range heads { 1359 | err := store.printDAGRec(ctx, h, 0, ng, set) 1360 | if err != nil { 1361 | return err 1362 | } 1363 | } 1364 | return nil 1365 | } 1366 | 1367 | func (store *Datastore) printDAGRec(ctx context.Context, from cid.Cid, depth uint64, ng *crdtNodeGetter, set *cid.Set) error { 1368 | line := "" 1369 | for i := uint64(0); i < depth; i++ { 1370 | line += " " 1371 | } 1372 | 1373 | ok := set.Visit(from) 1374 | if !ok { 1375 | line += "..." 
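		// this branch was already printed earlier in the walk: show an
		// ellipsis and stop instead of descending into it again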
1376 | fmt.Println(line) 1377 | return nil 1378 | } 1379 | 1380 | cctx, cancel := context.WithTimeout(ctx, store.opts.DAGSyncerTimeout) 1381 | defer cancel() 1382 | nd, delta, err := ng.GetDelta(cctx, from) 1383 | if err != nil { 1384 | return err 1385 | } 1386 | cidStr := nd.Cid().String() 1387 | cidStr = cidStr[len(cidStr)-4:] 1388 | line += fmt.Sprintf("- %d | %s: ", delta.GetPriority(), cidStr) 1389 | line += "Add: {" 1390 | for _, e := range delta.GetElements() { 1391 | line += fmt.Sprintf("%s:%s,", e.GetKey(), e.GetValue()) 1392 | } 1393 | line += "}. Rmv: {" 1394 | for _, e := range delta.GetTombstones() { 1395 | line += fmt.Sprintf("%s,", e.GetKey()) 1396 | } 1397 | line += "}. Links: {" 1398 | for _, l := range nd.Links() { 1399 | cidStr := l.Cid.String() 1400 | cidStr = cidStr[len(cidStr)-4:] 1401 | line += fmt.Sprintf("%s,", cidStr) 1402 | } 1403 | line += "}" 1404 | 1405 | processed, err := store.isProcessed(ctx, nd.Cid()) 1406 | if err != nil { 1407 | return err 1408 | } 1409 | 1410 | if !processed { 1411 | line += " Unprocessed!" 1412 | } 1413 | 1414 | line += ":" 1415 | 1416 | fmt.Println(line) 1417 | for _, l := range nd.Links() { 1418 | store.printDAGRec(ctx, l.Cid, depth+1, ng, set) 1419 | } 1420 | return nil 1421 | } 1422 | 1423 | // DotDAG writes a dot-format representation of the CRDT DAG to the given 1424 | // writer. It can be converted to image format and visualized with graphviz 1425 | // tooling. 1426 | func (store *Datastore) DotDAG(ctx context.Context, w io.Writer) error { 1427 | heads, _, err := store.heads.List(ctx) 1428 | if err != nil { 1429 | return err 1430 | } 1431 | 1432 | fmt.Fprintln(w, "digraph CRDTDAG {") 1433 | 1434 | ng := &crdtNodeGetter{NodeGetter: store.dagService} 1435 | 1436 | set := cid.NewSet() 1437 | 1438 | fmt.Fprintln(w, "subgraph heads {") 1439 | for _, h := range heads { 1440 | fmt.Fprintln(w, h) 1441 | } 1442 | fmt.Fprintln(w, "}") 1443 | 1444 | for _, h := range heads { 1445 | err := store.dotDAGRec(ctx, w, h, 0, ng, set) 1446 | if err != nil { 1447 | return err 1448 | } 1449 | } 1450 | fmt.Fprintln(w, "}") 1451 | return nil 1452 | } 1453 | 1454 | func (store *Datastore) dotDAGRec(ctx context.Context, w io.Writer, from cid.Cid, depth uint64, ng *crdtNodeGetter, set *cid.Set) error { 1455 | cidLong := from.String() 1456 | cidShort := cidLong[len(cidLong)-4:] 1457 | 1458 | ok := set.Visit(from) 1459 | if !ok { 1460 | return nil 1461 | } 1462 | 1463 | cctx, cancel := context.WithTimeout(ctx, store.opts.DAGSyncerTimeout) 1464 | defer cancel() 1465 | nd, delta, err := ng.GetDelta(cctx, from) 1466 | if err != nil { 1467 | return err 1468 | } 1469 | 1470 | fmt.Fprintf(w, "%s [label=\"%d | %s: +%d -%d\"]\n", 1471 | cidLong, 1472 | delta.GetPriority(), 1473 | cidShort, 1474 | len(delta.GetElements()), 1475 | len(delta.GetTombstones()), 1476 | ) 1477 | fmt.Fprintf(w, "%s -> {", cidLong) 1478 | for _, l := range nd.Links() { 1479 | fmt.Fprintf(w, "%s ", l.Cid) 1480 | } 1481 | fmt.Fprintln(w, "}") 1482 | 1483 | fmt.Fprintf(w, "subgraph sg_%s {\n", cidLong) 1484 | for _, l := range nd.Links() { 1485 | fmt.Fprintln(w, l.Cid) 1486 | } 1487 | fmt.Fprintln(w, "}") 1488 | 1489 | for _, l := range nd.Links() { 1490 | store.dotDAGRec(ctx, w, l.Cid, depth+1, ng, set) 1491 | } 1492 | return nil 1493 | } 1494 | 1495 | // Stats wraps internal information about the datastore. 1496 | // Might be expanded in the future. 
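// It currently exposes the DAG heads, the maximum head height and the number
// of jobs waiting in the internal processing queue.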
1497 | type Stats struct { 1498 | Heads []cid.Cid 1499 | MaxHeight uint64 1500 | QueuedJobs int 1501 | } 1502 | 1503 | // InternalStats returns internal datastore information like the current heads 1504 | // and max height. 1505 | func (store *Datastore) InternalStats(ctx context.Context) Stats { 1506 | heads, height, _ := store.heads.List(ctx) 1507 | 1508 | return Stats{ 1509 | Heads: heads, 1510 | MaxHeight: height, 1511 | QueuedJobs: len(store.jobQueue), 1512 | } 1513 | } 1514 | 1515 | type cidSafeSet struct { 1516 | set map[cid.Cid]struct{} 1517 | mux sync.RWMutex 1518 | } 1519 | 1520 | func newCidSafeSet() *cidSafeSet { 1521 | return &cidSafeSet{ 1522 | set: make(map[cid.Cid]struct{}), 1523 | } 1524 | } 1525 | 1526 | func (s *cidSafeSet) Visit(c cid.Cid) bool { 1527 | var b bool 1528 | s.mux.Lock() 1529 | { 1530 | if _, ok := s.set[c]; !ok { 1531 | s.set[c] = struct{}{} 1532 | b = true 1533 | } 1534 | } 1535 | s.mux.Unlock() 1536 | return b 1537 | } 1538 | 1539 | func (s *cidSafeSet) Remove(c cid.Cid) { 1540 | s.mux.Lock() 1541 | { 1542 | delete(s.set, c) 1543 | } 1544 | s.mux.Unlock() 1545 | } 1546 | 1547 | func (s *cidSafeSet) Has(c cid.Cid) (ok bool) { 1548 | s.mux.RLock() 1549 | { 1550 | _, ok = s.set[c] 1551 | } 1552 | s.mux.RUnlock() 1553 | return 1554 | } 1555 | -------------------------------------------------------------------------------- /crdt_norace_test.go: -------------------------------------------------------------------------------- 1 | //go:build !race 2 | // +build !race 3 | 4 | package crdt 5 | 6 | import ( 7 | "context" 8 | "testing" 9 | "time" 10 | 11 | query "github.com/ipfs/go-datastore/query" 12 | dstest "github.com/ipfs/go-datastore/test" 13 | ) 14 | 15 | func TestDatastoreSuite(t *testing.T) { 16 | ctx := context.Background() 17 | 18 | numReplicasOld := numReplicas 19 | numReplicas = 1 20 | defer func() { 21 | numReplicas = numReplicasOld 22 | }() 23 | opts := DefaultOptions() 24 | opts.MaxBatchDeltaSize = 200 * 1024 * 1024 // 200 MB 25 | replicas, closeReplicas := makeReplicas(t, opts) 26 | defer closeReplicas() 27 | dstest.SubtestAll(t, replicas[0]) 28 | time.Sleep(time.Second) 29 | 30 | for _, r := range replicas { 31 | q := query.Query{KeysOnly: true} 32 | results, err := r.Query(ctx, q) 33 | if err != nil { 34 | t.Fatal(err) 35 | } 36 | defer results.Close() 37 | rest, err := results.Rest() 38 | if err != nil { 39 | t.Fatal(err) 40 | } 41 | if len(rest) != 0 { 42 | t.Error("all elements in the suite should be gone") 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /crdt_test.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "os" 9 | "sync" 10 | "sync/atomic" 11 | "testing" 12 | "time" 13 | 14 | "github.com/dgraph-io/badger" 15 | blockstore "github.com/ipfs/boxo/blockstore" 16 | "github.com/ipfs/boxo/ipld/merkledag" 17 | mdutils "github.com/ipfs/boxo/ipld/merkledag/test" 18 | cid "github.com/ipfs/go-cid" 19 | ds "github.com/ipfs/go-datastore" 20 | query "github.com/ipfs/go-datastore/query" 21 | dssync "github.com/ipfs/go-datastore/sync" 22 | dstest "github.com/ipfs/go-datastore/test" 23 | badgerds "github.com/ipfs/go-ds-badger" 24 | ipld "github.com/ipfs/go-ipld-format" 25 | log "github.com/ipfs/go-log/v2" 26 | "github.com/multiformats/go-multihash" 27 | ) 28 | 29 | var numReplicas = 15 30 | var debug = false 31 | 32 | const ( 33 | mapStore = iota 34 | badgerStore 35 | ) 36 
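// store selects the backend used by makeStore below: switching it to
// badgerStore runs the tests against an on-disk Badger datastore kept in
// test-badger-<i> folders, which closeReplicas removes when it is done.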
| 37 | var store int = mapStore 38 | 39 | func init() { 40 | dstest.ElemCount = 10 41 | } 42 | 43 | type testLogger struct { 44 | name string 45 | l log.StandardLogger 46 | } 47 | 48 | func (tl *testLogger) Debug(args ...interface{}) { 49 | args = append([]interface{}{tl.name}, args...) 50 | tl.l.Debug(args...) 51 | } 52 | func (tl *testLogger) Debugf(format string, args ...interface{}) { 53 | args = append([]interface{}{tl.name}, args...) 54 | tl.l.Debugf("%s "+format, args...) 55 | } 56 | func (tl *testLogger) Error(args ...interface{}) { 57 | args = append([]interface{}{tl.name}, args...) 58 | tl.l.Error(args...) 59 | } 60 | func (tl *testLogger) Errorf(format string, args ...interface{}) { 61 | args = append([]interface{}{tl.name}, args...) 62 | tl.l.Errorf("%s "+format, args...) 63 | } 64 | func (tl *testLogger) Fatal(args ...interface{}) { 65 | args = append([]interface{}{tl.name}, args...) 66 | tl.l.Fatal(args...) 67 | } 68 | func (tl *testLogger) Fatalf(format string, args ...interface{}) { 69 | args = append([]interface{}{tl.name}, args...) 70 | tl.l.Fatalf("%s "+format, args...) 71 | } 72 | func (tl *testLogger) Info(args ...interface{}) { 73 | args = append([]interface{}{tl.name}, args...) 74 | tl.l.Info(args...) 75 | } 76 | func (tl *testLogger) Infof(format string, args ...interface{}) { 77 | args = append([]interface{}{tl.name}, args...) 78 | tl.l.Infof("%s "+format, args...) 79 | } 80 | func (tl *testLogger) Panic(args ...interface{}) { 81 | args = append([]interface{}{tl.name}, args...) 82 | tl.l.Panic(args...) 83 | } 84 | func (tl *testLogger) Panicf(format string, args ...interface{}) { 85 | args = append([]interface{}{tl.name}, args...) 86 | tl.l.Panicf("%s "+format, args...) 87 | } 88 | func (tl *testLogger) Warn(args ...interface{}) { 89 | args = append([]interface{}{tl.name}, args...) 90 | tl.l.Warn(args...) 91 | } 92 | func (tl *testLogger) Warnf(format string, args ...interface{}) { 93 | args = append([]interface{}{tl.name}, args...) 94 | tl.l.Warnf("%s "+format, args...) 
95 | } 96 | 97 | type mockBroadcaster struct { 98 | ctx context.Context 99 | chans []chan []byte 100 | myChan chan []byte 101 | dropProb *atomic.Int64 // probability of dropping a message instead of receiving it 102 | t testing.TB 103 | } 104 | 105 | func newBroadcasters(t testing.TB, n int) ([]*mockBroadcaster, context.CancelFunc) { 106 | ctx, cancel := context.WithCancel(context.Background()) 107 | broadcasters := make([]*mockBroadcaster, n) 108 | chans := make([]chan []byte, n) 109 | dropP := &atomic.Int64{} 110 | for i := range chans { 111 | chans[i] = make(chan []byte, 300) 112 | broadcasters[i] = &mockBroadcaster{ 113 | ctx: ctx, 114 | chans: chans, 115 | myChan: chans[i], 116 | dropProb: dropP, 117 | t: t, 118 | } 119 | } 120 | return broadcasters, cancel 121 | } 122 | 123 | func (mb *mockBroadcaster) Broadcast(ctx context.Context, data []byte) error { 124 | var wg sync.WaitGroup 125 | 126 | randg := rand.New(rand.NewSource(time.Now().UnixNano())) 127 | 128 | for i, ch := range mb.chans { 129 | n := randg.Int63n(100) 130 | if n < mb.dropProb.Load() { 131 | continue 132 | } 133 | wg.Add(1) 134 | go func(i int) { 135 | defer wg.Done() 136 | randg := rand.New(rand.NewSource(int64(i))) 137 | // randomize when we send a little bit 138 | if randg.Intn(100) < 30 { 139 | // Sleep for a very small time that will 140 | // effectively be pretty random 141 | time.Sleep(time.Nanosecond) 142 | 143 | } 144 | timer := time.NewTimer(5 * time.Second) 145 | defer timer.Stop() 146 | 147 | select { 148 | case ch <- data: 149 | case <-timer.C: 150 | mb.t.Errorf("broadcasting to %d timed out", i) 151 | } 152 | }(i) 153 | wg.Wait() 154 | } 155 | return nil 156 | } 157 | 158 | func (mb *mockBroadcaster) Next(ctx context.Context) ([]byte, error) { 159 | select { 160 | case data := <-mb.myChan: 161 | return data, nil 162 | case <-ctx.Done(): 163 | return nil, ErrNoMoreBroadcast 164 | case <-mb.ctx.Done(): 165 | return nil, ErrNoMoreBroadcast 166 | } 167 | } 168 | 169 | type mockDAGSvc struct { 170 | ipld.DAGService 171 | bs blockstore.Blockstore 172 | } 173 | 174 | func (mds *mockDAGSvc) Add(ctx context.Context, n ipld.Node) error { 175 | return mds.DAGService.Add(ctx, n) 176 | } 177 | 178 | func (mds *mockDAGSvc) Get(ctx context.Context, c cid.Cid) (ipld.Node, error) { 179 | nd, err := mds.DAGService.Get(ctx, c) 180 | if err != nil { 181 | return nd, err 182 | } 183 | return nd, nil 184 | } 185 | 186 | func (mds *mockDAGSvc) GetMany(ctx context.Context, cids []cid.Cid) <-chan *ipld.NodeOption { 187 | return mds.DAGService.GetMany(ctx, cids) 188 | } 189 | 190 | func storeFolder(i int) string { 191 | return fmt.Sprintf("test-badger-%d", i) 192 | } 193 | 194 | func makeStore(t testing.TB, i int) ds.Datastore { 195 | t.Helper() 196 | 197 | switch store { 198 | case mapStore: 199 | return dssync.MutexWrap(ds.NewMapDatastore()) 200 | case badgerStore: 201 | folder := storeFolder(i) 202 | err := os.MkdirAll(folder, 0700) 203 | if err != nil { 204 | t.Fatal(err) 205 | } 206 | 207 | badgerOpts := badger.DefaultOptions("") 208 | badgerOpts.SyncWrites = false 209 | badgerOpts.MaxTableSize = 1048576 210 | 211 | opts := badgerds.Options{Options: badgerOpts} 212 | dstore, err := badgerds.NewDatastore(folder, &opts) 213 | if err != nil { 214 | t.Fatal(err) 215 | } 216 | return dstore 217 | default: 218 | t.Fatal("bad store type selected for tests") 219 | return nil 220 | 221 | } 222 | } 223 | 224 | func makeNReplicas(t testing.TB, n int, opts *Options) ([]*Datastore, func()) { 225 | bcasts, bcastCancel := 
newBroadcasters(t, n) 226 | bs := mdutils.Bserv() 227 | dagserv := merkledag.NewDAGService(bs) 228 | 229 | replicaOpts := make([]*Options, n) 230 | for i := range replicaOpts { 231 | if opts == nil { 232 | replicaOpts[i] = DefaultOptions() 233 | } else { 234 | copy := *opts 235 | replicaOpts[i] = © 236 | } 237 | 238 | replicaOpts[i].Logger = &testLogger{ 239 | name: fmt.Sprintf("r#%d: ", i), 240 | l: DefaultOptions().Logger, 241 | } 242 | replicaOpts[i].RebroadcastInterval = time.Second * 5 243 | replicaOpts[i].NumWorkers = 5 244 | replicaOpts[i].DAGSyncerTimeout = time.Second 245 | } 246 | 247 | replicas := make([]*Datastore, n) 248 | for i := range replicas { 249 | dagsync := &mockDAGSvc{ 250 | DAGService: dagserv, 251 | bs: bs.Blockstore(), 252 | } 253 | 254 | var err error 255 | replicas[i], err = New( 256 | makeStore(t, i), 257 | // ds.NewLogDatastore( 258 | // makeStore(t, i), 259 | // fmt.Sprintf("crdt-test-%d", i), 260 | // ), 261 | ds.NewKey("crdttest"), 262 | dagsync, 263 | bcasts[i], 264 | replicaOpts[i], 265 | ) 266 | if err != nil { 267 | t.Fatal(err) 268 | } 269 | } 270 | if debug { 271 | log.SetLogLevel("crdt", "debug") 272 | } 273 | 274 | closeReplicas := func() { 275 | bcastCancel() 276 | for i, r := range replicas { 277 | err := r.Close() 278 | if err != nil { 279 | t.Error(err) 280 | } 281 | os.RemoveAll(storeFolder(i)) 282 | } 283 | } 284 | 285 | return replicas, closeReplicas 286 | } 287 | 288 | func makeReplicas(t testing.TB, opts *Options) ([]*Datastore, func()) { 289 | return makeNReplicas(t, numReplicas, opts) 290 | } 291 | 292 | func TestCRDT(t *testing.T) { 293 | ctx := context.Background() 294 | replicas, closeReplicas := makeReplicas(t, nil) 295 | defer closeReplicas() 296 | k := ds.NewKey("hi") 297 | err := replicas[0].Put(ctx, k, []byte("hola")) 298 | if err != nil { 299 | t.Fatal(err) 300 | } 301 | 302 | time.Sleep(time.Second) 303 | 304 | for _, r := range replicas { 305 | v, err := r.Get(ctx, k) 306 | if err != nil { 307 | t.Error(err) 308 | } 309 | if string(v) != "hola" { 310 | t.Error("bad content: ", string(v)) 311 | } 312 | } 313 | } 314 | 315 | func TestCRDTReplication(t *testing.T) { 316 | ctx := context.Background() 317 | nItems := 50 318 | randGen := rand.New(rand.NewSource(time.Now().UnixNano())) 319 | 320 | replicas, closeReplicas := makeReplicas(t, nil) 321 | defer closeReplicas() 322 | 323 | // Add nItems choosing the replica randomly 324 | for i := 0; i < nItems; i++ { 325 | k := ds.RandomKey() 326 | v := []byte(fmt.Sprintf("%d", i)) 327 | n := randGen.Intn(len(replicas)) 328 | err := replicas[n].Put(ctx, k, v) 329 | if err != nil { 330 | t.Fatal(err) 331 | } 332 | } 333 | 334 | time.Sleep(500 * time.Millisecond) 335 | 336 | // Query all items 337 | q := query.Query{ 338 | KeysOnly: true, 339 | } 340 | results, err := replicas[0].Query(ctx, q) 341 | if err != nil { 342 | t.Fatal(err) 343 | } 344 | defer results.Close() 345 | rest, err := results.Rest() 346 | if err != nil { 347 | t.Fatal(err) 348 | } 349 | if len(rest) != nItems { 350 | t.Fatalf("expected %d elements", nItems) 351 | } 352 | 353 | // make sure each item has arrived to every replica 354 | for _, res := range rest { 355 | for _, r := range replicas { 356 | ok, err := r.Has(ctx, ds.NewKey(res.Key)) 357 | if err != nil { 358 | t.Error(err) 359 | } 360 | if !ok { 361 | t.Error("replica should have key") 362 | } 363 | } 364 | } 365 | 366 | // give a new value for each item 367 | for _, r := range rest { 368 | n := randGen.Intn(len(replicas)) 369 | err := replicas[n].Put(ctx, 
ds.NewKey(r.Key), []byte("hola")) 370 | if err != nil { 371 | t.Error(err) 372 | } 373 | } 374 | 375 | time.Sleep(200 * time.Millisecond) 376 | 377 | // query everything again 378 | results, err = replicas[0].Query(ctx, q) 379 | if err != nil { 380 | t.Fatal(err) 381 | } 382 | defer results.Close() 383 | 384 | total := 0 385 | for r := range results.Next() { 386 | total++ 387 | if r.Error != nil { 388 | t.Error(err) 389 | } 390 | k := ds.NewKey(r.Key) 391 | for i, r := range replicas { 392 | v, err := r.Get(ctx, k) 393 | if err != nil { 394 | t.Error(err) 395 | } 396 | if string(v) != "hola" { 397 | t.Errorf("value should be hola for %s in replica %d", k, i) 398 | } 399 | } 400 | } 401 | if total != nItems { 402 | t.Fatalf("expected %d elements again", nItems) 403 | } 404 | 405 | for _, r := range replicas { 406 | list, _, err := r.heads.List(ctx) 407 | if err != nil { 408 | t.Fatal(err) 409 | } 410 | t.Log(list) 411 | } 412 | //replicas[0].PrintDAG() 413 | //fmt.Println("==========================================================") 414 | //replicas[1].PrintDAG() 415 | } 416 | 417 | // TestCRDTPriority tests that given multiple concurrent updates from several 418 | // replicas on the same key, the resulting values converge to the same key. 419 | // 420 | // It does this by launching one go routine for every replica, where it replica 421 | // writes the value #replica-number repeteadly (nItems-times). 422 | // 423 | // Finally, it puts a final value for a single key in the first replica and 424 | // checks that all replicas got it. 425 | // 426 | // If key priority rules are respected, the "last" update emitted for the key 427 | // K (which could have come from any replica) should take place everywhere. 428 | func TestCRDTPriority(t *testing.T) { 429 | ctx := context.Background() 430 | nItems := 50 431 | 432 | replicas, closeReplicas := makeReplicas(t, nil) 433 | defer closeReplicas() 434 | 435 | k := ds.NewKey("k") 436 | 437 | var wg sync.WaitGroup 438 | wg.Add(len(replicas)) 439 | for i, r := range replicas { 440 | go func(r *Datastore, i int) { 441 | defer wg.Done() 442 | for j := 0; j < nItems; j++ { 443 | err := r.Put(ctx, k, []byte(fmt.Sprintf("r#%d", i))) 444 | if err != nil { 445 | t.Error(err) 446 | } 447 | } 448 | }(r, i) 449 | } 450 | wg.Wait() 451 | time.Sleep(5000 * time.Millisecond) 452 | var v, lastv []byte 453 | var err error 454 | for i, r := range replicas { 455 | v, err = r.Get(ctx, k) 456 | if err != nil { 457 | t.Error(err) 458 | } 459 | t.Logf("Replica %d got value %s", i, string(v)) 460 | if lastv != nil && string(v) != string(lastv) { 461 | t.Error("value was different between replicas, but should be the same") 462 | } 463 | lastv = v 464 | } 465 | 466 | err = replicas[0].Put(ctx, k, []byte("final value")) 467 | if err != nil { 468 | t.Fatal(err) 469 | } 470 | 471 | time.Sleep(1000 * time.Millisecond) 472 | 473 | for i, r := range replicas { 474 | v, err := r.Get(ctx, k) 475 | if err != nil { 476 | t.Error(err) 477 | } 478 | if string(v) != "final value" { 479 | t.Errorf("replica %d has wrong final value: %s", i, string(v)) 480 | } 481 | } 482 | 483 | //replicas[14].PrintDAG() 484 | //fmt.Println("=======================================================") 485 | //replicas[1].PrintDAG() 486 | } 487 | 488 | func TestCRDTCatchUp(t *testing.T) { 489 | ctx := context.Background() 490 | nItems := 50 491 | replicas, closeReplicas := makeReplicas(t, nil) 492 | defer closeReplicas() 493 | 494 | r := replicas[len(replicas)-1] 495 | br := r.broadcaster.(*mockBroadcaster) 496 | 
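	// dropProb is compared against a random value in [0, 100), so storing 101
	// makes the mock broadcaster drop every message from this replica; the
	// others can only catch up later by walking the DAG.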
br.dropProb.Store(101) 497 | 498 | // this items will not get to anyone 499 | for i := 0; i < nItems; i++ { 500 | k := ds.RandomKey() 501 | err := r.Put(ctx, k, nil) 502 | if err != nil { 503 | t.Fatal(err) 504 | } 505 | } 506 | 507 | time.Sleep(100 * time.Millisecond) 508 | br.dropProb.Store(0) 509 | 510 | // this message will get to everyone 511 | err := r.Put(ctx, ds.RandomKey(), nil) 512 | if err != nil { 513 | t.Fatal(err) 514 | } 515 | 516 | time.Sleep(500 * time.Millisecond) 517 | q := query.Query{KeysOnly: true} 518 | results, err := replicas[0].Query(ctx, q) 519 | if err != nil { 520 | t.Fatal(err) 521 | } 522 | defer results.Close() 523 | rest, err := results.Rest() 524 | if err != nil { 525 | t.Fatal(err) 526 | } 527 | if len(rest) != nItems+1 { 528 | t.Fatal("replica 0 did not get all the things") 529 | } 530 | } 531 | 532 | func TestCRDTPrintDAG(t *testing.T) { 533 | ctx := context.Background() 534 | 535 | nItems := 5 536 | replicas, closeReplicas := makeReplicas(t, nil) 537 | defer closeReplicas() 538 | 539 | // this items will not get to anyone 540 | for i := 0; i < nItems; i++ { 541 | k := ds.RandomKey() 542 | err := replicas[0].Put(ctx, k, nil) 543 | if err != nil { 544 | t.Fatal(err) 545 | } 546 | } 547 | err := replicas[0].PrintDAG(ctx) 548 | if err != nil { 549 | t.Fatal(err) 550 | } 551 | } 552 | 553 | func TestCRDTHooks(t *testing.T) { 554 | ctx := context.Background() 555 | 556 | var put int64 557 | var deleted int64 558 | 559 | opts := DefaultOptions() 560 | opts.PutHook = func(k ds.Key, v []byte) { 561 | atomic.AddInt64(&put, 1) 562 | } 563 | opts.DeleteHook = func(k ds.Key) { 564 | atomic.AddInt64(&deleted, 1) 565 | } 566 | 567 | replicas, closeReplicas := makeReplicas(t, opts) 568 | defer closeReplicas() 569 | 570 | k := ds.RandomKey() 571 | err := replicas[0].Put(ctx, k, nil) 572 | if err != nil { 573 | t.Fatal(err) 574 | } 575 | 576 | err = replicas[0].Delete(ctx, k) 577 | if err != nil { 578 | t.Fatal(err) 579 | } 580 | time.Sleep(100 * time.Millisecond) 581 | if atomic.LoadInt64(&put) != int64(len(replicas)) { 582 | t.Error("all replicas should have notified Put", put) 583 | } 584 | if atomic.LoadInt64(&deleted) != int64(len(replicas)) { 585 | t.Error("all replicas should have notified Remove", deleted) 586 | } 587 | } 588 | 589 | func TestCRDTBatch(t *testing.T) { 590 | ctx := context.Background() 591 | 592 | opts := DefaultOptions() 593 | opts.MaxBatchDeltaSize = 500 // bytes 594 | 595 | replicas, closeReplicas := makeReplicas(t, opts) 596 | defer closeReplicas() 597 | 598 | btch, err := replicas[0].Batch(ctx) 599 | if err != nil { 600 | t.Fatal(err) 601 | } 602 | 603 | // This should be batched 604 | k := ds.RandomKey() 605 | err = btch.Put(ctx, k, make([]byte, 200)) 606 | if err != nil { 607 | t.Fatal(err) 608 | } 609 | 610 | if _, err := replicas[0].Get(ctx, k); err != ds.ErrNotFound { 611 | t.Fatal("should not have commited the batch") 612 | } 613 | 614 | k2 := ds.RandomKey() 615 | err = btch.Put(ctx, k2, make([]byte, 400)) 616 | if err != nil { 617 | t.Fatal(err) 618 | } 619 | 620 | if _, err := replicas[0].Get(ctx, k2); err != nil { 621 | t.Fatal("should have commited the batch: delta size was over threshold") 622 | } 623 | 624 | err = btch.Delete(ctx, k) 625 | if err != nil { 626 | t.Fatal(err) 627 | } 628 | 629 | if _, err := replicas[0].Get(ctx, k); err != nil { 630 | t.Fatal("should not have committed the batch") 631 | } 632 | 633 | err = btch.Commit(ctx) 634 | if err != nil { 635 | t.Fatal(err) 636 | } 637 | 638 | time.Sleep(100 * 
time.Millisecond) 639 | for _, r := range replicas { 640 | if _, err := r.Get(ctx, k); err != ds.ErrNotFound { 641 | t.Error("k should have been deleted everywhere") 642 | } 643 | if _, err := r.Get(ctx, k2); err != nil { 644 | t.Error("k2 should be everywhere") 645 | } 646 | } 647 | } 648 | 649 | func TestCRDTNamespaceClash(t *testing.T) { 650 | ctx := context.Background() 651 | 652 | opts := DefaultOptions() 653 | replicas, closeReplicas := makeReplicas(t, opts) 654 | defer closeReplicas() 655 | 656 | k := ds.NewKey("path/to/something") 657 | err := replicas[0].Put(ctx, k, nil) 658 | if err != nil { 659 | t.Fatal(err) 660 | } 661 | 662 | time.Sleep(100 * time.Millisecond) 663 | 664 | k = ds.NewKey("path") 665 | ok, _ := replicas[0].Has(ctx, k) 666 | if ok { 667 | t.Error("it should not have the key") 668 | } 669 | 670 | _, err = replicas[0].Get(ctx, k) 671 | if err != ds.ErrNotFound { 672 | t.Error("should return err not found") 673 | } 674 | 675 | err = replicas[0].Put(ctx, k, []byte("hello")) 676 | if err != nil { 677 | t.Fatal(err) 678 | } 679 | 680 | v, err := replicas[0].Get(ctx, k) 681 | if err != nil { 682 | t.Fatal(err) 683 | } 684 | if string(v) != "hello" { 685 | t.Error("wrong value read from database") 686 | } 687 | 688 | err = replicas[0].Delete(ctx, ds.NewKey("path/to/something")) 689 | if err != nil { 690 | t.Fatal(err) 691 | } 692 | 693 | v, err = replicas[0].Get(ctx, k) 694 | if err != nil { 695 | t.Fatal(err) 696 | } 697 | if string(v) != "hello" { 698 | t.Error("wrong value read from database") 699 | } 700 | } 701 | 702 | var _ ds.Datastore = (*syncedTrackDs)(nil) 703 | 704 | type syncedTrackDs struct { 705 | ds.Datastore 706 | syncs map[ds.Key]struct{} 707 | set *set 708 | } 709 | 710 | func (st *syncedTrackDs) Sync(ctx context.Context, k ds.Key) error { 711 | st.syncs[k] = struct{}{} 712 | return st.Datastore.Sync(ctx, k) 713 | } 714 | 715 | func (st *syncedTrackDs) isSynced(k ds.Key) bool { 716 | prefixStr := k.String() 717 | mustBeSynced := []ds.Key{ 718 | st.set.elemsPrefix(prefixStr), 719 | st.set.tombsPrefix(prefixStr), 720 | st.set.keyPrefix(keysNs).Child(k), 721 | } 722 | 723 | for k := range st.syncs { 724 | synced := false 725 | for _, t := range mustBeSynced { 726 | if k == t || k.IsAncestorOf(t) { 727 | synced = true 728 | break 729 | } 730 | } 731 | if !synced { 732 | return false 733 | } 734 | } 735 | return true 736 | } 737 | 738 | func TestCRDTSync(t *testing.T) { 739 | ctx := context.Background() 740 | 741 | opts := DefaultOptions() 742 | replicas, closeReplicas := makeReplicas(t, opts) 743 | defer closeReplicas() 744 | 745 | syncedDs := &syncedTrackDs{ 746 | Datastore: replicas[0].set.store, 747 | syncs: make(map[ds.Key]struct{}), 748 | set: replicas[0].set, 749 | } 750 | 751 | replicas[0].set.store = syncedDs 752 | k1 := ds.NewKey("/hello/bye") 753 | k2 := ds.NewKey("/hello") 754 | k3 := ds.NewKey("/hell") 755 | 756 | err := replicas[0].Put(ctx, k1, []byte("value1")) 757 | if err != nil { 758 | t.Fatal(err) 759 | } 760 | 761 | err = replicas[0].Put(ctx, k2, []byte("value2")) 762 | if err != nil { 763 | t.Fatal(err) 764 | } 765 | 766 | err = replicas[0].Put(ctx, k3, []byte("value3")) 767 | if err != nil { 768 | t.Fatal(err) 769 | } 770 | 771 | err = replicas[0].Sync(ctx, ds.NewKey("/hello")) 772 | if err != nil { 773 | t.Fatal(err) 774 | } 775 | 776 | if !syncedDs.isSynced(k1) { 777 | t.Error("k1 should have been synced") 778 | } 779 | 780 | if !syncedDs.isSynced(k2) { 781 | t.Error("k2 should have been synced") 782 | } 783 | 784 | if 
syncedDs.isSynced(k3) { 785 | t.Error("k3 should have not been synced") 786 | } 787 | } 788 | 789 | func TestCRDTBroadcastBackwardsCompat(t *testing.T) { 790 | ctx := context.Background() 791 | mh, err := multihash.Sum([]byte("emacs is best"), multihash.SHA2_256, -1) 792 | if err != nil { 793 | t.Fatal(err) 794 | } 795 | cidV0 := cid.NewCidV0(mh) 796 | 797 | opts := DefaultOptions() 798 | replicas, closeReplicas := makeReplicas(t, opts) 799 | defer closeReplicas() 800 | 801 | cids, err := replicas[0].decodeBroadcast(ctx, cidV0.Bytes()) 802 | if err != nil { 803 | t.Fatal(err) 804 | } 805 | 806 | if len(cids) != 1 || !cids[0].Equals(cidV0) { 807 | t.Error("should have returned a single cidV0", cids) 808 | } 809 | 810 | data, err := replicas[0].encodeBroadcast(ctx, cids) 811 | if err != nil { 812 | t.Fatal(err) 813 | } 814 | 815 | cids2, err := replicas[0].decodeBroadcast(ctx, data) 816 | if err != nil { 817 | t.Fatal(err) 818 | } 819 | 820 | if len(cids2) != 1 || !cids[0].Equals(cidV0) { 821 | t.Error("should have reparsed cid0", cids2) 822 | } 823 | } 824 | 825 | func BenchmarkQueryElements(b *testing.B) { 826 | ctx := context.Background() 827 | replicas, closeReplicas := makeNReplicas(b, 1, nil) 828 | defer closeReplicas() 829 | 830 | for i := 0; i < b.N; i++ { 831 | k := ds.RandomKey() 832 | err := replicas[0].Put(ctx, k, make([]byte, 2000)) 833 | if err != nil { 834 | b.Fatal(err) 835 | } 836 | } 837 | 838 | b.ResetTimer() 839 | 840 | q := query.Query{ 841 | KeysOnly: false, 842 | } 843 | results, err := replicas[0].Query(ctx, q) 844 | if err != nil { 845 | b.Fatal(err) 846 | } 847 | defer results.Close() 848 | 849 | totalSize := 0 850 | for r := range results.Next() { 851 | if r.Error != nil { 852 | b.Error(r.Error) 853 | } 854 | totalSize += len(r.Value) 855 | } 856 | b.Log(totalSize) 857 | } 858 | 859 | func TestRandomizeInterval(t *testing.T) { 860 | prevR := 100 * time.Second 861 | for i := 0; i < 1000; i++ { 862 | r := randomizeInterval(100 * time.Second) 863 | if r < 70*time.Second || r > 130*time.Second { 864 | t.Error("r was ", r) 865 | } 866 | if prevR == r { 867 | t.Log("r and prevR were equal") 868 | } 869 | prevR = r 870 | } 871 | } 872 | 873 | func TestCRDTPutPutDelete(t *testing.T) { 874 | replicas, closeReplicas := makeNReplicas(t, 2, nil) 875 | defer closeReplicas() 876 | 877 | ctx := context.Background() 878 | 879 | br0 := replicas[0].broadcaster.(*mockBroadcaster) 880 | br0.dropProb.Store(101) 881 | 882 | br1 := replicas[1].broadcaster.(*mockBroadcaster) 883 | br1.dropProb.Store(101) 884 | 885 | k := ds.NewKey("k1") 886 | 887 | // r0 - put put delete 888 | err := replicas[0].Put(ctx, k, []byte("r0-1")) 889 | if err != nil { 890 | t.Fatal(err) 891 | } 892 | err = replicas[0].Put(ctx, k, []byte("r0-2")) 893 | if err != nil { 894 | t.Fatal(err) 895 | } 896 | err = replicas[0].Delete(ctx, k) 897 | if err != nil { 898 | t.Fatal(err) 899 | } 900 | 901 | // r1 - put 902 | err = replicas[1].Put(ctx, k, []byte("r1-1")) 903 | if err != nil { 904 | t.Fatal(err) 905 | } 906 | 907 | br0.dropProb.Store(0) 908 | br1.dropProb.Store(0) 909 | 910 | time.Sleep(15 * time.Second) 911 | 912 | r0Res, err := replicas[0].Get(ctx, ds.NewKey("k1")) 913 | if err != nil { 914 | if !errors.Is(err, ds.ErrNotFound) { 915 | t.Fatal(err) 916 | } 917 | } 918 | 919 | r1Res, err := replicas[1].Get(ctx, ds.NewKey("k1")) 920 | if err != nil { 921 | t.Fatal(err) 922 | } 923 | closeReplicas() 924 | 925 | if string(r0Res) != string(r1Res) { 926 | fmt.Printf("r0Res: %s\nr1Res: %s\n", string(r0Res), 
string(r1Res)) 927 | t.Log("r0 dag") 928 | replicas[0].PrintDAG(ctx) 929 | 930 | t.Log("r1 dag") 931 | replicas[1].PrintDAG(ctx) 932 | 933 | t.Fatal("r0 and r1 should have the same value") 934 | } 935 | } 936 | 937 | func TestMigration0to1(t *testing.T) { 938 | replicas, closeReplicas := makeNReplicas(t, 1, nil) 939 | defer closeReplicas() 940 | replica := replicas[0] 941 | ctx := context.Background() 942 | 943 | nItems := 200 944 | var keys []ds.Key 945 | // Add nItems 946 | for i := 0; i < nItems; i++ { 947 | k := ds.RandomKey() 948 | keys = append(keys, k) 949 | v := []byte(fmt.Sprintf("%d", i)) 950 | err := replica.Put(ctx, k, v) 951 | if err != nil { 952 | t.Fatal(err) 953 | } 954 | 955 | } 956 | 957 | // Overwrite n/2 items 5 times to have multiple tombstones per key 958 | // later... 959 | for j := 0; j < 5; j++ { 960 | for i := 0; i < nItems/2; i++ { 961 | v := []byte(fmt.Sprintf("%d", i)) 962 | err := replica.Put(ctx, keys[i], v) 963 | if err != nil { 964 | t.Fatal(err) 965 | } 966 | } 967 | } 968 | 969 | // delete keys 970 | for i := 0; i < nItems/2; i++ { 971 | err := replica.Delete(ctx, keys[i]) 972 | if err != nil { 973 | t.Fatal(err) 974 | } 975 | } 976 | 977 | // And write them again 978 | for i := 0; i < nItems/2; i++ { 979 | err := replica.Put(ctx, keys[i], []byte("final value")) 980 | if err != nil { 981 | t.Fatal(err) 982 | } 983 | } 984 | 985 | // And now we manually put the wrong value 986 | for i := 0; i < nItems/2; i++ { 987 | valueK := replica.set.valueKey(keys[i].String()) 988 | err := replica.set.store.Put(ctx, valueK, []byte("wrong value")) 989 | if err != nil { 990 | t.Fatal(err) 991 | } 992 | err = replica.set.setPriority(ctx, replica.set.store, keys[i].String(), 1) 993 | if err != nil { 994 | t.Fatal(err) 995 | } 996 | } 997 | 998 | err := replica.migrate0to1(ctx) 999 | if err != nil { 1000 | t.Fatal(err) 1001 | } 1002 | 1003 | for i := 0; i < nItems/2; i++ { 1004 | v, err := replica.Get(ctx, keys[i]) 1005 | if err != nil { 1006 | t.Fatal(err) 1007 | } 1008 | if string(v) != "final value" { 1009 | t.Fatalf("value for elem %d should be final value: %s", i, string(v)) 1010 | } 1011 | } 1012 | } 1013 | -------------------------------------------------------------------------------- /examples/globaldb/.gitignore: -------------------------------------------------------------------------------- 1 | data/ 2 | -------------------------------------------------------------------------------- /examples/globaldb/README.md: -------------------------------------------------------------------------------- 1 | # Example GlobalDB CLI 2 | 3 | This repository contains an example command-line interface (CLI) tool for joining a global, permissionless, CRDT-based database using CRDTs, IPFS & libp2p. 4 | 5 | ## Features 6 | 7 | - Join a global CRDT-based database with IPFS. 8 | - Store and retrieve key-value pairs in a distributed datastore. 9 | - Subscribe to a pubsub topic to receive updates in real-time. 10 | - Bootstrap and connect to other peers in the network. 11 | - Operate in daemon mode for continuous operation. 12 | - Simple CLI commands to interact with the database. 13 | 14 | ## Building 15 | 16 | To build the example GlobalDB CLI, clone this repository and build the binary: 17 | 18 | ```bash 19 | git clone https://github.com/ipfs/go-ds-crdt 20 | cd examples/globaldb 21 | go build -o globaldb 22 | ``` 23 | 24 | Ensure that you have Go installed and set up in your environment. 
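Alternatively, with a Go 1.24+ toolchain (the version declared in the
example's `go.mod`) you can run it directly without producing a binary:

```bash
go run . -datadir /path/to/data
```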
25 | 26 | ## Usage 27 | 28 | Run the CLI with: 29 | 30 | ```bash 31 | ./globaldb [options] 32 | ``` 33 | 34 | ### Options 35 | 36 | - `-daemon`: Run in daemon mode. 37 | - `-datadir`: Specify a directory for storing the local database and keys. 38 | 39 | ### Commands 40 | 41 | Once running, the CLI provides the following interactive commands: 42 | 43 | - `list`: List all items in the store. 44 | - `get `: Retrieve the value for a specified key. 45 | - `put `: Store a value with a specified key. 46 | - `connect `: Connect to a peer using its multiaddress. 47 | - `debug `: Enable or disable debug logging, list connected peers, show pubsub subscribers 48 | - `exit`: Quit the CLI. 49 | 50 | ### Example 51 | 52 | Starting the CLI: 53 | 54 | ```bash 55 | ./globaldb -datadir /path/to/data 56 | ``` 57 | 58 | Interacting with the database: 59 | 60 | ```plaintext 61 | > put exampleKey exampleValue 62 | > get exampleKey 63 | [exampleKey] -> exampleValue 64 | > list 65 | [exampleKey] -> exampleValue 66 | > connect /ip4/192.168.1.3/tcp/33123/p2p/12D3KooWEkgRTTXGsmFLBembMHxVPDcidJyqFcrqbm9iBE1xhdXq 67 | ``` 68 | 69 | ### Daemon Mode 70 | 71 | To run in daemon mode, use: 72 | 73 | ```bash 74 | ./globaldb -daemon -datadir /path/to/data 75 | ``` 76 | 77 | The CLI will keep running, periodically reporting the number of connected peers and those subscribed to the crdt topic. 78 | 79 | ## Technical Details 80 | 81 | The GlobalDB CLI leverages the following components: 82 | 83 | - **IPFS Lite**: Provides a lightweight IPFS node for peer-to-peer networking. 84 | - **Libp2p PubSub**: Enables decentralized communication using the GossipSub protocol. 85 | - **CRDTs**: Ensure conflict-free synchronization of data across distributed peers. 86 | - **Badger Datastore**: A high-performance datastore for storing key-value pairs. 87 | -------------------------------------------------------------------------------- /examples/globaldb/globaldb.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // This is a CLI that lets you join a global permissionless CRDT-based 4 | // database using CRDTs and IPFS. 5 | 6 | import ( 7 | "bufio" 8 | "context" 9 | "flag" 10 | "fmt" 11 | "os" 12 | "os/signal" 13 | "path/filepath" 14 | "strconv" 15 | "strings" 16 | "syscall" 17 | "time" 18 | 19 | ds "github.com/ipfs/go-datastore" 20 | "github.com/ipfs/go-datastore/query" 21 | badger "github.com/ipfs/go-ds-badger" 22 | crdt "github.com/ipfs/go-ds-crdt" 23 | logging "github.com/ipfs/go-log/v2" 24 | 25 | pubsub "github.com/libp2p/go-libp2p-pubsub" 26 | crypto "github.com/libp2p/go-libp2p/core/crypto" 27 | "github.com/libp2p/go-libp2p/core/host" 28 | "github.com/libp2p/go-libp2p/core/peer" 29 | 30 | ipfslite "github.com/hsanjuan/ipfs-lite" 31 | 32 | multiaddr "github.com/multiformats/go-multiaddr" 33 | ) 34 | 35 | var ( 36 | logger = logging.Logger("globaldb") 37 | topicName = "globaldb-example" 38 | netTopic = "globaldb-example-net" 39 | config = "globaldb-example" 40 | ) 41 | 42 | func main() { 43 | daemonMode := flag.Bool("daemon", false, "Run in daemon mode") 44 | dataDir := flag.String("datadir", "", "Use a custom data directory") 45 | port := flag.String("port", "0", "Specify the TCP port to listen on") 46 | 47 | flag.Parse() 48 | 49 | if *port != "" { 50 | parsedPort, err := strconv.ParseUint(*port, 10, 32) 51 | if err != nil || parsedPort > 65535 { 52 | logger.Fatal("Specify a valid TCP port") 53 | } 54 | } 55 | 56 | // Bootstrappers are using 1024 keys. 
See: 57 | // https://github.com/ipfs/infra/issues/378 58 | crypto.MinRsaKeyBits = 1024 59 | 60 | logging.SetLogLevel("*", "error") 61 | ctx, cancel := context.WithCancel(context.Background()) 62 | defer cancel() 63 | 64 | data := "" 65 | 66 | if dataDir == nil || *dataDir == "" { 67 | dir, err := os.MkdirTemp("", "globaldb-example") 68 | if err != nil { 69 | logger.Fatal(err) 70 | } 71 | defer os.RemoveAll(dir) 72 | data = dir + "/" + config 73 | } else { 74 | // check if the directory exists or create it 75 | _, err := os.Stat(*dataDir) 76 | if os.IsNotExist(err) { 77 | err = os.Mkdir(*dataDir, 0755) 78 | if err != nil { 79 | logger.Fatal(err) 80 | } 81 | } 82 | data = *dataDir + "/" + config 83 | } 84 | 85 | store, err := badger.NewDatastore(data, &badger.DefaultOptions) 86 | if err != nil { 87 | logger.Fatal(err) 88 | } 89 | defer store.Close() 90 | 91 | keyPath := filepath.Join(data, "key") 92 | var priv crypto.PrivKey 93 | _, err = os.Stat(keyPath) 94 | if os.IsNotExist(err) { 95 | priv, _, err = crypto.GenerateKeyPair(crypto.Ed25519, 1) 96 | if err != nil { 97 | logger.Fatal(err) 98 | } 99 | data, err := crypto.MarshalPrivateKey(priv) 100 | if err != nil { 101 | logger.Fatal(err) 102 | } 103 | err = os.WriteFile(keyPath, data, 0400) 104 | if err != nil { 105 | logger.Fatal(err) 106 | } 107 | } else if err != nil { 108 | logger.Fatal(err) 109 | } else { 110 | key, err := os.ReadFile(keyPath) 111 | if err != nil { 112 | logger.Fatal(err) 113 | } 114 | priv, err = crypto.UnmarshalPrivateKey(key) 115 | if err != nil { 116 | logger.Fatal(err) 117 | } 118 | 119 | } 120 | pid, err := peer.IDFromPublicKey(priv.GetPublic()) 121 | if err != nil { 122 | logger.Fatal(err) 123 | } 124 | 125 | listen, _ := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/" + *port) 126 | 127 | h, dht, err := ipfslite.SetupLibp2p( 128 | ctx, 129 | priv, 130 | nil, 131 | []multiaddr.Multiaddr{listen}, 132 | nil, 133 | ipfslite.Libp2pOptionsExtra..., 134 | ) 135 | if err != nil { 136 | logger.Fatal(err) 137 | } 138 | defer h.Close() 139 | defer dht.Close() 140 | 141 | psub, err := pubsub.NewGossipSub(ctx, h) 142 | if err != nil { 143 | logger.Fatal(err) 144 | } 145 | 146 | topic, err := psub.Join(netTopic) 147 | if err != nil { 148 | logger.Fatal(err) 149 | } 150 | 151 | netSubs, err := topic.Subscribe() 152 | if err != nil { 153 | logger.Fatal(err) 154 | } 155 | 156 | // Use a special pubsub topic to avoid disconnecting 157 | // from globaldb peers. 
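	// Peers whose messages arrive on the net topic are tagged in the
	// connection manager ("keep"), making those connections less likely to be
	// pruned; the second goroutine below sends a small keep-alive message
	// every 20 seconds.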
158 | go func() { 159 | for { 160 | msg, err := netSubs.Next(ctx) 161 | if err != nil { 162 | fmt.Println(err) 163 | break 164 | } 165 | h.ConnManager().TagPeer(msg.ReceivedFrom, "keep", 100) 166 | } 167 | }() 168 | 169 | go func() { 170 | for { 171 | select { 172 | case <-ctx.Done(): 173 | return 174 | default: 175 | topic.Publish(ctx, []byte("hi!")) 176 | time.Sleep(20 * time.Second) 177 | } 178 | } 179 | }() 180 | 181 | ipfs, err := ipfslite.New(ctx, store, nil, h, dht, nil) 182 | if err != nil { 183 | logger.Fatal(err) 184 | } 185 | 186 | psubCtx, psubCancel := context.WithCancel(ctx) 187 | pubsubBC, err := crdt.NewPubSubBroadcaster(psubCtx, psub, topicName) 188 | if err != nil { 189 | logger.Fatal(err) 190 | } 191 | 192 | opts := crdt.DefaultOptions() 193 | opts.Logger = logger 194 | opts.RebroadcastInterval = 5 * time.Second 195 | opts.PutHook = func(k ds.Key, v []byte) { 196 | fmt.Printf("Added: [%s] -> %s\n", k, string(v)) 197 | 198 | } 199 | opts.DeleteHook = func(k ds.Key) { 200 | fmt.Printf("Removed: [%s]\n", k) 201 | } 202 | 203 | crdt, err := crdt.New(store, ds.NewKey("crdt"), ipfs, pubsubBC, opts) 204 | if err != nil { 205 | logger.Fatal(err) 206 | } 207 | defer crdt.Close() 208 | defer psubCancel() 209 | 210 | fmt.Println("Bootstrapping...") 211 | 212 | bstr, _ := multiaddr.NewMultiaddr("/ip4/94.130.135.167/tcp/33123/ipfs/12D3KooWFta2AE7oiK1ioqjVAKajUJauZWfeM7R413K7ARtHRDAu") 213 | inf, _ := peer.AddrInfoFromP2pAddr(bstr) 214 | list := append(ipfslite.DefaultBootstrapPeers(), *inf) 215 | ipfs.Bootstrap(list) 216 | h.ConnManager().TagPeer(inf.ID, "keep", 100) 217 | 218 | fmt.Printf(` 219 | Peer ID: %s 220 | Topic: %s 221 | Data Folder: %s 222 | Listen addresses: 223 | %s 224 | 225 | Ready! 226 | `, 227 | pid, topicName, data, listenAddrs(h), 228 | ) 229 | 230 | if *daemonMode { 231 | fmt.Println("Running in daemon mode") 232 | go func() { 233 | for { 234 | fmt.Printf( 235 | "%s - %d connected peers - %d peers in topic\n", 236 | time.Now().Format(time.Stamp), 237 | len(connectedPeers(h)), 238 | len(topic.ListPeers()), 239 | ) 240 | time.Sleep(10 * time.Second) 241 | } 242 | }() 243 | signalChan := make(chan os.Signal, 20) 244 | signal.Notify( 245 | signalChan, 246 | syscall.SIGINT, 247 | syscall.SIGTERM, 248 | syscall.SIGHUP, 249 | ) 250 | <-signalChan 251 | return 252 | } 253 | 254 | commands := ` 255 | > (l)ist -> list items in the store 256 | > (g)get -> get value for a key 257 | > (p)ut -> store value on a key 258 | > (d)elete -> delete a key 259 | > (c)onnect -> connect a multiaddr 260 | > print -> Print DAG 261 | > debug -> enable/disable debug logging 262 | show connected peers 263 | show pubsub subscribers 264 | > exit -> quit 265 | 266 | 267 | ` 268 | fmt.Printf("%s", commands) 269 | 270 | fmt.Printf("> ") 271 | scanner := bufio.NewScanner(os.Stdin) 272 | for scanner.Scan() { 273 | text := scanner.Text() 274 | fields := strings.Fields(text) 275 | if len(fields) == 0 { 276 | fmt.Printf("> ") 277 | continue 278 | } 279 | 280 | cmd := fields[0] 281 | 282 | switch cmd { 283 | case "exit", "quit": 284 | return 285 | case "?", "help", "h": 286 | fmt.Printf("%s", commands) 287 | fmt.Printf("> ") 288 | continue 289 | case "debug": 290 | if len(fields) < 2 { 291 | fmt.Println("debug ") 292 | fmt.Println("> ") 293 | continue 294 | } 295 | st := fields[1] 296 | switch st { 297 | case "on": 298 | logging.SetLogLevel("globaldb", "debug") 299 | case "off": 300 | logging.SetLogLevel("globaldb", "error") 301 | case "peers": 302 | for _, p := range connectedPeers(h) { 303 | addrs, err 
:= peer.AddrInfoToP2pAddrs(p) 304 | if err != nil { 305 | logger.Warn(err) 306 | continue 307 | } 308 | for _, a := range addrs { 309 | fmt.Println(a) 310 | } 311 | } 312 | case "subs": 313 | for _, p := range topic.ListPeers() { 314 | fmt.Println(p.String()) 315 | } 316 | } 317 | case "l", "list": 318 | q := query.Query{} 319 | results, err := crdt.Query(ctx, q) 320 | if err != nil { 321 | printErr(err) 322 | } 323 | for r := range results.Next() { 324 | if r.Error != nil { 325 | printErr(err) 326 | continue 327 | } 328 | fmt.Printf("[%s] -> %s\n", r.Key, string(r.Value)) 329 | } 330 | case "g", "get": 331 | if len(fields) < 2 { 332 | fmt.Println("get ") 333 | fmt.Printf("> ") 334 | continue 335 | } 336 | k := ds.NewKey(fields[1]) 337 | v, err := crdt.Get(ctx, k) 338 | if err != nil { 339 | printErr(err) 340 | continue 341 | } 342 | fmt.Printf("[%s] -> %s\n", k, string(v)) 343 | case "p", "put": 344 | if len(fields) < 3 { 345 | fmt.Println("put ") 346 | fmt.Printf("> ") 347 | continue 348 | } 349 | k := ds.NewKey(fields[1]) 350 | v := strings.Join(fields[2:], " ") 351 | err := crdt.Put(ctx, k, []byte(v)) 352 | if err != nil { 353 | printErr(err) 354 | continue 355 | } 356 | case "d", "delete": 357 | if len(fields) < 2 { 358 | fmt.Println("delete ") 359 | fmt.Printf("> ") 360 | continue 361 | } 362 | k := ds.NewKey(fields[1]) 363 | err := crdt.Delete(ctx, k) 364 | if err != nil { 365 | printErr(err) 366 | continue 367 | } 368 | case "c", "connect": 369 | if len(fields) < 2 { 370 | fmt.Println("connect ") 371 | fmt.Printf("> ") 372 | continue 373 | } 374 | ma, err := multiaddr.NewMultiaddr(fields[1]) 375 | if err != nil { 376 | printErr(err) 377 | continue 378 | } 379 | peerInfo, err := peer.AddrInfoFromP2pAddr(ma) 380 | if err != nil { 381 | printErr(err) 382 | continue 383 | } 384 | h.Peerstore().AddAddr(peerInfo.ID, peerInfo.Addrs[0], 300) 385 | err = h.Connect(ctx, *peerInfo) 386 | if err != nil { 387 | printErr(err) 388 | continue 389 | } 390 | case "print": 391 | crdt.PrintDAG(ctx) 392 | } 393 | fmt.Printf("> ") 394 | } 395 | } 396 | 397 | func printErr(err error) { 398 | fmt.Println("error:", err) 399 | fmt.Println("> ") 400 | } 401 | 402 | func connectedPeers(h host.Host) []*peer.AddrInfo { 403 | var pinfos []*peer.AddrInfo 404 | for _, c := range h.Network().Conns() { 405 | pinfos = append(pinfos, &peer.AddrInfo{ 406 | ID: c.RemotePeer(), 407 | Addrs: []multiaddr.Multiaddr{c.RemoteMultiaddr()}, 408 | }) 409 | } 410 | return pinfos 411 | } 412 | 413 | func listenAddrs(h host.Host) string { 414 | var addrs []string 415 | for _, c := range h.Addrs() { 416 | ma, _ := multiaddr.NewMultiaddr(c.String() + "/p2p/" + h.ID().String()) 417 | addrs = append(addrs, ma.String()) 418 | } 419 | return strings.Join(addrs, "\n") 420 | } 421 | -------------------------------------------------------------------------------- /examples/globaldb/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ipfs/go-ds-crdt/examples/globaldb 2 | 3 | go 1.24 4 | 5 | toolchain go1.24.0 6 | 7 | require ( 8 | github.com/hsanjuan/ipfs-lite v1.8.4 9 | github.com/ipfs/go-datastore v0.8.2 10 | github.com/ipfs/go-ds-badger v0.3.4 11 | github.com/ipfs/go-ds-crdt v0.6.4 12 | github.com/ipfs/go-log/v2 v2.5.1 13 | github.com/libp2p/go-libp2p v0.41.0 14 | github.com/libp2p/go-libp2p-pubsub v0.13.0 15 | github.com/multiformats/go-multiaddr v0.15.0 16 | ) 17 | 18 | require ( 19 | github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect 20 | 
github.com/Jorropo/jsync v1.0.1 // indirect 21 | github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect 22 | github.com/benbjohnson/clock v1.3.5 // indirect 23 | github.com/beorn7/perks v1.0.1 // indirect 24 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 25 | github.com/containerd/cgroups v1.1.0 // indirect 26 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 27 | github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect 28 | github.com/cskr/pubsub v1.0.2 // indirect 29 | github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect 30 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect 31 | github.com/dgraph-io/badger v1.6.2 // indirect 32 | github.com/dgraph-io/ristretto v0.2.0 // indirect 33 | github.com/docker/go-units v0.5.0 // indirect 34 | github.com/dustin/go-humanize v1.0.1 // indirect 35 | github.com/elastic/gosigar v0.14.3 // indirect 36 | github.com/filecoin-project/go-clock v0.1.0 // indirect 37 | github.com/flynn/noise v1.1.0 // indirect 38 | github.com/francoispqt/gojay v1.2.13 // indirect 39 | github.com/gammazero/chanqueue v1.1.0 // indirect 40 | github.com/gammazero/deque v1.0.0 // indirect 41 | github.com/go-logr/logr v1.4.2 // indirect 42 | github.com/go-logr/stdr v1.2.2 // indirect 43 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 44 | github.com/godbus/dbus/v5 v5.1.0 // indirect 45 | github.com/gogo/protobuf v1.3.2 // indirect 46 | github.com/golang/protobuf v1.5.4 // indirect 47 | github.com/google/gopacket v1.1.19 // indirect 48 | github.com/google/pprof v0.0.0-20250315033105-103756e64e1d // indirect 49 | github.com/google/uuid v1.6.0 // indirect 50 | github.com/gorilla/websocket v1.5.3 // indirect 51 | github.com/hashicorp/golang-lru v1.0.2 // indirect 52 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 53 | github.com/huin/goupnp v1.3.0 // indirect 54 | github.com/ipfs/bbloom v0.0.4 // indirect 55 | github.com/ipfs/boxo v0.29.1 // indirect 56 | github.com/ipfs/go-bitfield v1.1.0 // indirect 57 | github.com/ipfs/go-block-format v0.2.0 // indirect 58 | github.com/ipfs/go-cid v0.5.0 // indirect 59 | github.com/ipfs/go-cidutil v0.1.0 // indirect 60 | github.com/ipfs/go-ipfs-delay v0.0.1 // indirect 61 | github.com/ipfs/go-ipfs-pq v0.0.3 // indirect 62 | github.com/ipfs/go-ipfs-util v0.0.3 // indirect 63 | github.com/ipfs/go-ipld-format v0.6.0 // indirect 64 | github.com/ipfs/go-ipld-legacy v0.2.1 // indirect 65 | github.com/ipfs/go-log v1.0.5 // indirect 66 | github.com/ipfs/go-metrics-interface v0.3.0 // indirect 67 | github.com/ipfs/go-peertaskqueue v0.8.2 // indirect 68 | github.com/ipld/go-codec-dagpb v1.7.0 // indirect 69 | github.com/ipld/go-ipld-prime v0.21.0 // indirect 70 | github.com/jackpal/go-nat-pmp v1.0.2 // indirect 71 | github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect 72 | github.com/klauspost/compress v1.18.0 // indirect 73 | github.com/klauspost/cpuid/v2 v2.2.10 // indirect 74 | github.com/koron/go-ssdp v0.0.5 // indirect 75 | github.com/libp2p/go-buffer-pool v0.1.0 // indirect 76 | github.com/libp2p/go-cidranger v1.1.0 // indirect 77 | github.com/libp2p/go-flow-metrics v0.2.0 // indirect 78 | github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect 79 | github.com/libp2p/go-libp2p-kad-dht v0.30.2 // indirect 80 | github.com/libp2p/go-libp2p-kbucket v0.6.5 // indirect 81 | github.com/libp2p/go-libp2p-record v0.3.1 // indirect 82 | github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect 83 | github.com/libp2p/go-msgio v0.3.0 // indirect 84 | 
github.com/libp2p/go-netroute v0.2.2 // indirect 85 | github.com/libp2p/go-reuseport v0.4.0 // indirect 86 | github.com/libp2p/go-yamux/v5 v5.0.0 // indirect 87 | github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect 88 | github.com/mattn/go-isatty v0.0.20 // indirect 89 | github.com/miekg/dns v1.1.63 // indirect 90 | github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect 91 | github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect 92 | github.com/minio/sha256-simd v1.0.1 // indirect 93 | github.com/mr-tron/base58 v1.2.0 // indirect 94 | github.com/multiformats/go-base32 v0.1.0 // indirect 95 | github.com/multiformats/go-base36 v0.2.0 // indirect 96 | github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect 97 | github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect 98 | github.com/multiformats/go-multibase v0.2.0 // indirect 99 | github.com/multiformats/go-multicodec v0.9.0 // indirect 100 | github.com/multiformats/go-multihash v0.2.3 // indirect 101 | github.com/multiformats/go-multistream v0.6.0 // indirect 102 | github.com/multiformats/go-varint v0.0.7 // indirect 103 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 104 | github.com/onsi/ginkgo/v2 v2.23.0 // indirect 105 | github.com/opencontainers/runtime-spec v1.2.1 // indirect 106 | github.com/opentracing/opentracing-go v1.2.0 // indirect 107 | github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect 108 | github.com/pion/datachannel v1.5.10 // indirect 109 | github.com/pion/dtls/v2 v2.2.12 // indirect 110 | github.com/pion/dtls/v3 v3.0.5 // indirect 111 | github.com/pion/ice/v4 v4.0.7 // indirect 112 | github.com/pion/interceptor v0.1.37 // indirect 113 | github.com/pion/logging v0.2.3 // indirect 114 | github.com/pion/mdns/v2 v2.0.7 // indirect 115 | github.com/pion/randutil v0.1.0 // indirect 116 | github.com/pion/rtcp v1.2.15 // indirect 117 | github.com/pion/rtp v1.8.13 // indirect 118 | github.com/pion/sctp v1.8.37 // indirect 119 | github.com/pion/sdp/v3 v3.0.11 // indirect 120 | github.com/pion/srtp/v3 v3.0.4 // indirect 121 | github.com/pion/stun v0.6.1 // indirect 122 | github.com/pion/stun/v3 v3.0.0 // indirect 123 | github.com/pion/transport/v2 v2.2.10 // indirect 124 | github.com/pion/transport/v3 v3.0.7 // indirect 125 | github.com/pion/turn/v4 v4.0.0 // indirect 126 | github.com/pion/webrtc/v4 v4.0.13 // indirect 127 | github.com/pkg/errors v0.9.1 // indirect 128 | github.com/polydawn/refmt v0.89.0 // indirect 129 | github.com/prometheus/client_golang v1.21.1 // indirect 130 | github.com/prometheus/client_model v0.6.1 // indirect 131 | github.com/prometheus/common v0.63.0 // indirect 132 | github.com/prometheus/procfs v0.15.1 // indirect 133 | github.com/quic-go/qpack v0.5.1 // indirect 134 | github.com/quic-go/quic-go v0.50.0 // indirect 135 | github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect 136 | github.com/raulk/go-watchdog v1.3.0 // indirect 137 | github.com/spaolacci/murmur3 v1.1.0 // indirect 138 | github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect 139 | github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect 140 | github.com/wlynxg/anet v0.0.5 // indirect 141 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 142 | go.opentelemetry.io/otel v1.35.0 // indirect 143 | go.opentelemetry.io/otel/metric v1.35.0 // indirect 144 | go.opentelemetry.io/otel/trace v1.35.0 // indirect 145 | go.uber.org/dig v1.18.1 // indirect 
146 | go.uber.org/fx v1.23.0 // indirect 147 | go.uber.org/mock v0.5.0 // indirect 148 | go.uber.org/multierr v1.11.0 // indirect 149 | go.uber.org/zap v1.27.0 // indirect 150 | golang.org/x/crypto v0.36.0 // indirect 151 | golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect 152 | golang.org/x/mod v0.24.0 // indirect 153 | golang.org/x/net v0.37.0 // indirect 154 | golang.org/x/sync v0.12.0 // indirect 155 | golang.org/x/sys v0.31.0 // indirect 156 | golang.org/x/text v0.23.0 // indirect 157 | golang.org/x/tools v0.31.0 // indirect 158 | gonum.org/v1/gonum v0.15.1 // indirect 159 | google.golang.org/protobuf v1.36.5 // indirect 160 | lukechampine.com/blake3 v1.4.0 // indirect 161 | ) 162 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ipfs/go-ds-crdt 2 | 3 | go 1.24 4 | 5 | toolchain go1.24.0 6 | 7 | require ( 8 | github.com/dgraph-io/badger v1.6.2 9 | github.com/ipfs/boxo v0.29.1 10 | github.com/ipfs/go-cid v0.5.0 11 | github.com/ipfs/go-datastore v0.8.2 12 | github.com/ipfs/go-ds-badger v0.3.4 13 | github.com/ipfs/go-ipld-format v0.6.0 14 | github.com/ipfs/go-log/v2 v2.5.1 15 | github.com/libp2p/go-libp2p-pubsub v0.13.0 16 | github.com/multiformats/go-multihash v0.2.3 17 | go.uber.org/multierr v1.11.0 18 | google.golang.org/protobuf v1.36.5 19 | ) 20 | 21 | require ( 22 | github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect 23 | github.com/cespare/xxhash v1.1.0 // indirect 24 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect 25 | github.com/dgraph-io/ristretto v0.0.2 // indirect 26 | github.com/dustin/go-humanize v1.0.1 // indirect 27 | github.com/gammazero/deque v1.0.0 // indirect 28 | github.com/go-logr/logr v1.4.2 // indirect 29 | github.com/go-logr/stdr v1.2.2 // indirect 30 | github.com/gogo/protobuf v1.3.2 // indirect 31 | github.com/golang/protobuf v1.5.3 // indirect 32 | github.com/google/uuid v1.6.0 // indirect 33 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 34 | github.com/ipfs/bbloom v0.0.4 // indirect 35 | github.com/ipfs/go-block-format v0.2.0 // indirect 36 | github.com/ipfs/go-detect-race v0.0.1 // indirect 37 | github.com/ipfs/go-ipfs-util v0.0.3 // indirect 38 | github.com/ipfs/go-ipld-legacy v0.2.1 // indirect 39 | github.com/ipfs/go-metrics-interface v0.3.0 // indirect 40 | github.com/ipld/go-codec-dagpb v1.6.0 // indirect 41 | github.com/ipld/go-ipld-prime v0.21.0 // indirect 42 | github.com/klauspost/cpuid/v2 v2.2.10 // indirect 43 | github.com/libp2p/go-buffer-pool v0.1.0 // indirect 44 | github.com/libp2p/go-libp2p v0.41.0 // indirect 45 | github.com/libp2p/go-msgio v0.3.0 // indirect 46 | github.com/mattn/go-isatty v0.0.20 // indirect 47 | github.com/minio/sha256-simd v1.0.1 // indirect 48 | github.com/mr-tron/base58 v1.2.0 // indirect 49 | github.com/multiformats/go-base32 v0.1.0 // indirect 50 | github.com/multiformats/go-base36 v0.2.0 // indirect 51 | github.com/multiformats/go-multiaddr v0.15.0 // indirect 52 | github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect 53 | github.com/multiformats/go-multibase v0.2.0 // indirect 54 | github.com/multiformats/go-multicodec v0.9.0 // indirect 55 | github.com/multiformats/go-multistream v0.6.0 // indirect 56 | github.com/multiformats/go-varint v0.0.7 // indirect 57 | github.com/pkg/errors v0.9.1 // indirect 58 | github.com/polydawn/refmt v0.89.0 // indirect 59 | github.com/spaolacci/murmur3 v1.1.0 // 
indirect 60 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 61 | go.opentelemetry.io/otel v1.34.0 // indirect 62 | go.opentelemetry.io/otel/metric v1.34.0 // indirect 63 | go.opentelemetry.io/otel/trace v1.34.0 // indirect 64 | go.uber.org/zap v1.27.0 // indirect 65 | golang.org/x/crypto v0.36.0 // indirect 66 | golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa // indirect 67 | golang.org/x/net v0.37.0 // indirect 68 | golang.org/x/sys v0.31.0 // indirect 69 | lukechampine.com/blake3 v1.4.0 // indirect 70 | ) 71 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= 2 | github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= 3 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 4 | github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= 5 | github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 6 | github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 7 | github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 8 | github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= 9 | github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 10 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 11 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 12 | github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= 13 | github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 14 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 15 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 16 | github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= 17 | github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= 18 | github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 19 | github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= 20 | github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 21 | github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= 22 | github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 23 | github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= 24 | github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 25 | github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= 26 | github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= 27 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 28 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 29 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 30 | github.com/davidlazar/go-crypto 
v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= 31 | github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= 32 | github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= 33 | github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= 34 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= 35 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= 36 | github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= 37 | github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= 38 | github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po= 39 | github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= 40 | github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= 41 | github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= 42 | github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= 43 | github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 44 | github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 45 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 46 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 47 | github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= 48 | github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= 49 | github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= 50 | github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= 51 | github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= 52 | github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= 53 | github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= 54 | github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= 55 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 56 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 57 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 58 | github.com/gammazero/chanqueue v1.1.0 h1:yiwtloc1azhgGLFo2gMloJtQvkYD936Ai7tBfa+rYJw= 59 | github.com/gammazero/chanqueue v1.1.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= 60 | github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= 61 | github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo= 62 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 63 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 64 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 65 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 66 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 67 | 
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= 68 | github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= 69 | github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= 70 | github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= 71 | github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 72 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 73 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 74 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 75 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 76 | github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 77 | github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 78 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 79 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 80 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 81 | github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= 82 | github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= 83 | github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc= 84 | github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= 85 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 86 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 87 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 88 | github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= 89 | github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 90 | github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= 91 | github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 92 | github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= 93 | github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= 94 | github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 95 | github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= 96 | github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= 97 | github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 98 | github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= 99 | github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= 100 | github.com/ipfs/boxo v0.29.1 h1:z61ZT4YDfTHLjXTsu/+3wvJ8aJlExthDSOCpx6Nh8xc= 101 | github.com/ipfs/boxo v0.29.1/go.mod h1:MkDJStXiJS9U99cbAijHdcmwNfVn5DKYBmQCOgjY2NU= 102 | github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= 103 | github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= 104 | 
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= 105 | github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= 106 | github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= 107 | github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= 108 | github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= 109 | github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= 110 | github.com/ipfs/go-ds-badger v0.3.4 h1:MmqFicftE0KrwMC77WjXTrPuoUxhwyFsjKONSeWrlOo= 111 | github.com/ipfs/go-ds-badger v0.3.4/go.mod h1:HfqsKJcNnIr9ZhZ+rkwS1J5PpaWjJjg6Ipmxd7KPfZ8= 112 | github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= 113 | github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= 114 | github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= 115 | github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= 116 | github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= 117 | github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= 118 | github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= 119 | github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= 120 | github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= 121 | github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= 122 | github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= 123 | github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= 124 | github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= 125 | github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= 126 | github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= 127 | github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= 128 | github.com/ipfs/go-test v0.2.1 h1:/D/a8xZ2JzkYqcVcV/7HYlCnc7bv/pKHQiX5TdClkPE= 129 | github.com/ipfs/go-test v0.2.1/go.mod h1:dzu+KB9cmWjuJnXFDYJwC25T3j1GcN57byN+ixmK39M= 130 | github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= 131 | github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= 132 | github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= 133 | github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= 134 | github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= 135 | github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= 136 | github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= 137 | github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= 138 | github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= 139 | github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 140 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 141 | github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 142 | github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 143 | github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 144 | github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= 145 | github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= 146 | github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk= 147 | github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w= 148 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 149 | github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 150 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 151 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 152 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 153 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 154 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 155 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 156 | github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= 157 | github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= 158 | github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw= 159 | github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc= 160 | github.com/libp2p/go-libp2p v0.41.0 h1:JRaD39dqf/tBBGapJ0T38N73vOaDCsWgcx3mE6HgXWk= 161 | github.com/libp2p/go-libp2p v0.41.0/go.mod h1:Be8QYqC4JW6Xq8buukNeoZJjyT1XUDcGoIooCHm1ye4= 162 | github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= 163 | github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= 164 | github.com/libp2p/go-libp2p-pubsub v0.13.0 h1:RmFQ2XAy3zQtbt2iNPy7Tt0/3fwTnHpCQSSnmGnt1Ps= 165 | github.com/libp2p/go-libp2p-pubsub v0.13.0/go.mod h1:m0gpUOyrXKXdE7c8FNQ9/HLfWbxaEw7xku45w+PaqZo= 166 | github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= 167 | github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= 168 | github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= 169 | github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= 170 | github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= 171 | github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= 172 | github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8= 173 | github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE= 174 | github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= 175 | github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= 176 | github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po= 177 | github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= 178 | github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 179 | 
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= 180 | github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= 181 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 182 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 183 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 184 | github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY= 185 | github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs= 186 | github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= 187 | github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= 188 | github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= 189 | github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= 190 | github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= 191 | github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= 192 | github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= 193 | github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= 194 | github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 195 | github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 196 | github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= 197 | github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= 198 | github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= 199 | github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= 200 | github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= 201 | github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= 202 | github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= 203 | github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= 204 | github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= 205 | github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= 206 | github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= 207 | github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= 208 | github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= 209 | github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= 210 | github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= 211 | github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= 212 | github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= 213 | github.com/multiformats/go-multicodec 
v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= 214 | github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= 215 | github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= 216 | github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= 217 | github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA= 218 | github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg= 219 | github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= 220 | github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= 221 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 222 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 223 | github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= 224 | github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= 225 | github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= 226 | github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 227 | github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= 228 | github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= 229 | github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 230 | github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= 231 | github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= 232 | github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= 233 | github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= 234 | github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U= 235 | github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg= 236 | github.com/pion/ice/v4 v4.0.6 h1:jmM9HwI9lfetQV/39uD0nY4y++XZNPhvzIPCb8EwxUM= 237 | github.com/pion/ice/v4 v4.0.6/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= 238 | github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI= 239 | github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y= 240 | github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= 241 | github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= 242 | github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= 243 | github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= 244 | github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= 245 | github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= 246 | github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= 247 | github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= 248 | github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= 249 | github.com/pion/rtp v1.8.11 h1:17xjnY5WO5hgO6SD3/NTIUPvSFw/PbLsIJyz1r1yNIk= 250 | github.com/pion/rtp 
v1.8.11/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4= 251 | github.com/pion/sctp v1.8.36 h1:owNudmnz1xmhfYje5L/FCav3V9wpPRePHle3Zi+P+M0= 252 | github.com/pion/sctp v1.8.36/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= 253 | github.com/pion/sdp/v3 v3.0.10 h1:6MChLE/1xYB+CjumMw+gZ9ufp2DPApuVSnDT8t5MIgA= 254 | github.com/pion/sdp/v3 v3.0.10/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= 255 | github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M= 256 | github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ= 257 | github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= 258 | github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= 259 | github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= 260 | github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= 261 | github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= 262 | github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= 263 | github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= 264 | github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= 265 | github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM= 266 | github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA= 267 | github.com/pion/webrtc/v4 v4.0.10 h1:Hq/JLjhqLxi+NmCtE8lnRPDr8H4LcNvwg8OxVcdv56Q= 268 | github.com/pion/webrtc/v4 v4.0.10/go.mod h1:ViHLVaNpiuvaH8pdiuQxuA9awuE6KVzAXx3vVWilOck= 269 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 270 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 271 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 272 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 273 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 274 | github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= 275 | github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= 276 | github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA= 277 | github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= 278 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 279 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 280 | github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= 281 | github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= 282 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 283 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 284 | github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= 285 | github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= 286 | github.com/quic-go/quic-go v0.50.0 h1:3H/ld1pa3CYhkcc20TPIyG1bNsdhn9qZBGN3b9/UyUo= 287 | github.com/quic-go/quic-go v0.50.0/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E= 288 | github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 
h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= 289 | github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= 290 | github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= 291 | github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= 292 | github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 293 | github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 294 | github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= 295 | github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 296 | github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= 297 | github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= 298 | github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= 299 | github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= 300 | github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= 301 | github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 302 | github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 303 | github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 304 | github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= 305 | github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 306 | github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= 307 | github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= 308 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 309 | github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= 310 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 311 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 312 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 313 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 314 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 315 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 316 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 317 | github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= 318 | github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 319 | github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= 320 | github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= 321 | github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= 322 | github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= 323 | github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= 324 | 
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= 325 | github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= 326 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 327 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 328 | github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 329 | go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 330 | go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 331 | go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= 332 | go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= 333 | go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= 334 | go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= 335 | go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= 336 | go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= 337 | go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 338 | go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= 339 | go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= 340 | go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= 341 | go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= 342 | go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= 343 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 344 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 345 | go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= 346 | go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= 347 | go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= 348 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 349 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 350 | go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= 351 | go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= 352 | go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 353 | golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 354 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 355 | golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 356 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 357 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 358 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= 359 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= 360 | golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4= 361 | golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod 
h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk= 362 | golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 363 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 364 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 365 | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 366 | golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= 367 | golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= 368 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 369 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 370 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 371 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 372 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 373 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 374 | golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= 375 | golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 376 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 377 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 378 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 379 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 380 | golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= 381 | golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 382 | golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 383 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 384 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 385 | golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 386 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 387 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 388 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 389 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 390 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 391 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 392 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 393 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 394 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 395 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 396 | golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 397 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= 398 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= 399 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 400 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 401 | golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 402 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 403 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 404 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 405 | golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 406 | golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= 407 | golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= 408 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 409 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 410 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 411 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 412 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 413 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 414 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 415 | google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 416 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 417 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 418 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 419 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 420 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 421 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 422 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 423 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 424 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 425 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 426 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 427 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 428 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 429 | lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= 430 | lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= 431 | 
-------------------------------------------------------------------------------- /heads.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/binary" 7 | "errors" 8 | "sort" 9 | "strings" 10 | "sync" 11 | 12 | dshelp "github.com/ipfs/boxo/datastore/dshelp" 13 | cid "github.com/ipfs/go-cid" 14 | ds "github.com/ipfs/go-datastore" 15 | "github.com/ipfs/go-datastore/query" 16 | logging "github.com/ipfs/go-log/v2" 17 | ) 18 | 19 | // heads manages the current Merkle-CRDT heads. 20 | type heads struct { 21 | store ds.Datastore 22 | // cache contains the current contents of the store 23 | cache map[cid.Cid]uint64 24 | cacheMux sync.RWMutex 25 | namespace ds.Key 26 | logger logging.StandardLogger 27 | } 28 | 29 | func newHeads(ctx context.Context, store ds.Datastore, namespace ds.Key, logger logging.StandardLogger) (*heads, error) { 30 | hh := &heads{ 31 | store: store, 32 | namespace: namespace, 33 | logger: logger, 34 | cache: make(map[cid.Cid]uint64), 35 | } 36 | if err := hh.primeCache(ctx); err != nil { 37 | return nil, err 38 | } 39 | return hh, nil 40 | } 41 | 42 | func (hh *heads) key(c cid.Cid) ds.Key { 43 | // heads are stored under /<namespace>/<multihash of the head cid> 44 | return hh.namespace.Child(dshelp.MultihashToDsKey(c.Hash())) 45 | } 46 | 47 | func (hh *heads) write(ctx context.Context, store ds.Write, c cid.Cid, height uint64) error { 48 | buf := make([]byte, binary.MaxVarintLen64) 49 | n := binary.PutUvarint(buf, height) 50 | if n == 0 { 51 | return errors.New("error encoding height") 52 | } 53 | return store.Put(ctx, hh.key(c), buf[0:n]) 54 | } 55 | 56 | func (hh *heads) delete(ctx context.Context, store ds.Write, c cid.Cid) error { 57 | err := store.Delete(ctx, hh.key(c)) 58 | // The go-datastore API currently says Delete doesn't return 59 | // ErrNotFound, but it used to say otherwise. Leave this 60 | // here to be safe. 61 | if err == ds.ErrNotFound { 62 | return nil 63 | } 64 | return err 65 | } 66 | 67 | // IsHead returns whether the given cid is among the current heads and, if so, its height. 68 | func (hh *heads) IsHead(ctx context.Context, c cid.Cid) (bool, uint64, error) { 69 | var height uint64 70 | var ok bool 71 | hh.cacheMux.RLock() 72 | { 73 | height, ok = hh.cache[c] 74 | } 75 | hh.cacheMux.RUnlock() 76 | return ok, height, nil 77 | } 78 | 79 | func (hh *heads) Len(ctx context.Context) (int, error) { 80 | var ret int 81 | hh.cacheMux.RLock() 82 | { 83 | ret = len(hh.cache) 84 | } 85 | hh.cacheMux.RUnlock() 86 | return ret, nil 87 | } 88 | 89 | // Replace replaces a head with a new cid.
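// When the underlying datastore supports batching, the new head is written and the old one deleted in a single batch, and the in-memory cache is only updated after that batch commits successfully.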
90 | func (hh *heads) Replace(ctx context.Context, h, c cid.Cid, height uint64) error { 91 | hh.logger.Debugf("replacing DAG head: %s -> %s (new height: %d)", h, c, height) 92 | var store ds.Write = hh.store 93 | 94 | batchingDs, batching := store.(ds.Batching) 95 | var err error 96 | if batching { 97 | store, err = batchingDs.Batch(ctx) 98 | if err != nil { 99 | return err 100 | } 101 | } 102 | 103 | err = hh.write(ctx, store, c, height) 104 | if err != nil { 105 | return err 106 | } 107 | 108 | hh.cacheMux.Lock() 109 | defer hh.cacheMux.Unlock() 110 | 111 | if !batching { 112 | hh.cache[c] = height 113 | } 114 | 115 | err = hh.delete(ctx, store, h) 116 | if err != nil { 117 | return err 118 | } 119 | if !batching { 120 | delete(hh.cache, h) 121 | } 122 | 123 | if batching { 124 | err := store.(ds.Batch).Commit(ctx) 125 | if err != nil { 126 | return err 127 | } 128 | delete(hh.cache, h) 129 | hh.cache[c] = height 130 | } 131 | return nil 132 | } 133 | 134 | func (hh *heads) Add(ctx context.Context, c cid.Cid, height uint64) error { 135 | hh.logger.Debugf("adding new DAG head: %s (height: %d)", c, height) 136 | if err := hh.write(ctx, hh.store, c, height); err != nil { 137 | return err 138 | } 139 | 140 | hh.cacheMux.Lock() 141 | { 142 | hh.cache[c] = height 143 | } 144 | hh.cacheMux.Unlock() 145 | return nil 146 | } 147 | 148 | // List returns the list of current heads plus the max height. 149 | func (hh *heads) List(ctx context.Context) ([]cid.Cid, uint64, error) { 150 | var maxHeight uint64 151 | var heads []cid.Cid 152 | 153 | hh.cacheMux.RLock() 154 | { 155 | heads = make([]cid.Cid, 0, len(hh.cache)) 156 | for head, height := range hh.cache { 157 | heads = append(heads, head) 158 | if height > maxHeight { 159 | maxHeight = height 160 | } 161 | } 162 | } 163 | hh.cacheMux.RUnlock() 164 | 165 | sort.Slice(heads, func(i, j int) bool { 166 | ci := heads[i].Bytes() 167 | cj := heads[j].Bytes() 168 | return bytes.Compare(ci, cj) < 0 169 | }) 170 | 171 | return heads, maxHeight, nil 172 | } 173 | 174 | // primeCache builds the heads cache based on what's in storage; since 175 | // it is called from the constructor only we don't bother locking. 
176 | func (hh *heads) primeCache(ctx context.Context) (ret error) { 177 | q := query.Query{ 178 | Prefix: hh.namespace.String(), 179 | KeysOnly: false, 180 | } 181 | 182 | results, err := hh.store.Query(ctx, q) 183 | if err != nil { 184 | return err 185 | } 186 | defer results.Close() 187 | 188 | for r := range results.Next() { 189 | if r.Error != nil { 190 | return r.Error 191 | } 192 | headKey := ds.NewKey(strings.TrimPrefix(r.Key, hh.namespace.String())) 193 | headCid, err := dshelp.DsKeyToCidV1(headKey, cid.DagProtobuf) 194 | if err != nil { 195 | return err 196 | } 197 | height, n := binary.Uvarint(r.Value) 198 | if n <= 0 { 199 | return errors.New("error decoding height") 200 | } 201 | 202 | hh.cache[headCid] = height 203 | } 204 | 205 | return nil 206 | } 207 | -------------------------------------------------------------------------------- /heads_test.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "math/rand" 7 | "reflect" 8 | "sort" 9 | "testing" 10 | "time" 11 | 12 | "github.com/ipfs/go-cid" 13 | ds "github.com/ipfs/go-datastore" 14 | dssync "github.com/ipfs/go-datastore/sync" 15 | "github.com/multiformats/go-multihash" 16 | ) 17 | 18 | var headsTestNS = ds.NewKey("headstest") 19 | 20 | var randg = rand.New(rand.NewSource(time.Now().UnixNano())) 21 | 22 | // TODO we should also test with a non-batching store 23 | func newTestHeads(t *testing.T) *heads { 24 | t.Helper() 25 | ctx := context.Background() 26 | store := dssync.MutexWrap(ds.NewMapDatastore()) 27 | heads, err := newHeads(ctx, store, headsTestNS, &testLogger{ 28 | name: t.Name(), 29 | l: DefaultOptions().Logger, 30 | }) 31 | if err != nil { 32 | t.Fatal(err) 33 | } 34 | return heads 35 | } 36 | 37 | func newCID(t *testing.T) cid.Cid { 38 | t.Helper() 39 | var buf [32]byte 40 | _, _ = randg.Read(buf[:]) 41 | 42 | mh, err := multihash.Sum(buf[:], multihash.SHA2_256, -1) 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | return cid.NewCidV1(cid.DagProtobuf, mh) 47 | } 48 | 49 | func TestHeadsBasic(t *testing.T) { 50 | ctx := context.Background() 51 | 52 | heads := newTestHeads(t) 53 | l, err := heads.Len(ctx) 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | if l != 0 { 58 | t.Errorf("new heads should have Len==0, got: %d", l) 59 | } 60 | 61 | cidHeights := make(map[cid.Cid]uint64) 62 | numHeads := 5 63 | for i := 0; i < numHeads; i++ { 64 | c, height := newCID(t), uint64(randg.Int()) 65 | cidHeights[c] = height 66 | err := heads.Add(ctx, c, height) 67 | if err != nil { 68 | t.Fatal(err) 69 | } 70 | } 71 | 72 | assertHeads(t, heads, cidHeights) 73 | 74 | for c := range cidHeights { 75 | newC, newHeight := newCID(t), uint64(randg.Int()) 76 | err := heads.Replace(ctx, c, newC, newHeight) 77 | if err != nil { 78 | t.Fatal(err) 79 | } 80 | delete(cidHeights, c) 81 | cidHeights[newC] = newHeight 82 | assertHeads(t, heads, cidHeights) 83 | } 84 | 85 | // Now try creating a new heads object and make sure what we 86 | // stored before is still there. 
87 | err = heads.store.Sync(ctx, headsTestNS) 88 | if err != nil { 89 | t.Fatal(err) 90 | } 91 | 92 | heads, err = newHeads(ctx, heads.store, headsTestNS, &testLogger{ 93 | name: t.Name(), 94 | l: DefaultOptions().Logger, 95 | }) 96 | if err != nil { 97 | t.Fatal(err) 98 | } 99 | assertHeads(t, heads, cidHeights) 100 | } 101 | 102 | func assertHeads(t *testing.T, hh *heads, cidHeights map[cid.Cid]uint64) { 103 | t.Helper() 104 | ctx := context.Background() 105 | 106 | headCids, maxHeight, err := hh.List(ctx) 107 | if err != nil { 108 | t.Fatal(err) 109 | } 110 | 111 | var expectedMaxHeight uint64 112 | for _, height := range cidHeights { 113 | if height > expectedMaxHeight { 114 | expectedMaxHeight = height 115 | } 116 | } 117 | if maxHeight != expectedMaxHeight { 118 | t.Errorf("expected max height=%d, got=%d", expectedMaxHeight, maxHeight) 119 | } 120 | 121 | headsLen, err := hh.Len(ctx) 122 | if err != nil { 123 | t.Fatal(err) 124 | } 125 | if len(headCids) != headsLen { 126 | t.Errorf("expected len and list to agree, got listLen=%d, len=%d", len(headCids), headsLen) 127 | } 128 | 129 | cids := make([]cid.Cid, 0, len(cidHeights)) 130 | for c := range cidHeights { 131 | cids = append(cids, c) 132 | } 133 | sort.Slice(cids, func(i, j int) bool { 134 | ci := cids[i].Bytes() 135 | cj := cids[j].Bytes() 136 | return bytes.Compare(ci, cj) < 0 137 | }) 138 | if !reflect.DeepEqual(cids, headCids) { 139 | t.Errorf("given cids don't match cids returned by List: %v, %v", cids, headCids) 140 | } 141 | for _, c := range cids { 142 | present, height, err := hh.IsHead(ctx, c) 143 | if err != nil { 144 | t.Fatal(err) 145 | } 146 | 147 | if !present { 148 | t.Errorf("cid returned by List reported absent by IsHead: %v", c) 149 | } 150 | if height != cidHeights[c] { 151 | t.Errorf("expected cid %v to have height %d, got: %d", c, cidHeights[c], height) 152 | } 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /ipld.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | dag "github.com/ipfs/boxo/ipld/merkledag" 8 | cid "github.com/ipfs/go-cid" 9 | pb "github.com/ipfs/go-ds-crdt/pb" 10 | ipld "github.com/ipfs/go-ipld-format" 11 | "google.golang.org/protobuf/proto" 12 | ) 13 | 14 | // IPLD related things 15 | 16 | var _ ipld.NodeGetter = (*crdtNodeGetter)(nil) 17 | 18 | // crdtNodeGetter wraps an ipld.NodeGetter with some additional utility methods 19 | type crdtNodeGetter struct { 20 | ipld.NodeGetter 21 | } 22 | 23 | func (ng *crdtNodeGetter) GetDelta(ctx context.Context, c cid.Cid) (ipld.Node, *pb.Delta, error) { 24 | nd, err := ng.Get(ctx, c) 25 | if err != nil { 26 | return nil, nil, err 27 | } 28 | delta, err := extractDelta(nd) 29 | return nd, delta, err 30 | } 31 | 32 | // GetPriority returns the priority (height) of the delta stored in the block at c. 33 | func (ng *crdtNodeGetter) GetPriority(ctx context.Context, c cid.Cid) (uint64, error) { 34 | _, delta, err := ng.GetDelta(ctx, c) 35 | if err != nil { 36 | return 0, err 37 | } 38 | return delta.Priority, nil 39 | } 40 | 41 | type deltaOption struct { 42 | delta *pb.Delta 43 | node ipld.Node 44 | err error 45 | } 46 | 47 | // GetDeltas uses GetMany to obtain many deltas.
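// Deltas are sent on the returned channel as soon as each node arrives; a fetch or decode failure is reported via the err field of the corresponding deltaOption rather than aborting the remaining results.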
48 | func (ng *crdtNodeGetter) GetDeltas(ctx context.Context, cids []cid.Cid) <-chan *deltaOption { 49 | deltaOpts := make(chan *deltaOption, 1) 50 | go func() { 51 | defer close(deltaOpts) 52 | nodeOpts := ng.GetMany(ctx, cids) 53 | for nodeOpt := range nodeOpts { 54 | if nodeOpt.Err != nil { 55 | deltaOpts <- &deltaOption{err: nodeOpt.Err} 56 | continue 57 | } 58 | delta, err := extractDelta(nodeOpt.Node) 59 | if err != nil { 60 | deltaOpts <- &deltaOption{err: err} 61 | continue 62 | } 63 | deltaOpts <- &deltaOption{ 64 | delta: delta, 65 | node: nodeOpt.Node, 66 | } 67 | } 68 | }() 69 | return deltaOpts 70 | } 71 | 72 | func extractDelta(nd ipld.Node) (*pb.Delta, error) { 73 | protonode, ok := nd.(*dag.ProtoNode) 74 | if !ok { 75 | return nil, errors.New("node is not a ProtoNode") 76 | } 77 | d := pb.Delta{} 78 | err := proto.Unmarshal(protonode.Data(), &d) 79 | return &d, err 80 | } 81 | 82 | func makeNode(delta *pb.Delta, heads []cid.Cid) (ipld.Node, error) { 83 | var data []byte 84 | var err error 85 | if delta != nil { 86 | data, err = proto.Marshal(delta) 87 | if err != nil { 88 | return nil, err 89 | } 90 | } 91 | 92 | nd := dag.NodeWithData(data) 93 | for _, h := range heads { 94 | err = nd.AddRawLink("", &ipld.Link{Cid: h}) 95 | if err != nil { 96 | return nil, err 97 | } 98 | } 99 | // Ensure we work with CIDv1 100 | nd.SetCidBuilder(dag.V1CidPrefix()) 101 | return nd, nil 102 | } 103 | -------------------------------------------------------------------------------- /migrations.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | import ( 4 | "context" 5 | "encoding/binary" 6 | "errors" 7 | "fmt" 8 | "strings" 9 | 10 | ds "github.com/ipfs/go-datastore" 11 | query "github.com/ipfs/go-datastore/query" 12 | ) 13 | 14 | // Use this to detect if we need to run migrations. 15 | var version uint64 = 1 16 | 17 | func (store *Datastore) versionKey() ds.Key { 18 | return store.namespace.ChildString(versionKey) 19 | } 20 | 21 | func (store *Datastore) getVersion(ctx context.Context) (uint64, error) { 22 | versionK := store.versionKey() 23 | data, err := store.store.Get(ctx, versionK) 24 | if err != nil { 25 | if err == ds.ErrNotFound { 26 | return 0, nil 27 | } 28 | return 0, err 29 | } 30 | 31 | v, n := binary.Uvarint(data) 32 | if n <= 0 { 33 | return v, errors.New("error decoding version") 34 | } 35 | return v - 1, nil 36 | } 37 | 38 | func (store *Datastore) setVersion(ctx context.Context, v uint64) error { 39 | versionK := store.versionKey() 40 | buf := make([]byte, binary.MaxVarintLen64) 41 | n := binary.PutUvarint(buf, v+1) 42 | if n == 0 { 43 | return errors.New("error encoding version") 44 | } 45 | 46 | return store.store.Put(ctx, versionK, buf[0:n]) 47 | } 48 | 49 | func (store *Datastore) applyMigrations(ctx context.Context) error { 50 | v, err := store.getVersion(ctx) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | switch v { 56 | case 0: // need to migrate 57 | err := store.migrate0to1(ctx) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | err = store.setVersion(ctx, 1) 63 | if err != nil { 64 | return err 65 | } 66 | fallthrough 67 | 68 | case version: 69 | store.logger.Infof("CRDT database format v%d", version) 70 | return nil 71 | } 72 | return nil 73 | } 74 | 75 | // migrate0to1 re-sets all the values and priorities of previously tombstoned 76 | // elements to deal with the aftermath of 77 | // https://github.com/ipfs/go-ds-crdt/issues/238. 
This bug caused that the 78 | // values/priorities of certain elements was wrong depending on tombstone 79 | // arrival order. 80 | func (store *Datastore) migrate0to1(ctx context.Context) error { 81 | // 1. Find keys for which we have tombstones 82 | // 2. Loop them 83 | // 3. Find/set best value for them 84 | 85 | s := store.set 86 | tombsPrefix := s.keyPrefix(tombsNs) // /ns/tombs 87 | q := query.Query{ 88 | Prefix: tombsPrefix.String(), 89 | KeysOnly: true, 90 | } 91 | 92 | var rStore = store.store 93 | var wStore ds.Write = store.store 94 | var err error 95 | batchingDs, batching := wStore.(ds.Batching) 96 | if batching { 97 | wStore, err = batchingDs.Batch(ctx) 98 | if err != nil { 99 | return err 100 | } 101 | } 102 | 103 | results, err := rStore.Query(ctx, q) 104 | if err != nil { 105 | return err 106 | } 107 | defer results.Close() 108 | 109 | // Results are not going to be ordered per key (I tested). Therefore, 110 | // we can keep a list of keys in memory to avoid findingBestValue for 111 | // every tombstone block entry, or we can repeat the operation every 112 | // time there is a tombstone for the same key. Given this is a one 113 | // time operation that only affects tombstoned keys, we opt to 114 | // de-duplicate. 115 | 116 | var total int 117 | doneKeys := make(map[string]struct{}) 118 | for r := range results.Next() { 119 | if r.Error != nil { 120 | return r.Error 121 | } 122 | 123 | // Switch from /ns/tombs/key/block to /key 124 | dskey := ds.NewKey( 125 | strings.TrimPrefix(r.Key, tombsPrefix.String())) 126 | // Switch from /key/block to /key 127 | key := dskey.Parent().String() 128 | if _, ok := doneKeys[key]; ok { 129 | continue 130 | } 131 | doneKeys[key] = struct{}{} 132 | 133 | valueK := s.valueKey(key) 134 | v, p, err := s.findBestValue(ctx, key, nil) 135 | if err != nil { 136 | return fmt.Errorf("error finding best value for %s: %w", key, err) 137 | } 138 | 139 | if v == nil { 140 | wStore.Delete(ctx, valueK) 141 | wStore.Delete(ctx, s.priorityKey(key)) 142 | } else { 143 | wStore.Put(ctx, valueK, v) 144 | s.setPriority(ctx, wStore, key, p) 145 | } 146 | total++ 147 | } 148 | 149 | if batching { 150 | err := wStore.(ds.Batch).Commit(ctx) 151 | if err != nil { 152 | return err 153 | } 154 | } 155 | 156 | s.logger.Infof("Migration v0 to v1 finished (%d elements affected)", total) 157 | return nil 158 | } 159 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "author": "hsanjuan", 3 | "bugs": { 4 | "url": "https://github.com/ipfs/go-ds-crdt" 5 | }, 6 | "gx": { 7 | "dvcsimport": "github.com/ipfs/go-ds-crdt" 8 | }, 9 | "gxDependencies": [ 10 | { 11 | "author": "why", 12 | "hash": "QmP9i4G9nRcfKBnpk1A7CwU7ppLkSn2j6vJeWn2AJ8rfcN", 13 | "name": "go-merkledag", 14 | "version": "1.1.36" 15 | }, 16 | { 17 | "author": "hsanjuan", 18 | "hash": "QmcUuDu8FfWiUgZ8q3Rvvr4mwFTX5t1TKmVoWsB3FVHmhQ", 19 | "name": "protobuf", 20 | "version": "3.6.1" 21 | } 22 | ], 23 | "gxVersion": "0.14.0", 24 | "language": "go", 25 | "license": "Apache-2.0/MIT", 26 | "name": "go-ds-crdt", 27 | "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", 28 | "version": "0.0.1" 29 | } 30 | 31 | -------------------------------------------------------------------------------- /pb/bcast.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 
2 | // versions: 3 | // protoc-gen-go v1.35.1 4 | // protoc v5.28.3 5 | // source: bcast.proto 6 | 7 | package pb 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | ) 15 | 16 | const ( 17 | // Verify that this generated code is sufficiently up-to-date. 18 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 19 | // Verify that runtime/protoimpl is sufficiently up-to-date. 20 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 21 | ) 22 | 23 | type CRDTBroadcast struct { 24 | state protoimpl.MessageState 25 | sizeCache protoimpl.SizeCache 26 | unknownFields protoimpl.UnknownFields 27 | 28 | Heads []*Head `protobuf:"bytes,1,rep,name=Heads,proto3" json:"Heads,omitempty"` // A list of heads 29 | } 30 | 31 | func (x *CRDTBroadcast) Reset() { 32 | *x = CRDTBroadcast{} 33 | mi := &file_bcast_proto_msgTypes[0] 34 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 35 | ms.StoreMessageInfo(mi) 36 | } 37 | 38 | func (x *CRDTBroadcast) String() string { 39 | return protoimpl.X.MessageStringOf(x) 40 | } 41 | 42 | func (*CRDTBroadcast) ProtoMessage() {} 43 | 44 | func (x *CRDTBroadcast) ProtoReflect() protoreflect.Message { 45 | mi := &file_bcast_proto_msgTypes[0] 46 | if x != nil { 47 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 48 | if ms.LoadMessageInfo() == nil { 49 | ms.StoreMessageInfo(mi) 50 | } 51 | return ms 52 | } 53 | return mi.MessageOf(x) 54 | } 55 | 56 | // Deprecated: Use CRDTBroadcast.ProtoReflect.Descriptor instead. 57 | func (*CRDTBroadcast) Descriptor() ([]byte, []int) { 58 | return file_bcast_proto_rawDescGZIP(), []int{0} 59 | } 60 | 61 | func (x *CRDTBroadcast) GetHeads() []*Head { 62 | if x != nil { 63 | return x.Heads 64 | } 65 | return nil 66 | } 67 | 68 | type Head struct { 69 | state protoimpl.MessageState 70 | sizeCache protoimpl.SizeCache 71 | unknownFields protoimpl.UnknownFields 72 | 73 | Cid []byte `protobuf:"bytes,1,opt,name=Cid,proto3" json:"Cid,omitempty"` 74 | } 75 | 76 | func (x *Head) Reset() { 77 | *x = Head{} 78 | mi := &file_bcast_proto_msgTypes[1] 79 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 80 | ms.StoreMessageInfo(mi) 81 | } 82 | 83 | func (x *Head) String() string { 84 | return protoimpl.X.MessageStringOf(x) 85 | } 86 | 87 | func (*Head) ProtoMessage() {} 88 | 89 | func (x *Head) ProtoReflect() protoreflect.Message { 90 | mi := &file_bcast_proto_msgTypes[1] 91 | if x != nil { 92 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 93 | if ms.LoadMessageInfo() == nil { 94 | ms.StoreMessageInfo(mi) 95 | } 96 | return ms 97 | } 98 | return mi.MessageOf(x) 99 | } 100 | 101 | // Deprecated: Use Head.ProtoReflect.Descriptor instead. 
102 | func (*Head) Descriptor() ([]byte, []int) { 103 | return file_bcast_proto_rawDescGZIP(), []int{1} 104 | } 105 | 106 | func (x *Head) GetCid() []byte { 107 | if x != nil { 108 | return x.Cid 109 | } 110 | return nil 111 | } 112 | 113 | var File_bcast_proto protoreflect.FileDescriptor 114 | 115 | var file_bcast_proto_rawDesc = []byte{ 116 | 0x0a, 0x0b, 0x62, 0x63, 0x61, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 117 | 0x72, 0x64, 0x74, 0x2e, 0x70, 0x62, 0x22, 0x34, 0x0a, 0x0d, 0x43, 0x52, 0x44, 0x54, 0x42, 0x72, 118 | 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x05, 0x48, 0x65, 0x61, 0x64, 0x73, 119 | 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x63, 0x72, 0x64, 0x74, 0x2e, 0x70, 0x62, 120 | 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x05, 0x48, 0x65, 0x61, 0x64, 0x73, 0x22, 0x18, 0x0a, 0x04, 121 | 0x48, 0x65, 0x61, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x43, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 122 | 0x0c, 0x52, 0x03, 0x43, 0x69, 0x64, 0x42, 0x06, 0x5a, 0x04, 0x2e, 0x3b, 0x70, 0x62, 0x62, 0x06, 123 | 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 124 | } 125 | 126 | var ( 127 | file_bcast_proto_rawDescOnce sync.Once 128 | file_bcast_proto_rawDescData = file_bcast_proto_rawDesc 129 | ) 130 | 131 | func file_bcast_proto_rawDescGZIP() []byte { 132 | file_bcast_proto_rawDescOnce.Do(func() { 133 | file_bcast_proto_rawDescData = protoimpl.X.CompressGZIP(file_bcast_proto_rawDescData) 134 | }) 135 | return file_bcast_proto_rawDescData 136 | } 137 | 138 | var file_bcast_proto_msgTypes = make([]protoimpl.MessageInfo, 2) 139 | var file_bcast_proto_goTypes = []any{ 140 | (*CRDTBroadcast)(nil), // 0: crdt.pb.CRDTBroadcast 141 | (*Head)(nil), // 1: crdt.pb.Head 142 | } 143 | var file_bcast_proto_depIdxs = []int32{ 144 | 1, // 0: crdt.pb.CRDTBroadcast.Heads:type_name -> crdt.pb.Head 145 | 1, // [1:1] is the sub-list for method output_type 146 | 1, // [1:1] is the sub-list for method input_type 147 | 1, // [1:1] is the sub-list for extension type_name 148 | 1, // [1:1] is the sub-list for extension extendee 149 | 0, // [0:1] is the sub-list for field type_name 150 | } 151 | 152 | func init() { file_bcast_proto_init() } 153 | func file_bcast_proto_init() { 154 | if File_bcast_proto != nil { 155 | return 156 | } 157 | type x struct{} 158 | out := protoimpl.TypeBuilder{ 159 | File: protoimpl.DescBuilder{ 160 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 161 | RawDescriptor: file_bcast_proto_rawDesc, 162 | NumEnums: 0, 163 | NumMessages: 2, 164 | NumExtensions: 0, 165 | NumServices: 0, 166 | }, 167 | GoTypes: file_bcast_proto_goTypes, 168 | DependencyIndexes: file_bcast_proto_depIdxs, 169 | MessageInfos: file_bcast_proto_msgTypes, 170 | }.Build() 171 | File_bcast_proto = out.File 172 | file_bcast_proto_rawDesc = nil 173 | file_bcast_proto_goTypes = nil 174 | file_bcast_proto_depIdxs = nil 175 | } 176 | -------------------------------------------------------------------------------- /pb/bcast.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package crdt.pb; 3 | 4 | option go_package=".;pb"; 5 | 6 | message CRDTBroadcast { 7 | repeated Head Heads = 1; // A list of heads 8 | } 9 | 10 | message Head { 11 | bytes Cid = 1; 12 | } -------------------------------------------------------------------------------- /pb/delta.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 
2 | // versions: 3 | // protoc-gen-go v1.35.1 4 | // protoc v5.28.3 5 | // source: delta.proto 6 | 7 | package pb 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | ) 15 | 16 | const ( 17 | // Verify that this generated code is sufficiently up-to-date. 18 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 19 | // Verify that runtime/protoimpl is sufficiently up-to-date. 20 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 21 | ) 22 | 23 | type Delta struct { 24 | state protoimpl.MessageState 25 | sizeCache protoimpl.SizeCache 26 | unknownFields protoimpl.UnknownFields 27 | 28 | Elements []*Element `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` 29 | Tombstones []*Element `protobuf:"bytes,2,rep,name=tombstones,proto3" json:"tombstones,omitempty"` 30 | Priority uint64 `protobuf:"varint,3,opt,name=priority,proto3" json:"priority,omitempty"` 31 | } 32 | 33 | func (x *Delta) Reset() { 34 | *x = Delta{} 35 | mi := &file_delta_proto_msgTypes[0] 36 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 37 | ms.StoreMessageInfo(mi) 38 | } 39 | 40 | func (x *Delta) String() string { 41 | return protoimpl.X.MessageStringOf(x) 42 | } 43 | 44 | func (*Delta) ProtoMessage() {} 45 | 46 | func (x *Delta) ProtoReflect() protoreflect.Message { 47 | mi := &file_delta_proto_msgTypes[0] 48 | if x != nil { 49 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 50 | if ms.LoadMessageInfo() == nil { 51 | ms.StoreMessageInfo(mi) 52 | } 53 | return ms 54 | } 55 | return mi.MessageOf(x) 56 | } 57 | 58 | // Deprecated: Use Delta.ProtoReflect.Descriptor instead. 59 | func (*Delta) Descriptor() ([]byte, []int) { 60 | return file_delta_proto_rawDescGZIP(), []int{0} 61 | } 62 | 63 | func (x *Delta) GetElements() []*Element { 64 | if x != nil { 65 | return x.Elements 66 | } 67 | return nil 68 | } 69 | 70 | func (x *Delta) GetTombstones() []*Element { 71 | if x != nil { 72 | return x.Tombstones 73 | } 74 | return nil 75 | } 76 | 77 | func (x *Delta) GetPriority() uint64 { 78 | if x != nil { 79 | return x.Priority 80 | } 81 | return 0 82 | } 83 | 84 | type Element struct { 85 | state protoimpl.MessageState 86 | sizeCache protoimpl.SizeCache 87 | unknownFields protoimpl.UnknownFields 88 | 89 | // key+id must form a unique identifier 90 | Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` 91 | Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` 92 | Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` 93 | } 94 | 95 | func (x *Element) Reset() { 96 | *x = Element{} 97 | mi := &file_delta_proto_msgTypes[1] 98 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 99 | ms.StoreMessageInfo(mi) 100 | } 101 | 102 | func (x *Element) String() string { 103 | return protoimpl.X.MessageStringOf(x) 104 | } 105 | 106 | func (*Element) ProtoMessage() {} 107 | 108 | func (x *Element) ProtoReflect() protoreflect.Message { 109 | mi := &file_delta_proto_msgTypes[1] 110 | if x != nil { 111 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 112 | if ms.LoadMessageInfo() == nil { 113 | ms.StoreMessageInfo(mi) 114 | } 115 | return ms 116 | } 117 | return mi.MessageOf(x) 118 | } 119 | 120 | // Deprecated: Use Element.ProtoReflect.Descriptor instead. 
121 | func (*Element) Descriptor() ([]byte, []int) { 122 | return file_delta_proto_rawDescGZIP(), []int{1} 123 | } 124 | 125 | func (x *Element) GetKey() string { 126 | if x != nil { 127 | return x.Key 128 | } 129 | return "" 130 | } 131 | 132 | func (x *Element) GetId() string { 133 | if x != nil { 134 | return x.Id 135 | } 136 | return "" 137 | } 138 | 139 | func (x *Element) GetValue() []byte { 140 | if x != nil { 141 | return x.Value 142 | } 143 | return nil 144 | } 145 | 146 | var File_delta_proto protoreflect.FileDescriptor 147 | 148 | var file_delta_proto_rawDesc = []byte{ 149 | 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 150 | 0x72, 0x64, 0x74, 0x2e, 0x70, 0x62, 0x22, 0x83, 0x01, 0x0a, 0x05, 0x44, 0x65, 0x6c, 0x74, 0x61, 151 | 0x12, 0x2c, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 152 | 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x72, 0x64, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6c, 0x65, 153 | 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 154 | 0x0a, 0x0a, 0x74, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 155 | 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x72, 0x64, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6c, 0x65, 156 | 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x74, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x73, 157 | 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 158 | 0x28, 0x04, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x41, 0x0a, 0x07, 159 | 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 160 | 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 161 | 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 162 | 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 163 | 0x06, 0x5a, 0x04, 0x2e, 0x3b, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 164 | } 165 | 166 | var ( 167 | file_delta_proto_rawDescOnce sync.Once 168 | file_delta_proto_rawDescData = file_delta_proto_rawDesc 169 | ) 170 | 171 | func file_delta_proto_rawDescGZIP() []byte { 172 | file_delta_proto_rawDescOnce.Do(func() { 173 | file_delta_proto_rawDescData = protoimpl.X.CompressGZIP(file_delta_proto_rawDescData) 174 | }) 175 | return file_delta_proto_rawDescData 176 | } 177 | 178 | var file_delta_proto_msgTypes = make([]protoimpl.MessageInfo, 2) 179 | var file_delta_proto_goTypes = []any{ 180 | (*Delta)(nil), // 0: crdt.pb.Delta 181 | (*Element)(nil), // 1: crdt.pb.Element 182 | } 183 | var file_delta_proto_depIdxs = []int32{ 184 | 1, // 0: crdt.pb.Delta.elements:type_name -> crdt.pb.Element 185 | 1, // 1: crdt.pb.Delta.tombstones:type_name -> crdt.pb.Element 186 | 2, // [2:2] is the sub-list for method output_type 187 | 2, // [2:2] is the sub-list for method input_type 188 | 2, // [2:2] is the sub-list for extension type_name 189 | 2, // [2:2] is the sub-list for extension extendee 190 | 0, // [0:2] is the sub-list for field type_name 191 | } 192 | 193 | func init() { file_delta_proto_init() } 194 | func file_delta_proto_init() { 195 | if File_delta_proto != nil { 196 | return 197 | } 198 | type x struct{} 199 | out := protoimpl.TypeBuilder{ 200 | File: protoimpl.DescBuilder{ 201 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 202 | RawDescriptor: file_delta_proto_rawDesc, 203 | NumEnums: 0, 204 
| NumMessages: 2, 205 | NumExtensions: 0, 206 | NumServices: 0, 207 | }, 208 | GoTypes: file_delta_proto_goTypes, 209 | DependencyIndexes: file_delta_proto_depIdxs, 210 | MessageInfos: file_delta_proto_msgTypes, 211 | }.Build() 212 | File_delta_proto = out.File 213 | file_delta_proto_rawDesc = nil 214 | file_delta_proto_goTypes = nil 215 | file_delta_proto_depIdxs = nil 216 | } 217 | -------------------------------------------------------------------------------- /pb/delta.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package crdt.pb; 3 | 4 | option go_package = ".;pb"; 5 | 6 | message Delta { 7 | repeated Element elements = 1; 8 | repeated Element tombstones = 2; 9 | uint64 priority = 3; 10 | } 11 | 12 | message Element { 13 | // key+id must form a unique identifier 14 | string key = 1; 15 | string id = 2; 16 | bytes value = 3; 17 | } 18 | -------------------------------------------------------------------------------- /pb/generate.go: -------------------------------------------------------------------------------- 1 | // Package pb contains generated protobuf types 2 | //go:generate protoc -I=. --go_out=. delta.proto bcast.proto 3 | package pb 4 | -------------------------------------------------------------------------------- /pubsub_broadcaster.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | 7 | pubsub "github.com/libp2p/go-libp2p-pubsub" 8 | ) 9 | 10 | var _ Broadcaster = (*PubSubBroadcaster)(nil) 11 | 12 | // PubSubBroadcaster implements a Broadcaster using libp2p PubSub. 13 | type PubSubBroadcaster struct { 14 | ctx context.Context 15 | psub *pubsub.PubSub 16 | topic *pubsub.Topic 17 | subs *pubsub.Subscription 18 | } 19 | 20 | // NewPubSubBroadcaster returns a new broadcaster using the given PubSub and 21 | // a topic to subscribe/broadcast to. The given context can be used to cancel 22 | // the broadcaster. 23 | // Please register any topic validators before creating the Broadcaster. 24 | // 25 | // The broadcaster can be shut down by cancelling the given context. 26 | // This must be done before Closing the crdt.Datastore, otherwise things 27 | // may hang. 28 | func NewPubSubBroadcaster(ctx context.Context, psub *pubsub.PubSub, topic string) (*PubSubBroadcaster, error) { 29 | psubTopic, err := psub.Join(topic) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | subs, err := psubTopic.Subscribe() 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | go func(ctx context.Context, subs *pubsub.Subscription) { 40 | <-ctx.Done() 41 | subs.Cancel() 42 | // subs.Next returns error when subscription closed. Subscription must 43 | // be closed before psubTopic can be closed. 44 | var err error 45 | for err == nil { 46 | _, err = subs.Next(ctx) 47 | } 48 | psubTopic.Close() 49 | }(ctx, subs) 50 | 51 | return &PubSubBroadcaster{ 52 | ctx: ctx, 53 | psub: psub, 54 | topic: psubTopic, 55 | subs: subs, 56 | }, nil 57 | } 58 | 59 | // Broadcast publishes some data. 60 | func (pbc *PubSubBroadcaster) Broadcast(ctx context.Context, data []byte) error { 61 | return pbc.topic.Publish(ctx, data) 62 | } 63 | 64 | // Next returns published data. 
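// It blocks until a message arrives and returns ErrNoMoreBroadcast once the
// broadcaster's context or the given context is cancelled. A receive loop
// might therefore look like this (sketch; handle is an illustrative callback):
//
//	for {
//		data, err := bcast.Next(ctx)
//		if err == ErrNoMoreBroadcast {
//			return // shutting down
//		}
//		if err != nil {
//			continue // other error; keep receiving
//		}
//		handle(data)
//	}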
65 | func (pbc *PubSubBroadcaster) Next(ctx context.Context) ([]byte, error) { 66 | var msg *pubsub.Message 67 | var err error 68 | 69 | select { 70 | case <-pbc.ctx.Done(): 71 | return nil, ErrNoMoreBroadcast 72 | case <-ctx.Done(): 73 | return nil, ErrNoMoreBroadcast 74 | default: 75 | } 76 | 77 | msg, err = pbc.subs.Next(ctx) 78 | if err != nil { 79 | if strings.Contains(err.Error(), "subscription cancelled") || 80 | strings.Contains(err.Error(), "context") { 81 | return nil, ErrNoMoreBroadcast 82 | } 83 | return nil, err 84 | } 85 | 86 | return msg.GetData(), nil 87 | } 88 | -------------------------------------------------------------------------------- /set.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/binary" 7 | "errors" 8 | "strings" 9 | "sync" 10 | 11 | dshelp "github.com/ipfs/boxo/datastore/dshelp" 12 | cid "github.com/ipfs/go-cid" 13 | pb "github.com/ipfs/go-ds-crdt/pb" 14 | ipld "github.com/ipfs/go-ipld-format" 15 | logging "github.com/ipfs/go-log/v2" 16 | multierr "go.uber.org/multierr" 17 | 18 | ds "github.com/ipfs/go-datastore" 19 | query "github.com/ipfs/go-datastore/query" 20 | ) 21 | 22 | var ( 23 | elemsNs = "s" // /elements namespace /set/s// 24 | tombsNs = "t" // /tombstones namespace /set/t// 25 | keysNs = "k" // /keys namespace /set/k//{v,p} 26 | valueSuffix = "v" // for /keys namespace 27 | prioritySuffix = "p" 28 | ) 29 | 30 | // set implements an Add-Wins Observed-Remove Set using delta-CRDTs 31 | // (https://arxiv.org/abs/1410.2803) and backing all the data in a 32 | // go-datastore. It is fully agnostic to MerkleCRDTs or the delta distribution 33 | // layer. It chooses the Value with most priority for a Key as the current 34 | // Value. When two values have the same priority, it chooses by alphabetically 35 | // sorting their unique IDs alphabetically. 36 | type set struct { 37 | store ds.Datastore 38 | dagService ipld.DAGService 39 | namespace ds.Key 40 | putHook func(key string, v []byte) 41 | deleteHook func(key string) 42 | logger logging.StandardLogger 43 | 44 | // Avoid merging two things at the same time since 45 | // we read-write value-priorities in a non-atomic way. 46 | putElemsMux sync.Mutex 47 | } 48 | 49 | func newCRDTSet( 50 | ctx context.Context, 51 | d ds.Datastore, 52 | namespace ds.Key, 53 | dagService ipld.DAGService, 54 | logger logging.StandardLogger, 55 | putHook func(key string, v []byte), 56 | deleteHook func(key string), 57 | ) (*set, error) { 58 | 59 | set := &set{ 60 | namespace: namespace, 61 | store: d, 62 | dagService: dagService, 63 | logger: logger, 64 | putHook: putHook, 65 | deleteHook: deleteHook, 66 | } 67 | 68 | return set, nil 69 | } 70 | 71 | // Add returns a new delta-set adding the given key/value. 72 | func (s *set) Add(ctx context.Context, key string, value []byte) *pb.Delta { 73 | return &pb.Delta{ 74 | Elements: []*pb.Element{ 75 | { 76 | Key: key, 77 | Value: value, 78 | }, 79 | }, 80 | Tombstones: nil, 81 | } 82 | } 83 | 84 | // Rmv returns a new delta-set removing the given key. 
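// Unlike Add, which builds its delta without touching the store, Rmv has to
// consult the datastore to find every element block currently alive for the
// key so that each one can be tombstoned. Sketch of the intended use (merging
// the returned delta is what actually removes the key, locally and on
// replicas; "/mykey" is illustrative):
//
//	d, err := s.Rmv(ctx, "/mykey")
//	if err != nil {
//		return err
//	}
//	// d.Tombstones holds one pb.Element{Key, Id} per live block of "/mykey"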
85 | func (s *set) Rmv(ctx context.Context, key string) (*pb.Delta, error) { 86 | delta := &pb.Delta{} 87 | 88 | // /namespace//elements 89 | prefix := s.elemsPrefix(key) 90 | q := query.Query{ 91 | Prefix: prefix.String(), 92 | KeysOnly: true, 93 | } 94 | 95 | results, err := s.store.Query(ctx, q) 96 | if err != nil { 97 | return nil, err 98 | } 99 | defer results.Close() 100 | 101 | for r := range results.Next() { 102 | if r.Error != nil { 103 | return nil, r.Error 104 | } 105 | id := strings.TrimPrefix(r.Key, prefix.String()) 106 | if !ds.RawKey(id).IsTopLevel() { 107 | // our prefix matches blocks from other keys i.e. our 108 | // prefix is "hello" and we have a different key like 109 | // "hello/bye" so we have a block id like 110 | // "bye/". If we got the right key, then the id 111 | // should be the block id only. 112 | continue 113 | } 114 | 115 | // check if its already tombed, which case don't add it to the 116 | // Rmv delta set. 117 | deleted, err := s.inTombsKeyID(ctx, key, id) 118 | if err != nil { 119 | return nil, err 120 | } 121 | if !deleted { 122 | delta.Tombstones = append(delta.Tombstones, &pb.Element{ 123 | Key: key, 124 | Id: id, 125 | }) 126 | } 127 | } 128 | return delta, nil 129 | } 130 | 131 | // Element retrieves the value of an element from the CRDT set. 132 | func (s *set) Element(ctx context.Context, key string) ([]byte, error) { 133 | // We can only GET an element if it's part of the Set (in 134 | // "elements" and not in "tombstones"). 135 | 136 | // * If the key has a value in the store it means that it has been 137 | // written and is alive. putTombs will delete the value if all elems 138 | // are tombstoned, or leave the best one. 139 | 140 | valueK := s.valueKey(key) 141 | value, err := s.store.Get(ctx, valueK) 142 | if err != nil { // not found is fine, we just return it 143 | return value, err 144 | } 145 | return value, nil 146 | } 147 | 148 | // Elements returns all the elements in the set. 149 | func (s *set) Elements(ctx context.Context, q query.Query) (query.Results, error) { 150 | // This will cleanup user the query prefix first. 151 | // This makes sure the use of things like "/../" in the query 152 | // does not affect our setQuery. 153 | srcQueryPrefixKey := ds.NewKey(q.Prefix) 154 | 155 | keyNamespacePrefix := s.keyPrefix(keysNs) 156 | keyNamespacePrefixStr := keyNamespacePrefix.String() 157 | setQueryPrefix := keyNamespacePrefix.Child(srcQueryPrefixKey).String() 158 | vSuffix := "/" + valueSuffix 159 | 160 | // We are going to be reading everything in the /set/ namespace which 161 | // will return items in the form: 162 | // * /set//value 163 | // * /set/priority (a Uvarint) 164 | 165 | // It is clear that KeysOnly=true should be used here when the original 166 | // query only wants keys. 167 | // 168 | // However, there is a question of what is best when the original 169 | // query wants also values: 170 | // * KeysOnly: true avoids reading all the priority key values 171 | // which are skipped at the cost of doing a separate Get() for the 172 | // values (50% of the keys). 173 | // * KeysOnly: false reads everything from the start. Priorities 174 | // and tombstoned values are read for nothing 175 | // 176 | // In-mem benchmarking shows no clear winner. Badger docs say that 177 | // KeysOnly "is several order of magnitudes faster than regular 178 | // iteration". Contrary to my original feeling, however, live testing 179 | // with a 50GB badger with millions of keys shows more speed when 180 | // querying with value. 
It may be that speed is fully affected by the 181 | // current state of table compaction as well. 182 | setQuery := query.Query{ 183 | Prefix: setQueryPrefix, 184 | KeysOnly: false, 185 | } 186 | 187 | // send the result and returns false if we must exit. 188 | sendResult := func(ctx, qctx context.Context, r query.Result, out chan<- query.Result) bool { 189 | select { 190 | case out <- r: 191 | case <-ctx.Done(): 192 | return false 193 | case <-qctx.Done(): 194 | return false 195 | } 196 | return r.Error == nil 197 | } 198 | 199 | // The code below was very inspired in the Query implementation in 200 | // flatfs. 201 | 202 | // Originally we were able to set the output channel capacity and it 203 | // was set to 128 even though not much difference to 1 could be 204 | // observed on mem-based testing. 205 | 206 | // Using KeysOnly still gives a 128-item channel. 207 | // See: https://github.com/ipfs/go-datastore/issues/40 208 | r := query.ResultsWithContext(q, func(qctx context.Context, out chan<- query.Result) { 209 | // qctx is a Background context for the query. It is not 210 | // associated to ctx. It is closed when this function finishes 211 | // along with the output channel, or when the Results are 212 | // Closed directly. 213 | results, err := s.store.Query(ctx, setQuery) 214 | if err != nil { 215 | sendResult(ctx, qctx, query.Result{Error: err}, out) 216 | return 217 | } 218 | defer results.Close() 219 | 220 | var entry query.Entry 221 | for r := range results.Next() { 222 | if r.Error != nil { 223 | sendResult(ctx, qctx, query.Result{Error: r.Error}, out) 224 | return 225 | } 226 | 227 | // We will be getting keys in the form of 228 | // /namespace/keys//v and /namespace/keys//p 229 | // We discard anything not ending in /v and sanitize 230 | // those from: 231 | // /namespace/keys//v -> 232 | if !strings.HasSuffix(r.Key, vSuffix) { // "/v" 233 | continue 234 | } 235 | 236 | key := strings.TrimSuffix( 237 | strings.TrimPrefix(r.Key, keyNamespacePrefixStr), 238 | "/"+valueSuffix, 239 | ) 240 | 241 | entry.Key = key 242 | entry.Value = r.Value 243 | entry.Size = r.Size 244 | entry.Expiration = r.Expiration 245 | 246 | // The fact that /v is set means it is not tombstoned, 247 | // as tombstoning removes /v and /p or sets them to 248 | // the best value. 249 | 250 | if q.KeysOnly { 251 | entry.Size = -1 252 | entry.Value = nil 253 | } 254 | if !sendResult(ctx, qctx, query.Result{Entry: entry}, out) { 255 | return 256 | } 257 | } 258 | }) 259 | 260 | return r, nil 261 | } 262 | 263 | // InSet returns true if the key belongs to one of the elements in the "elems" 264 | // set, and this element is not tombstoned. 265 | func (s *set) InSet(ctx context.Context, key string) (bool, error) { 266 | // If we do not have a value this key was never added or it was fully 267 | // tombstoned. 
268 | valueK := s.valueKey(key) 269 | return s.store.Has(ctx, valueK) 270 | } 271 | 272 | // /namespace/ 273 | func (s *set) keyPrefix(key string) ds.Key { 274 | return s.namespace.ChildString(key) 275 | } 276 | 277 | // /namespace/elems/ 278 | func (s *set) elemsPrefix(key string) ds.Key { 279 | return s.keyPrefix(elemsNs).ChildString(key) 280 | } 281 | 282 | // /namespace/tombs/ 283 | func (s *set) tombsPrefix(key string) ds.Key { 284 | return s.keyPrefix(tombsNs).ChildString(key) 285 | } 286 | 287 | // /namespace/keys//value 288 | func (s *set) valueKey(key string) ds.Key { 289 | return s.keyPrefix(keysNs).ChildString(key).ChildString(valueSuffix) 290 | } 291 | 292 | // /namespace/keys//priority 293 | func (s *set) priorityKey(key string) ds.Key { 294 | return s.keyPrefix(keysNs).ChildString(key).ChildString(prioritySuffix) 295 | } 296 | 297 | func (s *set) getPriority(ctx context.Context, key string) (uint64, error) { 298 | prioK := s.priorityKey(key) 299 | data, err := s.store.Get(ctx, prioK) 300 | if err != nil { 301 | if err == ds.ErrNotFound { 302 | return 0, nil 303 | } 304 | return 0, err 305 | } 306 | 307 | prio, n := binary.Uvarint(data) 308 | if n <= 0 { 309 | return prio, errors.New("error decoding priority") 310 | } 311 | return prio - 1, nil 312 | } 313 | 314 | func (s *set) setPriority(ctx context.Context, writeStore ds.Write, key string, prio uint64) error { 315 | prioK := s.priorityKey(key) 316 | buf := make([]byte, binary.MaxVarintLen64) 317 | n := binary.PutUvarint(buf, prio+1) 318 | if n == 0 { 319 | return errors.New("error encoding priority") 320 | } 321 | 322 | return writeStore.Put(ctx, prioK, buf[0:n]) 323 | } 324 | 325 | // sets a value if priority is higher. When equal, it sets if the 326 | // value is lexicographically higher than the current value. 327 | func (s *set) setValue(ctx context.Context, writeStore ds.Write, key, id string, value []byte, prio uint64) error { 328 | // If this key was tombstoned already, do not store/update the value. 329 | deleted, err := s.inTombsKeyID(ctx, key, id) 330 | if err != nil || deleted { 331 | return err 332 | } 333 | 334 | curPrio, err := s.getPriority(ctx, key) 335 | if err != nil { 336 | return err 337 | } 338 | 339 | if prio < curPrio { 340 | return nil 341 | } 342 | valueK := s.valueKey(key) 343 | 344 | if prio == curPrio { 345 | curValue, _ := s.store.Get(ctx, valueK) 346 | // new value greater than old 347 | if bytes.Compare(curValue, value) >= 0 { 348 | return nil 349 | } 350 | } 351 | 352 | // store value 353 | err = writeStore.Put(ctx, valueK, value) 354 | if err != nil { 355 | return err 356 | } 357 | 358 | // store priority 359 | err = s.setPriority(ctx, writeStore, key, prio) 360 | if err != nil { 361 | return err 362 | } 363 | 364 | // trigger add hook 365 | s.putHook(key, value) 366 | return nil 367 | } 368 | 369 | // findBestValue looks for all entries for the given key, figures out their 370 | // priority from their delta (skipping the blocks by the given pendingTombIDs) 371 | // and returns the value with the highest priority that is not tombstoned nor 372 | // about to be tombstoned. 
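// The selection rule mirrors setValue: a higher delta priority always wins,
// and on equal priority the lexicographically greater value wins. Roughly
// (sketch of the comparison applied to each surviving candidate value v):
//
//	if delta.Priority > bestPriority ||
//		(delta.Priority == bestPriority && bytes.Compare(v, bestValue) > 0) {
//		bestValue, bestPriority = v, delta.Priority
//	}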
373 | func (s *set) findBestValue(ctx context.Context, key string, pendingTombIDs []string) ([]byte, uint64, error) { 374 | // /namespace/elems/ 375 | prefix := s.elemsPrefix(key) 376 | q := query.Query{ 377 | Prefix: prefix.String(), 378 | KeysOnly: true, 379 | } 380 | 381 | results, err := s.store.Query(ctx, q) 382 | if err != nil { 383 | return nil, 0, err 384 | } 385 | defer results.Close() 386 | 387 | var bestValue []byte 388 | var bestPriority uint64 389 | var deltaCid cid.Cid 390 | ng := crdtNodeGetter{NodeGetter: s.dagService} 391 | 392 | // range all the /namespace/elems//. 393 | NEXT: 394 | for r := range results.Next() { 395 | if r.Error != nil { 396 | return nil, 0, err 397 | } 398 | 399 | id := strings.TrimPrefix(r.Key, prefix.String()) 400 | if !ds.RawKey(id).IsTopLevel() { 401 | // our prefix matches blocks from other keys i.e. our 402 | // prefix is "hello" and we have a different key like 403 | // "hello/bye" so we have a block id like 404 | // "bye/". If we got the right key, then the id 405 | // should be the block id only. 406 | continue 407 | } 408 | // if block is one of the pending tombIDs, continue 409 | for _, tombID := range pendingTombIDs { 410 | if tombID == id { 411 | continue NEXT 412 | } 413 | } 414 | 415 | // if tombstoned, continue 416 | inTomb, err := s.inTombsKeyID(ctx, key, id) 417 | if err != nil { 418 | return nil, 0, err 419 | } 420 | if inTomb { 421 | continue 422 | } 423 | 424 | // get the block 425 | mhash, err := dshelp.DsKeyToMultihash(ds.NewKey(id)) 426 | if err != nil { 427 | return nil, 0, err 428 | } 429 | deltaCid = cid.NewCidV1(cid.DagProtobuf, mhash) 430 | _, delta, err := ng.GetDelta(ctx, deltaCid) 431 | if err != nil { 432 | return nil, 0, err 433 | } 434 | 435 | // discard this delta. 436 | if delta.Priority < bestPriority { 437 | continue 438 | } 439 | 440 | // When equal priority, choose the greatest among values in 441 | // the delta and current. When higher priority, choose the 442 | // greatest only among those in the delta. 443 | var greatestValueInDelta []byte 444 | for _, elem := range delta.GetElements() { 445 | if elem.GetKey() != key { 446 | continue 447 | } 448 | v := elem.GetValue() 449 | if bytes.Compare(greatestValueInDelta, v) < 0 { 450 | greatestValueInDelta = v 451 | } 452 | } 453 | 454 | if delta.Priority > bestPriority { 455 | bestValue = greatestValueInDelta 456 | bestPriority = delta.Priority 457 | continue 458 | } 459 | 460 | // equal priority 461 | if bytes.Compare(bestValue, greatestValueInDelta) < 0 { 462 | bestValue = greatestValueInDelta 463 | } 464 | } 465 | 466 | return bestValue, bestPriority, nil 467 | } 468 | 469 | // putElems adds items to the "elems" set. It will also set current 470 | // values and priorities for each element. This needs to run in a lock, 471 | // as otherwise races may occur when reading/writing the priorities, resulting 472 | // in bad behaviours. 473 | // 474 | // Technically the lock should only affect the keys that are being written, 475 | // but with the batching optimization the locks would need to be hold until 476 | // the batch is written), and one lock per key might be way worse than a single 477 | // global lock in the end. 
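// For each element with key "k" and block id "id", the writes performed here
// boil down to the following datastore entries (layout sketch, with <ns>
// standing for the set namespace):
//
//	<ns>/s/k/id -> nil              // membership marker in the "elems" set
//	<ns>/k/k/v  -> value            // current best value, written by setValue
//	<ns>/k/k/p  -> uvarint(prio+1)  // current priority, written by setPriority
//
// The value/priority pair is only updated when setValue decides the incoming
// delta wins (higher priority, or equal priority and a greater value).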
478 | func (s *set) putElems(ctx context.Context, elems []*pb.Element, id string, prio uint64) error { 479 | s.putElemsMux.Lock() 480 | defer s.putElemsMux.Unlock() 481 | 482 | if len(elems) == 0 { 483 | return nil 484 | } 485 | 486 | var store ds.Write = s.store 487 | var err error 488 | batchingDs, batching := store.(ds.Batching) 489 | if batching { 490 | store, err = batchingDs.Batch(ctx) 491 | if err != nil { 492 | return err 493 | } 494 | } 495 | 496 | for _, e := range elems { 497 | e.Id = id // overwrite the identifier as it would come unset 498 | key := e.GetKey() 499 | // /namespace/elems// 500 | k := s.elemsPrefix(key).ChildString(id) 501 | err := store.Put(ctx, k, nil) 502 | if err != nil { 503 | return err 504 | } 505 | 506 | // update the value if applicable: 507 | // * higher priority than we currently have. 508 | // * not tombstoned before. 509 | err = s.setValue(ctx, store, key, id, e.GetValue(), prio) 510 | if err != nil { 511 | return err 512 | } 513 | } 514 | 515 | if batching { 516 | err := store.(ds.Batch).Commit(ctx) 517 | if err != nil { 518 | return err 519 | } 520 | } 521 | return nil 522 | } 523 | 524 | func (s *set) putTombs(ctx context.Context, tombs []*pb.Element) error { 525 | if len(tombs) == 0 { 526 | return nil 527 | } 528 | 529 | var store ds.Write = s.store 530 | var err error 531 | batchingDs, batching := store.(ds.Batching) 532 | if batching { 533 | store, err = batchingDs.Batch(ctx) 534 | if err != nil { 535 | return err 536 | } 537 | } 538 | 539 | // key -> tombstonedBlockID. Carries the tombstoned blocks for each 540 | // element in this delta. 541 | deletedElems := make(map[string][]string) 542 | 543 | for _, e := range tombs { 544 | // /namespace/tombs// 545 | key := e.GetKey() 546 | id := e.GetId() 547 | valueK := s.valueKey(key) 548 | deletedElems[key] = append(deletedElems[key], id) 549 | 550 | // Find best value for element that we are going to delete 551 | v, p, err := s.findBestValue(ctx, key, deletedElems[key]) 552 | if err != nil { 553 | return err 554 | } 555 | if v == nil { 556 | store.Delete(ctx, valueK) 557 | store.Delete(ctx, s.priorityKey(key)) 558 | } else { 559 | store.Put(ctx, valueK, v) 560 | s.setPriority(ctx, store, key, p) 561 | } 562 | 563 | // Write tomb into store. 564 | k := s.tombsPrefix(key).ChildString(id) 565 | err = store.Put(ctx, k, nil) 566 | if err != nil { 567 | return err 568 | } 569 | } 570 | 571 | if batching { 572 | err := store.(ds.Batch).Commit(ctx) 573 | if err != nil { 574 | return err 575 | } 576 | } 577 | 578 | // run delete hook only once for all versions of the same element 579 | // tombstoned in this delta. Note it may be that the element was not 580 | // fully deleted and only a different value took its place. 
581 | for del := range deletedElems { 582 | s.deleteHook(del) 583 | } 584 | 585 | return nil 586 | } 587 | 588 | func (s *set) Merge(ctx context.Context, d *pb.Delta, id string) error { 589 | err := s.putTombs(ctx, d.GetTombstones()) 590 | if err != nil { 591 | return err 592 | } 593 | 594 | return s.putElems(ctx, d.GetElements(), id, d.GetPriority()) 595 | } 596 | 597 | // currently unused 598 | // func (s *set) inElemsKeyID(key, id string) (bool, error) { 599 | // k := s.elemsPrefix(key).ChildString(id) 600 | // return s.store.Has(k) 601 | // } 602 | 603 | func (s *set) inTombsKeyID(ctx context.Context, key, id string) (bool, error) { 604 | k := s.tombsPrefix(key).ChildString(id) 605 | return s.store.Has(ctx, k) 606 | } 607 | 608 | // currently unused 609 | // // inSet returns if the given cid/block is in elems and not in tombs (and 610 | // // thus, it is an element of the set). 611 | // func (s *set) inSetKeyID(key, id string) (bool, error) { 612 | // inTombs, err := s.inTombsKeyID(key, id) 613 | // if err != nil { 614 | // return false, err 615 | // } 616 | // if inTombs { 617 | // return false, nil 618 | // } 619 | 620 | // return s.inElemsKeyID(key, id) 621 | // } 622 | 623 | // perform a sync against all the paths associated with a key prefix 624 | func (s *set) datastoreSync(ctx context.Context, prefix ds.Key) error { 625 | prefixStr := prefix.String() 626 | toSync := []ds.Key{ 627 | s.elemsPrefix(prefixStr), 628 | s.tombsPrefix(prefixStr), 629 | s.keyPrefix(keysNs).Child(prefix), // covers values and priorities 630 | } 631 | 632 | errs := make([]error, len(toSync)) 633 | 634 | for i, k := range toSync { 635 | if err := s.store.Sync(ctx, k); err != nil { 636 | errs[i] = err 637 | } 638 | } 639 | 640 | return multierr.Combine(errs...) 641 | } 642 | --------------------------------------------------------------------------------