├── .eslintrc.json ├── .gitignore ├── .travis.yml ├── ARCHITECTURE.md ├── LICENSE ├── README.md ├── example.js ├── index.js ├── lib ├── derive.js ├── differ.js ├── get.js ├── hash.js ├── history.js ├── iterator.js ├── key-history.js ├── messages.js ├── normalize.js ├── options.js ├── put.js ├── trie-encoding.js └── watch.js ├── package.json ├── schema.proto └── test ├── auth.js ├── autogenerated.js ├── basic.js ├── collisions.js ├── content-feeds.js ├── corruption.js ├── deletes.js ├── diff.js ├── fuzzing.js ├── helpers ├── create.js ├── fuzzing.js ├── put.js ├── replicate.js └── run.js ├── history.js ├── hooks.js ├── iterator-order.js ├── iterator.js ├── key-history.js ├── read-stream.js ├── reopen-and-write.js ├── replicate.js ├── trie-encoding.js └── watch.js /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { "extends": ["standard"] } 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | sandbox 3 | db 4 | *.db 5 | *.log 6 | NOTES.txt 7 | package-lock.json 8 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | sudo: false 3 | node_js: 4 | - 4 5 | - 6 6 | - 8 7 | - 10 8 | -------------------------------------------------------------------------------- /ARCHITECTURE.md: -------------------------------------------------------------------------------- 1 | # HyperDB Architecture 2 | 3 | HyperDB is a scalable peer-to-peer key-value database. 4 | 5 | ## Filesystem metaphor 6 | 7 | HyperDB is structured to be used much like a traditional hierarchical 8 | filesystem. 
A value can be written and read at locations like `/foo/bar/baz`, 9 | and the API supports querying or tracking values at subpaths, like how watching 10 | for changes on `/foo/bar` will report both changes to `/foo/bar/baz` and also 11 | `/foo/bar/19`. 12 | 13 | ## Set of append-only logs (feeds) 14 | 15 | A HyperDB is fundamentally a set of 16 | [hypercore](https://github.com/mafintosh/hypercore)s. A *hypercore* is a secure 17 | append-only log that is identified by a public key, and can only be written to 18 | by the holder of the corresponding private key. Because it is append-only, old 19 | values cannot be deleted nor modified. Because it is secure, a feed can be 20 | downloaded from even untrustworthy peers and verified to be accurate. Any 21 | modifications (malicious or otherwise) to the original feed data by someone 22 | other than the author can be readily detected. 23 | 24 | Each entry in a hypercore has a *sequence number*, that increments by 1 with 25 | each write, starting at 0 (`seq=0`). 26 | 27 | HyperDB builds its hierarchical key-value store on top of these hypercore feeds, 28 | and also provides facilities for authorization, and replication of those member 29 | hypercores. 30 | 31 | ### Directed acyclic graph 32 | 33 | The combination of all operations performed on a HyperDB by all of its members 34 | forms a DAG (*directed acyclic graph*). Each write to the database (setting a 35 | key to a value) includes information to point backward at all of the known 36 | "heads" in the graph. 37 | 38 | To illustrate what this means, let's say Alice starts a new HyperDB and writes 2 39 | values to it: 40 | 41 | ``` 42 | // Feed 43 | 44 | 0 (/foo/bar = 'baz') 45 | 1 (/foo/2 = '{ "some": "json" }') 46 | 47 | 48 | // Graph 49 | 50 | Alice: 0 <--- 1 51 | ``` 52 | 53 | Where sequence number 1 (the second entry) refers to sequence number 0 on the 54 | same feed (Alice's). 55 | 56 | Now Alice *authorizes* Bob to write to the HyperDB. 
Internally, this means Alice 57 | writes a special message to her feed saying that Bob's feed (identified by his 58 | public key) should be read and replicated in by other participants. Her feed 59 | becomes 60 | 61 | ``` 62 | // Feed 63 | 64 | 0 (/foo/bar = 'baz') 65 | 1 (/foo/2 = '{ "some": "json" }') 66 | 2 ('' = '') 67 | 68 | 69 | // Graph 70 | 71 | Alice: 0 <--- 1 <--- 2 72 | ``` 73 | 74 | Authorization is formatted internally in a special way so that it isn't 75 | interpreted as a key/value pair. 76 | 77 | Now Bob writes a value to his feed, and then Alice and Bob sync. The result is: 78 | 79 | ``` 80 | // Feed 81 | 82 | //// Alice 83 | 0 (/foo/bar = 'baz') 84 | 1 (/foo/2 = '{ "some": "json" }') 85 | 2 ('' = '') 86 | 87 | //// Bob 88 | 0 (/a/b = '12') 89 | 90 | 91 | // Graph 92 | 93 | Alice: 0 <--- 1 <--- 2 94 | Bob : 0 95 | ``` 96 | 97 | Notice that none of Alice's entries refer to Bob's, and vice versa. This is 98 | because neither has written any entries to their feeds since the two became 99 | aware of each other (authorized & replicated each other's feeds). 100 | 101 | Right now there are two "heads" of the graph: Alice's feed at seq 2, and Bob's 102 | feed at seq 0. 103 | 104 | Next, Alice writes a new value, and her latest entry will refer to Bob's: 105 | 106 | ``` 107 | // Feed 108 | 109 | //// Alice 110 | 0 (/foo/bar = 'baz') 111 | 1 (/foo/2 = '{ "some": "json" }') 112 | 2 ('' = '') 113 | 3 (/foo/hup = 'beep') 114 | 115 | //// Bob 116 | 0 (/a/b = '12') 117 | 118 | 119 | // Graph 120 | 121 | Alice: 0 <--- 1 <--- 2 <--/ 3 122 | Bob : 0 <-------------------/ 123 | ``` 124 | 125 | Because Alice's latest feed entry refers to Bob's latest feed entry, there is 126 | now only one "head" in the database. That means there is enough information in 127 | Alice's seq=3 entry to find any other key in the database. 
In the last example, 128 | there were two heads (Alice's seq=2 and Bob's seq=0); both of which would need 129 | to be read internally in order to locate any key in the database. 130 | 131 | Now there is only one "head": Alice's feed at seq 3. 132 | 133 | ## Authorization 134 | 135 | The set of hypercores are *authorized* in that the original author of the first 136 | hypercore in a hyperdb must explicitly denote in their append-only log that the 137 | public key of a new hypercore is permitted to edit the database. Any authorized 138 | member may authorize more members. There is no revocation or other author 139 | management elements currently. 140 | 141 | ## Incremental index 142 | 143 | HyperDB builds an *incremental index* with every new key/value pairs ("nodes") 144 | written. This means a separate data structure doesn't need to be maintained 145 | elsewhere for fast writes and lookups: each node written has enough information 146 | to look up any other key quickly and otherwise navigate the database. 147 | 148 | Each node stores the following basic information: 149 | 150 | - `key`: the key that is being created or modified. e.g. `/home/sww/dev.md` 151 | - `value`: the value stored at that key. 152 | - `seq`: the sequence number of this entry in the owner's hypercore. 0 is the 153 | first, 1 the second, and so forth. 154 | - `feed`: the ID of the hypercore writer that wrote this 155 | - `path`: a 2-bit hash sequence of the key's components 156 | - `trie`: a navigation structure used with `path` to find a desired key 157 | - `clock`: vector clock to determine node insertion causality 158 | - `feeds`: an array of { feedKey, seq } for decoding a `clock` 159 | 160 | ### Vector clock 161 | 162 | Each node stores a [vector clock](https://en.wikipedia.org/wiki/Vector_clock) of 163 | the last known sequence number from each feed it knows about. This is what forms 164 | the DAG structure. 
165 | 166 | A vector clock on a node of, say, `[0, 2, 5]` means: 167 | 168 | - when this node was written, the largest seq # in my local fed is 0 169 | - when this node was written, the largest seq # in the second feed I have is 2 170 | - when this node was written, the largest seq # in the third feed I have is 5 171 | 172 | For example, Bob's vector clock for Alice's seq=3 entry above would be `[0, 3]` 173 | since he knows of her latest entry (seq=3) and his own (seq=0). 174 | 175 | The vector clock is used for correctly traversing history. This is necessary for 176 | the `db#heads` API as well as `db#createHistoryStream`. 177 | 178 | ### Prefix trie 179 | 180 | Given a HyperDB with hundreds of entries, how can a key like `/a/b/c` be looked 181 | up quickly? 182 | 183 | Each node stores a *prefix [trie](https://en.wikipedia.org/wiki/Trie)* that 184 | assists with finding the shortest path to the desired key. 185 | 186 | When a node is written, its *prefix hash* is computed. This done by first 187 | splitting the key into its components (`a`, `b`, and `c` for `/a/b/c`), and then 188 | hashing each component into a 32-character hash, where one character is a 2-bit 189 | value (0, 1, 2, or 3). The `prefix` hash for `/a/b/c` is 190 | 191 | ```js 192 | node.path = [ 193 | 1, 2, 0, 1, 2, 0, 2, 2, 3, 0, 1, 2, 1, 3, 0, 3, 0, 0, 2, 1, 0, 2, 0, 0, 2, 0, 0, 3, 2, 1, 1, 2, 194 | 0, 1, 2, 3, 2, 2, 2, 0, 3, 1, 1, 3, 0, 3, 1, 3, 0, 1, 0, 1, 3, 2, 0, 2, 2, 3, 2, 2, 3, 3, 2, 3, 195 | 0, 1, 1, 0, 1, 2, 3, 2, 2, 2, 0, 0, 3, 1, 2, 1, 3, 3, 3, 3, 3, 3, 0, 3, 3, 2, 3, 2, 3, 0, 1, 0, 196 | 4 ] 197 | ``` 198 | 199 | Each component is divided by a newline. `4` is a special value indicating the 200 | end of the prefix. 201 | 202 | #### Example 203 | 204 | Consider a fresh HyperDB. 
We write `/a/b = 24` and get back this node: 205 | 206 | ```js 207 | { key: '/a/b', 208 | value: '24', 209 | clock: [ 0 ], 210 | trie: [], 211 | feeds: [ [Object] ], 212 | feedSeq: 0, 213 | feed: 0, 214 | seq: 0, 215 | path: 216 | [ 1, 2, 0, 1, 2, 0, 2, 2, 3, 0, 1, 2, 1, 3, 0, 3, 0, 0, 2, 1, 0, 2, 0, 0, 2, 0, 0, 3, 2, 1, 1, 2, 217 | 0, 1, 2, 3, 2, 2, 2, 0, 3, 1, 1, 3, 0, 3, 1, 3, 0, 1, 0, 1, 3, 2, 0, 2, 2, 3, 2, 2, 3, 3, 2, 3, 218 | 4 ] } 219 | ``` 220 | 221 | If you compare this path to the one for `/a/b/c` above, you'll see that the 222 | first 64 2-bit characters match. This is because `/a/b` is a prefix of `/a/b/c`. 223 | 224 | Since this is the first entry, `seq` is 0. Since this is the only known feed, 225 | `feed` is also 0. `feeds` is an array of entries of the form `{ key: Buffer, 226 | seq: Number }` that let you map the numeric value `feed` to a hypercore key and 227 | its sequence number head. `feeds` isn't always set: it only gets included when 228 | it changes compared to `node.seq - 1`, in the interest of storing less data per 229 | node. 230 | 231 | Now we write `/a/c = hello` and get this node: 232 | 233 | ```js 234 | { key: '/a/c', 235 | value: 'hello', 236 | clock: [ 0 ], 237 | trie: [ , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , [ , , [ { feed: 0, seq: 0 } ] ] ], 238 | feeds: [], 239 | feedSeq: 0, 240 | feed: 0, 241 | seq: 1, 242 | path: 243 | [ 1, 2, 0, 1, 2, 0, 2, 2, 3, 0, 1, 2, 1, 3, 0, 3, 0, 0, 2, 1, 0, 2, 0, 0, 2, 0, 0, 3, 2, 1, 1, 2, 244 | 0, 1, 1, 0, 1, 2, 3, 2, 2, 2, 0, 0, 3, 1, 2, 1, 3, 3, 3, 3, 3, 3, 0, 3, 3, 2, 3, 2, 3, 0, 1, 0, 245 | 4 ] } 246 | ``` 247 | 248 | As expected, this node has the same `feed` value as before (since we're only 249 | writing to one feed). Its `seq` is 1, since the last was 0. Notice that `feeds` 250 | isn't included, because the mapping of the numeric `feed` value to a key hasn't 251 | changed. 
252 | 253 | Also, this and the previous node have the first 32 characters of their `path` in 254 | common (the prefix `/a`). 255 | 256 | Notice though that `trie` is set. It's a long but sparse array. It has 35 257 | entries, with the last one referencing the first node inserted (`a/b/`). Why? 258 | 259 | (If it wasn't stored as a sparse array, you'd actually see 64 entries (the 260 | length of the `path`). But since the other 29 entries are also empty, hyperdb 261 | doesn't bother allocating them.) 262 | 263 | If you visually compare this node's `path` with the previous node's `path`, how 264 | many entries do they have in common? At which entry do the 2-bit numbers 265 | diverge? 266 | 267 | At the 35th entry. 268 | 269 | What this is saying is "if the hash of the key you're looking for differs from 270 | mine on the 35th entry, you want to travel to `{ feed: 0, seq: 0 }` to find the 271 | node you're looking for. 272 | 273 | This is how finding a node works, starting at any other node: 274 | 275 | 1. Compute the 2-bit hash sequence of the key you're after (e.g. `a/b`) 276 | 2. Lookup the newest entry in the feed. 277 | 3. Compare its `path` against the hash you just computed. 278 | 4. If you discover that the `path` and your hash match, then this is the node 279 | you're looking for! 280 | 5. Otherwise, once a 2-bit character from `path` and your hash disagree, note 281 | the index # where they differ and look up that value in the node's `trie`. 282 | Fetch that node at the given feed and sequence number, and go back to step 3. 283 | Repeat until you reach step 4 (match) or there is no entry in the node's trie 284 | for the key you're after (no match). 285 | 286 | What if there are multiple feeds in the HyperDB? The lookup algorithm changes 287 | slightly. Replace the above step 2 for: 288 | 289 | > 2. Fetch the latest entry from *every* feed. For each head node, proceed to 290 | > the next step. 291 | 292 | The other steps are the same as before. 
293 | 294 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018 Mathias Buus 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Moved, see https://github.com/holepunchto/hyperdb. 
2 | -------------------------------------------------------------------------------- /example.js: -------------------------------------------------------------------------------- 1 | var hyperdb = require('./') 2 | 3 | var db = hyperdb('./my.db', { 4 | valueEncoding: 'utf-8', 5 | reduce: (a, b) => a 6 | }) 7 | 8 | db.put('/hello', 'world', function (err) { 9 | if (err) throw err 10 | db.get('/hello', function (err, node) { 11 | if (err) throw err 12 | console.log('/hello --> ' + node.value) 13 | }) 14 | }) 15 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | var hypercore = require('hypercore') 2 | var protocol = require('hypercore-protocol') 3 | var thunky = require('thunky') 4 | var remove = require('unordered-array-remove') 5 | var toStream = require('nanoiterator/to-stream') 6 | var varint = require('varint') 7 | var mutexify = require('mutexify') 8 | var codecs = require('codecs') 9 | var raf = require('random-access-file') 10 | var path = require('path') 11 | var util = require('util') 12 | var bulk = require('bulk-write-stream') 13 | var events = require('events') 14 | var sodium = require('sodium-universal') 15 | var alru = require('array-lru') 16 | var inherits = require('inherits') 17 | var hash = require('./lib/hash') 18 | var iterator = require('./lib/iterator') 19 | var differ = require('./lib/differ') 20 | var history = require('./lib/history') 21 | var keyHistory = require('./lib/key-history') 22 | var get = require('./lib/get') 23 | var put = require('./lib/put') 24 | var messages = require('./lib/messages') 25 | var trie = require('./lib/trie-encoding') 26 | var watch = require('./lib/watch') 27 | var normalizeKey = require('./lib/normalize') 28 | var derive = require('./lib/derive') 29 | 30 | module.exports = HyperDB 31 | 32 | function HyperDB (storage, key, opts) { 33 | if (!(this instanceof HyperDB)) return new 
HyperDB(storage, key, opts) 34 | events.EventEmitter.call(this) 35 | 36 | if (isOptions(key)) { 37 | opts = key 38 | key = null 39 | } 40 | 41 | opts = Object.assign({}, opts) 42 | if (opts.firstNode) opts.reduce = reduceFirst 43 | 44 | var checkout = opts.checkout 45 | 46 | this.key = typeof key === 'string' ? Buffer.from(key, 'hex') : key 47 | this.discoveryKey = this.key ? hypercore.discoveryKey(this.key) : null 48 | this.source = checkout ? checkout.source : null 49 | this.local = checkout ? checkout.local : null 50 | this.localContent = checkout ? checkout.localContent : null 51 | this.feeds = checkout ? checkout.feeds : [] 52 | this.contentFeeds = checkout ? checkout.contentFeeds : (opts.contentFeed ? [] : null) 53 | this.ready = thunky(this._ready.bind(this)) 54 | this.opened = false 55 | this.sparse = !!opts.sparse 56 | this.sparseContent = opts.sparseContent !== undefined ? !!opts.sparseContent : this.sparse 57 | this.id = Buffer.alloc(32) 58 | sodium.randombytes_buf(this.id) 59 | 60 | this._storage = createStorage(storage) 61 | this._contentStorage = typeof opts.contentFeed === 'function' 62 | ? opts.contentFeed 63 | : opts.contentFeed ? this._storage : null 64 | this._writers = checkout ? checkout._writers : [] 65 | this._watching = checkout ? 
checkout._watching : [] 66 | this._replicating = [] 67 | this._localWriter = null 68 | this._byKey = new Map() 69 | this._heads = opts.heads || null 70 | this._version = opts.version || null 71 | this._checkout = checkout || null 72 | this._lock = mutexify() 73 | this._map = opts.map || null 74 | this._reduce = opts.reduce || null 75 | this._valueEncoding = codecs(opts.valueEncoding || 'binary') 76 | this._batching = null 77 | this._batchingNodes = null 78 | this._secretKey = opts.secretKey || null 79 | this._storeSecretKey = opts.storeSecretKey !== false 80 | this._onwrite = opts.onwrite || null 81 | this._authorized = [] 82 | 83 | this.ready() 84 | } 85 | 86 | inherits(HyperDB, events.EventEmitter) 87 | 88 | HyperDB.prototype.batch = function (batch, cb) { 89 | if (!cb) cb = noop 90 | 91 | var self = this 92 | 93 | this._lock(function (release) { 94 | var clock = self._clock() 95 | 96 | self._batching = [] 97 | self._batchingNodes = [] 98 | 99 | self.heads(function (err, heads) { 100 | if (err) return cb(err) 101 | 102 | var i = 0 103 | 104 | loop(null) 105 | 106 | function loop (err, node) { 107 | if (err) return done(err) 108 | 109 | if (node) { 110 | node.path = hash(node.key, true) 111 | heads = [node] 112 | } 113 | 114 | if (i === batch.length) { 115 | self.local.append(self._batching, done) 116 | return 117 | } 118 | 119 | var next = batch[i++] 120 | put(self, clock, heads, normalizeKey(next.key), next.value, {delete: next.type === 'del'}, loop) 121 | } 122 | 123 | function done (err) { 124 | var nodes = self._batchingNodes 125 | self._batching = null 126 | self._batchingNodes = null 127 | return release(cb, err, nodes) 128 | } 129 | }) 130 | }) 131 | } 132 | 133 | HyperDB.prototype.put = function (key, val, opts, cb) { 134 | if (typeof opts === 'function') return this.put(key, val, null, opts) 135 | if (!cb) cb = noop 136 | 137 | if (this._checkout) { 138 | return process.nextTick(cb, new Error('Cannot put on a checkout')) 139 | } 140 | 141 | var self = 
this 142 | 143 | key = normalizeKey(key) 144 | 145 | this._lock(function (release) { 146 | var clock = self._clock() 147 | self._getHeads(false, function (err, heads) { 148 | if (err) return unlock(err) 149 | put(self, clock, heads, key, val, opts, unlock) 150 | }) 151 | 152 | function unlock (err, node) { 153 | release(cb, err, node) 154 | } 155 | }) 156 | } 157 | 158 | HyperDB.prototype.del = function (key, cb) { 159 | this.put(key, null, { delete: true }, cb) 160 | } 161 | 162 | HyperDB.prototype.watch = function (key, cb) { 163 | if (typeof key === 'function') return this.watch('', key) 164 | return watch(this, normalizeKey(key), cb) 165 | } 166 | 167 | HyperDB.prototype.get = function (key, opts, cb) { 168 | if (typeof opts === 'function') return this.get(key, null, opts) 169 | 170 | var self = this 171 | 172 | this._getHeads((opts && opts.update) !== false, function (err, heads) { 173 | if (err) return cb(err) 174 | get(self, heads, normalizeKey(key), opts, cb) 175 | }) 176 | } 177 | 178 | HyperDB.prototype.version = function (cb) { 179 | var self = this 180 | 181 | this.heads(function (err, heads) { 182 | if (err) return cb(err) 183 | 184 | var buffers = [] 185 | 186 | for (var i = 0; i < heads.length; i++) { 187 | buffers.push(self.feeds[heads[i].feed].key) 188 | buffers.push(Buffer.from(varint.encode(heads[i].seq))) 189 | } 190 | 191 | cb(null, Buffer.concat(buffers)) 192 | }) 193 | } 194 | 195 | HyperDB.prototype.checkout = function (version, opts) { 196 | if (!opts) opts = {} 197 | 198 | if (typeof version === 'string') { 199 | version = Buffer.from(version, 'hex') 200 | } 201 | 202 | if (Array.isArray(version)) { 203 | opts.heads = version 204 | version = null 205 | } 206 | 207 | return new HyperDB(this._storage, this.key, { 208 | checkout: this, 209 | version: version, 210 | map: opts.map !== undefined ? opts.map : this._map, 211 | reduce: opts.reduce !== undefined ? 
opts.reduce : this._reduce, 212 | heads: opts.heads 213 | }) 214 | } 215 | 216 | HyperDB.prototype.snapshot = function (opts) { 217 | return this.checkout(null, opts) 218 | } 219 | 220 | HyperDB.prototype.heads = function (cb) { 221 | this._getHeads(true, cb) 222 | } 223 | 224 | HyperDB.prototype._getHeads = function (update, cb) { 225 | if (!this.opened) return readyAndHeads(this, update, cb) 226 | if (this._heads) return process.nextTick(cb, null, this._heads) 227 | 228 | // This is a bit of a hack. Basically when the db is empty 229 | // we wanna wait for data to come in. TODO: We should guarantee 230 | // that the db always has a single block of data (like a header) 231 | if (update && this._waitForUpdate()) { 232 | this.setMaxListeners(0) 233 | this.once('remote-update', this.heads.bind(this, cb)) 234 | return 235 | } 236 | 237 | var self = this 238 | var len = this._writers.length 239 | var missing = len 240 | var error = null 241 | var nodes = new Array(len) 242 | 243 | for (var i = 0; i < len; i++) { 244 | this._writers[i].head(onhead) 245 | } 246 | 247 | function onhead (err, head, i) { 248 | if (err) error = err 249 | else nodes[i] = head 250 | 251 | if (--missing) return 252 | 253 | if (error) return cb(error) 254 | if (len !== self._writers.length) return self.heads(cb) 255 | 256 | if (nodes.length === 1) return cb(null, nodes[0] ? 
nodes : []) 257 | cb(null, filterHeads(nodes)) 258 | } 259 | } 260 | 261 | HyperDB.prototype._waitForUpdate = function () { 262 | return !this._writers[0].length() && this.local.length < 2 263 | } 264 | 265 | HyperDB.prototype._index = function (key) { 266 | if (key.key) key = key.key 267 | for (var i = 0; i < this.feeds.length; i++) { 268 | if (this.feeds[i].key.equals(key)) return i 269 | } 270 | return -1 271 | } 272 | 273 | HyperDB.prototype.authorized = function (key, cb) { 274 | var self = this 275 | 276 | this._getHeads(false, function (err) { 277 | if (err) return cb(err) 278 | // writers[0] is the source, always authed 279 | cb(null, self._writers[0].authorizes(key, null)) 280 | }) 281 | } 282 | 283 | HyperDB.prototype.authorize = function (key, cb) { 284 | if (!cb) cb = noop 285 | 286 | var self = this 287 | 288 | this.heads(function (err) { // populates .feeds to be up to date 289 | if (err) return cb(err) 290 | self._addWriter(key, function (err) { 291 | if (err) return cb(err) 292 | self.put('', null, cb) 293 | }) 294 | }) 295 | } 296 | 297 | HyperDB.prototype.replicate = function (opts) { 298 | opts = Object.assign({}, opts) 299 | 300 | var self = this 301 | var expectedFeeds = Math.max(1, this._authorized.length) 302 | var factor = this.contentFeeds ? 
2 : 1 303 | 304 | opts.expectedFeeds = expectedFeeds * factor 305 | if (!opts.id) opts.id = this.id 306 | 307 | if (!opts.stream) opts.stream = protocol(opts) 308 | var stream = opts.stream 309 | 310 | if (!opts.live) stream.on('prefinalize', prefinalize) 311 | 312 | this.ready(onready) 313 | 314 | return stream 315 | 316 | function onready (err) { 317 | if (err) return stream.destroy(err) 318 | if (stream.destroyed) return 319 | 320 | // bootstrap content feeds 321 | if (self.contentFeeds && !self.contentFeeds[0]) self._writers[0].get(1, noop) 322 | 323 | var i = 0 324 | 325 | self._replicating.push(replicate) 326 | stream.on('close', onclose) 327 | stream.on('end', onclose) 328 | 329 | replicate() 330 | 331 | function oncontent () { 332 | this._contentFeed.replicate(opts) 333 | } 334 | 335 | function replicate () { 336 | for (; i < self._authorized.length; i++) { 337 | var j = self._authorized[i] 338 | self.feeds[j].replicate(opts) 339 | if (!self.contentFeeds) continue 340 | var w = self._writers[j] 341 | if (w._contentFeed) w._contentFeed.replicate(opts) 342 | else w.once('content-feed', oncontent) 343 | } 344 | } 345 | 346 | function onclose () { 347 | var i = self._replicating.indexOf(replicate) 348 | if (i > -1) remove(self._replicating, i) 349 | for (i = 0; i < self._writers.length; i++) { 350 | self._writers[i].removeListener('content-feed', oncontent) 351 | } 352 | } 353 | } 354 | 355 | function prefinalize (cb) { 356 | self.heads(function (err) { 357 | if (err) return cb(err) 358 | stream.expectedFeeds += factor * (self._authorized.length - expectedFeeds) 359 | expectedFeeds = self._writers.length 360 | cb() 361 | }) 362 | } 363 | } 364 | 365 | HyperDB.prototype._clock = function () { 366 | var clock = new Array(this._writers.length) 367 | 368 | for (var i = 0; i < clock.length; i++) { 369 | var w = this._writers[i] 370 | clock[i] = w === this._localWriter ? 
w._clock : w.length() 371 | } 372 | 373 | return clock 374 | } 375 | 376 | HyperDB.prototype._getPointer = function (feed, index, isPut, cb) { 377 | if (isPut && this._batching && feed === this._localWriter._id && index >= this._localWriter._feed.length) { 378 | process.nextTick(cb, null, this._batchingNodes[index - this._localWriter._feed.length]) 379 | return 380 | } 381 | this._writers[feed].get(index, cb) 382 | } 383 | 384 | HyperDB.prototype._getAllPointers = function (list, isPut, cb) { 385 | var error = null 386 | var result = new Array(list.length) 387 | var missing = result.length 388 | 389 | if (!missing) return process.nextTick(cb, null, result) 390 | 391 | for (var i = 0; i < result.length; i++) { 392 | this._getPointer(list[i].feed, list[i].seq, isPut, done) 393 | } 394 | 395 | function done (err, node) { 396 | if (err) error = err 397 | else result[indexOf(list, node)] = node 398 | if (!--missing) cb(error, result) 399 | } 400 | } 401 | 402 | HyperDB.prototype._writer = function (dir, key, opts) { 403 | var writer = key && this._byKey.get(key.toString('hex')) 404 | if (writer) return writer 405 | 406 | opts = Object.assign({}, opts, { 407 | sparse: this.sparse, 408 | onwrite: this._onwrite ? 
onwrite : null 409 | }) 410 | 411 | var self = this 412 | var feed = hypercore(storage, key, opts) 413 | 414 | writer = new Writer(self, feed) 415 | feed.on('append', onappend) 416 | feed.on('remote-update', onremoteupdate) 417 | feed.on('sync', onreloadhead) 418 | 419 | if (key) addWriter(null) 420 | else feed.ready(addWriter) 421 | 422 | return writer 423 | 424 | function onwrite (index, data, peer, cb) { 425 | if (!index) return cb(null) // do not intercept the header 426 | if (peer) peer.maxRequests++ 427 | if (index >= writer._writeLength) writer._writeLength = index + 1 428 | writer._writes.set(index, data) 429 | writer._decode(index, data, function (err, entry) { 430 | if (err) return done(cb, index, peer, err) 431 | self._onwrite(entry, peer, function (err) { 432 | done(cb, index, peer, err) 433 | }) 434 | }) 435 | } 436 | 437 | function done (cb, index, peer, err) { 438 | if (peer) peer.maxRequests-- 439 | writer._writes.delete(index) 440 | cb(err) 441 | } 442 | 443 | function onremoteupdate () { 444 | self.emit('remote-update', feed, writer._id) 445 | } 446 | 447 | function onreloadhead () { 448 | // read writer head to see if any new writers are added on full sync 449 | writer.head(noop) 450 | } 451 | 452 | function onappend () { 453 | for (var i = 0; i < self._watching.length; i++) self._watching[i]._kick() 454 | self.emit('append', feed, writer._id) 455 | } 456 | 457 | function addWriter (err) { 458 | if (!err) self._byKey.set(feed.key.toString('hex'), writer) 459 | } 460 | 461 | function storage (name) { 462 | return self._storage(dir + '/' + name, {feed}) 463 | } 464 | } 465 | 466 | HyperDB.prototype._getWriter = function (key) { 467 | return this._byKey.get(key.toString('hex')) 468 | } 469 | 470 | HyperDB.prototype._addWriter = function (key, cb) { 471 | var self = this 472 | var writer = this._writer('peers/' + hypercore.discoveryKey(key).toString('hex'), key) 473 | 474 | writer._feed.ready(function (err) { 475 | if (err) return cb(err) 476 | if 
(self._index(key) <= -1) self._pushWriter(writer) 477 | cb(null) 478 | }) 479 | } 480 | 481 | HyperDB.prototype._pushWriter = function (writer) { 482 | writer._id = this._writers.push(writer) - 1 483 | this.feeds.push(writer._feed) 484 | if (this.contentFeeds) this.contentFeeds.push(null) 485 | 486 | if (!this.opened) return 487 | 488 | for (var i = 0; i < this._replicating.length; i++) { 489 | this._replicating[i]() 490 | } 491 | } 492 | 493 | HyperDB.prototype.list = function (prefix, opts, cb) { 494 | if (typeof prefix === 'function') return this.list('', null, prefix) 495 | if (typeof opts === 'function') return this.list(prefix, null, opts) 496 | 497 | var ite = this.iterator(prefix, opts) 498 | var list = [] 499 | 500 | ite.next(loop) 501 | 502 | function loop (err, nodes) { 503 | if (err) return cb(err) 504 | if (!nodes) return cb(null, list) 505 | list.push(nodes) 506 | ite.next(loop) 507 | } 508 | } 509 | 510 | HyperDB.prototype.history = function (opts) { 511 | return history(this, opts) 512 | } 513 | 514 | HyperDB.prototype.keyHistory = function (prefix, opts) { 515 | return keyHistory(this, prefix, opts) 516 | } 517 | 518 | HyperDB.prototype.diff = function (other, prefix, opts) { 519 | if (isOptions(prefix)) return this.diff(other, null, prefix) 520 | return differ(this, other || checkoutEmpty(this), prefix || '', opts) 521 | } 522 | 523 | HyperDB.prototype.iterator = function (prefix, opts) { 524 | if (isOptions(prefix)) return this.iterator('', prefix) 525 | return iterator(this, normalizeKey(prefix || ''), opts) 526 | } 527 | 528 | HyperDB.prototype.createHistoryStream = function (opts) { 529 | return toStream(this.history(opts)) 530 | } 531 | 532 | HyperDB.prototype.createKeyHistoryStream = function (prefix, opts) { 533 | return toStream(this.keyHistory(prefix, opts)) 534 | } 535 | 536 | HyperDB.prototype.createDiffStream = function (other, prefix, opts) { 537 | if (isOptions(prefix)) return this.createDiffStream(other, '', prefix) 538 | return 
toStream(this.diff(other, prefix, opts)) 539 | } 540 | 541 | HyperDB.prototype.createReadStream = function (prefix, opts) { 542 | return toStream(this.iterator(prefix, opts)) 543 | } 544 | 545 | HyperDB.prototype.createWriteStream = function (cb) { 546 | var self = this 547 | return bulk.obj(write) 548 | 549 | function write (batch, cb) { 550 | var flattened = [] 551 | for (var i = 0; i < batch.length; i++) { 552 | var content = batch[i] 553 | if (Array.isArray(content)) { 554 | for (var j = 0; j < content.length; j++) { 555 | flattened.push(content[j]) 556 | } 557 | } else { 558 | flattened.push(content) 559 | } 560 | } 561 | self.batch(flattened, cb) 562 | } 563 | } 564 | 565 | HyperDB.prototype._ready = function (cb) { 566 | var self = this 567 | 568 | if (this._checkout) { 569 | if (this._heads) oncheckout(null, this._heads) 570 | else if (this._version) this._checkout.heads(onversion) 571 | else this._checkout.heads(oncheckout) 572 | return 573 | } 574 | 575 | if (!this.source) { 576 | this.source = feed('source', this.key, { 577 | secretKey: this._secretKey, 578 | storeSecretKey: this._storeSecretKey 579 | }) 580 | } 581 | 582 | this.source.ready(function (err) { 583 | if (err) return done(err) 584 | if (self.source.writable) self.local = self.source 585 | if (!self.local) self.local = feed('local') 586 | 587 | self.key = self.source.key 588 | self.discoveryKey = self.source.discoveryKey 589 | self._writers[0].authorize() // source is always authorized 590 | 591 | self.local.ready(function (err) { 592 | if (err) return done(err) 593 | 594 | self._localWriter = self._writers[self.feeds.indexOf(self.local)] 595 | 596 | if (self._contentStorage) { 597 | self._localWriter._ensureContentFeed(null) 598 | self.localContent = self._localWriter._contentFeed 599 | } 600 | 601 | self._localWriter.head(function (err) { 602 | if (err) return done(err) 603 | if (!self.localContent) return done(null) 604 | self.localContent.ready(done) 605 | }) 606 | }) 607 | }) 608 | 609 | 
function done (err) { 610 | if (err) return cb(err) 611 | self._localWriter.ensureHeader(onheader) 612 | 613 | function onheader (err) { 614 | if (err) return cb(err) 615 | self.opened = true 616 | self.emit('ready') 617 | cb(null) 618 | } 619 | } 620 | 621 | function feed (dir, key, feedOpts) { 622 | var writer = self._writer(dir, key, feedOpts) 623 | self._pushWriter(writer) 624 | return writer._feed 625 | } 626 | 627 | function onversion (err) { 628 | if (err) return done(err) 629 | 630 | var offset = 0 631 | var missing = 0 632 | var nodes = [] 633 | var error = null 634 | 635 | if (typeof self._version === 'number') { 636 | missing = 1 637 | self._checkout._writers[0].get(self._version, onnode) 638 | return 639 | } 640 | 641 | while (offset < self._version.length) { 642 | var key = self._version.slice(offset, offset + 32) 643 | var seq = varint.decode(self._version, offset + 32) 644 | offset += 32 + varint.decode.bytes 645 | var writer = self._checkout._byKey.get(key.toString('hex')) 646 | if (!writer) { 647 | error = new Error('Invalid version') 648 | continue 649 | } 650 | missing++ 651 | writer.get(seq, onnode) 652 | } 653 | 654 | if (!missing) oncheckout(error, []) 655 | 656 | function onnode (err, node) { 657 | if (err) error = err 658 | else nodes.push(node) 659 | if (!--missing) oncheckout(error, nodes) 660 | } 661 | } 662 | 663 | function oncheckout (err, heads) { 664 | if (err) return done(err) 665 | 666 | self.opened = true 667 | self.source = self._checkout.source 668 | self.local = self._checkout.local 669 | self.localContent = self._checkout.localContent 670 | self._localWriter = self._checkout._localWriter 671 | self.key = self._checkout.key 672 | self.discoveryKey = self._checkout.discoveryKey 673 | self._heads = heads 674 | 675 | done(null) 676 | } 677 | } 678 | 679 | function Writer (db, feed) { 680 | events.EventEmitter.call(this) 681 | 682 | this._id = 0 683 | this._db = db 684 | this._feed = feed 685 | this._contentFeed = null 686 | 
this._feeds = 0 687 | this._feedsMessage = null 688 | this._feedsLoaded = -1 689 | this._entry = 0 690 | this._clock = 0 691 | this._encodeMap = [] 692 | this._decodeMap = [] 693 | this._checkout = false 694 | this._length = 0 695 | this._authorized = false 696 | 697 | this._cache = alru(4096) 698 | 699 | this._writes = new Map() 700 | this._writeLength = 0 701 | 702 | this.setMaxListeners(0) 703 | } 704 | 705 | inherits(Writer, events.EventEmitter) 706 | 707 | Writer.prototype.authorize = function () { 708 | if (this._authorized) return 709 | this._authorized = true 710 | this._db._authorized.push(this._id) 711 | if (this._feedsMessage) this._updateFeeds() 712 | } 713 | 714 | Writer.prototype.ensureHeader = function (cb) { 715 | if (this._feed.length) return cb(null) 716 | 717 | var header = { 718 | protocol: 'hyperdb' 719 | } 720 | 721 | this._feed.append(messages.Header.encode(header), cb) 722 | } 723 | 724 | Writer.prototype.append = function (entry, cb) { 725 | if (!this._clock) this._clock = this._feed.length 726 | 727 | var enc = messages.Entry 728 | this._entry = this._clock++ 729 | 730 | entry.clock[this._id] = this._clock 731 | entry.seq = this._clock - 1 732 | entry.feed = this._id 733 | entry[util.inspect.custom] = inspect 734 | 735 | var mapped = { 736 | key: entry.key, 737 | value: null, 738 | deleted: entry.deleted, 739 | inflate: 0, 740 | clock: null, 741 | trie: null, 742 | feeds: null, 743 | contentFeed: null 744 | } 745 | 746 | if (this._needsInflate()) { 747 | enc = messages.InflatedEntry 748 | mapped.feeds = this._mapList(this._db.feeds, this._encodeMap, null) 749 | if (this._db.contentFeeds) mapped.contentFeed = this._db.contentFeeds[this._id].key 750 | this._feedsMessage = mapped 751 | this._feedsLoaded = this._feeds = this._entry 752 | this._updateFeeds() 753 | } 754 | 755 | mapped.clock = this._mapList(entry.clock, this._encodeMap, 0) 756 | mapped.inflate = this._feeds 757 | mapped.trie = trie.encode(entry.trie, this._encodeMap) 758 | if 
(!isNullish(entry.value)) mapped.value = this._db._valueEncoding.encode(entry.value) 759 | 760 | if (this._db._batching) { 761 | this._db._batching.push(enc.encode(mapped)) 762 | this._db._batchingNodes.push(entry) 763 | return cb(null) 764 | } 765 | 766 | this._feed.append(enc.encode(mapped), cb) 767 | } 768 | 769 | Writer.prototype._needsInflate = function () { 770 | var msg = this._feedsMessage 771 | return !msg || msg.feeds.length !== this._db.feeds.length 772 | } 773 | 774 | Writer.prototype._maybeUpdateFeeds = function () { 775 | if (!this._feedsMessage) return 776 | var writers = this._feedsMessage.feeds || [] 777 | if ( 778 | this._decodeMap.length !== writers.length || 779 | this._encodeMap.length !== this._db.feeds.length 780 | ) { 781 | this._updateFeeds() 782 | } 783 | } 784 | 785 | Writer.prototype._decode = function (seq, buf, cb) { 786 | try { 787 | var val = messages.Entry.decode(buf) 788 | } catch (e) { 789 | return cb(e) 790 | } 791 | val[util.inspect.custom] = inspect 792 | val.seq = seq 793 | val.path = hash(val.key, true) 794 | try { 795 | val.value = val.value && this._db._valueEncoding.decode(val.value) 796 | } catch (e) { 797 | return cb(e) 798 | } 799 | 800 | if (this._feedsMessage && this._feedsLoaded === val.inflate) { 801 | this._maybeUpdateFeeds() 802 | val.feed = this._id 803 | if (val.clock.length > this._decodeMap.length) { 804 | return cb(new Error('Missing feed mappings')) 805 | } 806 | val.clock = this._mapList(val.clock, this._decodeMap, 0) 807 | val.trie = trie.decode(val.trie, this._decodeMap) 808 | this._cache.set(val.seq, val) 809 | return cb(null, val, this._id) 810 | } 811 | 812 | this._loadFeeds(val, buf, cb) 813 | } 814 | 815 | Writer.prototype.get = function (seq, cb) { 816 | var self = this 817 | var cached = this._cache.get(seq) 818 | if (cached) return process.nextTick(cb, null, cached, this._id) 819 | 820 | this._getFeed(seq, function (err, val) { 821 | if (err) return cb(err) 822 | self._decode(seq, val, cb) 823 | 
}) 824 | } 825 | 826 | Writer.prototype._getFeed = function (seq, cb) { 827 | if (this._writes && this._writes.size) { 828 | var buf = this._writes.get(seq) 829 | if (buf) return process.nextTick(cb, null, buf) 830 | } 831 | this._feed.get(seq, cb) 832 | } 833 | 834 | Writer.prototype.head = function (cb) { 835 | var len = this.length() 836 | if (len < 2) return process.nextTick(cb, null, null, this._id) 837 | this.get(len - 1, cb) 838 | } 839 | 840 | Writer.prototype._mapList = function (list, map, def) { 841 | var mapped = [] 842 | var i 843 | for (i = 0; i < map.length; i++) mapped[map[i]] = i < list.length ? list[i] : def 844 | for (; i < list.length; i++) mapped[i] = list[i] 845 | for (i = 0; i < mapped.length; i++) { 846 | if (!mapped[i]) mapped[i] = def 847 | } 848 | return mapped 849 | } 850 | 851 | Writer.prototype._loadFeeds = function (head, buf, cb) { 852 | var self = this 853 | 854 | if (head.feeds) done(head) 855 | else if (head.inflate === head.seq) onfeeds(null, buf) 856 | else this._getFeed(head.inflate, onfeeds) 857 | 858 | function onfeeds (err, buf) { 859 | if (err) return cb(err) 860 | try { 861 | var msg = messages.InflatedEntry.decode(buf) 862 | } catch (e) { 863 | return cb(e) 864 | } 865 | done(msg) 866 | } 867 | 868 | function done (msg) { 869 | self._addWriters(head, msg, cb) 870 | } 871 | } 872 | 873 | Writer.prototype._addWriters = function (head, inflated, cb) { 874 | var self = this 875 | var id = this._id 876 | var writers = inflated.feeds || [] 877 | var missing = writers.length + 1 878 | var error = null 879 | 880 | for (var i = 0; i < writers.length; i++) { 881 | this._db._addWriter(writers[i].key, done) 882 | } 883 | 884 | done(null) 885 | 886 | function done (err) { 887 | if (err) error = err 888 | if (--missing) return 889 | if (error) return cb(error) 890 | var seq = head.inflate 891 | if (seq > self._feedsLoaded) { 892 | self._feedsLoaded = self._feeds = seq 893 | self._feedsMessage = inflated 894 | } 895 | 
self._updateFeeds() 896 | head.feed = self._id 897 | if (head.clock.length > self._decodeMap.length) { 898 | return cb(new Error('Missing feed mappings')) 899 | } 900 | head.clock = self._mapList(head.clock, self._decodeMap, 0) 901 | head.trie = trie.decode(head.trie, self._decodeMap) 902 | self._cache.set(head.seq, head) 903 | cb(null, head, id) 904 | } 905 | } 906 | 907 | Writer.prototype._ensureContentFeed = function (key) { 908 | if (this._contentFeed) return 909 | 910 | var self = this 911 | var secretKey = null 912 | 913 | if (!key) { 914 | var pair = derive(this._db.local.secretKey) 915 | secretKey = pair.secretKey 916 | key = pair.publicKey 917 | } 918 | 919 | this._contentFeed = hypercore(storage, key, { 920 | sparse: this._db.sparseContent, 921 | storeSecretKey: false, 922 | secretKey 923 | }) 924 | 925 | if (this._db.contentFeeds) this._db.contentFeeds[this._id] = this._contentFeed 926 | this.emit('content-feed') 927 | 928 | function storage (name) { 929 | name = 'content/' + self._feed.discoveryKey.toString('hex') + '/' + name 930 | return self._db._contentStorage(name, { 931 | metadata: self._feed, 932 | feed: self._contentFeed 933 | }) 934 | } 935 | } 936 | 937 | Writer.prototype._updateFeeds = function () { 938 | var i 939 | var updateReplicates = false 940 | 941 | if (this._feedsMessage.contentFeed && this._db.contentFeeds && !this._contentFeed) { 942 | this._ensureContentFeed(this._feedsMessage.contentFeed) 943 | updateReplicates = true 944 | } 945 | 946 | var writers = this._feedsMessage.feeds || [] 947 | var map = new Map() 948 | 949 | for (i = 0; i < this._db.feeds.length; i++) { 950 | map.set(this._db.feeds[i].key.toString('hex'), i) 951 | } 952 | 953 | for (i = 0; i < writers.length; i++) { 954 | var id = map.get(writers[i].key.toString('hex')) 955 | this._decodeMap[i] = id 956 | this._encodeMap[id] = i 957 | if (this._authorized) { 958 | this._db._writers[id].authorize() 959 | updateReplicates = true 960 | } 961 | } 962 | 963 | if 
(!updateReplicates) return 964 | 965 | for (i = 0; i < this._db._replicating.length; i++) { 966 | this._db._replicating[i]() 967 | } 968 | } 969 | 970 | Writer.prototype.authorizes = function (key, visited) { 971 | if (!visited) visited = new Array(this._db._writers.length) 972 | 973 | if (this._feed.key.equals(key)) return true 974 | if (!this._feedsMessage || visited[this._id]) return false 975 | visited[this._id] = true 976 | 977 | var feeds = this._feedsMessage.feeds || [] 978 | for (var i = 0; i < feeds.length; i++) { 979 | var authedKey = feeds[i].key 980 | if (authedKey.equals(key)) return true 981 | var authedWriter = this._db._getWriter(authedKey) 982 | if (authedWriter.authorizes(key, visited)) return true 983 | } 984 | 985 | return false 986 | } 987 | 988 | Writer.prototype.length = function () { 989 | if (this._checkout) return this._length 990 | return Math.max(this._writeLength, Math.max(this._feed.length, this._feed.remoteLength)) 991 | } 992 | 993 | function filterHeads (list) { 994 | var heads = [] 995 | for (var i = 0; i < list.length; i++) { 996 | if (isHead(list[i], list)) heads.push(list[i]) 997 | } 998 | return heads 999 | } 1000 | 1001 | function isHead (node, list) { 1002 | if (!node) return false 1003 | 1004 | var clock = node.seq + 1 1005 | 1006 | for (var i = 0; i < list.length; i++) { 1007 | var other = list[i] 1008 | if (other === node || !other) continue 1009 | if ((other.clock[node.feed] || 0) >= clock) return false 1010 | } 1011 | 1012 | return true 1013 | } 1014 | 1015 | function checkoutEmpty (db) { 1016 | db = db.checkout(Buffer.from([])) 1017 | return db 1018 | } 1019 | 1020 | function readyAndHeads (self, update, cb) { 1021 | self.ready(function (err) { 1022 | if (err) return cb(err) 1023 | self._getHeads(update, cb) 1024 | }) 1025 | } 1026 | 1027 | function indexOf (list, ptr) { 1028 | for (var i = 0; i < list.length; i++) { 1029 | var p = list[i] 1030 | if (ptr.feed === p.feed && ptr.seq === p.seq) return i 1031 | } 1032 | 
return -1 1033 | } 1034 | 1035 | function isOptions (opts) { 1036 | return typeof opts === 'object' && !!opts && !Buffer.isBuffer(opts) 1037 | } 1038 | 1039 | function createStorage (st) { 1040 | if (typeof st === 'function') return st 1041 | return function (name) { 1042 | return raf(path.join(st, name)) 1043 | } 1044 | } 1045 | 1046 | function reduceFirst (a, b) { 1047 | return a 1048 | } 1049 | 1050 | function isNullish (v) { 1051 | return v === null || v === undefined 1052 | } 1053 | 1054 | function noop () {} 1055 | 1056 | function inspect () { 1057 | return `Node(key=${this.key}` + 1058 | `, value=${util.inspect(this.value)}` + 1059 | `, seq=${this.seq}` + 1060 | `, feed=${this.feed})` + 1061 | `)` 1062 | } 1063 | -------------------------------------------------------------------------------- /lib/derive.js: -------------------------------------------------------------------------------- 1 | var sodium = require('sodium-universal') 2 | 3 | var CONTEXT = Buffer.from('hyperdb1') // hyperdb v1 4 | 5 | module.exports = deriveKeyPair 6 | 7 | function deriveKeyPair (secretKey) { 8 | var seed = Buffer.alloc(sodium.crypto_sign_SEEDBYTES) 9 | var keyPair = { 10 | publicKey: Buffer.alloc(sodium.crypto_sign_PUBLICKEYBYTES), 11 | secretKey: Buffer.alloc(sodium.crypto_sign_SECRETKEYBYTES) 12 | } 13 | 14 | sodium.crypto_kdf_derive_from_key(seed, 1, CONTEXT, secretKey) 15 | sodium.crypto_sign_seed_keypair(keyPair.publicKey, keyPair.secretKey, seed) 16 | seed.fill(0) 17 | 18 | return keyPair 19 | } 20 | -------------------------------------------------------------------------------- /lib/differ.js: -------------------------------------------------------------------------------- 1 | var nanoiterator = require('nanoiterator') 2 | var inherits = require('inherits') 3 | var iterator = require('./iterator') 4 | var options = require('./options') 5 | 6 | module.exports = Differ 7 | 8 | function Differ (db, otherDb, prefix, opts) { 9 | if (!(this instanceof Differ)) return new 
Differ(db, otherDb, prefix, opts) 10 | nanoiterator.call(this) 11 | 12 | this._map = options.map(opts, db) 13 | this._reduce = options.reduce(opts, db) 14 | 15 | this._left = iterator(db, prefix, opts) 16 | this._right = iterator(otherDb, prefix, opts) 17 | this._leftNodes = null 18 | this._rightNodes = null 19 | 20 | // do not map/reduce the iterators - we just reset them here 21 | // cause that is easy peasy instead of extending the options 22 | noMapReduce(this._left) 23 | noMapReduce(this._right) 24 | } 25 | 26 | inherits(Differ, nanoiterator) 27 | 28 | Differ.prototype._next = function (cb) { 29 | var self = this 30 | 31 | this._nextLeft(function (err, l) { 32 | if (err) return cb(err) 33 | 34 | self._nextRight(function (err, r) { 35 | if (err) return cb(err) 36 | 37 | if (!r && !l) return cb(null, null) 38 | 39 | if (!r || !l) { 40 | self._leftNodes = self._rightNodes = null 41 | return cb(null, {left: self._prereturn(l), right: self._prereturn(r)}) 42 | } 43 | 44 | var kl = l[0].key 45 | var kr = r[0].key 46 | 47 | if (kl === kr) { 48 | if (same(l, r)) return self._skip(cb) 49 | // update / conflict 50 | self._leftNodes = self._rightNodes = null 51 | return cb(null, {left: self._prereturn(l), right: self._prereturn(r)}) 52 | } 53 | 54 | // sort keys 55 | var sl = l[0].path.join('') + '@' + kl 56 | var sr = r[0].path.join('') + '@' + kr 57 | 58 | if (sl < sr) { // move left 59 | self._leftNodes = null 60 | cb(null, {left: self._prereturn(l), right: null}) 61 | } else { // move right 62 | self._rightNodes = null 63 | cb(null, {left: null, right: self._prereturn(r)}) 64 | } 65 | }) 66 | }) 67 | } 68 | 69 | Differ.prototype._prereturn = function (nodes) { 70 | if (!nodes) return nodes 71 | if (this._map) nodes = nodes.map(this._map) 72 | if (this._reduce) nodes = nodes.reduce(this._reduce) 73 | return nodes 74 | } 75 | 76 | Differ.prototype._skip = function (cb) { 77 | /* 78 | // TODO: this can be greatly simplified 79 | var map = new Map() 80 | 81 | 
this._left._workers.forEach(function (t) { 82 | t.stack.forEach(index) 83 | }) 84 | this._right._workers.forEach(function (t) { 85 | t.stack.forEach(index) 86 | }) 87 | this._left._workers.forEach(function (t) { 88 | t.stack = t.stack.filter(filter) 89 | }) 90 | this._right._workers.forEach(function (t) { 91 | t.stack = t.stack.filter(filter) 92 | }) 93 | 94 | function index (s) { 95 | if (!s.node) return 96 | var k = s.node.feed + '@' + s.node.seq + '@' + s.i 97 | map.set(k, 1 + (map.get(k) || 0)) 98 | } 99 | 100 | function filter (s) { 101 | if (!s.node) return true 102 | var k = s.node.feed + '@' + s.node.seq + '@' + s.i 103 | return map.get(k) < 2 104 | } 105 | */ 106 | this._leftNodes = this._rightNodes = null 107 | this._next(cb) 108 | } 109 | 110 | Differ.prototype._nextRight = function (cb) { 111 | if (this._rightNodes) return cb(null, this._rightNodes) 112 | var self = this 113 | this._right.next(function (err, nodes) { 114 | if (err) return cb(err) 115 | self._rightNodes = nodes 116 | cb(null, nodes) 117 | }) 118 | } 119 | 120 | Differ.prototype._nextLeft = function (cb) { 121 | if (this._leftNodes) return cb(null, this._leftNodes) 122 | var self = this 123 | this._left.next(function (err, nodes) { 124 | if (err) return cb(err) 125 | self._leftNodes = nodes 126 | cb(null, nodes) 127 | }) 128 | } 129 | 130 | function same (l, r) { 131 | if (l.length !== r.length) return false 132 | // TODO: sort order should be same, but should verify that 133 | for (var i = 0; i < l.length; i++) { 134 | var a = l[i] 135 | var b = r[i] 136 | if (a.feed !== b.feed || a.seq !== b.seq) return false 137 | } 138 | return true 139 | } 140 | 141 | function noMapReduce (ite) { 142 | // if the iterator options are updated we *have* to 143 | // update them here 144 | ite._map = ite._reduce = null 145 | } 146 | -------------------------------------------------------------------------------- /lib/get.js: -------------------------------------------------------------------------------- 
1 | var hash = require('./hash') 2 | var options = require('./options') 3 | 4 | module.exports = get 5 | 6 | function get (db, heads, key, opts, cb) { 7 | if (typeof opts === 'function') return get(db, heads, key, null, opts) 8 | 9 | var req = new GetRequest(db, key, opts) 10 | req.start(heads, cb) 11 | } 12 | 13 | function GetRequest (db, key, opts) { 14 | this.key = key 15 | this.results = [] 16 | this._deletes = !!(opts && opts.deletes) 17 | this._callback = noop 18 | this._options = opts || null 19 | this._prefixed = !!(opts && opts.prefix) 20 | this._path = (opts && opts.path) || hash(key, !this._prefixed) 21 | this._onlookup = (opts && opts.onlookup) || null 22 | this._db = db 23 | this._error = null 24 | this._active = 0 25 | this._workers = [] 26 | } 27 | 28 | GetRequest.prototype._push = function (node) { 29 | if (this._prefixed) { 30 | this.results.push(node) 31 | } else if (node.key === this.key) { 32 | this.results.push(node) 33 | } 34 | } 35 | 36 | GetRequest.prototype.start = function (heads, cb) { 37 | if (cb) this._callback = cb 38 | if (!heads.length) return process.nextTick(finalize, this) 39 | 40 | if (this._onlookup) { 41 | for (var i = 0; i < heads.length; i++) { 42 | this._onlookup({feed: heads[i].feed, seq: heads[i].seq}) 43 | } 44 | } 45 | 46 | this._update(heads, null) 47 | } 48 | 49 | GetRequest.prototype._update = function (nodes, worker) { 50 | if (worker) { 51 | var r = this._workers.indexOf(worker) 52 | if (r > -1) this._workers.splice(r, 1) 53 | } 54 | 55 | this._active += nodes.length 56 | 57 | for (var i = 0; i < nodes.length; i++) { 58 | var next = new Worker(nodes[i], worker ? 
worker.i + 1 : 0) 59 | this._workers.push(next) 60 | if (this._isHead(next.lock, next)) this._moveCloser(next) 61 | else this._end(next, null, true) 62 | } 63 | 64 | if (worker) { 65 | this._end(worker, null) 66 | } 67 | } 68 | 69 | GetRequest.prototype._end = function (worker, err, removeWorker) { 70 | if (removeWorker) { 71 | var i = this._workers.indexOf(worker) 72 | if (i > -1) this._workers.splice(i, 1) 73 | } 74 | 75 | if (err) this._error = err 76 | if (--this._active) return 77 | this._finalize() 78 | } 79 | 80 | GetRequest.prototype._finalize = function () { 81 | var error = this._error 82 | var cb = this._callback 83 | 84 | this._error = this._callback = null 85 | 86 | if (error) cb(error) 87 | else cb(null, this._prereturn(this.results)) 88 | } 89 | 90 | GetRequest.prototype._prereturn = function (results) { 91 | if (!this._deletes && allDeletes(results) && !this._prefixed) results = [] 92 | 93 | var map = options.map(this._options, this._db) 94 | var reduce = options.reduce(this._options, this._db) 95 | if (map) results = results.map(map) 96 | if (reduce) return results.length ? 
results.reduce(reduce) : null 97 | 98 | return results 99 | } 100 | 101 | GetRequest.prototype._updatePointers = function (ptrs, worker) { 102 | var self = this 103 | 104 | if (this._onlookup) mapPointers(this._onlookup, ptrs) 105 | this._db._getAllPointers(ptrs, false, onnodes) 106 | 107 | function onnodes (err, nodes) { 108 | if (err) return self._end(worker, err, false) 109 | self._update(nodes, worker) 110 | } 111 | } 112 | 113 | GetRequest.prototype._getAndMoveCloser = function (ptr, worker) { 114 | var self = this 115 | 116 | // TODO: make this optimisation *everywhere* (ie isHead(ptr) vs isHead(node)) 117 | // if (!self._isHead(ptr, worker)) return self._end(worker, null) 118 | if (this._onlookup) this._onlookup(ptr) 119 | this._db._getPointer(ptr.feed, ptr.seq, false, onnode) 120 | 121 | function onnode (err, node) { 122 | if (err) return self._end(worker, err, false) 123 | 124 | if (!self._isHead(node, worker)) return self._end(worker, null, true) 125 | 126 | worker.head = node 127 | worker.i++ 128 | self._moveCloser(worker) 129 | } 130 | } 131 | 132 | GetRequest.prototype._pushPointers = function (ptrs, worker) { 133 | var self = this 134 | 135 | if (this._onlookup) mapPointers(this._onlookup, ptrs) 136 | this._db._getAllPointers(ptrs, false, onresults) 137 | 138 | function onresults (err, nodes) { 139 | if (err) return self._end(worker, err, false) 140 | 141 | for (var i = 0; i < nodes.length; i++) { 142 | var node = nodes[i] 143 | if (self._isHead(node, worker)) self._push(node) 144 | } 145 | 146 | self._end(worker, null, false) 147 | } 148 | } 149 | 150 | GetRequest.prototype._moveCloser = function (worker) { 151 | var path = this._path 152 | var head = worker.head 153 | 154 | // If no head -> 404 155 | if (!head) return this._end(worker, null, false) 156 | 157 | // We want to find the key closest to our path. 
158 | // At max, we need to go through path.length iterations 159 | for (; worker.i < path.length; worker.i++) { 160 | var i = worker.i 161 | var val = path[i] 162 | if (head.path[i] === val) continue 163 | 164 | // We need a closer node. See if the trie has one that 165 | // matches the path value 166 | var remoteBucket = head.trie[i] || [] 167 | var remoteValues = remoteBucket[val] || [] 168 | 169 | // No closer ones -> 404 170 | if (!remoteValues.length) return this._end(worker, null, false) 171 | 172 | // More than one reference -> We have forks. 173 | if (remoteValues.length > 1) this._updatePointers(remoteValues, worker) 174 | else this._getAndMoveCloser(remoteValues[0], worker) 175 | return 176 | } 177 | 178 | this._push(head) 179 | 180 | // TODO: not sure if this is even needed! 181 | // check if we had a collision, or similar 182 | // (our last bucket contains more stuff) 183 | 184 | var top = path.length - 1 185 | var last = head.trie[top] 186 | var lastValues = last && last[path[top]] 187 | if (!lastValues || !lastValues.length) return this._end(worker, null, false) 188 | 189 | this._pushPointers(lastValues, worker) 190 | } 191 | 192 | GetRequest.prototype._isHead = function (head, worker) { 193 | var clock = head.seq + 1 194 | 195 | for (var i = 0; i < this._workers.length; i++) { 196 | var otherWorker = this._workers[i] 197 | if (otherWorker === worker) continue 198 | 199 | var otherClock = otherWorker.lock.clock[head.feed] 200 | if (clock <= otherClock) return false 201 | } 202 | 203 | return true 204 | } 205 | 206 | function Worker (head, i) { 207 | this.i = i 208 | this.head = head 209 | this.lock = head 210 | } 211 | 212 | function noop () {} 213 | 214 | function allDeletes (list) { 215 | for (var i = 0; i < list.length; i++) { 216 | if (!list[i].deleted) return false 217 | } 218 | return true 219 | } 220 | 221 | function finalize (req) { 222 | req._finalize() 223 | } 224 | 225 | function mapPointers (fn, ptrs) { 226 | for (var i = 0; i < 
ptrs.length; i++) fn(ptrs[i]) 227 | } 228 | -------------------------------------------------------------------------------- /lib/hash.js: -------------------------------------------------------------------------------- 1 | var sodium = require('sodium-universal') 2 | 3 | var KEY = Buffer.alloc(16) 4 | var OUT = Buffer.alloc(8) 5 | 6 | hash.TERMINATE = 4 7 | hash.LENGTH = 32 8 | 9 | module.exports = hash 10 | 11 | function hash (keys, terminate) { 12 | if (typeof keys === 'string') keys = split(keys) 13 | 14 | var all = new Array(keys.length * 32 + (terminate ? 1 : 0)) 15 | 16 | for (var i = 0; i < keys.length; i++) { 17 | sodium.crypto_shorthash(OUT, Buffer.from(keys[i]), KEY) 18 | expandHash(OUT, all, i * 32) 19 | } 20 | 21 | if (terminate) all[all.length - 1] = 4 22 | 23 | return all 24 | } 25 | 26 | function expandHash (next, out, offset) { 27 | for (var i = 0; i < next.length; i++) { 28 | var n = next[i] 29 | 30 | for (var j = 0; j < 4; j++) { 31 | var r = n & 3 32 | out[offset++] = r 33 | n -= r 34 | n /= 4 35 | } 36 | } 37 | } 38 | 39 | function split (key) { 40 | var list = key.split('/') 41 | if (list[0] === '') list.shift() 42 | if (list[list.length - 1] === '') list.pop() 43 | return list 44 | } 45 | -------------------------------------------------------------------------------- /lib/history.js: -------------------------------------------------------------------------------- 1 | var nanoiterator = require('nanoiterator') 2 | var inherits = require('inherits') 3 | 4 | module.exports = Iterator 5 | 6 | function Iterator (db, opts) { 7 | if (!(this instanceof Iterator)) return new Iterator(db, opts) 8 | nanoiterator.call(this) 9 | 10 | this._db = db 11 | this._reverse = !!(opts && opts.reverse) 12 | this._end = [] 13 | this._nodes = [] 14 | } 15 | 16 | inherits(Iterator, nanoiterator) 17 | 18 | Iterator.prototype._open = function (cb) { 19 | var self = this 20 | 21 | this._db.heads(function (err, heads) { 22 | if (err) return cb(err) 23 | 24 | var writers 
= self._db._writers 25 | 26 | for (var i = 0; i < writers.length; i++) { 27 | self._end.push(highestClock(heads, i)) 28 | self._nodes.push(null) 29 | } 30 | 31 | self._updateAll(cb) 32 | }) 33 | } 34 | 35 | Iterator.prototype._updateAll = function (cb) { 36 | var self = this 37 | var missing = 0 38 | var error = null 39 | var writers = this._db._writers 40 | 41 | for (var i = 0; i < this._nodes.length; i++) { 42 | if (this._end[i] > 1 && !this._nodes[i]) { 43 | missing++ 44 | writers[i].get(this._reverse ? this._end[i] - 1 : 1, onnode) 45 | } 46 | } 47 | 48 | if (!missing) cb(null) 49 | 50 | function onnode (err, node) { 51 | if (err) error = err 52 | else self._nodes[node.feed] = node 53 | if (!--missing) cb(error) 54 | } 55 | } 56 | 57 | Iterator.prototype._next = function (cb) { 58 | var node = this._reverse ? this._max() : this._min() 59 | if (!node) return process.nextTick(cb, null, null) 60 | 61 | if (this._reverse) this._pop(node, cb) 62 | else this._shift(node, cb) 63 | } 64 | 65 | Iterator.prototype._pop = function (node, cb) { 66 | var self = this 67 | var writers = self._db._writers 68 | var w = writers[node.feed] 69 | var seq = node.seq - 1 70 | 71 | if (seq <= 0) { 72 | this._nodes[node.feed] = null 73 | return process.nextTick(cb, null, node) 74 | } 75 | 76 | w.get(seq, function (err, next) { 77 | if (err) return cb(err) 78 | self._nodes[next.feed] = next 79 | cb(null, node) 80 | }) 81 | } 82 | 83 | Iterator.prototype._shift = function (node, cb) { 84 | var self = this 85 | var writers = self._db._writers 86 | var w = writers[node.feed] 87 | var seq = node.seq + 1 88 | 89 | if (seq >= this._end[node.feed]) { 90 | this._nodes[node.feed] = null 91 | return process.nextTick(cb, null, node) 92 | } 93 | 94 | w.get(seq, function (err, next) { 95 | if (err) return cb(err) 96 | self._nodes[next.feed] = next 97 | cb(null, node) 98 | }) 99 | } 100 | 101 | Iterator.prototype._compare = function (fn) { 102 | var node = null 103 | for (var i = 0; i < 
this._nodes.length; i++) { 104 | var t = this._nodes[i] 105 | if (!t || (node && !fn(t, node))) continue 106 | node = t 107 | } 108 | return node 109 | } 110 | 111 | Iterator.prototype._min = function () { 112 | return this._compare(lt) 113 | } 114 | 115 | Iterator.prototype._max = function () { 116 | return this._compare(gt) 117 | } 118 | 119 | function lt (a, b) { 120 | var clock = a.feed < b.clock.length ? b.clock[a.feed] : 0 121 | return a.seq + 1 < clock 122 | } 123 | 124 | function gt (a, b) { 125 | var clock = a.feed < b.clock.length ? b.clock[a.feed] : 0 126 | return a.seq + 1 > clock 127 | } 128 | 129 | function highestClock (heads, i) { 130 | var max = 0 131 | for (var j = 0; j < heads.length; j++) { 132 | if (heads[j].clock.length <= i) continue 133 | max = Math.max(max, heads[j].clock[i]) 134 | } 135 | return max 136 | } 137 | -------------------------------------------------------------------------------- /lib/iterator.js: -------------------------------------------------------------------------------- 1 | var nanoiterator = require('nanoiterator') 2 | var inherits = require('inherits') 3 | var cmp = require('compare') 4 | var hash = require('./hash') 5 | var options = require('./options') 6 | 7 | var SORT_GT = [3, 2, 1, 0] 8 | var SORT_GTE = [3, 2, 1, 0, 4] 9 | 10 | module.exports = Iterator 11 | 12 | function Iterator (db, prefix, opts) { 13 | if (!(this instanceof Iterator)) return new Iterator(db, prefix, opts) 14 | if (!opts) opts = {} 15 | 16 | nanoiterator.call(this) 17 | 18 | this._db = db 19 | this._stack = [{ 20 | path: prefix ? hash(prefix, false) : [], 21 | node: null, 22 | i: 0 23 | }] 24 | 25 | this._recursive = opts.recursive !== false 26 | this._reverse = !!opts.reverse 27 | this._order = { 28 | gt: this._reverse ? SORT_GT.slice().reverse() : SORT_GT, 29 | gte: this._reverse ? SORT_GTE.slice().reverse() : SORT_GTE 30 | } 31 | 32 | this._gt = !!opts.gt 33 | this._start = this._stack[0].path.length 34 | this._end = this._recursive ? 
Infinity : this._start + hash.LENGTH 35 | this._map = options.map(opts, db) 36 | this._reduce = options.reduce(opts, db) 37 | this._collisions = [] 38 | this._deletes = !!(opts && opts.deletes) 39 | 40 | this._prefix = prefix 41 | this._pending = 0 42 | this._error = null 43 | } 44 | 45 | inherits(Iterator, nanoiterator) 46 | 47 | Iterator.prototype._pushPointer = function (ptr, i, cb) { 48 | var self = this 49 | var top = {path: null, node: null, i} 50 | 51 | this._pending++ 52 | this._stack.push(top) 53 | this._db._getPointer(ptr.feed, ptr.seq, false, done) 54 | 55 | function done (err, node) { 56 | if (err) self._error = err 57 | else top.node = node 58 | if (--self._pending) return 59 | if (self._error) return cb(self._error) 60 | self._next(cb) 61 | } 62 | } 63 | 64 | Iterator.prototype._pushNode = function (node, i) { 65 | this._stack.push({ 66 | path: null, 67 | node, 68 | i 69 | }) 70 | } 71 | 72 | Iterator.prototype._pushPrefix = function (path, i, val) { 73 | this._stack.push({ 74 | path: (i < path.length ? 
path.slice(0, i) : path).concat(val), 75 | node: null, 76 | i 77 | }) 78 | } 79 | 80 | // fast case 81 | Iterator.prototype._singleNode = function (top, cb) { 82 | var node = top.node 83 | var end = Math.min(this._end, node.trie.length) 84 | 85 | for (var i = top.i; i < end; i++) { 86 | var bucket = i < node.trie.length && node.trie[i] 87 | if (!bucket) continue 88 | 89 | var val = node.path[i] 90 | var order = this._sortOrder(i) 91 | 92 | for (var j = 0; j < order.length; j++) { 93 | var sortValue = order[j] 94 | var values = sortValue < bucket.length && bucket[sortValue] 95 | 96 | if (sortValue === val) { 97 | if (values) this._pushPrefix(node.path, i, sortValue) 98 | else this._pushNode(node, i + 1) 99 | continue 100 | } 101 | 102 | if (!values) continue 103 | if (values.length > 1) this._pushPrefix(node.path, i, sortValue) 104 | else this._pushPointer(values[0], i + 1, cb) 105 | } 106 | 107 | return this._pending === 0 108 | } 109 | 110 | if ((!this._deletes && node.deleted) || !isPrefix(node.key, this._prefix)) return true 111 | cb(null, this._prereturn([node])) 112 | return false 113 | } 114 | 115 | // slow case 116 | Iterator.prototype._multiNode = function (path, nodes, cb) { 117 | if (!nodes.length) return this._next(cb) 118 | if (nodes.length === 1) { 119 | this._pushNode(nodes[0], path.length) 120 | return this._next(cb) 121 | } 122 | 123 | var ptr = path.length 124 | 125 | if (ptr < this._end) { 126 | var order = this._sortOrder(ptr) 127 | 128 | for (var i = 0; i < order.length; i++) { 129 | var sortValue = order[i] 130 | if (!visitTrie(nodes, ptr, sortValue)) continue 131 | this._pushPrefix(path, path.length, sortValue) 132 | } 133 | } 134 | 135 | nodes = this._filterResult(nodes, ptr) 136 | if (nodes && (this._deletes || !allDeletes(nodes))) return cb(null, this._prereturn(nodes)) 137 | this._next(cb) 138 | } 139 | 140 | Iterator.prototype._filterResult = function (nodes, i) { 141 | var result = null 142 | 143 | nodes.sort(byKey, this._reverse) 144 | 
145 | for (var j = 0; j < nodes.length; j++) { 146 | var node = nodes[j] 147 | if (node.path.length !== i && i !== this._end) continue 148 | if (!isPrefix(node.key, this._prefix)) continue 149 | 150 | if (!result) result = [] 151 | 152 | if (result.length && result[0].key !== node.key) { 153 | this._collisions.push(result) 154 | result = [] 155 | } 156 | 157 | result.push(node) 158 | } 159 | 160 | return result 161 | } 162 | 163 | Iterator.prototype._next = function (cb) { 164 | var nodes = drain(this._collisions) 165 | if (nodes) return cb(null, this._prereturn(nodes)) 166 | 167 | var top = null 168 | 169 | while (true) { 170 | top = this._stack.pop() 171 | if (!top) return cb(null, null) 172 | if (!top.node) break 173 | if (!this._singleNode(top, cb)) return 174 | } 175 | 176 | this._lookupPrefix(top.path, cb) 177 | } 178 | 179 | Iterator.prototype._lookupPrefix = function (path, cb) { 180 | var self = this 181 | 182 | this._db.get('', {path, prefix: true, map: false, reduce: false}, done) 183 | 184 | function done (err, nodes) { 185 | if (err) return cb(err) 186 | self._multiNode(path, nodes, cb) 187 | } 188 | } 189 | 190 | Iterator.prototype._prereturn = function (nodes) { 191 | if (this._map) nodes = nodes.map(this._map) 192 | if (this._reduce) return nodes.reduce(this._reduce) 193 | return nodes 194 | } 195 | 196 | Iterator.prototype._sortOrder = function (i) { 197 | var gt = this._gt || !this._start 198 | return gt && this._start === i ? this._order.gt : this._order.gte 199 | } 200 | 201 | function byKey (a, b, reverse) { 202 | var k = cmp(b.key, a.key) 203 | return (reverse ? 
-1 : 1) * (k || b.feed - a.feed) 204 | } 205 | 206 | function allDeletes (nodes) { 207 | for (var i = 0; i < nodes.length; i++) { 208 | if (!nodes[i].deleted) return false 209 | } 210 | return true 211 | } 212 | 213 | function visitTrie (nodes, ptr, val) { 214 | for (var i = 0; i < nodes.length; i++) { 215 | var node = nodes[i] 216 | var bucket = ptr < node.trie.length && node.trie[ptr] 217 | if (bucket && bucket[val]) return true 218 | if (node.path[ptr] === val) return true 219 | } 220 | return false 221 | } 222 | 223 | function drain (collisions) { 224 | while (collisions.length) { 225 | var collision = collisions.pop() 226 | if (!this._deletes && allDeletes(collision)) continue 227 | return collision 228 | } 229 | 230 | return null 231 | } 232 | 233 | function isPrefix (s, prefix) { 234 | if (!prefix) return true 235 | if (s.startsWith) return s.startsWith(prefix) 236 | return s.slice(0, prefix.length) === prefix 237 | } 238 | -------------------------------------------------------------------------------- /lib/key-history.js: -------------------------------------------------------------------------------- 1 | var nanoiterator = require('nanoiterator') 2 | var inherits = require('inherits') 3 | var get = require('./get') 4 | var normalizeKey = require('./normalize') 5 | 6 | module.exports = Iterator 7 | 8 | function Iterator (db, prefix, opts) { 9 | if (!(this instanceof Iterator)) return new Iterator(db, prefix, opts) 10 | nanoiterator.call(this) 11 | this._db = db 12 | this._prefix = normalizeKey(prefix) 13 | this._heads = undefined 14 | } 15 | 16 | inherits(Iterator, nanoiterator) 17 | 18 | Iterator.prototype._open = function (cb) { 19 | this._db.heads((err, heads) => { 20 | if (err) return cb(err) 21 | this._heads = heads 22 | cb() 23 | }) 24 | } 25 | 26 | Iterator.prototype._next = function (cb) { 27 | if (!this._heads || !this._heads.length) return cb(null, null) 28 | get(this._db, this._heads, this._prefix, 29 | { reduce: false, deletes: true }, 30 | 
(err, nodes) => { 31 | if (err) return cb(err) 32 | if (nodes.length === 0) return cb(null, null) 33 | this._nextHeads(nodes, (err, heads) => { 34 | if (err) return cb(err) 35 | this._heads = heads 36 | cb(null, nodes) 37 | }) 38 | }) 39 | } 40 | 41 | Iterator.prototype._nextHeads = function (nodes, cb) { 42 | var i 43 | var heads = [] 44 | var error = null 45 | var missing = 0 46 | 47 | for (i = 0; i < nodes.length; i++) { 48 | var node = nodes[i] 49 | for (var c = 0; c < node.clock.length; c++) { 50 | var seq = node.clock[c] 51 | if (c !== node.feed && seq > 2) { 52 | missing++ 53 | this._db._writers[c].get(seq - 1, onHead) 54 | } else if (c === node.feed && node.seq > 1) { 55 | missing++ 56 | this._db._writers[node.feed].get(node.seq - 1, onHead) 57 | } 58 | } 59 | } 60 | if (missing === 0) cb(null, undefined) 61 | 62 | function onHead (err, head) { 63 | if (head) heads.push(head) 64 | if (err) error = err 65 | if (--missing) return 66 | 67 | cb(error, filterHeads(heads)) 68 | } 69 | } 70 | 71 | function filterHeads (list) { 72 | var heads = [] 73 | for (var i = 0; i < list.length; i++) { 74 | if (isHead(list[i], list)) heads.push(list[i]) 75 | } 76 | return heads 77 | } 78 | 79 | function isHead (node, list) { 80 | if (!node) return false 81 | var clock = node.seq + 1 82 | for (var i = 0; i < list.length; i++) { 83 | var other = list[i] 84 | if (other === node || !other) { 85 | continue 86 | } 87 | if ((other.clock[node.feed] || 0) >= clock) return false 88 | } 89 | return true 90 | } 91 | -------------------------------------------------------------------------------- /lib/messages.js: -------------------------------------------------------------------------------- 1 | // This file is auto generated by the protocol-buffers cli tool 2 | 3 | /* eslint-disable quotes */ 4 | /* eslint-disable indent */ 5 | /* eslint-disable no-redeclare */ 6 | /* eslint-disable camelcase */ 7 | 8 | // Remember to `npm install --save protocol-buffers-encodings` 9 | var encodings = 
require('protocol-buffers-encodings') 10 | var varint = encodings.varint 11 | var skip = encodings.skip 12 | 13 | var Entry = exports.Entry = { 14 | buffer: true, 15 | encodingLength: null, 16 | encode: null, 17 | decode: null 18 | } 19 | 20 | var InflatedEntry = exports.InflatedEntry = { 21 | buffer: true, 22 | encodingLength: null, 23 | encode: null, 24 | decode: null 25 | } 26 | 27 | var Header = exports.Header = { 28 | buffer: true, 29 | encodingLength: null, 30 | encode: null, 31 | decode: null 32 | } 33 | 34 | defineEntry() 35 | defineInflatedEntry() 36 | defineHeader() 37 | 38 | function defineEntry () { 39 | var enc = [ 40 | encodings.string, 41 | encodings.bytes, 42 | encodings.bool, 43 | encodings.varint 44 | ] 45 | 46 | Entry.encodingLength = encodingLength 47 | Entry.encode = encode 48 | Entry.decode = decode 49 | 50 | function encodingLength (obj) { 51 | var length = 0 52 | if (!defined(obj.key)) throw new Error("key is required") 53 | var len = enc[0].encodingLength(obj.key) 54 | length += 1 + len 55 | if (defined(obj.value)) { 56 | var len = enc[1].encodingLength(obj.value) 57 | length += 1 + len 58 | } 59 | if (defined(obj.deleted)) { 60 | var len = enc[2].encodingLength(obj.deleted) 61 | length += 1 + len 62 | } 63 | if (!defined(obj.trie)) throw new Error("trie is required") 64 | var len = enc[1].encodingLength(obj.trie) 65 | length += 1 + len 66 | if (defined(obj.clock)) { 67 | for (var i = 0; i < obj.clock.length; i++) { 68 | if (!defined(obj.clock[i])) continue 69 | var len = enc[3].encodingLength(obj.clock[i]) 70 | length += 1 + len 71 | } 72 | } 73 | if (defined(obj.inflate)) { 74 | var len = enc[3].encodingLength(obj.inflate) 75 | length += 1 + len 76 | } 77 | return length 78 | } 79 | 80 | function encode (obj, buf, offset) { 81 | if (!offset) offset = 0 82 | if (!buf) buf = Buffer.allocUnsafe(encodingLength(obj)) 83 | var oldOffset = offset 84 | if (!defined(obj.key)) throw new Error("key is required") 85 | buf[offset++] = 10 86 | 
enc[0].encode(obj.key, buf, offset) 87 | offset += enc[0].encode.bytes 88 | if (defined(obj.value)) { 89 | buf[offset++] = 18 90 | enc[1].encode(obj.value, buf, offset) 91 | offset += enc[1].encode.bytes 92 | } 93 | if (defined(obj.deleted)) { 94 | buf[offset++] = 24 95 | enc[2].encode(obj.deleted, buf, offset) 96 | offset += enc[2].encode.bytes 97 | } 98 | if (!defined(obj.trie)) throw new Error("trie is required") 99 | buf[offset++] = 34 100 | enc[1].encode(obj.trie, buf, offset) 101 | offset += enc[1].encode.bytes 102 | if (defined(obj.clock)) { 103 | for (var i = 0; i < obj.clock.length; i++) { 104 | if (!defined(obj.clock[i])) continue 105 | buf[offset++] = 40 106 | enc[3].encode(obj.clock[i], buf, offset) 107 | offset += enc[3].encode.bytes 108 | } 109 | } 110 | if (defined(obj.inflate)) { 111 | buf[offset++] = 48 112 | enc[3].encode(obj.inflate, buf, offset) 113 | offset += enc[3].encode.bytes 114 | } 115 | encode.bytes = offset - oldOffset 116 | return buf 117 | } 118 | 119 | function decode (buf, offset, end) { 120 | if (!offset) offset = 0 121 | if (!end) end = buf.length 122 | if (!(end <= buf.length && offset <= buf.length)) throw new Error("Decoded message is not valid") 123 | var oldOffset = offset 124 | var obj = { 125 | key: "", 126 | value: null, 127 | deleted: false, 128 | trie: null, 129 | clock: [], 130 | inflate: 0 131 | } 132 | var found0 = false 133 | var found3 = false 134 | while (true) { 135 | if (end <= offset) { 136 | if (!found0 || !found3) throw new Error("Decoded message is not valid") 137 | decode.bytes = offset - oldOffset 138 | return obj 139 | } 140 | var prefix = varint.decode(buf, offset) 141 | offset += varint.decode.bytes 142 | var tag = prefix >> 3 143 | switch (tag) { 144 | case 1: 145 | obj.key = enc[0].decode(buf, offset) 146 | offset += enc[0].decode.bytes 147 | found0 = true 148 | break 149 | case 2: 150 | obj.value = enc[1].decode(buf, offset) 151 | offset += enc[1].decode.bytes 152 | break 153 | case 3: 154 | 
obj.deleted = enc[2].decode(buf, offset) 155 | offset += enc[2].decode.bytes 156 | break 157 | case 4: 158 | obj.trie = enc[1].decode(buf, offset) 159 | offset += enc[1].decode.bytes 160 | found3 = true 161 | break 162 | case 5: 163 | obj.clock.push(enc[3].decode(buf, offset)) 164 | offset += enc[3].decode.bytes 165 | break 166 | case 6: 167 | obj.inflate = enc[3].decode(buf, offset) 168 | offset += enc[3].decode.bytes 169 | break 170 | default: 171 | offset = skip(prefix & 7, buf, offset) 172 | } 173 | } 174 | } 175 | } 176 | 177 | function defineInflatedEntry () { 178 | var Feed = InflatedEntry.Feed = { 179 | buffer: true, 180 | encodingLength: null, 181 | encode: null, 182 | decode: null 183 | } 184 | 185 | defineFeed() 186 | 187 | function defineFeed () { 188 | var enc = [ 189 | encodings.bytes 190 | ] 191 | 192 | Feed.encodingLength = encodingLength 193 | Feed.encode = encode 194 | Feed.decode = decode 195 | 196 | function encodingLength (obj) { 197 | var length = 0 198 | if (!defined(obj.key)) throw new Error("key is required") 199 | var len = enc[0].encodingLength(obj.key) 200 | length += 1 + len 201 | return length 202 | } 203 | 204 | function encode (obj, buf, offset) { 205 | if (!offset) offset = 0 206 | if (!buf) buf = Buffer.allocUnsafe(encodingLength(obj)) 207 | var oldOffset = offset 208 | if (!defined(obj.key)) throw new Error("key is required") 209 | buf[offset++] = 10 210 | enc[0].encode(obj.key, buf, offset) 211 | offset += enc[0].encode.bytes 212 | encode.bytes = offset - oldOffset 213 | return buf 214 | } 215 | 216 | function decode (buf, offset, end) { 217 | if (!offset) offset = 0 218 | if (!end) end = buf.length 219 | if (!(end <= buf.length && offset <= buf.length)) throw new Error("Decoded message is not valid") 220 | var oldOffset = offset 221 | var obj = { 222 | key: null 223 | } 224 | var found0 = false 225 | while (true) { 226 | if (end <= offset) { 227 | if (!found0) throw new Error("Decoded message is not valid") 228 | decode.bytes = 
offset - oldOffset 229 | return obj 230 | } 231 | var prefix = varint.decode(buf, offset) 232 | offset += varint.decode.bytes 233 | var tag = prefix >> 3 234 | switch (tag) { 235 | case 1: 236 | obj.key = enc[0].decode(buf, offset) 237 | offset += enc[0].decode.bytes 238 | found0 = true 239 | break 240 | default: 241 | offset = skip(prefix & 7, buf, offset) 242 | } 243 | } 244 | } 245 | } 246 | 247 | var enc = [ 248 | encodings.string, 249 | encodings.bytes, 250 | encodings.bool, 251 | encodings.varint, 252 | Feed 253 | ] 254 | 255 | InflatedEntry.encodingLength = encodingLength 256 | InflatedEntry.encode = encode 257 | InflatedEntry.decode = decode 258 | 259 | function encodingLength (obj) { 260 | var length = 0 261 | if (!defined(obj.key)) throw new Error("key is required") 262 | var len = enc[0].encodingLength(obj.key) 263 | length += 1 + len 264 | if (defined(obj.value)) { 265 | var len = enc[1].encodingLength(obj.value) 266 | length += 1 + len 267 | } 268 | if (defined(obj.deleted)) { 269 | var len = enc[2].encodingLength(obj.deleted) 270 | length += 1 + len 271 | } 272 | if (!defined(obj.trie)) throw new Error("trie is required") 273 | var len = enc[1].encodingLength(obj.trie) 274 | length += 1 + len 275 | if (defined(obj.clock)) { 276 | for (var i = 0; i < obj.clock.length; i++) { 277 | if (!defined(obj.clock[i])) continue 278 | var len = enc[3].encodingLength(obj.clock[i]) 279 | length += 1 + len 280 | } 281 | } 282 | if (defined(obj.inflate)) { 283 | var len = enc[3].encodingLength(obj.inflate) 284 | length += 1 + len 285 | } 286 | if (defined(obj.feeds)) { 287 | for (var i = 0; i < obj.feeds.length; i++) { 288 | if (!defined(obj.feeds[i])) continue 289 | var len = enc[4].encodingLength(obj.feeds[i]) 290 | length += varint.encodingLength(len) 291 | length += 1 + len 292 | } 293 | } 294 | if (defined(obj.contentFeed)) { 295 | var len = enc[1].encodingLength(obj.contentFeed) 296 | length += 1 + len 297 | } 298 | return length 299 | } 300 | 301 | function 
encode (obj, buf, offset) { 302 | if (!offset) offset = 0 303 | if (!buf) buf = Buffer.allocUnsafe(encodingLength(obj)) 304 | var oldOffset = offset 305 | if (!defined(obj.key)) throw new Error("key is required") 306 | buf[offset++] = 10 307 | enc[0].encode(obj.key, buf, offset) 308 | offset += enc[0].encode.bytes 309 | if (defined(obj.value)) { 310 | buf[offset++] = 18 311 | enc[1].encode(obj.value, buf, offset) 312 | offset += enc[1].encode.bytes 313 | } 314 | if (defined(obj.deleted)) { 315 | buf[offset++] = 24 316 | enc[2].encode(obj.deleted, buf, offset) 317 | offset += enc[2].encode.bytes 318 | } 319 | if (!defined(obj.trie)) throw new Error("trie is required") 320 | buf[offset++] = 34 321 | enc[1].encode(obj.trie, buf, offset) 322 | offset += enc[1].encode.bytes 323 | if (defined(obj.clock)) { 324 | for (var i = 0; i < obj.clock.length; i++) { 325 | if (!defined(obj.clock[i])) continue 326 | buf[offset++] = 40 327 | enc[3].encode(obj.clock[i], buf, offset) 328 | offset += enc[3].encode.bytes 329 | } 330 | } 331 | if (defined(obj.inflate)) { 332 | buf[offset++] = 48 333 | enc[3].encode(obj.inflate, buf, offset) 334 | offset += enc[3].encode.bytes 335 | } 336 | if (defined(obj.feeds)) { 337 | for (var i = 0; i < obj.feeds.length; i++) { 338 | if (!defined(obj.feeds[i])) continue 339 | buf[offset++] = 58 340 | varint.encode(enc[4].encodingLength(obj.feeds[i]), buf, offset) 341 | offset += varint.encode.bytes 342 | enc[4].encode(obj.feeds[i], buf, offset) 343 | offset += enc[4].encode.bytes 344 | } 345 | } 346 | if (defined(obj.contentFeed)) { 347 | buf[offset++] = 66 348 | enc[1].encode(obj.contentFeed, buf, offset) 349 | offset += enc[1].encode.bytes 350 | } 351 | encode.bytes = offset - oldOffset 352 | return buf 353 | } 354 | 355 | function decode (buf, offset, end) { 356 | if (!offset) offset = 0 357 | if (!end) end = buf.length 358 | if (!(end <= buf.length && offset <= buf.length)) throw new Error("Decoded message is not valid") 359 | var oldOffset = 
offset 360 | var obj = { 361 | key: "", 362 | value: null, 363 | deleted: false, 364 | trie: null, 365 | clock: [], 366 | inflate: 0, 367 | feeds: [], 368 | contentFeed: null 369 | } 370 | var found0 = false 371 | var found3 = false 372 | while (true) { 373 | if (end <= offset) { 374 | if (!found0 || !found3) throw new Error("Decoded message is not valid") 375 | decode.bytes = offset - oldOffset 376 | return obj 377 | } 378 | var prefix = varint.decode(buf, offset) 379 | offset += varint.decode.bytes 380 | var tag = prefix >> 3 381 | switch (tag) { 382 | case 1: 383 | obj.key = enc[0].decode(buf, offset) 384 | offset += enc[0].decode.bytes 385 | found0 = true 386 | break 387 | case 2: 388 | obj.value = enc[1].decode(buf, offset) 389 | offset += enc[1].decode.bytes 390 | break 391 | case 3: 392 | obj.deleted = enc[2].decode(buf, offset) 393 | offset += enc[2].decode.bytes 394 | break 395 | case 4: 396 | obj.trie = enc[1].decode(buf, offset) 397 | offset += enc[1].decode.bytes 398 | found3 = true 399 | break 400 | case 5: 401 | obj.clock.push(enc[3].decode(buf, offset)) 402 | offset += enc[3].decode.bytes 403 | break 404 | case 6: 405 | obj.inflate = enc[3].decode(buf, offset) 406 | offset += enc[3].decode.bytes 407 | break 408 | case 7: 409 | var len = varint.decode(buf, offset) 410 | offset += varint.decode.bytes 411 | obj.feeds.push(enc[4].decode(buf, offset, offset + len)) 412 | offset += enc[4].decode.bytes 413 | break 414 | case 8: 415 | obj.contentFeed = enc[1].decode(buf, offset) 416 | offset += enc[1].decode.bytes 417 | break 418 | default: 419 | offset = skip(prefix & 7, buf, offset) 420 | } 421 | } 422 | } 423 | } 424 | 425 | function defineHeader () { 426 | var enc = [ 427 | encodings.string 428 | ] 429 | 430 | Header.encodingLength = encodingLength 431 | Header.encode = encode 432 | Header.decode = decode 433 | 434 | function encodingLength (obj) { 435 | var length = 0 436 | if (!defined(obj.protocol)) throw new Error("protocol is required") 437 | var 
len = enc[0].encodingLength(obj.protocol) 438 | length += 1 + len 439 | return length 440 | } 441 | 442 | function encode (obj, buf, offset) { 443 | if (!offset) offset = 0 444 | if (!buf) buf = Buffer.allocUnsafe(encodingLength(obj)) 445 | var oldOffset = offset 446 | if (!defined(obj.protocol)) throw new Error("protocol is required") 447 | buf[offset++] = 10 448 | enc[0].encode(obj.protocol, buf, offset) 449 | offset += enc[0].encode.bytes 450 | encode.bytes = offset - oldOffset 451 | return buf 452 | } 453 | 454 | function decode (buf, offset, end) { 455 | if (!offset) offset = 0 456 | if (!end) end = buf.length 457 | if (!(end <= buf.length && offset <= buf.length)) throw new Error("Decoded message is not valid") 458 | var oldOffset = offset 459 | var obj = { 460 | protocol: "" 461 | } 462 | var found0 = false 463 | while (true) { 464 | if (end <= offset) { 465 | if (!found0) throw new Error("Decoded message is not valid") 466 | decode.bytes = offset - oldOffset 467 | return obj 468 | } 469 | var prefix = varint.decode(buf, offset) 470 | offset += varint.decode.bytes 471 | var tag = prefix >> 3 472 | switch (tag) { 473 | case 1: 474 | obj.protocol = enc[0].decode(buf, offset) 475 | offset += enc[0].decode.bytes 476 | found0 = true 477 | break 478 | default: 479 | offset = skip(prefix & 7, buf, offset) 480 | } 481 | } 482 | } 483 | } 484 | 485 | function defined (val) { 486 | return val !== null && val !== undefined && (typeof val !== 'number' || !isNaN(val)) 487 | } 488 | -------------------------------------------------------------------------------- /lib/normalize.js: -------------------------------------------------------------------------------- 1 | module.exports = function normalizeKey (key) { 2 | if (!key.length) return '' 3 | return key[0] === '/' ? 
key.slice(1) : key 4 | } 5 | -------------------------------------------------------------------------------- /lib/options.js: -------------------------------------------------------------------------------- 1 | exports.map = function (opts, db) { 2 | if (!opts) return db._map 3 | var map = opts.map 4 | return map === undefined ? db._map : map 5 | } 6 | 7 | exports.reduce = function (opts, db) { 8 | if (!opts) return db._reduce 9 | var reduce = opts.reduce 10 | return reduce === undefined ? db._reduce : reduce 11 | } 12 | -------------------------------------------------------------------------------- /lib/put.js: -------------------------------------------------------------------------------- 1 | var hash = require('./hash') 2 | 3 | module.exports = put 4 | 5 | function put (db, clock, heads, key, value, opts, cb) { 6 | if (typeof opts === 'function') return put(db, clock, heads, key, value, null, opts) 7 | var req = new PutRequest(db, key, value, clock, opts) 8 | req.start(heads, cb) 9 | } 10 | 11 | function PutRequest (db, key, value, clock, opts) { 12 | this.key = key 13 | this.value = value 14 | this.delete = !!(opts && opts.delete) 15 | this.ifNotExists = !!(opts && opts.ifNotExists) 16 | 17 | this._clock = clock 18 | this._active = 0 19 | this._error = null 20 | this._callback = noop 21 | this._db = db 22 | this._path = hash(key, true) 23 | this._trie = [] 24 | } 25 | 26 | PutRequest.prototype.start = function (heads, cb) { 27 | if (cb) this._callback = cb 28 | if (!heads.length) return this._finalize() 29 | this._update(heads, 0) 30 | } 31 | 32 | PutRequest.prototype._finalize = function () { 33 | var cb = this._callback 34 | var err = this._error 35 | 36 | this._error = this._callback = null 37 | 38 | if (err) return cb(err) 39 | 40 | // TODO: would be a cleaner api if we didn't require the clock to be passed in 41 | // but instead inferred it from the heads. Investigate... 
42 | 43 | var node = { 44 | key: this.key, 45 | value: this.value, 46 | trie: this._trie, 47 | clock: this._clock 48 | } 49 | 50 | if (this.delete) node.deleted = true 51 | 52 | this._db._localWriter.append(node, function (err) { 53 | if (err) return cb(err) 54 | cb(null, node) 55 | }) 56 | } 57 | 58 | PutRequest.prototype._update = function (heads, offset) { 59 | this._active += heads.length 60 | for (var i = 0; i < heads.length; i++) { 61 | var worker = new Worker(heads[i], offset) 62 | this._moveCloser(worker) 63 | } 64 | } 65 | 66 | PutRequest.prototype._updateHead = function (worker, feed, seq) { 67 | var self = this 68 | 69 | worker.pending++ 70 | this._db._getPointer(feed, seq, true, function (err, node) { 71 | if (!err) worker.head = node 72 | self._workerDone(worker, err) 73 | }) 74 | } 75 | 76 | PutRequest.prototype._workerDone = function (worker, err) { 77 | if (err) worker.error = err 78 | if (--worker.pending) return 79 | 80 | if (worker.error || worker.ended) { 81 | this._end(worker, worker.error) 82 | } else { 83 | worker.i++ 84 | this._moveCloser(worker) 85 | } 86 | } 87 | 88 | PutRequest.prototype._fork = function (worker, ptrs) { 89 | var self = this 90 | 91 | worker.pending++ 92 | this._db._getAllPointers(ptrs, true, function (err, nodes) { 93 | if (err) return self._workerDone(worker, err) 94 | self._update(nodes, worker.i + 1) 95 | self._workerDone(worker, null) 96 | }) 97 | } 98 | 99 | PutRequest.prototype._checkCollision = function (worker, i, feed, seq) { 100 | var self = this 101 | 102 | worker.pending++ 103 | this._db._getPointer(feed, seq, true, function (err, node) { 104 | if (err) return self._workerDone(worker, err) 105 | if (node.key !== self.key) self._push(worker, i, feed, seq) 106 | self._workerDone(worker, null) 107 | }) 108 | } 109 | 110 | PutRequest.prototype._copyTrie = function (worker, bucket, val) { 111 | for (var i = 0; i < bucket.length; i++) { 112 | // check if we are the closest node, if so skip this 113 | // except if 
we are terminating the val. if so we 114 | // need to check for collions before making the decision 115 | if (i === val && val !== 4) continue 116 | 117 | var ptrs = bucket[i] || [] 118 | for (var k = 0; k < ptrs.length; k++) { 119 | var ptr = ptrs[k] 120 | // if termination value, push if get(ptr).key !== key 121 | if (val === 4) this._checkCollision(worker, i, ptr.feed, ptr.seq) 122 | else this._push(worker, i, ptr.feed, ptr.seq) 123 | } 124 | } 125 | } 126 | 127 | PutRequest.prototype._splitTrie = function (worker, bucket, val) { 128 | var head = worker.head 129 | var headVal = head.path[worker.i] 130 | 131 | // check if we need to split the trie at all 132 | // i.e. is head still closest and is head not a conflict 133 | if (headVal === val && (headVal < 4 || head.key === this.key)) return 134 | 135 | // push head to the trie 136 | this._push(worker, headVal, head.feed, head.seq) 137 | 138 | var ptrs = bucket[val] 139 | 140 | if (!ptrs || !ptrs.length) { 141 | worker.ended = true 142 | return 143 | } 144 | 145 | this._updateHead(worker, ptrs[0].feed, ptrs[0].seq) 146 | if (ptrs.length > 1) this._fork(worker, ptrs.slice(1)) 147 | } 148 | 149 | PutRequest.prototype._moveCloser = function (worker) { 150 | var path = this._path 151 | var head = worker.head 152 | 153 | for (; worker.i < path.length; worker.i++) { 154 | var i = worker.i 155 | var val = path[i] 156 | var bucket = head.trie[i] || [] 157 | 158 | this._copyTrie(worker, bucket, val) 159 | this._splitTrie(worker, bucket, val) 160 | 161 | if (worker.pending) return 162 | if (worker.ended) break 163 | } 164 | 165 | this._end(worker, worker.error) 166 | } 167 | 168 | PutRequest.prototype._end = function (worker, err) { 169 | if (err) this._error = err 170 | if (!--this._active) { 171 | if (this.ifNotExists && worker.head.key === this.key) { 172 | if (this._callback) return this._callback(err, worker.head) 173 | return 174 | } 175 | this._finalize() 176 | } 177 | } 178 | 179 | PutRequest.prototype._push = 
function (worker, val, feed, seq) { 180 | var i = worker.i 181 | var bucket = this._trie[i] 182 | if (!bucket) bucket = this._trie[i] = [] 183 | var values = bucket[val] 184 | if (!values) bucket[val] = values = [] 185 | 186 | for (var j = 0; j < values.length; j++) { 187 | var ref = values[j] 188 | if (ref.feed === feed && ref.seq === seq) return 189 | } 190 | 191 | values.push({feed, seq}) 192 | } 193 | 194 | function Worker (head, i) { 195 | this.i = i 196 | this.head = head 197 | this.lock = head 198 | this.pending = 0 199 | this.error = null 200 | this.ended = false 201 | } 202 | 203 | function noop () {} 204 | -------------------------------------------------------------------------------- /lib/trie-encoding.js: -------------------------------------------------------------------------------- 1 | var varint = require('varint') 2 | 3 | var buf = Buffer.allocUnsafe(512 * 1024) 4 | var offset = 0 5 | 6 | exports.encode = encode 7 | exports.decode = decode 8 | 9 | // encoding: i+bitfield+vals+... 
10 | // val = (feed << 1)+more?,seq 11 | 12 | function encode (trie, map) { 13 | if (buf.length - offset < 65536) { 14 | offset = 0 15 | buf = Buffer.allocUnsafe(buf.length) 16 | } 17 | 18 | var oldOffset = offset 19 | for (var i = 0; i < trie.length; i++) { 20 | if (!trie[i]) continue 21 | varint.encode(i, buf, offset) 22 | offset += varint.encode.bytes 23 | offset = encodeBucket(trie[i], map, buf, offset) 24 | } 25 | 26 | return buf.slice(oldOffset, offset) 27 | } 28 | 29 | function encodeBucket (bucket, map, buf, offset) { 30 | var i 31 | var bits = 0 32 | var bit = 1 33 | 34 | for (i = 0; i < bucket.length; i++) { 35 | if (bucket[i] && bucket[i].length) bits |= bit 36 | bit *= 2 37 | } 38 | 39 | varint.encode(bits, buf, offset) 40 | offset += varint.encode.bytes 41 | 42 | for (i = 0; i < bucket.length; i++) { 43 | var vals = bucket[i] 44 | if (!vals) continue 45 | 46 | for (var j = 0; j < vals.length; j++) { 47 | offset = encodeValue(vals[j], j < vals.length - 1, map, buf, offset) 48 | } 49 | } 50 | 51 | return offset 52 | } 53 | 54 | function encodeValue (ptr, more, map, buf, offset) { 55 | varint.encode(map[ptr.feed] * 2 + (more ? 
1 : 0), buf, offset) 56 | offset += varint.encode.bytes 57 | varint.encode(ptr.seq, buf, offset) 58 | offset += varint.encode.bytes 59 | return offset 60 | } 61 | 62 | function decode (buf, map) { 63 | var trie = [] 64 | var offset = 0 65 | 66 | while (offset < buf.length) { 67 | var i = varint.decode(buf, offset) 68 | offset += varint.decode.bytes 69 | trie[i] = [] 70 | offset = decodeBucket(buf, offset, trie[i], map) 71 | } 72 | 73 | return trie 74 | } 75 | 76 | function decodeBucket (buf, offset, bucket, map) { 77 | var i = 0 78 | var bits = varint.decode(buf, offset) 79 | offset += varint.decode.bytes 80 | 81 | while (bits) { 82 | if (bits & 1) { 83 | bucket[i] = [] 84 | offset = decodeValues(buf, offset, bucket[i], map) 85 | bits = (bits - 1) / 2 86 | } else { 87 | bits /= 2 88 | } 89 | i++ 90 | } 91 | 92 | return offset 93 | } 94 | 95 | function decodeValues (buf, offset, values, map) { 96 | var more = 1 97 | while (more) { 98 | var feed = varint.decode(buf, offset) 99 | offset += varint.decode.bytes 100 | var seq = varint.decode(buf, offset) 101 | offset += varint.decode.bytes 102 | more = feed & 1 103 | feed = (feed - more) / 2 104 | if (feed < map.length) feed = map[feed] 105 | values.push({feed, seq}) 106 | } 107 | return offset 108 | } 109 | -------------------------------------------------------------------------------- /lib/watch.js: -------------------------------------------------------------------------------- 1 | var unordered = require('unordered-set') 2 | var util = require('util') 3 | var events = require('events') 4 | 5 | module.exports = watch 6 | 7 | function watch (db, key, cb) { 8 | var w = new Watcher(db, key) 9 | w._index = db._watching.push(w) - 1 10 | w.start(cb) 11 | return w 12 | } 13 | 14 | function Watcher (db, key) { 15 | events.EventEmitter.call(this) 16 | 17 | this.key = key 18 | 19 | this._index = 0 20 | this._db = db 21 | this._kicked = 0 22 | this._nodes = null 23 | this._destroyed = false 24 | this._onkick = onkick.bind(this) 
25 | } 26 | 27 | util.inherits(Watcher, events.EventEmitter) 28 | 29 | Watcher.prototype.destroy = function (err) { 30 | if (this._destroyed) return 31 | this._destroyed = true 32 | 33 | unordered.remove(this._db._watching, this) 34 | if (err) this.emit('error', err) 35 | this.emit('close') 36 | } 37 | 38 | Watcher.prototype.start = function (onchange) { 39 | if (onchange) this.on('change', onchange) 40 | this._kick() 41 | } 42 | 43 | Watcher.prototype._kick = function () { 44 | this._kicked++ 45 | this._db.get(this.key, {update: false, prefix: true, map: false, reduce: false}, this._onkick) 46 | } 47 | 48 | function same (a, b) { 49 | if (a.length !== b.length) return false 50 | 51 | for (var i = 0; i < a.length; i++) { 52 | if (a[i].feed !== b[i].feed || a[i].seq !== b[i].seq) return false 53 | } 54 | 55 | return true 56 | } 57 | 58 | function sortByFeed (a, b) { 59 | return a.feed - b.feed 60 | } 61 | 62 | function onkick (err, nodes) { 63 | if (err) return this.destroy(err) 64 | 65 | var kicked = this._kicked 66 | 67 | this._kicked = 0 68 | nodes = nodes.sort(sortByFeed) 69 | 70 | if (!this._nodes) { 71 | this._nodes = nodes 72 | this.emit('watching') 73 | } 74 | 75 | if (!same(nodes, this._nodes)) { 76 | this._nodes = nodes 77 | this.emit('change') 78 | return 79 | } 80 | 81 | // there is a chance the db has been updated while we 82 | // ran the query - retry 83 | if (kicked > 1) this._kick() 84 | } 85 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hyperdb", 3 | "version": "3.5.0", 4 | "description": "Distributed scalable database", 5 | "main": "index.js", 6 | "dependencies": { 7 | "array-lru": "^1.1.1", 8 | "bulk-write-stream": "^1.1.3", 9 | "codecs": "^1.2.1", 10 | "compare": "^2.0.0", 11 | "hypercore": "^6.13.0", 12 | "hypercore-protocol": "^6.6.4", 13 | "inherits": "^2.0.3", 14 | "mutexify": "^1.2.0", 15 | 
"nanoiterator": "^1.1.0", 16 | "protocol-buffers-encodings": "^1.1.0", 17 | "random-access-file": "^2.0.1", 18 | "sodium-universal": "^2.0.0", 19 | "thunky": "^1.0.2", 20 | "unordered-array-remove": "^1.0.2", 21 | "unordered-set": "^2.0.0", 22 | "varint": "^5.0.0" 23 | }, 24 | "devDependencies": { 25 | "prettier": "^1.12.0", 26 | "protocol-buffers": "^4.0.4", 27 | "random-access-latency": "^1.0.0", 28 | "random-access-memory": "^3.0.0", 29 | "seed-random": "^2.2.0", 30 | "standard": "^11.0.0", 31 | "stream-collector": "^1.0.1", 32 | "tape": "^4.9.0" 33 | }, 34 | "scripts": { 35 | "test": "standard && tape test/*.js", 36 | "protobuf": "protocol-buffers schema.proto -o lib/messages.js" 37 | }, 38 | "repository": { 39 | "type": "git", 40 | "url": "https://github.com/mafintosh/hyperdb.git" 41 | }, 42 | "author": "Mathias Buus (@mafintosh)", 43 | "license": "MIT", 44 | "bugs": { 45 | "url": "https://github.com/mafintosh/hyperdb/issues" 46 | }, 47 | "homepage": "https://github.com/mafintosh/hyperdb#readme", 48 | "standard": { 49 | "ignore": [ 50 | "test/helpers/fuzzing.js" 51 | ] 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /schema.proto: -------------------------------------------------------------------------------- 1 | message Entry { 2 | required string key = 1; 3 | optional bytes value = 2; 4 | optional bool deleted = 3; 5 | required bytes trie = 4; 6 | repeated uint64 clock = 5; 7 | optional uint64 inflate = 6; 8 | } 9 | 10 | message InflatedEntry { 11 | message Feed { 12 | required bytes key = 1; 13 | } 14 | 15 | required string key = 1; 16 | optional bytes value = 2; 17 | optional bool deleted = 3; 18 | required bytes trie = 4; 19 | repeated uint64 clock = 5; 20 | optional uint64 inflate = 6; 21 | repeated Feed feeds = 7; 22 | optional bytes contentFeed = 8; 23 | } 24 | 25 | message Header { 26 | required string protocol = 1; 27 | } 28 | 
-------------------------------------------------------------------------------- /test/auth.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var create = require('./helpers/create') 3 | var replicate = require('./helpers/replicate') 4 | var run = require('./helpers/run') 5 | 6 | tape('authorized writer passes "authorized" api', function (t) { 7 | create.two(function (a, b) { 8 | a.put('foo', 'bar', function (err) { 9 | t.error(err) 10 | a.authorized(a.local.key, function (err, auth) { 11 | t.error(err) 12 | t.equals(auth, true) 13 | b.authorized(b.local.key, function (err, auth) { 14 | t.error(err) 15 | t.equals(auth, true) 16 | t.end() 17 | }) 18 | }) 19 | }) 20 | }) 21 | }) 22 | 23 | tape('authorized writer passes "authorized" api', function (t) { 24 | create.two(function (a, b) { 25 | b.put('foo', 'bar', function (err) { 26 | t.error(err) 27 | a.authorized(a.local.key, function (err, auth) { 28 | t.error(err) 29 | t.equals(auth, true) 30 | b.authorized(b.local.key, function (err, auth) { 31 | t.error(err) 32 | t.equals(auth, true) 33 | t.end() 34 | }) 35 | }) 36 | }) 37 | }) 38 | }) 39 | 40 | tape('unauthorized writer fails "authorized" api', function (t) { 41 | var a = create.one() 42 | a.ready(function () { 43 | var b = create.one(a.key) 44 | b.ready(function () { 45 | b.authorized(b.local.key, function (err, auth) { 46 | t.error(err) 47 | t.equals(auth, false) 48 | t.end() 49 | }) 50 | }) 51 | }) 52 | }) 53 | 54 | tape('local unauthorized writes =/> authorized', function (t) { 55 | var a = create.one() 56 | a.ready(function () { 57 | var b = create.one(a.key) 58 | b.ready(function () { 59 | b.put('/foo', 'bar', function (err) { 60 | t.error(err) 61 | b.authorized(b.local.key, function (err, auth) { 62 | t.error(err) 63 | t.equals(auth, false) 64 | b.authorized(a.local.key, function (err, auth) { 65 | t.error(err) 66 | t.equals(auth, true) 67 | t.end() 68 | }) 69 | }) 70 | }) 71 | }) 72 | 
}) 73 | }) 74 | 75 | tape('unauthorized writer doing a put after replication', function (t) { 76 | t.plan(1) 77 | var a = create.one() 78 | a.ready(function () { 79 | var b = create.one(a.key) 80 | b.ready(function () { 81 | replicate(a, b, function () { 82 | b.put('foo', 'bar', function (err) { 83 | t.error(err) 84 | }) 85 | }) 86 | }) 87 | }) 88 | }) 89 | 90 | tape('unauthorized writer fails "authorized" after some writes', function (t) { 91 | var a = create.one() 92 | a.ready(function () { 93 | run( 94 | cb => a.put('foo', 'bar', cb), 95 | cb => a.put('foo', 'bar2', cb), 96 | cb => a.put('foo', 'bar3', cb), 97 | cb => a.put('foo', 'bar4', cb), 98 | done 99 | ) 100 | 101 | function done (err) { 102 | t.error(err) 103 | var b = create.one(a.key) 104 | b.ready(function () { 105 | replicate(a, b, function () { 106 | b.authorized(b.local.key, function (err, auth) { 107 | t.error(err) 108 | t.equals(auth, false) 109 | t.end() 110 | }) 111 | }) 112 | }) 113 | } 114 | }) 115 | }) 116 | 117 | tape('authorized is consistent', function (t) { 118 | t.plan(5) 119 | 120 | var a = create.one(null, {contentFeed: true}) 121 | a.ready(function () { 122 | var b = create.one(a.key, {contentFeed: true, latency: 10}) 123 | 124 | run( 125 | cb => b.put('bar', 'foo', cb), 126 | cb => a.put('foo', 'bar', cb), 127 | auth, 128 | replicate.bind(null, a, b), 129 | done 130 | ) 131 | 132 | function done (err) { 133 | t.error(err, 'no error') 134 | a.authorized(b.local.key, function (err, auth) { 135 | t.error(err, 'no error') 136 | t.ok(auth) 137 | }) 138 | b.authorized(b.local.key, function (err, auth) { 139 | t.error(err, 'no error') 140 | t.ok(auth) 141 | }) 142 | } 143 | 144 | function auth (cb) { 145 | a.authorize(b.local.key, cb) 146 | } 147 | }) 148 | }) 149 | -------------------------------------------------------------------------------- /test/autogenerated.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var run 
= require('./helpers/run') 3 | var put = require('./helpers/put') 4 | var create = require('./helpers/create') 5 | var validate = require('./helpers/fuzzing').validate 6 | 7 | tape('autogenerated failing fuzz test #1', function (t) { 8 | var writesPerReplication = [ 9 | [ 10 | ['fefhe', [null, 'hgbbhiegadhdeegdebfa']], 11 | ['gjegc/gjegc', ['jegahfeddccagaaghgce']], 12 | ['gjegc/gidjb', ['jjbchagfijbhiiijcaig']], 13 | ['gjegc/feffb', ['eiebhdddigbhheaacjdj']], 14 | ['gjegc/feigc', [null, 'ajfgdihjedbcdhjgcaed']], 15 | ['gjegc/adifa', [null, 'fghhbhchhdbidbbbfabh']] 16 | ], 17 | [ 18 | ['gjegc/caghc', ['gfjcgaebibbccjceggej']], 19 | ['gjegc/ghbdb', [null, 'ggbddcahebaejbegaceh']], 20 | ['gjegc/aaahi/aaahi', ['bibggfheidahefhhgdci']] 21 | ] 22 | ].map(b => new Map(b)) 23 | 24 | create.many(2, function (err, dbs, replicateByIndex) { 25 | t.error(err) 26 | run( 27 | cb => { 28 | put( 29 | dbs[1], 30 | [ 31 | { 32 | key: 'fefhe', 33 | value: 'hgbbhiegadhdeegdebfa' 34 | } 35 | ], 36 | cb 37 | ) 38 | }, 39 | cb => { 40 | put( 41 | dbs[0], 42 | [ 43 | { 44 | key: 'gjegc/gjegc', 45 | value: 'jegahfeddccagaaghgce' 46 | } 47 | ], 48 | cb 49 | ) 50 | }, 51 | cb => { 52 | put( 53 | dbs[0], 54 | [ 55 | { 56 | key: 'gjegc/gidjb', 57 | value: 'jjbchagfijbhiiijcaig' 58 | } 59 | ], 60 | cb 61 | ) 62 | }, 63 | cb => { 64 | put( 65 | dbs[0], 66 | [ 67 | { 68 | key: 'gjegc/feffb', 69 | value: 'eiebhdddigbhheaacjdj' 70 | } 71 | ], 72 | cb 73 | ) 74 | }, 75 | cb => { 76 | put( 77 | dbs[1], 78 | [ 79 | { 80 | key: 'gjegc/feigc', 81 | value: 'ajfgdihjedbcdhjgcaed' 82 | } 83 | ], 84 | cb 85 | ) 86 | }, 87 | cb => { 88 | put( 89 | dbs[1], 90 | [ 91 | { 92 | key: 'gjegc/adifa', 93 | value: 'fghhbhchhdbidbbbfabh' 94 | } 95 | ], 96 | cb 97 | ) 98 | }, 99 | cb => replicateByIndex(cb), 100 | cb => validate(t, dbs[0], writesPerReplication.slice(0, 1), cb), 101 | cb => { 102 | put( 103 | dbs[0], 104 | [ 105 | { 106 | key: 'gjegc/caghc', 107 | value: 'gfjcgaebibbccjceggej' 108 | } 109 | ], 110 | cb 
111 | ) 112 | }, 113 | cb => { 114 | put( 115 | dbs[1], 116 | [ 117 | { 118 | key: 'gjegc/ghbdb', 119 | value: 'ggbddcahebaejbegaceh' 120 | } 121 | ], 122 | cb 123 | ) 124 | }, 125 | cb => { 126 | put( 127 | dbs[0], 128 | [ 129 | { 130 | key: 'gjegc/aaahi/aaahi', 131 | value: 'bibggfheidahefhhgdci' 132 | } 133 | ], 134 | cb 135 | ) 136 | }, 137 | cb => replicateByIndex(cb), 138 | cb => validate(t, dbs[0], writesPerReplication.slice(0, 2), cb), 139 | err => { 140 | t.error(err) 141 | t.end() 142 | } 143 | ) 144 | }) 145 | }) 146 | -------------------------------------------------------------------------------- /test/basic.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var Readable = require('stream').Readable 3 | 4 | var create = require('./helpers/create') 5 | var run = require('./helpers/run') 6 | 7 | tape('basic put/get', function (t) { 8 | var db = create.one() 9 | db.put('hello', 'world', function (err, node) { 10 | t.same(node.key, 'hello') 11 | t.same(node.value, 'world') 12 | t.error(err, 'no error') 13 | db.get('hello', function (err, node) { 14 | t.error(err, 'no error') 15 | t.same(node.key, 'hello', 'same key') 16 | t.same(node.value, 'world', 'same value') 17 | t.end() 18 | }) 19 | }) 20 | }) 21 | 22 | tape('get on empty db', function (t) { 23 | var db = create.one() 24 | 25 | db.get('hello', function (err, node) { 26 | t.error(err, 'no error') 27 | t.same(node, null, 'node is not found') 28 | t.end() 29 | }) 30 | }) 31 | 32 | tape('not found', function (t) { 33 | var db = create.one() 34 | db.put('hello', 'world', function (err) { 35 | t.error(err, 'no error') 36 | db.get('hej', function (err, node) { 37 | t.error(err, 'no error') 38 | t.same(node, null, 'node is not found') 39 | t.end() 40 | }) 41 | }) 42 | }) 43 | 44 | tape('leading / is ignored', function (t) { 45 | t.plan(7) 46 | var db = create.one() 47 | db.put('/hello', 'world', function (err) { 48 | t.error(err, 'no 
error') 49 | db.get('/hello', function (err, node) { 50 | t.error(err, 'no error') 51 | t.same(node.key, 'hello', 'same key') 52 | t.same(node.value, 'world', 'same value') 53 | }) 54 | db.get('hello', function (err, node) { 55 | t.error(err, 'no error') 56 | t.same(node.key, 'hello', 'same key') 57 | t.same(node.value, 'world', 'same value') 58 | }) 59 | }) 60 | }) 61 | 62 | tape('multiple put/get', function (t) { 63 | t.plan(8) 64 | 65 | var db = create.one() 66 | 67 | db.put('hello', 'world', function (err) { 68 | t.error(err, 'no error') 69 | db.put('world', 'hello', function (err) { 70 | t.error(err, 'no error') 71 | db.get('hello', function (err, node) { 72 | t.error(err, 'no error') 73 | t.same(node.key, 'hello', 'same key') 74 | t.same(node.value, 'world', 'same value') 75 | }) 76 | db.get('world', function (err, node) { 77 | t.error(err, 'no error') 78 | t.same(node.key, 'world', 'same key') 79 | t.same(node.value, 'hello', 'same value') 80 | }) 81 | }) 82 | }) 83 | }) 84 | 85 | tape('overwrites', function (t) { 86 | var db = create.one() 87 | 88 | db.put('hello', 'world', function (err) { 89 | t.error(err, 'no error') 90 | db.get('hello', function (err, node) { 91 | t.error(err, 'no error') 92 | t.same(node.key, 'hello', 'same key') 93 | t.same(node.value, 'world', 'same value') 94 | db.put('hello', 'verden', function (err) { 95 | t.error(err, 'no error') 96 | db.get('hello', function (err, node) { 97 | t.error(err, 'no error') 98 | t.same(node.key, 'hello', 'same key') 99 | t.same(node.value, 'verden', 'same value') 100 | t.end() 101 | }) 102 | }) 103 | }) 104 | }) 105 | }) 106 | 107 | tape('put/gets namespaces', function (t) { 108 | t.plan(8) 109 | 110 | var db = create.one() 111 | 112 | db.put('hello/world', 'world', function (err) { 113 | t.error(err, 'no error') 114 | db.put('world', 'hello', function (err) { 115 | t.error(err, 'no error') 116 | db.get('hello/world', function (err, node) { 117 | t.error(err, 'no error') 118 | t.same(node.key, 
'hello/world', 'same key') 119 | t.same(node.value, 'world', 'same value') 120 | }) 121 | db.get('world', function (err, node) { 122 | t.error(err, 'no error') 123 | t.same(node.key, 'world', 'same key') 124 | t.same(node.value, 'hello', 'same value') 125 | }) 126 | }) 127 | }) 128 | }) 129 | 130 | tape('put in tree', function (t) { 131 | t.plan(8) 132 | 133 | var db = create.one() 134 | 135 | db.put('hello', 'a', function (err) { 136 | t.error(err, 'no error') 137 | db.put('hello/world', 'b', function (err) { 138 | t.error(err, 'no error') 139 | db.get('hello', function (err, node) { 140 | t.error(err, 'no error') 141 | t.same(node.key, 'hello', 'same key') 142 | t.same(node.value, 'a', 'same value') 143 | }) 144 | db.get('hello/world', function (err, node) { 145 | t.error(err, 'no error') 146 | t.same(node.key, 'hello/world', 'same key') 147 | t.same(node.value, 'b', 'same value') 148 | }) 149 | }) 150 | }) 151 | }) 152 | 153 | tape('put in tree reverse order', function (t) { 154 | t.plan(8) 155 | 156 | var db = create.one() 157 | 158 | db.put('hello/world', 'b', function (err) { 159 | t.error(err, 'no error') 160 | db.put('hello', 'a', function (err) { 161 | t.error(err, 'no error') 162 | db.get('hello', function (err, node) { 163 | t.error(err, 'no error') 164 | t.same(node.key, 'hello', 'same key') 165 | t.same(node.value, 'a', 'same value') 166 | }) 167 | db.get('hello/world', function (err, node) { 168 | t.error(err, 'no error') 169 | t.same(node.key, 'hello/world', 'same key') 170 | t.same(node.value, 'b', 'same value') 171 | }) 172 | }) 173 | }) 174 | }) 175 | 176 | tape('multiple put in tree', function (t) { 177 | t.plan(13) 178 | 179 | var db = create.one() 180 | 181 | db.put('hello/world', 'b', function (err) { 182 | t.error(err, 'no error') 183 | db.put('hello', 'a', function (err) { 184 | t.error(err, 'no error') 185 | db.put('hello/verden', 'c', function (err) { 186 | t.error(err, 'no error') 187 | db.put('hello', 'd', function (err) { 188 | 
t.error(err, 'no error') 189 | db.get('hello', function (err, node) { 190 | t.error(err, 'no error') 191 | t.same(node.key, 'hello', 'same key') 192 | t.same(node.value, 'd', 'same value') 193 | }) 194 | db.get('hello/world', function (err, node) { 195 | t.error(err, 'no error') 196 | t.same(node.key, 'hello/world', 'same key') 197 | t.same(node.value, 'b', 'same value') 198 | }) 199 | db.get('hello/verden', function (err, node) { 200 | t.error(err, 'no error') 201 | t.same(node.key, 'hello/verden', 'same key') 202 | t.same(node.value, 'c', 'same value') 203 | }) 204 | }) 205 | }) 206 | }) 207 | }) 208 | }) 209 | 210 | tape('insert 100 values and get them all', function (t) { 211 | var db = create.one() 212 | var max = 100 213 | var i = 0 214 | 215 | t.plan(3 * max) 216 | 217 | loop() 218 | 219 | function loop () { 220 | if (i === max) return validate() 221 | db.put('#' + i, '#' + (i++), loop) 222 | } 223 | 224 | function validate () { 225 | for (var i = 0; i < max; i++) { 226 | db.get('#' + i, same('#' + i)) 227 | } 228 | } 229 | 230 | function same (key) { 231 | return function (err, node) { 232 | t.error(err, 'no error') 233 | t.same(node.key, key, 'same key') 234 | t.same(node.value, key, 'same value') 235 | } 236 | } 237 | }) 238 | 239 | tape('race works', function (t) { 240 | t.plan(40) 241 | 242 | var missing = 10 243 | var db = create.one() 244 | 245 | for (var i = 0; i < 10; i++) db.put('#' + i, '#' + i, done) 246 | 247 | function done (err) { 248 | t.error(err, 'no error') 249 | if (--missing) return 250 | for (var i = 0; i < 10; i++) same('#' + i) 251 | } 252 | 253 | function same (val) { 254 | db.get(val, function (err, node) { 255 | t.error(err, 'no error') 256 | t.same(node.key, val, 'same key') 257 | t.same(node.value, val, 'same value') 258 | }) 259 | } 260 | }) 261 | 262 | tape('version', function (t) { 263 | var db = create.one() 264 | 265 | db.version(function (err, version) { 266 | t.error(err, 'no error') 267 | t.same(version, Buffer.alloc(0)) 
268 | db.put('hello', 'world', function () { 269 | db.version(function (err, version) { 270 | t.error(err, 'no error') 271 | db.put('hello', 'verden', function () { 272 | db.checkout(version).get('hello', function (err, node) { 273 | t.error(err, 'no error') 274 | t.same(node.value, 'world') 275 | t.end() 276 | }) 277 | }) 278 | }) 279 | }) 280 | }) 281 | }) 282 | 283 | tape('basic batch', function (t) { 284 | t.plan(1 + 3 + 3) 285 | 286 | var db = create.one() 287 | 288 | db.batch([ 289 | {key: 'hello', value: 'world'}, 290 | {key: 'hej', value: 'verden'}, 291 | {key: 'hello', value: 'welt'} 292 | ], function (err) { 293 | t.error(err, 'no error') 294 | db.get('hello', function (err, node) { 295 | t.error(err, 'no error') 296 | t.same(node.key, 'hello') 297 | t.same(node.value, 'welt') 298 | }) 299 | db.get('hej', function (err, node) { 300 | t.error(err, 'no error') 301 | t.same(node.key, 'hej') 302 | t.same(node.value, 'verden') 303 | }) 304 | }) 305 | }) 306 | 307 | tape('batch with del', function (t) { 308 | t.plan(1 + 1 + 3 + 2) 309 | 310 | var db = create.one() 311 | 312 | db.batch([ 313 | {key: 'hello', value: 'world'}, 314 | {key: 'hej', value: 'verden'}, 315 | {key: 'hello', value: 'welt'} 316 | ], function (err) { 317 | t.error(err, 'no error') 318 | db.batch([ 319 | {key: 'hello', value: 'verden'}, 320 | {type: 'del', key: 'hej'} 321 | ], function (err) { 322 | t.error(err, 'no error') 323 | db.get('hello', function (err, node) { 324 | t.error(err, 'no error') 325 | t.same(node.key, 'hello') 326 | t.same(node.value, 'verden') 327 | }) 328 | db.get('hej', function (err, node) { 329 | t.error(err, 'no error') 330 | t.same(node, null) 331 | }) 332 | }) 333 | }) 334 | }) 335 | tape('multiple batches', function (t) { 336 | t.plan(19) 337 | 338 | var db = create.one() 339 | 340 | db.batch([{ 341 | type: 'put', 342 | key: 'foo', 343 | value: 'foo' 344 | }, { 345 | type: 'put', 346 | key: 'bar', 347 | value: 'bar' 348 | }], function (err, nodes) { 349 | 
t.error(err) 350 | t.same(2, nodes.length) 351 | same('foo', 'foo') 352 | same('bar', 'bar') 353 | db.batch([{ 354 | type: 'put', 355 | key: 'foo', 356 | value: 'foo2' 357 | }, { 358 | type: 'put', 359 | key: 'bar', 360 | value: 'bar2' 361 | }, { 362 | type: 'put', 363 | key: 'baz', 364 | value: 'baz' 365 | }], function (err, nodes) { 366 | t.error(err) 367 | t.same(3, nodes.length) 368 | same('foo', 'foo2') 369 | same('bar', 'bar2') 370 | same('baz', 'baz') 371 | }) 372 | }) 373 | 374 | function same (key, val) { 375 | db.get(key, function (err, node) { 376 | t.error(err, 'no error') 377 | t.same(node.key, key) 378 | t.same(node.value, val) 379 | }) 380 | } 381 | }) 382 | 383 | tape('createWriteStream', function (t) { 384 | t.plan(10) 385 | var db = create.one() 386 | var writer = db.createWriteStream() 387 | 388 | writer.write([{ 389 | type: 'put', 390 | key: 'foo', 391 | value: 'foo' 392 | }, { 393 | type: 'put', 394 | key: 'bar', 395 | value: 'bar' 396 | }]) 397 | 398 | writer.write({ 399 | type: 'put', 400 | key: 'baz', 401 | value: 'baz' 402 | }) 403 | 404 | writer.end(function (err) { 405 | t.error(err, 'no error') 406 | same('foo', 'foo') 407 | same('bar', 'bar') 408 | same('baz', 'baz') 409 | }) 410 | 411 | function same (key, val) { 412 | db.get(key, function (err, node) { 413 | t.error(err, 'no error') 414 | t.same(node.key, key) 415 | t.same(node.value, val) 416 | }) 417 | } 418 | }) 419 | 420 | tape('createWriteStream pipe', function (t) { 421 | t.plan(10) 422 | var db = create.one() 423 | var writer = db.createWriteStream() 424 | var index = 0 425 | var reader = new Readable({ 426 | objectMode: true, 427 | read: function (size) { 428 | var value = (index < 1000) ? 
{ 429 | type: 'put', 430 | key: 'foo' + index, 431 | value: index++ 432 | } : null 433 | this.push(value) 434 | } 435 | }) 436 | reader.pipe(writer) 437 | writer.on('finish', function (err) { 438 | t.error(err, 'no error') 439 | same('foo1', '1') 440 | same('foo50', '50') 441 | same('foo999', '999') 442 | }) 443 | 444 | function same (key, val) { 445 | db.get(key, function (err, node) { 446 | t.error(err, 'no error') 447 | t.same(node.key, key) 448 | t.same(node.value, val) 449 | }) 450 | } 451 | }) 452 | 453 | tape('create with precreated keypair', function (t) { 454 | var crypto = require('hypercore/lib/crypto') 455 | var keyPair = crypto.keyPair() 456 | 457 | var db = create.one(keyPair.publicKey, {secretKey: keyPair.secretKey}) 458 | db.put('hello', 'world', function (err, node) { 459 | t.same(node.value, 'world') 460 | t.error(err, 'no error') 461 | t.same(db.key, keyPair.publicKey, 'pubkey matches') 462 | db.source._storage.secretKey.read(0, keyPair.secretKey.length, function (err, secretKey) { 463 | t.error(err, 'no error') 464 | t.same(secretKey, keyPair.secretKey, 'secret key is stored') 465 | }) 466 | db.get('hello', function (err, node) { 467 | t.error(err, 'no error') 468 | t.same(node.value, 'world', 'same value') 469 | t.end() 470 | }) 471 | }) 472 | }) 473 | 474 | tape('can insert falsy values', function (t) { 475 | t.plan(2 * 2 + 3 + 1) 476 | 477 | var db = create.one(null, {valueEncoding: 'json'}) 478 | 479 | db.put('hello', 0, function () { 480 | db.put('world', false, function () { 481 | db.get('hello', function (err, node) { 482 | t.error(err, 'no error') 483 | t.same(node && node.value, 0) 484 | }) 485 | db.get('world', function (err, node) { 486 | t.error(err, 'no error') 487 | t.same(node && node.value, false) 488 | }) 489 | 490 | var ite = db.iterator() 491 | var result = {} 492 | 493 | ite.next(function loop (err, node) { 494 | t.error(err, 'no error') 495 | 496 | if (!node) { 497 | t.same(result, {hello: 0, world: false}) 498 | return 499 
| } 500 | 501 | result[node.key] = node.value 502 | ite.next(loop) 503 | }) 504 | }) 505 | }) 506 | }) 507 | 508 | tape('can put/get a null value', function (t) { 509 | t.plan(3) 510 | 511 | var db = create.one(null, {valueEncoding: 'json'}) 512 | db.put('some key', null, function (err) { 513 | t.error(err, 'no error') 514 | db.get('some key', function (err, node) { 515 | t.error(err, 'no error') 516 | t.same(node.value, null) 517 | }) 518 | }) 519 | }) 520 | 521 | tape('does not reinsert if isNotExists is true in put', function (t) { 522 | t.plan(4) 523 | 524 | var db = create.one(null, {valueEncoding: 'utf8'}) 525 | db.put('some key', 'hello', function (err) { 526 | t.error(err, 'no error') 527 | db.put('some key', 'goodbye', { ifNotExists: true }, function (err) { 528 | t.error(err, 'no error') 529 | db.get('some key', function (err, node) { 530 | t.error(err, 'no error') 531 | t.same(node.value, 'hello') 532 | }) 533 | }) 534 | }) 535 | }) 536 | 537 | tape('normal insertions work with ifNotExists', function (t) { 538 | t.plan(5) 539 | 540 | var db = create.one() 541 | run( 542 | cb => db.put('some key', 'hello', { ifNotExists: true }, cb), 543 | cb => db.put('some key', 'goodbye', { ifNotExists: true }, cb), 544 | cb => db.put('another key', 'something else', { ifNotExists: true }, cb), 545 | done 546 | ) 547 | 548 | function done (err) { 549 | t.error(err, 'no error') 550 | db.get('some key', function (err, node) { 551 | t.error(err, 'no error') 552 | t.same(node.value, 'hello') 553 | db.get('another key', function (err, node) { 554 | t.error(err, 'no error') 555 | t.same(node.value, 'something else') 556 | }) 557 | }) 558 | } 559 | }) 560 | 561 | tape('put with ifNotExists does not reinsert with conflict', function (t) { 562 | t.plan(5) 563 | 564 | create.two(function (db1, db2, replicate) { 565 | run( 566 | cb => db1.put('0', '0', cb), 567 | replicate, 568 | cb => db1.put('1', '1a', cb), 569 | cb => db2.put('1', '1b', cb), 570 | cb => db1.put('10', '10', 
cb), 571 | replicate, 572 | cb => db1.put('2', '2', cb), 573 | cb => db1.put('1/0', '1/0', cb), 574 | done 575 | ) 576 | 577 | function done (err) { 578 | t.error(err, 'no error') 579 | db1.put('1', '1c', { ifNotExists: true }, function (err) { 580 | t.error(err, 'no error') 581 | db1.get('1', function (err, nodes) { 582 | t.error(err, 'no error') 583 | t.same(nodes.length, 2) 584 | var vals = nodes.map(function (n) { 585 | return n.value 586 | }) 587 | t.same(vals, ['1b', '1a']) 588 | }) 589 | }) 590 | } 591 | }) 592 | }) 593 | 594 | tape('opts is not mutated', function (t) { 595 | var opts = {firstNode: true} 596 | create.one(opts) 597 | t.deepEqual(opts, {firstNode: true}) 598 | t.end() 599 | }) 600 | -------------------------------------------------------------------------------- /test/collisions.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var create = require('./helpers/create') 3 | 4 | tape('two keys with same siphash', function (t) { 5 | t.plan(2 + 2) 6 | 7 | var db = create.one() 8 | 9 | db.put('idgcmnmna', 'a', function () { 10 | db.put('mpomeiehc', 'b', function () { 11 | db.get('idgcmnmna', function (err, node) { 12 | t.error(err, 'no error') 13 | t.same(node.value, 'a') 14 | }) 15 | db.get('mpomeiehc', function (err, node) { 16 | t.error(err, 'no error') 17 | t.same(node.value, 'b') 18 | }) 19 | }) 20 | }) 21 | }) 22 | 23 | tape('two keys with same siphash (iterator)', function (t) { 24 | var db = create.one() 25 | 26 | db.put('idgcmnmna', 'a', function () { 27 | db.put('mpomeiehc', 'b', function () { 28 | var ite = db.iterator() 29 | 30 | ite.next(function (err, node) { 31 | t.error(err, 'no error') 32 | t.same(node.value, 'a') 33 | }) 34 | ite.next(function (err, node) { 35 | t.error(err, 'no error') 36 | t.same(node.value, 'b') 37 | }) 38 | ite.next(function (err, node) { 39 | t.error(err, 'no error') 40 | t.same(node, null) 41 | t.end() 42 | }) 43 | }) 44 | }) 45 | }) 46 | 
47 | tape('two prefixes with same siphash (iterator)', function (t) { 48 | var db = create.one() 49 | 50 | db.put('idgcmnmna/a', 'a', function () { 51 | db.put('mpomeiehc/b', 'b', function () { 52 | var ite = db.iterator('idgcmnmna') 53 | 54 | ite.next(function (err, node) { 55 | t.error(err, 'no error') 56 | t.same(node.value, 'a') 57 | }) 58 | ite.next(function (err, node) { 59 | t.error(err, 'no error') 60 | t.same(node, null) 61 | t.end() 62 | }) 63 | }) 64 | }) 65 | }) 66 | -------------------------------------------------------------------------------- /test/content-feeds.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var create = require('./helpers/create') 3 | var replicate = require('./helpers/replicate') 4 | 5 | tape('basic content feed', function (t) { 6 | var db = create.one(null, {contentFeed: true, valueEncoding: 'json'}) 7 | 8 | db.ready(function (err) { 9 | t.error(err, 'no error') 10 | db.localContent.append('lots of data') 11 | db.put('hello', {start: 0, end: 1}, function (err) { 12 | t.error(err, 'no error') 13 | db.get('hello', function (err, node) { 14 | t.error(err, 'no error') 15 | t.ok(db.localContent === db.contentFeeds[node.feed]) 16 | db.contentFeeds[node.feed].get(node.value.start, function (err, buf) { 17 | t.error(err, 'no error') 18 | t.same(buf, Buffer.from('lots of data')) 19 | t.end() 20 | }) 21 | }) 22 | }) 23 | }) 24 | }) 25 | 26 | tape('replicating content feeds', function (t) { 27 | var db = create.one(null, {contentFeed: true}) 28 | db.put('hello', 'world', function () { 29 | var clone = create.one(db.key, {contentFeed: true}) 30 | db.localContent.append('data', function () { 31 | replicate(db, clone, function () { 32 | clone.get('hello', function (err, node) { 33 | t.error(err, 'no error') 34 | t.same(node.value, 'world') 35 | clone.contentFeeds[node.feed].get(0, function (err, buf) { 36 | t.error(err, 'no error') 37 | t.same(buf, Buffer.from('data')) 38 
| t.end() 39 | }) 40 | }) 41 | }) 42 | }) 43 | }) 44 | }) 45 | -------------------------------------------------------------------------------- /test/corruption.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var create = require('./helpers/create') 3 | var run = require('./helpers/run') 4 | var hyperdb = require('..') 5 | var messages = require('../lib/messages') 6 | 7 | tape('feed with corrupted inflate generates error', function (t) { 8 | create.three(function (a, b, c) { 9 | var corrupted 10 | 11 | run( 12 | cb => a.put('foo', 'bar', cb), 13 | testUncorrupted, 14 | corruptInflateRecord, 15 | openCorruptedDb, 16 | done 17 | ) 18 | 19 | function done (err) { 20 | t.error(err, 'no error') 21 | t.end() 22 | } 23 | 24 | function testUncorrupted (cb) { 25 | t.equal(a._writers.length, 3, 'uncorrupted length') 26 | cb() 27 | } 28 | 29 | function corruptInflateRecord (cb) { 30 | var index = 3 31 | a.source.get(index, function (err, data) { 32 | t.error(err, 'no error') 33 | var val = messages.Entry.decode(data) 34 | val.inflate = 1 // Introduce corruption 35 | val.deleted = undefined // To keep the same size 36 | var corruptData = messages.Entry.encode(val) 37 | var storage = a.source._storage 38 | storage.dataOffset(index, [], function (err, offset, size) { 39 | t.error(err, 'no error') 40 | storage.data.write(offset, corruptData, cb) 41 | }) 42 | }) 43 | } 44 | 45 | function openCorruptedDb (cb) { 46 | corrupted = hyperdb(reuseStorage(a)) 47 | corrupted.ready(function (err) { 48 | t.ok(err, 'expected error') 49 | t.equal(err.message, 'Missing feed mappings', 'error message') 50 | t.equal(corrupted._writers.length, 2, 'corrupted length') 51 | cb() 52 | }) 53 | } 54 | }) 55 | }) 56 | 57 | function reuseStorage (db) { 58 | return function (name) { 59 | var match = name.match(/^source\/(.*)/) 60 | if (match) { 61 | name = match[1] 62 | if (name === 'secret_key') return db.source._storage.secretKey 63 
| return db.source._storage[name] 64 | } 65 | match = name.match(/^peers\/([0-9a-f]+)\/(.*)/) 66 | if (match) { 67 | var hex = match[1] 68 | name = match[2] 69 | var peerWriter = db._writers.find(function (writer) { 70 | return writer && writer._feed.discoveryKey.toString('hex') === hex 71 | }) 72 | if (!peerWriter) throw new Error('mismatch') 73 | var feed = peerWriter._feed 74 | if (name === 'secret_key') return feed._storage.secretKey 75 | return feed._storage[name] 76 | } else { 77 | throw new Error('mismatch') 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /test/deletes.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var create = require('./helpers/create') 3 | var run = require('./helpers/run') 4 | 5 | tape('basic delete', function (t) { 6 | var db = create.one() 7 | 8 | db.put('hello', 'world', function () { 9 | db.get('hello', function (err, node) { 10 | t.error(err, 'no error') 11 | t.same(node.value, 'world') 12 | db.del('hello', function (err) { 13 | t.error(err, 'no error') 14 | db.get('hello', function (err, node) { 15 | t.error(err, 'no error') 16 | t.ok(!node, 'was deleted') 17 | t.end() 18 | }) 19 | }) 20 | }) 21 | }) 22 | }) 23 | 24 | tape('delete one in many', function (t) { 25 | t.plan(1 + 2 + 2) 26 | 27 | var db = create.one() 28 | var keys = [] 29 | 30 | for (var i = 0; i < 50; i++) { 31 | keys.push('' + i) 32 | } 33 | 34 | run( 35 | keys.map(k => cb => db.put(k, k, cb)), 36 | cb => db.del('42', cb), 37 | done 38 | ) 39 | 40 | function done (err) { 41 | t.error(err, 'no error') 42 | db.get('42', function (err, node) { 43 | t.error(err, 'no error') 44 | t.ok(!node, 'was deleted') 45 | }) 46 | db.get('43', function (err, node) { 47 | t.error(err, 'no erro') 48 | t.same(node.value, '43') 49 | }) 50 | } 51 | }) 52 | 53 | tape('delete one in many (iteration)', function (t) { 54 | var db = create.one() 55 | var keys = [] 
56 | 57 | for (var i = 0; i < 50; i++) { 58 | keys.push('' + i) 59 | } 60 | 61 | run( 62 | keys.map(k => cb => db.put(k, k, cb)), 63 | cb => db.del('42', cb), 64 | done 65 | ) 66 | 67 | function done (err) { 68 | t.error(err, 'no error') 69 | 70 | var ite = db.iterator() 71 | var actual = [] 72 | 73 | ite.next(function loop (err, node) { 74 | if (err) return t.error(err, 'no error') 75 | 76 | if (!node) { 77 | var expected = keys.slice(0, 42).concat(keys.slice(43)) 78 | t.same(actual.sort(), expected.sort(), 'all except deleted one') 79 | t.end() 80 | return 81 | } 82 | 83 | actual.push(node.value) 84 | ite.next(loop) 85 | }) 86 | } 87 | }) 88 | 89 | tape('delete marks node as deleted', function (t) { 90 | var db = create.one() 91 | var expected = [{key: 'hello', value: 'world', deleted: false}, {key: 'hello', value: null, deleted: true}] 92 | 93 | db.put('hello', 'world', function () { 94 | db.del('hello', function () { 95 | db.createHistoryStream() 96 | .on('data', function (data) { 97 | t.same({key: data.key, value: data.value, deleted: data.deleted}, expected.shift()) 98 | }) 99 | .on('end', function () { 100 | t.same(expected.length, 0) 101 | t.end() 102 | }) 103 | }) 104 | }) 105 | }) 106 | -------------------------------------------------------------------------------- /test/diff.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var cmp = require('compare') 3 | var collect = require('stream-collector') 4 | var create = require('./helpers/create') 5 | var replicate = require('./helpers/replicate') 6 | var put = require('./helpers/put') 7 | 8 | tape('empty diff', function (t) { 9 | var db = create.one() 10 | 11 | var rs = db.createDiffStream(null, 'a') 12 | collect(rs, function (err, actual) { 13 | t.error(err, 'no error') 14 | t.deepEqual(actual, [], 'diff as expected') 15 | t.end() 16 | }) 17 | }) 18 | 19 | tape('implicit checkout', function (t) { 20 | var db = create.one() 21 | 22 | 
db.put('a', '2', function (err) { 23 | t.error(err, 'no error') 24 | var rs = db.createDiffStream(null, 'a') 25 | collect(rs, function (err, actual) { 26 | t.error(err, 'no error') 27 | t.equals(actual.length, 1) 28 | // t.equals(actual[0].type, 'put') 29 | t.equals(actual[0].left.key, 'a') 30 | t.equals(actual[0].left.value, '2') 31 | t.equals(actual[0].right, null) 32 | t.end() 33 | }) 34 | }) 35 | }) 36 | 37 | tape('new value', function (t) { 38 | var db = create.one() 39 | 40 | db.put('a', '1', function (err) { 41 | t.error(err, 'no error') 42 | db.put('a', '2', function (err) { 43 | t.error(err, 'no error') 44 | var rs = db.createDiffStream(null, 'a') 45 | collect(rs, function (err, actual) { 46 | t.error(err, 'no error') 47 | t.equals(actual.length, 1) 48 | // t.equals(actual[0].type, 'put') 49 | t.equals(actual[0].left.key, 'a') 50 | t.equals(actual[0].left.value, '2') 51 | t.equals(actual[0].right, null) 52 | t.end() 53 | }) 54 | }) 55 | }) 56 | }) 57 | 58 | tape('two new nodes', function (t) { 59 | var db = create.one() 60 | 61 | db.put('a/foo', 'quux', function (err) { 62 | t.error(err, 'no error') 63 | db.put('a/bar', 'baz', function (err) { 64 | t.error(err, 'no error') 65 | var rs = db.createDiffStream(null, 'a') 66 | collect(rs, function (err, actual) { 67 | t.error(err, 'no error') 68 | actual.sort(sort) 69 | t.equals(actual.length, 2) 70 | // t.equals(actual[0].type, 'put') 71 | t.equals(actual[0].left.key, 'a/bar') 72 | t.equals(actual[0].left.value, 'baz') 73 | t.equals(actual[0].right, null) 74 | // t.equals(actual[1].type, 'put') 75 | t.equals(actual[1].left.key, 'a/foo') 76 | t.equals(actual[1].left.value, 'quux') 77 | t.equals(actual[1].right, null) 78 | t.end() 79 | }) 80 | }) 81 | }) 82 | }) 83 | 84 | tape('checkout === head', function (t) { 85 | var db = create.one() 86 | 87 | db.put('a', '2', function (err) { 88 | t.error(err, 'no error') 89 | var rs = db.createDiffStream(db, 'a') 90 | collect(rs, function (err, actual) { 91 | t.error(err, 
'no error') 92 | t.equals(actual.length, 0) 93 | t.end() 94 | }) 95 | }) 96 | }) 97 | 98 | tape('new value, twice', function (t) { 99 | var db = create.one() 100 | var snap = db.snapshot() 101 | 102 | db.put('/a', '1', function (err) { 103 | t.error(err, 'no error') 104 | db.put('/a', '2', function (err) { 105 | t.error(err, 'no error') 106 | var rs = db.createDiffStream(snap, 'a') 107 | collect(rs, function (err, actual) { 108 | t.error(err, 'no error') 109 | t.equals(actual.length, 1) 110 | t.equals(actual[0].left.key, 'a') 111 | t.equals(actual[0].left.value, '2') 112 | t.equals(actual[0].right, null) 113 | t.end() 114 | }) 115 | }) 116 | }) 117 | }) 118 | 119 | tape('untracked value', function (t) { 120 | var db = create.one() 121 | 122 | db.put('a', '1', function (err) { 123 | t.error(err, 'no error') 124 | var snap = db.snapshot() 125 | db.put('a', '2', function (err) { 126 | t.error(err, 'no error') 127 | db.put('b', '17', function (err) { 128 | t.error(err, 'no error') 129 | var rs = db.createDiffStream(snap, 'a') 130 | collect(rs, function (err, actual) { 131 | t.error(err, 'no error') 132 | t.equals(actual.length, 1) 133 | t.equals(actual[0].left.key, 'a') 134 | t.equals(actual[0].left.value, '2') 135 | t.equals(actual[0].right.key, 'a') 136 | t.equals(actual[0].right.value, '1') 137 | t.end() 138 | }) 139 | }) 140 | }) 141 | }) 142 | }) 143 | 144 | tape('diff root', function (t) { 145 | var db = create.one() 146 | 147 | db.put('a', '1', function (err) { 148 | t.error(err, 'no error') 149 | var snap = db.snapshot() 150 | db.put('a', '2', function (err) { 151 | t.error(err, 'no error') 152 | db.put('b', '17', function (err) { 153 | t.error(err, 'no error') 154 | var rs = db.createDiffStream(snap) 155 | collect(rs, function (err, actual) { 156 | t.error(err, 'no error') 157 | actual.sort(sort) 158 | t.equals(actual.length, 2) 159 | t.equals(actual[0].left.key, 'a') 160 | t.equals(actual[0].left.value, '2') 161 | t.equals(actual[0].right.key, 'a') 162 | 
t.equals(actual[0].right.value, '1') 163 | t.equals(actual[1].left.key, 'b') 164 | t.equals(actual[1].left.value, '17') 165 | t.equals(actual[1].right, null) 166 | t.end() 167 | }) 168 | }) 169 | }) 170 | }) 171 | }) 172 | 173 | tape('updated value', function (t) { 174 | var db = create.one() 175 | 176 | db.put('a/d/r', '1', function (err) { 177 | t.error(err, 'no error') 178 | var snap = db.snapshot() 179 | db.put('a/d/r', '3', function (err) { 180 | t.error(err, 'no error') 181 | var rs = db.createDiffStream(snap, 'a') 182 | collect(rs, function (err, actual) { 183 | t.error(err, 'no error') 184 | t.equals(actual.length, 1) 185 | t.equals(actual[0].left.key, 'a/d/r') 186 | t.equals(actual[0].left.value, '3') 187 | t.equals(actual[0].right.key, 'a/d/r') 188 | t.equals(actual[0].right.value, '1') 189 | t.end() 190 | }) 191 | }) 192 | }) 193 | }) 194 | 195 | tape('basic with 2 feeds', function (t) { 196 | create.two(function (a, b) { 197 | a.put('a', 'a', function () { 198 | replicate(a, b, validate) 199 | }) 200 | 201 | function validate () { 202 | var rs = b.createDiffStream(null, 'a', {reduce: (a, b) => a}) 203 | collect(rs, function (err, actual) { 204 | t.error(err, 'no error') 205 | t.equals(actual.length, 1) 206 | t.equals(actual[0].left.key, 'a') 207 | t.equals(actual[0].left.value, 'a') 208 | t.end() 209 | }) 210 | } 211 | }) 212 | }) 213 | 214 | tape('two feeds /w competing for a value', function (t) { 215 | create.two(function (a, b) { 216 | a.put('a', 'a', function () { 217 | b.put('a', 'b', function () { 218 | replicate(a, b, validate) 219 | }) 220 | }) 221 | 222 | function validate () { 223 | var rs = b.createDiffStream(null, 'a') 224 | collect(rs, function (err, actual) { 225 | t.error(err, 'no error') 226 | t.equals(actual.length, 1) 227 | actual[0].left.sort(sortByValue) 228 | t.equals(actual[0].left[0].key, 'a') 229 | t.equals(actual[0].left[0].value, 'a') 230 | t.equals(actual[0].left[1].key, 'a') 231 | t.equals(actual[0].left[1].value, 'b') 232 | 
t.end() 233 | }) 234 | } 235 | }) 236 | }) 237 | 238 | tape('small diff on big db', function (t) { 239 | var db = create.one() 240 | 241 | put(db, range(1000), function (err) { 242 | t.error(err, 'no error') 243 | var snap = db.snapshot() 244 | db.put('42', '42*', function (err) { 245 | t.error(err, 'no error') 246 | var rs = db.createDiffStream(snap) 247 | collect(rs, function (err, actual) { 248 | t.error(err, 'no error') 249 | t.equals(actual.length, 1) 250 | t.equals(actual[0].left.key, '42') 251 | t.equals(actual[0].left.value, '42*') 252 | t.equals(actual[0].right.key, '42') 253 | t.equals(actual[0].right.value, '42') 254 | t.end() 255 | }) 256 | }) 257 | }) 258 | }) 259 | 260 | function range (n) { 261 | return Array(n).join('.').split('.').map((_, i) => '' + i) 262 | } 263 | 264 | function sortByValue (a, b) { 265 | return cmp(a.value, b.value) 266 | } 267 | 268 | function sort (a, b) { 269 | var ak = (a.left || a.right).key 270 | var bk = (b.left || b.right).key 271 | return cmp(ak, bk) 272 | } 273 | -------------------------------------------------------------------------------- /test/fuzzing.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | 3 | var run = require('./helpers/run') 4 | var fuzzRunner = require('./helpers/fuzzing').fuzzRunner 5 | 6 | tape('fuzz testing', function (t) { 7 | run( 8 | cb => fuzzRunner(t, { 9 | keys: 20, 10 | dirs: 2, 11 | dirSize: 2, 12 | conflicts: 0, 13 | replications: 2 14 | }, cb), 15 | cb => fuzzRunner(t, { 16 | keys: 9, 17 | dirs: 1, 18 | dirSize: 2, 19 | conflicts: 0, 20 | writers: 2, 21 | replications: 1 22 | }, cb), 23 | function (err) { 24 | t.error(err) 25 | t.end() 26 | } 27 | ) 28 | }) 29 | -------------------------------------------------------------------------------- /test/helpers/create.js: -------------------------------------------------------------------------------- 1 | var hyperdb = require('../../') 2 | var ram = 
require('random-access-memory') 3 | var latency = require('random-access-latency') 4 | var replicate = require('./replicate') 5 | var reduce = (a, b) => a 6 | 7 | exports.one = function (key, opts) { 8 | if (!opts) opts = {} 9 | opts.reduce = reduce 10 | opts.valueEncoding = opts.valueEncoding || 'utf-8' 11 | var storage = opts.latency ? name => latency(opts.latency, ram()) : ram 12 | return hyperdb(storage, key, opts) 13 | } 14 | 15 | exports.two = function (cb) { 16 | createMany(2, function (err, dbs, replicateByIndex) { 17 | if (err) return cb(err) 18 | dbs.push(replicateByIndex.bind(null, [0, 1])) 19 | return cb.apply(null, dbs) 20 | }) 21 | } 22 | 23 | exports.three = function (cb) { 24 | createMany(3, function (err, dbs, replicateByIndex) { 25 | if (err) return cb(err) 26 | dbs.push(replicateByIndex.bind(null, [0, 1, 2])) 27 | return cb.apply(null, dbs) 28 | }) 29 | } 30 | 31 | exports.many = createMany 32 | 33 | function createMany (count, cb) { 34 | var dbs = [] 35 | var remaining = count - 1 36 | 37 | var first = hyperdb(ram, { valueEncoding: 'utf-8' }) 38 | first.ready(function (err) { 39 | if (err) return cb(err) 40 | dbs.push(first) 41 | insertNext() 42 | }) 43 | 44 | function insertNext () { 45 | if (remaining === 0) { 46 | // After the databases have been created, replicate all the authorizations. 
47 | return replicateByIndex(err => { 48 | if (err) return cb(err) 49 | return cb(null, dbs, replicateByIndex) 50 | }) 51 | } 52 | var db = hyperdb(ram, first.key, { valueEncoding: 'utf-8' }) 53 | db.ready(function (err) { 54 | if (err) return cb(err) 55 | first.authorize(db.local.key, function (err) { 56 | if (err) return cb(err) 57 | dbs.push(db) 58 | remaining-- 59 | return insertNext() 60 | }) 61 | }) 62 | } 63 | 64 | function replicateByIndex (indices, cb) { 65 | if (typeof indices === 'function') { 66 | cb = indices 67 | indices = dbs.map((_, i) => i) 68 | } 69 | if (indices.length === 0) return cb() 70 | 71 | var pairs = [] 72 | for (var i = 0; i < indices.length; i++) { 73 | for (var j = 1; j < indices.length; j++) { 74 | if (i !== j) pairs.push([i, j]) 75 | } 76 | } 77 | 78 | var remaining = pairs.length 79 | doReplicate() 80 | 81 | function doReplicate () { 82 | if (remaining === 0) return cb(null) 83 | var pair = pairs[pairs.length - remaining--] 84 | replicate(dbs[pair[0]], dbs[pair[1]], function (err) { 85 | if (err) return cb(null) 86 | return doReplicate() 87 | }) 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /test/helpers/fuzzing.js: -------------------------------------------------------------------------------- 1 | var prettier = require('prettier') 2 | var standard = require('standard') 3 | var seed = require('seed-random') 4 | 5 | var normalizeKey = require('../../lib/normalize') 6 | var create = require('./create') 7 | var run = require('./run') 8 | var put = require('./put') 9 | 10 | const ALPHABET = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] 11 | 12 | module.exports.validate = validate 13 | module.exports.fuzzRunner = fuzzRunner 14 | 15 | /** 16 | * Fuzzing tests can be specified using an options dictionary with this form: 17 | * (Defaults specified in the example below). 18 | * 19 | * { 20 | * keys: 1000, // The total number of records. 
/**
 * Fuzzing tests can be specified using an options dictionary with this form:
 * (Defaults specified in the example below).
 *
 * {
 *   keys: 1000,       // The total number of records.
 *   writers: 1,       // The total number of writers.
 *   keyDepth: 1,      // The maximum number of path components per key.
 *   dirs: 20,         // The approximate number of directories that will be created.
 *   dirSize: 10,      // The approximate number of keys at each prefix level.
 *   prefixSize: 5,    // The size of each path component.
 *   conflicts: 0,     // The approximate number of conflicting keys.
 *   replications: 10, // The approximate number of all-to-all replications/tests.
 *   valueSize: 20,    // The size of each value.
 *   seed: 'hello'     // The seed (for repeatable testing).
 * }
 */
function defaultOpts (opts) {
  return Object.assign({
    keys: 1000,
    writers: 1,
    keyDepth: 2,
    dirs: 20,
    dirSize: 10,
    prefixSize: 5,
    conflicts: 0,
    replications: 10,
    valueSize: 20,
    seed: 'hello'
  }, opts || {})
}

// Bernoulli trial: true with the given probability, using `rand` in [0, 1).
function test (probability, rand) {
  return rand() < probability
}

// Draw `count` elements from `arr` using `rand`, with or without
// replacement. Throws when asked for more unique elements than exist.
// NOTE(review): without replacement, a degenerate `rand` that repeats the
// same index can spin forever retrying duplicates — acceptable for a
// seeded PRNG, but worth confirming callers never pass a constant rand.
function sample (arr, count, rand, withReplacement) {
  // BUG FIX: throw a proper Error instance (was `throw Error(...)`).
  if (count > arr.length && !withReplacement) throw new Error('Invalid sampling arguments.')
  var result = []
  while (result.length !== count) {
    var candidate = arr[Math.floor(rand() * arr.length)]
    if (withReplacement) result.push(candidate)
    else if (result.indexOf(candidate) === -1) result.push(candidate)
  }
  return result
}

// Create `opts.writers` mutually-authorized dbs; cb(dbs, replicateByIndex).
function makeDatabases (opts, cb) {
  create.many(opts.writers, function (err, dbs, replicateByIndex) {
    if (err) throw err
    return cb(dbs, replicateByIndex)
  })
}

// Deterministically (via opts.seed) generate the write workload: an array
// of Maps, one per replication round, each mapping a normalized key to a
// sparse array of values indexed by writer.
function generateData (opts) {
  var random = seed(opts.seed)

  var keysPerReplication = []
  var writesPerReplication = []
  var writers = new Array(opts.writers).fill(0).map((_, i) => i)

  // Generate the list of all keys that will be inserted.
  var stack = []
  for (var i = 0; i < opts.keys; i++) {
    var prefix = sample(ALPHABET, opts.prefixSize, random, true).join('')

    var shouldPushDir = test(opts.dirs / opts.keys, random) &&
      stack.length < opts.keyDepth
    var shouldPopDir = stack.length && test(1 / opts.dirSize, random)
    var shouldReplicate = test(opts.replications / opts.keys, random)

    if (shouldPushDir) stack.push(prefix)
    if (shouldPopDir) stack.pop()

    var batchIdx = (!keysPerReplication.length) ? 0 : keysPerReplication.length - 1

    if (!keysPerReplication[batchIdx]) keysPerReplication.push([])

    keysPerReplication[batchIdx].push(normalizeKey(stack.join('/') + '/' + prefix))
    if (shouldReplicate) keysPerReplication.push([])
  }

  // Generate the values for those keys (including possible conflicts).
  for (i = 0; i < keysPerReplication.length; i++) {
    var keyBatch = keysPerReplication[i]
    var writeBatch = new Map()
    for (var j = 0; j < keyBatch.length; j++) {
      var shouldConflict = opts.conflicts && test(opts.conflicts / opts.keys, random)

      var numConflicts = shouldConflict ? Math.floor(random() * opts.writers) + 1 : 1
      var keyWriters = sample(writers, numConflicts, random, false)

      var values = []
      for (var z = 0; z < keyWriters.length; z++) {
        var valueString = sample(ALPHABET, opts.valueSize, random, true).join('')
        values[keyWriters[z]] = valueString
      }

      writeBatch.set(keyBatch[j], values)
    }
    writesPerReplication.push(writeBatch)
  }

  return writesPerReplication
}

// Assert (via tape subtest) that `db` now contains exactly the union of
// the writes in `processedBatches`; cb(err) reports missing keys.
function validate (t, db, processedBatches, cb) {
  var expectedWrites = new Map()
  for (var i = 0; i < processedBatches.length; i++) {
    processedBatches[i].forEach((v, k) => expectedWrites.set(k, v))
  }

  t.test(`validating after ${processedBatches.length} replications`, function (t) {
    t.plan(expectedWrites.size + 1)

    var readStream = db.createReadStream('/')
    readStream.on('end', function () {
      var keys = expectedWrites.size === 0 ? 'none' : Array.from(expectedWrites.keys()).join(',')
      t.same(expectedWrites.size, 0, `missing keys: ${keys}`)

      if (expectedWrites.size === 0) return cb()
      return cb(new Error(`missing keys: ${keys}`))
    })
    readStream.on('error', cb)
    readStream.on('data', function (nodes) {
      if (!nodes) return
      var key = nodes[0].key
      var values = nodes.map(node => node.value)
      t.same(values, expectedWrites.get(key).filter(v => !!v))
      expectedWrites.delete(key)
    })
  })
}
require('./helpers/create') 165 | var validate = require('./helpers/fuzzing').validate 166 | 167 | tape('autogenerated failing fuzz test', function (t) { 168 | var writesPerReplication = ${JSON.stringify(writeArrays)}.map(b => new Map(b)) 169 | 170 | create.many(${dbCount}, function (err, dbs, replicateByIndex) { 171 | t.error(err) 172 | run(${writeOps.map(op => op.toString())}) 173 | }) 174 | })`, { singleQuote: true, semi: false }) 175 | 176 | var standardized = standard.lintTextSync(source, { fix: true }) 177 | console.log(standardized.results[0].output) 178 | console.log('\n') 179 | } 180 | 181 | function fuzzRunner (t, opts, cb) { 182 | opts = defaultOpts(opts) 183 | 184 | var writesPerReplication = generateData(opts) 185 | 186 | makeDatabases(opts, function (dbs, replicateByIndex) { 187 | var ops = [] 188 | for (var i = 0; i < writesPerReplication.length; i++) { 189 | var batch = writesPerReplication[i] 190 | var batchOps = [] 191 | for (var b of batch) { 192 | var key = b[0] 193 | var values = b[1] 194 | for (var j = 0; j < values.length; j++) { 195 | var value = values[j] 196 | if (!value) continue 197 | batchOps.push( 198 | // Evaling here so that function.toString contains variable values. 199 | // (Used for test code generation). 200 | eval(`(cb => { 201 | put(dbs[${j}], [{ 202 | key: '${key}', 203 | value: '${value}' 204 | }], cb) 205 | })`) 206 | ) 207 | } 208 | } 209 | ops.push(batchOps) 210 | // Intersperse replication/validation/failing-test generation between write batches. 211 | ops.push([ 212 | // Currently replicating between all databases at every replication point. 213 | cb => replicateByIndex(cb), 214 | // Evaling to capture `i` for test generation. 215 | eval(`(cb => validate(t, dbs[0], writesPerReplication.slice(0, ${i + 1}), cb))`) 216 | ]) 217 | } 218 | 219 | var finished = 0 220 | doRun() 221 | 222 | function doRun (err) { 223 | if (err) { 224 | // Don't include the validation/replication ops in the test case generation. 
// test/helpers/put.js — write a list of entries into `db` one at a time.
// Each item is either a {key, value} record or a plain string, in which
// case the string serves as both key and value. Invokes cb(err) on the
// first failed write, or cb(null) once every entry has been stored.
function put (db, list, cb) {
  var idx = 0
  step(null)

  function step (err) {
    if (err) return cb(err)
    if (idx === list.length) return cb(null)

    var entry = list[idx++]
    if (typeof entry === 'string') entry = {key: entry, value: entry}
    db.put(entry.key, entry.value, step)
  }
}
// Guarded so the helper also loads outside CommonJS (no-op under node's CJS).
if (typeof module !== 'undefined') module.exports = put

// test/helpers/replicate.js — run one round of bidirectional replication
// between dbs `a` and `b`. `opts` is optional; `cb`, when given, fires
// once the piped replication stream emits 'end'.
function replicate (a, b, opts, cb) {
  if (typeof opts === 'function') return replicate(a, b, null, opts)

  var streamA = a.replicate(opts)
  var streamB = b.replicate(opts)

  streamA.pipe(streamB).pipe(streamA).on('end', function () {
    if (cb) cb()
  })
}
if (typeof module !== 'undefined') module.exports = replicate

// test/helpers/run.js — execute async steps in sequence. Accepts any mix
// of step functions and arrays of step functions; the final argument is
// the completion callback and receives the first error (or null). On
// error the remaining steps are skipped.
function run () {
  var steps = [].concat.apply([], arguments) // flatten nested arrays
  next(null)

  function next (err) {
    // Finish when only the completion callback remains, or on any error.
    if (steps.length === 1 || err) return steps.pop()(err)
    steps.shift()(next)
  }
}
if (typeof module !== 'undefined') module.exports = run
-------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var collect = require('stream-collector') 3 | var create = require('./helpers/create') 4 | var replicate = require('./helpers/replicate') 5 | 6 | tape('empty history', function (t) { 7 | var db = create.one() 8 | var expected = [] 9 | 10 | var rs = db.createHistoryStream() 11 | collect(rs, function (err, actual) { 12 | t.error(err, 'no error') 13 | t.deepEqual(actual, expected, 'diff as expected') 14 | t.end() 15 | }) 16 | }) 17 | 18 | tape('single value', function (t) { 19 | var db = create.one() 20 | 21 | db.put('a', '2', function (err) { 22 | t.error(err, 'no error') 23 | var rs = db.createHistoryStream() 24 | collect(rs, function (err, actual) { 25 | t.error(err, 'no error') 26 | t.equals(actual.length, 1) 27 | t.equals(actual[0].key, 'a') 28 | t.equals(actual[0].value, '2') 29 | t.end() 30 | }) 31 | }) 32 | }) 33 | 34 | tape('multiple values', function (t) { 35 | var db = create.one() 36 | 37 | db.put('a', '2', function (err) { 38 | t.error(err, 'no error') 39 | db.put('b/0', 'boop', function (err) { 40 | t.error(err, 'no error') 41 | var rs = db.createHistoryStream() 42 | collect(rs, function (err, actual) { 43 | t.error(err, 'no error') 44 | t.equals(actual.length, 2) 45 | t.equals(actual[0].key, 'a') 46 | t.equals(actual[0].value, '2') 47 | t.equals(actual[1].key, 'b/0') 48 | t.equals(actual[1].value, 'boop') 49 | t.end() 50 | }) 51 | }) 52 | }) 53 | }) 54 | 55 | tape('multiple values: same key', function (t) { 56 | var db = create.one() 57 | 58 | db.put('a', '2', function (err) { 59 | t.error(err, 'no error') 60 | db.put('a', 'boop', function (err) { 61 | t.error(err, 'no error') 62 | var rs = db.createHistoryStream() 63 | collect(rs, function (err, actual) { 64 | t.error(err, 'no error') 65 | t.equals(actual.length, 2) 66 | t.equals(actual[0].key, 'a') 67 | t.equals(actual[0].value, '2') 68 | t.equals(actual[1].key, 'a') 69 | 
t.equals(actual[1].value, 'boop') 70 | t.end() 71 | }) 72 | }) 73 | }) 74 | }) 75 | 76 | tape('2 feeds', function (t) { 77 | create.two(function (a, b) { 78 | a.put('a', 'a', function () { 79 | b.put('b', '12', function () { 80 | replicate(a, b, validate) 81 | }) 82 | }) 83 | 84 | function validate () { 85 | var rs = b.createHistoryStream() 86 | var bi = b.feeds.indexOf(b.local) 87 | var ai = bi === 0 ? 1 : 0 88 | 89 | collect(rs, function (err, actual) { 90 | t.error(err, 'no error') 91 | t.equals(actual.length, 3) 92 | t.equals(actual[0].feed, ai) 93 | t.equals(actual[0].seq, 1) 94 | t.equals(actual[1].feed, ai) 95 | t.equals(actual[1].seq, 2) 96 | t.equals(actual[2].feed, bi) 97 | t.equals(actual[2].seq, 1) 98 | t.end() 99 | }) 100 | } 101 | }) 102 | }) 103 | 104 | tape('reverse', function (t) { 105 | create.two(function (a, b) { 106 | a.put('a', 'a', function () { 107 | b.put('b', '12', function () { 108 | replicate(a, b, validate) 109 | }) 110 | }) 111 | 112 | function validate () { 113 | var rs = b.createHistoryStream({reverse: true}) 114 | var bi = b.feeds.indexOf(b.local) 115 | var ai = bi === 0 ? 
1 : 0 116 | 117 | collect(rs, function (err, actual) { 118 | t.error(err, 'no error') 119 | t.equals(actual.length, 3) 120 | t.equals(actual[0].feed, bi) 121 | t.equals(actual[0].seq, 1) 122 | t.equals(actual[1].feed, ai) 123 | t.equals(actual[1].seq, 2) 124 | t.equals(actual[2].feed, ai) 125 | t.equals(actual[2].seq, 1) 126 | t.end() 127 | }) 128 | } 129 | }) 130 | }) 131 | -------------------------------------------------------------------------------- /test/hooks.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var create = require('./helpers/create') 3 | 4 | tape('onlookup hook', function (t) { 5 | var db = create.one() 6 | var batch = [] 7 | var path = [] 8 | 9 | for (var i = 0; i < 200; i++) { 10 | batch.push({type: 'put', key: '' + i, value: '' + i}) 11 | } 12 | 13 | db.batch(batch, function (err) { 14 | t.error(err, 'no error') 15 | db.get('0', {onlookup}, function (err, node) { 16 | t.error(err, 'no error') 17 | db._getAllPointers(path, false, function (err, nodes) { 18 | t.error(err, 'no error') 19 | t.same(nodes[0].seq, db.feeds[0].length - 1, 'first is head') 20 | for (var i = 1; i < nodes.length; i++) { 21 | t.ok(inTrie(nodes[i - 1], nodes[i]), 'in trie') 22 | } 23 | t.same(nodes[nodes.length - 1].seq, node.seq, 'last node is the found one') 24 | t.end() 25 | }) 26 | }) 27 | 28 | function inTrie (node, ptr) { 29 | return node.trie.some(function (bucket) { 30 | if (!bucket) return false 31 | return bucket.some(function (values) { 32 | if (!values) return false 33 | return values.some(function (val) { 34 | return val.feed === ptr.feed && val.seq === ptr.seq 35 | }) 36 | }) 37 | }) 38 | } 39 | 40 | function onlookup (ptr) { 41 | path.push(ptr) 42 | } 43 | }) 44 | }) 45 | -------------------------------------------------------------------------------- /test/iterator-order.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 
2 | var cmp = require('compare') 3 | var create = require('./helpers/create') 4 | var put = require('./helpers/put') 5 | var run = require('./helpers/run') 6 | var hash = require('../lib/hash') 7 | 8 | function sortByHash (a, b) { 9 | var ha = hash(typeof a === 'string' ? a : a.key).join('') 10 | var hb = hash(typeof b === 'string' ? b : b.key).join('') 11 | return cmp(ha, hb) 12 | } 13 | 14 | function reverseSortByHash (a, b) { 15 | return -1 * sortByHash(a, b) 16 | } 17 | 18 | const cases = { 19 | 'simple': ['a', 'b', 'c'], 20 | 'mixed depth from root': ['a/a', 'a/b', 'a/c', 'b', 'c'], 21 | '3 paths deep': ['a', 'a/a', 'a/b', 'a/c', 'a/a/a', 'a/a/b', 'a/a/c'] 22 | } 23 | 24 | Object.keys(cases).forEach((key) => { 25 | tape('iterator is hash order sorted (' + key + ')', function (t) { 26 | var keysToTest = cases[key] 27 | run( 28 | cb => testSingleFeedWithKeys(t, keysToTest, cb), 29 | cb => testTwoFeedsWithKeys(t, keysToTest, cb), 30 | cb => testSingleFeedWithKeys(t, keysToTest, { reverse: true, sort: reverseSortByHash }, cb), 31 | cb => testTwoFeedsWithKeys(t, keysToTest, { reverse: true, sort: reverseSortByHash }, cb), 32 | cb => t.end() 33 | ) 34 | }) 35 | }) 36 | 37 | tape('fully visit a folder before visiting the next one', function (t) { 38 | t.plan(12) 39 | var db = create.one() 40 | put(db, ['a', 'a/b', 'a/b/c', 'b/c', 'b/c/d'], function (err) { 41 | t.error(err, 'no error') 42 | var ite = db.iterator() 43 | 44 | ite.next(function loop (err, val) { 45 | t.error(err, 'no error') 46 | if (!val) return t.end() 47 | 48 | if (val.key[0] === 'b') { 49 | t.same(val.key, 'b/c') 50 | ite.next(function (err, val) { 51 | t.error(err, 'no error') 52 | t.same(val.key, 'b/c/d') 53 | ite.next(loop) 54 | }) 55 | } else { 56 | t.same(val.key, 'a') 57 | ite.next(function (err, val) { 58 | t.error(err, 'no error') 59 | t.same(val.key, 'a/b') 60 | ite.next(function (err, val) { 61 | t.error(err, 'no error') 62 | t.same(val.key, 'a/b/c') 63 | ite.next(loop) 64 | }) 65 | }) 66 
| } 67 | }) 68 | }) 69 | }) 70 | 71 | function testSingleFeedWithKeys (t, keys, opts, cb) { 72 | if (typeof opts === 'function') return testSingleFeedWithKeys(t, keys, null, opts) 73 | opts = opts || {} 74 | 75 | var sortFunc = opts.sort || sortByHash 76 | 77 | t.comment('with single feed') 78 | var db = create.one() 79 | put(db, keys, function (err) { 80 | t.error(err, 'no error') 81 | testIteratorOrder(t, db.iterator(opts), keys, sortFunc, cb) 82 | }) 83 | } 84 | 85 | function testTwoFeedsWithKeys (t, keys, opts, cb) { 86 | if (typeof opts === 'function') return testTwoFeedsWithKeys(t, keys, null, opts) 87 | opts = opts || {} 88 | 89 | var sortFunc = opts.sort || sortByHash 90 | 91 | t.comment('with values split across two feeds') 92 | create.two(function (db1, db2, replicate) { 93 | var half = Math.floor(keys.length / 2) 94 | run( 95 | cb => put(db1, keys.slice(0, half), cb), 96 | cb => put(db2, keys.slice(half), cb), 97 | cb => replicate(cb), 98 | cb => testIteratorOrder(t, db1.iterator(opts), keys, sortFunc, cb), 99 | cb => testIteratorOrder(t, db2.iterator(opts), keys, sortFunc, cb), 100 | done 101 | ) 102 | }) 103 | function done () { 104 | if (!cb) t.end() 105 | else cb() 106 | } 107 | } 108 | 109 | function testIteratorOrder (t, iterator, expected, sortFunc, done) { 110 | var sorted = expected.slice(0).sort(sortFunc) 111 | each(iterator, onEach, onDone) 112 | function onEach (err, node) { 113 | t.error(err, 'no error') 114 | var key = node.key || node[0].key 115 | t.same(key, sorted.shift()) 116 | } 117 | function onDone () { 118 | t.same(sorted.length, 0) 119 | if (done === undefined) t.end() 120 | else done() 121 | } 122 | } 123 | 124 | function each (ite, cb, done) { 125 | ite.next(function loop (err, node) { 126 | if (err) return cb(err) 127 | if (!node) return done() 128 | cb(null, node) 129 | ite.next(loop) 130 | }) 131 | } 132 | -------------------------------------------------------------------------------- /test/iterator.js: 
-------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var create = require('./helpers/create') 3 | var put = require('./helpers/put') 4 | var run = require('./helpers/run') 5 | 6 | tape('basic iteration', function (t) { 7 | var db = create.one() 8 | var vals = ['a', 'b', 'c'] 9 | var expected = toMap(vals) 10 | 11 | put(db, vals, function (err) { 12 | t.error(err, 'no error') 13 | all(db.iterator(), function (err, map) { 14 | t.error(err, 'no error') 15 | t.same(map, expected, 'iterated all values') 16 | t.end() 17 | }) 18 | }) 19 | }) 20 | 21 | tape('iterate a big db', function (t) { 22 | var db = create.one() 23 | 24 | var vals = range(1000, '#') 25 | var expected = toMap(vals) 26 | 27 | put(db, vals, function (err) { 28 | t.error(err, 'no error') 29 | all(db.iterator(), function (err, map) { 30 | t.error(err, 'no error') 31 | t.same(map, expected, 'iterated all values') 32 | t.end() 33 | }) 34 | }) 35 | }) 36 | 37 | tape('prefix basic iteration', function (t) { 38 | var db = create.one() 39 | var vals = ['foo/a', 'foo/b', 'foo/c'] 40 | var expected = toMap(vals) 41 | 42 | vals = vals.concat(['a', 'b', 'c']) 43 | 44 | put(db, vals, function (err) { 45 | t.error(err, 'no error') 46 | all(db.iterator('foo'), function (err, map) { 47 | t.error(err, 'no error') 48 | t.same(map, expected, 'iterated all values') 49 | t.end() 50 | }) 51 | }) 52 | }) 53 | 54 | tape('empty prefix iteration', function (t) { 55 | var db = create.one() 56 | var vals = ['foo/a', 'foo/b', 'foo/c'] 57 | var expected = {} 58 | 59 | put(db, vals, function (err) { 60 | t.error(err, 'no error') 61 | all(db.iterator('bar'), function (err, map) { 62 | t.error(err, 'no error') 63 | t.same(map, expected, 'iterated all values') 64 | t.end() 65 | }) 66 | }) 67 | }) 68 | 69 | tape('prefix iterate a big db', function (t) { 70 | var db = create.one() 71 | 72 | var vals = range(1000, 'foo/#') 73 | var expected = toMap(vals) 74 | 75 | vals = 
vals.concat(range(1000, '#')) 76 | 77 | put(db, vals, function (err) { 78 | t.error(err, 'no error') 79 | all(db.iterator('foo'), function (err, map) { 80 | t.error(err, 'no error') 81 | t.same(map, expected, 'iterated all values') 82 | t.end() 83 | }) 84 | }) 85 | }) 86 | 87 | tape('non recursive iteration', function (t) { 88 | var db = create.one() 89 | 90 | var vals = [ 91 | 'a', 92 | 'a/b/c/d', 93 | 'a/c', 94 | 'b', 95 | 'b/b/c', 96 | 'c/a', 97 | 'c' 98 | ] 99 | 100 | put(db, vals, function (err) { 101 | t.error(err, 'no error') 102 | all(db.iterator({recursive: false}), function (err, map) { 103 | t.error(err, 'no error') 104 | var keys = Object.keys(map).map(k => k.split('/')[0]) 105 | t.same(keys.sort(), ['a', 'b', 'c'], 'iterated all values') 106 | t.end() 107 | }) 108 | }) 109 | }) 110 | 111 | tape('mixed nested and non nexted iteration', function (t) { 112 | var db = create.one() 113 | var vals = ['a', 'a/a', 'a/b', 'a/c', 'a/a/a', 'a/a/b', 'a/a/c'] 114 | var expected = toMap(vals) 115 | 116 | put(db, vals, function (err) { 117 | t.error(err, 'no error') 118 | all(db.iterator(), function (err, map) { 119 | t.error(err, 'no error') 120 | t.same(map, expected, 'iterated all values') 121 | t.end() 122 | }) 123 | }) 124 | }) 125 | 126 | tape('two writers, simple fork', function (t) { 127 | t.plan(2 * 2 + 1) 128 | 129 | create.two(function (db1, db2, replicate) { 130 | run( 131 | cb => db1.put('0', '0', cb), 132 | replicate, 133 | cb => db1.put('1', '1a', cb), 134 | cb => db2.put('1', '1b', cb), 135 | cb => db1.put('10', '10', cb), 136 | replicate, 137 | cb => db1.put('2', '2', cb), 138 | cb => db1.put('1/0', '1/0', cb), 139 | done 140 | ) 141 | 142 | function done (err) { 143 | t.error(err, 'no error') 144 | all(db1.iterator(), ondb1all) 145 | all(db2.iterator(), ondb2all) 146 | } 147 | 148 | function ondb2all (err, map) { 149 | t.error(err, 'no error') 150 | t.same(map, {'0': ['0'], '1': ['1a', '1b'], '10': ['10']}) 151 | } 152 | 153 | function ondb1all 
(err, map) { 154 | t.error(err, 'no error') 155 | t.same(map, {'0': ['0'], '1': ['1a', '1b'], '10': ['10'], '2': ['2'], '1/0': ['1/0']}) 156 | } 157 | }) 158 | }) 159 | 160 | tape('two writers, one fork', function (t) { 161 | create.two(function (db1, db2, replicate) { 162 | run( 163 | cb => db1.put('0', '0', cb), 164 | cb => db2.put('2', '2', cb), 165 | cb => db2.put('3', '3', cb), 166 | cb => db2.put('4', '4', cb), 167 | cb => db2.put('5', '5', cb), 168 | cb => db2.put('6', '6', cb), 169 | cb => db2.put('7', '7', cb), 170 | cb => db2.put('8', '8', cb), 171 | cb => db2.put('9', '9', cb), 172 | cb => replicate(cb), 173 | cb => db1.put('1', '1a', cb), 174 | cb => db2.put('1', '1b', cb), 175 | cb => replicate(cb), 176 | cb => db1.put('0', '00', cb), 177 | cb => replicate(cb), 178 | cb => db2.put('hi', 'ho', cb), 179 | done 180 | ) 181 | 182 | function done (err) { 183 | t.error(err, 'no error') 184 | all(db1.iterator(), function (err, vals) { 185 | t.error(err, 'no error') 186 | t.same(vals, { 187 | '0': ['00'], 188 | '1': ['1a', '1b'], 189 | '2': ['2'], 190 | '3': ['3'], 191 | '4': ['4'], 192 | '5': ['5'], 193 | '6': ['6'], 194 | '7': ['7'], 195 | '8': ['8'], 196 | '9': ['9'] 197 | }) 198 | 199 | all(db2.iterator(), function (err, vals) { 200 | t.error(err, 'no error') 201 | t.same(vals, { 202 | '0': ['00'], 203 | '1': ['1a', '1b'], 204 | '2': ['2'], 205 | '3': ['3'], 206 | '4': ['4'], 207 | '5': ['5'], 208 | '6': ['6'], 209 | '7': ['7'], 210 | '8': ['8'], 211 | '9': ['9'], 212 | 'hi': ['ho'] 213 | }) 214 | t.end() 215 | }) 216 | }) 217 | } 218 | }) 219 | }) 220 | 221 | tape('two writers, one fork, many values', function (t) { 222 | var r = range(100, 'i') 223 | 224 | create.two(function (db1, db2, replicate) { 225 | run( 226 | cb => db1.put('0', '0', cb), 227 | cb => db2.put('2', '2', cb), 228 | cb => db2.put('3', '3', cb), 229 | cb => db2.put('4', '4', cb), 230 | cb => db2.put('5', '5', cb), 231 | cb => db2.put('6', '6', cb), 232 | cb => db2.put('7', '7', cb), 233 
| cb => db2.put('8', '8', cb), 234 | cb => db2.put('9', '9', cb), 235 | cb => replicate(cb), 236 | cb => db1.put('1', '1a', cb), 237 | cb => db2.put('1', '1b', cb), 238 | cb => replicate(cb), 239 | cb => db1.put('0', '00', cb), 240 | r.map(i => cb => db1.put(i, i, cb)), 241 | cb => replicate(cb), 242 | done 243 | ) 244 | 245 | function done (err) { 246 | t.error(err, 'no error') 247 | 248 | var expected = { 249 | '0': ['00'], 250 | '1': ['1a', '1b'], 251 | '2': ['2'], 252 | '3': ['3'], 253 | '4': ['4'], 254 | '5': ['5'], 255 | '6': ['6'], 256 | '7': ['7'], 257 | '8': ['8'], 258 | '9': ['9'] 259 | } 260 | 261 | r.forEach(function (v) { 262 | expected[v] = [v] 263 | }) 264 | 265 | all(db1.iterator(), function (err, vals) { 266 | t.error(err, 'no error') 267 | t.same(vals, expected) 268 | all(db2.iterator(), function (err, vals) { 269 | t.error(err, 'no error') 270 | t.same(vals, expected) 271 | t.end() 272 | }) 273 | }) 274 | } 275 | }) 276 | }) 277 | 278 | tape('two writers, fork', function (t) { 279 | t.plan(2 * 2 + 1) 280 | 281 | create.two(function (a, b, replicate) { 282 | run( 283 | cb => a.put('a', 'a', cb), 284 | replicate, 285 | cb => b.put('a', 'b', cb), 286 | cb => a.put('b', 'c', cb), 287 | replicate, 288 | done 289 | ) 290 | 291 | function done (err) { 292 | t.error(err, 'no error') 293 | 294 | all(a.iterator(), onall) 295 | all(b.iterator(), onall) 296 | 297 | function onall (err, map) { 298 | t.error(err, 'no error') 299 | t.same(map, {b: ['c'], a: ['b']}) 300 | } 301 | } 302 | }) 303 | }) 304 | 305 | tape('three writers, two forks', function (t) { 306 | t.plan(2 * 3 + 1) 307 | 308 | var replicate = require('./helpers/replicate') 309 | 310 | create.three(function (a, b, c, replicateAll) { 311 | run( 312 | cb => a.put('a', 'a', cb), 313 | replicateAll, 314 | cb => b.put('a', 'ab', cb), 315 | cb => a.put('some', 'some', cb), 316 | cb => replicate(a, c, cb), 317 | cb => c.put('c', 'c', cb), 318 | replicateAll, 319 | done 320 | ) 321 | 322 | function done 
(err) { 323 | t.error(err, 'no error') 324 | all(a.iterator(), onall) 325 | all(b.iterator(), onall) 326 | all(c.iterator(), onall) 327 | 328 | function onall (err, map) { 329 | t.error(err, 'no error') 330 | t.same(map, {a: ['ab'], c: ['c'], some: ['some']}) 331 | } 332 | } 333 | }) 334 | }) 335 | 336 | tape('list buffers an iterator', function (t) { 337 | var db = create.one() 338 | 339 | put(db, ['a', 'b', 'b/c'], function (err) { 340 | t.error(err, 'no error') 341 | db.list(function (err, all) { 342 | t.error(err, 'no error') 343 | t.same(all.map(v => v.key).sort(), ['a', 'b', 'b/c']) 344 | db.list('b', {gt: true}, function (err, all) { 345 | t.error(err, 'no error') 346 | t.same(all.length, 1) 347 | t.same(all[0].key, 'b/c') 348 | t.end() 349 | }) 350 | }) 351 | }) 352 | }) 353 | 354 | tape('options to get deleted keys', function (t) { 355 | var db = create.one() 356 | run( 357 | cb => put(db, ['a', 'b', 'c'], cb), 358 | cb => db.del('a', cb), 359 | done 360 | ) 361 | function done () { 362 | all(db.iterator({ deletes: true }), function (err, map) { 363 | t.error(err, 'no error') 364 | t.same(map, { a: null, 'b': 'b', c: 'c' }, 'iterated all values') 365 | t.end() 366 | }) 367 | } 368 | }) 369 | 370 | tape('three writers, two forks with deletes', function (t) { 371 | t.plan(2 * 3 + 1) 372 | 373 | var replicate = require('./helpers/replicate') 374 | 375 | create.three(function (a, b, c, replicateAll) { 376 | run( 377 | cb => a.put('a', 'a', cb), 378 | replicateAll, 379 | cb => b.put('a', 'ab', cb), 380 | cb => a.put('some', 'some', cb), 381 | cb => replicate(a, c, cb), 382 | cb => c.put('c', 'c', cb), 383 | cb => c.del('c', cb), 384 | cb => a.del('a', cb), 385 | cb => a.del('some', cb), 386 | replicateAll, 387 | done 388 | ) 389 | 390 | function done (err) { 391 | t.error(err, 'no error') 392 | all(a.iterator({ deletes: true }), onall) 393 | all(b.iterator({ deletes: true }), onall) 394 | all(c.iterator({ deletes: true }), onall) 395 | 396 | function onall 
(err, map) { 397 | t.error(err, 'no error') 398 | t.same(map, {a: ['ab', null], c: [null], some: [null]}) 399 | } 400 | } 401 | }) 402 | }) 403 | 404 | function range (n, v) { 405 | // #0, #1, #2, ... 406 | return new Array(n).join('.').split('.').map((a, i) => v + i) 407 | } 408 | 409 | function toMap (list) { 410 | var map = {} 411 | for (var i = 0; i < list.length; i++) { 412 | map[list[i]] = list[i] 413 | } 414 | return map 415 | } 416 | 417 | function all (ite, cb) { 418 | var vals = {} 419 | 420 | ite.next(function loop (err, node) { 421 | if (err) return cb(err) 422 | if (!node) return cb(null, vals) 423 | var key = Array.isArray(node) ? node[0].key : node.key 424 | if (vals[key]) return cb(new Error('duplicate node for ' + key)) 425 | vals[key] = Array.isArray(node) ? node.map(n => n.value).sort() : node.value 426 | ite.next(loop) 427 | }) 428 | } 429 | -------------------------------------------------------------------------------- /test/key-history.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | 3 | var replicate = require('./helpers/replicate') 4 | var create = require('./helpers/create') 5 | var put = require('./helpers/put') 6 | var run = require('./helpers/run') 7 | 8 | tape('empty db', (t) => { 9 | var db = create.one() 10 | run( 11 | cb => testHistory(t, db, 'hello', [], cb), 12 | t.end 13 | ) 14 | }, { timeout: 1000 }) 15 | 16 | tape('single feed', (t) => { 17 | var db = create.one() 18 | run( 19 | cb => put(db, [ 20 | { key: 'hello', value: 'welt' }, 21 | { key: 'null', value: 'void' }, 22 | { key: 'hello', value: 'world' } 23 | ], cb), 24 | cb => testHistory(t, db, 'hello', ['world', 'welt'], cb), 25 | cb => testHistory(t, db, 'null', ['void'], cb), 26 | t.end 27 | ) 28 | }, { timeout: 1000 }) 29 | 30 | tape('single feed (same value)', (t) => { 31 | var db = create.one() 32 | run( 33 | cb => put(db, [ 34 | { key: 'hello', value: 'welt' }, 35 | { key: 'hello', value: 
'darkness' }, 36 | { key: 'hello', value: 'world' } 37 | ], cb), 38 | cb => testHistory(t, db, 'hello', ['world', 'darkness', 'welt'], cb), 39 | t.end 40 | ) 41 | }, { timeout: 1000 }) 42 | 43 | tape('two feeds', (t) => { 44 | create.two((db1, db2, replicate) => { 45 | run( 46 | cb => put(db1, [ 47 | { key: 'hello', value: 'welt' }, 48 | { key: 'null', value: 'void' } 49 | ], cb), 50 | replicate, 51 | cb => put(db2, [ 52 | { key: 'hello', value: 'world' } 53 | ], cb), 54 | replicate, 55 | cb => testHistory(t, db1, 'hello', ['world', 'welt'], cb), 56 | t.end 57 | ) 58 | }) 59 | }, { timeout: 1000 }) 60 | 61 | tape('two feeds with conflict', (t) => { 62 | create.two((db1, db2, replicate) => { 63 | run( 64 | cb => put(db1, [ 65 | { key: 'hello', value: 'welt' }, 66 | { key: 'null', value: 'void' } 67 | ], cb), 68 | cb => put(db2, [ 69 | { key: 'hello', value: 'world' } 70 | ], cb), 71 | replicate, 72 | cb => testHistory(t, db1, 'hello', [['world', 'welt']], cb), 73 | t.end 74 | ) 75 | }) 76 | }, { timeout: 1000 }) 77 | 78 | tape('three feeds with conflict', (t) => { 79 | create.three((db1, db2, db3, replicateAll) => { 80 | run( 81 | cb => put(db1, [ 82 | { key: 'hello', value: 'welt' }, 83 | { key: 'null', value: 'void' } 84 | ], cb), 85 | cb => replicate(db1, db2, cb), 86 | cb => put(db2, [ 87 | { key: 'hello', value: 'world' } 88 | ], cb), 89 | cb => replicate(db1, db2, cb), 90 | cb => put(db3, [ 91 | { key: 'hello', value: 'again' } 92 | ], cb), 93 | replicateAll, 94 | cb => testHistory(t, db1, 'hello', [['world', 'again'], 'welt'], cb), 95 | t.end 96 | ) 97 | }) 98 | }, { timeout: 1000 }) 99 | 100 | tape('three feeds with all conflicting', (t) => { 101 | create.three((db1, db2, db3, replicateAll) => { 102 | run( 103 | cb => put(db1, [ 104 | { key: 'hello', value: 'welt' }, 105 | { key: 'null', value: 'void' } 106 | ], cb), 107 | cb => put(db2, [ 108 | { key: 'hello', value: 'world' } 109 | ], cb), 110 | cb => put(db3, [ 111 | { key: 'hello', value: 'again' } 112 | 
], cb), 113 | replicateAll, 114 | cb => testHistory(t, db1, 'hello', [['world', 'again', 'welt']], cb), 115 | t.end 116 | ) 117 | }) 118 | }, { timeout: 1000 }) 119 | 120 | tape('three feeds (again)', (t) => { 121 | var toVersion = v => ({ key: 'version', value: v }) 122 | create.three((db1, db2, db3, replicateAll) => { 123 | var len = 5 124 | var expected = [] 125 | for (var i = 0; i < len * 3; i++) { 126 | expected.push(i.toString()) 127 | } 128 | run( 129 | cb => put(db1, expected.slice(0, len).map(toVersion), cb), 130 | replicateAll, 131 | cb => put(db2, expected.slice(len, len * 2).map(toVersion), cb), 132 | replicateAll, 133 | cb => put(db3, expected.slice(len * 2).map(toVersion), cb), 134 | replicateAll, 135 | cb => testHistory(t, db1, 'version', expected.reverse(), cb), 136 | t.end 137 | ) 138 | }) 139 | }, { timeout: 1000 }) 140 | 141 | function testHistory (t, db, key, expected, cb) { 142 | var results = expected.slice(0) 143 | var stream = db.createKeyHistoryStream(key) 144 | stream.on('data', (data) => { 145 | var expected = results.shift() 146 | t.notEqual(expected, undefined) 147 | if (!Array.isArray(expected)) expected = [expected] 148 | t.same(data.length, expected.length) 149 | expected.forEach((value, i) => { 150 | t.same(data[i].value, value) 151 | }) 152 | }) 153 | stream.on('end', () => { 154 | t.same(results.length, 0) 155 | cb() 156 | }) 157 | stream.on('error', cb) 158 | } 159 | -------------------------------------------------------------------------------- /test/read-stream.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var create = require('./helpers/create') 3 | var replicate = require('./helpers/replicate') 4 | var put = require('./helpers/put') 5 | 6 | function toKeyValuePairs (value) { 7 | return (k) => ({ key: k, value: value || k }) 8 | } 9 | 10 | function indexWithKey (key) { 11 | return v => v.key === key 12 | } 13 | 14 | tape('basic readStream', { timeout: 
1000 }, function (t) { 15 | var db = create.one() 16 | var vals = ['foo', 'foo/a', 'foo/b', 'a', 'bar/a', 'foo/abc', 'foo/b', 'bar/b', 'foo/bar', 'foo/a/b'] 17 | var expected = ['foo/a', 'foo/abc', 'foo/b', 'foo/bar', 'foo/a/b'] 18 | put(db, vals, validate) 19 | 20 | function validate (err) { 21 | t.error(err, 'no error') 22 | var reader = db.createReadStream('foo/', {gt: true}) 23 | reader.on('data', (data) => { 24 | var index = expected.indexOf(data.key) 25 | t.ok(index !== -1, 'key is expected') 26 | if (index >= 0) expected.splice(index, 1) 27 | }) 28 | reader.on('end', () => { 29 | t.equals(expected.length, 0) 30 | t.end() 31 | }) 32 | reader.on('error', (err) => { 33 | t.fail(err.message) 34 | t.end() 35 | }) 36 | } 37 | }) 38 | 39 | tape('basic readStream (again)', { timeout: 1000 }, function (t) { 40 | var db = create.one() 41 | var vals = ['foo/a', 'foo/abc', 'foo/a/b'] 42 | var expected = ['foo/a', 'foo/a/b'] 43 | put(db, vals, validate) 44 | 45 | function validate (err) { 46 | t.error(err, 'no error') 47 | var reader = db.createReadStream('foo/a') 48 | reader.on('data', (data) => { 49 | var index = expected.indexOf(data.key) 50 | t.ok(index !== -1, 'key is expected') 51 | if (index >= 0) expected.splice(index, 1) 52 | }) 53 | reader.on('end', () => { 54 | t.equals(expected.length, 0) 55 | t.end() 56 | }) 57 | reader.on('error', (err) => { 58 | t.fail(err.message) 59 | t.end() 60 | }) 61 | } 62 | }) 63 | 64 | tape('readStream with two feeds', { timeout: 1000 }, function (t) { 65 | create.two((a, b) => { 66 | var aValues = ['b/a', 'a/b/c', 'b/c', 'b/c/d'].map(toKeyValuePairs('A')) 67 | var bValues = ['a/b', 'a/b/c', 'b/c/d', 'b/c'].map(toKeyValuePairs('B')) 68 | put(a, aValues, (err) => { 69 | t.error(err, 'no error') 70 | replicate(a, b, () => { 71 | put(b, bValues, (err) => { 72 | t.error(err, 'no error') 73 | replicate(a, b, validate) 74 | }) 75 | }) 76 | }) 77 | function validate (err) { 78 | t.error(err, 'no error') 79 | var reader = 
a.createReadStream('b/') 80 | var expected = [ 81 | { key: 'b/c/d', value: 'B' }, 82 | { key: 'b/c', value: 'B' }, 83 | { key: 'b/a', value: 'A' } 84 | ] 85 | reader.on('data', (nodes) => { 86 | t.equals(nodes.length, 1) 87 | const index = expected.findIndex(indexWithKey(nodes[0].key)) 88 | t.ok(index !== -1, 'key is expected') 89 | if (index >= 0) { 90 | var found = expected.splice(index, 1) 91 | t.same(found[0].value, nodes[0].value) 92 | } 93 | }) 94 | reader.on('end', () => { 95 | t.ok(expected.length === 0, 'received all expected') 96 | t.pass('stream ended ok') 97 | t.end() 98 | }) 99 | reader.on('error', (err) => { 100 | t.fail(err.message) 101 | t.end() 102 | }) 103 | } 104 | }) 105 | }) 106 | 107 | tape('readStream with two feeds (again)', { timeout: 1000 }, function (t) { 108 | var aValues = ['/a/a', '/a/b', '/a/c'].map(toKeyValuePairs('A')) 109 | var bValues = ['/b/a', '/b/b', '/b/c', '/a/a', '/a/b', '/a/c'].map(toKeyValuePairs('B')) 110 | create.two((a, b) => { 111 | put(a, aValues, (err) => { 112 | t.error(err) 113 | replicate(a, b, () => { 114 | put(b, bValues, (err) => { 115 | t.error(err) 116 | replicate(a, b, validate) 117 | }) 118 | }) 119 | }) 120 | function validate () { 121 | var reader = b.createReadStream('/') 122 | var expected = ['b/a', 'b/b', 'b/c', 'a/a', 'a/b', 'a/c'] 123 | reader.on('data', (data) => { 124 | t.equals(data.length, 1) 125 | var index = expected.indexOf(data[0].key) 126 | t.ok(index !== -1, 'key is expected') 127 | t.same(data[0].value, 'B') 128 | if (index >= 0) expected.splice(index, 1) 129 | }) 130 | reader.on('end', () => { 131 | t.ok(expected.length === 0, 'received all expected') 132 | t.pass('stream ended ok') 133 | t.end() 134 | }) 135 | reader.on('error', (err) => { 136 | t.fail(err.message) 137 | t.end() 138 | }) 139 | } 140 | }) 141 | }) 142 | 143 | tape('readStream with conflicting feeds', { timeout: 2000 }, function (t) { 144 | var conflictingKeys = ['c/a', 'c/b', 'c/c', 'c/d'] 145 | create.two((a, b) => { 146 
| put(a, ['a/a', 'a/b', 'a/c'].map(toKeyValuePairs('A')), (err) => { 147 | t.error(err) 148 | replicate(a, b, () => { 149 | put(b, ['b/a', 'b/b', 'b/c'].map(toKeyValuePairs('B')), (err) => { 150 | t.error(err) 151 | replicate(a, b, (err) => { 152 | t.error(err) 153 | put(a, conflictingKeys.map(toKeyValuePairs('A')), (err) => { 154 | t.error(err) 155 | put(b, conflictingKeys.reverse().map(toKeyValuePairs('B')), (err) => { 156 | t.error(err) 157 | replicate(a, b, validate) 158 | }) 159 | }) 160 | }) 161 | }) 162 | }) 163 | }) 164 | function validate () { 165 | var expected = ['a/a', 'a/b', 'a/c', 'b/a', 'b/b', 'b/c', 'c/b', 'c/c', 'c/a', 'c/d'] 166 | var reader = a.createReadStream('/') 167 | reader.on('data', (data) => { 168 | var isConflicting = conflictingKeys.indexOf(data[0].key) >= 0 169 | if (isConflicting) { 170 | t.equals(data.length, 2) 171 | } else { 172 | t.equals(data.length, 1) 173 | } 174 | var index = expected.indexOf(data[0].key) 175 | 176 | t.ok(index !== -1, 'key is expected') 177 | if (index >= 0) expected.splice(index, 1) 178 | }) 179 | reader.on('end', () => { 180 | t.ok(expected.length === 0, 'received all expected') 181 | t.pass('stream ended ok') 182 | t.end() 183 | }) 184 | reader.on('error', (err) => { 185 | t.fail(err.message) 186 | t.end() 187 | }) 188 | } 189 | }) 190 | }) 191 | 192 | tape('returns no data if db is empty', function (t) { 193 | var db = create.one() 194 | var reader = db.createReadStream('foo/') 195 | 196 | reader.on('data', (data) => { 197 | t.fail('should be no data') 198 | t.end() 199 | }) 200 | reader.on('end', () => { 201 | t.ok('everything is ok') 202 | t.end() 203 | }) 204 | reader.on('error', (err) => { 205 | t.fail(err.message) 206 | t.end() 207 | }) 208 | }) 209 | -------------------------------------------------------------------------------- /test/reopen-and-write.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var create = 
require('./helpers/create')
var run = require('./helpers/run')
var hyperdb = require('..')
var messages = require('../lib/messages')

// Regression test: a db with 3 authorized writers must survive being
// re-opened on the same storage, written to, and re-opened once more.
tape('3 writers, re-open and write, re-open again', function (t) {
  create.three(function (a, b, c) {
    var reopened // the hyperdb instance re-opened over a's storage

    run(
      cb => a.put('foo', 'bar', cb),
      testUncorrupted,
      reopenDb,
      cb => reopened.put('foo2', 'bar2', cb),
      reopenDb,
      testInflateValue,
      done
    )

    function done (err) {
      t.error(err, 'no error')
      t.end()
    }

    // All three writers should still be known after the initial put.
    function testUncorrupted (cb) {
      t.equal(a._writers.length, 3, 'correct number of writers')
      cb()
    }

    // Opens a fresh hyperdb instance backed by a's existing storage.
    function reopenDb (cb) {
      reopened = hyperdb(reuseStorage(a))
      reopened.ready(function (err) {
        t.error(err, 'no error')
        cb()
      })
    }

    // The entry written after re-opening (seq 4, the feed's 5th entry) must
    // decode with inflate === 2 — TODO confirm the expected inflate pointer
    // against lib/messages if the write schedule above ever changes.
    function testInflateValue (cb) {
      t.equals(reopened.source.length, 5, 'correct length')
      reopened.source.get(4, function (err, data) {
        t.error(err, 'no error')
        var val = messages.Entry.decode(data)
        t.equal(val.inflate, 2, 'correct inflate for new entry')
        cb()
      })
    }
  })
})

// Storage factory that reuses the feeds of an already-open db, so a second
// hyperdb instance can be opened over the exact same underlying data.
// Storage names look like "source/<file>" or "peers/<discovery-key-hex>/<file>".
function reuseStorage (db) {
  return function (name) {
    var match = name.match(/^source\/(.*)/)
    if (match) {
      name = match[1]
      if (name === 'secret_key') return db.source._storage.secretKey
      return db.source._storage[name]
    }
    match = name.match(/^peers\/([0-9a-f]+)\/(.*)/)
    if (match) {
      var hex = match[1]
      name = match[2]
      // Locate the writer whose feed discovery key matches the path segment.
      var peerWriter = db._writers.find(function (writer) {
        return writer && writer._feed.discoveryKey.toString('hex') === hex
      })
      if (!peerWriter) throw new Error('mismatch')
      var feed = peerWriter._feed
      if (name === 'secret_key') return feed._storage.secretKey
      return feed._storage[name]
    } else {
      throw new Error('mismatch')
    }
  }
}
-------------------------------------------------------------------------------- /test/replicate.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var cmp = require('compare') 3 | var create = require('./helpers/create') 4 | var run = require('./helpers/run') 5 | var replicate = require('./helpers/replicate') 6 | 7 | tape('two writers, no conflicts, many values', function (t) { 8 | t.plan(1 + 3 * 4) 9 | 10 | create.two(function (db1, db2, replicate) { 11 | var r = [] 12 | for (var i = 0; i < 1000; i++) r.push('i' + i) 13 | 14 | run( 15 | cb => db1.put('0', '0', cb), 16 | cb => replicate(cb), 17 | cb => db2.put('a', 'a', cb), 18 | cb => replicate(cb), 19 | cb => db2.put('2', '2', cb), 20 | cb => db2.put('3', '3', cb), 21 | cb => db2.put('4', '4', cb), 22 | cb => db2.put('5', '5', cb), 23 | cb => db2.put('6', '6', cb), 24 | cb => db2.put('7', '7', cb), 25 | cb => db2.put('8', '8', cb), 26 | cb => db2.put('9', '9', cb), 27 | cb => replicate(cb), 28 | cb => db1.put('b', 'b', cb), 29 | cb => db2.put('c', 'c', cb), 30 | cb => replicate(cb), 31 | cb => db2.put('d', 'd', cb), 32 | cb => replicate(cb), 33 | r.map(i => cb => db1.put(i, i, cb)), 34 | done 35 | ) 36 | 37 | function done (err) { 38 | t.error(err, 'no error') 39 | db2.get('a', expect('a')) 40 | db1.get('0', expect('0')) 41 | db1.get('i424', expect('i424')) 42 | 43 | function expect (v) { 44 | return function (err, nodes) { 45 | t.error(err, 'no error') 46 | t.same(nodes.length, 1) 47 | t.same(nodes[0].key, v) 48 | t.same(nodes[0].value, v) 49 | } 50 | } 51 | } 52 | }) 53 | }) 54 | 55 | tape('two writers, one conflict', function (t) { 56 | t.plan(1 + 4 * 2 + 6 * 2) 57 | create.two(function (db1, db2, replicate) { 58 | run( 59 | cb => db1.put('a', 'a', cb), 60 | cb => replicate(cb), 61 | cb => db1.put('b', 'b', cb), 62 | cb => db2.put('b', 'B', cb), 63 | cb => replicate(cb), 64 | cb => db1.put('a', 'A', cb), 65 | cb => replicate(cb), 66 | done 
67 | ) 68 | 69 | function done (err) { 70 | t.error(err, 'no error') 71 | 72 | db1.get('a', ona) 73 | db2.get('a', ona) 74 | db1.get('b', onb) 75 | db2.get('b', onb) 76 | 77 | function onb (err, nodes) { 78 | t.error(err, 'no error') 79 | nodes.sort((a, b) => cmp(a.value, b.value)) 80 | t.same(nodes.length, 2) 81 | t.same(nodes[0].key, 'b') 82 | t.same(nodes[0].value, 'B') 83 | t.same(nodes[1].key, 'b') 84 | t.same(nodes[1].value, 'b') 85 | } 86 | 87 | function ona (err, nodes) { 88 | t.error(err, 'no error') 89 | t.same(nodes.length, 1) 90 | t.same(nodes[0].key, 'a') 91 | t.same(nodes[0].value, 'A') 92 | } 93 | } 94 | }) 95 | }) 96 | 97 | tape('two writers, fork', function (t) { 98 | t.plan(4 * 2 + 1) 99 | 100 | create.two(function (a, b, replicate) { 101 | run( 102 | cb => a.put('a', 'a', cb), 103 | replicate, 104 | cb => b.put('a', 'b', cb), 105 | cb => a.put('b', 'c', cb), 106 | replicate, 107 | done 108 | ) 109 | 110 | function done (err) { 111 | t.error(err, 'no error') 112 | a.get('a', ona) 113 | b.get('a', ona) 114 | } 115 | 116 | function ona (err, nodes) { 117 | t.error(err, 'no error') 118 | t.same(nodes.length, 1) 119 | t.same(nodes[0].key, 'a') 120 | t.same(nodes[0].value, 'b') 121 | } 122 | }) 123 | }) 124 | 125 | tape('three writers, two forks', function (t) { 126 | t.plan(4 * 3 + 1) 127 | 128 | create.three(function (a, b, c, replicateAll) { 129 | run( 130 | cb => a.put('a', 'a', cb), 131 | replicateAll, 132 | cb => b.put('a', 'ab', cb), 133 | cb => a.put('some', 'some', cb), 134 | cb => replicate(a, c, cb), 135 | cb => c.put('c', 'c', cb), 136 | replicateAll, 137 | done 138 | ) 139 | 140 | function done (err) { 141 | t.error(err, 'no error') 142 | a.get('a', ona) 143 | b.get('a', ona) 144 | c.get('a', ona) 145 | 146 | function ona (err, nodes) { 147 | t.error(err, 'no error') 148 | t.same(nodes.length, 1, 'one node') 149 | t.same(nodes[0].key, 'a') 150 | t.same(nodes[0].value, 'ab') 151 | } 152 | } 153 | }) 154 | }) 155 | 156 | tape('two writers, 
simple fork', function (t) { 157 | t.plan(1 + 2 * (4 + 6) + 2 + 4) 158 | create.two(function (db1, db2, replicate) { 159 | run( 160 | cb => db1.put('0', '0', cb), 161 | replicate, 162 | cb => db1.put('1', '1a', cb), 163 | cb => db2.put('1', '1b', cb), 164 | replicate, 165 | cb => db1.put('2', '2', cb), 166 | done 167 | ) 168 | 169 | function done (err) { 170 | t.error(err, 'no error') 171 | db1.get('0', on0) 172 | db1.get('1', on1) 173 | db1.get('2', on2db1) 174 | db2.get('0', on0) 175 | db2.get('1', on1) 176 | db2.get('2', on2db2) 177 | } 178 | 179 | function on0 (err, nodes) { 180 | t.error(err, 'no error') 181 | t.same(nodes.length, 1) 182 | t.same(nodes[0].key, '0') 183 | t.same(nodes[0].value, '0') 184 | } 185 | 186 | function on1 (err, nodes) { 187 | t.error(err, 'no error') 188 | t.same(nodes.length, 2) 189 | nodes.sort((a, b) => cmp(a.value, b.value)) 190 | t.same(nodes[0].key, '1') 191 | t.same(nodes[0].value, '1a') 192 | t.same(nodes[1].key, '1') 193 | t.same(nodes[1].value, '1b') 194 | } 195 | 196 | function on2db1 (err, nodes) { 197 | t.error(err, 'no error') 198 | t.same(nodes.length, 1) 199 | t.same(nodes[0].key, '2') 200 | t.same(nodes[0].value, '2') 201 | } 202 | 203 | function on2db2 (err, nodes) { 204 | t.error(err, 'no error') 205 | t.same(nodes.length, 0) 206 | } 207 | }) 208 | }) 209 | 210 | tape('three writers, no conflicts, forks', function (t) { 211 | t.plan(1 + 4 * 3) 212 | 213 | create.three(function (a, b, c, replicateAll) { 214 | run( 215 | cb => c.put('a', 'ac', cb), 216 | replicateAll, 217 | cb => a.put('foo', 'bar', cb), 218 | replicateAll, 219 | cb => a.put('a', 'aa', cb), 220 | cb => replicate(a, b, cb), 221 | range(50).map(key => cb => b.put(key, key, cb)), 222 | replicateAll, 223 | range(5).map(key => cb => c.put(key, 'c' + key, cb)), 224 | done 225 | ) 226 | 227 | function done (err) { 228 | t.error(err, 'no error') 229 | a.get('a', ona) 230 | b.get('a', ona) 231 | c.get('a', ona) 232 | } 233 | 234 | function ona (err, nodes) { 
235 | t.error(err, 'no error') 236 | t.same(nodes.length, 1) 237 | t.same(nodes[0].key, 'a') 238 | t.same(nodes[0].value, 'aa') 239 | } 240 | }) 241 | }) 242 | 243 | tape('replication to two new peers, only authorize one writer', function (t) { 244 | var a = create.one() 245 | a.ready(function () { 246 | var b = create.one(a.key) 247 | var c = create.one(a.key) 248 | 249 | run( 250 | cb => b.ready(cb), 251 | cb => c.ready(cb), 252 | cb => a.put('foo', 'bar', cb), 253 | cb => a.authorize(b.local.key, cb), 254 | cb => replicate(a, b, cb), 255 | cb => replicate(a, c, cb), 256 | done 257 | ) 258 | 259 | function done (err) { 260 | t.error(err, 'no error') 261 | c.authorized(c.local.key, function (err, auth) { 262 | t.error(err, 'no error') 263 | t.notOk(auth) 264 | t.end() 265 | }) 266 | } 267 | }) 268 | }) 269 | 270 | tape('2 unauthed clones', function (t) { 271 | t.plan(1 + 2 * 2) 272 | 273 | var db = create.one(null) 274 | 275 | db.ready(function () { 276 | var clone1 = create.one(db.key) 277 | var clone2 = create.one(db.key) 278 | 279 | run( 280 | cb => db.put('hello', 'world', cb), 281 | cb => clone1.ready(cb), 282 | cb => replicate(db, clone1, cb), 283 | cb => clone2.ready(cb), 284 | cb => replicate(clone1, clone2, cb), 285 | done 286 | ) 287 | 288 | function done (err) { 289 | t.error(err, 'no error') 290 | clone1.get('hello', onhello) 291 | clone2.get('hello', onhello) 292 | 293 | function onhello (err, node) { 294 | t.error(err, 'no error') 295 | t.same(node.value, 'world') 296 | } 297 | } 298 | }) 299 | }) 300 | 301 | tape('opts is not mutated', function (t) { 302 | var db = create.one() 303 | var opts = {} 304 | db.replicate(opts) 305 | t.deepEqual(opts, {}) 306 | t.end() 307 | }) 308 | 309 | function range (n) { 310 | return Array(n).join(',').split(',').map((_, i) => '' + i) 311 | } 312 | -------------------------------------------------------------------------------- /test/trie-encoding.js: 
--------------------------------------------------------------------------------
var tape = require('tape')
var enc = require('../lib/trie-encoding')

// Pins the exact encoded bytes for a handful of small tries, so any change
// to the wire format is caught immediately.
tape('encode trie', function (t) {
  t.same(enc.encode([], []), Buffer.alloc(0), 'empty trie')
  t.same(enc.encode([[[{feed: 0, seq: 0}]]], [0]), Buffer.from([0, 1, 0, 0]))
  t.same(enc.encode(set(1, [[{feed: 0, seq: 0}]]), [0]), Buffer.from([1, 1, 0, 0]))
  t.same(enc.encode(set(1, [[{feed: 0, seq: 0}]]), [1]), Buffer.from([1, 1, 2, 0]))
  t.same(enc.encode(set(1, [[{feed: 0, seq: 0}, {feed: 0, seq: 1}]]), [0]), Buffer.from([1, 1, 1, 0, 0, 1]))
  t.same(enc.encode(set(1, set(1, [{feed: 0, seq: 0}, {feed: 0, seq: 1}])), [0]), Buffer.from([1, 2, 1, 0, 0, 1]))
  t.end()
})

// Mirror of the encode cases above: each byte sequence must decode back to
// the same sparse trie structure.
tape('decode trie', function (t) {
  t.same(enc.decode(Buffer.alloc(0), []), [], 'empty trie')
  t.same(enc.decode(Buffer.from([0, 1, 0, 0]), [0]), [[[{feed: 0, seq: 0}]]])
  t.same(enc.decode(Buffer.from([1, 1, 0, 0]), [0]), set(1, [[{feed: 0, seq: 0}]]))
  t.same(enc.decode(Buffer.from([1, 1, 2, 0]), [1, 0]), set(1, [[{feed: 0, seq: 0}]]))
  t.same(enc.decode(Buffer.from([1, 1, 1, 0, 0, 1]), [0]), set(1, [[{feed: 0, seq: 0}, {feed: 0, seq: 1}]]))
  t.same(enc.decode(Buffer.from([1, 2, 1, 0, 0, 1]), [0]), set(1, set(1, [{feed: 0, seq: 0}, {feed: 0, seq: 1}])))
  t.end()
})

// Round-trip: decode(encode(x)) === x for a deeper, sparser trie.
// NOTE(review): set() only takes (i, val) — the extra array arguments passed
// here are ignored, so only the first value ends up at index 2.
tape('encode and decode complex trie', function (t) {
  var target = set(32, set(2, [{feed: 0, seq: 0}], [{feed: 0, seq: 2}], [{feed: 0, seq: 0}]))
  var clone = enc.decode(enc.encode(target, [0]), [0])

  t.same(clone, target)
  t.end()
})

// Builds a sparse array with only index i populated (all other slots holes).
function set (i, val) {
  var arr = []
  arr[i] = val
  return arr
}
--------------------------------------------------------------------------------
/test/watch.js:
--------------------------------------------------------------------------------
var tape = require('tape')
var create =
require('./helpers/create')
var replicate = require('./helpers/replicate')
var run = require('./helpers/run')

// A watcher registered with no prefix fires on any write.
tape('basic watch', function (t) {
  var db = create.one()

  db.watch(function () {
    t.pass('watch triggered')
    t.end()
  })

  db.put('hello', 'world')
})

// A prefixed watcher must ignore writes outside its prefix: the test only
// passes if the callback fires after the 'foo/bar' put, not the 'hello' one.
tape('watch prefix', function (t) {
  var db = create.one()
  var changed = false

  db.watch('foo', function () {
    t.ok(changed)
    t.end()
  })

  db.put('hello', 'world', function (err) {
    t.error(err)
    setImmediate(function () {
      changed = true
      db.put('foo/bar', 'baz')
    })
  })
})

// Writing from inside the watch callback must re-trigger the watcher;
// t.plan(20) requires exactly 20 re-triggers before the guard stops the loop.
tape('recursive watch', function (t) {
  t.plan(20)

  var i = 0
  var db = create.one()

  db.watch('foo', function () {
    if (i === 20) return
    t.pass('watch triggered')
    db.put('foo', 'bar-' + (++i))
  })

  db.put('foo', 'bar')
})

// After w.destroy() the watcher must not fire for subsequent writes.
tape('watch and stop watching', function (t) {
  var db = create.one()
  var once = true

  var w = db.watch('foo', function () {
    t.ok(once)
    once = false
    w.destroy()
    db.put('foo/bar/baz', 'qux', function () {
      t.end()
    })
  })

  db.put('foo/bar', 'baz')
})

// A watcher on a clone fires when the data arrives via replication.
tape('remote watch', function (t) {
  var db = create.one()

  db.ready(function () {
    var clone = create.one(db.key)

    // NOTE(review): the 100 puts are fire-and-forget; the trailing 'flush'
    // put's callback is what guarantees everything has landed before
    // replication starts.
    for (var i = 0; i < 100; i++) db.put('hello-' + i, 'world-' + i)
    db.put('flush', 'flush', function () {
      clone.watch(function () {
        t.pass('remote watch triggered')
        t.end()
      })

      replicate(db, clone)
    })
  })
})

tape('watch with 3rd-party authorize', function (t) {
  create.two(function (a, b) {
    t.plan(3) // once per writer updated in the namespace (b.auth and c.put) and .error

    a.watch(function () {
      t.pass('watch called')
    })

    var c = create.one(a.key)

c.ready(function () {
      run(
        cb => replicate(a, b, cb),
        cb => replicate(b, c, cb),
        // b (already a writer) authorizes the brand-new writer c.
        cb => b.authorize(c.local.key, cb),
        cb => replicate(b, c, cb),
        cb => c.put('hello2', 'world2', cb),
        cb => replicate(b, c, cb),
        // Final sync back to a, whose watcher the test is asserting on.
        cb => replicate(a, b, cb),
        done
      )

      function done (err) {
        t.error(err, 'no error')
      }
    })
  })
})
--------------------------------------------------------------------------------