├── .gitignore
├── README.md
├── group.js
├── index.js
├── lib
│   ├── messages.js
│   ├── mux.js
│   ├── record.js
│   ├── schema.js
│   ├── schemas.js
│   ├── sync-map.js
│   └── util.js
├── package.json
├── schema.proto
├── test
│   ├── bench.js
│   ├── lib
│   │   └── util.js
│   ├── query-bitfield.js
│   ├── source-minimal.js
│   ├── sources.js
│   └── test.js
└── views
    ├── helpers.js
    ├── indexes.js
    ├── kv.js
    └── records.js

/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | pnpm-lock.yaml
3 | pnpm-debug.log
4 | package-lock.json
5 | shrinkwrap.yaml
6 | yarn.lock
7 | SANDBOX
8 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # kappa-record-db
2 | 
3 | A peer-to-peer database built on [hypercores](https://github.com/mafintosh/hypercore) and [kappa-core@experimental](https://github.com/Frando/kappa-core#experimental).
4 | 
5 | *Note: Docs are a little out of date and will be updated soon*
6 | 
7 | * Index a set of hypercores efficiently into materialized-view style secondary indexes
8 | * A simple and universal *record* data model: Each record has an *id*, a *schema*, and a *value*.
9 | * Is developed for [Sonar](https://github.com/arso-project/sonar), which adds full-text search, binary assets, an HTTP API, a CLI and a UI.
10 | 
11 | Basically this means: put in JSON, optionally specify its JSON schema (for validation and uniformity), and sync effortlessly with other peers.
12 | A *database* refers to a particular set of feeds. The current model starts with a single feed, and that feed can then add other feeds to the set of authorized sources. A second model, where all feeds that swarm under a shared key are considered authorized, will be added soon.
13 | 
14 | Internally, the database uses [unordered-materialized-kv](https://github.com/digidem/unordered-materialized-kv/) to have a shared notion of the latest versions of a record.
15 | 
16 | ## Installation
17 | 
18 | #### `npm install kappa-record-db`
19 | 
20 | ## API
21 | 
22 | `const Database = require('kappa-record-db')`
23 | 
24 | #### `const db = new Database(opts)`
25 | 
26 | Required opts are:
27 | * either `corestore`: A [corestore](https://github.com/andrewosh/corestore) instance.
28 | * or `storage`: A file system path or a [random-access-storage](https://github.com/random-access-storage/) instance.
29 | 
30 | 
31 | Optional opts are:
32 | * `key`: A unique key for this database. If not set, a random key is generated.
33 | * `db`: A [levelup](https://github.com/Level/levelup) instance (optional, defaults to an in-memory level-mem instance).
34 | * `validate`: Enable strict validation of all records being put into the database (default: false).
35 | 
36 | #### `db.ready(cb)`
37 | 
38 | `cb` is called after the database is fully initialized.
39 | 
40 | #### `db.replicate(isInitiator, [opts])`
41 | 
42 | Create a hypercore-protocol replication stream. If piped into the replication stream of another database with the same key, the databases will exchange their updates.
43 | 
44 | #### `db.putSource(key, [info], cb)`
45 | 
46 | Add an additional source feed. `info` is optional and may include `{ alias }`. The key of the database's local writable feed is available at `db.localKey`.
47 | 
48 | #### `db.put(record, [opts], cb)`
49 | 
50 | Put a record into the database. `opts` may set `{ validate }` to override the database-wide validation setting.
51 | 
52 | * `record` is a plain js object: `{ id, schema, value }`.
53 | * `id` is the record's unique id. Leave it empty to create a new record.
54 | * `schema` is a string that sets the record's type. If the `validate` opt is true, the put only succeeds if a schema of that name exists in the database and the record validates against it.
55 | * `value` is a JavaScript object. It has to be serializable to JSON. If the record's schema has its definition stored in the database, the value has to conform to that schema.
56 | * `cb` is a callback that will be called with `(err, id)`.
57 | 
58 | 
59 | #### `db.get(req, [opts], cb)`
60 | 
61 | Get a record from the database. `req` is a plain js object with:
62 | 
63 | ```javascript
64 | {
65 |   id: 'string',
66 |   schema: 'string',
67 |   key: 'string',
68 |   seq: int
69 | }
70 | ```
71 | Either `id` or both `key` and `seq` are required. `opts` are the same as for `db.query`.
72 | 
73 | #### `db.putSchema(name, schema, cb)`
74 | 
75 | Save a schema into the database. The schema has to be valid [JSON Schema](https://json-schema.org), with additional optional properties. Top-level properties of the JSON schema (like `$id` and `name`) are filled in automatically.
76 | 
77 | ```javascript
78 | const schema = {
79 |   properties: {
80 |     title: {
81 |       type: 'string',
82 |       index: true
83 |     },
84 |     date: {
85 |       type: 'string',
86 |       index: true
87 |     }
88 |   }
89 | }
90 | db.putSchema('event', schema, (err) => {
91 |   if (!err) console.log('schema saved.')
92 | })
93 | ```
94 | 
95 | Additional optional keys per property:
96 | 
97 | * `index`: Set on a top-level simple field to index values of that field in the database.
98 | 
99 | #### `db.getSchema(name)`
100 | 
101 | Get a schema definition from the database. This is synchronous; schemas are kept in memory.
102 | 
103 | #### `db.query(name, args, [opts], [cb])`
104 | 
105 | Query the database. Queries are defined by views (see below).
106 | 
107 | Returns a readable stream of results. If `cb` is a function, it will be called with `(err, results)` instead of returning a readable stream.
108 | 
109 | Opts are:
110 | * `waitForSync`: if true, wait for pending indexing to complete before executing the query (default: false)
111 | 
112 | #### `db.use(name, createView, [opts])`
113 | 
114 | Register a new database view. Views are functions that will be called whenever records are being put or deleted. The database maintains the state of each view so that they catch up on updates automatically. See [kappa-core](https://github.com/kappa-db/kappa-core) for a good introduction on how to work with kappa views.
115 | 
116 | `name` is the name of the view. It has to be unique per database.
117 | 
118 | `createView` is a constructor function. It will be called with `(level, db, opts)`:
119 | 
120 | * `level`: a [LevelUp](https://github.com/Level/levelup)-compatible LevelDB instance for this view
121 | * `db`: the database
122 | * `opts`: the optional opts passed into `db.use`
123 | 
124 | The constructor function has to return a view object with the following keys:
125 | 
126 | * `map: function (records, next) {}`
127 |   This function will be called with a batch of records. Process the entries (e.g. by inserting rows into the leveldb). Call `next()` when done.
128 | * `api`: An object of query functions that this view exposes to the outside world. They should be safe to call (they must not modify data) as they may be called from the client side.
129 | 
130 | If the view provides a `query` function on its api object, this function will be callable as a query on the main database. The query function has to return a readable stream of objects with either `{ id }` or `{ key, seq }` properties. Result objects may also include a `meta` property.
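
## Examples

A minimal sketch of the full flow: create an in-memory database, register a schema, put a record, and query the built-in `records` view. This follows the API above and the patterns used in this repo's tests; error handling is kept minimal.

```javascript
const Database = require('kappa-record-db')

// In-memory storage by default; pass `storage` or a `corestore` to persist.
const db = new Database({ validate: true })

db.ready(() => {
  // Register a schema so puts are validated and `title` is indexed.
  db.putSchema('event', {
    properties: {
      title: { type: 'string', index: true }
    }
  }, err => {
    if (err) throw err
    // Leave `id` empty to create a new record; the generated id is returned.
    db.put({ schema: 'event', value: { title: 'hello world' } }, (err, id) => {
      if (err) throw err
      // waitForSync waits for indexing to catch up before querying.
      db.query('records', { schema: 'event' }, { waitForSync: true }, (err, records) => {
        if (err) throw err
        console.log(id, records.map(r => r.value))
      })
    })
  })
})
```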
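Two databases that share a key replicate by piping their streams into each other. A sketch, modeled on the helper in `test/lib/util.js` (note that `replicate` takes `isInitiator` as its first argument):

```javascript
const Database = require('kappa-record-db')

const db1 = new Database()
db1.ready(() => {
  // A replica is opened with the first database's key.
  const db2 = new Database({ key: db1.key })
  db2.ready(() => {
    // One side initiates; pipe the duplex replication streams into each other.
    const stream = db1.replicate(true, { live: true })
    stream.pipe(db2.replicate(false, { live: true })).pipe(stream)
    // Emitted whenever remote data arrives.
    db2.on('remote-update', () => console.log('db2 got an update'))
  })
})
```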
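A sketch of a custom view following the contract above. The view name `titles`, the `args` shape, and the key layout are made up for illustration, and the exact signature with which kappa-core invokes `api` methods should be checked against the kappa-core version in use; here `query` is written as it is called by `db.query`.

```javascript
const { Transform } = require('stream')

// Hypothetical view: index records by their `value.title` field.
function createTitleView (lvl, db, opts) {
  return {
    map (records, next) {
      const batch = []
      for (const record of records) {
        // Deletes and records without a title are skipped for brevity.
        if (!record.value || record.value.title === undefined) continue
        batch.push({
          type: 'put',
          key: record.value.title + '!' + record.id + '@' + record.seq,
          value: JSON.stringify({ key: record.key, seq: record.seq })
        })
      }
      lvl.batch(batch, next)
    },
    api: {
      // Must return a readable stream of { id } or { key, seq } objects.
      query (args) {
        const parse = new Transform({
          objectMode: true,
          transform (chunk, enc, next) {
            next(null, JSON.parse(chunk))
          }
        })
        return lvl
          .createValueStream({ gte: args.title + '!', lte: args.title + '!~' })
          .pipe(parse)
      }
    }
  }
}

db.use('titles', createTitleView)
// The emitted { key, seq } pairs are resolved to full records by the database.
db.query('titles', { title: 'hello world' }, { waitForSync: true }, (err, records) => {
  if (!err) console.log(records.map(r => r.id))
})
```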
131 | -------------------------------------------------------------------------------- /group.js: -------------------------------------------------------------------------------- 1 | const ram = require('random-access-memory') 2 | const sub = require('subleveldown') 3 | const memdb = require('level-mem') 4 | const collect = require('stream-collector') 5 | const { Transform } = require('stream') 6 | const hypercore = require('hypercore') 7 | const debug = require('debug')('db') 8 | const inspect = require('inspect-custom-symbol') 9 | const pretty = require('pretty-hash') 10 | const pump = require('pump') 11 | const mutex = require('mutexify') 12 | const LRU = require('lru-cache') 13 | const Bitfield = require('fast-bitfield') 14 | const hcrypto = require('hypercore-crypto') 15 | const Nanoresource = require('nanoresource/emitter') 16 | 17 | const Kappa = require('kappa-core') 18 | const Indexer = require('kappa-sparse-indexer') 19 | const Corestore = require('corestore') 20 | 21 | const { uuid, through, noop, once } = require('./lib/util') 22 | const { Header } = require('./lib/messages') 23 | const mux = require('./lib/mux') 24 | const SyncMap = require('./lib/sync-map') 25 | 26 | const LEN = Symbol('record-size') 27 | const INFO = Symbol('feed-info') 28 | 29 | const MAX_CACHE_SIZE = 16777216 // 16M 30 | const DEFAULT_MAX_BATCH = 500 31 | const DEFAULT_FEED_TYPE = 'kappa-records' 32 | const DEFAULT_NAMESPACE = 'kappa-group' 33 | 34 | const LOCAL_WRITER_NAME = '_localwriter' 35 | const ROOT_FEED_NAME = '_root' 36 | 37 | const Mode = { 38 | MULTIFEED: 'multifeed', 39 | ROOTFEED: 'rootfeed' 40 | } 41 | 42 | module.exports = class Group extends Nanoresource { 43 | static uuid () { 44 | return uuid() 45 | } 46 | 47 | constructor (opts = {}) { 48 | super() 49 | const self = this 50 | this.opts = opts 51 | this.handlers = opts.handlers 52 | 53 | if (opts.swarmMode && Object.values(Mode).indexOf(opts.swarmMode) === -1) { 54 | throw new Error('Invalid swarm mode') 55 | } 56 | 57 | this._name = opts.name 58 | this._alias = opts.alias 59 | this._id = opts.id || uuid() 60 | 61 | this._level = opts.db || memdb() 62 | this._store = new SyncMap(sub(this._level, 's'), { 63 | valueEncoding: 'json' 64 | }) 65 | 66 | if (opts.key) { 67 | this.address = Buffer.isBuffer(opts.key) ? opts.key : Buffer.from(opts.key, 'hex') 68 | } 69 | 70 | this.kappa = opts.kappa || new Kappa() 71 | this.corestore = opts.corestore || new Corestore(opts.storage || ram) 72 | // Patch in a recursive namespace method if we got a namespaced corestore. 73 | if (!this.corestore.namespace && this.corestore.store) { 74 | this.corestore.namespace = name => this.corestore.store.namespace(this.corestore.name + ':' + name) 75 | } 76 | this.indexer = opts.indexer || new Indexer({ 77 | name: this._name, 78 | db: sub(this._level, 'indexer'), 79 | // Load and decode value. 80 | loadValue (req, next) { 81 | self.load(req, (err, message) => { 82 | if (err) return next(null) 83 | next(message) 84 | }) 85 | } 86 | }) 87 | this.lock = mutex() 88 | 89 | this.defaultFeedType = opts.defaultFeedType || DEFAULT_FEED_TYPE 90 | 91 | // Cache for records. Max cache size can be set as an option. 92 | // The length for each record is the buffer length of the serialized record, 93 | // so the actual cache size will be a bit higher. 94 | this._recordCache = new LRU({ 95 | max: opts.maxCacheSize || MAX_CACHE_SIZE, 96 | length (record) { 97 | return record[LEN] || 64 98 | } 99 | }) 100 | // Cache for query bitfields. This will usually take 4KB per bitfield. 
101 | // We cache max 4096 bitfields, amounting to max 16MB bitfield cache size. 102 | this._queryBitfields = new LRU({ 103 | max: 4096 104 | }) 105 | 106 | this._swarmMode = opts.swarmMode || Mode.ROOTFEED 107 | if (this._swarmMode === Mode.MULTIFEED) { 108 | this.on('feed', (feed, info) => { 109 | mux.forwardLiveFeedAnnouncements(this, feed, info) 110 | }) 111 | } 112 | 113 | this._feedNames = {} 114 | this._feeds = [] 115 | this._streams = [] 116 | this._feedTypes = {} 117 | 118 | this.ready = this.open.bind(this) 119 | } 120 | 121 | registerFeedType (name, handlers) { 122 | this._feedTypes[name] = handlers 123 | } 124 | 125 | get view () { 126 | return this.kappa.view 127 | } 128 | 129 | get api () { 130 | return this.kappa.api 131 | } 132 | 133 | use (name, createView, opts = {}) { 134 | const self = this 135 | const viewdb = sub(this._level, 'view.' + name) 136 | const view = createView(viewdb, opts.context || self, opts) 137 | const sourceOpts = { 138 | maxBatch: opts.maxBatch || view.maxBatch || DEFAULT_MAX_BATCH, 139 | filter (messages, next) { 140 | next(messages.filter(msg => msg.seq !== 0)) 141 | } 142 | } 143 | if (!opts.context) opts.context = this 144 | this.kappa.use(name, this.indexer.source(sourceOpts), view, opts) 145 | } 146 | 147 | replicate (isInitiator, opts) { 148 | if (this._swarmMode === Mode.MULTIFEED) { 149 | return mux.replicate(this, isInitiator, opts) 150 | } else { 151 | return this.corestore.replicate(isInitiator, opts) 152 | } 153 | } 154 | 155 | _close (cb) { 156 | this.kappa.close(() => { 157 | this.corestore.close(cb) 158 | }) 159 | } 160 | 161 | _open (cb) { 162 | const self = this 163 | cb = once(cb) 164 | this.corestore.ready(() => { 165 | this._store.open(() => { 166 | this._initFeeds((err) => { 167 | if (err) prefinish(err) 168 | else prefinish() 169 | }) 170 | }) 171 | }) 172 | 173 | function prefinish (err) { 174 | if (err) return cb(err) 175 | let pending = 1 176 | for (const feedType of Object.values(self._feedTypes)) { 177 | if (feedType.onopen) ++pending && feedType.onopen(done) 178 | } 179 | done() 180 | function done () { 181 | if (err) return finish(err) 182 | if (--pending === 0) finish() 183 | } 184 | } 185 | 186 | function finish (err) { 187 | if (err) return cb(err) 188 | self.kappa.resume() 189 | self.opened = true 190 | self.emit('open') 191 | cb() 192 | } 193 | } 194 | 195 | _initFeeds (cb) { 196 | const self = this 197 | for (const [key, info] of this._store.entries()) { 198 | this._addFeedInternally(key, info) 199 | } 200 | 201 | if (this._swarmMode === Mode.ROOTFEED) { 202 | initRootFeed(this.address, (err, feed) => { 203 | if (err) finish(err) 204 | else finish(null, feed.key, feed.discoveryKey) 205 | }) 206 | } else { 207 | finish(null, this.address || hcrypto.keyPair().publicKey) 208 | } 209 | 210 | function initRootFeed (key, cb) { 211 | self.addFeed({ name: ROOT_FEED_NAME, key }, (err, rootfeed) => { 212 | if (err) return cb(err) 213 | if (rootfeed.writable) { 214 | self.addFeed({ name: LOCAL_WRITER_NAME, key: rootfeed.key }, err => cb(err, rootfeed)) 215 | } else { 216 | self.addFeed({ name: LOCAL_WRITER_NAME }, err => cb(err, rootfeed)) 217 | } 218 | }) 219 | } 220 | 221 | function finish (err, key, discoveryKey) { 222 | if (err) return cb(err) 223 | self.address = key 224 | self.key = key 225 | self.discoveryKey = discoveryKey || hcrypto.discoveryKey(key) 226 | cb() 227 | } 228 | } 229 | 230 | _createFeed (key, opts) { 231 | const { name, persist } = opts 232 | let feed 233 | if (persist === false) { 234 | if (!key) { 235 
|         const keyPair = hcrypto.keyPair()
236 |         key = keyPair.publicKey // hypercore-crypto key pairs are { publicKey, secretKey }
237 |         opts.secretKey = keyPair.secretKey
238 |       }
239 |       feed = hypercore(ram, key, opts)
240 |     } else if (!key) {
241 |       // No key was given, create new feed.
242 |       feed = this.corestore.namespace(DEFAULT_NAMESPACE + ':' + name).default(opts)
243 |       key = feed.key
244 |       this.corestore.get({ ...opts, key })
245 |     } else {
246 |       // Key was given, get from corestore.
247 |       feed = this.corestore.get({ ...opts, key })
248 |     }
249 |     return feed
250 |   }
251 | 
252 |   _addFeedInternally (key, opts) {
253 |     const feed = this._createFeed(key, opts)
254 |     feed.on('remote-update', () => this.emit('remote-update'))
255 | 
256 |     if (!key && feed.key) key = feed.key.toString('hex')
257 |     if (!key) throw new Error('Missing key for feed')
258 |     const { name, type } = opts
259 |     const id = this._feeds.length
260 |     feed[INFO] = { name, type, id, key, ...opts.info || {} }
261 |     this._feeds.push(feed)
262 |     this._feedNames[name] = id
263 |     this._feedNames[key] = id
264 |     this.indexer.add(feed, { scan: true })
265 |     this.emit('feed', feed, { ...feed[INFO] })
266 |     debug('[%s] add feed key %s name %s type %s', this._name, pretty(feed.key), name, type)
267 | 
268 |     return feed
269 |   }
270 | 
271 |   // Write header to feed.
272 |   // TODO: Delegate this to a feed type handler.
273 |   _initFeed (feed, cb) {
274 |     if (!feed[INFO]) return cb(new Error('Invalid feed: has no info'))
275 |     const { type } = feed[INFO]
276 |     const header = Header.encode({
277 |       type,
278 |       metadata: Buffer.from(JSON.stringify({ encodingVersion: 1 }))
279 |     })
280 |     feed.append(header, cb)
281 |   }
282 | 
283 |   feedInfo (keyOrName) {
284 |     const feed = this.feed(keyOrName)
285 |     if (feed && feed[INFO]) return feed[INFO]
286 |     return null
287 |   }
288 | 
289 |   feed (keyOrName) {
290 |     if (Buffer.isBuffer(keyOrName)) keyOrName = keyOrName.toString('hex')
291 |     if (this._feedNames[keyOrName] !== undefined) {
292 |       return this._feeds[this._feedNames[keyOrName]]
293 |     }
294 |     return null
295 |   }
296 | 
297 |   getRootFeed () {
298 |     return this.feed(ROOT_FEED_NAME)
299 |   }
300 | 
301 |   getDefaultWriter () {
302 |     return this.feed(LOCAL_WRITER_NAME)
303 |   }
304 | 
305 |   addFeed (opts, cb = noop) {
306 |     const self = this
307 |     let { name, key } = opts
308 |     if (!name && !key) return cb(new Error('Either key or name is required'))
309 |     if (key && Buffer.isBuffer(key)) key = key.toString('hex')
310 |     if (this.feed(key)) {
311 |       const info = this.feedInfo(key)
312 |       const feed = this.feed(key)
313 |       if (info && info.name !== name) {
314 |         this._feedNames[name] = info.id
315 |       }
316 |       return onready(feed, cb)
317 |     }
318 |     if (this.feed(name)) {
319 |       const info = this.feedInfo(name)
320 |       const feed = this.feed(name)
321 |       if (key && info.key !== key) return cb(new Error('Invalid key for name'))
322 |       return onready(feed, cb)
323 |     }
324 | 
325 |     if (!opts.type) opts.type = this.defaultFeedType
326 |     if (!opts.name) opts.name = uuid()
327 |     const feed = this._addFeedInternally(key, opts)
328 | 
329 |     feed.ready(() => {
330 |       if (feed.writable && !feed.length) {
331 |         this._initFeed(feed, finish)
332 |       } else {
333 |         finish()
334 |       }
335 |     })
336 | 
337 |     function finish (err) {
338 |       if (err) return cb(err)
339 |       const info = {
340 |         key: feed.key.toString('hex'),
341 |         name: opts.name,
342 |         type: opts.type,
343 |         ...opts.info || {}
344 |       }
345 |       self._store.setFlush(info.key, info, err => {
346 |         cb(err, feed)
347 |       })
348 |     }
349 |   }
350 | 
351 |   stats (cb) {
352 |     return this.status(cb)
353 |   }
354 | 
355 | 
status (cb) { 356 | const stats = { feeds: [] } 357 | for (const feed of this._feeds) { 358 | stats.feeds.push({ 359 | key: feed.key.toString('hex'), 360 | writable: feed.writable, 361 | length: feed.length, 362 | byteLength: feed.byteLength, 363 | downloadedBlocks: feed.downloaded(0, feed.length), 364 | stats: feed.stats, 365 | info: feed[INFO] 366 | }) 367 | } 368 | 369 | if (!cb) return stats 370 | 371 | stats.kappa = {} 372 | let pending = Object.values(this.kappa.flows).length 373 | for (const flow of Object.values(this.kappa.flows)) { 374 | flow._source.subscription.getState((_err, state) => { 375 | stats.kappa[flow.name] = state 376 | if (--pending === 0) cb(null, stats) 377 | }) 378 | } 379 | } 380 | 381 | writer (opts, cb) { 382 | if (typeof opts === 'string') { 383 | opts = { name: opts } 384 | } else if (typeof opts === 'function') { 385 | cb = opts 386 | opts = null 387 | } 388 | if (!opts) { 389 | opts = { name: LOCAL_WRITER_NAME } 390 | } 391 | this.addFeed(opts, (err, feed) => { 392 | if (err) return cb(err) 393 | if (!feed.writable) return cb(new Error('Feed is not writable')) 394 | cb(null, feed) 395 | }) 396 | } 397 | 398 | append (message, opts, cb) { 399 | const self = this 400 | if (typeof opts === 'function') { 401 | cb = opts 402 | opts = {} 403 | } 404 | if (!opts) opts = {} 405 | if (!cb) cb = noop 406 | 407 | this.lock(release => { 408 | self.writer(opts.feed, (err, feed) => { 409 | if (err) return release(cb, err) 410 | opts.feedType = feed[INFO].type 411 | self._onappend(message, opts, (err, buf, result) => { 412 | if (err) return release(cb, err) 413 | feed.append(buf, err => { 414 | if (err) return release(cb, err) 415 | // if (!result.key) result.key = feed.key 416 | // if (!result.seq) result.seq = feed.length - 1 417 | release(cb, err, result) 418 | }) 419 | }) 420 | }) 421 | }) 422 | } 423 | 424 | _onappend (message, opts, cb) { 425 | const { feedType } = opts 426 | if (this._feedTypes[feedType] && this._feedTypes[feedType].onappend) { 427 | this._feedTypes[feedType].onappend(message, opts, cb) 428 | } else if (this.handlers.onappend) { 429 | this.handlers.onappend(message, opts, cb) 430 | } else { 431 | cb(null, message, {}) 432 | } 433 | } 434 | 435 | _onload (message, opts, cb) { 436 | const { feedType } = message 437 | if (this._feedTypes[feedType] && this._feedTypes[feedType].onload) { 438 | this._feedTypes[feedType].onload(message, opts, cb) 439 | } else if (this.handlers.onload) { 440 | this.handlers.onload(message, opts, cb) 441 | } else { 442 | cb(null, message) 443 | } 444 | } 445 | 446 | batch (messages, opts, cb) { 447 | if (typeof opts === 'function') { 448 | cb = opts 449 | opts = {} 450 | } 451 | const self = this 452 | this.lock(release => { 453 | const batch = [] 454 | const errs = [] 455 | const results = [] 456 | let pending = messages.length 457 | self.writer(opts.feed, (err, feed) => { 458 | if (err) return release(cb, err) 459 | opts.feedType = feed[INFO].type 460 | for (const message of messages) { 461 | process.nextTick(() => this._onappend(message, opts, done)) 462 | } 463 | function done (err, buf, result) { 464 | if (err) errs.push(err) 465 | else { 466 | batch.push(buf) 467 | results.push(result) 468 | } 469 | if (--pending !== 0) return 470 | 471 | if (errs.length) { 472 | let err = new Error(`Batch failed with ${errs.length} errors. 
First error: ${errs[0].message}`) 473 | err.errors = errs 474 | release(cb, err) 475 | return 476 | } 477 | 478 | feed.append(batch, err => release(cb, err, results)) 479 | } 480 | }) 481 | }) 482 | } 483 | 484 | get (keyOrName, seq, opts, cb) { 485 | if (typeof opts === 'function') { 486 | cb = opts 487 | opts = {} 488 | } 489 | if (opts.wait === undefined) opts.wait = false 490 | const feed = this.feed(keyOrName) 491 | if (!feed) return cb(new Error('Feed does not exist: ' + keyOrName)) 492 | feed.get(seq, opts, (err, value) => { 493 | if (err) return cb(err) 494 | const { key, type: feedType } = feed[INFO] 495 | const message = { key, seq, value, feedType } 496 | this._onload(message, opts, cb) 497 | }) 498 | } 499 | 500 | load (req, opts, cb) { 501 | if (typeof opts === 'function') { 502 | cb = opts 503 | opts = {} 504 | } 505 | const self = this 506 | this._resolveRequest(req, (err, req) => { 507 | if (err) return cb(err) 508 | // TODO: Keep this? 509 | if (req.seq === 0) return cb(new Error('seq 0 is the header, not a record')) 510 | 511 | if (this._recordCache.has(req.lseq)) { 512 | return cb(null, this._recordCache.get(req.lseq)) 513 | } 514 | 515 | this.get(req.key, req.seq, opts, finish) 516 | 517 | function finish (err, message) { 518 | if (err) return cb(err) 519 | message.lseq = req.lseq 520 | self._recordCache.set(req.lseq, message) 521 | if (req.meta) { 522 | message = { ...message, meta: req.meta } 523 | } 524 | cb(null, message) 525 | } 526 | }) 527 | } 528 | 529 | _resolveRequest (req, cb) { 530 | if (!empty(req.lseq) && empty(req.seq)) { 531 | this.indexer.lseqToKeyseq(req.lseq, (err, keyseq) => { 532 | if (!err && keyseq) { 533 | req.key = keyseq.key 534 | req.seq = keyseq.seq 535 | } 536 | finish(req) 537 | }) 538 | } else if (empty(req.lseq)) { 539 | this.indexer.keyseqToLseq(req.key, req.seq, (err, lseq) => { 540 | if (!err && lseq) req.lseq = lseq 541 | finish(req) 542 | }) 543 | } else finish(req) 544 | 545 | function finish (req) { 546 | if (empty(req.key) || empty(req.seq)) return cb(new Error('Invalid get request')) 547 | req.seq = parseInt(req.seq) 548 | if (!empty(req.lseq)) req.lseq = parseInt(req.lseq) 549 | if (Buffer.isBuffer(req.key)) req.key = req.key.toString('hex') 550 | cb(null, req) 551 | } 552 | } 553 | 554 | loadRecord (req, cb) { 555 | this.load(req, cb) 556 | } 557 | 558 | createLoadStream (opts = {}) { 559 | const self = this 560 | 561 | const { cacheid } = opts 562 | 563 | let bitfield 564 | if (cacheid) { 565 | if (!this._queryBitfields.has(cacheid)) { 566 | this._queryBitfields.set(cacheid, Bitfield()) 567 | } 568 | bitfield = this._queryBitfields.get(cacheid) 569 | } 570 | 571 | const transform = through(function (req, _enc, next) { 572 | self._resolveRequest(req, (err, req) => { 573 | if (err) return next() 574 | if (bitfield && bitfield.get(req.lseq)) { 575 | this.push({ lseq: req.lseq, meta: req.meta }) 576 | return next() 577 | } 578 | self.load(req, (err, message) => { 579 | if (err) return next() 580 | if (bitfield) bitfield.set(req.lseq, 1) 581 | this.push(message) 582 | next() 583 | }) 584 | }) 585 | }) 586 | return transform 587 | } 588 | 589 | createQueryStream (name, args, opts = {}) { 590 | const self = this 591 | if (typeof opts.load === 'undefined') opts.load = true 592 | 593 | const proxy = new Transform({ 594 | objectMode: true, 595 | transform (chunk, enc, next) { 596 | this.push(chunk) 597 | next() 598 | } 599 | }) 600 | 601 | if (!this.view[name] || !this.view[name].query) { 602 | proxy.destroy(new Error('Invalid query 
name: ' + name)) 603 | return proxy 604 | } 605 | 606 | if (opts.waitForSync) { 607 | this.sync(createStream) 608 | } else { 609 | createStream() 610 | } 611 | 612 | return proxy 613 | 614 | function createStream () { 615 | const qs = self.view[name].query(args, opts) 616 | qs.once('sync', () => proxy.emit('sync')) 617 | qs.on('error', err => proxy.emit('error', err)) 618 | if (opts.load !== false) pump(qs, self.createLoadStream(opts), proxy) 619 | else pump(qs, proxy) 620 | } 621 | } 622 | 623 | sync (views, cb) { 624 | process.nextTick(() => { 625 | this.lock(release => { 626 | this.kappa.ready(views, cb) 627 | release() 628 | }) 629 | }) 630 | } 631 | 632 | query (name, args, opts = {}, cb) { 633 | if (typeof opts === 'function') { 634 | cb = opts 635 | opts = {} 636 | } 637 | 638 | if (cb && opts.live) { 639 | return cb(new Error('Cannot use live mode with callbacks')) 640 | } 641 | 642 | const qs = this.createQueryStream(name, args, opts) 643 | return collect(qs, cb) 644 | } 645 | 646 | [inspect] (depth, opts) { 647 | const { stylize } = opts 648 | var indent = '' 649 | if (typeof opts.indentationLvl === 'number') { 650 | while (indent.length < opts.indentationLvl) indent += ' ' 651 | } 652 | 653 | return 'Database(\n' + 654 | indent + ' address : ' + stylize((this.key && pretty(this.key)), 'string') + '\n' + 655 | indent + ' discoveryKey: ' + stylize((this.discoveryKey && pretty(this.discoveryKey)), 'string') + '\n' + 656 | indent + ' swarmMode: ' + stylize(this._swarmMode) + '\n' + 657 | indent + ' feeds: : ' + stylize(this._feeds.length) + '\n' + 658 | indent + ' opened : ' + stylize(this.opened, 'boolean') + '\n' + 659 | indent + ' name : ' + stylize(this._name, 'string') + '\n' + 660 | indent + ')' 661 | } 662 | } 663 | 664 | function empty (value) { 665 | return value === undefined || value === null 666 | } 667 | 668 | function onready (feed, cb) { 669 | if (feed.opened) cb(null, feed) 670 | else feed.ready(() => cb(null, feed)) 671 | } 672 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | const debug = require('debug')('db') 2 | const pretty = require('pretty-hash') 3 | const thunky = require('thunky') 4 | 5 | const Group = require('./group') 6 | const { uuid, sink, noop } = require('./lib/util') 7 | const createKvView = require('./views/kv') 8 | const createRecordsView = require('./views/records') 9 | const createIndexView = require('./views/indexes') 10 | const Record = require('./lib/record') 11 | const Schema = require('./lib/schema') 12 | 13 | const FEED_TYPE = 'kappa-records' 14 | const SCHEMA_SOURCE = 'core/source' 15 | 16 | const bind = (obj, fn) => fn.bind(obj) 17 | 18 | module.exports = defaultDatabase 19 | module.exports.Group = Group 20 | 21 | function defaultDatabase (opts = {}) { 22 | opts.swarmMode = opts.swarmMode || 'rootfeed' 23 | // opts.swarmMode = 'multifeed' 24 | 25 | const group = new Group(opts) 26 | const db = new Database(group, opts) 27 | 28 | group.put = bind(db, db.put) 29 | group.del = bind(db, db.del) 30 | group.putSchema = bind(db, db.putSchema) 31 | group.getSchema = bind(db, db.getSchema) 32 | group.getSchemas = bind(db, db.getSchemas) 33 | group.putSource = bind(db, db.putSource) 34 | group.db = db 35 | 36 | if (opts.swarmMode === 'rootfeed') { 37 | const sources = new Sources(group, { 38 | onput: bind(db, db.put), 39 | onsource: bind(group, group.addFeed) 40 | }) 41 | group.putSource = bind(sources, sources.put) 42 
| sources.open(noop) 43 | } else { 44 | group.putSource = function (key, info = {}, cb) { 45 | if (typeof info === 'function') { 46 | cb = info 47 | info = {} 48 | } 49 | group.addFeed({ key, ...info }, cb) 50 | } 51 | } 52 | 53 | return group 54 | } 55 | 56 | class Database { 57 | constructor (group, opts) { 58 | this.group = group 59 | this.opts = opts 60 | this.schemas = new Schema() 61 | this.group.registerFeedType(FEED_TYPE, { 62 | onopen: this._onopen.bind(this), 63 | onload: this._onload.bind(this), 64 | onappend: this._onappend.bind(this) 65 | }) 66 | const viewOpts = { schemas: this.schemas } 67 | this.group.use('kv', createKvView) 68 | this.group.use('records', createRecordsView, viewOpts) 69 | this.group.use('index', createIndexView, viewOpts) 70 | } 71 | 72 | _onopen (cb) { 73 | this.schemas.open(this.group, cb) 74 | } 75 | 76 | _onload (message, opts, cb) { 77 | const { key, seq, lseq, value, feedType } = message 78 | const record = Record.decode(value, { key, seq, lseq, feedType }) 79 | cb(null, record) 80 | } 81 | 82 | _onappend (record, opts, cb) { 83 | if (!record.schema) return cb(new Error('schema is required')) 84 | if (record.op === undefined) record.op = Record.PUT 85 | if (record.op === 'put') record.op = Record.PUT 86 | if (record.op === 'del') record.op = Record.DEL 87 | if (!record.id) record.id = uuid() 88 | 89 | record.schema = this.schemas.resolveName(record.schema) 90 | 91 | if (record.op === Record.PUT) { 92 | let validate = false 93 | if (this.opts.validate) validate = true 94 | if (typeof opts.validate !== 'undefined') validate = !!opts.validate 95 | 96 | if (validate) { 97 | if (!this.schemas.validate(record)) return cb(this.schemas.error) 98 | } 99 | } 100 | 101 | record.timestamp = Date.now() 102 | 103 | this.group.view.kv.getLinks(record, (err, links) => { 104 | if (err && err.status !== 404) return cb(err) 105 | record.links = links || [] 106 | const buf = Record.encode(record) 107 | cb(null, buf, record.id) 108 | }) 109 | } 110 | 111 | put (record, opts, cb) { 112 | record.op = Record.PUT 113 | this.group.append(record, opts, cb) 114 | } 115 | 116 | del ({ id, schema }, opts, cb) { 117 | const record = { 118 | id, 119 | schema, 120 | op: Record.DEL 121 | } 122 | this.group.append(record, opts, cb) 123 | } 124 | 125 | putSchema (name, schema, cb) { 126 | this.group.ready(() => { 127 | const value = this.schemas.parseSchema(name, schema) 128 | if (!value) return cb(this.schemas.error) 129 | const record = { 130 | schema: 'core/schema', 131 | value 132 | } 133 | this.schemas.put(value) 134 | this.group.put(record, cb) 135 | }) 136 | } 137 | 138 | getSchema (name) { 139 | return this.schemas.get(name) 140 | } 141 | 142 | getSchemas () { 143 | return this.schemas.list() 144 | } 145 | 146 | putSource (key, info = {}, cb) { 147 | // opts should/can include: { alias } 148 | if (typeof info === 'function') { 149 | cb = info 150 | info = {} 151 | } 152 | if (Buffer.isBuffer(key)) key = key.toString('hex') 153 | this.put({ 154 | schema: SCHEMA_SOURCE, 155 | id: key, 156 | value: { 157 | type: FEED_TYPE, 158 | key, 159 | ...info 160 | } 161 | }, cb) 162 | } 163 | } 164 | 165 | class Sources { 166 | constructor (group, handlers) { 167 | this.group = group 168 | this.handlers = handlers 169 | } 170 | 171 | open (cb) { 172 | const qs = this.group.createQueryStream('records', { schema: 'core/source' }, { live: true }) 173 | qs.once('sync', cb) 174 | qs.pipe(sink((record, next) => { 175 | const { alias, key, type, ...info } = record.value 176 | if (type !== FEED_TYPE) 
return next() 177 | debug('[%s] source:add key %s alias %s type %s', this.group._name, pretty(key), alias, type) 178 | const opts = { alias, key, type, info } 179 | this.handlers.onsource(opts) 180 | next() 181 | })) 182 | } 183 | 184 | put (key, opts = {}, cb) { 185 | // opts should/can include: { alias } 186 | if (typeof opts === 'function') { 187 | cb = opts 188 | opts = {} 189 | } 190 | if (Buffer.isBuffer(key)) key = key.toString('hex') 191 | const record = { 192 | schema: 'core/source', 193 | id: key, 194 | value: { 195 | type: FEED_TYPE, 196 | key, 197 | ...opts 198 | } 199 | } 200 | this.handlers.onput(record, cb) 201 | } 202 | } 203 | 204 | module.exports.Database = Database 205 | -------------------------------------------------------------------------------- /lib/messages.js: -------------------------------------------------------------------------------- 1 | // This file is auto generated by the protocol-buffers compiler 2 | 3 | /* eslint-disable quotes */ 4 | /* eslint-disable indent */ 5 | /* eslint-disable no-redeclare */ 6 | /* eslint-disable camelcase */ 7 | 8 | // Remember to `npm install --save protocol-buffers-encodings` 9 | var encodings = require('protocol-buffers-encodings') 10 | var varint = encodings.varint 11 | var skip = encodings.skip 12 | 13 | var Record = exports.Record = { 14 | buffer: true, 15 | encodingLength: null, 16 | encode: null, 17 | decode: null 18 | } 19 | 20 | var Header = exports.Header = { 21 | buffer: true, 22 | encodingLength: null, 23 | encode: null, 24 | decode: null 25 | } 26 | 27 | defineRecord() 28 | defineHeader() 29 | 30 | function defineRecord () { 31 | Record.Op = { 32 | PUT: 0, 33 | DEL: 1 34 | } 35 | 36 | Record.encodingLength = encodingLength 37 | Record.encode = encode 38 | Record.decode = decode 39 | 40 | function encodingLength (obj) { 41 | var length = 0 42 | if (!defined(obj.id)) throw new Error("id is required") 43 | var len = encodings.string.encodingLength(obj.id) 44 | length += 1 + len 45 | if (!defined(obj.op)) throw new Error("op is required") 46 | var len = encodings.enum.encodingLength(obj.op) 47 | length += 1 + len 48 | if (defined(obj.schema)) { 49 | var len = encodings.string.encodingLength(obj.schema) 50 | length += 1 + len 51 | } 52 | if (defined(obj.value)) { 53 | var len = encodings.bytes.encodingLength(obj.value) 54 | length += 1 + len 55 | } 56 | if (defined(obj.timestamp)) { 57 | var len = encodings.varint.encodingLength(obj.timestamp) 58 | length += 1 + len 59 | } 60 | if (defined(obj.links)) { 61 | for (var i = 0; i < obj.links.length; i++) { 62 | if (!defined(obj.links[i])) continue 63 | var len = encodings.string.encodingLength(obj.links[i]) 64 | length += 1 + len 65 | } 66 | } 67 | return length 68 | } 69 | 70 | function encode (obj, buf, offset) { 71 | if (!offset) offset = 0 72 | if (!buf) buf = Buffer.allocUnsafe(encodingLength(obj)) 73 | var oldOffset = offset 74 | if (!defined(obj.id)) throw new Error("id is required") 75 | buf[offset++] = 10 76 | encodings.string.encode(obj.id, buf, offset) 77 | offset += encodings.string.encode.bytes 78 | if (!defined(obj.op)) throw new Error("op is required") 79 | buf[offset++] = 16 80 | encodings.enum.encode(obj.op, buf, offset) 81 | offset += encodings.enum.encode.bytes 82 | if (defined(obj.schema)) { 83 | buf[offset++] = 26 84 | encodings.string.encode(obj.schema, buf, offset) 85 | offset += encodings.string.encode.bytes 86 | } 87 | if (defined(obj.value)) { 88 | buf[offset++] = 34 89 | encodings.bytes.encode(obj.value, buf, offset) 90 | offset += 
encodings.bytes.encode.bytes 91 | } 92 | if (defined(obj.timestamp)) { 93 | buf[offset++] = 40 94 | encodings.varint.encode(obj.timestamp, buf, offset) 95 | offset += encodings.varint.encode.bytes 96 | } 97 | if (defined(obj.links)) { 98 | for (var i = 0; i < obj.links.length; i++) { 99 | if (!defined(obj.links[i])) continue 100 | buf[offset++] = 50 101 | encodings.string.encode(obj.links[i], buf, offset) 102 | offset += encodings.string.encode.bytes 103 | } 104 | } 105 | encode.bytes = offset - oldOffset 106 | return buf 107 | } 108 | 109 | function decode (buf, offset, end) { 110 | if (!offset) offset = 0 111 | if (!end) end = buf.length 112 | if (!(end <= buf.length && offset <= buf.length)) throw new Error("Decoded message is not valid") 113 | var oldOffset = offset 114 | var obj = { 115 | id: "", 116 | op: 0, 117 | schema: "", 118 | value: null, 119 | timestamp: 0, 120 | links: [] 121 | } 122 | var found0 = false 123 | var found1 = false 124 | while (true) { 125 | if (end <= offset) { 126 | if (!found0 || !found1) throw new Error("Decoded message is not valid") 127 | decode.bytes = offset - oldOffset 128 | return obj 129 | } 130 | var prefix = varint.decode(buf, offset) 131 | offset += varint.decode.bytes 132 | var tag = prefix >> 3 133 | switch (tag) { 134 | case 1: 135 | obj.id = encodings.string.decode(buf, offset) 136 | offset += encodings.string.decode.bytes 137 | found0 = true 138 | break 139 | case 2: 140 | obj.op = encodings.enum.decode(buf, offset) 141 | offset += encodings.enum.decode.bytes 142 | found1 = true 143 | break 144 | case 3: 145 | obj.schema = encodings.string.decode(buf, offset) 146 | offset += encodings.string.decode.bytes 147 | break 148 | case 4: 149 | obj.value = encodings.bytes.decode(buf, offset) 150 | offset += encodings.bytes.decode.bytes 151 | break 152 | case 5: 153 | obj.timestamp = encodings.varint.decode(buf, offset) 154 | offset += encodings.varint.decode.bytes 155 | break 156 | case 6: 157 | obj.links.push(encodings.string.decode(buf, offset)) 158 | offset += encodings.string.decode.bytes 159 | break 160 | default: 161 | offset = skip(prefix & 7, buf, offset) 162 | } 163 | } 164 | } 165 | } 166 | 167 | function defineHeader () { 168 | Header.encodingLength = encodingLength 169 | Header.encode = encode 170 | Header.decode = decode 171 | 172 | function encodingLength (obj) { 173 | var length = 0 174 | if (!defined(obj.type)) throw new Error("type is required") 175 | var len = encodings.string.encodingLength(obj.type) 176 | length += 1 + len 177 | if (defined(obj.metadata)) { 178 | var len = encodings.bytes.encodingLength(obj.metadata) 179 | length += 1 + len 180 | } 181 | return length 182 | } 183 | 184 | function encode (obj, buf, offset) { 185 | if (!offset) offset = 0 186 | if (!buf) buf = Buffer.allocUnsafe(encodingLength(obj)) 187 | var oldOffset = offset 188 | if (!defined(obj.type)) throw new Error("type is required") 189 | buf[offset++] = 10 190 | encodings.string.encode(obj.type, buf, offset) 191 | offset += encodings.string.encode.bytes 192 | if (defined(obj.metadata)) { 193 | buf[offset++] = 18 194 | encodings.bytes.encode(obj.metadata, buf, offset) 195 | offset += encodings.bytes.encode.bytes 196 | } 197 | encode.bytes = offset - oldOffset 198 | return buf 199 | } 200 | 201 | function decode (buf, offset, end) { 202 | if (!offset) offset = 0 203 | if (!end) end = buf.length 204 | if (!(end <= buf.length && offset <= buf.length)) throw new Error("Decoded message is not valid") 205 | var oldOffset = offset 206 | var obj = { 207 | type: "", 
208 | metadata: null 209 | } 210 | var found0 = false 211 | while (true) { 212 | if (end <= offset) { 213 | if (!found0) throw new Error("Decoded message is not valid") 214 | decode.bytes = offset - oldOffset 215 | return obj 216 | } 217 | var prefix = varint.decode(buf, offset) 218 | offset += varint.decode.bytes 219 | var tag = prefix >> 3 220 | switch (tag) { 221 | case 1: 222 | obj.type = encodings.string.decode(buf, offset) 223 | offset += encodings.string.decode.bytes 224 | found0 = true 225 | break 226 | case 2: 227 | obj.metadata = encodings.bytes.decode(buf, offset) 228 | offset += encodings.bytes.decode.bytes 229 | break 230 | default: 231 | offset = skip(prefix & 7, buf, offset) 232 | } 233 | } 234 | } 235 | } 236 | 237 | function defined (val) { 238 | return val !== null && val !== undefined && (typeof val !== 'number' || !isNaN(val)) 239 | } 240 | -------------------------------------------------------------------------------- /lib/mux.js: -------------------------------------------------------------------------------- 1 | const multiplexer = require('multifeed/mux') 2 | const through = require('through2') 3 | const debug = require('debug')('mux') 4 | const pretty = require('pretty-hash') 5 | 6 | module.exports = { replicate, forwardLiveFeedAnnouncements } 7 | // Taken and adapted from multifeed/index.js 8 | function replicate (self, isInitiator, opts) { 9 | if (!self.opened) { 10 | var tmp = through() 11 | process.nextTick(function () { 12 | tmp.emit('error', new Error('tried to use "replicate" before multifeed is ready')) 13 | }) 14 | return tmp 15 | } 16 | 17 | if (!opts) opts = {} 18 | self.writerLock = self.lock 19 | 20 | var rootKey = self.address 21 | 22 | var mux = multiplexer(isInitiator, rootKey, Object.assign({}, opts, { _id: self._id })) 23 | 24 | // Add key exchange listener 25 | var onManifest = function (m) { 26 | mux.requestFeeds(m.keys) 27 | } 28 | mux.on('manifest', onManifest) 29 | 30 | // Add replication listener 31 | var onReplicate = function (keys, repl) { 32 | addMissingKeys(keys, function (err) { 33 | if (err) return mux.stream.destroy(err) 34 | 35 | // Create a look up table with feed-keys as keys 36 | // (since not all keys in self._feeds are actual feed-keys) 37 | var key2feed = values(self._feeds).reduce(function (h, feed) { 38 | h[feed.key.toString('hex')] = feed 39 | return h 40 | }, {}) 41 | 42 | // Select feeds by key from LUT 43 | var feeds = keys.map(function (k) { return key2feed[k] }) 44 | repl(feeds) 45 | }) 46 | } 47 | mux.on('replicate', onReplicate) 48 | 49 | // Start streaming 50 | self.ready(function (err) { 51 | if (err) return mux.stream.destroy(err) 52 | if (mux.stream.destroyed) return 53 | mux.ready(function () { 54 | var keys = values(self._feeds).map(function (feed) { return feed.key.toString('hex') }) 55 | mux.offerFeeds(keys) 56 | }) 57 | 58 | // Push session to _streams array 59 | self._streams.push(mux) 60 | 61 | // Register removal 62 | var cleanup = function (err) { 63 | mux.removeListener('manifest', onManifest) 64 | mux.removeListener('replicate', onReplicate) 65 | self._streams.splice(self._streams.indexOf(mux), 1) 66 | debug('[REPLICATION] Client connection destroyed', err) 67 | } 68 | mux.stream.once('end', cleanup) 69 | mux.stream.once('error', cleanup) 70 | }) 71 | 72 | return mux.stream 73 | 74 | // Helper functions 75 | 76 | function addMissingKeys (keys, cb) { 77 | self.ready(function (err) { 78 | if (err) return cb(err) 79 | self.writerLock(function (release) { 80 | addMissingKeysLocked(keys, function (err) { 
81 | release(cb, err) 82 | }) 83 | }) 84 | }) 85 | } 86 | 87 | function addMissingKeysLocked (keys, cb) { 88 | var pending = 0 89 | debug(self._id + ' [REPLICATION] recv\'d ' + keys.length + ' keys') 90 | var filtered = keys.filter(function (key) { 91 | return !Number.isNaN(parseInt(key, 16)) && key.length === 64 92 | }) 93 | filtered.forEach(function (key) { 94 | var feeds = values(self._feeds).filter(function (feed) { 95 | return feed.key.toString('hex') === key 96 | }) 97 | if (!feeds.length) { 98 | pending++ 99 | debug(self._id + ' [REPLICATION] trying to create new local hypercore, key=' + key.toString('hex')) 100 | // self._createFeedFromKey(key, null, function (err) { 101 | self.addFeed({ key }, function (err) { 102 | if (err) { 103 | debug(self._id + ' [REPLICATION] failed to create new local hypercore, key=' + key.toString('hex')) 104 | debug(self._id + err.toString()) 105 | } else { 106 | debug(self._id + ' [REPLICATION] succeeded in creating new local hypercore, key=' + key.toString('hex')) 107 | } 108 | if (!--pending) cb() 109 | }) 110 | } 111 | }) 112 | if (!pending) cb() 113 | } 114 | } 115 | 116 | function forwardLiveFeedAnnouncements (self, feed, name) { 117 | if (!self._streams.length) return // no-op if no live-connections 118 | var hexKey = feed.key.toString('hex') 119 | // Tell each remote that we have a new key available unless 120 | // it's already being replicated 121 | self._streams.forEach(function (mux) { 122 | if (mux.knownFeeds().indexOf(hexKey) === -1) { 123 | debug('Forwarding new feed to existing peer:', hexKey) 124 | mux.offerFeeds([hexKey]) 125 | } 126 | }) 127 | } 128 | 129 | function init (self) { 130 | self._streams = [] 131 | self.on('feed', (feed, info) => { 132 | forwardLiveFeedAnnouncements(feed, info.name) 133 | }) 134 | } 135 | 136 | function values (obj) { 137 | return Object.keys(obj).map(function (k) { return obj[k] }) 138 | } 139 | -------------------------------------------------------------------------------- /lib/record.js: -------------------------------------------------------------------------------- 1 | const { Record: RecordEncoding } = require('./messages') 2 | 3 | module.exports = class Record { 4 | static get PUT () { return RecordEncoding.Op.PUT } 5 | static get DEL () { return RecordEncoding.Op.DEL } 6 | 7 | static decode (buf, props = {}) { 8 | let record = RecordEncoding.decode(buf) 9 | record.value = Record.decodeValue(record) 10 | 11 | // Assign key and seq if provided (these are not part of the encoded record, but 12 | // must be provided when loading the record from the feed). 13 | if (props.key) { 14 | record.key = Buffer.isBuffer(props.key) ? 
props.key.toString('hex') : props.key 15 | } 16 | if (typeof props.seq !== 'undefined') { 17 | record.seq = Number(props.seq) 18 | } 19 | record = { ...props, ...record } 20 | return record 21 | } 22 | 23 | static encode (record) { 24 | const value = Record.encodeValue(record) 25 | const buf = RecordEncoding.encode({ ...record, value }) 26 | return buf 27 | } 28 | 29 | static decodeValue (record) { 30 | if (record.value) return JSON.parse(record.value) 31 | return null 32 | } 33 | 34 | static encodeValue (record) { 35 | if (record.value) return JSON.stringify(record.value) 36 | return null 37 | } 38 | } 39 | 40 | -------------------------------------------------------------------------------- /lib/schema.js: -------------------------------------------------------------------------------- 1 | const Ajv = require('ajv') 2 | const debug = require('debug')('db:schema') 3 | const SCHEMAS = require('./schemas') 4 | const { sink } = require('./util') 5 | 6 | module.exports = class SchemaStore { 7 | constructor (opts = {}) { 8 | this.key = opts.key 9 | this.schemas = {} 10 | this.ajv = new Ajv() 11 | 12 | for (const schema of SCHEMAS) { 13 | this.put(schema) 14 | } 15 | } 16 | 17 | setKey (key) { 18 | this.key = key 19 | } 20 | 21 | open (db, cb) { 22 | this.key = db.key 23 | const qs = db.createQueryStream('records', { schema: 'core/schema' }, { live: true }) 24 | qs.once('sync', cb) 25 | qs.pipe(sink((record, next) => { 26 | this.put(record.value) 27 | next() 28 | })) 29 | } 30 | 31 | put (schema) { 32 | const name = schema.name || schema.$id 33 | // TODO: Handle error. 34 | if (!name) return false 35 | schema = this.parseSchema(name, schema) 36 | // TODO: Handle error 37 | try { 38 | const valid = this.ajv.addSchema(schema, name) 39 | if (!valid) this._lastError = new ValidationError(this.ajv.errorsText(), this.ajv.errors) 40 | if (!valid) return false 41 | this.schemas[name] = schema 42 | return true 43 | } catch (err) { 44 | this._lastError = err 45 | return false 46 | } 47 | } 48 | 49 | get (name) { 50 | if (typeof name === 'object') { 51 | let { schema, key } = name 52 | name = this.resolveName(schema, key) 53 | } else { 54 | name = this.resolveName(name) 55 | } 56 | return this.schemas[name] 57 | } 58 | 59 | has (name) { 60 | return !!this.get(name) 61 | } 62 | 63 | list () { 64 | return { ...this.schemas } 65 | } 66 | 67 | fake (msg) { 68 | const schema = { 69 | $id: msg.schema, 70 | type: 'object' 71 | } 72 | this.put(schema) 73 | } 74 | 75 | validate (record) { 76 | const name = this.resolveName(record.schema) 77 | let result = false 78 | try { 79 | result = this.ajv.validate(name, record.value) 80 | if (!result) this._lastError = new ValidationError(this.ajv.errorsText(), this.ajv.errors) 81 | } catch (err) { 82 | this._lastError = err 83 | } 84 | return result 85 | } 86 | 87 | get error () { 88 | return this._lastError 89 | } 90 | 91 | resolveName (name, key) { 92 | if (!key) key = this.key 93 | if (Buffer.isBuffer(key)) key = key.toString('hex') 94 | if (name.indexOf('/') === -1) name = key + '/' + name 95 | // if (name.indexOf('@') === -1) { 96 | // TODO: Support versions 97 | // name = name + '@0' 98 | // } 99 | return name 100 | } 101 | 102 | parseSchema (name, schema, key) { 103 | name = this.resolveName(name, key) 104 | return { 105 | properties: {}, 106 | type: 'object', 107 | ...schema, 108 | $id: name, 109 | name 110 | } 111 | } 112 | } 113 | 114 | class ValidationError extends Error { 115 | constructor (message, errors) { 116 | super(message) 117 | this.errors = errors 118 | 
}
119 | }
120 | 
--------------------------------------------------------------------------------
/lib/schemas.js:
--------------------------------------------------------------------------------
1 | const SCHEMA_SCHEMA = {
2 |   $id: 'core/schema',
3 |   type: 'object',
4 |   properties: {
5 |     name: {
6 |       type: 'string',
7 |       index: true
8 |     }
9 |   }
10 | }
11 | 
12 | const SOURCE_SCHEMA = {
13 |   $id: 'core/source',
14 |   type: 'object',
15 |   properties: {
16 |     type: { type: 'string', title: 'Type' },
17 |     key: { type: 'string', pattern: '^[0-9a-f]{64}$', title: 'key' },
18 |     alias: { type: 'string', title: 'Alias' },
19 |     description: { type: 'string', title: 'Description' }
20 |   }
21 | }
22 | 
23 | const ANY_SCHEMA = {
24 |   $id: 'core/any', type: 'object'
25 | }
26 | 
27 | module.exports = [
28 |   SCHEMA_SCHEMA, SOURCE_SCHEMA, ANY_SCHEMA
29 | ]
30 | 
--------------------------------------------------------------------------------
/lib/sync-map.js:
--------------------------------------------------------------------------------
1 | const codecs = require('codecs')
2 | const Nanoresource = require('nanoresource')
3 | const mutex = require('mutexify')
4 | const { createPromiseProxy } = require('./util')
5 | 
6 | const CALLBACK_METHODS = [
7 |   'open',
8 |   'close',
9 |   'flush',
10 |   'getFlush',
11 |   'setFlush',
12 |   'deleteFlush',
13 |   'batchFlush'
14 | ]
15 | 
16 | module.exports = class SyncMap extends Nanoresource {
17 |   constructor (db, opts = {}) {
18 |     super()
19 |     this.db = db
20 |     this._data = {}
21 |     this._queue = {}
22 |     this._valueEncoding = codecs(opts.valueEncoding || 'utf8')
23 |     this._condition = opts.condition
24 |     this._lock = mutex()
25 | 
26 |     this.promise = createPromiseProxy(this, CALLBACK_METHODS)
27 |   }
28 | 
29 |   _open (cb) {
30 |     const rs = this.db.createReadStream()
31 |     rs.on('data', ({ key, value }) => {
32 |       this._data[key] = this._valueEncoding.decode(value)
33 |     })
34 |     rs.on('end', () => {
35 |       this.opened = true
36 |       cb()
37 |     })
38 |   }
39 | 
40 |   _close (cb) {
41 |     this.flush(cb)
42 |   }
43 | 
44 |   entries () {
45 |     return Object.entries(this._data)
46 |   }
47 | 
48 |   get (key) {
49 |     return this._data[key]
50 |   }
51 | 
52 |   has (key) {
53 |     return this._data[key] !== undefined
54 |   }
55 | 
56 |   set (key, value, flush = true) {
57 |     this._data[key] = value
58 |     this._queue[key] = { value, type: 'put' }
59 |     if (flush) this._queueFlush()
60 |   }
61 | 
62 |   setFlush (key, value, cb) {
63 |     this.set(key, value)
64 |     this.flush(cb)
65 |   }
66 | 
67 |   batch (data, flush = true) {
68 |     Object.entries(data).forEach(([key, value]) => {
69 |       this.set(key, value, false) // flush once below, not per entry
70 |     })
71 |     if (flush) this._queueFlush()
72 |   }
73 | 
74 |   batchFlush (data, cb) {
75 |     this.batch(data)
76 |     this.flush(cb)
77 |   }
78 | 
79 |   delete (key, flush = true) {
80 |     delete this._data[key]
81 |     this._queue[key] = { type: 'del' }
82 |     if (flush) this._queueFlush()
83 |   }
84 | 
85 |   deleteFlush (key, cb) {
86 |     this.delete(key)
87 |     this.flush(cb)
88 |   }
89 | 
90 |   _queueFlush (opts, cb) {
91 |     if (this._flushQueued) return
92 |     this._flushQueued = true
93 |     process.nextTick(() => {
94 |       this._flushQueued = false
95 |       this.flush()
96 |     })
97 |   }
98 | 
99 |   flush (cb = noop) {
100 |     this._lock(release => {
101 |       if (!Object.keys(this._queue).length) return release(cb)
102 |       const queue = Object.entries(this._queue).map(([key, { value, type }]) => {
103 |         if (value) value = this._valueEncoding.encode(value)
104 |         return { key, value, type }
105 |       })
106 |       this._queue = {} // clear pending entries before committing the batch
107 |       this.db.batch(queue,
release.bind(null, cb)) 108 | }) 109 | } 110 | } 111 | 112 | function noop () {} 113 | -------------------------------------------------------------------------------- /lib/util.js: -------------------------------------------------------------------------------- 1 | const crypto = require('crypto') 2 | const base32 = require('base32') 3 | const { Writable, Transform } = require('stream') 4 | 5 | exports.keyseq = function (record) { 6 | return record.key + '@' + record.seq 7 | } 8 | 9 | exports.uuid = function () { 10 | return base32.encode(crypto.randomBytes(16)) 11 | } 12 | 13 | exports.through = function (transform) { 14 | return new Transform({ 15 | objectMode: true, 16 | transform 17 | }) 18 | } 19 | 20 | exports.sink = function (fn) { 21 | return new Writable({ 22 | objectMode: true, 23 | write (msg, enc, next) { 24 | fn(msg, next) 25 | } 26 | }) 27 | } 28 | 29 | exports.once = function (fn) { 30 | let called = false 31 | return (...args) => { 32 | if (!called) fn(...args) 33 | called = true 34 | } 35 | } 36 | 37 | exports.defaultTrue = function (val) { 38 | if (typeof val === 'undefined') return true 39 | return !!val 40 | } 41 | 42 | exports.noop = function () {} 43 | 44 | const kCache = Symbol('promise-cache') 45 | module.exports.createPromiseProxy = function (instance, callbackMethods) { 46 | if (!instance[kCache]) instance[kCache] = {} 47 | const cache = instance[kCache] 48 | return new Proxy(instance, { 49 | get (target, propKey) { 50 | const value = Reflect.get(target, propKey) 51 | if (typeof value !== 'function') return value 52 | if (!cache[propKey]) { 53 | cache[propKey] = promisify(target, propKey, value) 54 | } 55 | return cache[propKey] 56 | } 57 | }) 58 | 59 | function promisify (target, propKey, func) { 60 | if (!callbackMethods.includes(propKey)) { 61 | return function (...args) { 62 | Reflect.apply(func, target, args) 63 | } 64 | } 65 | return function (...args) { 66 | // Support callbacks if last arg is a function. 67 | if (typeof args[args.length - 1] === 'function') { 68 | return Reflect.apply(func, target, args) 69 | } 70 | 71 | // Otherwise, return a promise. 
72 | return new Promise((resolve, reject) => { 73 | args.push((err, ...result) => { 74 | if (err) return reject(err) 75 | if (result.length > 1) resolve(result) 76 | else resolve(result[0]) 77 | }) 78 | Reflect.apply(func, target, args) 79 | }) 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "kappa-record-db", 3 | "version": "0.10.0", 4 | "description": "", 5 | "main": "index.js", 6 | "keywords": [], 7 | "author": "Franz Heinzmann (Frando)", 8 | "license": "GPL-3.0", 9 | "scripts": { 10 | "test": "tape test/*.js", 11 | "build-protobuf": "protocol-buffers schema.proto -o lib/messages.js" 12 | }, 13 | "dependencies": { 14 | "ajv": "^6.10.2", 15 | "base32": "^0.0.6", 16 | "charwise": "^3.0.1", 17 | "corestore": "^5.4.0", 18 | "debug": "^4.1.1", 19 | "fast-bitfield": "^1.2.2", 20 | "hypercore": "^9.1.0", 21 | "hypercore-protocol": "^8.0.0", 22 | "inspect-custom-symbol": "^1.1.1", 23 | "kappa-core": "github:Frando/kappa-core#exp-0.2.4", 24 | "kappa-sparse-indexer": "^0.6.0", 25 | "level-live": "github:Frando/level-live#key-encodings", 26 | "level-mem": "^5.0.1", 27 | "lru-cache": "^5.1.1", 28 | "multifeed": "^5.2.1", 29 | "mutexify": "^1.2.0", 30 | "nanoresource": "^1.3.0", 31 | "pretty-hash": "^1.0.1", 32 | "protocol-buffers-encodings": "^1.1.0", 33 | "pump": "^3.0.0", 34 | "random-access-file": "^2.1.3", 35 | "random-access-memory": "^3.1.1", 36 | "stream-collector": "^1.0.1", 37 | "subleveldown": "^5.0.0", 38 | "through2": "^3.0.1", 39 | "thunky": "^1.1.0", 40 | "unordered-materialized-kv": "^1.3.0" 41 | }, 42 | "devDependencies": { 43 | "nanoiterator": "^1.2.1", 44 | "protocol-buffers": "^4.1.2", 45 | "tape": "^5.0.1" 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /schema.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | 3 | message Record { 4 | enum Op { 5 | PUT = 0; 6 | DEL = 1; 7 | } 8 | required string id = 1; 9 | required Op op = 2; 10 | optional string schema = 3; 11 | optional bytes value = 4; 12 | optional uint32 timestamp = 5; 13 | repeated string links = 6; 14 | // TODO: We need to store the schema version. 15 | // Enable with next breaking change and have it next to schema. 16 | // optional uint32 schemaVersion = 7; 17 | } 18 | 19 | // The Header is DEP-0007 compatible. 20 | // It is written as the first message to every feed. 21 | // Metadata is currently empty. 
22 | message Header { 23 | required string type = 1; 24 | optional bytes metadata = 2; 25 | } 26 | -------------------------------------------------------------------------------- /test/bench.js: -------------------------------------------------------------------------------- 1 | const tape = require('tape') 2 | const { runAll, replicate } = require('./lib/util') 3 | const Database = require('..') 4 | 5 | const MAX = 10000 6 | 7 | tape.skip('insert many', t => { 8 | const db = new Database({ name: 'db1', alias: 'w1', validate: false }) 9 | const timer = clock() 10 | runAll([ 11 | cb => db.ready(cb), 12 | cb => logtime(cb, timer, 'ready'), 13 | 14 | // cb => { 15 | // let pending = 0 16 | // for (let i = 1; i <= MAX; i++) { 17 | // ++pending 18 | // db.put({ schema: 'doc', value: 'bar' }, done) 19 | // } 20 | // function done () { 21 | // if (--pending === 0) cb() 22 | // } 23 | // }, 24 | 25 | cb => { 26 | let batch = [] 27 | for (let i = 1; i <= MAX; i++) { 28 | batch.push({ schema: 'doc', value: 'bar' }) 29 | } 30 | db.batch(batch, cb) 31 | }, 32 | 33 | cb => logtime(cb, timer, 'inserted ' + MAX), 34 | cb => db.query('records', { schema: 'doc' }, { waitForSync: true }, (err, records) => { 35 | t.error(err) 36 | t.equal(records.length, MAX) 37 | cb() 38 | }), 39 | cb => logtime(cb, timer, 'queried'), 40 | cb => db.query('records', { schema: 'doc' }, { waitForSync: true }, (err, records) => { 41 | t.error(err) 42 | t.equal(records.length, MAX) 43 | cb() 44 | }), 45 | cb => logtime(cb, timer, 'queried'), 46 | cb => logtime(cb, timer.total, 'total'), 47 | 48 | cb => rundb2(cb), 49 | cb => t.end() 50 | ]) 51 | 52 | function rundb2 (finish) { 53 | const timer = clock() 54 | const db2 = new Database({ name: 'db2', key: db.key }) 55 | console.log() 56 | console.log('now sync to db2') 57 | runAll([ 58 | cb => db2.ready(cb), 59 | cb => logtime(cb, timer, 'ready'), 60 | cb => replicate(db, db2, cb), 61 | cb => db2.query('records', { schema: 'doc' }, { waitForSync: true }, (err, records) => { 62 | t.error(err) 63 | t.equal(records.length, MAX) 64 | cb() 65 | }), 66 | cb => logtime(cb, timer, 'queried'), 67 | cb => db2.query('records', { schema: 'doc' }, { waitForSync: true }, (err, records) => { 68 | t.error(err) 69 | t.equal(records.length, MAX) 70 | cb() 71 | }), 72 | cb => logtime(cb, timer, 'queried'), 73 | cb => logtime(cb, timer.total, 'total'), 74 | cb => finish() 75 | ]) 76 | } 77 | }) 78 | 79 | function logtime (cb, timer, message) { 80 | console.log(timer(), message) 81 | cb() 82 | } 83 | 84 | function clock () { 85 | let start = process.hrtime() 86 | let last = start 87 | function measure () { 88 | const interval = process.hrtime(last) 89 | last = process.hrtime() 90 | return fmt(interval) 91 | } 92 | measure.total = function () { 93 | const interval = process.hrtime(start) 94 | return fmt(interval) 95 | } 96 | return measure 97 | } 98 | 99 | function fmt (time) { 100 | const ds = time[0] 101 | const dn = time[1] 102 | const ns = (ds * 1e9) + dn 103 | const ms = round(ns / 1e6) 104 | const s = round(ms / 1e3) 105 | if (s >= 1) return s + 's' 106 | if (ms >= 0.01) return ms + 'ms' 107 | if (ns) return ns + 'ns' 108 | } 109 | 110 | function round (num, decimals = 2) { 111 | return Math.round(num * Math.pow(10, decimals)) / Math.pow(10, decimals) 112 | } 113 | -------------------------------------------------------------------------------- /test/lib/util.js: -------------------------------------------------------------------------------- 1 | exports.runAll = function runAll (ops) { 2 | 
return new Promise((resolve, reject) => {
 3 |     runNext(ops.shift())
 4 |     function runNext (op) {
 5 |       op(err => {
 6 |         if (err) return reject(err)
 7 |         let next = ops.shift()
 8 |         if (!next) return resolve()
 9 |         return runNext(next)
10 |       })
11 |     }
12 |   })
13 | }
14 | 
15 | exports.replicate = function replicate (a, b, opts, cb) {
16 |   if (typeof opts === 'function') return replicate(a, b, null, opts)
17 |   if (!opts) opts = { live: true }
18 |   const stream = a.replicate(true, opts)
19 |   b.once('remote-update', () => {
20 |     // setImmediate(() => setTimeout(cb, 10))
21 |     cb()
22 |   })
23 |   stream.pipe(b.replicate(false, opts)).pipe(stream)
24 |   // setImmediate(cb)
25 | }
26 | 
--------------------------------------------------------------------------------
/test/query-bitfield.js:
--------------------------------------------------------------------------------
 1 | const { runAll } = require('./lib/util')
 2 | const tape = require('tape')
 3 | const collect = require('stream-collector')
 4 | const Database = require('..')
 5 | 
 6 | tape('query bitfield', t => {
 7 |   const db = new Database({ validate: false })
 8 |   runAll([
 9 |     cb => db.ready(cb),
10 |     cb => {
11 |       const ops = ['a', 'b', 'c', 'd'].map(title => (
12 |         { op: 'put', schema: 'doc', value: { title } }
13 |       ))
14 |       db.batch(ops, cb)
15 |     },
16 |     cb => db.sync(cb),
17 |     cb => {
18 |       db.query('records', { schema: 'doc' }, { cacheid: 'test' }, (err, records) => {
19 |         t.error(err)
20 |         t.equal(records.length, 4)
21 |         t.deepEqual(records.map(r => r.value.title).sort(), ['a', 'b', 'c', 'd'])
22 |         cb()
23 |       })
24 |     },
25 |     cb => {
26 |       db.query('records', { schema: 'doc' }, { cacheid: 'test' }, (err, records) => {
27 |         t.error(err)
28 |         t.equal(records.length, 4)
29 |         t.deepEqual(records.map(r => r.value), [undefined, undefined, undefined, undefined]) // records already sent under this cacheid come back without their values
30 |         cb()
31 |       })
32 |     },
33 |     cb => t.end()
34 |   ])
35 | })
36 | 
--------------------------------------------------------------------------------
/test/source-minimal.js:
--------------------------------------------------------------------------------
 1 | const tape = require('tape')
 2 | const { runAll, replicate } = require('./lib/util')
 3 | const Database = require('..')
 4 | 
 5 | tape('minimal kv test', t => {
 6 |   const db = new Database({ name: 'db1', alias: 'w1', validate: false })
 7 |   let id
 8 |   runAll([
 9 |     cb => db.ready(cb),
10 |     cb => db.put({ schema: 'doc', value: 'foo' }, (err, _id) => {
11 |       t.error(err)
12 |       id = _id
13 |       cb()
14 |     }),
15 |     cb => db.put({ schema: 'doc', value: 'bar', id }, cb),
16 |     cb => db.query('records', { schema: 'doc' }, { waitForSync: true }, (err, records) => {
17 |       t.error(err)
18 |       t.equal(records.length, 1)
19 |       t.equal(records[0].value, 'bar')
20 |       cb()
21 |     }),
22 |     cb => t.end()
23 |   ])
24 | })
25 | 
26 | function doc (value, id) {
27 |   return { schema: 'doc', value, id }
28 | }
29 | 
30 | tape('minimal kv test with two sources', t => {
31 |   const db = new Database({ name: 'db1', alias: 'w1', validate: false })
32 |   // db.on('remote-update', () => console.log('db1 remote-update'))
33 |   let db2
34 |   let id
35 |   runAll([
36 |     cb => db.ready(cb),
37 |     cb => db.put(doc('1rev1'), (err, _id) => {
38 |       t.error(err)
39 |       id = _id
40 |       cb()
41 |     }),
42 |     cb => db.sync(cb),
43 |     cb => db.put(doc('1rev2', id), cb),
44 |     cb => {
45 |       db2 = new Database({ key: db.key, name: 'db2', alias: 'w2', validate: false })
46 |       // db2.on('remote-update', () => console.log('db2 remote-update'))
47 |       db2.ready(cb)
48 |     },
49 |     cb => db2.writer(cb),
50 |     cb => checkOne(t, db, { schema: 'doc' }, '1rev2', 'init db1 ok', cb),
51 |     cb => replicate(db, db2, cb),
52 |     cb => db2.indexer.once('update', cb),
53 |     cb => checkOne(t, db2, { schema: 'doc' }, '1rev2', 'init db2 ok', cb),
54 |     cb => db2.writer(cb),
55 |     cb => {
56 |       const db2localfeed = db2.getDefaultWriter()
57 |       const db2localkey = db2localfeed.key
58 |       db.putSource(db2localkey, { alias: 'w2' }, cb)
59 |     },
60 |     cb => db.sync(cb),
61 |     cb => db2.put(doc('2rev1', id), cb),
62 |     cb => checkOne(t, db, { schema: 'doc' }, '2rev1', 'end db1 ok', cb),
63 |     cb => checkOne(t, db2, { schema: 'doc' }, '2rev1', 'end db2 ok', cb),
64 |     cb => {
65 |       // console.log('db1', db.status())
66 |       // console.log('db2', db.status())
67 |       cb()
68 |     },
69 |     cb => t.end()
70 |   ])
71 | })
72 | 
73 | function checkOne (t, db, query, value, msg, cb) {
74 |   db.query('records', query, { waitForSync: true }, (err, records) => {
75 |     t.error(err, msg + ' (no err)')
76 |     t.equal(records.length, 1, msg + ' (result len)')
77 |     t.equal(records[0].value, value, msg + ' (value)')
78 |     cb()
79 |   })
80 | }
81 | 
--------------------------------------------------------------------------------
/test/sources.js:
--------------------------------------------------------------------------------
 1 | const { runAll } = require('./lib/util')
 2 | const tape = require('tape')
 3 | const collect = require('stream-collector')
 4 | const Database = require('..')
 5 | 
 6 | const DOC = {
 7 |   properties: {
 8 |     title: { type: 'string' },
 9 |     body: { type: 'string' },
10 |     tags: { type: 'array', index: true, items: { type: 'string' } }
11 |   }
12 | }
13 | 
14 | const GROUP = {
15 |   properties: {
16 |     name: { type: 'string' },
17 |     docs: {
18 |       type: 'array', index: true, items: { type: 'string' }
19 |     }
20 |   }
21 | }
22 | 
23 | const DOC1 = {
24 |   schema: 'doc',
25 |   value: { title: 'hello', body: 'world', tags: ['red'] }
26 | }
27 | 
28 | tape('replication', async t => {
29 |   const db = new Database({ name: 'db1', alias: 'w1' })
30 |   let db2, id1, docIds
31 |   await runAll([
32 |     cb => db.ready(cb),
33 |     cb => {
34 |       db2 = new Database({ key: db.key, name: 'db1-2', alias: 'w2' })
35 |       cb()
36 |     },
37 |     cb => db2.ready(cb),
38 |     cb => {
39 |       // console.log('DB', db)
40 |       // console.log('DB2', db2)
41 |       cb()
42 |     },
43 |     cb => db.putSchema('doc', DOC, cb),
44 |     cb => db.put(DOC1, (err, id) => {
45 |       t.error(err)
46 |       id1 = id
47 |       process.nextTick(cb)
48 |     }),
49 |     // cb => {
50 |     //   db.putSource(db2.localKey, cb)
51 |     //   db2.putSource(db.localKey, cb)
52 |     // },
53 |     // cb => {
54 |     //   setTimeout(() => {
55 |     //     // t.end()
56 |     //   }, 200)
57 |     // },
58 |     cb => {
59 |       const stream = db.replicate(true, { live: true })
60 |       stream.pipe(db2.replicate(false, { live: true })).pipe(stream)
61 |       setTimeout(cb, 200)
62 |     },
63 |     cb => {
64 |       // console.log('After replication')
65 |       // console.log('DB', db)
66 |       // console.log('DB2', db2)
67 |       db2.query('records', { schema: 'doc' }, (err, records) => {
68 |         t.error(err)
69 |         t.equal(records.length, 1)
70 |         t.equal(records[0].id, id1)
71 |         cb()
72 |       })
73 |     },
74 |     cb => {
75 |       db2.put({ schema: 'doc', value: { title: 'hi', body: 'mars', tags: ['green'] } }, cb)
76 |     },
77 |     cb => {
78 |       db2.put({ schema: 'doc', value: { title: 'ola', body: 'moon', tags: ['green'] }, id: id1 }, cb)
79 |     },
80 |     cb => {
81 |       const db2localfeed = db2.getDefaultWriter()
82 |       const db2localkey = db2localfeed.key
83 |       db.putSource(db2localkey, cb)
84 |     },
85 |     // TODO: Find a way to remove the timeout.
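    // A possible event-based alternative (a sketch, assuming the indexer
    // emits 'update' as used in source-minimal.js above):
    //   cb => db.indexer.once('update', cb),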
86 |     cb => setTimeout(cb, 100),
87 |     cb => db.sync(cb),
88 |     cb => {
89 |       // console.log('After putSource')
90 |       // console.log('DB', db)
91 |       // console.log('DB2', db2)
92 |       db.query('records', { schema: 'doc' }, (err, records) => {
93 |         t.error(err)
94 |         t.equal(records.length, 2, 'records get len')
95 |         t.deepEqual(records.map(r => r.value.title).sort(), ['hi', 'ola'], 'record get vals')
96 |         docIds = records.map(r => r.id)
97 |         cb()
98 |       })
99 |     },
100 |     cb => {
101 |       db.query('index', { schema: 'doc', prop: 'tags', value: 'green' }, (err, records) => {
102 |         t.error(err)
103 |         t.deepEqual(records.map(r => r.value.body).sort(), ['mars', 'moon'], 'query')
104 |         cb()
105 |       })
106 |     },
107 |     cb => {
108 |       db.putSchema('group', GROUP, cb)
109 |     },
110 |     cb => setTimeout(cb, 100),
111 |     cb => {
112 |       db.put({
113 |         schema: 'group',
114 |         value: {
115 |           name: 'stories',
116 |           docs: docIds
117 |         }
118 |       }, cb)
119 |     },
120 |     cb => setTimeout(cb, 100),
121 |     cb => {
122 |       db2.kappa.ready('kv', () => {
123 |         // collect(db.loadStream(db.api.records.get({ schema: 'group' }), (err, records) => {
124 | 
125 |         db2.query('records', { schema: 'group' }, { load: true }, (err, records) => {
126 |           t.error(err)
127 |           t.equal(records.length, 1)
128 |           t.equal(records[0].value.name, 'stories')
129 |           cb() // resolve runAll here; t.end() follows after the await below
130 |         })
131 |       })
132 |     }
133 |   ])
134 |   t.end()
135 | })
136 | 
--------------------------------------------------------------------------------
/test/test.js:
--------------------------------------------------------------------------------
 1 | const { runAll } = require('./lib/util')
 2 | const tape = require('tape')
 3 | const collect = require('stream-collector')
 4 | const Database = require('..')
 5 | 
 6 | const docSchema = {
 7 |   properties: {
 8 |     title: { type: 'string' },
 9 |     body: { type: 'string' },
10 |     tags: { type: 'array', index: true, items: { type: 'string' } }
11 |   }
12 | }
13 | 
14 | const groupSchema = {
15 |   properties: {
16 |     name: { type: 'string' },
17 |     docs: {
18 |       type: 'array', index: true, items: { type: 'string' }
19 |     }
20 |   }
21 | }
22 | 
23 | tape('minimal', t => {
24 |   const db = new Database()
25 |   runAll([
26 |     cb => db.putSchema('doc', docSchema, cb),
27 |     cb => db.put({ schema: 'doc', value: { title: 'hello', body: 'world' } }, cb),
28 |     cb => db.query('records', { schema: 'doc' }, { waitForSync: true }, (err, records) => {
29 |       t.error(err)
30 |       t.equal(records.length, 1)
31 |       t.equal(records[0].value.title, 'hello')
32 |       db.put({ id: records[0].id, schema: 'doc', value: { title: 'hello2', body: 'world' } }, cb)
33 |     }),
34 |     cb => db.query('records', { schema: 'doc' }, { waitForSync: true }, (err, records) => {
35 |       t.error(err)
36 |       t.equal(records.length, 1)
37 |       t.equal(records[0].value.title, 'hello2')
38 |       cb()
39 |     }),
40 |     () => t.end()
41 |   ])
42 | })
43 | 
44 | tape('delete', t => {
45 |   const db = new Database()
46 |   const schema = 'doc'
47 |   let id
48 |   runAll([
49 |     cb => db.putSchema(schema, docSchema, cb),
50 |     cb => db.put({ schema, value: { title: 'hello', body: 'world' } }, (err, id1) => {
51 |       if (err) return cb(err)
52 |       id = id1
53 |       cb()
54 |     }),
55 |     cb => db.sync(cb),
56 |     cb => db.query('records', { id }, (err, records) => {
57 |       t.error(err)
58 |       t.equal(records.length, 1, 'queried one record')
59 |       cb()
60 |     }),
61 |     cb => db.del({ id, schema }, cb),
62 |     cb => db.sync(cb),
63 |     cb => db.query('records', { id }, (err, records) => {
64 |       t.error(err)
65 |       t.equal(records.length, 0, 'queried zero records')
66 |       cb()
67 |     }),
68 |     cb => t.end()
69 |   ])
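  // Note: del() appends a DEL op for the record (see Record.Op in schema.proto),
  // so after sync the views drop their entries and the query above returns no results.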
70 | }) 71 | 72 | tape('kitchen sink', async t => { 73 | const db = new Database() 74 | let id1 75 | let docIds 76 | await runAll([ 77 | cb => db.ready(cb), 78 | cb => db.putSchema('doc', docSchema, cb), 79 | cb => db.put({ schema: 'doc', value: { title: 'hello', body: 'world', tags: ['red'] } }, (err, id) => { 80 | t.error(err) 81 | // console.log('put', id) 82 | id1 = id 83 | process.nextTick(cb) 84 | }), 85 | cb => { 86 | db.put({ schema: 'doc', value: { title: 'hi', body: 'mars', tags: ['green'] } }, cb) 87 | }, 88 | cb => { 89 | db.put({ schema: 'doc', value: { title: 'hello', body: 'moon', tags: ['green'] }, id: id1 }, cb) 90 | }, 91 | cb => { 92 | // db.kappa.ready('records', () => { 93 | setTimeout(() => { 94 | db.query('records', { schema: 'doc' }, (err, records) => { 95 | // console.log('oi') 96 | t.error(err) 97 | t.equal(records.length, 2, 'records get len') 98 | t.deepEqual(records.map(r => r.value.title).sort(), ['hello', 'hi'], 'record get vals') 99 | docIds = records.map(r => r.id) 100 | cb() 101 | }) 102 | }, 100) 103 | }, 104 | cb => { 105 | db.query('index', { schema: 'doc', prop: 'tags', value: 'green' }, (err, records) => { 106 | t.deepEqual(records.map(r => r.value.body).sort(), ['mars', 'moon'], 'query') 107 | cb() 108 | }) 109 | }, 110 | cb => { 111 | db.putSchema('group', groupSchema, cb) 112 | }, 113 | cb => setTimeout(cb, 100), 114 | cb => { 115 | db.put({ 116 | schema: 'group', 117 | value: { 118 | name: 'stories', 119 | docs: docIds 120 | } 121 | }, (err, id) => { 122 | t.error(err) 123 | t.ok(id) 124 | db.query('records', { id }, { waitForSync: true }, (err, res) => { 125 | t.error(err) 126 | t.equal(res.length, 1) 127 | t.equal(res[0].value.name, 'stories') 128 | cb() 129 | }) 130 | }) 131 | }, 132 | cb => setTimeout(() => cb(), 200), 133 | cb => { 134 | db.kappa.ready(() => { 135 | db.query('records', { schema: 'group' }, (err, records) => { 136 | t.error(err) 137 | t.equal(records.length, 1) 138 | t.equal(records[0].value.name, 'stories') 139 | cb() 140 | }) 141 | }) 142 | }, 143 | cb => { 144 | db.put({ 145 | schema: 'doc', 146 | value: { 147 | title: 'foosaturn', 148 | tags: ['saturn'] 149 | } 150 | }) 151 | let pending = 2 152 | const query = { schema: 'doc', prop: 'tags', value: 'saturn' } 153 | db.query('index', query, { waitForSync: true }, (err, res) => { 154 | t.error(err) 155 | t.equal(res.length, 1) 156 | t.equal(res[0].value.title, 'foosaturn', 'waitforsync true has result') 157 | if (--pending === 0) cb() 158 | }) 159 | db.query('index', query, { waitForSync: false }, (err, res) => { 160 | t.error(err) 161 | t.equal(res.length, 0, 'waitforsync false no result') 162 | if (--pending === 0) cb() 163 | }) 164 | } 165 | 166 | ]) 167 | t.end() 168 | }) 169 | -------------------------------------------------------------------------------- /views/helpers.js: -------------------------------------------------------------------------------- 1 | const Record = require('../lib/record') 2 | module.exports = { mapRecordsIntoLevelDB } 3 | 4 | function mapRecordsIntoLevelDB (opts, next) { 5 | const { records, map, level, db } = opts 6 | let pending = records.length 7 | const ops = [] 8 | 9 | records.forEach(record => collectOps(db, record, map, done)) 10 | 11 | function done (err, curOps) { 12 | if (!err) ops.push(...curOps) 13 | if (--pending === 0) finish() 14 | } 15 | 16 | function finish () { 17 | level.batch(ops, next) 18 | } 19 | } 20 | 21 | function collectOps (db, record, mapFn, cb) { 22 | const ops = [] 23 | // check if the current record is already 
outdated;
24 |   // if so, there is nothing to do.
25 |   db.api.kv.isLinked(record, (err, isLinked) => {
26 |     if (err) return cb(err)
27 |     if (isLinked) return cb(null, [])
28 | 
29 |     // collect the records that this record links to (i.e. supersedes):
30 |     // their view entries have to be deleted.
31 |     collectLinkedRecords(db, record, (err, linkedRecords) => {
32 |       if (err) return cb(err)
33 | 
34 |       // map linked records to delete ops
35 |       for (const linkedRecord of linkedRecords) {
36 |         Array.prototype.push.apply(ops, mapToDel(db, linkedRecord, mapFn))
37 |       }
38 | 
39 |       // map the current record itself
40 |       if (record.op === Record.PUT) {
41 |         Array.prototype.push.apply(ops, mapToPut(db, record, mapFn))
42 |       } else if (record.op === Record.DEL) {
43 |         Array.prototype.push.apply(ops, mapToDel(db, record, mapFn))
44 |       }
45 | 
46 |       cb(null, ops)
47 |     })
48 |   })
49 | }
50 | 
51 | function collectLinkedRecords (db, record, cb) {
52 |   if (!record.links.length) return cb(null, [])
53 |   const records = []
54 |   let pending = record.links.length
55 |   record.links.forEach(link => {
56 |     const [key, seq] = link.split('@')
57 |     db.loadRecord({ key, seq }, (err, record) => {
58 |       if (!err && record) records.push(record)
59 |       if (--pending === 0) cb(null, records)
60 |     })
61 |   })
62 | }
63 | 
64 | function mapToPut (db, record, mapFn) {
65 |   const ops = mapFn(record, db)
66 |   return mapResult(ops, 'put')
67 | }
68 | 
69 | function mapToDel (db, record, mapFn) {
70 |   const ops = mapFn(record, db)
71 |   return mapResult(ops, 'del')
72 | }
73 | 
74 | function mapResult (ops, type) {
75 |   if (!ops) return []
76 |   if (!Array.isArray(ops)) ops = [ops]
77 |   return ops.map(op => {
78 |     if (typeof op === 'string') op = { key: op }
79 |     if (type === 'put' && !op.value) op.value = ''
80 |     op.type = type
81 |     return op
82 |   })
83 | }
84 | 
--------------------------------------------------------------------------------
/views/indexes.js:
--------------------------------------------------------------------------------
 1 | const through = require('through2')
 2 | const pump = require('pump')
 3 | 
 4 | const { mapRecordsIntoLevelDB } = require('./helpers')
 5 | 
 6 | const CHAR_END = '\uffff'
 7 | const CHAR_SPLIT = '\u0000'
 8 | const CHAR_START = '\u0001'
 9 | 
10 | module.exports = function indexedView (lvl, db, opts) {
11 |   const schemas = opts.schemas
12 |   return {
13 |     name: 'indexes',
14 |     map (records, next) {
15 |       mapRecordsIntoLevelDB({
16 |         db, records, map: mapToIndex, level: lvl
17 |       }, next)
18 |     },
19 |     api: {
20 |       query (kappa, opts, cb) {
21 |         // const { schema, prop, value, gt, lt, gte, lte, reverse, limit } = opts
22 |         const proxy = transform(opts)
23 |         if (!opts.schema || !opts.prop) {
24 |           proxy.destroy(new Error('schema and prop are required.'))
25 |         } else {
26 |           opts.schema = schemas.resolveName(opts.schema)
27 |           if (!opts.schema) return proxy.destroy(new Error('Invalid schema name.'))
28 |           const lvlopts = queryOptsToLevelOpts(opts)
29 |           lvl.createReadStream(lvlopts).pipe(proxy)
30 |         }
31 |         return proxy
32 |       }
33 |     }
34 |   }
35 | 
36 |   function mapToIndex (msg, db) {
37 |     const schema = schemas.get(msg)
38 |     const ops = []
39 |     const { id, key: source, seq, schema: schemaName, value, lseq } = msg
40 |     if (!schema || !schema.properties) return ops
41 |     if (!value || typeof value !== 'object') return ops
42 |     // TODO: Recursive?
43 |     for (const [field, def] of Object.entries(schema.properties)) {
44 |       // Only index fields that are marked with index: true and have a defined value.
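      // Each index entry is a flat key: schema, field, value and lseq joined
      // with CHAR_SPLIT, e.g. 'doc' + '\u0000' + 'tags' + '\u0000' + 'green' + '\u0000' + lseq
      // (illustrative values).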
45 |       if (!def.index) continue
46 |       if (typeof value[field] === 'undefined') continue
47 | 
48 |       if (def.type === 'array' && Array.isArray(value[field])) var values = value[field]
49 |       else values = [value[field]]
50 |       values.forEach(value => {
51 |         ops.push({
52 |           key: [schemaName, field, value, lseq].join(CHAR_SPLIT),
53 |           value: ''
54 |         })
55 |       })
56 |     }
57 |     return ops
58 |   }
59 | }
60 | 
61 | function queryOptsToLevelOpts (opts) {
62 |   let { schema, prop, reverse, limit, offset, value, gt, gte, lt, lte } = opts
63 |   if (offset && limit) limit = limit + offset
64 |   const lvlopts = { reverse, limit }
65 |   const key = schema + CHAR_SPLIT + prop + CHAR_SPLIT
66 |   lvlopts.gt = key + CHAR_SPLIT
67 |   lvlopts.lt = key + CHAR_END
68 |   if (value) {
69 |     lvlopts.gt = key + value + CHAR_SPLIT
70 |     lvlopts.lt = key + value + CHAR_SPLIT + CHAR_END
71 |   } else if (gt) {
72 |     lvlopts.gt = key + gt + CHAR_SPLIT
73 |     lvlopts.lt = key + gt + CHAR_END
74 |   } else if (gte) {
75 |     lvlopts.gte = key + gte + CHAR_SPLIT
76 |     lvlopts.lt = key + gte + CHAR_END
77 |   }
78 |   if (lt) {
79 |     lvlopts.lt = key + lt + CHAR_START
80 |   } else if (lte) {
81 |     lvlopts.lt = undefined
82 |     lvlopts.lte = key + lte + CHAR_END
83 |   }
84 |   return lvlopts
85 | }
86 | 
87 | function transform (opts) {
88 |   let offset = opts.offset || null
89 |   let i = 0
90 |   return through.obj(function (row, enc, next) {
91 |     i += 1
92 |     if (offset && i <= offset) return next() // skip the first `offset` rows
93 |     const decoded = decodeNode(row)
94 |     this.push(decoded)
95 |     next()
96 |   })
97 | }
98 | 
99 | function decodeNode (node) {
100 |   const { key } = node
101 |   const [schema, prop, value, lseq] = key.split(CHAR_SPLIT)
102 |   // return { schema, id, key: source, seq, params: { prop, value } }
103 |   return { lseq }
104 | }
105 | 
--------------------------------------------------------------------------------
/views/kv.js:
--------------------------------------------------------------------------------
 1 | const umkv = require('unordered-materialized-kv')
 2 | const { keyseq, once } = require('../lib/util')
 3 | 
 4 | module.exports = function kvView (lvl, db) {
 5 |   const kv = umkv(lvl, {
 6 |     // onupdate, onremove
 7 |   })
 8 | 
 9 |   return {
10 |     name: 'kv',
11 |     map (msgs, next) {
12 |       const ops = msgs.map(record => ({
13 |         key: kvkey(record),
14 |         id: keyseq(record),
15 |         links: record.links
16 |       }))
17 |       kv.batch(ops, next)
18 |     },
19 |     api: {
20 |       getLinks (kappa, record, cb) {
21 |         kv.get(kvkey(record), cb)
22 |       },
23 |       isLinked (kappa, record, cb) {
24 |         kv.isLinked(keyseq(record), cb)
25 |       },
26 |       filterOutdated (kappa, records, cb) {
27 |         cb = once(cb)
28 |         let pending = records.length
29 |         const filtered = []
30 |         records.forEach(record => {
31 |           kv.isLinked(keyseq(record), (err, isOutdated) => {
32 |             if (err) return cb(err)
33 |             if (!isOutdated) filtered.push(record)
34 |             if (--pending === 0) cb(null, filtered)
35 |           })
36 |         })
37 |       }
38 |     }
39 |   }
40 | 
41 |   // function onupdate (msg) {
42 |   //   console.log('onupdate', msg)
43 |   // }
44 |   // function onremove (msg) {
45 |   //   console.log('onremove', msg)
46 |   // }
47 | }
48 | 
49 | function kvkey (record) {
50 |   return record.schema + ':' + record.id
51 | }
52 | 
--------------------------------------------------------------------------------
/views/records.js:
--------------------------------------------------------------------------------
 1 | const through = require('through2')
 2 | const keyEncoding = require('charwise')
 3 | const { mapRecordsIntoLevelDB } = require('./helpers')
 4 | const Live = require('level-live')
 5 | // const debug = require('debug')('db')
 6 | // const collect = require('stream-collector')
 7 | 
 8 | const INDEXES = {
 9 |   is: ['id', 'schema', 'key'],
10 |   si: ['schema', 'id', 'key']
11 | }
12 | 
13 | module.exports = function createRecordView (lvl, db, opts) {
14 |   const schemas = opts.schemas
15 |   return {
16 |     map (records, next) {
17 |       mapRecordsIntoLevelDB({
18 |         db, records, map: mapToPutOp, level: lvl
19 |       }, () => {
20 |         next()
21 |       })
22 |     },
23 | 
24 |     api: {
25 |       query (kappa, req, opts = {}) {
26 |         if (!req) return this.view.all(opts)
27 |         if (typeof req === 'string') req = { id: req }
28 |         let { schema, id, key, seq, all } = req
29 | 
30 |         if (schema) schema = schemas.resolveName(schema)
31 | 
32 |         let filter
33 |         if (all) {
34 |           filter = includerange(['is'])
35 |         } else if (schema && !id) {
36 |           filter = includerange(['si', schema])
37 |         } else if (!schema && id) {
38 |           filter = includerange(['is', id])
39 |         } else {
40 |           filter = includerange(['is', id, schema])
41 |         }
42 | 
43 |         let rs = query(lvl, { ...opts, ...filter })
44 |         if (key) rs = rs.pipe(filterSource(key, seq))
45 |         return rs
46 |       },
47 | 
48 |       get (kappa, req, opts) {
49 |         return this.view.query(req, opts)
50 |       },
51 | 
52 |       all (kappa, opts) {
53 |         return query(lvl, { ...opts, ...includerange(['is']) })
54 |       },
55 | 
56 |       bySchema (kappa, schema, opts) {
57 |         return this.view.query({ schema }, opts)
58 |       },
59 | 
60 |       byId (kappa, id, opts) {
61 |         return this.view.query({ id }, opts)
62 |       },
63 | 
64 |       byIdAndSchema (kappa, id, schema, opts) {
65 |         return this.view.query({ id, schema }, opts)
66 |       }
67 |     }
68 |   }
69 | }
70 | 
71 | function query (db, opts) {
72 |   opts.keyEncoding = keyEncoding
73 |   const transform = parseRow()
74 |   let rs
75 |   if (opts.live) {
76 |     rs = new Live(db, opts)
77 |     rs.once('sync', () => transform.emit('sync'))
78 |   } else {
79 |     rs = db.createReadStream(opts)
80 |   }
81 |   return rs.pipe(transform)
82 | }
83 | 
84 | function validate (msg) {
85 |   const result = msg.id && msg.schema && msg.key && typeof msg.seq !== 'undefined'
86 |   return result
87 | }
88 | 
89 | function mapToPutOp (msg, db) {
90 |   if (!validate(msg)) return []
91 |   const ops = []
92 |   const value = msg.seq || 0
93 |   const shared = { value, keyEncoding }
94 |   Object.entries(INDEXES).forEach(([key, fields]) => {
95 |     fields = fields.map(field => msg[field])
96 |     ops.push({
97 |       key: [key, ...fields],
98 |       ...shared
99 |     })
100 |   })
101 |   return ops
102 | }
103 | 
104 | function parseRow () {
105 |   return through.obj(function (row, enc, next) {
106 |     const { key, value: seq } = row
107 |     const idx = key.shift()
108 |     const index = INDEXES[idx]
109 |     if (!index) return next()
110 |     const record = { seq: Number(seq) }
111 |     for (let i = 0; i < key.length; i++) {
112 |       record[index[i]] = key[i]
113 |     }
114 |     this.push(record)
115 |     next()
116 |   })
117 | }
118 | 
119 | function filterSource (key, seq) {
120 |   return through.obj(function (row, enc, next) {
121 |     if (row.key === key) {
122 |       if (!seq || seq === row.seq) this.push(row)
123 |     }
124 |     next()
125 |   })
126 | }
127 | 
128 | function includerange (key) {
129 |   return {
130 |     gte: [...key],
131 |     lte: [...key, undefined]
132 |   }
133 | }
134 | 
--------------------------------------------------------------------------------