├── bin └── inspect.js ├── index.js ├── lib ├── couch-replicate.js └── http.js ├── package.json ├── readme.md └── tests ├── benchmark ├── compare-edit-writes.js ├── compare-new-writes.js └── run_couchup.js ├── npmregistry └── replicate_meta.js ├── run.js ├── test-basic.js ├── test-compaction.js ├── test-delete.js ├── test-http.js └── test-sleep.js /bin/inspect.js: -------------------------------------------------------------------------------- 1 | var couchup = require('../') 2 | , bytewise = require('bytewise') 3 | ; 4 | 5 | if (process.argv.length < 3) throw new Error('must supply a leveldb filename') 6 | var filename = process.argv[process.argv.length - 1] 7 | 8 | var store = couchup(filename) 9 | 10 | var range = store.lev.createKeyStream({start:null, end:{}}) 11 | 12 | var databases = 0 13 | , meta = 0 14 | , current = null 15 | ; 16 | 17 | function print () { 18 | var str = current.split(',') 19 | if (str[1] === '0') { 20 | str[1] = 'sequences' 21 | } else if (str[1] === '1') { 22 | str[1] = 'documents' 23 | } 24 | console.log(str[0], str[1], meta) 25 | } 26 | 27 | range.on('data', function (obj) { 28 | var key = bytewise.decode(obj) 29 | if (key[0] === 0) { 30 | databases += 1 31 | } else { 32 | if (current === null) { 33 | console.log('databases', databases) 34 | current = key[0]+','+key[1] 35 | } 36 | if (current !== key[0]+','+key[1]) { 37 | print() 38 | current = key[0]+','+key[1] 39 | meta = 0 40 | } 41 | meta += 1 42 | } 43 | 44 | }) 45 | range.on('end', function () { 46 | print() 47 | }) -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | var util = require('util') 2 | , events = require('events') 3 | , levelup = require('levelup') 4 | , once = require('once') 5 | , lru = require('lru-cache') 6 | , uuid = require('node-uuid') 7 | , peek = require('level-peek') 8 | , http = require('./lib/http') 9 | , mutex = 
require('level-mutex') 10 | , crypto = require('crypto') 11 | , byteslice = require('byteslice') 12 | , bloomfilter = require('bloomfilter') 13 | , sleepref = require('sleep-ref') 14 | , noop = function () {} 15 | ; 16 | 17 | function Deferring () { 18 | this.deferred = [] 19 | } 20 | util.inherits(Deferring, events.EventEmitter) 21 | Deferring.prototype.defer = function (fn) { 22 | if (this.deferring) this.deferred.push(fn) 23 | else fn() 24 | } 25 | Deferring.prototype.kick = function () { 26 | this.deferring = false 27 | while (this.deferred.length) { 28 | this.deferred.shift()() 29 | } 30 | } 31 | 32 | function Store (opts) { 33 | Deferring.call(this) 34 | var self = this 35 | opts.keyEncoding = 'binary' 36 | opts.valueEncoding = 'json' 37 | // if (!opts.cacheSize) opts.cacheSize = 32 * 1024 * 1024 38 | // if (!opts.writeBufferSize) opts.cacheSize = 32 * 1024 * 1024 39 | this.opts = opts 40 | this.lev = levelup(opts.location, opts) 41 | this.mutex = mutex(this.lev) 42 | this._writes = [] 43 | this.deferring = true 44 | this.databases = {} 45 | 46 | var opts = 47 | { start: this.bytes.encode([null]) 48 | , end: this.bytes.encode([{}]) 49 | } 50 | 51 | var reader = this.mutex.lev.createReadStream(opts) 52 | reader.on('data', function (data) { 53 | var key = self.bytes.decode(data.key) 54 | self.databases[key[0]] = new Database(self, key[0], data.value) 55 | }) 56 | reader.on('end', function () { 57 | self.kick() 58 | }) 59 | } 60 | util.inherits(Store, Deferring) 61 | Store.prototype.bytes = byteslice(['_meta', 'couchup']) 62 | Store.prototype.put = function (name, cb) { 63 | var self = this 64 | this.defer(function () { 65 | if (self.databases[name]) return cb(new Error("Database already exists.")) 66 | self.mutex.put(self.bytes.encode([name]), 0, function (err) { 67 | if (err) return cb(err) 68 | }) 69 | self.databases[name] = new Database(self, name, 0) 70 | self.databases[name].once('init', function () { 71 | cb(null, self.databases[name]) 72 | }) 73 | }) 74 
| } 75 | Store.prototype.get = function (name, cb) { 76 | var self = this 77 | this.defer(function () { 78 | if (!self.databases[name]) return cb(new Error('Database does not exist.')) 79 | if (!self.databases[name]._init) { 80 | self.databases[name].once('init', function () { 81 | cb(null, self.databases[name]) 82 | }) 83 | } else { 84 | cb(null, self.databases[name]) 85 | } 86 | }) 87 | } 88 | Store.prototype.delete = function (name, cb) { 89 | var p = {} 90 | , self = this 91 | ; 92 | self.get(name, function (e, db) { 93 | if (e) return cb(e) 94 | db.deleteDatabase(cb) 95 | }) 96 | } 97 | 98 | function hashdoc (doc) { 99 | if (doc._rev) { 100 | var rev = doc._rev 101 | delete doc._rev 102 | } 103 | var hash = crypto.createHash('md5').update(JSON.stringify(doc)).digest("hex") 104 | if (rev) { 105 | doc._rev = rev 106 | } 107 | return hash 108 | } 109 | 110 | function revint (rev) { 111 | var seq 112 | if (!rev) seq = 0 113 | else seq = parseInt(rev.slice(0, rev.indexOf('-'))) 114 | if (isNaN(seq)) { console.error('BAD!'); seq = 0} 115 | return seq 116 | } 117 | 118 | function Database (store, name, seq) { 119 | this.store = store 120 | this.name = name 121 | this.mutex = mutex(store.lev) 122 | this.cache = lru() 123 | this.pending = [] 124 | 125 | this.bytes = byteslice([name, 'couchup']) 126 | 127 | var self = this 128 | self.mutex.on('flushed', function () { 129 | self.pending = [] 130 | }) 131 | 132 | // get sequence 133 | // because this is the first read sent to the mutex 134 | // we'll be able to get the first sequence 135 | // before we do any writes. 
136 | 137 | var lastSeekOptions = 138 | { end: self.bytes.encode(['seq', {}]) 139 | , start: self.bytes.encode(['seq', null]) 140 | } 141 | 142 | self.mutex.peekLast(lastSeekOptions, function (e, key, info) { 143 | if (e) { 144 | self.sequence = 0 145 | self.doc_count = 0 146 | } else { 147 | key = self.bytes.decode(key) 148 | self.sequence = key[1] 149 | self.doc_count = info[1] 150 | } 151 | self.emit('init') 152 | }) 153 | 154 | self.once('init', function () { 155 | self._init = true 156 | }) 157 | 158 | // start bloom filter 159 | if (typeof store.opts.bloom === 'undefined') { 160 | store.opts.bloom = {size: 64 * 256 * 256, hashes: 16} 161 | } 162 | if (store.opts.bloom) { 163 | var changes = self.sleep() 164 | self._bloom = new bloomfilter.BloomFilter(store.opts.bloom.size, store.opts.bloom.hashes) 165 | function onRow (row) { 166 | self._bloom.add(row.id) 167 | } 168 | self.on('change', onRow) 169 | changes.on('row', onRow) 170 | changes.on('end', function () { 171 | // Make sure we don't create documents that aren't in the bloom 172 | // filter before the sequence has been initialized. 
173 | if (typeof self.sequence !== 'undefined') self.bloom = self._bloom 174 | else self.on('init', function () { self.bloom = self._bloom }) 175 | }) 176 | } 177 | } 178 | util.inherits(Database, events.EventEmitter) 179 | Database.prototype.deleteDatabase = function (cb) { 180 | var self = this 181 | 182 | self.mutex.del(self.store.bytes.encode([self.name]), function () { 183 | var all = self.mutex.lev.createKeyStream( 184 | { start: self.bytes.encode([null]) 185 | , end: self.bytes.encode([{}]) 186 | }) 187 | , count = 0 188 | ; 189 | all.on('data', function (rawkey) { 190 | self.mutex.del(rawkey, noop) 191 | count += 1 192 | }) 193 | all.on('end', function () { 194 | delete self.store.databases[self.name] 195 | if (count === 0) cb(null) 196 | else self.mutex.afterWrite(function () {cb(null)}) 197 | }) 198 | }) 199 | } 200 | Database.prototype.keys = function (cb) { 201 | var self = this 202 | if (!cb) { 203 | cb = seq 204 | seq = {} 205 | } 206 | var opts = 207 | { start:self.bytes.encode(['seq', 0]) 208 | , end: self.bytes.encode(['seq', {}]) 209 | } 210 | var r = self.mutex.lev.createReadStream(opts) 211 | , keys = [] 212 | ; 213 | r.on('data', function (row) { 214 | if (row.value[0]._deleted) return 215 | keys.push(row.value[0].id) 216 | }) 217 | r.on('end', function () { 218 | cb(null, keys) 219 | }) 220 | r.on('error', cb) 221 | } 222 | Database.prototype.get = function (id, seq, cb) { 223 | var self = this 224 | if (!cb) { 225 | cb = seq 226 | seq = {} 227 | } 228 | var opts = 229 | { start:self.bytes.encode(['docs', id, null]) 230 | , end: self.bytes.encode(['docs', id, seq, {}]) 231 | } 232 | self.mutex.peekLast(opts, function (err, key, value) { 233 | if (err) return cb(new Error('Not found.')) 234 | if (value._deleted) return cb(new Error('Not found. 
Deleted.')) 235 | cb(null, value) 236 | }) 237 | } 238 | 239 | Database.prototype.put = function (doc, opts, cb) { 240 | var self = this 241 | if (!cb) { 242 | cb = opts 243 | opts = {} 244 | } 245 | if (typeof doc._id !== 'string') doc._id = uuid() 246 | 247 | function _save (meta) { 248 | var seq 249 | ; 250 | // There is a question about whether or not this is necessary. 251 | // Because we don't store the whole rev tree metadata there isn't much that people 252 | // will do with revs that don't win. 253 | // But, I'm concerned that replication clients will write and then immediately read 254 | // the rev they just wrote, which will cause problems if we don't have it stored while they 255 | // replicate. Regardless, it'll be gone after compaction if it doesn't win. 256 | if (opts.new_edits === false) { 257 | // If the current revs match exactly we already have this version, succeed early. 258 | if (meta.rev === doc._rev) return cb(null, meta) 259 | 260 | // If the newedit rev wins (has more edits) write that rev as a new seq 261 | // If the newedit doesn't win write it to the document store 262 | // but don't write a sequence, use -1 as the sequence in meta 263 | var prev = revint(meta.rev) 264 | , curr = revint(doc._rev) 265 | ; 266 | if (curr === prev) { 267 | // string check (Q: is this compatible with other implementations?) 
268 | if (meta.rev > doc._rev) { 269 | seq = -1 270 | } else { 271 | seq = self.sequence + 1 272 | } 273 | } else if (curr > prev) { 274 | seq = self.sequence + 1 275 | } else { 276 | seq = -1 277 | } 278 | } else { 279 | var prev = revint(meta.rev) 280 | doc._rev = prev + 1 + '-' + hashdoc(doc) 281 | seq = self.sequence + 1 282 | } 283 | 284 | meta.rev = doc._rev 285 | meta.deleted = doc._deleted 286 | meta.seq = seq 287 | meta.id = doc._id 288 | 289 | self.sequence = seq 290 | 291 | if (meta.deleted) self.doc_count = self.doc_count - 1 292 | else self.doc_count = self.doc_count + 1 293 | 294 | // Write the new sequence 295 | var key = self.bytes.encode(['seq', meta.seq]) 296 | , val = [meta, self.doc_count] 297 | ; 298 | if (seq !== -1) { 299 | self.mutex.put(key, val, noop) 300 | self.emit('seq', {key:key, value:val}) 301 | } 302 | // Write an entry for this revision 303 | self.mutex.put(self.bytes.encode(['docs', doc._id, meta.seq, doc._rev, !!doc._deleted]), doc, function (e) { 304 | if (e) return cb(e) 305 | self.emit('change', meta, doc) 306 | cb(null, meta) 307 | }) 308 | 309 | // write cache and pending 310 | self.cache.set(doc._id, meta) 311 | self.pending.push(doc._id) 312 | } 313 | 314 | function _write (e, meta) { 315 | if (_checkPending()) return 316 | 317 | if (!e && opts.new_edits === false && meta.rev === doc._rev) cb(null, meta) 318 | else if (e || meta.rev === doc._rev || opts.new_edits === false) { 319 | _save(meta || {}) 320 | } else { 321 | cb(new Error('rev does not match.')) 322 | } 323 | } 324 | 325 | function _checkPending () { 326 | // If a write on this document is already pending then 327 | // we *know* the rev is out of date. 
328 | if (self.pending.indexOf(doc._id) !== -1) { 329 | if (opts.new_edits === false) self.meta(doc._id, _write) 330 | else cb(new Error('rev does not match.')) 331 | return true 332 | } 333 | return false 334 | } 335 | if (_checkPending()) return 336 | 337 | if (self.cache.has(doc._id)) { 338 | _write(null, this.cache.get(doc._id)) 339 | } else if (self.bloom && !doc._rev && !self.bloom.test(doc._id)) { 340 | _write(true) 341 | } else { 342 | self.meta(doc._id, _write) 343 | } 344 | } 345 | Database.prototype.del = function (doc, cb) { 346 | if (!doc._id) return cb(new Error('must have _id.')) 347 | doc._deleted = true 348 | this.put(doc, cb) 349 | } 350 | Database.prototype.compact = function (cb) { 351 | var self = this 352 | , keys = self.store.lev.createKeyStream( 353 | { end: self.bytes.encode(['docs', null]) 354 | , start: self.bytes.encode(['docs', {}]) 355 | , reverse: true 356 | }) 357 | , current = null 358 | , pending = null 359 | , count = 0 360 | ; 361 | keys.on('data', function (rawkey) { 362 | var key = self.bytes.decode(rawkey) 363 | , seq = key[2] 364 | , docid = key[1] 365 | ; 366 | 367 | if (current === docid) { 368 | self.mutex.del(self.bytes.encode(['seq', seq]), noop) 369 | self.mutex.del(rawkey, noop) 370 | count = count + 1 371 | } 372 | current = docid 373 | }) 374 | keys.on('end', function () { 375 | if (count === 0) cb() 376 | else self.mutex.afterWrite(function () { cb(null, count) }) 377 | }) 378 | } 379 | Database.prototype.sleep = function (opts) { 380 | var self = this 381 | , ee = new events.EventEmitter() 382 | , pendingRows = [] 383 | ; 384 | if (!opts) opts = {} 385 | 386 | var r = this.mutex.lev.createReadStream( 387 | { start: self.bytes.encode(['seq', opts.since || 0]) 388 | , end: self.bytes.encode(['seq', {}]) 389 | }) 390 | 391 | function onRow (row) { 392 | var value = row.value[0] 393 | var r = 394 | { id: value.id 395 | , seq: value.seq 396 | , rev: value.rev 397 | } 398 | if (value.deleted) r.deleted = value.deleted 
399 | if (opts.include_data) { 400 | self.get(r.id, r.seq, function (e, doc) { 401 | r.data = doc 402 | ee.emit('entry', r) 403 | }) 404 | } else { 405 | ee.emit('entry', r) 406 | } 407 | } 408 | 409 | function onSeq (row) { 410 | pendingRows.push(row) 411 | } 412 | self.on('seq', onSeq) 413 | r.on('data', onRow) 414 | function onEnd () { 415 | pendingRows.forEach(onRow) 416 | pendingRows = null 417 | self.removeListener('seq', onSeq) 418 | if (opts.continuous) { 419 | self.on('seq', onRow) 420 | ee.abort = function () { 421 | self.removeListener('seq', onRow) 422 | } 423 | } else { 424 | self.mutex.get('0', function () { 425 | ee.emit('end') 426 | }) 427 | } 428 | } 429 | r.on('end', onEnd) 430 | 431 | ee.abort = function () { 432 | r.removeListener('data', onRow) 433 | r.removeListener('end', onEnd) 434 | self.removeListener('seq', onSeq) 435 | pendingRows = null 436 | } 437 | ee.end = function () { 438 | ee.abort() 439 | } 440 | 441 | return ee 442 | } 443 | Database.prototype.delete = Database.prototype.del 444 | Database.prototype.meta = function (id, cb) { 445 | var self = this 446 | , opts = 447 | { end: self.bytes.encode(['docs', id, {}]) 448 | , start: self.bytes.encode(['docs', id, null]) 449 | } 450 | self.mutex.peekLast(opts, function (err, key, value) { 451 | if (err) return cb(new Error('Not found.')) 452 | key = self.bytes.decode(key) 453 | cb(null, {deleted: key[4], rev: key[3], id: id, seq: key[2]}) 454 | }) 455 | } 456 | Database.prototype.info = function (cb) { 457 | var self = this 458 | if (typeof this.sequence !== 'undefined') { 459 | cb(null, {update_seq:self.sequence, doc_count:self.doc_count}) 460 | } else { 461 | self.on('init', function () { self.info(cb) }) 462 | } 463 | } 464 | Database.prototype.pull = function (remote, opts, cb) { 465 | var self = this 466 | if (!cb) { 467 | cb = opts 468 | opts = {include_data:true} 469 | } 470 | self.mutex.get(self.bytes.encode(['sleep', remote]), function (e, seq) { 471 | if (!e) opts.since = seq 
472 | self._clone(remote, sleepref.client(remote, opts), cb) 473 | }) 474 | } 475 | Database.prototype.clone = function (db, cb) { 476 | var opts = {include_data:true} 477 | , ref = db.name 478 | , self = this 479 | ; 480 | if (!cb) opts.continuous = true 481 | self.mutex.get(self.bytes.encode(['sleep', ref]), function (e, seq) { 482 | if (!e) opts.since = seq 483 | self._clone(ref, db.sleep(opts), cb || noop) 484 | }) 485 | } 486 | Database.prototype._clone = function (ref, sleep, cb) { 487 | var self = this 488 | , lastSeq = 0 489 | ; 490 | sleep.on('entry', function (entry) { 491 | if (!entry.data) return console.warn('Skipping entry because it does not include data.') 492 | self.put(entry.data, {new_edits:false}, function () { 493 | if (entry.seq > lastSeq) { 494 | self.mutex.put(self.bytes.encode(['sleep', ref]), entry.seq, noop) 495 | lastSeq = entry.seq 496 | } 497 | }) 498 | }) 499 | sleep.on('end', function () { 500 | // hacky, the mutex should give a better one of these. 501 | self.mutex.get('0', function () { 502 | self.mutex.afterWrite(function () { 503 | cb(null, lastSeq) 504 | }) 505 | }) 506 | }) 507 | } 508 | 509 | function couchup (filename) { 510 | return new Store({location:filename}) 511 | } 512 | 513 | module.exports = couchup 514 | module.exports.http = http -------------------------------------------------------------------------------- /lib/couch-replicate.js: -------------------------------------------------------------------------------- 1 | var request = require('request').defaults({headers:{accept:'application/json'}}) 2 | , jsonstream = require('JSONStream') 3 | , once = require('once') 4 | , async = require('async') 5 | , bytewise = require('bytewise') 6 | ; 7 | 8 | function revint (rev) { 9 | var seq 10 | if (!rev) seq = 0 11 | else seq = parseInt(rev.slice(0, rev.indexOf('-'))) 12 | if (isNaN(seq)) { console.error('BAD!'); seq = 0} 13 | return seq 14 | } 15 | 16 | function compactRevision (changes) { 17 | var c = changes.map(function 
(c) {return revint(c.rev)}) 18 | c = c.reduce(function (x,y) {return x+y}, 0)+changes[0].rev.slice(changes[0].rev.indexOf('-')) 19 | return c 20 | } 21 | 22 | function pull (db, url, opts, cb) { 23 | if (!cb) { 24 | cb = opts 25 | opts = null 26 | } 27 | cb = once(cb) 28 | if (url[url.length - 1] !== '/') url += '/' 29 | 30 | function _pull (seq) { 31 | var json = jsonstream.parse('results.*') 32 | , success = 0 33 | ; 34 | 35 | var pending = [] 36 | , writing = false 37 | ; 38 | 39 | function saveSequence (seq, cb) { 40 | db.mutex.put(bytewise.encode([db.name, 4, url]), seq, cb) 41 | } 42 | 43 | function writePending (cb) { 44 | var p = pending 45 | pending = [] 46 | 47 | function onRow (row, cb) { 48 | row.doc._rev = compactRevision(row.changes) 49 | db.put(row.doc, {new_edits:false}, cb) 50 | } 51 | 52 | async.map(p, onRow, function (e, results) { 53 | if (e) { 54 | r.abort() 55 | return cb(e) 56 | } 57 | 58 | saveSequence(p[p.length - 1].seq, function (e) { 59 | if (e) { 60 | r.abort() 61 | return cb(e) 62 | } 63 | 64 | if (pending.length) { 65 | writePending(cb) 66 | } else { 67 | cb() 68 | } 69 | }) 70 | }) 71 | } 72 | 73 | json.on('data', function (row) { 74 | pending.push(row) 75 | if (!writing) { 76 | writing = true 77 | writePending(function (e) { 78 | if (e) throw e 79 | writing = false 80 | }) 81 | } 82 | }) 83 | 84 | var r = request(url+'_changes?style=all_docs&include_docs=true&since='+seq) 85 | r.on('response', function (resp) { 86 | // resp.on('data', function (c) {console.log(c.toString())}) 87 | resp.pipe(json) 88 | resp.on('error', cb) 89 | json.on('error', cb) 90 | json.on('end', function () { 91 | cb(null) 92 | }) 93 | }) 94 | } 95 | 96 | db.mutex.get(bytewise.encode([db.name, 4, url]), function (e, seq) { 97 | if (e) seq = 0 98 | _pull(seq) 99 | }) 100 | 101 | } 102 | 103 | module.exports = pull 104 | 105 | -------------------------------------------------------------------------------- /lib/http.js: 
-------------------------------------------------------------------------------- 1 | var mapleTree = require('mapleTree') 2 | , _ = require('lodash') 3 | , crypto = require('crypto') 4 | , md5 = function (data){ return crypto.createHash('md5').update(data).digest('hex') } 5 | , async = require('async') 6 | , once = require('once') 7 | , uuid = require('node-uuid') 8 | ; 9 | 10 | function body (req, cb) { 11 | var chunks = [] 12 | , size = 0 13 | ; 14 | cb = once(cb) 15 | req.on('data', function (chunk) { 16 | chunks.push(chunk) 17 | size = size + chunk.length 18 | }) 19 | req.on('end', function () { 20 | var b = Buffer.concat(chunks) 21 | , obj = JSON.parse(b.toString()) 22 | ; 23 | cb(null, obj) 24 | }) 25 | req.on('error', function (e) { 26 | cb(e) 27 | }) 28 | } 29 | 30 | function error (resp, e) { 31 | console.error(e.stack) 32 | resp.statusCode = 500 33 | resp.end() 34 | } 35 | 36 | function json (resp, obj, statusCode) { 37 | var buffer = new Buffer(JSON.stringify(obj)) 38 | resp.statusCode = statusCode || 200 39 | resp.setHeader('content-type', 'application/json') 40 | resp.setHeader('etag', obj._rev || md5(buffer)) 41 | resp.setHeader('content-length', buffer.length) 42 | resp.end(buffer) 43 | } 44 | 45 | function build (store) { 46 | var tree = new mapleTree.RouteTree() 47 | function route (path, cb) { 48 | tree.define(path, function () { 49 | return cb 50 | }) 51 | } 52 | route('/:db', function (r, req, resp) { 53 | if (req.method === 'PUT') { 54 | store.put(r.params.db, function (e, db) { 55 | if (e) return error(resp, e) 56 | json(resp, {ok:true}, 201) 57 | }) 58 | } else if (req.method === 'GET' || req.method === 'HEAD') { 59 | store.get(r.params.db, function (e, db) { 60 | if (e) return error(resp, e) 61 | // http://wiki.apache.org/couchdb/HTTP_database_API 62 | // { 63 | // "compact_running": false, 64 | // "db_name": "dj", 65 | // "disk_format_version": 5, 66 | // "disk_size": 12377, 67 | // "doc_count": 1, 68 | // "doc_del_count": 1, 69 | // 
"instance_start_time": "1267612389906234", 70 | // "purge_seq": 0, 71 | // "update_seq": 4 72 | // } 73 | db.info(function (e, info) { 74 | if (e) return error(resp, e) 75 | json(resp, _.extend({dbname: r.params.db}, info)) 76 | }) 77 | }) 78 | } else if (req.method === 'POST') { 79 | var p = 80 | { body: body.bind(body, req) 81 | , db: store.get.bind(store, r.params.db) 82 | } 83 | async.parallel(p, function (e, results) { 84 | if (e) return error(resp, e) 85 | if (!results.body._id) results.body._id = uuid() 86 | results.db.put(results.body, function (e, info) { 87 | if (e) return error(resp, e) 88 | json(resp, info, 201) 89 | }) 90 | }) 91 | } else if (req.method === 'DELETE') { 92 | store.delete(r.params.db, function (e) { 93 | if (e) return error(resp, e) 94 | resp.statusCode = 201 95 | resp.end() 96 | }) 97 | } else { 98 | // invalid http method 99 | resp.statusCode = 405 100 | resp.end() 101 | } 102 | }) 103 | route('/:db/:doc', function (r, req, resp) { 104 | if (req.method === 'GET' || req.method === 'HEAD') { 105 | store.get(r.params.db, function (e, db) { 106 | if (e) return error(resp, e) 107 | db.get(r.params.doc, function (e, doc) { 108 | if (e) return error(resp, e) 109 | json(resp, doc) 110 | }) 111 | }) 112 | } 113 | }) 114 | return tree 115 | } 116 | 117 | function app (store) { 118 | var tree = build(store) 119 | function handler (req, res) { 120 | var match = tree.match(req.url) 121 | if (!match.fn) { 122 | res.statusCode = 404 123 | res.end() 124 | return 125 | } 126 | match.fn()(match, req, res) 127 | } 128 | return handler 129 | } 130 | module.exports = app -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "couchup", 3 | "version": "0.8.1", 4 | "description": "A CouchDB implementation on top of levelup.", 5 | "main": "index.js", 6 | "dependencies": { 7 | "bytewise": "~0.4.0", 8 | "level-peek": 
"~1.0.6", 9 | "leveldown": "~0.6.1", 10 | "levelup": "~0.10.0", 11 | "lru-cache": "~2.3.0", 12 | "mapleTree": "~0.5.0-1", 13 | "node-uuid": "~1.4.0", 14 | "once": "~1.1.1", 15 | "lodash": "~1.3.1", 16 | "async": "~0.2.9", 17 | "level-mutex": "~0.2.0", 18 | "bloomfilter": "0.0.12", 19 | "JSONStream": "~0.6.4", 20 | "byteslice": "~0.2.0", 21 | "sleep-ref": "~0.3.0" 22 | }, 23 | "devDependencies": { 24 | "cleanup": "~0.3.0", 25 | "rimraf": "~2.1.4", 26 | "okdone": "~0.3.0" 27 | }, 28 | "scripts": { 29 | "test": "node tests/run.js" 30 | }, 31 | "repository": { 32 | "type": "git", 33 | "url": "https://github.com/mikeal/couchup" 34 | }, 35 | "keywords": [ 36 | "couchdb", 37 | "database", 38 | "leveldb" 39 | ], 40 | "author": "Mikeal Rogers", 41 | "license": "BSD", 42 | "readmeFilename": "readme.md" 43 | } 44 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | ## couchup 2 | 3 | `couchup` is a database. The goal is to build a data model well suited for mobile applications that may need to work offline and sync later on and maintain smart client side caches. This data model is inspired by CouchDB but diverges greatly in the way it handles and resolves the revision history and conflicts. `couchup` implements a "most writes wins" conflict resolution scheme and does not require or even allow user specific conflict resolution. 4 | 5 | Another goal of `couchup` is to be performant and modular. This repository only implements the base document storage layer. Indexes, attachments and replicators are implemented as additional modules. 6 | 7 | The tradeoffs `couchup` has made in revision tree storage along with some other simple optimizations mean that `couchup` already has [better write performance than CouchDB](https://gist.github.com/mikeal/5847297) and the same consistency guarantees. 
8 | 9 | ## API 10 | 11 | ```javascript 12 | var couchup = require('couchup') 13 | , store = couchup('./dbdir') 14 | ; 15 | 16 | store.put('databaseName', function (e, db) { 17 | if (e) throw e 18 | db.put({_id:'key', prop:'value'}, function (e, info) { 19 | if (e) throw e 20 | db.put({_id:'key', _rev:info.rev, prop:'newvalue'}, function (e, info) { 21 | if (e) throw e 22 | db.get('key', function (e, doc) { 23 | if (e) throw new Error('doc not found') 24 | console.log(doc) 25 | }) 26 | }) 27 | }) 28 | }) 29 | ``` 30 | 31 | ```javascript 32 | db.compact() // remove old revisions and sequences from the database. 33 | ``` 34 | 35 | ```javascript 36 | db.info(function (e, i) { 37 | if (e) throw e 38 | console.log(i.update_seq, i.doc_count) 39 | }) 40 | ``` 41 | 42 | ### SLEEP Support 43 | 44 | ```javascript 45 | var changes = db.sleep() 46 | changes.on('entry', function (entry) { 47 | console.log(entry.seq, entry.id) 48 | }) 49 | changes.on('end', function () { 50 | console.log('done') 51 | }) 52 | ``` 53 | 54 | And can be used with `sleep-ref` for replicating over the network via tcp, tls, http and https. 55 | 56 | ```javascript 57 | var sleepref = require('sleep-ref') 58 | , s = sleepref(db.sleep.bind(db)) 59 | ; 60 | http.createServer(s.httpHandler.bind(s)).listen(8080, function () { 61 | db2.pull('http://localhost:8080/', function (e) { 62 | if (e) throw e 63 | // all replicated over the network 64 | }) 65 | }) 66 | ``` 67 | 68 | You can also replicate between database objects in process. 69 | 70 | ```javascript 71 | db.clone(db2, function (e) { 72 | if (e) throw e 73 | // all replicated 74 | }) 75 | ``` 76 | 77 | #### Incompatibilities w/ CouchDB 78 | 79 | Pull replication from CouchDB works and will continue to work continuously if you aren't updating the `couchup` node you're writing it to. Bi-Directional replication with CouchDB will eventually result in conflicts on the CouchDB side because `couchup` converts CouchDB's revision tree to a linear revision sequence.
80 | 81 | Similarly, push replication to Apache CouchDB will work once but writing again will likely cause unnecessary conflicts on the CouchDB side. 82 | 83 | -------------------------------------------------------------------------------- /tests/benchmark/compare-edit-writes.js: -------------------------------------------------------------------------------- 1 | var request = require('request') 2 | , exec = require('child_process').exec 3 | , http = require('http') 4 | , assert = require('assert') 5 | , concastack = require('concastack') 6 | , duration = 60 * 1000 7 | , parallel = 10 8 | , uuid = require('node-uuid') 9 | ; 10 | 11 | http.globalAgent.maxSockets = parallel 12 | 13 | function test (str, port, cb) { 14 | put(port, function () { 15 | startTest(port, str) 16 | setTimeout(function () { 17 | stopTest(str, function () { 18 | deleteDatabase(port, function () { 19 | cb() 20 | }) 21 | }) 22 | }, duration) 23 | }) 24 | } 25 | 26 | function put (port, cb) { 27 | request.put('http://localhost:'+port+'/test-benchmark', {json:true}, function (e, resp, body) { 28 | if (e) throw e 29 | assert(resp.statusCode, 201) 30 | cb(null, body) 31 | }) 32 | } 33 | 34 | function deleteDatabase (port, cb) { 35 | request.del('http://localhost:'+port+'/test-benchmark', {json:true}, function (e, resp, body) { 36 | if (e) throw concastack(e, new Error()) 37 | assert(resp.statusCode, 201) 38 | cb(null, body) 39 | }) 40 | } 41 | 42 | var testRunning = false 43 | , writes = 0 44 | , inflight = 0 45 | ; 46 | 47 | function startTest (port, cb) { 48 | testRunning = true 49 | 50 | function go (id, rev) { 51 | if (!id) id = uuid() 52 | request.post('http://localhost:'+port+'/test-benchmark', {json:{_id:id, _rev:rev, test:1}}, function (e, resp, body) { 53 | if (!testRunning) return inflight = inflight - 1 54 | if (e) throw concastack(e, new Error()) 55 | assert.equal(resp.statusCode, 201) 56 | writes = writes + 1 57 | go(id, body.rev || rev) 58 | }) 59 | } 60 | 61 | for (var i=0;i 100) 
return finish() 49 | db.put({_id:uuid(), test:2}, function (e, info) { 50 | write(i + 1) 51 | }) 52 | } 53 | write(0) 54 | 55 | function finish () { 56 | db.put({_id:'asdf', test:2, _rev:'asdf'}, function (e, info) { 57 | assert.ok(e) 58 | ok('bad rev') 59 | c.delete('test', function (e) { 60 | c.get('test', function (e) { 61 | assert.ok(e) 62 | count(c.lev, function (e, count) { 63 | assert.equal(count, 0) 64 | d.cleanup() 65 | }) 66 | }) 67 | }) 68 | }) 69 | } 70 | 71 | }) 72 | }) 73 | }) 74 | }) 75 | }) 76 | }) -------------------------------------------------------------------------------- /tests/test-compaction.js: -------------------------------------------------------------------------------- 1 | var couchup = require('../') 2 | , cleanup = require('cleanup') 3 | , rimraf = require('rimraf') 4 | , assert = require('assert') 5 | , ok = require('okdone') 6 | , bytewise = require('bytewise') 7 | , uuid = require('node-uuid') 8 | ; 9 | 10 | var d = cleanup(function (error) { 11 | rimraf.sync(__dirname+'/testdb') 12 | if (error) process.exit(1) 13 | ok.done() 14 | }) 15 | 16 | var store = couchup(__dirname+'/testdb') 17 | 18 | function storelength (cb) { 19 | var l = 0 20 | var s = store.lev.createKeyStream() 21 | s.on('data', function () {l += 1}) 22 | s.on('end', function () {cb(null, l)}) 23 | s.on('error', cb) 24 | } 25 | 26 | store.put('db', function (e, db) { 27 | var i = 0 28 | , rev 29 | ; 30 | function write (e, meta) { 31 | if (e) throw e 32 | i += 1 33 | if (i === 100) return compact() 34 | var doc = {_id:'testid', test:1} 35 | if (meta) doc._rev = meta.rev 36 | db.put(doc, write) 37 | } 38 | write() 39 | 40 | function compact () { 41 | ok('writes') 42 | db.mutex.afterWrite(function () { 43 | storelength(function (e, l) { 44 | db.compact(function (e, m) { 45 | assert.equal(m, 98) 46 | ok('compact') 47 | storelength(function (e, l) { 48 | assert.equal(3, l) 49 | ok('length') 50 | d.cleanup() 51 | }) 52 | }) 53 | }) 54 | }) 55 | } 56 | }) 57 | 58 | 
ok.expect(3) 59 | 60 | -------------------------------------------------------------------------------- /tests/test-delete.js: -------------------------------------------------------------------------------- 1 | var couchup = require('../') 2 | , cleanup = require('cleanup') 3 | , rimraf = require('rimraf') 4 | , assert = require('assert') 5 | , ok = require('okdone') 6 | ; 7 | 8 | var d = cleanup(function (error) { 9 | rimraf.sync(__dirname+'/testdb') 10 | if (error) process.exit(1) 11 | ok.done() 12 | }) 13 | 14 | function count (lev, cb) { 15 | var r = lev.createReadStream() 16 | , i = 0 17 | ; 18 | r.on('data', function () { i = i + 1}) 19 | r.on('end', function () {cb(null, i)}) 20 | r.on('error', function (err) {cb(err)}) 21 | } 22 | 23 | var c = couchup(__dirname+'/testdb') 24 | c.put('test', function (e, db) { 25 | if (e) throw e 26 | ok('create db') 27 | db.put({_id:'asdf', test:1}, function (e, info) { 28 | if (e) throw e 29 | db.delete({_id:'asdf'}, function (e) { 30 | assert.ok(e) 31 | ok('delete') 32 | db.delete({_id:'asdf', _rev:info.rev}, function (e) { 33 | db.get('asdf', function (e, info) { 34 | assert.ok(e) 35 | ok('removed') 36 | count(db.store.lev, function (e, num) { 37 | if (e) throw e 38 | assert.equal(num, 5) 39 | ok('size') 40 | d.cleanup() 41 | // db.compact(function (e) { 42 | // if (e) throw e 43 | // count(db.store.lev, function (e, num) { 44 | // assert.equal(1, num) 45 | // }) 46 | // }) 47 | }) 48 | }) 49 | }) 50 | }) 51 | }) 52 | }) -------------------------------------------------------------------------------- /tests/test-http.js: -------------------------------------------------------------------------------- 1 | var couchup = require('../') 2 | , path = require('path') 3 | , http = require('http') 4 | , request = require('request') 5 | , assert = require('assert') 6 | , cleanup = require('cleanup') 7 | , rimraf = require('rimraf') 8 | , ok = require('okdone') 9 | , port = 5984 10 | , url = 'http://localhost:'+port 11 | ; 12 | 
13 | var d = cleanup(function (error) { 14 | rimraf.sync(__dirname+'/testdb') 15 | if (error) return process.exit(1) 16 | server.close() 17 | ok.done() 18 | }) 19 | 20 | var server = http.createServer(couchup.http(couchup(path.join(__dirname, 'testdb')))) 21 | server.listen(port, function () { 22 | request.put(url+'/testdb', function (e, resp, body) { 23 | if (e) throw e 24 | assert.equal(resp.statusCode, 201) 25 | ok('create database') 26 | request.get(url+'/testdb', function (e, resp, body) { 27 | if (e) throw e 28 | assert.equal(resp.statusCode, 200) 29 | ok('get database') 30 | request.post(url+'/testdb', {json:{test:1}}, function (e, resp, info) { 31 | if (e) throw e 32 | assert.equal(resp.statusCode, 201) 33 | assert.ok(info) 34 | ok('create document') 35 | request.get(url+'/testdb/'+info.id, {json:true}, function (e, resp, doc) { 36 | if (e) throw e 37 | assert.equal(resp.statusCode, 200) 38 | assert.ok(doc) 39 | assert.ok(doc._id, info.id) 40 | assert.ok(doc._rev, info.rev) 41 | ok('get document') 42 | d.cleanup() 43 | }) 44 | }) 45 | }) 46 | }) 47 | }) -------------------------------------------------------------------------------- /tests/test-sleep.js: -------------------------------------------------------------------------------- 1 | var couchup = require('../') 2 | , cleanup = require('cleanup') 3 | , rimraf = require('rimraf') 4 | , assert = require('assert') 5 | , ok = require('okdone') 6 | , bytewise = require('bytewise') 7 | , uuid = require('node-uuid') 8 | , async = require('async') 9 | , sleepref = require('sleep-ref') 10 | , http = require('http') 11 | ; 12 | 13 | var d = cleanup(function (error) { 14 | rimraf.sync(__dirname+'/testdb') 15 | if (error) process.exit(1) 16 | ok.done() 17 | process.exit() 18 | }) 19 | 20 | ok.expect(4) 21 | 22 | function count (lev, cb) { 23 | var r = lev.createReadStream() 24 | , i = 0 25 | ; 26 | r.on('data', function (data) {console.log('left', bytewise.decode(data.key)); i = i + 1}) 27 | r.on('end', function () 
{cb(null, i)}) 28 | r.on('error', function (err) {cb(err)}) 29 | } 30 | 31 | function writeThings (prefix, c, cb) { 32 | async.map([0,1,2,3,4,5,6,7,8,9], function (i, cb) { 33 | c.put({_id:prefix+i, test:i}, cb) 34 | }, cb) 35 | } 36 | 37 | function testServers (db, db2) { 38 | writeThings('new', db, function () { 39 | var s = sleepref(db.sleep.bind(db)) 40 | 41 | http.createServer(s.httpHandler.bind(s)).listen(8080, function () { 42 | db2.pull('http://localhost:8080', function (e) { 43 | if (e) throw e 44 | ok('pull') 45 | assert.equal(db.doc_count, db2.doc_count) 46 | ok('matching count') 47 | d.cleanup() 48 | }) 49 | }) 50 | 51 | }) 52 | } 53 | 54 | var c = couchup(__dirname+'/testdb') 55 | c.put('db', function (e, db) { 56 | if (e) throw e 57 | 58 | writeThings('test1', db, function (x) { 59 | var changes = db.sleep({since:2}) 60 | var len = 0 61 | , lastSeq = 0 62 | ; 63 | changes.on('entry', function (entry) { 64 | assert.ok(entry.seq > lastSeq) 65 | lastSeq = entry.seq 66 | len += 1 67 | if (len === 2) { 68 | db.put({_id:"testmiddle", test:1}, function (e) { 69 | if (e) throw e 70 | }) 71 | } 72 | }) 73 | changes.on('end', function () { 74 | assert.equal(len, 10) 75 | ok('changes') 76 | c.put('db2', function (e, db2) { 77 | db2.clone(db, function (e) { 78 | assert.equal(db2.doc_count, db.doc_count) 79 | ok('clone') 80 | testServers(db, db2) 81 | }) 82 | }) 83 | }) 84 | }) 85 | 86 | }) 87 | 88 | 89 | 90 | // 91 | // 92 | // c.put('test', function (e, db) { 93 | // if (e) throw e 94 | // ok('create db') 95 | // db.put({_id:'asdf', test:1}, function (e, info) { 96 | // if (e) throw e 97 | // db.get('asdf', function (e, doc) { 98 | // if (e) throw e 99 | // assert.equal(doc.test, 1) 100 | // assert.equal(doc._rev, info.rev) 101 | // ok('write 1') 102 | // db.put({_id:'asdf', test:2, _rev:info.rev}, function (e, info) { 103 | // if (e) throw e 104 | // db.get('asdf', function (e, doc) { 105 | // if (e) throw e 106 | // assert.equal(doc.test, 2) 107 | // 
assert.equal(doc._rev, info.rev) 108 | // ok('write 2') 109 | // db.put({_id:'asdf', test:2}, function (e, info) { 110 | // assert.ok(e) 111 | // ok('no _rev') 112 | // 113 | // function write (i) { 114 | // if (i > 100) return finish() 115 | // db.put({_id:uuid(), test:2}, function (e, info) { 116 | // write(i + 1) 117 | // }) 118 | // } 119 | // write(0) 120 | // 121 | // function finish () { 122 | // db.put({_id:'asdf', test:2, _rev:'asdf'}, function (e, info) { 123 | // assert.ok(e) 124 | // ok('bad rev') 125 | // c.delete('test', function (e) { 126 | // c.get('test', function (e) { 127 | // assert.ok(e) 128 | // count(c.lev, function (e, count) { 129 | // assert.equal(count, 0) 130 | // d.cleanup() 131 | // }) 132 | // }) 133 | // }) 134 | // }) 135 | // } 136 | // 137 | // }) 138 | // }) 139 | // }) 140 | // }) 141 | // }) 142 | // }) --------------------------------------------------------------------------------