├── .eslintignore
├── .eslintrc.js
├── .gitignore
├── .travis.yml
├── README.md
├── borough.js
├── borough.png
├── lib
│   ├── cluster-connections.js
│   ├── cluster.js
│   ├── default-options.js
│   ├── iterator.js
│   ├── partition.js
│   ├── request.js
│   ├── subnode-topology.js
│   ├── subnode.js
│   └── timeout-error.js
├── package.json
└── test
    ├── borough-cluster-comms.js
    ├── borough-cluster-partition-data.js
    ├── borough-leveldown.js
    ├── borough-one-node.js
    ├── borough-other-partition-data.js
    ├── borough-topology-changes.js
    └── helpers
        └── keys.js

/.eslintignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | coverage
--------------------------------------------------------------------------------
/.eslintrc.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 |   "extends": "standard",
3 |   "plugins": [
4 |     "standard"
5 |   ]
6 | };
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | node_modules
3 | npm-debug.log
4 | coverage
5 | data
6 | Vagrantfile
7 | .vagrant
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 | node_js:
3 |   - "4"
4 |   - "6"
5 |   - "7"
6 | env:
7 |   - CXX=g++-4.8
8 | addons:
9 |   apt:
10 |     sources:
11 |       - ubuntu-toolchain-r-test
12 |     packages:
13 |       - g++-4.8
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![Borough](borough.png)
2 | 
3 | [![Build Status](https://travis-ci.org/pgte/borough.svg?branch=master)](https://travis-ci.org/pgte/borough)
4 | 
5 | Partitioned, fault-tolerant and survivable database and application server.
6 | 
7 | The marriage of a Hashring, the Raft consensus protocol and an embedded LevelDB database.
8 | 
9 | Ensures that all the data your application needs is local. Automatic redundancy and fail-over.
10 | 
11 | # Install and use
12 | 
13 | ```bash
14 | $ npm install borough --save
15 | ```
16 | 
17 | ```javascript
18 | const Borough = require('borough')
19 | 
20 | const node = Borough()
21 | 
22 | node.on('request', handleRequest)
23 | 
24 | function handleRequest (req, reply) {
25 |   console.log('handling partition %s', req.partition.name)
26 | 
27 |   // use local partition
28 |   req.partition.get('key', (err, value) => {
29 |     // ...
30 |   })
31 | 
32 |   // use remote partition
33 |   req.otherPartition('partition name').put('key', {some: 'value'}, err => {
34 |     // ...
35 |   })
36 | }
37 | ```
38 | 
39 | # API:
40 | 
41 | # Borough (options)
42 | 
43 | Creates and returns a Borough node.
44 | 
45 | ```javascript
46 | const Borough = require('borough')
47 | const options = {
48 |   // ...
49 | }
50 | const node = Borough(options)
51 | ```
52 | 
53 | Options:
54 | 
55 | * base (array, defaults to []): the address of one or more peers to form the cluster from.
56 | * cluster (object): contains the following attributes:
57 |   - name (string, defaults to "borough"): the name of the cluster we're creating. Nodes participating in the same cluster need to use the same cluster name.
58 | 
59 | See [all options and defaults here](lib/default-options.js).
60 | 
61 | # node
62 | 
63 | ## node.start (callback)
64 | 
65 | Start the node. Invokes the callback once the node has started, or with an error as the first argument if starting failed.
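For example (a minimal sketch; what you do on error is up to the application):

```javascript
node.start(err => {
  if (err) {
    throw err
  }
  console.log('node %s started', node.whoami())
})
```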
66 | 
67 | ## node.partition (partition)
68 | 
69 | Returns a partition instance, which exposes a [LevelDown](https://github.com/level/leveldown#readme)-compatible interface.
70 | 
71 | ## node.request (partition, payload, callback)
72 | 
73 | Make a request to the cluster. Arguments:
74 | 
75 | * `partition` (string): the partition name to make the request to.
76 | * `payload` (object): the request payload; it can be any object. Any streams contained in its `streams` attribute will be streamed.
77 | * `callback` (function (err, reply)): called when there is a reply. If there was an error, the first argument contains it; otherwise the second argument contains the reply object. Any streams contained in the `reply.streams` attribute will be streamed in.
78 | 
79 | ## node.join (address, cb)
80 | 
81 | Make the instance join another node or set of nodes. The callback is called once the join completes, or with an error as the first argument if it fails.
82 | 
83 | ## node.whoami ()
84 | 
85 | Returns the identifier string for this node.
86 | 
87 | ## Events emitted by node
88 | 
89 | * `request (request, reply)`: emitted when there is a request for a partition.
90 |   * `request`: an instance of the `Request` class (see below).
91 |   * `reply` (function): a function you must call to reply. Accepts any object as payload. Any streams contained in the `streams` attribute will be streamed in.
92 | 
93 | # Request
94 | 
95 | ## request.partition
96 | 
97 | The current partition.
98 | 
99 | ## request.body
100 | 
101 | The request body. Any streams contained in the `request.body.streams` attribute will be streamed in.
102 | 
103 | ## request.otherPartition(partitionName)
104 | 
105 | Access a different (probably remote) partition. Returns a partition object.
106 | 
107 | # Partition
108 | 
109 | The partition is exposed as [a LevelDown-compatible interface](https://github.com/level/leveldown#readme).
110 | 
111 | ## partition.name
112 | 
113 | Contains the name of the partition.
114 | 
115 | ## partition.get(key, callback)
116 | 
117 | Reads a value from the partition by key.
118 | 
119 | ## partition.put(key, value, callback)
120 | 
121 | Writes a value into the partition under the specified key.
122 | 
123 | ## partition.iterator(options)
124 | 
125 | Returns an iterator over the partition data.
126 | 
127 | ## partition.info(callback)
128 | 
129 | Gets some partition statistics. Arguments:
130 | 
131 | * `callback` (function, mandatory): gets called once there is info about the partition:
132 | 
133 | ```js
134 | partition.info((err, info) => {
135 |   if (err) {
136 |     console.error(err)
137 |   } else {
138 |     console.log('info: %j', info)
139 |   }
140 | })
141 | ```
142 | 
143 | `info` is an object containing:
144 | 
145 | * `peers` (array): a list of the skiff peer addresses for that partition.
146 | 
147 | # Sponsors
148 | 
149 | Borough development is sponsored by [YLD](https://www.yld.io).
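For reference, a compact sketch tying the API above together; the partition name, payload shape and handler logic are illustrative (`test/borough-cluster-partition-data.js` exercises the same pattern):

```javascript
const Borough = require('borough')

const node = Borough()

// serve requests for the partitions this node owns
node.on('request', (req, reply) => {
  const body = req.body
  if (body.type === 'put') {
    req.partition.put(body.key, body.value, reply)
  } else if (body.type === 'get') {
    req.partition.get(body.key, reply)
  } else {
    reply(new Error('unknown request type'))
  }
})

node.start(err => {
  if (err) {
    throw err
  }
  // route a request to whichever node currently owns "partition 1"
  node.request('partition 1', {type: 'put', key: 'a', value: 'b'}, err => {
    // ...
  })
})
```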
150 | 151 | -------------------------------------------------------------------------------- /borough.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('borough.borough') 4 | const EventEmitter = require('events') 5 | const async = require('async') 6 | const merge = require('deepmerge') 7 | const Skiff = require('skiff') 8 | const freeport = require('freeport') 9 | const clone = require('clone-deep') 10 | const debounce = require('debounce') 11 | 12 | const Cluster = require('./lib/cluster') 13 | const Subnode = require('./lib/subnode') 14 | const Request = require('./lib/request') 15 | const Partition = require('./lib/partition') 16 | const Iterator = require('./lib/iterator') 17 | const defaultOptions = require('./lib/default-options') 18 | 19 | class Borough extends EventEmitter { 20 | 21 | constructor (options) { 22 | super() 23 | debug('new node with options %j', options) 24 | this._options = merge(clone(defaultOptions), options || {}) 25 | this._startState = 'stopped' 26 | 27 | this._address = clone(this._options.address) 28 | this._partitions = {} 29 | } 30 | 31 | // ---- 32 | // Start and stop 33 | 34 | start (done) { 35 | debug('starting borough node...') 36 | if (this._startState !== 'stopped') { 37 | throw new Error('already starting...') 38 | } 39 | this._startState = 'starting' 40 | async.series( 41 | [ 42 | this._startNetwork.bind(this), 43 | this._startCluster.bind(this) 44 | ], 45 | err => { 46 | this._startState = 'started' 47 | debug('%s started: %j', this.whoami(), err) 48 | done(err) 49 | }) 50 | } 51 | 52 | stop (done) { 53 | debug('stopping borough node...') 54 | this._startState = 'stopped' 55 | async.series( 56 | [ 57 | this._stopNetwork.bind(this), 58 | this._stopAllSubnodes.bind(this), 59 | this._stopCluster.bind(this) 60 | ], 61 | done) 62 | } 63 | 64 | _startCluster (done) { 65 | const options = merge(this._options.cluster, { 66 | base: this._options.base, 67 | address: this._address 68 | }) 69 | this._cluster = new Cluster(this, options) 70 | 71 | const debouncedTopologyChange = debounce(this._onTopologyChange.bind(this), 100) 72 | 73 | this._cluster.start(done) 74 | this._cluster.on('error', err => this.emit('error', err)) 75 | this._cluster.on('peerUp', debouncedTopologyChange) 76 | this._cluster.on('peerDown', debouncedTopologyChange) 77 | } 78 | 79 | _startNetwork (done) { 80 | freeport((err, port) => { 81 | if (err) { 82 | return done(err) 83 | } 84 | this._address.port = port 85 | 86 | this._network = Skiff.createNetwork({ 87 | passive: { 88 | server: clone(this._address) 89 | } 90 | }) 91 | this._network.active.on('error', (err) => { 92 | this.emit('error', err) 93 | }) 94 | this._network.passive.on('error', (err) => { 95 | this.emit('error', err) 96 | }) 97 | process.nextTick(done) 98 | }) 99 | } 100 | 101 | _stopNetwork (done) { 102 | if (this._network) { 103 | debug('%s: stopping network..', this.whoami()) 104 | this._network.passive.once('closed', err => { 105 | debug('%s: network stopped', this.whoami(), err) 106 | done(err) 107 | }) 108 | this._network.active.end() 109 | this._network.passive.end() 110 | } else { 111 | process.nextTick(done) 112 | } 113 | } 114 | 115 | _stopAllSubnodes (done) { 116 | const partitions = Object.keys(this._partitions) 117 | debug('stopping all %d subnodes..', partitions.length) 118 | async.each( 119 | partitions.map(part => this._partitions[part]), 120 | (subnode, cb) => subnode.then( 121 | sn => sn.stop(cb), 122 | err => { 123 
|           debug('error stopping node:', err.message)
124 |           this.emit('warning', err)
125 |           cb()
126 |         }),
127 |       err => {
128 |         debug('all subnodes stopped', err)
129 |         done(err)
130 |       })
131 |   }
132 | 
133 |   _stopCluster (done) {
134 |     debug('stopping cluster..')
135 |     this._cluster.stop(err => {
136 |       debug('cluster stopped', err)
137 |       done(err)
138 |     })
139 |   }
140 | 
141 |   // ----
142 |   // Topology
143 | 
144 |   whoami () {
145 |     return this._cluster.whoami()
146 |   }
147 | 
148 |   _onTopologyChange () {
149 |     debug('%s: topology changed', this.whoami())
150 |     const partitions = Object.keys(this._partitions)
151 |     partitions.forEach(this._reconfigurePartition.bind(this))
152 |   }
153 | 
154 |   _reconfigurePartition (partition) {
155 |     debug('%s: reconfiguring partition %s', this.whoami(), partition)
156 |     this.partitionSubnode(partition, {}, (err, subnode) => {
157 |       if (err) {
158 |         this.emit('warning', err)
159 |       } else {
160 |         subnode.topologyUpdated()
161 |       }
162 |     })
163 |   }
164 | 
165 |   partitionSubnodeAddresses (partition, done) {
166 |     const nodeAddresses = this._cluster.nodesForPartition(partition)
167 |     debug('%s: node addresses for partition %s: %j', this.whoami(), partition, nodeAddresses)
168 |     async.map(
169 |       nodeAddresses,
170 |       this._cluster.remotePartitionAddress.bind(this._cluster, partition),
171 |       (err, addresses) => {
172 |         debug('%s: subnode addresses result: err = %j, addresses = %j', this.whoami(), err && err.message, addresses)
173 |         if (!err && addresses) {
174 |           done(null, addresses.filter(a => !!a))
175 |         } else {
176 |           done(err)
177 |         }
178 |       })
179 |   }
180 | 
181 |   partitionPeers (partition, done) {
182 |     const nodeAddresses = this._cluster.nodesForPartition(partition)
183 |     debug('%s: node addresses for partition %s: %j', this.whoami(), partition, nodeAddresses)
184 |     async.map(
185 |       nodeAddresses,
186 |       this._cluster.remotePartitionAddress.bind(this._cluster, partition),
187 |       (err, addresses) => {
188 |         debug('%s: subnode addresses result: err = %j, addresses = %j', this.whoami(), err && err.message, addresses)
189 |         if (!err && addresses) {
190 |           const peers = addresses.map((addr, index) => {
191 |             return {
192 |               address: nodeAddresses[index],
193 |               skiff: addr
194 |             }
195 |           })
196 |           done(null, peers)
197 |         } else {
198 |           done(err)
199 |         }
200 |       })
201 |   }
202 | 
203 |   partitionSubnode (partition, options, done) {
204 |     debug('node for partition %j, options = %j', partition, options)
205 |     const subnode = this._partitions[partition]
206 |     if (!subnode) {
207 |       debug('does not exist yet, creating partition subnode for partition %s', partition)
208 |       this._createPartitionSubnode(partition, options, err => {
209 |         if (err) {
210 |           debug('error creating partition subnode:', err)
211 |           this.emit('warning', err)
212 |         }
213 |         // the partition promise is rejected on error, so done also receives it
214 |         this._partitions[partition].then(
215 |           sn => done(null, sn),
216 |           done)
217 |       })
218 |     } else {
219 |       subnode.then(
220 |         sn => {
221 |           done(null, sn)
222 |         },
223 |         done)
224 |     }
225 |   }
226 | 
227 |   localPartitionSubnodeAddress (partition) {
228 |     return this._address &&
229 |       this._address.host &&
230 |       this._address.port &&
231 |       Subnode.idFromAddress(this._address, partition)
232 |   }
233 | 
234 |   localPartitionInfo (partition, done) {
235 |     this.partitionSubnode(partition, {}, (err, subnode) => {
236 |       if (err) {
237 |         done(err)
238 |       } else {
239 |         subnode.info((err, info) => {
240 |           if (err) {
241 |             done(err)
242 |           } else {
243 |             done(null, {
244 |               node: this.whoami(),
245 |               subnode: info
246 |             })
247 |           }
248 |         })
249 |       }
250 |     })
251 |   }
252 | 
253 | 
_createPartitionSubnode (partition, options, done) { 254 | debug('%s: create partition subnode: %s, options: %j', this.whoami(), partition, options) 255 | const self = this 256 | 257 | this._partitions[partition] = new Promise((resolve, reject) => { 258 | debug('%s: getting partition subnode addresses..', this.whoami()) 259 | if (options.peers) { 260 | create(options.peers) 261 | } else { 262 | this.partitionSubnodeAddresses(partition, (err, peers) => { 263 | debug('%s: partition subnode addresses result: err = %j, peers = %j', this.whoami(), err && err.message, peers) 264 | if (err) { 265 | reject(err) 266 | done(err) 267 | } else { 268 | create(peers) 269 | } 270 | }) 271 | } 272 | 273 | function create (peers) { 274 | debug('%s: peer subnode addresses for partition %s: %j', self.whoami(), partition, peers) 275 | const subnodeOptions = merge(self._options.subnode, { peers }) 276 | const subnode = new Subnode( 277 | self, 278 | self._address, 279 | partition, 280 | self._network, 281 | self._cluster, 282 | subnodeOptions) 283 | 284 | subnode.on('warning', self.emit.bind(self, 'warning')) 285 | 286 | debug('%s: starting node for partition %s...', self.whoami(), partition) 287 | 288 | const shouldBeLeader = (peers.indexOf(subnode.id) === 0) 289 | if (shouldBeLeader) { 290 | debug('%s: I should be leader of partition %s', self.whoami(), partition) 291 | } 292 | const startOptions = { 293 | waitForState: shouldBeLeader ? 'leader' : 'weakened', 294 | weakenDurationMS: self._options.secondarySubnodeWeakenAtStartupMS, 295 | forceRemotes: options.forceRemotes 296 | } 297 | 298 | subnode.start(startOptions, err => { 299 | debug('%s: subnode for partition %s started', self.whoami(), partition) 300 | if (err) { 301 | reject(err) 302 | done(err) 303 | } else { 304 | resolve(subnode) 305 | done(null, subnode) 306 | } 307 | }) 308 | } 309 | }) 310 | } 311 | 312 | leavePartition (partition, done) { 313 | const node = this._partitions[partition] 314 | if (node) { 315 | delete this._partitions[partition] 316 | node.then(node => { 317 | node.removeAllListeners('warning') 318 | node.stop(done) 319 | }, 320 | done) 321 | } else { 322 | process.nextTick(done) 323 | } 324 | } 325 | 326 | // ---- 327 | // Operations 328 | 329 | request (partition, req, done) { 330 | this._cluster.userRequest(partition, req, done) 331 | } 332 | 333 | remoteCommand (partition, command, done) { 334 | this._cluster.command(partition, command, done) 335 | } 336 | 337 | localCommand (partition, command, done) { 338 | debug('%s: local command (partition = %j, command = %j)', this.whoami(), partition, command) 339 | this.partitionSubnode(partition, {forceRemotes: true}, (err, subnode) => { 340 | if (err) { 341 | done(err) 342 | } else { 343 | subnode.command(command, done) 344 | } 345 | }) 346 | } 347 | 348 | localUserRequest (partition, req, reply) { 349 | debug('%s: local user request (partition = %j, req = %j)', this.whoami(), partition, req) 350 | this.partitionSubnode(partition, {forceRemotes: true}, (err, subnode) => { 351 | if (err) { 352 | reply(err) 353 | } else { 354 | const haveListeners = this.emit( 355 | 'request', 356 | new Request(this.partition(partition), req, this), 357 | reply) 358 | if (!haveListeners) { 359 | reply(new Error('no request listener')) 360 | } 361 | } 362 | }) 363 | } 364 | 365 | partition (partition) { 366 | return new Partition(partition, this) 367 | } 368 | 369 | iterator (db, partition, options) { 370 | return new Iterator(db, this._cluster, partition, options) 371 | } 372 | 373 | localReadStream 
(partition, options, reply) { 374 | debug('local read stream for partition %s, options = %j', partition, options) 375 | this.partitionSubnode(partition, {}, (err, subnode) => { 376 | if (err) { 377 | reply(err) 378 | } else { 379 | reply(null, { 380 | streams: { 381 | read: subnode.readStream(options) 382 | } 383 | }) 384 | } 385 | }) 386 | } 387 | } 388 | 389 | module.exports = createBorough 390 | 391 | function createBorough (options) { 392 | return new Borough(options) 393 | } 394 | -------------------------------------------------------------------------------- /borough.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yldio/borough/a87179ab0e2d815bfaa0cf2ecce92eb651c3fff7/borough.png -------------------------------------------------------------------------------- /lib/cluster-connections.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('borough:cluster.connections') 4 | const timers = require('timers') 5 | const EventEmitter = require('events') 6 | const once = require('once') 7 | const merge = require('deepmerge') 8 | 9 | const TimeoutError = require('./timeout-error') 10 | 11 | class ClusterConnections extends EventEmitter { 12 | 13 | constructor (hashring, options) { 14 | super() 15 | this._hashring = hashring 16 | this._options = options 17 | } 18 | 19 | request (peerId, payload, _options, _done) { 20 | if (!peerId) { 21 | throw new Error('need peer id') 22 | } 23 | 24 | if ((typeof peerId) !== 'string') { 25 | throw new Error('peer id should be string') 26 | } 27 | debug('requesting %s, payload = %j', peerId, payload) 28 | 29 | const done = once(_done) 30 | 31 | const options = merge( 32 | { 33 | timeout: this._options.requestTimeoutMS, 34 | tries: 1 35 | }, 36 | _options || {}) 37 | 38 | if (options.tries > this._options.maxRetries) { 39 | return done(new Error('exceeded retry count')) 40 | } 41 | 42 | let peer 43 | try { 44 | peer = this._hashring._hashring.peers(true).find(p => p.id === peerId) 45 | } catch (err) { 46 | if (err.message.match(/hashring not up yet/i)) { 47 | debug('local hashring not up yet, retrying once it\'s up') 48 | this._hashring.once('up', () => { 49 | debug('hashring is up, going to retry request..') 50 | this.request(peerId, payload, options, done) 51 | }) 52 | } else { 53 | done(err) 54 | } 55 | return 56 | } 57 | 58 | if (peer) { 59 | const conn = this._hashring.peerConn(peer) 60 | 61 | const timeout = timers.setTimeout(() => { 62 | debug('request timed out after %d ms', this._options.requestTimeoutMS) 63 | done(new TimeoutError(peerId, payload)) 64 | }, options.timeout) 65 | 66 | debug('have peer connection, doing the request %j now', payload) 67 | conn.request(payload, (err, result) => { 68 | debug('request replied err = %j, result = %j', err && err.message, result) 69 | timers.clearTimeout(timeout) 70 | if (err && err.message.match(/hashring not up yet/i)) { 71 | timers.setTimeout(() => { 72 | options.tries ++ 73 | this.request(peerId, payload, options, done) 74 | }, this._options.retryOnWarningMS) 75 | } else { 76 | done(err, result) 77 | } 78 | }) 79 | } else { 80 | debug('peer %s not found', peerId) 81 | done(new Error(`peer ${peerId} not found`)) 82 | } 83 | } 84 | 85 | stop () { 86 | // nothing to do here.. 
87 | } 88 | } 89 | 90 | module.exports = ClusterConnections 91 | -------------------------------------------------------------------------------- /lib/cluster.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('borough:cluster') 4 | 5 | const Upring = require('upring') 6 | const EventEmitter = require('events') 7 | const merge = require('deepmerge') 8 | const timers = require('timers') 9 | 10 | const Connections = require('./cluster-connections') 11 | 12 | const interestingEvents = [ 13 | 'move', 'steal', 'peerUp', 'peerDown' 14 | ] 15 | 16 | const commands = [ 'put', 'get', 'del', 'batch', 'join', 'leave' ] 17 | 18 | class Cluster extends EventEmitter { 19 | 20 | constructor (borough, options) { 21 | super() 22 | this._borough = borough 23 | this._options = options 24 | this._started = false 25 | 26 | const upringOpts = merge(this._options.upring, { 27 | base: options.base, 28 | name: options.name 29 | }) 30 | debug('creating hashring from options %j', upringOpts) 31 | this._hashring = Upring(upringOpts) 32 | this._hashring.once('up', () => { 33 | this._started = true 34 | }) 35 | 36 | this._connections = new Connections(this._hashring, this._options) 37 | this._connections.on('error', err => this.emit('error', err)) 38 | 39 | interestingEvents.forEach(event => this._hashring.on(event, this.emit.bind(this, event))) 40 | 41 | this._hashring.add('ping', this._onLocalPing.bind(this)) 42 | this._hashring.add('user request', this._onLocalUserRequest.bind(this)) 43 | this._hashring.add('ensure partition', this._onLocalEnsurePartitionRequest.bind(this)) 44 | this._hashring.add('partition address', this._onLocalPartitionAddressRequest.bind(this)) 45 | this._hashring.add('info', this._onLocalInfoRequest.bind(this)) 46 | 47 | this._hashring.add({ cmd: { type: 'read stream' } }, this._onLocalReadStream.bind(this)) 48 | commands.forEach( 49 | command => this._hashring.add({ cmd: { type: command } }, 50 | this._onLocalCommandRequest.bind(this))) 51 | } 52 | 53 | // ------ 54 | // Start and stop 55 | 56 | start (done) { 57 | debug('starting..') 58 | if (this._started) { 59 | process.nextTick(done) 60 | } else { 61 | this._hashring.once('up', () => { done() }) 62 | } 63 | } 64 | 65 | stop (done) { 66 | debug('stopping..') 67 | this._connections.stop() 68 | this._hashring.close(err => { 69 | debug('closed hashring', err) 70 | done(err) 71 | }) 72 | } 73 | 74 | // ------ 75 | // Topology 76 | 77 | whoami () { 78 | return this._hashring.whoami() 79 | } 80 | 81 | leaderForPartition (partition) { 82 | return this._hashring._hashring.lookup(partition).id 83 | } 84 | 85 | nodesForPartition (partition, excludeSelf) { 86 | debug('%s: getting nodes for partition:', this._skiff && this._skiff.id) 87 | const leader = this._hashring._hashring.lookup(partition) 88 | debug('%s: leader for partition %s is %j', this._skiff && this._skiff.id, partition, leader.id) 89 | let nodes = [leader.id].concat(this._nextNodes(partition, this._options.redundancy, [leader.id])) 90 | if (excludeSelf) { 91 | const self = this.whoami() 92 | nodes = nodes.filter(n => n !== self) 93 | } 94 | return nodes 95 | } 96 | 97 | _nextNodes (partition, count, _exclude) { 98 | let node 99 | const nodes = [] 100 | const exclude = _exclude.slice() 101 | do { 102 | node = this._hashring._hashring.next(partition, exclude) 103 | if (node) { 104 | nodes.push(node.id) 105 | exclude.push(node.id) 106 | } 107 | } while (nodes.length < count && node) 108 | 109 | 
debug('next nodes for partition %s are: %j', partition, nodes) 110 | 111 | return nodes 112 | } 113 | 114 | ensureRemotePartition (partition, peer, peers, done) { 115 | debug('adding remote partition %s to %s', partition, peer) 116 | const self = this 117 | let tries = 0 118 | 119 | if (peer === this.whoami()) { 120 | done(null, this._borough.localPartitionSubnodeAddress(partition)) 121 | return 122 | } 123 | 124 | tryEnsuring() 125 | 126 | function tryEnsuring () { 127 | if (self.nodesForPartition(partition).indexOf(peer) >= 0) { 128 | tries++ 129 | self._connections.request( 130 | peer, 131 | { 132 | cmd: 'ensure partition', 133 | partition, 134 | peers 135 | }, 136 | { timeout: self._options.remotePartitionAddressTimeoutMS }, 137 | (err, address) => { 138 | if (err) { 139 | self.emit('warning', err) 140 | if (tries < self._options.remotePartitionAddressMaxRetries) { 141 | timers.setTimeout(tryEnsuring, self._options.remotePartitionAddressRetryMS) 142 | } else { 143 | done(err) 144 | } 145 | } else { 146 | done(err, address) 147 | } 148 | }) 149 | } 150 | } 151 | } 152 | 153 | _onLocalEnsurePartitionRequest (req, reply) { 154 | const peers = req.peers 155 | const localAddress = this._borough.localPartitionSubnodeAddress(req.partition) 156 | if (peers.indexOf(localAddress) < 0) { 157 | peers.push(localAddress) 158 | } 159 | 160 | this._borough.partitionSubnode( 161 | req.partition, 162 | { 163 | forceRemotes: false, 164 | peers: req.peers 165 | }, 166 | (err, subnode) => { 167 | if (err) { 168 | reply(err) 169 | } else { 170 | reply(null, subnode.id) 171 | } 172 | } 173 | ) 174 | } 175 | 176 | remotePartitionAddress (partition, peer, done) { 177 | debug('adding remote partition %s to %s', partition, peer) 178 | const self = this 179 | let tries = 0 180 | 181 | if (peer === this.whoami()) { 182 | done(null, this._borough.localPartitionSubnodeAddress(partition)) 183 | return 184 | } 185 | 186 | debug('%s: getting remote partition address for peer %j and partition %j', this._borough.whoami(), peer, partition) 187 | 188 | tryQuerying() 189 | 190 | function tryQuerying () { 191 | tries++ 192 | self._connections.request( 193 | peer, 194 | { 195 | cmd: 'partition address', 196 | partition 197 | }, 198 | { timeout: self._options.remotePartitionAddressTimeoutMS }, 199 | (err, address) => { 200 | if (err) { 201 | self.emit('warning', err) 202 | if (tries < self._options.remotePartitionAddressMaxRetries) { 203 | timers.setTimeout(tryQuerying, self._options.remotePartitionAddressRetryMS) 204 | } else { 205 | done(err) 206 | } 207 | } else { 208 | done(err, address) 209 | } 210 | } 211 | ) 212 | } 213 | } 214 | 215 | _onLocalPartitionAddressRequest (req, reply) { 216 | debug('%s: getting local partition address for partition %j', this._borough.whoami(), req.partition) 217 | reply(null, this._borough.localPartitionSubnodeAddress(req.partition)) 218 | } 219 | 220 | ping (peer, done) { 221 | this._connections.request(peer, { cmd: 'ping' }, { timeout: 1000 }, done) 222 | } 223 | 224 | _onLocalPing (req, reply) { 225 | reply(null, { ok: true }) 226 | } 227 | 228 | _onLocalInfoRequest (req, reply) { 229 | this._borough.localPartitionInfo(req.key, reply) 230 | } 231 | 232 | // ------ 233 | // Commands 234 | 235 | command (partition, command, done) { 236 | debug('command (partition: %s, command: %j)', partition, command) 237 | this._hashring.request( 238 | { 239 | key: partition, 240 | cmd: command 241 | }, 242 | done) 243 | } 244 | 245 | _onLocalCommandRequest (req, reply) { 246 | debug('local request 
(command: %j)', req.cmd) 247 | this._borough.localCommand(req.key, req.cmd, reply) 248 | } 249 | 250 | userRequest (partition, req, done) { 251 | const self = this 252 | let retries = 0 253 | this._userRequest(partition, req, handleResult) 254 | 255 | function handleResult (err, result) { 256 | if (err) { 257 | if (err.message.match(/not the leader/i) && retries < self._options.maxRetries) { 258 | retries++ 259 | timers.setTimeout(() => { 260 | self._userRequest(partition, req, handleResult) 261 | }, self._options.retryWaitMS) 262 | } else { 263 | done(err) 264 | } 265 | } else { 266 | done(err, result) 267 | } 268 | } 269 | } 270 | 271 | _userRequest (partition, req, done) { 272 | debug('user request (partition = %j, req = %j)', partition, req) 273 | this._hashring.request( 274 | { 275 | key: partition, 276 | cmd: 'user request', 277 | req 278 | }, 279 | done) 280 | } 281 | 282 | _onLocalUserRequest (req, reply) { 283 | debug('local user request %j', req) 284 | this._borough.localUserRequest(req.key, req.req, reply) 285 | } 286 | 287 | _onLocalReadStream (req, reply) { 288 | debug('answering read stream, req = %j', req) 289 | this._borough.localReadStream(req.key, req.cmd.options, reply) 290 | } 291 | } 292 | 293 | module.exports = Cluster 294 | -------------------------------------------------------------------------------- /lib/default-options.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const networkAddress = require('network-address') 4 | 5 | module.exports = { 6 | base: [], 7 | address: { 8 | host: networkAddress() 9 | }, 10 | cluster: { 11 | redundancy: 2, 12 | name: 'borough', 13 | upring: { 14 | logLevel: 'error', 15 | hashring: {} 16 | }, 17 | requestTimeoutMS: 5000, 18 | retryOnWarningMS: 500, 19 | remotePartitionAddressTimeoutMS: 10000, 20 | remotePartitionAddressRetryMS: 500, 21 | remotePartitionAddressMaxRetries: 10, 22 | retryWaitMS: 200 23 | }, 24 | subnode: { 25 | skiff: { 26 | rpcTimeoutMS: 30000 27 | }, 28 | quitter: { 29 | pollTimeoutMS: 4000 30 | }, 31 | peers: [], 32 | retryMS: 2000, 33 | maxRetries: 10, 34 | weakenWhenCandidateAndLeaderIsUpDurationMS: 1000, 35 | maybeQuitIntervalMS: 5000 36 | }, 37 | secondarySubnodeWeakenAtStartupMS: 2000 38 | } 39 | -------------------------------------------------------------------------------- /lib/iterator.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('borough:iterator') 4 | const AbstractIterator = require('abstract-leveldown').AbstractIterator 5 | const through = require('through2') 6 | 7 | class Iterator extends AbstractIterator { 8 | 9 | constructor (db, cluster, partition, options) { 10 | debug('%s: creating iterator for partition %s', cluster.whoami(), partition) 11 | super(db) 12 | this._stream = through.obj() 13 | 14 | const req = { 15 | type: 'read stream', 16 | options 17 | } 18 | 19 | debug('sending cluster command %j', req) 20 | cluster.command(partition, req, (err, result) => { 21 | debug('cluster command replied', err, result) 22 | if (err) { 23 | this._stream.emit('error', err) 24 | } else { 25 | result.streams.read.pipe(this._stream) 26 | } 27 | }) 28 | } 29 | 30 | _next (callback, cleanup) { 31 | debug('_next') 32 | if (cleanup) { 33 | cleanup() 34 | } 35 | this._tryRead(callback) 36 | } 37 | 38 | _tryRead (callback) { 39 | debug('tryRead') 40 | // const callback = once(callback) 41 | const self = this 42 | let onReadable 43 | const item = 
this._stream.read()
44 | 
45 |     if (item) {
46 |       debug('have item: %j', item)
47 |       callback(null, item.key, item.value)
48 |     } else {
49 |       onReadable = this._next.bind(this, callback, cleanup)
50 |       debug('waiting for readable..')
51 |       this._stream.on('readable', onReadable)
52 |       this._stream.on('end', callbackAndCleanup)
53 |       this._stream.on('error', callbackAndCleanup)
54 |     }
55 | 
56 |     function cleanup () {
57 |       self._stream.removeListener('readable', onReadable)
58 |       self._stream.removeListener('end', callbackAndCleanup)
59 |       self._stream.removeListener('error', callbackAndCleanup)
60 |     }
61 | 
62 |     function callbackAndCleanup (err) {
63 |       cleanup()
64 |       callback(err)
65 |     }
66 |   }
67 | 
68 |   _end (callback) {
69 |     if (this._stream) {
70 |       this._stream.end(callback)
71 |     } else {
72 |       process.nextTick(callback)
73 |     }
74 |   }
75 | }
76 | 
77 | module.exports = Iterator
--------------------------------------------------------------------------------
/lib/partition.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const debug = require('debug')('borough:partition')
4 | const AbstractLevelDown = require('abstract-leveldown').AbstractLevelDOWN
5 | 
6 | class Partition extends AbstractLevelDown {
7 |   constructor (name, borough) {
8 |     super(name)
9 |     this.name = name
10 |     this._borough = borough
11 |   }
12 | 
13 |   info (done) {
14 |     this._borough.remoteCommand(this.name, 'info', done)
15 |   }
16 | 
17 |   // AbstractLevelDown
18 | 
19 |   _close (done) {
20 |     // do nothing
21 |     process.nextTick(done)
22 |   }
23 | 
24 |   _get (key, options, done) {
25 |     debug('get %j', key)
26 |     this._borough.remoteCommand(this.name, { type: 'get', key }, done)
27 |   }
28 | 
29 |   _put (key, value, options, done) {
30 |     debug('put %j, %j', key, value)
31 |     this._borough.remoteCommand(this.name, { type: 'put', key, value }, done)
32 |   }
33 | 
34 |   _del (key, options, done) {
35 |     debug('del %j', key)
36 |     this._borough.remoteCommand(this.name, { type: 'del', key }, done)
37 |   }
38 | 
39 |   _batch (array, options, done) {
40 |     debug('batch %j', array)
41 |     this._borough.remoteCommand(this.name, { type: 'batch', array }, done)
42 |   }
43 | 
44 |   _iterator (options) {
45 |     return this._borough.iterator(this, this.name, options)
46 |   }
47 | 
48 | }
49 | 
50 | module.exports = Partition
--------------------------------------------------------------------------------
/lib/request.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const Partition = require('./partition')
4 | 
5 | class Request {
6 |   constructor (partition, body, borough) {
7 |     this.partition = partition
8 |     this.body = body
9 |     this._borough = borough
10 |   }
11 | 
12 |   otherPartition (partition) {
13 |     return new Partition(partition, this._borough)
14 |   }
15 | 
16 | }
17 | 
18 | module.exports = Request
--------------------------------------------------------------------------------
/lib/subnode-topology.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const debug = require('debug')('borough:subnode:topology')
4 | const async = require('async')
5 | const EventEmitter = require('events')
6 | const timers = require('timers')
7 | 
8 | class SubnodeTopology extends EventEmitter {
9 | 
10 |   constructor (subnode, cluster, options) {
11 |     super()
12 | 
13 |     this._subnode = subnode
14 |     this._cluster = cluster
15 |     this._options = options
16 | 
17 |     this._needsAnotherUpdate = false
18 |     this._peers = (options.peers || [])
19 | 
20 | 
this._updating = false 21 | this._needsUpdate = false 22 | 23 | this._leaving = [] 24 | this._joining = [] 25 | } 26 | 27 | start () { 28 | // nothing to start 29 | } 30 | 31 | stop () { 32 | // nothing to stop 33 | } 34 | 35 | topologyUpdated (force) { 36 | debug('%s: topology updated, force = %j', this._subnode.id) 37 | const partition = this._subnode.partition() 38 | const leader = this._cluster.leaderForPartition(partition) 39 | const amILeader = leader === this._cluster.whoami() 40 | debug('%s: leader for partition: %s: %s. am I leader? : %j', this._subnode.id, partition, leader, amILeader) 41 | if (force || amILeader) { 42 | debug('%s: I should be the leader of partition %s', this._subnode.id, partition) 43 | this._updateTopology() 44 | } 45 | } 46 | 47 | _updateTopology () { 48 | debug('%s: update topology', this._subnode.id) 49 | 50 | const self = this 51 | 52 | if (this._updating) { 53 | this._needsUpdate = true 54 | } else { 55 | debug('%s: update topology', this._subnode.id) 56 | this._updating = true 57 | this._needsUpdate = false 58 | 59 | const peerNodes = this._cluster.nodesForPartition(this._subnode.partition()) 60 | debug('%s: peer nodes are: %j', this._subnode.id, peerNodes) 61 | debug('%s: going to ensure partition on peer nodes %j', this._subnode.id, peerNodes) 62 | 63 | async.map(peerNodes, this._ensurePartition.bind(this), (err, peers) => { 64 | debug('%s: ensured partition on peer nodes %j, remote addresses are %j', this._subnode.id, peerNodes, peers) 65 | if (err) { 66 | handleError(err) 67 | return 68 | } 69 | debug('%s: partition subnode peers are: %j', this._subnode.id, peers) 70 | this._setPeers(peers, err => { 71 | this._updating = false 72 | if (err) { 73 | handleError(err) 74 | } 75 | if (this._needsUpdate) { 76 | this._updateTopology() 77 | } 78 | }) 79 | }) 80 | } 81 | 82 | function handleError (err) { 83 | debug(err) 84 | self._updating = false 85 | self._needsAnotherUpdate = true 86 | self._retryLater() 87 | self.emit('warning', err) 88 | } 89 | } 90 | 91 | _setPeers (peers, done) { 92 | // TODO: only join, don't process leaves here 93 | debug('%s: set peers to %j', this._subnode.id, peers) 94 | const changes = this._calculateChanges(peers) 95 | this._applyChanges(changes, done) 96 | } 97 | 98 | _calculateChanges (peers) { 99 | let changes = [] 100 | 101 | const joins = peers.filter(p => this._peers.indexOf(p) < 0) 102 | debug('%s: joins: %j', this._subnode.id, joins) 103 | changes = changes.concat(joins.map(peer => { 104 | return {type: 'join', peer} 105 | })) 106 | 107 | const leaves = this._peers.filter(p => peers.indexOf(p) < 0) 108 | debug('%s: leaves: %j', this._subnode.id, leaves) 109 | changes = changes.concat(leaves.map(peer => { 110 | return {type: 'leave', peer} 111 | })) 112 | 113 | return changes 114 | } 115 | 116 | _applyChanges (changes, done) { 117 | debug('%s: applying changes: %j', this._subnode.id, changes) 118 | async.eachSeries(changes, this._applyChange.bind(this), done) 119 | } 120 | 121 | _applyChange (change, done) { 122 | switch (change.type) { 123 | case 'leave': 124 | this._applyLeave(change.peer, done) 125 | break 126 | case 'join': 127 | this._applyJoin(change.peer, done) 128 | break 129 | } 130 | } 131 | 132 | _applyLeave (peer, done) { 133 | debug('%s: going to apply leave to %s', this._subnode.id, peer) 134 | if (this._leaving.indexOf(peer) < 0) { 135 | this._leaving.push(peer) 136 | this._subnode.leave(peer, err => { 137 | this._leaving = this._leaving.filter(addr => addr !== peer) 138 | done(err) 139 | }) 140 | } else { 
141 | process.nextTick(done) 142 | } 143 | } 144 | 145 | _applyJoin (peer, done) { 146 | if (this._joining.indexOf(peer) < 0) { 147 | debug('%s: going to apply join to %s', this._subnode.id, peer) 148 | this._joining.push(peer) 149 | this._subnode.join(peer, err => { 150 | this._joining = this._joining.filter(addr => addr !== peer) 151 | done(err) 152 | }) 153 | } else { 154 | process.nextTick(done) 155 | } 156 | } 157 | 158 | peerJoined (peer) { 159 | debug('%s: peer %s has joined', this._subnode.id, peer) 160 | if ((this._peers.indexOf(peer) < 0)) { 161 | this._peers = this._peers.concat(peer) 162 | } 163 | } 164 | 165 | peerLeft (peer) { 166 | debug('%s: peer %s left', this._subnode.id, peer) 167 | this._peers = this._peers.filter(p => p !== peer) 168 | } 169 | 170 | _retryLater () { 171 | timers.setTimeout(this._updateTopology.bind(this), this._options.retryMS) 172 | } 173 | 174 | _getPartitionAddress (node, done) { 175 | this._cluster.remotePartitionAddress( 176 | this._subnode.partition(), 177 | node, 178 | done) 179 | } 180 | 181 | _ensurePartition (node, done) { 182 | this._cluster.ensureRemotePartition( 183 | this._subnode.partition(), 184 | node, 185 | this._peers, 186 | done) 187 | } 188 | 189 | _warningOnError (cb) { 190 | return (err, result) => { 191 | if (err) { 192 | this.emit('warning', err) 193 | } 194 | if (cb) { 195 | cb(null, result) 196 | } 197 | } 198 | } 199 | } 200 | 201 | module.exports = SubnodeTopology 202 | -------------------------------------------------------------------------------- /lib/subnode.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('borough:subnode') 4 | const async = require('async') 5 | const EventEmitter = require('events') 6 | const Skiff = require('skiff') 7 | const IteratorStream = require('level-iterator-stream') 8 | const Topology = require('./subnode-topology') 9 | 10 | class Subnode extends EventEmitter { 11 | 12 | constructor (borough, address, partition, network, cluster, options) { 13 | debug('creating subnode with address %j for partition %s, with peers %j', address, partition, options.peers) 14 | super() 15 | this.id = idFromAddress(address, partition) 16 | this._borough = borough 17 | this._address = address 18 | this._partition = partition 19 | this._network = network 20 | this._cluster = cluster 21 | this._options = options 22 | this._startState = 'stopped' 23 | 24 | this._topology = new Topology(this, cluster, options) 25 | this._topology.on('warning', this.emit.bind(this, 'warning')) 26 | this._topology.start() 27 | } 28 | 29 | partition () { 30 | return this._partition 31 | } 32 | 33 | // ------ 34 | // State 35 | 36 | is (state) { 37 | return this._skiff && this._skiff.is(state) 38 | } 39 | 40 | // ------ 41 | // Start and stop 42 | 43 | start (options, done) { 44 | if (!done && (typeof options === 'function')) { 45 | done = options 46 | options = {} 47 | } 48 | debug('starting subnode for partition %s with options %j', this._partition, options) 49 | if (this._startState === 'stopped') { 50 | this._startState = 'starting' 51 | this._start(options, err => { 52 | if (err) { 53 | this._startState = 'stopped' 54 | done(err) 55 | } else { 56 | this._startState = 'started' 57 | this.emit('started') 58 | done() 59 | } 60 | }) 61 | } else if (this._startState === 'started') { 62 | process.nextTick(done) 63 | } else if (this._startState === 'starting') { 64 | this.once('started', done) 65 | } 66 | 67 | if (options.forceRemotes) { 68 | 
this.topologyUpdated(true) 69 | } 70 | } 71 | 72 | _start (options, done) { 73 | const skiffOptions = Object.assign({}, this._options.skiff, { 74 | peers: this._options.peers, 75 | network: this._network 76 | }) 77 | debug('%s: creating skiff with peers %j', this.id, skiffOptions.peers) 78 | 79 | this._skiff = Skiff(idFromAddress(this._address, this._partition), skiffOptions) 80 | this._skiff.on('warning', this.emit.bind(this, 'warning')) 81 | this._skiff.on('new state', this._onNewState.bind(this)) 82 | this._skiff.on('joined', this._peerJoined.bind(this)) 83 | this._skiff.on('left', this._peerLeft.bind(this)) 84 | 85 | // skip sync new follower state 86 | async.series( 87 | [ 88 | this._skiff.start.bind(this._skiff), 89 | done => { 90 | this._waitForState(options.waitForState, done) 91 | if (options.waitForState === 'weakened') { 92 | this._skiff.weaken(options.weakenDurationMS) 93 | } 94 | } 95 | ], done) 96 | } 97 | 98 | _waitForState (states, done) { 99 | const self = this 100 | const skiff = this._skiff 101 | if (!Array.isArray(states)) { 102 | states = [states] 103 | } 104 | const currentState = skiff._node._stateName 105 | debug('%s: current state for %s is %s', this.id, this.id, currentState) 106 | if (states.find(s => skiff.is(s))) { 107 | process.nextTick(done) 108 | } else { 109 | skiff.on('new state', onStateChange) 110 | } 111 | 112 | function onStateChange (state) { 113 | if (states.indexOf(state) >= 0) { 114 | debug('%s: _waitForState: %s reached state %s', self.id, self.id, state) 115 | skiff.removeListener('new state', onStateChange) 116 | done() 117 | } 118 | } 119 | } 120 | 121 | _onNewState (state) { 122 | debug('new state for node %s: %s', this.id, state) 123 | if (state === 'candidate') { 124 | this._maybeWeaken() 125 | } else if (state === 'leader') { 126 | this._topology.topologyUpdated() 127 | } 128 | this.emit('new state', state) 129 | } 130 | 131 | _maybeWeaken () { 132 | // I'm a candidate 133 | // let's see if I'm the supposed leader 134 | debug('%s: maybe weaken?', this.id) 135 | const leader = this._cluster.leaderForPartition(this._partition) 136 | if (leader !== this._cluster.whoami()) { 137 | debug('%s: i should not be the leader', this.id) 138 | // I shouldn't be the leader. Let's see if the leader is up.. 139 | this._isLeaderUp((err, isUp) => { 140 | if (err) { 141 | this.emit('warning', err) 142 | } else { 143 | debug('%s: leader is up', this.id) 144 | // Am I still a candidate and is the leader up? 
145 | if (this.is('candidate') && isUp) { 146 | debug('%s: leader is up and it\'s not me: weakening myself', this.id) 147 | this._skiff.weaken(this._options.weakenWhenCandidateAndLeaderIsUpDurationMS) 148 | } 149 | } 150 | }) 151 | } 152 | } 153 | 154 | _isLeaderUp (done) { 155 | this._cluster.ping(this._cluster.leaderForPartition(this._partition), (err) => { 156 | done(err, !err) 157 | }) 158 | } 159 | 160 | stop (done) { 161 | debug('%s: stopping', this.id) 162 | this._topology.stop() 163 | if (this._skiff) { 164 | this._skiff.stop(done) 165 | } else { 166 | process.nextTick(done) 167 | } 168 | } 169 | 170 | // ------ 171 | // Commands 172 | 173 | command (command, done) { 174 | if (command.type === 'batch') { 175 | command = command.array 176 | } 177 | debug('%s: command: %j', this.id, command) 178 | if (!this._skiff) { 179 | throw new Error('must start before') 180 | } 181 | this._skiff.command(command, done) 182 | } 183 | 184 | // ------ 185 | // Topology 186 | 187 | topologyUpdated (force) { 188 | this._topology.topologyUpdated(force) 189 | } 190 | 191 | join (peer, done) { 192 | debug('%s: skiff.join %s', this.id, peer) 193 | this._skiff.join(peer, done) 194 | } 195 | 196 | leave (peer, done) { 197 | debug('%s: skiff.leave %s', this.id, peer) 198 | this._skiff.leave(peer, done) 199 | } 200 | 201 | _peerLeft (peer) { 202 | debug('%s: peer %s left', this.id, peer) 203 | this._topology.peerLeft(peer) 204 | if (this.id === peer) { 205 | this._leftSelf() 206 | } 207 | } 208 | 209 | _leftSelf () { 210 | this._borough.leavePartition(this._partition, this._warningOnError()) 211 | } 212 | 213 | _peerJoined (peer) { 214 | debug('%s: peer %s joined', this.id, peer) 215 | this._topology.peerJoined(peer) 216 | } 217 | 218 | _warningOnError (cb) { 219 | return err => { 220 | if (err) { 221 | debug('warning:', err.message) 222 | this.emit('warning', err) 223 | } 224 | if (cb) { 225 | cb() 226 | } 227 | } 228 | } 229 | 230 | // ------ 231 | // Info 232 | 233 | info (done) { 234 | this._skiff.peers((err, peers) => { 235 | if (err) { 236 | done(err) 237 | } else { 238 | done(null, { 239 | source: this.id, 240 | peers 241 | }) 242 | } 243 | }) 244 | } 245 | 246 | // ----- 247 | // Read stream 248 | 249 | readStream (options) { 250 | debug('%s: creating read stream with options %j', this.id, options) 251 | return new IteratorStream(this._skiff.leveldown().iterator(options)) 252 | } 253 | } 254 | 255 | Subnode.idFromAddress = idFromAddress 256 | 257 | function idFromAddress (address, partition) { 258 | if ((typeof address.host !== 'string') || (typeof address.port !== 'number')) { 259 | throw new Error('invalid address: ' + JSON.stringify(address)) 260 | } 261 | const addr = `/ip4/${address.host}/tcp/${address.port}/p/${partition}` 262 | return addr 263 | } 264 | 265 | module.exports = Subnode 266 | -------------------------------------------------------------------------------- /lib/timeout-error.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | class TimeoutError extends Error { 4 | constructor (peer, payload) { 5 | super(`operation timed out talking to ${peer}, payload is ${JSON.stringify(payload)}`) 6 | this.code = 'ETIMEOUT' 7 | } 8 | } 9 | 10 | module.exports = TimeoutError 11 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "borough", 3 | "version": "0.1.0", 4 | "description": "Borough", 5 | 
"main": "borough.js", 6 | "scripts": { 7 | "test": "lab test/borough-one-node.js && lab test/borough-cluster-comms.js && lab test/borough-cluster-partition-data.js && lab test/borough-other-partition-data.js && lab test/borough-topology-changes.js", 8 | "style": "eslint ." 9 | }, 10 | "repository": { 11 | "type": "git", 12 | "url": "git+https://github.com/pgte/borough.git" 13 | }, 14 | "author": "pgte", 15 | "bugs": { 16 | "url": "https://github.com/pgte/borough/issues" 17 | }, 18 | "homepage": "https://github.com/pgte/borough#readme", 19 | "devDependencies": { 20 | "code": "^4.0.0", 21 | "concat-stream": "^1.5.2", 22 | "eslint": "^3.6.0", 23 | "eslint-config-standard": "^6.2.1", 24 | "eslint-plugin-promise": "^3.3.1", 25 | "eslint-plugin-standard": "^2.0.0", 26 | "lab": "^11.0.1", 27 | "memdown": "^1.2.2", 28 | "pre-commit": "^1.1.3" 29 | }, 30 | "dependencies": { 31 | "abstract-leveldown": "^2.6.1", 32 | "async": "^2.0.1", 33 | "clone-deep": "^0.2.4", 34 | "debounce": "^1.0.0", 35 | "debug": "^2.2.0", 36 | "deepmerge": "^0.2.10", 37 | "freeport": "^1.0.5", 38 | "level-iterator-stream": "^1.3.1", 39 | "multiaddr": "^2.0.3", 40 | "network-address": "^1.1.0", 41 | "once": "^1.4.0", 42 | "skiff": "^1.10.0", 43 | "through2": "^2.0.1", 44 | "upring": "^0.11.0" 45 | }, 46 | "pre-commit": [ 47 | "style", 48 | "test" 49 | ], 50 | "license": "GPL" 51 | } 52 | -------------------------------------------------------------------------------- /test/borough-cluster-comms.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const Memdown = require('memdown') 12 | 13 | const Borough = require('../') 14 | 15 | describe('borough cluster comms', () => { 16 | let baseNode 17 | let nodes = [1, 2, 3, 4] 18 | 19 | before(done => { 20 | baseNode = Borough({ 21 | subnode: { 22 | skiff: { 23 | db: Memdown 24 | } 25 | } 26 | }) 27 | baseNode.start(done) 28 | }) 29 | 30 | before(done => { 31 | nodes = nodes.map((index) => Borough({ 32 | base: [baseNode.whoami()], 33 | subnode: { 34 | skiff: { 35 | db: Memdown 36 | } 37 | } 38 | })) 39 | done() 40 | }) 41 | 42 | before({timeout: 10000}, done => { 43 | async.each(nodes, (node, cb) => node.start(cb), done) 44 | }) 45 | 46 | after({timeout: 10000}, done => { 47 | async.each(nodes.concat(baseNode), (node, cb) => node.stop(cb), done) 48 | }) 49 | 50 | it('can setup a request handler', done => { 51 | nodes.concat(baseNode).forEach(node => { 52 | node.on('request', onRequest) 53 | }) 54 | done() 55 | 56 | function onRequest (req, reply) { 57 | expect(req.partition.name).to.equal('partition 1') 58 | reply(null, Object.assign({}, req.body, {reply: true})) 59 | } 60 | }) 61 | 62 | it('can make a request from a random node', {timeout: 10000}, done => { 63 | const node = nodes[nodes.length - 2] 64 | node.request('partition 1', {a: 1, b: 2}, (err, result) => { 65 | expect(err).to.be.null() 66 | expect(result).to.equal({a: 1, b: 2, reply: true}) 67 | done() 68 | }) 69 | }) 70 | }) 71 | -------------------------------------------------------------------------------- /test/borough-cluster-partition-data.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe 
= lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const Memdown = require('memdown') 12 | 13 | const Borough = require('../') 14 | 15 | describe('borough cluster partition data', () => { 16 | let baseNode 17 | let nodes = [1, 2, 3, 4] 18 | 19 | before(done => { 20 | baseNode = Borough({ 21 | subnode: { 22 | skiff: { 23 | db: Memdown 24 | } 25 | } 26 | }) 27 | baseNode.on('request', onRequest) 28 | baseNode.start(done) 29 | }) 30 | 31 | before(done => { 32 | nodes = nodes.map((index) => Borough({ 33 | base: [baseNode.whoami()], 34 | subnode: { 35 | skiff: { 36 | db: Memdown 37 | } 38 | } 39 | })) 40 | done() 41 | }) 42 | 43 | before({timeout: 10000}, done => { 44 | async.each(nodes, (node, cb) => node.start(cb), done) 45 | }) 46 | 47 | after(done => { 48 | async.each(nodes.concat(baseNode), (node, cb) => node.stop(cb), done) 49 | }) 50 | 51 | it('can setup a request handler', done => { 52 | nodes.forEach(node => { 53 | node.on('request', onRequest) 54 | }) 55 | done() 56 | }) 57 | 58 | it('can make a put request from a random node', { timeout: 10000 }, done => { 59 | const node = nodes[nodes.length - 2] 60 | node.request('partition 1', {type: 'put', key: 'a', value: 'b'}, done) 61 | }) 62 | 63 | it('can make a get request from a random node', { timeout: 10000 }, done => { 64 | const node = nodes[nodes.length - 1] 65 | node.request('partition 1', {type: 'get', key: 'a'}, (err, result) => { 66 | expect(err).to.be.null() 67 | expect(result).to.equal('b') 68 | done() 69 | }) 70 | }) 71 | }) 72 | 73 | function onRequest (req, reply) { 74 | expect(req.partition.name).to.equal('partition 1') 75 | const body = req.body 76 | if (body.type === 'put') { 77 | req.partition.put(body.key, body.value, reply) 78 | } else if (body.type === 'get') { 79 | req.partition.get(body.key, reply) 80 | } else { 81 | reply(new Error('command type not found')) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /test/borough-leveldown.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const Memdown = require('memdown') 12 | const IteratorStream = require('level-iterator-stream') 13 | const ConcatStream = require('concat-stream') 14 | 15 | const Borough = require('../') 16 | 17 | describe('borough partition leveldown interface', () => { 18 | let baseNode 19 | let nodes = [1, 2, 3, 4] 20 | 21 | before(done => { 22 | baseNode = Borough({ 23 | subnode: { 24 | skiff: { 25 | db: Memdown 26 | } 27 | } 28 | }) 29 | baseNode.on('request', onRequest) 30 | baseNode.start(done) 31 | }) 32 | 33 | before(done => { 34 | nodes = nodes.map((index) => Borough({ 35 | base: [baseNode.whoami()], 36 | subnode: { 37 | skiff: { 38 | db: Memdown 39 | } 40 | } 41 | })) 42 | done() 43 | }) 44 | 45 | before({timeout: 10000}, done => { 46 | async.each(nodes, (node, cb) => node.start(cb), done) 47 | }) 48 | 49 | before(done => { 50 | nodes.forEach(node => { 51 | node.on('request', onRequest) 52 | }) 53 | done() 54 | }) 55 | 56 | after(done => { 57 | async.each(nodes.concat(baseNode), (node, cb) => node.stop(cb), done) 58 | }) 59 | 60 | it('can make a put request 
from a random node', done => { 61 | const node = nodes[nodes.length - 4] 62 | node.request('partition 1', {type: 'put', key: 'a', value: 'b'}, done) 63 | }) 64 | 65 | it('can make a get request from a random node', done => { 66 | const node = nodes[nodes.length - 3] 67 | node.request('partition 1', {type: 'get', key: 'a'}, (err, result) => { 68 | expect(err).to.be.null() 69 | expect(result).to.equal('b') 70 | done() 71 | }) 72 | }) 73 | 74 | it('can make a del request from a random node', done => { 75 | const node = nodes[nodes.length - 2] 76 | node.request('partition 1', { type: 'del', key: 'a' }, (err, result) => { 77 | expect(!err).to.be.true() 78 | done() 79 | }) 80 | }) 81 | 82 | it('can make a get request from a random node', done => { 83 | const node = nodes[nodes.length - 1] 84 | node.request('partition 1', { type: 'get', key: 'a' }, (err, result) => { 85 | expect(!!err).to.be.true() 86 | expect(err.message).to.equal('Key not found in database') 87 | done() 88 | }) 89 | }) 90 | 91 | it('can make a batch request from a random node', done => { 92 | const node = nodes[nodes.length - 4] 93 | const array = [ 94 | { type: 'put', key: 'a', value: 'c' }, 95 | { type: 'put', key: 'b', value: 'd' }, 96 | { type: 'put', key: 'c', value: 'e' } 97 | ] 98 | node.request('partition 1', { type: 'batch', array }, (err, result) => { 99 | expect(!err).to.be.true() 100 | done() 101 | }) 102 | }) 103 | 104 | it('can make a get request from a random node', done => { 105 | const node = nodes[nodes.length - 3] 106 | node.request('partition 1', {type: 'get', key: 'a'}, (err, result) => { 107 | expect(!err).to.be.true() 108 | expect(result).to.equal('c') 109 | done() 110 | }) 111 | }) 112 | 113 | it('can make a get request from a random node', done => { 114 | const node = nodes[nodes.length - 2] 115 | node.request('partition 1', {type: 'get', key: 'c'}, (err, result) => { 116 | expect(!err).to.be.true() 117 | expect(result).to.equal('e') 118 | done() 119 | }) 120 | }) 121 | 122 | it('can create iterator', done => { 123 | const node = nodes[nodes.length - 1] 124 | node.request('partition 1', { type: 'read stream' }, (err, reply) => { 125 | expect(!err).to.be.true() 126 | reply.streams.read.pipe(ConcatStream(results => { 127 | expect(results).to.equal([ 128 | {key: 'a', value: 'c'}, 129 | {key: 'b', value: 'd'}, 130 | {key: 'c', value: 'e'}]) 131 | done() 132 | })) 133 | }) 134 | }) 135 | }) 136 | 137 | function onRequest (req, reply) { 138 | expect(req.partition.name).to.equal('partition 1') 139 | const body = req.body 140 | const key = body.key 141 | const value = body.value 142 | if (body.type === 'put') { 143 | req.partition.put(key, value, reply) 144 | } else if (body.type === 'get') { 145 | req.partition.get(key, reply) 146 | } else if (body.type === 'del') { 147 | req.partition.del(key, reply) 148 | } else if (body.type === 'batch') { 149 | req.partition.batch(body.array, reply) 150 | } else if (body.type === 'read stream') { 151 | reply(null, { 152 | streams: { 153 | read: IteratorStream(req.partition.iterator(body.options)) 154 | } 155 | }) 156 | } else { 157 | reply(new Error('command type not found')) 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /test/borough-one-node.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const it = lab.it 7 | const expect = 
require('code').expect 8 | 9 | const Memdown = require('memdown') 10 | const timers = require('timers') 11 | 12 | const Borough = require('../') 13 | 14 | describe('borough one node cluster', () => { 15 | let node 16 | 17 | it('allows you to create a node with no options', done => { 18 | node = Borough({subnode: { skiff: { db: Memdown } }}) 19 | done() 20 | }) 21 | 22 | it('can start the node', done => node.start(done)) 23 | 24 | before({timeout: 5000}, done => timers.setTimeout(done, 4000)) 25 | 26 | it('can set up a request handler', done => { 27 | node.on('request', (req, reply) => { 28 | expect(req.partition.name).to.equal('partition 1') 29 | expect(req.body).to.equal({a: 1, b: 2}) 30 | reply(null, Object.assign({}, req.body, {replied: true})) 31 | }) 32 | done() 33 | }) 34 | 35 | it('can perform a request', done => { 36 | node.request('partition 1', {a: 1, b: 2}, (err, reply) => { 37 | expect(err).to.be.null() 38 | expect(reply).to.equal({a: 1, b: 2, replied: true}) 39 | done() 40 | }) 41 | }) 42 | }) 43 | -------------------------------------------------------------------------------- /test/borough-other-partition-data.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const Memdown = require('memdown') 12 | 13 | const Borough = require('../') 14 | 15 | describe('borough other partition data', () => { 16 | let baseNode 17 | let nodes = [1, 2, 3, 4] 18 | 19 | before(done => { 20 | baseNode = Borough({ 21 | subnode: { 22 | skiff: { 23 | db: Memdown 24 | } 25 | } 26 | }) 27 | baseNode.start(done) 28 | }) 29 | 30 | before(done => { 31 | nodes = nodes.map(index => Borough({ 32 | base: [baseNode.whoami()], 33 | subnode: { 34 | skiff: { 35 | db: Memdown 36 | } 37 | } 38 | })) 39 | done() 40 | }) 41 | 42 | before({timeout: 10000}, done => { 43 | async.each(nodes, (node, cb) => node.start(cb), done) 44 | }) 45 | 46 | after(done => { 47 | async.each(nodes.concat(baseNode), (node, cb) => node.stop(cb), done) 48 | }) 49 | 50 | it('can set up a request handler', done => { 51 | nodes.concat(baseNode).forEach(node => { 52 | node.on('request', onRequest) 53 | }) 54 | done() 55 | 56 | function onRequest (req, reply) { 57 | const body = req.body 58 | const part = body.otherPartition ?
req.otherPartition(body.otherPartition) : req.partition // route to the other partition when the request names one 59 | if (body.type === 'put') { 60 | part.put(body.key, body.value, reply) 61 | } else if (body.type === 'get') { 62 | part.get(body.key, reply) 63 | } else { 64 | reply(new Error('command type not found')) 65 | } 66 | } 67 | }) 68 | 69 | it('can make a put request on partition 1 from a random node', {timeout: 10000}, done => { 70 | const node = nodes[nodes.length - 2] 71 | node.request( 72 | 'partition 1', 73 | { 74 | type: 'put', 75 | key: 'a', 76 | value: 'b', 77 | otherPartition: 'partition 2' 78 | }, 79 | done) 80 | }) 81 | 82 | it('can make a put request on partition 2 from a random node', done => { 83 | const node = nodes[nodes.length - 2] 84 | node.request( 85 | 'partition 2', 86 | { 87 | type: 'put', 88 | key: 'c', 89 | value: 'd', 90 | otherPartition: 'partition 1' 91 | }, done) 92 | }) 93 | 94 | it('data got stored in the correct partition', done => { 95 | const node = nodes[nodes.length - 1] 96 | node.request('partition 1', {type: 'get', key: 'c'}, (err, result) => { 97 | expect(err).to.be.null() 98 | expect(result).to.equal('d') 99 | done() 100 | }) 101 | }) 102 | 103 | it('data did not get stored in the wrong partition', done => { 104 | const node = nodes[nodes.length - 1] 105 | node.request('partition 2', {type: 'get', key: 'c'}, (err, result) => { 106 | expect(err.message).to.match(/key not found in database/i) 107 | done() 108 | }) 109 | }) 110 | }) 111 | -------------------------------------------------------------------------------- /test/borough-topology-changes.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const Memdown = require('memdown') 11 | const timers = require('timers') 12 | const async = require('async') 13 | 14 | const Borough = require('../') 15 | 16 | const CLIENT_TIMEOUT_MS = 10000 // TODO: take this down 17 | 18 | describe('borough cluster topology changes', () => { 19 | let working = true 20 | let baseNode 21 | let nodes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] 22 | let peerCount 23 | let counter = 0 24 | 25 | function onRequest (req, reply) { 26 | const part = req.partition 27 | expect(part.name).to.equal('partition 1') 28 | const body = req.body 29 | if (body.type === 'put') { 30 | part.put(body.key, body.value, reply) 31 | } else if (body.type === 'get') { 32 | part.get(body.key, reply) 33 | } else { 34 | reply(new Error('unknown op ' + body.type)) 35 | } 36 | } 37 | 38 | before(done => { 39 | baseNode = Borough({ 40 | subnode: { 41 | skiff: { 42 | db: Memdown 43 | } 44 | } 45 | }) 46 | baseNode.on('request', onRequest) 47 | baseNode.on('warning', logError) 48 | baseNode.start(done) 49 | }) 50 | 51 | before(done => { 52 | let lastValue 53 | 54 | const partition = baseNode.partition('partition 1') 55 | 56 | timers.setInterval(() => { 57 | partition.info((err, info) => { 58 | if (err) { 59 | console.error('Error getting info:', err.message) 60 | } else { 61 | const peers = info.subnode.peers 62 | console.log('\n%d peers.
peers: %j', peers.length, peers) 63 | } 64 | }) 65 | }, 1000) 66 | 67 | request() 68 | done() 69 | 70 | function request () { 71 | if (!working) return 72 | const timeout = timers.setTimeout(onTimeout, CLIENT_TIMEOUT_MS) 73 | const isPut = !(counter % 2) // alternate puts and gets 74 | const isGet = !isPut 75 | if (isPut) { 76 | lastValue = counter 77 | } 78 | counter++ 79 | 80 | if (isGet) { 81 | partition.get('a', (err, resp) => { 82 | timers.clearTimeout(timeout) 83 | if (err) { 84 | return handleError(err) 85 | } 86 | process.stdout.write('.') 87 | expect(err).to.be.null() 88 | expect(Number(resp)).to.equal(lastValue) 89 | process.nextTick(request) 90 | }) 91 | } else { 92 | partition.put('a', lastValue, err => { 93 | timers.clearTimeout(timeout) 94 | if (err) { 95 | return handleError(err) 96 | } 97 | process.stdout.write('.') 98 | process.nextTick(request) 99 | }) 100 | } 101 | 102 | function onTimeout () { 103 | console.error('REQUEST TIMEOUT') 104 | if (working) { 105 | throw new Error('request timeout') 106 | } 107 | } 108 | 109 | function handleError (err) { 110 | if (working) { 111 | throw err 112 | } 113 | } 114 | } 115 | }) 116 | 117 | after(done => { 118 | working = false 119 | async.parallel( 120 | [ 121 | baseNode.stop.bind(baseNode), 122 | done => { 123 | async.each(nodes, (node, done) => { 124 | if ((typeof node) === 'object') { 125 | console.log('stopping %s', node.whoami()) 126 | node.stop((err) => { 127 | console.log('stopped %s', node.whoami(), err) 128 | done(err) 129 | }) 130 | } else { 131 | done() 132 | } 133 | }, done) 134 | } 135 | ], 136 | done) 137 | }) 138 | 139 | it('can rail in nodes', {timeout: (nodes.length * 2) * 11000}, done => { 140 | async.eachSeries( 141 | nodes, 142 | (index, done) => { 143 | timers.setTimeout(() => { 144 | console.log('NEW NODE %d\n\n\n\n\n', index) 145 | const newNode = nodes[index] = Borough({ 146 | base: [baseNode.whoami()], 147 | subnode: { 148 | skiff: { 149 | db: Memdown 150 | } 151 | } 152 | }) 153 | newNode.on('request', onRequest) 154 | newNode.on('warning', logError) 155 | newNode.start(done) 156 | }, 8000) 157 | }, 158 | done) 159 | }) 160 | 161 | it('waits a bit', {timeout: 6000}, done => timers.setTimeout(done, 5000)) 162 | 163 | it('partition has only 3 nodes', done => { 164 | baseNode.partition('partition 1').info((err, info) => { 165 | expect(!err).to.be.true() 166 | expect(info.subnode.source).to.match(/^\/ip4\/.*\/p\/partition 1$/) 167 | peerCount = info.subnode.peers.length 168 | expect(peerCount).to.equal(3) 169 | done() 170 | }) 171 | }) 172 | 173 | it('a large number of requests were performed', done => { 174 | const minimum = 200 * nodes.length 175 | expect(counter).to.be.least(minimum) 176 | done() 177 | }) 178 | 179 | it('rails out nodes', {timeout: (nodes.length * 2) * 11000}, done => { 180 | let count = nodes.length + 1 181 | async.eachSeries( 182 | nodes.slice(1), 183 | (node, done) => { 184 | timers.setTimeout(() => { 185 | count-- 186 | console.log('\n\nstopping node %d...\n\n\n', count) 187 | node.stop(err => { 188 | console.log('\n\nstopped.') 189 | if (err) { 190 | done(err) 191 | } else { 192 | nodes = nodes.filter(n => n !== node) 193 | done() 194 | } 195 | }) 196 | }, 12000) 197 | }, 198 | done) 199 | }) 200 | 201 | it('waits a bit', {timeout: CLIENT_TIMEOUT_MS + 1000}, done => timers.setTimeout(done, CLIENT_TIMEOUT_MS + 500)) 202 | 203 | it('partition has only 2 nodes', done => { 204 | baseNode.partition('partition 1').info((err, info) => { 205 | expect(!err).to.be.true() 206 | peerCount = info.subnode.peers.length 207 |
expect(peerCount).to.equal(2) 208 | done() 209 | }) 210 | }) 211 | }) 212 | 213 | function logError (err) { 214 | console.error('Warning: ' + err.stack) 215 | } 216 | -------------------------------------------------------------------------------- /test/helpers/keys.js: -------------------------------------------------------------------------------- 1 | module.exports = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] 2 | --------------------------------------------------------------------------------