├── .eslintignore ├── .eslintrc.js ├── .gitignore ├── .istanbul.yml ├── .jshintrc ├── .travis.yml ├── LICENSE ├── README.md ├── lib ├── address.js ├── client.js ├── command-queue.js ├── commands.js ├── db.js ├── default-options.js ├── incoming-dispatcher.js ├── iterator.js ├── leveldown.js ├── log.js ├── network │ ├── active │ │ ├── errors.js │ │ ├── index.js │ │ ├── network.js │ │ ├── peer.js │ │ └── reconnect.js │ ├── index.js │ ├── network-node.js │ └── passive │ │ ├── index.js │ │ ├── network.js │ │ └── server.js ├── node.js ├── peer-leader.js ├── rpc.js ├── states │ ├── base.js │ ├── candidate.js │ ├── follower.js │ ├── index.js │ ├── leader.js │ └── weakened.js └── utils │ ├── batch-transform-stream.js │ ├── clear-db.js │ └── not-leader-error.js ├── package.json ├── skiff-logo.png ├── skiff.js └── test ├── active-network.js ├── election.js ├── incoming-dispatcher.js ├── leveldown.js ├── levelup.js ├── log-compaction.js ├── log-replication-catchup.js ├── log-replication.js ├── passive-network.js ├── persistence.js ├── remote-commands.js ├── resilience ├── resilience-chaos-disk.js ├── resilience-chaos-memory.js ├── resilience-order-disk.js ├── resilience-order-memory.js └── setup │ ├── client.js │ ├── index.js │ ├── node.js │ └── server.js └── weakening.js /.eslintignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | coverage 3 | -------------------------------------------------------------------------------- /.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | "extends": "standard", 3 | "plugins": [ 4 | "standard" 5 | ] 6 | }; -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | npm-debug.log 4 | coverage 5 | data 6 | Vagrantfile 7 | .vagrant 8 | -------------------------------------------------------------------------------- /.istanbul.yml: -------------------------------------------------------------------------------- 1 | instrumentation: 2 | excludes: ['test', 'node_modules'] 3 | check: 4 | global: 5 | lines: 100 6 | branches: 100 7 | statements: 100 8 | functions: 100 9 | -------------------------------------------------------------------------------- /.jshintrc: -------------------------------------------------------------------------------- 1 | { 2 | "boss": true, 3 | "node": true, 4 | "strict": true, 5 | "white": true, 6 | "smarttabs": true, 7 | "maxlen": 80, 8 | "newcap": false, 9 | "undef": true, 10 | "unused": true, 11 | "onecase": true, 12 | "indent": 2, 13 | "sub": true 14 | } -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "4" 4 | - "6" 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Pedro Teixeira 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and 
to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Skiff](skiff-logo.png) 2 | 3 | # Skiff 4 | 5 | [Raft](https://raft.github.io/) Consensus Algorithm implementation for Node.js. 6 | 7 | [![npm version](https://badge.fury.io/js/skiff.svg)](https://badge.fury.io/js/skiff) 8 | [![Build Status](https://travis-ci.org/pgte/skiff.svg?branch=master)](https://travis-ci.org/pgte/skiff) 9 | 10 | * Persists to LevelDB (or any database exposing a [LevelDown](https://github.com/level/leveldown) interface). 11 | * Exposes the cluster as a [Levelup](https://github.com/level/levelup#readme) or [Leveldown](https://github.com/level/leveldown#readme)-compatible interface, which you can extend using [the Levelup plugins](https://github.com/Level/levelup/wiki/Modules#plugins). 12 | * Encodes messages using Msgpack. 13 | 14 | ## Installation 15 | 16 | ```bash 17 | $ npm install skiff --save 18 | ``` 19 | 20 | ## Usage 21 | 22 | ```javascript 23 | const Skiff = require('skiff') 24 | 25 | const options = { 26 | db: require('memdown'), // in memory database 27 | peers: [ // peer addresses 28 | '/ip4/127.0.0.1/tcp/9491', 29 | '/ip4/127.0.0.1/tcp/9492' 30 | ] 31 | } 32 | const skiff = Skiff('/ip4/127.0.0.1/tcp/9490', options) 33 | 34 | // expose the cluster as a Levelup-compatible database 35 | const db = skiff.levelup() 36 | 37 | skiff.start(err => { 38 | if (err) { 39 | console.error('Error starting skiff node: ', err.message) 40 | } else { 41 | console.log('Skiff node started') 42 | 43 | db.put('key', 'value', (err) => { 44 | // ... 45 | }) 46 | } 47 | }) 48 | ``` 49 | 50 | # API 51 | 52 | ## Skiff (address, options) 53 | 54 | Returns a new skiff node. 55 | 56 | Arguments: 57 | 58 | * `address` (string, mandatory): an address in the [multiaddr](https://github.com/multiformats/js-multiaddr#readme) format (example: `"/ip4/127.0.0.1/tcp/5398"`). 59 | * `options` (object): 60 | * `network` (object): if you want to share the network with other skiff nodes in the same process, create a network using `Skiff.createNetwork(options)` (see below) 61 | * `server` (object): 62 | * `port` (integer): TCP port. Defaults to the port in `address`. 63 | * `host` (string): host name to bind the server to. Defaults to the host name in the `address`. 64 | * `rpcTimeoutMS` (integer, defaults to `2000`): Timeout for RPC calls. 65 | * `peers` (array of strings, defaults to `[]`): The addresses of the peers (also in the [multiaddr](https://github.com/multiformats/js-multiaddr#readme) format). 
__If the database you're using is persisted to disk (which is the default), these peers will be overridden by whatever is loaded from the latest snapshot once the node starts.__ 66 | * `levelup` (object): options passed to the internal Levelup database. Defaults to: 67 | 68 | ```javascript 69 | { 70 | keyEncoding: 'utf8', 71 | valueEncoding: 'json' 72 | } 73 | ``` 74 | 75 | * `location` (string): Location of the base directory for the leveldb files. Defaults to the `data` directory at the root of this package (not recommended). 76 | * `db` (function, defaults to the [Leveldown](https://github.com/Level/leveldown#readme) implementation): Database constructor; should return a [Leveldown](https://github.com/Level/leveldown#readme) implementation. 77 | 78 | > (You can use this to create an in-memory database using [Memdown](https://github.com/Level/memdown#readme)) 79 | 80 | * #### Advanced options 81 | 82 | * `appendEntriesIntervalMS` (integer, defaults to `100`): The interval (ms) at which a leader sends `AppendEntries` messages to the followers (ping). 83 | * `electionTimeoutMinMS` (integer, defaults to `300`): The minimum election timeout (ms) for a node: the minimum time a node waits without receiving an `AppendEntries` message before starting an election. 84 | * `electionTimeoutMaxMS` (integer, defaults to `600`): The maximum election timeout (ms) for a node: the maximum time a node waits without receiving an `AppendEntries` message before starting an election. 85 | * `installSnapshotChunkSize` (integer, defaults to `10`): The maximum number of database records in each `InstallSnapshot` message. 86 | * `batchEntriesLimit` (integer, defaults to `10`): The maximum number of log entries in an `AppendEntries` message. 87 | * `clientRetryRPCTimeout` (integer, defaults to `200`): The number of milliseconds the internal client waits before retrying a remote call. 88 | * `clientMaxRetries` (integer, defaults to `10`): The maximum number of times the client is allowed to retry the remote call. 89 | 
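To illustrate these options, here is a minimal sketch of a node created with a few of the settings documented above. The addresses, port numbers and tuning values are made up for the example, and `memdown` is assumed to be installed:

```javascript
const Skiff = require('skiff')

// a sketch only: every value below is an example, not a recommendation
const skiff = Skiff('/ip4/127.0.0.1/tcp/9490', {
  db: require('memdown'),       // keep state in memory instead of the default Leveldown
  peers: [
    '/ip4/127.0.0.1/tcp/9491',
    '/ip4/127.0.0.1/tcp/9492'
  ],
  rpcTimeoutMS: 3000,           // tolerate slower RPC round-trips
  appendEntriesIntervalMS: 50,  // leader pings followers more often
  electionTimeoutMinMS: 500,    // followers wait longer before starting an election
  electionTimeoutMaxMS: 1000
})
```

Any option you leave out falls back to the defaults listed above.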
90 | ## skiff.start (callback) 91 | 92 | Starts the node, bringing up the network server and loading any persisted state. Calls back with no arguments once started, or with an error in the first argument. 93 | 94 | ## skiff.stop (callback) 95 | 96 | Stops the node: shuts down the server, disconnects from all peers and stops all activity. Calls back once this is done, or with an error in the first argument if one is encountered. 97 | 98 | ## skiff.levelup () 99 | 100 | Returns a new [Levelup-compatible](https://github.com/level/levelup) object for you to interact with the cluster. 101 | 102 | ## skiff.leveldown () 103 | 104 | Returns a new [Leveldown-compatible](https://github.com/level/leveldown) object for you to interact with the cluster. 105 | 106 | ## skiff.join (peerAddress, callback) 107 | 108 | Adds a peer to the cluster. Calls back once the cluster reaches consensus, or with an error if no consensus can be reached. 109 | 110 | ## skiff.leave (peerAddress, callback) 111 | 112 | Removes a peer from the cluster. Calls back once the cluster reaches consensus, or with an error if no consensus can be reached. 113 | 114 | ## skiff.stats () 115 | 116 | Returns some interesting stats for this node. 117 | 118 | ## skiff.peers (callback) 119 | 120 | Invokes the error-first callback function with the cluster peers and some interesting stats from each. 121 | 122 | ## skiff.term () 123 | 124 | Returns the current term (integer). 125 | 126 | ## skiff.weaken (durationMS) 127 | 128 | Weakens the node for the given duration (ms). During this period the node transitions to a special `weakened` state, in which it does not react to election timeouts. The weakened state ends once the node learns of a new leader or the duration runs out. 129 | 130 | ## skiff.readConsensus (callback) 131 | 132 | Asks the cluster for read consensus. Calls back with an error in the first argument if it fails, or with no arguments once consensus is reached. 133 | 134 | ## Events 135 | 136 | A skiff instance emits the following events: 137 | 138 | * `started`: once the node is started (network server is up and persisted state is loaded) 139 | * `warning (err)`: if a non-fatal error was encountered 140 | * `connect (peer)`: once a leader node is connected to a peer 141 | * `disconnect (peer)`: once a leader node is disconnected from a peer 142 | * `new state (state)`: once a node changes state (possible states are `follower`, `candidate` and `leader`) 143 | * `leader`: once the node becomes the cluster leader 144 | * `joined (peerAddress)`: when a peer joins the cluster 145 | * `left (peerAddress)`: when a peer leaves the cluster 146 | * `rpc latency (ms)`: the latency of an RPC call, in milliseconds 147 | 148 | ## Skiff.createNetwork (options) 149 | 150 | Creates a network you can share amongst several Skiff nodes in the same process (see the example at the end of this README). 151 | 152 | Options: 153 | 154 | * `active` (object): 155 | * `innactivityTimeout` (integer, milliseconds, defaults to `5000`): The amount of time to wait before a client connection is closed because of inactivity. 156 | * `passive` (object): 157 | * `server` (object): 158 | * `port` (integer, defaults to `9163`): the port the server should listen on 159 | * `host` (string, defaults to `"0.0.0.0"`): the interface address the server should listen on 160 | * `exclusive` (boolean, defaults to `true`): if true, the server is not shareable with other processes (see [`Server#listen()` on Node.js docs](https://nodejs.org/api/net.html#net_server_listen_options_callback)). 161 | 162 | # Sponsors 163 | 164 | Development of Skiff is sponsored by [YLD](https://yld.io). 
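This is the example referred to in the `Skiff.createNetwork` section above: a sketch of two nodes in the same process sharing a single network. The port and the `/a` and `/b` address suffixes are assumptions made for this illustration (the suffix only serves to tell the two nodes apart on the shared server), and `memdown` is again assumed to be installed:

```javascript
const Skiff = require('skiff')

// one shared network: a single server and a single pool of connections
const network = Skiff.createNetwork({
  passive: { server: { host: '127.0.0.1', port: 9163 } }
})

const addresses = [
  '/ip4/127.0.0.1/tcp/9163/a',
  '/ip4/127.0.0.1/tcp/9163/b'
]

const nodes = addresses.map(address => Skiff(address, {
  network,                                    // share the network created above
  db: require('memdown'),                     // keep this example free of disk state
  peers: addresses.filter(a => a !== address) // every other node is a peer
}))

nodes.forEach((node, i) => {
  node.on('leader', () => console.log('node %d became the leader', i))
  node.start(err => {
    if (err) {
      console.error('error starting node %d: %s', i, err.message)
    }
  })
})
```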
165 | 166 | # License 167 | 168 | [MIT](LICENSE) 169 | 170 | # Copyright 171 | 172 | Copyright (c) 2016 Pedro Teixeira 173 | -------------------------------------------------------------------------------- /lib/address.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const Multiaddr = require('multiaddr') 4 | 5 | class Address { 6 | 7 | constructor (addr) { 8 | this._address = addr 9 | this._multiAddr = Multiaddr(addr.toString().split('/').slice(0, 5).join('/')) 10 | } 11 | 12 | nodeAddress () { 13 | return this._multiAddr.nodeAddress() 14 | } 15 | 16 | toString () { 17 | return this._address 18 | } 19 | 20 | toJSON () { 21 | return this._address 22 | } 23 | } 24 | 25 | module.exports = createAddress 26 | 27 | function createAddress (addr) { 28 | if (addr instanceof Address) { 29 | return addr 30 | } 31 | return new Address(addr) 32 | } 33 | -------------------------------------------------------------------------------- /lib/client.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.client') 4 | 5 | const timers = require('timers') 6 | 7 | const NotLeaderError = require('./utils/not-leader-error') 8 | 9 | class Client { 10 | 11 | constructor (node, options) { 12 | this._node = node 13 | this._options = options 14 | } 15 | 16 | command (command, options, done) { 17 | const self = this 18 | debug('command %j', command) 19 | const node = this._pickNode() 20 | if (!node) { 21 | done(new NotLeaderError(this._node.leader())) 22 | } else { 23 | if (!options.tries) { 24 | options.tries = 1 25 | } 26 | options.tries ++ 27 | if (node === this._node.id) { 28 | // local call 29 | this._node.command(command, options, handleReply) 30 | } else { 31 | // remote call 32 | debug('rpcing command to %s, command: %j', node, command) 33 | const rpcOptions = Object.assign({}, options, { remote: true }) 34 | this._node.rpc({ 35 | from: this._node.id, 36 | to: node, 37 | action: 'Command', 38 | params: { command, options: rpcOptions } 39 | }, handlingRPCReply(handleReply)) 40 | } 41 | } 42 | 43 | function handleReply (err, result) { 44 | debug('reply to command %j: err: %s, reply: %j', command, err && err.message, result) 45 | if (err) { 46 | if (err.message === 'not connected') { 47 | maybeRetry() 48 | } else if (err.code === 'ENOTLEADER' || err.code === 'ENOMAJORITY' || err.code === 'EOUTDATEDTERM') { 49 | if (err.leader) { 50 | maybeRetry(true) // immediate 51 | } else { 52 | maybeRetry() 53 | } 54 | } else { 55 | done(err) 56 | } 57 | } else { 58 | done(null, result) 59 | } 60 | } 61 | 62 | function maybeRetry (immediate) { 63 | if (options.tries < self._options.clientMaxRetries) { 64 | if (immediate) { 65 | timers.setImmediate(() => self._node.command(command, options, done)) 66 | } else { 67 | timers.setTimeout( 68 | () => self._node.command(command, options, done), 69 | self._options.clientRetryRPCTimeout) 70 | } 71 | } else { 72 | done(new NotLeaderError(self._node.leader())) 73 | } 74 | } 75 | } 76 | 77 | _pickNode () { 78 | let node = this._node.leader() 79 | if (!node) { 80 | node = this._randomNode() 81 | } 82 | return node 83 | } 84 | 85 | _randomNode () { 86 | const peers = this._node.peers() 87 | return peers[Math.floor(Math.random() * peers.length)] 88 | } 89 | } 90 | 91 | function handlingRPCReply (callback) { 92 | return function (err, reply) { 93 | if (!err && reply.params && reply.params.error) { 94 | err = reply.params.error 95 | if 
(typeof err === 'object') { 96 | err = new Error(err.message) 97 | err.code = reply.params.error.code 98 | err.leader = reply.params.error.leader 99 | } else { 100 | err = new Error(err) 101 | } 102 | } 103 | callback(err, reply && reply.params && reply.params.result) 104 | } 105 | } 106 | 107 | module.exports = Client 108 | -------------------------------------------------------------------------------- /lib/command-queue.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.command-queue') 4 | const Writable = require('stream').Writable 5 | const merge = require('deepmerge') 6 | 7 | const defaultOptions = { 8 | objectMode: true 9 | } 10 | 11 | class CommandQueue extends Writable { 12 | 13 | constructor (_options) { 14 | const options = merge(defaultOptions, _options || {}) 15 | super(options) 16 | this._options = options 17 | this._pending = [] 18 | } 19 | 20 | next (message) { 21 | return this._pending.shift() 22 | } 23 | 24 | _write (message, _, callback) { 25 | debug('_write %j', message) 26 | this._pending.push(message) 27 | callback() 28 | this.emit('readable') 29 | } 30 | } 31 | 32 | module.exports = CommandQueue 33 | -------------------------------------------------------------------------------- /lib/commands.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.commands') 4 | 5 | class Commands { 6 | 7 | constructor (id, queue, state) { 8 | this.id = id 9 | this._queue = queue 10 | this._state = state 11 | this._dispatch() 12 | } 13 | 14 | _dispatch () { 15 | const commandMessage = this._queue.next() 16 | if (!commandMessage) { 17 | this._queue.once('readable', this._dispatch.bind(this)) 18 | } else { 19 | const command = commandMessage.command 20 | const callback = commandMessage.callback 21 | const options = commandMessage.options 22 | debug('%s: got command from queue: %j', this.id, command) 23 | this._handleCommand(command, options, (err, result) => { 24 | if (callback) { 25 | callback(err, result) 26 | } 27 | process.nextTick(this._dispatch.bind(this)) 28 | }) 29 | } 30 | } 31 | 32 | _handleCommand (command, options, done) { 33 | this._state.command(command, options, done) 34 | } 35 | } 36 | 37 | module.exports = Commands 38 | -------------------------------------------------------------------------------- /lib/db.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.db') 4 | const timers = require('timers') 5 | const Sublevel = require('level-sublevel') 6 | const Once = require('once') 7 | const async = require('async') 8 | const ConcatStream = require('concat-stream') 9 | const Leveldown = require('leveldown') 10 | const Levelup = require('levelup') 11 | const clearDB = require('./utils/clear-db') 12 | const join = require('path').join 13 | 14 | const ALLOWED_TYPES = ['put', 'del'] 15 | 16 | class DB { 17 | 18 | constructor (_location, id, db, options) { 19 | this.id = id 20 | const dbName = id.toString().replace(/\//g, '_').replace(/\./g, '_') 21 | const leveldown = db || Leveldown 22 | const location = join(_location, dbName) 23 | this._levelup = new Levelup(location, Object.assign({}, options, {db: leveldown})) 24 | this._leveldown = this._levelup.db 25 | this.db = Sublevel(this._levelup) 26 | 27 | this.log = this.db.sublevel('log') 28 | this.meta = this.db.sublevel('meta') 29 | this.state = 
this.db.sublevel('state') 30 | this.state.clear = clearDB 31 | 32 | // for debugging purposes 33 | this.log.toJSON = function () { return 'log' } 34 | this.meta.toJSON = function () { return 'meta' } 35 | this.state.toJSON = function () { return 'state' } 36 | } 37 | 38 | load (done) { 39 | async.parallel({ 40 | log: cb => { 41 | const s = this.log.createReadStream() 42 | s.once('error', cb) 43 | s.pipe(ConcatStream(entries => { 44 | cb(null, entries.sort(sortEntries).map(fixLoadedEntry)) 45 | })) 46 | }, 47 | meta: cb => { 48 | async.parallel({ 49 | currentTerm: cb => this.meta.get('currentTerm', notFoundIsOk(cb)), 50 | votedFor: cb => this.meta.get('votedFor', notFoundIsOk(cb)), 51 | peers: cb => this.meta.get('peers', notFoundIsOk(cb)) 52 | }, cb) 53 | } 54 | }, done) 55 | 56 | function sortEntries (a, b) { 57 | const keyA = a.key 58 | const keyB = b.key 59 | const keyAParts = keyA.split(':') 60 | const keyBParts = keyB.split(':') 61 | const aTerm = Number(keyAParts[0]) 62 | const bTerm = Number(keyBParts[0]) 63 | if (aTerm !== bTerm) { 64 | return aTerm - bTerm 65 | } 66 | const aIndex = Number(keyAParts[1]) 67 | const bIndex = Number(keyBParts[1]) 68 | 69 | return aIndex - bIndex 70 | } 71 | 72 | function notFoundIsOk (cb) { 73 | return function (err, result) { 74 | if (err && err.message.match(/not found/i)) { 75 | cb() 76 | } else { 77 | cb(err, result) 78 | } 79 | } 80 | } 81 | } 82 | 83 | persist (state, done) { 84 | debug('%s: persisting state', this.id) 85 | this._getPersistBatch(state, (err, batch) => { 86 | if (err) { 87 | done(err) 88 | } else { 89 | this.db.batch(batch, done) 90 | } 91 | }) 92 | } 93 | 94 | command (state, command, options, done) { 95 | this._getPersistBatch(state, (err, batch) => { 96 | if (err) { 97 | done(err) 98 | } else { 99 | const isQuery = (command.type === 'get') 100 | const isTopology = (command.type === 'join' || command.type === 'leave') 101 | debug('%s: going to apply batch: %j', this.id, batch) 102 | this.db.batch(batch, err => { 103 | debug('%s: applied batch command err = %j', this.id, err) 104 | if (!err) { 105 | if (isQuery) { 106 | this.state.get(command.key, done) 107 | } else if (isTopology) { 108 | state.applyTopologyCommand(command) 109 | done() 110 | } else { 111 | done() 112 | } 113 | } else { 114 | done(err) 115 | } 116 | }) 117 | } 118 | }) 119 | } 120 | 121 | applyEntries (entries, applyTopology, done) { 122 | if (entries.length) { 123 | debug('%s: applying entries %j', this.id, entries) 124 | } 125 | 126 | let dbCommands = [] 127 | const topologyCommands = [] 128 | entries.forEach(command => { 129 | if (command.type === 'join' || command.type === 'leave') { 130 | topologyCommands.push(command) 131 | } else { 132 | dbCommands.push(command) 133 | } 134 | }) 135 | if (topologyCommands.length) { 136 | applyTopology(topologyCommands) 137 | } 138 | 139 | dbCommands = dbCommands.reduce((acc, command) => acc.concat(command), []) 140 | 141 | const batch = dbCommands 142 | .filter(entry => ALLOWED_TYPES.indexOf(entry.type) >= 0) 143 | .map(entry => Object.assign(entry, { prefix: this.state })) 144 | 145 | if (batch.length) { 146 | this.db.batch(batch, done) 147 | } else { 148 | timers.setImmediate(done) 149 | } 150 | } 151 | 152 | _getPersistBatch (state, done) { 153 | this._getPersistLog(state, (err, _batch) => { 154 | if (err) { 155 | done(err) 156 | } else { 157 | done(null, _batch.concat(this._getPersistMeta(state))) 158 | } 159 | }) 160 | } 161 | 162 | _getPersistMeta (state) { 163 | const snapshot = state.snapshot() 164 | return [ 
165 | { 166 | key: 'currentTerm', 167 | value: snapshot.currentTerm, 168 | prefix: this.meta 169 | }, 170 | { 171 | key: 'votedFor', 172 | value: snapshot.votedFor, 173 | prefix: this.meta 174 | } 175 | ] 176 | } 177 | 178 | _getPersistLog (state, _done) { 179 | debug('%s: persisting log', this.id) 180 | const done = Once(_done) 181 | const entries = state.logEntries() 182 | const byKey = entries.reduce((acc, entry) => { 183 | const key = `${entry.t}:${entry.i}` 184 | acc[key] = entry.c 185 | return acc 186 | }, {}) 187 | debug('%s: log by key: %j', this.id, byKey) 188 | const removeKeys = [] 189 | this.log.createKeyStream() 190 | .on('data', key => { 191 | if (!byKey.hasOwnProperty(key)) { 192 | // remove key not present in the log any more 193 | removeKeys.push(key) 194 | } else { 195 | // remove entries already in the database 196 | delete byKey[key] 197 | } 198 | }) 199 | .once('error', done) 200 | .once('end', () => { 201 | debug('%s: will remove keys: %j', this.id, byKey) 202 | const operations = 203 | removeKeys.map(removeKey => { 204 | return { 205 | type: 'del', 206 | key: removeKey, 207 | prefix: this.log 208 | } 209 | }) 210 | .concat(Object.keys(byKey).map(key => { 211 | return { 212 | type: 'put', 213 | key: key, 214 | value: byKey[key], 215 | prefix: this.log 216 | } 217 | })) 218 | 219 | done(null, operations) 220 | }) 221 | } 222 | 223 | _commandToBatch (command) { 224 | return (Array.isArray(command) ? command : [command]) 225 | .map(this._transformCommand.bind(this)) 226 | } 227 | 228 | _transformCommand (command) { 229 | return Object.assign({}, command, { prefix: this.state }) 230 | } 231 | 232 | } 233 | 234 | function fixLoadedEntry (entry) { 235 | const keyParts = entry.key.split(':') 236 | const term = Number(keyParts[0]) 237 | const index = Number(keyParts[1]) 238 | return { 239 | i: index, 240 | t: term, 241 | c: entry.value 242 | } 243 | } 244 | 245 | module.exports = DB 246 | -------------------------------------------------------------------------------- /lib/default-options.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const join = require('path').join 4 | 5 | module.exports = { 6 | network: undefined, 7 | server: {}, 8 | rpcTimeoutMS: 2000, 9 | peers: [], 10 | levelup: { 11 | keyEncoding: 'utf8', 12 | valueEncoding: 'json' 13 | }, 14 | location: join(__dirname, '..', 'data'), 15 | electionTimeout: true, 16 | appendEntriesIntervalMS: 100, 17 | electionTimeoutMinMS: 300, 18 | electionTimeoutMaxMS: 600, 19 | installSnapshotChunkSize: 10, 20 | batchEntriesLimit: 10, 21 | clientRetryRPCTimeout: 200, 22 | clientMaxRetries: 10, 23 | waitBeforeLeaveMS: 4000 24 | } 25 | -------------------------------------------------------------------------------- /lib/incoming-dispatcher.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.incoming-dispatcher') 4 | const Writable = require('stream').Writable 5 | const merge = require('deepmerge') 6 | 7 | const defaultOptions = { 8 | maxPending: 100, 9 | objectMode: true 10 | } 11 | 12 | class Dispatcher extends Writable { 13 | 14 | constructor (_options) { 15 | const options = merge(defaultOptions, _options || {}) 16 | super(options) 17 | this._options = options 18 | this._pending = [] 19 | } 20 | 21 | next () { 22 | const message = this._pending.shift() 23 | return message 24 | } 25 | 26 | _write (message, _, callback) { 27 | debug('_write %j', message) 28 | 
this._pending.push(message) 29 | this._cap() 30 | callback() 31 | this.emit('readable') 32 | } 33 | 34 | _cap () { 35 | // cap at the bottom, remove the oldest messages if we need space 36 | if (this._pending.length > this._options.maxPending) { 37 | this._pending.splice(0, this._pending.length - this._options.maxPending) 38 | } 39 | } 40 | 41 | } 42 | 43 | module.exports = Dispatcher 44 | -------------------------------------------------------------------------------- /lib/iterator.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const AbstractIterator = require('abstract-leveldown').AbstractIterator 4 | 5 | class Iterator extends AbstractIterator { 6 | constructor (node, db, options) { 7 | super(db) 8 | this.__db = db 9 | this._node = node 10 | this._options = options 11 | this._haveConsensus = false 12 | } 13 | 14 | _next (callback, _cleanup) { 15 | if (_cleanup) { 16 | _cleanup() 17 | } 18 | if (!this._haveConsensus) { 19 | this._node.readConsensus(err => { 20 | if (err) { 21 | callback(err) 22 | } else { 23 | this._haveConsensus = true 24 | this._next(callback, _cleanup) 25 | } 26 | }) 27 | return 28 | } 29 | if (!this._rs) { 30 | this._rs = this.__db.createReadStream(this._options) 31 | } 32 | const rs = this._rs 33 | rs.on('close', onClose) 34 | rs.on('end', onClose) 35 | rs.on('finish', onClose) 36 | rs.on('error', onError) 37 | 38 | const item = this._rs.read() 39 | if (item) { 40 | cleanup() 41 | callback(null, item.key, item.value) 42 | } else { 43 | this._rs.once('readable', this._next.bind(this, callback, cleanup)) 44 | } 45 | 46 | function cleanup () { 47 | rs.removeListener('close', onClose) 48 | rs.removeListener('end', onClose) 49 | rs.removeListener('finish', onClose) 50 | rs.removeListener('error', onError) 51 | } 52 | 53 | function onClose () { 54 | cleanup() 55 | callback() 56 | } 57 | 58 | function onError (err) { 59 | cleanup() 60 | callback(err) 61 | } 62 | } 63 | 64 | _end (callback) { 65 | this._rs.once('close', callback) 66 | this._rs.destroy() 67 | } 68 | } 69 | 70 | module.exports = Iterator 71 | -------------------------------------------------------------------------------- /lib/leveldown.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.leveldown') 4 | const AbstractLevelDown = require('abstract-leveldown').AbstractLevelDOWN 5 | 6 | class LevelDown extends AbstractLevelDown { 7 | 8 | constructor (node) { 9 | super(node.id.toString()) 10 | this._node = node 11 | } 12 | 13 | _close (callback) { 14 | this._node.stop(callback) 15 | } 16 | 17 | _get (key, options, callback) { 18 | debug('get %j', key) 19 | this._node.command({type: 'get', key}, options, callback) 20 | } 21 | 22 | _put (key, value, options, callback) { 23 | debug('put %j, %j', key, value) 24 | this._node.command({type: 'put', key, value}, options, callback) 25 | } 26 | 27 | _del (key, options, callback) { 28 | debug('del %j', key) 29 | this._node.command({type: 'del', key}, options, callback) 30 | } 31 | 32 | _batch (array, options, callback) { 33 | debug('batch %j', array) 34 | this._node.command(array, options, callback) 35 | } 36 | 37 | _iterator (options) { 38 | return this._node.iterator(options) 39 | } 40 | 41 | } 42 | 43 | module.exports = LevelDown 44 | -------------------------------------------------------------------------------- /lib/log.js: -------------------------------------------------------------------------------- 1 | 'use 
strict' 2 | 3 | const debug = require('debug')('skiff.log') 4 | const assert = require('assert') 5 | const timers = require('timers') 6 | 7 | const defaultOptions = { 8 | minLogRetention: 100 9 | } 10 | 11 | class Log { 12 | 13 | constructor (node, options) { 14 | this._options = Object.assign({}, defaultOptions, options) 15 | this._node = node 16 | this._lastLogIndex = 0 17 | this._firstLogIndex = 0 18 | this._lastLogTerm = 0 19 | this._commitIndex = 0 20 | this._lastApplied = 0 21 | this._lastAppliedTerm = 0 22 | this._entries = [] 23 | } 24 | 25 | setEntries (entries) { 26 | this._entries = entries 27 | } 28 | 29 | push (command) { 30 | const newLogIndex = ++this._lastLogIndex 31 | const newEntry = { 32 | t: this._node.term(), // term 33 | i: newLogIndex, // index 34 | c: command // command 35 | } 36 | debug('%s: about to push new entry %j', this._node.id, newEntry) 37 | 38 | this._entries.push(newEntry) 39 | this._lastLogIndex = newLogIndex 40 | this._compact() 41 | 42 | return newLogIndex 43 | } 44 | 45 | head () { 46 | return this._entries[this._entries.length - 1] 47 | } 48 | 49 | atLogIndex (index) { 50 | let entry 51 | for (let i = this._entries.length - 1; i >= 0; i--) { 52 | entry = this._entries[i] 53 | if (!entry) { 54 | return 55 | } 56 | if (entry.i === index) { 57 | return entry 58 | } 59 | } 60 | } 61 | 62 | appendAfter (index, entries) { 63 | debug('%s: append after %d: %j', this._node.id, index, entries) 64 | 65 | // truncate 66 | let head 67 | while ((head = this.head()) && head.i > index) { 68 | this._entries.pop() 69 | } 70 | 71 | for (let i = 0; i < entries.length; i++) { 72 | this._entries.push(entries[i]) 73 | } 74 | const last = entries[entries.length - 1] 75 | if (last) { 76 | this._lastLogIndex = last.i 77 | this._lastLogTerm = last.t 78 | } 79 | 80 | this._compact() 81 | } 82 | 83 | commit (index, done) { 84 | if (typeof index !== 'number') { 85 | throw new Error('index needs to be a number') 86 | } 87 | if (typeof done !== 'function') { 88 | throw new Error('done needs to be a function') 89 | } 90 | debug('%s: commit %d', this._node.id, index) 91 | 92 | const entriesToApply = this.entriesFromTo(this._commitIndex + 1, index) 93 | 94 | if (!entriesToApply.length) { 95 | timers.setImmediate(done) 96 | return 97 | } 98 | const lastEntry = entriesToApply[entriesToApply.length - 1] 99 | debug('%s: lastEntry: %j', this._node.id, lastEntry) 100 | 101 | this._commitIndex = lastEntry.i 102 | this._node.applyEntries(entriesToApply.map(entry => entry.c), (err) => { 103 | if (err) { 104 | done(err) 105 | } else { 106 | debug('%s: done commiting index %d', this._node.id, lastEntry.i) 107 | this._lastApplied = lastEntry.i 108 | this._lastAppliedTerm = lastEntry.t 109 | this._compact() 110 | done() 111 | } 112 | }) 113 | } 114 | 115 | lastIndexForTerm (term) { 116 | let entry 117 | if (this._lastLogTerm === term) { 118 | return this._lastLogIndex 119 | } 120 | for (let i = this._entries.length - 1; i >= 0; i--) { 121 | entry = this._entries[i] 122 | if (!entry) { 123 | return 124 | } 125 | if (entry.t === term) { 126 | return entry.i 127 | } 128 | } 129 | } 130 | 131 | entries () { 132 | return this._entries 133 | } 134 | 135 | entriesFrom (index) { 136 | const physicalIndex = this._physicalIndexFor(index) 137 | if (physicalIndex === -1) { 138 | return null 139 | } 140 | debug('physical index for %d is %d', index, physicalIndex) 141 | const entries = this._entries.slice(physicalIndex) 142 | if (entries.length) { 143 | assert.equal(entries[0].i, index) 144 | } 145 | 
debug('entries from %d are %j', index, entries) 146 | return entries.map(cleanupEntry) 147 | } 148 | 149 | lastAppliedEntry () { 150 | return this.atLogIndex(this._lastApplied) 151 | } 152 | 153 | entriesFromTo (from, to) { 154 | const pFrom = this._physicalIndexFor(from) 155 | const entries = this._entries.slice(pFrom, pFrom + to - from + 1) 156 | if (entries.length) { 157 | assert(entries[0].i === from, `expected first entry to be index ${from} and was ${entries[0].i}`) 158 | } 159 | return entries 160 | } 161 | 162 | _physicalIndexFor (index) { 163 | debug('physical index for %d', index) 164 | if (index < this._firstLogIndex) { 165 | debug('index %d is smaller tham first index %d', index, this._firstLogIndex) 166 | return -1 167 | } 168 | if (index === 0) { 169 | return 0 170 | } 171 | debug('_firstLogIndex is %d', this._firstLogIndex) 172 | let entry 173 | for (let i = this._entries.length - 1; i >= 0; i--) { 174 | entry = this._entries[i] 175 | if (entry.i === index) { 176 | return i 177 | } else if (entry.i < index) { 178 | return i + 1 179 | } 180 | } 181 | return 0 182 | } 183 | 184 | _compact () { 185 | if (this._entries.length > this._options.minLogRetention) { 186 | const maxPhysicalIndex = this._entries.length - this._options.minLogRetention 187 | const maxIndex = this._entries[maxPhysicalIndex].i 188 | let canRemove = maxPhysicalIndex 189 | if (maxIndex > this._lastApplied) { 190 | canRemove -= (maxIndex - this._lastApplied) 191 | } 192 | this._entries.splice(0, canRemove) 193 | } 194 | if (this._entries.length) { 195 | this._firstLogIndex = this._entries[0].i 196 | } 197 | } 198 | } 199 | 200 | function cleanupEntry (_entry) { 201 | const entry = Object.assign({}, _entry) 202 | if (entry.c) { 203 | entry.c = Object.assign({}, entry.c) 204 | if (entry.c.prefix) { 205 | delete entry.c.prefix 206 | } 207 | } 208 | return entry 209 | } 210 | 211 | module.exports = Log 212 | -------------------------------------------------------------------------------- /lib/network/active/errors.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | exports.OK_ERRORS = [ 4 | 'ECONNREFUSED' 5 | ] 6 | -------------------------------------------------------------------------------- /lib/network/active/index.js: -------------------------------------------------------------------------------- 1 | const Network = require('./network') 2 | 3 | module.exports = createNetwork 4 | 5 | function createNetwork (options) { 6 | return new Network(options) 7 | } 8 | -------------------------------------------------------------------------------- /lib/network/active/network.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.network.active') 4 | const Writable = require('stream').Writable 5 | 6 | const Peer = require('./peer') 7 | const NetworkNode = require('../network-node') 8 | 9 | const defaultOptions = { 10 | objectMode: true, 11 | highWaterMark: 50 12 | } 13 | 14 | class Network extends Writable { 15 | 16 | constructor (_options) { 17 | const options = Object.assign({}, _options || {}, defaultOptions) 18 | debug('creating network with options %j', options) 19 | super(options) 20 | this._peers = {} 21 | this._nodes = {} 22 | this._options = options 23 | this._ended = false 24 | 25 | this.once('finish', () => { 26 | debug('network finished') 27 | debug('ending peers') 28 | Object.keys(this._peers).forEach((address) => this._peers[address].end()) 29 | }) 30 | } 
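// node() hands out a Duplex stream scoped to one peer address: the NetworkNode is cached, and this network's 'connect', 'disconnect' and matching 'warning' events are forwarded to it for as long as that node stream lives.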
31 | 32 | node (address) { 33 | let node = this._nodes[address] 34 | if (!node) { 35 | node = this._nodes[address] = new NetworkNode(address, this, this._options) 36 | node.once('finish', () => { 37 | node.removeAllListeners() 38 | this.removeListener('warning', onWarning) 39 | this.removeListener('connect', onConnect) 40 | this.removeListener('disconnect', onDisconnect) 41 | delete this._nodes[address] 42 | }) 43 | this 44 | .on('warning', onWarning) 45 | .on('connect', onConnect) 46 | .on('disconnect', onDisconnect) 47 | } 48 | return node 49 | 50 | function onWarning (err, peer) { 51 | if (node.match(peer)) { 52 | node.emit('warning', err) 53 | } 54 | } 55 | 56 | function onConnect (peer) { 57 | node.emit('connect', peer) 58 | } 59 | 60 | function onDisconnect (peer) { 61 | node.emit('disconnect', peer) 62 | } 63 | } 64 | 65 | disconnect (address) { 66 | const peer = this._peers[address] 67 | if (peer) { 68 | peer.end() 69 | peer.removeAllListeners() 70 | delete this._peers[address] 71 | } 72 | } 73 | 74 | _write (message, _, callback) { 75 | debug('writing %j', message) 76 | if (!this._ended) { 77 | const peer = this._ensurePeer(message.to) 78 | peer.write(message, callback) 79 | } 80 | } 81 | 82 | end (buf) { 83 | this._ended = true 84 | return super.end(buf) 85 | } 86 | 87 | _ensurePeer (_address) { 88 | const address = _address.toString() 89 | debug('ensuring peer %s', address) 90 | let peer = this._peers[address] 91 | if (!peer) { 92 | peer = this._peers[address] = new Peer(address, this._options) 93 | peer 94 | .on('error', (err) => { 95 | this.emit('warning', err, address) 96 | }) 97 | .once('finish', () => { 98 | debug('peer %s closed', address) 99 | delete this._peers[address] 100 | }) 101 | .on('data', (message) => { 102 | debug('have message from peer: %j', message) 103 | this._deliver(message) 104 | }) 105 | .on('connect', () => { 106 | this.emit('connect', address) 107 | }) 108 | .on('disconnect', () => { 109 | this.emit('disconnect', address) 110 | }) 111 | .on('innactivity timeout', () => { 112 | this.disconnect(address) 113 | }) 114 | } 115 | 116 | return peer 117 | } 118 | 119 | _deliver (message) { 120 | if (message.to && (typeof message.to) !== 'string') { 121 | throw new Error(`message.to shouldnt be a ${typeof message.to}`) 122 | } 123 | if (message.from && (typeof message.from) !== 'string') { 124 | throw new Error(`message.from shouldnt be a ${typeof message.from}`) 125 | } 126 | Object.keys(this._nodes) 127 | .map(address => this._nodes[address]) 128 | .filter(node => node.match(message.to)) 129 | .forEach(node => node.push(message)) 130 | } 131 | 132 | peerStats (address) { 133 | const peer = this._peers[address] 134 | if (peer) { 135 | return peer.stats() 136 | } 137 | } 138 | 139 | } 140 | 141 | module.exports = Network 142 | -------------------------------------------------------------------------------- /lib/network/active/peer.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.network.peer') 4 | const Duplex = require('stream').Duplex 5 | const timers = require('timers') 6 | const Msgpack = require('msgpack5') 7 | 8 | const Address = require('../../address') 9 | const reconnect = require('./reconnect') 10 | const OK_ERRORS = require('./errors').OK_ERRORS 11 | 12 | const defaultOptions = { 13 | objectMode: true, 14 | highWaterMark: 50, 15 | innactivityTimeout: 5000 16 | } 17 | 18 | const interestingEvents = [ 19 | 'connect', 20 | 'reconnect', 21 | 'disconnect', 
22 | 'error' 23 | ] 24 | 25 | const reconnectOptions = { 26 | immediate: true, 27 | maxDelay: 5000 28 | } 29 | 30 | class Peer extends Duplex { 31 | 32 | constructor (address, _options) { 33 | debug('constructing peer from address %j', address) 34 | const options = Object.assign({}, defaultOptions, _options) 35 | super(options) 36 | this._options = options 37 | this._address = Address(address) 38 | 39 | this._stats = { 40 | receivedMessageCount: 0, 41 | sentMessageCount: 0, 42 | lastReceived: 0, 43 | lastSent: 0 44 | } 45 | 46 | this.once('finish', this._finish.bind(this)) 47 | 48 | this._connect() 49 | } 50 | 51 | end (buf) { 52 | debug('peer end() called for peer %s', this._address) 53 | super.end(buf) 54 | } 55 | 56 | _connect () { 57 | debug('connecting to %s', this._address) 58 | const peer = this 59 | this._reconnect = reconnect(reconnectOptions, (peerRawConn) => { 60 | debug('connected to peer %s', this._address) 61 | let innactivityTimeout 62 | resetInnactivityTimeout() 63 | 64 | const msgpack = Msgpack() 65 | 66 | // to peer 67 | this._out = msgpack.encoder() 68 | 69 | this._out.pipe(peerRawConn) 70 | 71 | // from peer 72 | const fromPeer = msgpack.decoder() 73 | peerRawConn.pipe(fromPeer) 74 | 75 | fromPeer.on('data', (data) => { 76 | this._stats.lastReceived = Date.now() 77 | this._stats.receivedMessageCount ++ 78 | resetInnactivityTimeout() 79 | debug('some data from peer: %j', data) 80 | peer.push(data) 81 | }) 82 | 83 | peerRawConn.on('error', handlePeerError) 84 | fromPeer.on('error', handlePeerError) 85 | 86 | peerRawConn.on('close', () => { 87 | this._out = undefined 88 | timers.clearTimeout(innactivityTimeout) 89 | }) 90 | 91 | process.nextTick(() => peer.emit('connect')) 92 | 93 | function resetInnactivityTimeout () { 94 | if (innactivityTimeout) { 95 | timers.clearTimeout(innactivityTimeout) 96 | } 97 | innactivityTimeout = timers.setTimeout( 98 | onInnactivityTimeout, peer._options.innactivityTimeout) 99 | } 100 | 101 | function onInnactivityTimeout () { 102 | peer.emit('innactivity timeout') 103 | } 104 | }) 105 | .on('error', handlePeerError) 106 | .on('disconnect', () => { 107 | debug('disconnected from %s', this._address) 108 | this._out = undefined 109 | this.emit('disconnect') 110 | }) 111 | 112 | interestingEvents.forEach((event) => { 113 | this._reconnect.on(event, (payload) => { 114 | this.emit(event, payload) 115 | }) 116 | }) 117 | 118 | this._reconnect.connect(this._address) 119 | 120 | function handlePeerError (err) { 121 | if (OK_ERRORS.indexOf(err.code) === -1) { 122 | debug('relaying error:\n%s', err.stack) 123 | peer.emit('error', err) 124 | } 125 | } 126 | } 127 | 128 | _read (size) { 129 | // do nothing, we'll emit data when the peer emits data 130 | } 131 | 132 | _write (message, encoding, callback) { 133 | debug('writing %j to %s', message, this._address) 134 | 135 | if (!message) { 136 | return 137 | } 138 | if (message.to) { 139 | message.to = message.to.toString() 140 | } 141 | if (message.from) { 142 | message.from = message.from.toString() 143 | } 144 | 145 | if (this._out) { 146 | try { 147 | this._out.write(message, (err) => { 148 | if (err) { 149 | this.emit('warning', err) 150 | } 151 | // keep the juice flowing 152 | callback() 153 | }) 154 | this._stats.lastSent = Date.now() 155 | this._stats.sentMessageCount ++ 156 | } catch (err) { 157 | this.emit('warning', err, this._address.toString()) 158 | } 159 | } else { 160 | debug('have message, but not connected to peer %s', this._address) 161 | // if we're not connected we discard the 
message 162 | // and reply with error 163 | timers.setImmediate(() => { 164 | this.push({ 165 | type: 'reply', 166 | from: message.to, 167 | id: message.id, 168 | error: 'not connected', 169 | fake: true, 170 | params: { 171 | success: false, 172 | reason: 'not connected' 173 | } 174 | }) 175 | }) 176 | callback() 177 | } 178 | } 179 | 180 | _finish () { 181 | debug('finishing connection to peer %s', this._address) 182 | this._reconnect.disconnect() 183 | } 184 | 185 | stats () { 186 | return this._stats 187 | } 188 | 189 | } 190 | 191 | module.exports = Peer 192 | -------------------------------------------------------------------------------- /lib/network/active/reconnect.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.network.reconnect') 4 | const Reconnect = require('reconnect-core') 5 | const net = require('net') 6 | 7 | module.exports = Reconnect((maddr) => { 8 | const nodeAddr = maddr.nodeAddress() 9 | const addr = { 10 | port: nodeAddr.port, 11 | host: nodeAddr.address 12 | } 13 | debug('connecting to %j', addr) 14 | return net.connect(addr) 15 | }) 16 | -------------------------------------------------------------------------------- /lib/network/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const PassiveNetwork = require('./passive') 4 | const ActiveNetwork = require('./active') 5 | 6 | module.exports = createNetwork 7 | 8 | function createNetwork (options) { 9 | return { 10 | active: new ActiveNetwork(options.active), 11 | passive: new PassiveNetwork(options.passive) 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /lib/network/network-node.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.network.node') 4 | const Duplex = require('stream').Duplex 5 | 6 | class NetworkNode extends Duplex { 7 | 8 | constructor (address, out, options) { 9 | super(options) 10 | this._matchAddress = address.toString().split('/') 11 | this._out = out 12 | this._ended = false 13 | this._out.once('finish', () => { 14 | debug('out channel finished') 15 | this._ended = true 16 | }) 17 | } 18 | 19 | match (_address) { 20 | const address = _address && _address.toString() 21 | const parts = address && address.split('/') 22 | const matches = parts && this._matchAddress.every((part, index) => parts[index] === part) 23 | debug('match %j to own %j. 
matches: %j', parts, this._matchAddress, matches) 24 | return matches 25 | } 26 | 27 | _read () { 28 | // do nothing 29 | } 30 | 31 | _write (message, _, callback) { 32 | if (!this._ended) { 33 | try { 34 | this._out.write(message, () => { 35 | callback() 36 | // ignore the errors, keep stream alive 37 | }) 38 | } catch (err) { 39 | this.emit('warning', err) 40 | // fixme: catch write after end errors 41 | } 42 | } 43 | } 44 | } 45 | 46 | module.exports = NetworkNode 47 | -------------------------------------------------------------------------------- /lib/network/passive/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const Network = require('./network') 4 | 5 | module.exports = createNetwork 6 | 7 | function createNetwork (options) { 8 | return new Network(options) 9 | } 10 | -------------------------------------------------------------------------------- /lib/network/passive/network.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.network.passive') 4 | const Writable = require('stream').Writable 5 | const merge = require('deepmerge') 6 | 7 | const Server = require('./server') 8 | const NetworkNode = require('../network-node') 9 | 10 | const defaultOptions = { 11 | objectMode: true, 12 | server: { 13 | port: 9163, 14 | host: '0.0.0.0', 15 | exclusive: true 16 | } 17 | } 18 | 19 | class Network extends Writable { 20 | 21 | constructor (_options) { 22 | // TODO: merge options.server 23 | const options = merge(defaultOptions, _options || {}) 24 | debug('creating network with options %j', options) 25 | super(options) 26 | this._nodes = {} 27 | this._options = options 28 | this._listening = false 29 | this.once('finish', this._finish.bind(this)) 30 | this._listen() 31 | } 32 | 33 | node (address) { 34 | let node = this._nodes[address] 35 | if (!node) { 36 | node = this._nodes[address] = new NetworkNode(address, this, this._options) 37 | node.once('finish', () => delete this._nodes[address]) 38 | } 39 | 40 | return node 41 | } 42 | 43 | _listen () { 44 | debug('network listen()') 45 | this._server = new Server(this._options.server) 46 | this._server 47 | .once('listening', (options) => { 48 | this._listening = true 49 | this.emit('listening', options) 50 | }) 51 | .on('data', message => { 52 | debug('incoming message from server: %j', message) 53 | this._deliver(message) 54 | }) 55 | .on('warning', warn => this.emit('warning', warn)) 56 | .once('closed', () => { 57 | this.emit('closed') 58 | }) 59 | } 60 | 61 | listening () { 62 | return this._listening 63 | } 64 | 65 | _deliver (message) { 66 | Object.keys(this._nodes) 67 | .map(address => this._nodes[address]) 68 | .filter(node => node.match(message.to)) 69 | .forEach(node => node.push(message)) 70 | } 71 | 72 | _write (message, _, callback) { 73 | debug('writing %j', message) 74 | 75 | if (!message) { 76 | return callback() 77 | } 78 | 79 | if (message.to) { 80 | message.to = message.to.toString() 81 | } 82 | if (message.from) { 83 | message.from = message.from.toString() 84 | } 85 | this._server.write(message, callback) 86 | } 87 | 88 | _finish () { 89 | this._server.close() 90 | } 91 | 92 | } 93 | 94 | module.exports = Network 95 | -------------------------------------------------------------------------------- /lib/network/passive/server.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = 
require('debug')('skiff.network.passive.server') 4 | const net = require('net') 5 | const Duplex = require('stream').Duplex 6 | const Msgpack = require('msgpack5') 7 | const merge = require('deepmerge') 8 | 9 | const defaultOptions = { 10 | objectMode: true 11 | } 12 | 13 | class Server extends Duplex { 14 | 15 | constructor (_options) { 16 | const options = merge(_options, defaultOptions) 17 | debug('building server with options %j', options) 18 | super(options) 19 | this._options = options 20 | this._server = net.createServer(this._onConnection.bind(this)) 21 | this._server.once('close', () => { 22 | this.emit('closed') 23 | }) 24 | this._peers = {} 25 | this._listen() 26 | } 27 | 28 | close () { 29 | this._server.close() 30 | } 31 | 32 | _listen () { 33 | this._server.listen(this._options, () => { 34 | debug('server listening with options %j', this._options) 35 | this.emit('listening', this._options) 36 | }) 37 | } 38 | 39 | _read () { 40 | // do nothing 41 | } 42 | 43 | _write (message, _, callback) { 44 | debug('server trying to write %j', message) 45 | const peer = this._peers[message.to] 46 | if (peer) { 47 | debug('I have peer for message to %s', message.to) 48 | peer.write(message, callback) 49 | } else { 50 | debug('I have no peer to send to') 51 | callback() 52 | } 53 | } 54 | 55 | _onConnection (conn) { 56 | debug('new server connection') 57 | const server = this 58 | const msgpack = Msgpack() 59 | 60 | conn.once('finish', () => debug('connection ended')) 61 | 62 | const fromPeer = msgpack.decoder() 63 | conn 64 | .pipe(fromPeer) 65 | .on('error', onPeerError) 66 | 67 | const toPeer = msgpack.encoder() 68 | toPeer 69 | .pipe(conn) 70 | .on('error', onPeerError) 71 | 72 | fromPeer.on('data', this._onMessage.bind(this, conn, toPeer)) 73 | 74 | function onPeerError (err) { 75 | debug('peer error: %s', err.stack) 76 | server.emit('warning', err) 77 | } 78 | } 79 | 80 | _onMessage (conn, toPeer, message) { 81 | debug('incoming message: %j', message) 82 | const from = message.from 83 | if (from) { 84 | const peer = this._peers[from] 85 | if (!peer || peer !== toPeer) { 86 | debug('setting up peer %s', from) 87 | this._peers[from] = toPeer 88 | conn.once('finish', () => delete this._peers[from]) 89 | if (peer) { 90 | peer.end() 91 | } 92 | } else { 93 | debug('no need to setup new peer') 94 | } 95 | debug('pushing out message from %s', from) 96 | this.push(message) 97 | } else { 98 | debug('no .from in message') 99 | } 100 | } 101 | 102 | } 103 | 104 | module.exports = Server 105 | -------------------------------------------------------------------------------- /lib/node.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.node') 4 | const Through = require('through2') 5 | const EventEmitter = require('events') 6 | const assert = require('assert') 7 | const timers = require('timers') 8 | 9 | const States = require('./states') 10 | const Log = require('./log') 11 | const RPC = require('./rpc') 12 | const Client = require('./client') 13 | const NotLeaderError = require('./utils/not-leader-error') 14 | 15 | const importantStateEvents = ['election timeout'] 16 | 17 | class Node extends EventEmitter { 18 | 19 | constructor (id, connections, dispatcher, db, peers, options) { 20 | super() 21 | this.id = id 22 | this._stopped = false 23 | this._connections = connections 24 | this._options = options 25 | this._dispatcher = dispatcher 26 | this._db = db 27 | this._getPeers = peers 28 | this.passive = 
this._outStream() 29 | this.active = this._outStream() 30 | this._replies = this._replyStream() 31 | 32 | this._stateName = undefined 33 | this._handlingRequest = false // to detect race conditions 34 | this._weakenedBefore = Date.now() 35 | 36 | this._leaving = [] 37 | 38 | // persisted state 39 | this._term = 0 40 | this._votedFor = null 41 | this._log = new Log( 42 | { 43 | id: this.id, 44 | applyEntries: this._applyEntries.bind(this), 45 | term: this._getTerm.bind(this) 46 | }, 47 | options) 48 | this._peers = options.peers.filter(address => address !== this.id.toString()) 49 | 50 | debug('id:', this.id.toString()) 51 | debug('peers:', this._peers) 52 | 53 | this._stateServices = { 54 | id, 55 | name: this._getStateName.bind(this), 56 | term: this._getTerm.bind(this), 57 | setTerm: this._setTerm.bind(this), 58 | transition: this._transition.bind(this), 59 | incrementTerm: this._incrementTerm.bind(this), 60 | getVotedFor: this._getVotedFor.bind(this), 61 | setVotedFor: this._setVotedFor.bind(this), 62 | log: this._log, 63 | db, 64 | untilNotWeakened: this._untilNotWeakened.bind(this) 65 | } 66 | 67 | this._rpc = RPC(this._stateServices, this.active, this._replies, this, this._options) 68 | 69 | this._client = new Client({ 70 | id, 71 | rpc: this._rpc.bind(this), 72 | leader: this._getLeader.bind(this), 73 | peers: this._getLocalPeerList.bind(this), 74 | command: this.command.bind(this) 75 | }, this._options) 76 | 77 | this._networkingServices = { 78 | id: this.id, 79 | rpc: this._rpc, 80 | reply: this._reply.bind(this), 81 | isMajority: this._isMajority.bind(this), 82 | peers: this._getLocalPeerList.bind(this), 83 | setPeers: this._setPeers.bind(this) 84 | } 85 | 86 | this._dbServices = { 87 | snapshot: this._getPersistableState.bind(this), 88 | logEntries: this.getLogEntries.bind(this), 89 | applyTopologyCommand: this._applyTopologyCommand.bind(this) 90 | } 91 | 92 | this._dispatch() 93 | } 94 | 95 | stop () { 96 | this._stopped = true 97 | if (this._state) { 98 | this._state.stop() 99 | } 100 | } 101 | 102 | is (state) { 103 | return this._stateName === state 104 | } 105 | 106 | // ------------- 107 | // Peers 108 | 109 | join (address, done) { 110 | if (this._peers.indexOf(address) >= 0) { 111 | process.nextTick(done) 112 | } else { 113 | this.command({type: 'join', peer: address}, {}, done) 114 | } 115 | } 116 | 117 | leave (address, done) { 118 | debug('%s: leave %s', this.id, address) 119 | if (address !== this.id.toString() && this._peers.indexOf(address) === -1) { 120 | process.nextTick(done) 121 | } else { 122 | this.command({type: 'leave', peer: address}, {}, done) 123 | } 124 | } 125 | 126 | peers (network, done) { 127 | if (this._stateName === 'leader') { 128 | if (network && network.active) { 129 | const peers = this._peers 130 | .map(peer => { 131 | return { id: peer } 132 | }) 133 | .filter(peer => peer.id !== this.id.toString()) 134 | .concat({ 135 | id: this.id.toString(), 136 | leader: true 137 | }) 138 | 139 | peers.forEach(peer => { 140 | peer.stats = network.active._out.peerStats(peer.id) 141 | if (peer.stats) { 142 | peer.stats.lastReceivedAgo = Date.now() - peer.stats.lastReceived 143 | peer.stats.lastSentAgo = Date.now() - peer.stats.lastSent 144 | delete peer.stats.lastReceived 145 | delete peer.stats.lastSent 146 | } 147 | peer.connected = this._connections.isConnectedTo(peer.id) 148 | }) 149 | done(null, peers) 150 | } else { 151 | done(null, {}) 152 | } 153 | } else { 154 | this._client.command('peers', {tries: 0}, done) 155 | } 156 | } 157 | 158 | 
_getLocalPeerList () { 159 | return this._peers.slice() 160 | } 161 | 162 | _setPeers (peers) { 163 | this._peers = peers.filter(p => p !== this.id.toString()) 164 | this._peers.forEach(peer => this._state.join(peer)) 165 | } 166 | 167 | _ensurePeer (address) { 168 | if ((this._peers.indexOf(address) < 0) && address !== this.id.toString()) { 169 | debug('%s is joining %s', this.id, address) 170 | this._peers.push(address) 171 | } 172 | } 173 | 174 | _isMajority (count) { 175 | const quorum = Math.floor((this._peers.length + 1) / 2) + 1 176 | const isMajority = count >= quorum 177 | debug('%s: is %d majority? %j', this.id, count, isMajority) 178 | if (!isMajority) { 179 | debug('%s: still need %d votes to reach majority', this.id, quorum - count) 180 | } 181 | return isMajority 182 | } 183 | 184 | // ------------- 185 | // Internal state 186 | 187 | _transition (state, force) { 188 | debug('%s: asked to transition to state %s', this.id, state) 189 | if (force || state !== this._stateName) { 190 | debug('node %s is transitioning to state %s', this.id, state) 191 | const oldState = this._state 192 | if (oldState) { 193 | oldState.stop() 194 | } 195 | 196 | const State = States(state) 197 | this._state = new State({ 198 | id: this.id.toString(), 199 | state: this._stateServices, 200 | network: this._networkingServices, 201 | log: this._log, 202 | command: this.command.bind(this), 203 | leader: this._getLeader.bind(this) 204 | }, this._options) 205 | 206 | importantStateEvents.forEach(event => { 207 | this._state.on(event, arg => this.emit(event, arg)) 208 | }) 209 | this._stateName = state 210 | this._state.start() 211 | 212 | this.emit('new state', state) 213 | this.emit(state) 214 | } 215 | } 216 | 217 | _getStateName () { 218 | return this._stateName 219 | } 220 | 221 | _incrementTerm () { 222 | this._votedFor = null 223 | const term = ++this._term 224 | return term 225 | } 226 | 227 | _getTerm () { 228 | return this._term 229 | } 230 | 231 | _setTerm (term) { 232 | if (typeof term !== 'number') { 233 | throw new Error('term needs to be a number and was %j', term) 234 | } 235 | this._votedFor = null 236 | this._term = term 237 | return this._term 238 | } 239 | 240 | _getVotedFor () { 241 | return this._votedFor 242 | } 243 | 244 | _setVotedFor (peer) { 245 | debug('%s: setting voted for to %s', this.id, peer) 246 | this._votedFor = peer 247 | } 248 | 249 | weaken (duration) { 250 | this._weakenedBefore = Date.now() + duration 251 | this._transition('weakened') 252 | } 253 | 254 | _untilNotWeakened (callback) { 255 | const now = Date.now() 256 | if (this._weakenedBefore > now) { 257 | timers.setTimeout(callback, this._weakenedBefore - now) 258 | } else { 259 | process.nextTick(callback) 260 | } 261 | } 262 | 263 | // ------------- 264 | // Networking 265 | 266 | _reply (to, messageId, params, callback) { 267 | debug('%s: replying to: %s, messageId: %s, params: %j', this.id, to, messageId, params) 268 | this.passive.write({ 269 | to: to, 270 | type: 'reply', 271 | from: this.id, 272 | id: messageId, 273 | params 274 | }, callback) 275 | } 276 | 277 | _dispatch () { 278 | debug('%s: _dispatch', this.id) 279 | 280 | if (this._stopped) { 281 | return 282 | } 283 | 284 | const message = this._dispatcher.next() 285 | if (!message) { 286 | this._dispatcher.once('readable', this._dispatch.bind(this)) 287 | } else { 288 | debug('%s: got message from dispatcher: %j', this.id, message) 289 | 290 | this.emit('message received') 291 | 292 | if (message.params) { 293 | if (message.params.term < 
this._term) { 294 | // discard message if term is smaller than current term 295 | debug('%s: message discarded because term %d is smaller than my current term %d', 296 | this.id, message.params.term, this._term) 297 | return process.nextTick(this._dispatch.bind(this)) 298 | } 299 | 300 | if (message.params.leaderId) { 301 | this._leaderId = message.params.leaderId 302 | } 303 | 304 | debug('%s: current term: %d', this.id, this._term) 305 | 306 | if (message.params.term > this._term) { 307 | debug('%s is going to transition to state follower because of outdated term', this.id) 308 | this._setTerm(message.params.term) 309 | this._transition('follower') 310 | } 311 | } 312 | 313 | if (message.type === 'request') { 314 | debug('%s: request message from dispatcher: %j', this.id, message) 315 | this._handleRequest(message, this._dispatch.bind(this)) 316 | } else if (message.type === 'reply') { 317 | debug('%s: reply message from dispatcher: %j', this.id, message) 318 | this._handleReply(message, this._dispatch.bind(this)) 319 | } 320 | } 321 | } 322 | 323 | _handleRequest (message, done) { 324 | assert(!this._handlingRequest, 'race: already handling request') 325 | this.emit('rpc received', message.action) 326 | this._handlingRequest = true 327 | 328 | const from = message.from 329 | if (from) { 330 | debug('%s: handling message: %j', this.id, message) 331 | this._ensurePeer(from) 332 | this._state.handleRequest(message, err => { 333 | this.persist(persistError => { 334 | debug('%s: persisted', this.id) 335 | this._handlingRequest = false 336 | 337 | if (err) { 338 | done(err) 339 | } else { 340 | done(persistError) 341 | } 342 | }) 343 | }) 344 | } else { 345 | done() 346 | } 347 | } 348 | 349 | _handleReply (message, done) { 350 | debug('%s: handling reply %j', this.id, message) 351 | this._replies.write(message) 352 | done() 353 | } 354 | 355 | _outStream () { 356 | const self = this 357 | return Through.obj(transform) 358 | 359 | function transform (message, _, callback) { 360 | message.from = self.id.toString() 361 | this.push(message) 362 | callback() 363 | } 364 | } 365 | 366 | _replyStream () { 367 | const stream = Through.obj(transform) 368 | stream.setMaxListeners(Infinity) 369 | return stream 370 | 371 | function transform (message, _, callback) { 372 | this.push(message) 373 | callback() 374 | } 375 | } 376 | 377 | _getLeader () { 378 | return this._leaderId 379 | } 380 | 381 | // ------- 382 | // Commands 383 | 384 | command (command, options, done) { 385 | if (this._stateName !== 'leader') { 386 | if (!options.remote) { 387 | this._client.command(command, options, done) 388 | } else { 389 | done(new NotLeaderError(this._leaderId)) 390 | } 391 | } else { 392 | const consensuses = [this._peers.slice()] 393 | 394 | if (command === 'peers') { 395 | return this._getPeers(done) 396 | } 397 | 398 | // joint consensus 399 | if (command.type === 'join') { 400 | if (this._peers.indexOf(command.peer) < 0 && command.peer !== this.id.toString()) { 401 | this._peers.push(command.peer) 402 | } 403 | consensuses.push(this._peers.concat(command.peer)) 404 | } else if (command.type === 'leave') { 405 | consensuses.push(this._peers.filter(p => p !== command.peer)) 406 | } 407 | this._state.command(consensuses, command, options, (err, result) => { 408 | debug('command %s finished, err = %j, result = %j', command, err, result) 409 | if (err) { 410 | done(err) 411 | } else { 412 | this._db.command(this._dbServices, command, options, done) 413 | } 414 | }) 415 | } 416 | } 417 | 418 | readConsensus (done)
{ 419 | this.command({ type: 'read' }, { alsoWaitFor: this.id.toString() }, done) 420 | } 421 | 422 | waitFor (peer, done) { 423 | this.command({ type: 'read' }, { alsoWaitFor: peer }, done) 424 | } 425 | 426 | // ------- 427 | // Persistence 428 | 429 | _getPersistableState () { 430 | return { 431 | currentTerm: this._getTerm(), 432 | votedFor: this._votedFor, 433 | peers: this._peers 434 | } 435 | } 436 | 437 | getLogEntries () { 438 | return this._log.entries() 439 | } 440 | 441 | _applyEntries (entries, done) { 442 | this._db.applyEntries(entries, this._applyTopologyCommands.bind(this), done) 443 | } 444 | 445 | _applyTopologyCommands (commands) { 446 | debug('%s: _applyTopologyCommands %j', this.id, commands) 447 | commands.forEach(this._applyTopologyCommand.bind(this)) 448 | } 449 | 450 | _applyTopologyCommand (command) { 451 | debug('%s: applying topology command: %j', this.id, command) 452 | if (command.type === 'join') { 453 | if (command.peer !== this.id.toString()) { 454 | if (this._peers.indexOf(command.peer) === -1) { 455 | this._peers = this._peers.concat(command.peer) 456 | } 457 | this._state.join(command.peer) 458 | } 459 | this.emit('joined', command.peer) 460 | } else if (command.type === 'leave') { 461 | debug('%s: applying leave command: %j', this.id, command) 462 | if (this._leaving.indexOf(command.peer) < 0) { 463 | this._leaving.push(command.peer) 464 | timers.setTimeout(() => { 465 | this._segregatePeer(command.peer) 466 | this._leaving = this._leaving.filter(p => p !== command.peer) 467 | }, this._options.waitBeforeLeaveMS) 468 | } 469 | } 470 | } 471 | 472 | _segregatePeer (peer) { 473 | debug('%s: segregating peer', this.id, peer) 474 | this._peers = this._peers.filter(p => p !== peer) 475 | debug('%s: peers now are: %j', this.id, this._peers) 476 | this._state.leave(peer) 477 | if (this._network) { 478 | this._network.active.disconnect(peer) 479 | } 480 | this.emit('left', peer) 481 | debug('%s: emitted left for peer', this.id, peer) 482 | } 483 | 484 | persist (done) { 485 | debug('%s: persisting', this.id) 486 | this._db.persist(this._dbServices, done) 487 | } 488 | 489 | } 490 | 491 | module.exports = Node 492 | -------------------------------------------------------------------------------- /lib/peer-leader.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.peer-leader') 4 | const timers = require('timers') 5 | const EventEmitter = require('events') 6 | 7 | const BatchTransformStream = require('./utils/batch-transform-stream') 8 | 9 | class PeerLeader extends EventEmitter { 10 | 11 | constructor (address, node, options) { 12 | if (typeof address !== 'string') { 13 | throw new Error('need address to be a string') 14 | } 15 | 16 | super() 17 | 18 | this._address = address 19 | this._node = node 20 | this._options = options 21 | this._nextIndex = this._node.log._lastLogIndex + 1 22 | this._matchIndex = 0 23 | this._needsIndex = 0 24 | this._installingSnapshot = false 25 | this._lastSent = 0 26 | this._stopped = false 27 | 28 | this._appendEntries() 29 | } 30 | 31 | stop () { 32 | this._stopped = true 33 | this._clearAppendEntriesTimeout() 34 | } 35 | 36 | needsIndex (index) { 37 | if (index > this._needsIndex) { 38 | this._needsIndex = index 39 | } 40 | if (this._needsMore()) { 41 | timers.setImmediate(this._appendEntries.bind(this)) 42 | } 43 | } 44 | 45 | _needsMore () { 46 | return this._nextIndex <= this._needsIndex 47 | } 48 | 49 | _appendEntries () { 50 | 
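// Builds and sends one AppendEntries RPC to this follower: entries are taken
// from the leader's log starting at `_nextIndex` (capped by `batchEntriesLimit`),
// and the call doubles as the heartbeat, re-armed every `appendEntriesIntervalMS`.
// When the follower is so far behind that the needed entries are no longer in the
// log, it falls back to `_installSnapshot()` below.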
debug('sending AppendEntries to %s', this._address) 51 | 52 | if (this._stopped) { 53 | return 54 | } 55 | 56 | if (this._installingSnapshot) { 57 | this._resetAppendEntriesTimeout() 58 | return 59 | } 60 | 61 | const log = this._node.log 62 | const currentTerm = this._node.state.term() 63 | 64 | const entries = this._entries() 65 | if (entries) { 66 | debug('%s: entries for %s are: %j', this._node.state.id, this._address, entries) 67 | 68 | const previousEntry = this._previousEntry() 69 | const lastEntry = entries[entries.length - 1] 70 | const leaderCommit = log._commitIndex 71 | 72 | const appendEntriesArgs = { 73 | term: currentTerm, 74 | leaderId: this._node.state.id.toString(), 75 | prevLogIndex: previousEntry && previousEntry.i || 0, 76 | prevLogTerm: previousEntry && previousEntry.t || 0, 77 | entries, 78 | leaderCommit 79 | } 80 | 81 | this._lastSent = Date.now() 82 | 83 | this._resetAppendEntriesTimeout() 84 | 85 | this._node.network.rpc( 86 | { 87 | to: this._address, 88 | action: 'AppendEntries', 89 | params: appendEntriesArgs 90 | }, 91 | (err, reply) => { // callback 92 | debug('%s: got reply to AppendEntries from %s: %j', this._node.state.id, this._address, reply) 93 | if (err) { 94 | debug('%s: error on AppendEntries reply:\n%s', this._node.state.id, err.stack) 95 | } else if (reply && reply.params) { 96 | if (reply.params.success) { 97 | this._matchIndex = leaderCommit 98 | if (lastEntry) { 99 | this._nextIndex = lastEntry.i + 1 100 | } 101 | const commitedEntry = lastEntry || previousEntry 102 | const commitedIndex = commitedEntry && commitedEntry.i || 0 103 | this.emit('committed', this, commitedIndex) 104 | } else { 105 | debug('%s: reply next log index is %d', this._node.state.id, reply.params.nextLogIndex) 106 | if (reply.params.nextLogIndex !== undefined) { 107 | this._nextIndex = reply.params.nextLogIndex 108 | } else if (!reply.fake) { 109 | this._nextIndex -- 110 | } 111 | } 112 | 113 | if (!reply.fake && this._needsMore()) { 114 | timers.setImmediate(this._appendEntries.bind(this)) 115 | } 116 | } 117 | } 118 | ) 119 | } else { 120 | // no log entries for peer that's lagging behind 121 | debug('%s: peer %s is lagging behind (next index is %d), going to install snapshot', 122 | this._node.state.id, this._address, this._nextIndex) 123 | 124 | this._resetAppendEntriesTimeout() 125 | return this._installSnapshot() 126 | } 127 | } 128 | 129 | _clearAppendEntriesTimeout () { 130 | if (this._appendEntriesTimeout) { 131 | timers.clearTimeout(this._appendEntriesTimeout) 132 | } 133 | this._appendEntriesTimeout = null 134 | } 135 | 136 | _setAppendEntriesTimeout () { 137 | debug('%s: setting the append entries timeout to %d ms', 138 | this._node.state.id, this._options.appendEntriesIntervalMS) 139 | 140 | this._appendEntriesTimeout = timers.setTimeout( 141 | this._onAppendEntriesTimeout.bind(this), 142 | this._options.appendEntriesIntervalMS) 143 | } 144 | 145 | _resetAppendEntriesTimeout () { 146 | this._clearAppendEntriesTimeout() 147 | this._setAppendEntriesTimeout() 148 | } 149 | 150 | _onAppendEntriesTimeout () { 151 | debug('%s: AppendEntries timedout', this._node.state.id) 152 | this._appendEntries() 153 | } 154 | 155 | _entries () { 156 | debug('follower %s next index is %d', this._address, this._nextIndex) 157 | let entries = this._node.log.entriesFrom(this._nextIndex) 158 | if (entries) { 159 | entries = entries.slice(0, this._options.batchEntriesLimit) 160 | } 161 | return entries 162 | } 163 | 164 | _previousEntry () { 165 | return 
this._node.log.atLogIndex(this._nextIndex - 1) 166 | } 167 | 168 | // Install snapshot 169 | 170 | _installSnapshot () { 171 | debug('%s: _installSnapshot on %s', this._node.state.id, this._address) 172 | 173 | if (this._stopped) { 174 | return 175 | } 176 | 177 | const self = this 178 | const log = this._node.state.log 179 | const peers = this._node.network.peers() 180 | .concat(this._node.id.toString()) 181 | .filter(p => p !== this._address.toString()) 182 | 183 | this._clearAppendEntriesTimeout() 184 | let finished = false 185 | let offset = 0 186 | 187 | this._installingSnapshot = true 188 | 189 | const lastIncludedIndex = log._lastApplied 190 | const lastIncludedTerm = log._lastAppliedTerm 191 | 192 | const rs = this._node.state.db.state.createReadStream() 193 | const stream = rs.pipe( 194 | new BatchTransformStream({ 195 | batchSize: this._options.installSnapshotChunkSize 196 | }) 197 | ) 198 | 199 | stream.on('data', installSnapshot) 200 | 201 | function installSnapshot (data) { 202 | debug('%s: have chunks %j, finished = %j', self._node.state.id, data.chunks, data.finished) 203 | debug('%s: installSnapshot on leader: have chunks %j, finished = %j', self._node.state.id, data.chunks, data.finished) 204 | stream.pause() 205 | 206 | const installSnapshotArgs = { 207 | term: self._node.state.term(), 208 | leaderId: self._node.id.toString(), 209 | lastIncludedIndex, 210 | lastIncludedTerm, 211 | offset, 212 | peers, 213 | data: data.chunks, 214 | done: data.finished 215 | } 216 | 217 | offset += data.chunks.length 218 | 219 | self._node.network.rpc( 220 | { 221 | to: self._address, 222 | action: 'InstallSnapshot', 223 | params: installSnapshotArgs 224 | }, 225 | (err, reply) => { // callback 226 | debug('%s: got InstallSnapshot reply', self._node.state.id, err, reply) 227 | if (err) { 228 | cleanup() 229 | } else { 230 | if (data.finished) { 231 | debug('%s: data finished, setting next index of %j to %d', 232 | self._node.state.id, self._address, lastIncludedIndex) 233 | self._matchIndex = lastIncludedIndex 234 | self._nextIndex = lastIncludedIndex + 1 235 | cleanup() 236 | this.emit('committed', self, lastIncludedIndex) 237 | } else { 238 | debug('resuming stream...') 239 | stream.resume() 240 | } 241 | } 242 | } 243 | ) 244 | 245 | debug('%s: sent InstallSnapshot', self._node.state.id) 246 | } 247 | 248 | function cleanup () { 249 | if (!finished) { 250 | finished = true 251 | self._installingSnapshot = false 252 | self._resetAppendEntriesTimeout() 253 | rs.destroy() 254 | } 255 | } 256 | } 257 | 258 | state () { 259 | return { 260 | address: this._address, 261 | stopped: this._stopped, 262 | nextIndex: this._nextIndex, 263 | matchIndex: this._matchIndex, 264 | installingSnapshot: this._installingSnapshot, 265 | sentAppendEntriesAgoMS: Date.now() - this._lastSent 266 | } 267 | } 268 | } 269 | 270 | module.exports = PeerLeader 271 | -------------------------------------------------------------------------------- /lib/rpc.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.rpc') 4 | const once = require('once') 5 | const uuid = require('uuid').v4 6 | const timers = require('timers') 7 | 8 | module.exports = function createRPC (node, network, replies, emitter, defaults) { 9 | return function rpc (options, callback) { 10 | debug('%s: rpc to: %s, action: %s, params: %j', node.id, options.to, options.action, options.params) 11 | const term = node.term() 12 | const done = once(callback) 13 | const 
id = uuid() 14 | 15 | const timeoutMS = options.timeout || defaults.rpcTimeoutMS 16 | const timeout = timers.setTimeout(onTimeout, timeoutMS) 17 | const started = Date.now() 18 | 19 | network.write({ 20 | from: node.id.toString(), 21 | id, 22 | type: 'request', 23 | to: options.to, 24 | action: options.action, 25 | params: options.params 26 | }, err => { 27 | if (err) { 28 | cancel() 29 | done(err) 30 | } else { 31 | emitter.emit('message sent') 32 | emitter.emit('rpc sent', options.action) 33 | } 34 | }) 35 | replies.on('data', onReplyData) 36 | 37 | function onReplyData (message) { 38 | if (!message.fake) { 39 | timers.setImmediate(() => emitter.emit('rpc latency', Date.now() - started)) 40 | } 41 | 42 | const accept = ( 43 | message.type === 'reply' && 44 | message.from === options.to && 45 | message.id === id) 46 | 47 | if (node.term() > term) { 48 | onOutdatedTerm() 49 | } else if (accept) { 50 | debug('%s: this is a reply I was expecting: %j', node.id, message) 51 | cancel() 52 | const error = message.error 53 | done(error, !error && message) 54 | } 55 | } 56 | 57 | function onTimeout () { 58 | debug('RPC timeout') 59 | cancel() 60 | done(Object.assign(new Error(`timeout RPC to ${options.to}, action = ${options.action}`), { code: 'ETIMEOUT' })) 61 | } 62 | 63 | function onOutdatedTerm () { 64 | debug('Outdated term') 65 | cancel() 66 | done(Object.assign(new Error('outdated term'), { code: 'EOUTDATEDTERM' })) 67 | } 68 | 69 | function cancel () { 70 | replies.removeListener('data', onReplyData) 71 | timers.clearTimeout(timeout) 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /lib/states/base.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.states.base') 4 | const timers = require('timers') 5 | const EventEmitter = require('events') 6 | const async = require('async') 7 | 8 | class Base extends EventEmitter { 9 | 10 | constructor (node, options) { 11 | super() 12 | this.id = node.id 13 | this._node = node 14 | this._options = options 15 | this._stopped = true 16 | } 17 | 18 | start () { 19 | this._stopped = false 20 | this._resetElectionTimeout() 21 | } 22 | 23 | stop () { 24 | this._stopped = true 25 | this._clearElectionTimeout() 26 | } 27 | 28 | _clearElectionTimeout () { 29 | if (this._electionTimeout) { 30 | timers.clearTimeout(this._electionTimeout) 31 | } 32 | this._electionTimeout = null 33 | } 34 | 35 | _setElectionTimeout () { 36 | if (this._stopped) { 37 | return 38 | } 39 | 40 | if (this._options.electionTimeout) { 41 | this._electionTimeout = timers.setTimeout( 42 | this._onElectionTimeout.bind(this), 43 | this._randomElectionTimeout()) 44 | } 45 | } 46 | 47 | _resetElectionTimeout () { 48 | debug('%s: resetting election timeout', this.id) 49 | this._clearElectionTimeout() 50 | this._setElectionTimeout() 51 | } 52 | 53 | _onElectionTimeout () { 54 | debug('%s: election timeout', this.id) 55 | this.emit('election timeout') 56 | this._electionTimeout = undefined 57 | this._node.state.transition('candidate', true) 58 | } 59 | 60 | _randomElectionTimeout () { 61 | const min = this._options.electionTimeoutMinMS 62 | const max = this._options.electionTimeoutMaxMS 63 | return min + Math.floor(Math.random() * (max - min)) 64 | } 65 | 66 | handleRequest (message, done) { 67 | debug('%s: handling request %j', this.id, message) 68 | 69 | switch (message.action) { 70 | 71 | case 'AppendEntries': 72 | 
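// AppendEntries also serves as the leader's heartbeat: when the sender's term is
// acceptable, handling it resets the election timeout (see _appendEntriesReceived below).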
this._appendEntriesReceived(message, done) 73 | break 74 | 75 | case 'RequestVote': 76 | this._requestVoteReceived(message) 77 | done() 78 | break 79 | 80 | case 'InstallSnapshot': 81 | this._installSnapshotReceived(message, done) 82 | break 83 | 84 | case 'Command': 85 | this._handleCommand(message, done) 86 | break 87 | 88 | default: 89 | if (this._handleRequest) { 90 | this._handleRequest(message, done) 91 | } else { 92 | debug('%s: not handling message %j', this.id, message) 93 | done() 94 | } 95 | } 96 | } 97 | 98 | _handleCommand (message, done) { 99 | done() 100 | debug('handling command %j', message) 101 | this._node.command(message.params.command, message.params.options, (err, result) => { 102 | const currentTerm = this._node.state.term() 103 | if (err) { 104 | this._node.network.reply( 105 | message.from, 106 | message.id, 107 | { 108 | replyTo: 'Command', 109 | term: currentTerm, 110 | error: { 111 | message: err.message, 112 | code: err.code, 113 | leader: this._node.leader() 114 | } 115 | }) 116 | } else { 117 | this._node.network.reply( 118 | message.from, 119 | message.id, 120 | { 121 | replyTo: 'Command', 122 | term: currentTerm, 123 | result 124 | }) 125 | } 126 | }) 127 | } 128 | 129 | _requestVoteReceived (message, done) { 130 | debug('%s: request vote received: %j', this.id, message) 131 | 132 | const voteGranted = this._perhapsGrantVote(message) 133 | 134 | if (voteGranted) { 135 | debug('vote granted') 136 | this._node.state.setVotedFor(message.from) 137 | this._resetElectionTimeout() 138 | this._node.state.transition('follower', true) 139 | } 140 | 141 | this._node.network.reply( 142 | message.from, 143 | message.id, 144 | { 145 | term: this._node.state.term(), 146 | voteGranted 147 | }, done) 148 | } 149 | 150 | _perhapsGrantVote (message) { 151 | debug('%s: perhaps grant vote to %j', this.id, message) 152 | const currentTerm = this._node.state.term() 153 | debug('%s: current term is: %d', this.id, currentTerm) 154 | const votedFor = this._node.state.getVotedFor() 155 | const termIsAcceptable = (message.params.term >= currentTerm) 156 | const votedForIsAcceptable = (currentTerm < message.params.term) || !votedFor || (votedFor === message.from) 157 | const logIndexIsAcceptable = (message.params.lastLogIndex >= this._node.log._lastLogIndex) 158 | 159 | const voteGranted = termIsAcceptable && votedForIsAcceptable && logIndexIsAcceptable 160 | 161 | if (!voteGranted) { 162 | debug('%s: vote was not granted because: %j', this.id, { 163 | termIsAcceptable, votedForIsAcceptable, logIndexIsAcceptable 164 | }) 165 | } 166 | 167 | return voteGranted 168 | } 169 | 170 | _appendEntriesReceived (message, done) { 171 | const self = this 172 | const log = this._node.log 173 | const params = message.params || {} 174 | 175 | let success = false 176 | let entry 177 | let reason 178 | let prevLogMatches = false 179 | let commitIndex = this._node.log._commitIndex 180 | const currentTerm = this._node.state.term() 181 | const termIsAcceptable = (params.term >= currentTerm) 182 | if (!termIsAcceptable) { 183 | reason = 'term is not acceptable' 184 | debug('term is not acceptable') 185 | } 186 | 187 | if (termIsAcceptable) { 188 | this._resetElectionTimeout() 189 | debug('%s: term is acceptable', this.id) 190 | entry = log.atLogIndex(params.prevLogIndex) 191 | debug('%s: entry at previous log index: %j', this.id, entry) 192 | prevLogMatches = 193 | (!params.prevLogIndex) || 194 | (!entry && (log._lastLogIndex === params.prevLogIndex && log._lastLogTerm === params.prevLogTerm)) || 195 | 
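// ...or the entry stored at prevLogIndex matches the leader's prevLogTerm and
// prevLogIndex (the Raft log-matching check):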
(entry && entry.t === params.prevLogTerm && entry.i === params.prevLogIndex) 196 | 197 | debug('%s: previous log matches: %j', this.id, prevLogMatches) 198 | if (!prevLogMatches) { 199 | reason = `prev log term or index does not match: ${ 200 | entry 201 | ? `prevLogIndex was ${entry.i} and prevLogTerm was ${entry.t}` 202 | : `no existing last entry. last log index is ${log._lastLogIndex} and last log term is ${log._lastLogTerm}` 203 | }` 204 | 205 | debug( 206 | '%s: %s', 207 | this.id, 208 | reason) 209 | debug('%s: last log index: %j, last log term: %j', this.id, log._lastLogIndex, log._lastLogTerm) 210 | } else { 211 | success = true 212 | const newEntries = message.params.entries 213 | log.appendAfter(message.params.prevLogIndex || 0, newEntries) 214 | const leaderCommit = message.params.leaderCommit 215 | if (leaderCommit > commitIndex) { 216 | commitIndex = leaderCommit 217 | } 218 | } 219 | } 220 | 221 | debug('%s: AppendEntries success? %j', this.id, success) 222 | 223 | if (success && commitIndex > 0) { 224 | this._node.log.commit(commitIndex, (err) => { 225 | if (err) { 226 | success = false 227 | reason = err.message 228 | } 229 | reply() 230 | }) 231 | } else { 232 | reply() 233 | } 234 | 235 | function reply () { 236 | let nextLogIndex = 0 237 | if (!success && entry) { 238 | nextLogIndex = log.lastIndexForTerm(entry.t) 239 | } else if (success) { 240 | nextLogIndex = log._lastLogIndex + 1 241 | } 242 | self._node.network.reply( 243 | message.from, 244 | message.id, 245 | { 246 | replyTo: 'AppendEntries', 247 | term: currentTerm, 248 | nextLogIndex, 249 | success, 250 | reason 251 | }, done) 252 | 253 | debug('AppendEntries replied with success = %j to %s', success, message.from) 254 | 255 | if (termIsAcceptable) { 256 | self._resetElectionTimeout() 257 | } 258 | 259 | if (success) { 260 | self._node.state.transition('follower') 261 | } 262 | } 263 | } 264 | 265 | _installSnapshotReceived (message, done) { 266 | debug('%s: _installSnapshotReceived %j', this.id, message) 267 | 268 | this._resetElectionTimeout() 269 | 270 | const self = this 271 | const tasks = [] 272 | const db = this._node.state.db.state 273 | 274 | if (message.params.offset === 0) { 275 | tasks.push(db.clear.bind(db)) 276 | } 277 | 278 | if (message.params.done) { 279 | const log = this._node.state.log 280 | log._lastLogIndex = message.params.lastIncludedIndex 281 | log._commitIndex = message.params.lastIncludedIndex 282 | log._lastApplied = message.params.lastIncludedIndex 283 | log._lastLogTerm = message.params.lastIncludedTerm 284 | log._lastAppliedTerm = message.params.lastIncludedTerm 285 | if (message.params.peers) { 286 | this._node.network.setPeers(message.params.peers) 287 | } 288 | } 289 | 290 | tasks.push(insertData) 291 | tasks.push(reply) 292 | 293 | async.series(tasks, done) 294 | 295 | function insertData (cb) { 296 | const data = message.params.data 297 | if (!data || !data.length) { 298 | cb() 299 | } else { 300 | db.batch(data, cb) 301 | } 302 | } 303 | 304 | function reply (cb) { 305 | self._node.network.reply( 306 | message.from, 307 | message.id, 308 | { 309 | replyTo: 'InstallSnapshot', 310 | term: self._node.state.term() 311 | }, 312 | cb) 313 | 314 | self._resetElectionTimeout() 315 | } 316 | } 317 | 318 | join () {} 319 | leave () {} 320 | 321 | } 322 | 323 | module.exports = Base 324 | -------------------------------------------------------------------------------- /lib/states/candidate.js: -------------------------------------------------------------------------------- 1 | 'use 
strict' 2 | 3 | const debug = require('debug')('skiff.states.candidate') 4 | const Base = require('./base') 5 | 6 | class Candidate extends Base { 7 | 8 | start () { 9 | debug('%s is candidate', this.id) 10 | this.name = 'candidate' 11 | super.start() 12 | this._stopped = false 13 | this._node.state.incrementTerm() 14 | // vote for self 15 | this._node.state.setVotedFor(this.id) 16 | process.nextTick(this._gatherVotes.bind(this)) 17 | } 18 | 19 | stop () { 20 | super.stop() 21 | this._stopped = true 22 | } 23 | 24 | _gatherVotes () { 25 | debug('gathering votes...') 26 | const self = this 27 | let majorityReached = false 28 | let votedForMe = 1 29 | let voteCount = 1 30 | 31 | maybeDone() 32 | 33 | this._node.network.peers().forEach(peer => { 34 | debug('candidate requesting vote from %s', peer) 35 | const requestVoteArgs = { 36 | term: this._node.state.term(), 37 | candidateId: this.id, 38 | lastLogIndex: this._node.log._lastLogIndex, 39 | lastLogTerm: this._node.log._lastLogTerm 40 | } 41 | 42 | this._node.network.rpc( 43 | { 44 | to: peer, 45 | action: 'RequestVote', 46 | params: requestVoteArgs 47 | }, 48 | // eslint-disable-next-line handle-callback-err 49 | (err, reply) => { // callback 50 | voteCount++ 51 | if ((!this._stopped) && reply && reply.params.voteGranted) { 52 | votedForMe++ 53 | maybeDone() 54 | } 55 | } 56 | ) 57 | }) 58 | 59 | function maybeDone () { 60 | debug('maybeDone()') 61 | if (!majorityReached) { 62 | if (self._node.network.isMajority(votedForMe)) { 63 | // won 64 | majorityReached = true 65 | debug('%s: election won', self.id) 66 | self._node.state.transition('leader') 67 | } else { 68 | debug('still don\'t have majority') 69 | } 70 | if (self._node.network.isMajority(voteCount - votedForMe)) { 71 | // lost 72 | debug('%s: election lost', self.id) 73 | majorityReached = true 74 | self._resetElectionTimeout() 75 | } 76 | } 77 | } 78 | } 79 | } 80 | 81 | module.exports = Candidate 82 | -------------------------------------------------------------------------------- /lib/states/follower.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.states.follower') 4 | const Base = require('./base') 5 | 6 | class Follower extends Base { 7 | 8 | start () { 9 | debug('%s is follower', this.id) 10 | this.name = 'follower' 11 | super.start() 12 | } 13 | 14 | } 15 | 16 | module.exports = Follower 17 | -------------------------------------------------------------------------------- /lib/states/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const stateModules = { 4 | follower: require('./follower'), 5 | candidate: require('./candidate'), 6 | leader: require('./leader'), 7 | weakened: require('./weakened') 8 | } 9 | 10 | function findState (stateName) { 11 | const State = stateModules[stateName] 12 | if (!State) { 13 | throw new Error('state not found: ' + stateName) 14 | } 15 | return State 16 | } 17 | 18 | module.exports = findState 19 | -------------------------------------------------------------------------------- /lib/states/leader.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.states.leader') 4 | const async = require('async') 5 | const timers = require('timers') 6 | const once = require('once') 7 | 8 | const Base = require('./base') 9 | const PeerLeader = require('../peer-leader') 10 | 11 | class Leader extends 
Base { 12 | 13 | constructor (node, _options) { 14 | const options = Object.assign({}, _options || {}, { electionTimeout: false }) 15 | super(node, options) 16 | this.name = 'leader' 17 | } 18 | 19 | start () { 20 | debug('%s is leader', this.id) 21 | this._followers = this._node.network.peers().reduce((followers, address) => { 22 | followers[address] = new PeerLeader(address, this._node, this._options) 23 | return followers 24 | }, {}) 25 | super.start() 26 | this._waitForConsensus(this._node.state.log._commitIndex, {}, this._node.network.peers()) 27 | } 28 | 29 | stop () { 30 | Object.keys(this._followers) 31 | .map(address => this._followers[address]) 32 | .forEach(follower => { 33 | follower.stop() 34 | follower.removeAllListeners() 35 | }) 36 | 37 | super.stop() 38 | } 39 | 40 | join (address) { 41 | const follower = this._followers[address] 42 | if (!follower) { 43 | this._followers[address] = new PeerLeader(address, this._node, this._options) 44 | } 45 | } 46 | 47 | leave (address) { 48 | const follower = this._followers[address] 49 | if (follower) { 50 | follower.stop() 51 | delete this._followers[address] 52 | } 53 | } 54 | 55 | peers () { 56 | return Object.keys(this._followers) 57 | .map(addr => this._followers[addr]) 58 | .map(peer => peer.state()) 59 | } 60 | 61 | command (consensuses, command, options, done) { 62 | const index = this._node.log.push(command) 63 | 64 | process.nextTick(() => { 65 | async.eachSeries(consensuses, this._waitForConsensus.bind(this, index, options), (err) => { 66 | if (err) { 67 | done(err) 68 | } else { 69 | this._node.state.log.commit(index, done) 70 | } 71 | }) 72 | }) 73 | } 74 | 75 | _waitForConsensus (waitingForIndex, options, consensus, _done) { 76 | debug('_waitForConsensus %d', waitingForIndex) 77 | const done = once(_done || noop) 78 | 79 | // vote for self 80 | let votes = 1 81 | 82 | if (!consensus.length) { 83 | return done() 84 | } 85 | 86 | let waitingFor = options.alsoWaitFor 87 | if (!Array.isArray(waitingFor)) { 88 | waitingFor = [waitingFor] 89 | } 90 | waitingFor = waitingFor.filter(address => address && address !== this.id) 91 | 92 | // TODO: consider using another options as timeout value (waitForConsensusTimeout?) 
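// Consensus bookkeeping: the leader counts its own implicit vote, then listens for
// each follower's PeerLeader 'committed' event until a quorum of `consensus` has
// replicated up to `waitingForIndex` and every address in `options.alsoWaitFor`
// has confirmed as well; otherwise it fails with ETIMEOUT after `rpcTimeoutMS`.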
93 | const timeout = timers.setTimeout(onTimeout, this._options.rpcTimeoutMS) 94 | const peers = consensus.map(address => { 95 | let follower = this._followers[address] 96 | if (!follower) { 97 | follower = this._followers[address] = new PeerLeader(address, this._node, this._options) 98 | } 99 | return follower 100 | }) 101 | 102 | peers.forEach(peer => { 103 | peer.on('committed', onPeerCommit) 104 | peer.needsIndex(waitingForIndex) 105 | }) 106 | 107 | function onPeerCommit (peer, peerIndex) { 108 | if (peerIndex >= waitingForIndex) { 109 | votes++ 110 | peer.removeListener('committed', onPeerCommit) 111 | waitingFor = waitingFor.filter(addr => addr !== peer._address) 112 | } 113 | if (isMajority(consensus, votes) && !waitingFor.length) { 114 | debug('have consensus for index %d', waitingForIndex) 115 | cleanup() 116 | done() 117 | } 118 | } 119 | 120 | function onTimeout () { 121 | cleanup() 122 | const err = new Error('timedout waiting for consensus') 123 | err.code = 'ETIMEOUT' 124 | done(err) 125 | } 126 | 127 | function cleanup () { 128 | timers.clearTimeout(timeout) 129 | peers.forEach(peer => { 130 | peer.removeListener('committed', onPeerCommit) 131 | }) 132 | } 133 | } 134 | 135 | _onElectionTimeout () { 136 | // do nothing, we're the leader 137 | } 138 | } 139 | 140 | module.exports = Leader 141 | 142 | function noop () {} 143 | 144 | function isMajority (consensus, count) { 145 | const quorum = Math.floor((consensus.length + 1) / 2) + 1 146 | return consensus.length && count >= quorum 147 | } 148 | -------------------------------------------------------------------------------- /lib/states/weakened.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.states.weakened') 4 | const Base = require('./base') 5 | 6 | class Follower extends Base { 7 | 8 | start () { 9 | debug('%s is weakened', this.id) 10 | this.name = 'weakened' 11 | this._stopped = false 12 | super.start() 13 | this._node.state.untilNotWeakened(this._noLongerWeakened.bind(this)) 14 | } 15 | 16 | stop () { 17 | super.stop() 18 | this._stopped = true 19 | } 20 | 21 | _noLongerWeakened () { 22 | if (!this._stopped) { 23 | this._node.state.transition('follower') 24 | } 25 | } 26 | 27 | _onElectionTimeout () { 28 | // do nothing 29 | } 30 | 31 | } 32 | 33 | module.exports = Follower 34 | -------------------------------------------------------------------------------- /lib/utils/batch-transform-stream.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const Transform = require('stream').Transform 4 | 5 | const defaultOptions = { 6 | batchSize: 10, 7 | objectMode: true 8 | } 9 | 10 | class BatchTransformStream extends Transform { 11 | 12 | constructor (_options) { 13 | const options = Object.assign({}, defaultOptions, _options || {}) 14 | super(options) 15 | this._options = options 16 | this._chunks = [] 17 | this._finished = false 18 | } 19 | 20 | _transform (chunk, enc, callback) { 21 | this._chunks.push(chunk) 22 | process.nextTick(() => { 23 | this._maybePush() 24 | callback() 25 | }) 26 | } 27 | 28 | _maybePush () { 29 | if (this._chunks.length >= this._options.batchSize) { 30 | this._definitelyPush() 31 | } 32 | } 33 | 34 | _definitelyPush () { 35 | const chunks = this._chunks 36 | this._chunks = [] 37 | this.push({ 38 | finished: this._finished, 39 | chunks 40 | }) 41 | } 42 | 43 | _flush () { 44 | this._finished = true 45 | this._definitelyPush() 46 | }
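// Usage sketch (not from this file; `source` stands in for any object-mode
// Readable, such as the db read stream used for snapshots in peer-leader.js):
//
//   const batcher = new BatchTransformStream({ batchSize: 2 })
//   source.pipe(batcher).on('data', ({ chunks, finished }) => {
//     // `chunks` holds up to `batchSize` source objects; `finished` is true
//     // only for the final batch pushed from _flush()
//   })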
47 | } 48 | 49 | module.exports = BatchTransformStream 50 | -------------------------------------------------------------------------------- /lib/utils/clear-db.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const async = require('async') 4 | const once = require('once') 5 | 6 | function clearDB (_cb) { 7 | const queue = async.queue(this.del.bind(this)) 8 | const cb = once(_cb) 9 | queue.drain = cb 10 | 11 | let hadData = false 12 | 13 | this.createKeyStream() 14 | .on('data', (key) => { 15 | hadData = true 16 | queue.push(key) 17 | }) 18 | .once('end', () => { 19 | if (!hadData) { 20 | cb() 21 | } 22 | }) 23 | .once('error', cb) 24 | } 25 | 26 | module.exports = clearDB 27 | -------------------------------------------------------------------------------- /lib/utils/not-leader-error.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | class NotLeaderError extends Error { 4 | constructor (leader) { 5 | super('not the leader') 6 | this.code = 'ENOTLEADER' 7 | this.leader = leader 8 | } 9 | } 10 | 11 | module.exports = NotLeaderError 12 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "skiff", 3 | "version": "1.10.0", 4 | "description": "Raft for Node.js", 5 | "main": "skiff.js", 6 | "scripts": { 7 | "test": "npm run quick-tests", 8 | "quick-tests": "lab -vl test/active-network.js && lab -vl test/passive-network.js && lab -vl test/incoming-dispatcher.js && lab -vl test/election.js && lab -vl test/log-replication.js && lab -vl test/log-replication-catchup.js && lab -vl test/log-compaction.js && lab -vl test/leveldown.js && lab -vl test/levelup.js && lab -vl test/persistence.js && lab -vl test/remote-commands.js && lab -vl test/weakening.js", 9 | "resilience-tests": "npm run resilience-tests-memory && npm run resilience-tests-disk", 10 | "resilience-tests-memory": "lab test/resilience/resilience-chaos-memory.js && lab test/resilience/resilience-order-memory.js", 11 | "resilience-tests-disk": "lab test/resilience/resilience-order-disk.js && lab test/resilience/resilience-chaos-disk.js", 12 | "test-some": "lab -vl test/active-network.js test/election.js", 13 | "test-coverage": "node --harmony node_modules/istanbul/lib/cli.js cover -- lab -vl && istanbul check-coverage", 14 | "style": "eslint skiff.js lib" 15 | }, 16 | "repository": { 17 | "type": "git", 18 | "url": "git+https://github.com/pgte/skiff.git" 19 | }, 20 | "keywords": [ 21 | "raft", 22 | "distributed", 23 | "consensus", 24 | "leveldb" 25 | ], 26 | "author": "pgte", 27 | "license": "MIT", 28 | "bugs": { 29 | "url": "https://github.com/pgte/skiff/issues" 30 | }, 31 | "homepage": "https://github.com/pgte/skiff#readme", 32 | "devDependencies": { 33 | "code": "^3.0.1", 34 | "eslint": "^3.1.1", 35 | "eslint-config-standard": "^5.3.5", 36 | "eslint-plugin-promise": "^2.0.0", 37 | "eslint-plugin-standard": "^2.0.0", 38 | "istanbul": "^0.4.4", 39 | "lab": "^10.9.0", 40 | "left-pad": "^1.1.1", 41 | "memdown": "^1.2.0", 42 | "mkdirp": "^0.5.1", 43 | "pre-commit": "^1.1.3", 44 | "rimraf": "^2.5.4", 45 | "split": "^1.0.0", 46 | "wreck": "^10.0.0" 47 | }, 48 | "pre-commit": [ 49 | "style", 50 | "test" 51 | ], 52 | "dependencies": { 53 | "abstract-leveldown": "^2.6.0", 54 | "async": "^2.0.0", 55 | "concat-stream": "^1.5.1", 56 | "debug": "^2.2.0", 57 | "deepmerge": "^0.2.10", 58 | 
"level-sublevel": "^6.5.4", 59 | "leveldown": "^1.4.6", 60 | "levelup": "^1.3.2", 61 | "msgpack5": "^3.4.0", 62 | "multiaddr": "^2.0.2", 63 | "once": "^1.3.3", 64 | "reconnect-core": "^1.3.0", 65 | "through2": "^2.0.1", 66 | "uuid": "^2.0.2" 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /skiff-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yldio/skiff/5174b70e493731e56e3fd9de0a7209f7929e85a6/skiff-logo.png -------------------------------------------------------------------------------- /skiff.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('skiff.node') 4 | const merge = require('deepmerge') 5 | const EventEmitter = require('events') 6 | const async = require('async') 7 | const Levelup = require('levelup') 8 | 9 | const Address = require('./lib/address') 10 | const Network = require('./lib/network') 11 | const IncomingDispatcher = require('./lib/incoming-dispatcher') 12 | const Node = require('./lib/node') 13 | const CommandQueue = require('./lib/command-queue') 14 | const Commands = require('./lib/commands') 15 | const DB = require('./lib/db') 16 | const Leveldown = require('./lib/leveldown') 17 | const Iterator = require('./lib/iterator') 18 | const defaultOptions = require('./lib/default-options') 19 | 20 | const importantStateEvents = [ 21 | 'warning', 22 | 'new state', 23 | 'election timeout', 24 | 'leader', 25 | 'rpc latency', 26 | 'joined', 27 | 'left' 28 | ] 29 | 30 | class Shell extends EventEmitter { 31 | 32 | constructor (id, _options) { 33 | super() 34 | this.id = Address(id) 35 | this._options = merge(defaultOptions, _options || {}) 36 | debug('creating node %s with peers %j', id, this._options.peers) 37 | this._ownsNetwork = false 38 | 39 | this._db = new DB(this._options.location, this.id, this._options.db, this._options.levelup) 40 | 41 | this._dispatcher = new IncomingDispatcher({id}) 42 | 43 | const connections = { 44 | isConnectedTo: (addr) => this._connections.indexOf(addr) >= 0 45 | } 46 | // connections 47 | this._connections = this._options.peers.filter(addr => addr !== id) 48 | 49 | this.on('connect', peer => { 50 | if (this._connections.indexOf(peer) < 0) { 51 | this._connections.push(peer) 52 | } 53 | }) 54 | 55 | this.on('disconnect', peer => { 56 | this._connections = this._connections.filter(c => c !== peer) 57 | }) 58 | 59 | this._node = new Node( 60 | this.id, 61 | connections, 62 | this._dispatcher, 63 | this._db, 64 | this.peers.bind(this), 65 | this._options) 66 | 67 | // propagate important events 68 | importantStateEvents.forEach(event => this._node.on(event, this.emit.bind(this, event))) 69 | 70 | this._commandQueue = new CommandQueue() 71 | this._commands = new Commands(this.id, this._commandQueue, this._node) 72 | 73 | this._startState = 'stopped' 74 | 75 | // stats 76 | this._stats = { 77 | messagesReceived: 0, 78 | messagesSent: 0, 79 | rpcSent: 0, 80 | rpcReceived: 0, 81 | rpcReceivedByType: { 82 | 'AppendEntries': 0, 83 | 'RequestVote': 0, 84 | 'InstallSnapshot': 0 85 | }, 86 | rpcSentByType: { 87 | 'AppendEntries': 0, 88 | 'RequestVote': 0, 89 | 'InstallSnapshot': 0 90 | } 91 | } 92 | this._node.on('message received', () => { 93 | this._stats.messagesReceived ++ 94 | }) 95 | this._node.on('message sent', () => { 96 | this._stats.messagesSent ++ 97 | }) 98 | this._node.on('rpc sent', (type) => { 99 | this._stats.rpcSent ++ 100 
| this._stats.rpcSentByType[type] ++ 101 | }) 102 | this._node.on('rpc received', (type) => { 103 | this._stats.rpcReceived ++ 104 | this._stats.rpcReceivedByType[type] ++ 105 | }) 106 | } 107 | 108 | // ------ Start and stop 109 | 110 | start (cb) { 111 | debug('%s: start state is %s', this.id, this._startState) 112 | if (this._startState === 'stopped') { 113 | this._startState = 'starting' 114 | debug('starting node %s', this.id) 115 | async.parallel( 116 | [ 117 | this._startNetwork.bind(this), 118 | this._loadPersistedState.bind(this) 119 | ], 120 | err => { 121 | debug('%s: done starting', this.id) 122 | if (err) { 123 | this._startState = 'stopped' 124 | } else { 125 | this._startState = 'started' 126 | this.emit('started') 127 | } 128 | this._node._transition('follower') 129 | cb(err) 130 | }) 131 | } else if (this._startState === 'started') { 132 | process.nextTick(cb) 133 | } else if (this._startState === 'starting') { 134 | this.once('started', cb) 135 | } 136 | } 137 | 138 | _startNetwork (cb) { 139 | const network = this._getNetworkConstructors() 140 | 141 | this._network = { 142 | passive: network.passive.node(this.id), 143 | active: network.active.node(this.id) 144 | } 145 | 146 | this._network.passive.pipe(this._dispatcher, { end: false }) 147 | this._network.active.pipe(this._dispatcher, { end: false }) 148 | 149 | this._node.passive.pipe(this._network.passive, { end: false }) 150 | this._node.active.pipe(this._network.active, { end: false }) 151 | 152 | this._network.active.on('connect', peer => { 153 | this.emit('connect', peer) 154 | }) 155 | this._network.active.on('disconnect', peer => { 156 | this.emit('disconnect', peer) 157 | }) 158 | 159 | if (cb) { 160 | if (network.passive.listening()) { 161 | process.nextTick(cb) 162 | } else { 163 | network.passive.once('listening', () => { 164 | cb() // do not carry event args into callback 165 | }) 166 | } 167 | } 168 | } 169 | 170 | _getNetworkConstructors () { 171 | const address = this.id.nodeAddress() 172 | let constructors = this._options.network 173 | if (!constructors) { 174 | this._ownsNetwork = constructors = Network({ 175 | passive: { 176 | server: merge( 177 | { 178 | port: address.port, 179 | host: address.address 180 | }, 181 | this._options.server 182 | ) 183 | } 184 | }) 185 | } 186 | 187 | return constructors 188 | } 189 | 190 | _loadPersistedState (cb) { 191 | this._db.load((err, results) => { 192 | if (err) { 193 | cb(err) 194 | } else { 195 | this._node._log.setEntries(results.log) 196 | if (results.meta.currentTerm) { 197 | this._node._setTerm(results.meta.currentTerm) 198 | } 199 | if (results.meta.votedFor) { 200 | this._node._setVotedFor(results.meta.votedFor) 201 | } 202 | if (results.meta.peers) { 203 | this._node._peers = results.meta.peers 204 | } 205 | cb() 206 | } 207 | }) 208 | } 209 | 210 | stop (cb) { 211 | if (this._network) { 212 | if (cb) { 213 | if (this._ownsNetwork) { 214 | this._ownsNetwork.passive.once('closed', cb) 215 | } else { 216 | process.nextTick(cb) 217 | } 218 | } 219 | if (this._ownsNetwork) { 220 | this._ownsNetwork.passive.end() 221 | this._ownsNetwork.active.end() 222 | this._ownsNetwork = undefined 223 | } 224 | this._network = undefined 225 | } else if (cb) { 226 | process.nextTick(cb) 227 | } 228 | 229 | this._node.stop() 230 | } 231 | 232 | // ------ Topology 233 | 234 | join (address, done) { 235 | debug('%s: joining %s', this.id, address) 236 | this.start(err => { 237 | if (err) { 238 | done(err) 239 | } else { 240 | this._node.join(address, done) 241 | } 242 | }) 243 
| } 244 | 245 | leave (address, done) { 246 | debug('%s: leaving %s', this.id, address) 247 | this.start(err => { 248 | if (err) { 249 | done(err) 250 | } else { 251 | this._node.leave(address, done) 252 | } 253 | }) 254 | } 255 | 256 | // ------ Commands 257 | 258 | command (command, options, callback) { 259 | if (typeof options === 'function') { 260 | callback = options 261 | options = {} 262 | } 263 | if (this.is('leader')) { 264 | this._commandQueue.write({command, options, callback}) 265 | } else { 266 | // bypass the queue if we're not the leader 267 | this._node.command(command, options, callback) 268 | } 269 | } 270 | 271 | readConsensus (callback) { 272 | this._node.readConsensus(callback) 273 | } 274 | 275 | // ------- State 276 | 277 | is (state) { 278 | return this._node.is(state) 279 | } 280 | 281 | weaken (duration) { 282 | this._node.weaken(duration) 283 | } 284 | 285 | // -------- Level* 286 | 287 | leveldown () { 288 | return new Leveldown(this) 289 | } 290 | 291 | levelup (options) { 292 | return Levelup(this.id, Object.assign({}, { 293 | db: this.leveldown.bind(this), 294 | valueEncoding: 'json' 295 | }, options)) 296 | } 297 | 298 | iterator (options) { 299 | return new Iterator(this, this._db.state, options) 300 | } 301 | 302 | // -------- Stats 303 | 304 | stats () { 305 | return this._stats 306 | } 307 | 308 | connections () { 309 | return this._connections 310 | } 311 | 312 | peers (done) { 313 | this._node.peers(this._network, done) 314 | } 315 | 316 | term () { 317 | return this._node._getTerm() 318 | } 319 | 320 | logEntries () { 321 | return this._node.getLogEntries() 322 | } 323 | } 324 | 325 | createNodeShell.createNetwork = function createNetwork (options) { 326 | return Network(options) 327 | } 328 | 329 | module.exports = createNodeShell 330 | 331 | function createNodeShell (id, options) { 332 | return new Shell(id, options) 333 | } 334 | -------------------------------------------------------------------------------- /test/active-network.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const it = lab.it 7 | const expect = require('code').expect 8 | 9 | const async = require('async') 10 | const Address = require('../lib/address') 11 | const net = require('net') 12 | const timers = require('timers') 13 | const Msgpack = require('msgpack5') 14 | 15 | const Network = require('../lib/network/active') 16 | 17 | const serverAddresses = [ 18 | '/ip4/127.0.0.1/tcp/8080/what/ever', 19 | '/ip4/127.0.0.1/tcp/8081/what/ever', 20 | '/ip4/127.0.0.1/tcp/8082/what/ever' 21 | ] 22 | 23 | const A_BIT = 500 24 | 25 | describe('active network', () => { 26 | let network, servers 27 | const serverData = serverAddresses.map(() => []) 28 | const serverConns = serverAddresses.map(() => undefined) 29 | const serverHandlers = serverAddresses.map((server, index) => { 30 | return function (conn) { 31 | const msgpack = Msgpack() 32 | conn.pipe(msgpack.decoder()).on('data', onServerData) 33 | 34 | const reply = msgpack.encoder() 35 | reply.pipe(conn) 36 | 37 | function onServerData (data) { 38 | serverData[index].push(data) 39 | const message = Object.assign({}, data, { isReply: true }) 40 | reply.write(message) 41 | } 42 | } 43 | }) 44 | 45 | before(done => { 46 | let listening = 0 47 | let lindex = -1 48 | 49 | async.mapSeries(serverAddresses, (addr, cb) => { 50 | const index = ++lindex 51 | const maddr = 
Address(addr) 52 | const server = net.createServer(onServerConnection) 53 | const listenAddr = maddr.nodeAddress() 54 | server.listen({port: listenAddr.port, host: listenAddr.address}, () => { 55 | cb(null, server) 56 | }) 57 | 58 | function onServerConnection (conn) { 59 | serverConns[index] = conn 60 | serverHandlers[index](conn) 61 | conn.once('finish', () => { serverConns[index] = undefined }) 62 | } 63 | 64 | }, (err, _servers) => { 65 | if (err) { 66 | done(err) 67 | } else { 68 | servers = _servers 69 | done() 70 | } 71 | }) 72 | }) 73 | 74 | it('can be created', done => { 75 | network = Network() 76 | done() 77 | }) 78 | 79 | it('can be used to send a message to a peer', done => { 80 | const node = network.node(serverAddresses[0]) 81 | node.once('data', message => { 82 | expect(message).to.equal({to: serverAddresses[0], what: 'hey', isReply: true}) 83 | done() 84 | }) 85 | node.write({to: serverAddresses[0], what: 'hey'}) 86 | }) 87 | 88 | it('peer gets the message', done => { 89 | expect(serverData[0].length).to.equal(1) 90 | expect(serverData[0].shift()).to.equal({to: serverAddresses[0], what: 'hey'}) 91 | done() 92 | }) 93 | 94 | it('allows message to unconnected peer', done => { 95 | network.write({to: '/ip4/127.0.0.1/tcp/8083', what: 'hey'}, done) 96 | }) 97 | 98 | it('waits a bit', done => { 99 | timers.setTimeout(done, A_BIT) 100 | }) 101 | 102 | it('allows peer to disconnect', done => { 103 | serverConns[0].destroy() 104 | done() 105 | }) 106 | 107 | it('sending a message while trying to reconnect will fail silently', done => { 108 | network.write({to: serverAddresses[0], what: 'should not have reached you'}, done) 109 | }) 110 | 111 | it('can still send data to another peer', done => { 112 | const node = network.node(serverAddresses[1]) 113 | node.once('data', message => { 114 | expect(message).to.equal({to: serverAddresses[1], what: 'hey you', isReply: true}) 115 | done() 116 | }) 117 | network.write({to: serverAddresses[1], what: 'hey you'}) 118 | }) 119 | 120 | it('waits a bit', done => { 121 | timers.setTimeout(done, A_BIT) 122 | }) 123 | 124 | it('can still send data to another peer 2', done => { 125 | const node = network.node(serverAddresses[2]) 126 | node.once('data', message => { 127 | expect(message).to.equal({to: serverAddresses[2], what: 'hey you dude', isReply: true}) 128 | done() 129 | }) 130 | node.write({to: serverAddresses[2], what: 'hey you dude'}) 131 | }) 132 | 133 | it('peer gets the message', done => { 134 | expect(serverData[1]).to.equal([{to: serverAddresses[1], what: 'hey you'}]) 135 | done() 136 | }) 137 | 138 | it('can send data to reconnected peer', done => { 139 | const node = network.node(serverAddresses[0]) 140 | node.once('data', message => { 141 | expect(message).to.equal({to: serverAddresses[0], what: 'hey you\'re back!', isReply: true}) 142 | done() 143 | }) 144 | node.write({to: serverAddresses[0], what: 'hey you\'re back!'}) 145 | }) 146 | 147 | it('reconnected peer gets the message', done => { 148 | expect(serverData[0].length).to.equal(1) 149 | expect(serverData[0].shift()).to.equal({to: serverAddresses[0], what: 'hey you\'re back!'}) 150 | done() 151 | }) 152 | 153 | it('can remove existing peer', done => { 154 | network.disconnect('/ip4/127.0.0.1/tcp/8083') 155 | done() 156 | }) 157 | 158 | it('can remove non-existing peer', done => { 159 | network.disconnect('/ip4/127.0.0.1/tcp/8084') 160 | done() 161 | }) 162 | 163 | it('waits a bit', done => { 164 | timers.setTimeout(done, A_BIT) 165 | }) 166 | 167 | it('catches errors', done => 
{ 168 | serverHandlers[2] = function (conn) { 169 | const msgpack = Msgpack() 170 | conn.pipe(msgpack.decoder()).on('data', onServerData) 171 | 172 | function onServerData (data) { 173 | expect(data).to.equal({to: serverAddresses[2], what: 'yo'}) 174 | // reply garbage 175 | conn.end(new Buffer([0xc1])) 176 | done() 177 | } 178 | } 179 | 180 | // make it reconnect 181 | serverConns[2].destroy() 182 | 183 | setTimeout(() => network.write({to: serverAddresses[2], what: 'yo'}), A_BIT) 184 | }) 185 | 186 | it('waits a bit', done => { 187 | timers.setTimeout(done, A_BIT) 188 | }) 189 | 190 | it('can get closed', done => { 191 | let closed = 0 192 | servers.forEach(server => server.close(onceClosed)) 193 | network.end() 194 | 195 | function onceClosed () { 196 | if (++closed === servers.length) { 197 | done() 198 | } 199 | } 200 | }) 201 | }) 202 | -------------------------------------------------------------------------------- /test/election.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const memdown = require('memdown') 12 | 13 | const Node = require('../') 14 | 15 | const A_BIT = 4000 16 | 17 | describe('election', () => { 18 | let nodes, followers, leader 19 | 20 | const nodeAddresses = [ 21 | '/ip4/127.0.0.1/tcp/9090', 22 | '/ip4/127.0.0.1/tcp/9091', 23 | '/ip4/127.0.0.1/tcp/9092' 24 | ] 25 | 26 | before(done => { 27 | nodes = nodeAddresses.map(address => Node(address, { 28 | db: memdown, 29 | peers: nodeAddresses.filter(addr => addr !== address) 30 | })) 31 | done() 32 | }) 33 | 34 | before(done => { 35 | async.each(nodes, (node, cb) => node.start(cb), done) 36 | }) 37 | 38 | after(done => { 39 | async.each(nodes, (node, cb) => node.stop(cb), done) 40 | }) 41 | 42 | it('waits a bit', {timeout: 5000}, done => setTimeout(done, A_BIT)) 43 | 44 | it('one of the nodes gets elected', done => { 45 | leader = nodes.find(node => node.is('leader')) 46 | followers = nodes.filter(node => node.is('follower')) 47 | expect(followers.length).to.equal(2) 48 | expect(leader).to.not.be.undefined() 49 | expect(followers.indexOf(leader)).to.equal(-1) 50 | done() 51 | }) 52 | 53 | }) 54 | -------------------------------------------------------------------------------- /test/incoming-dispatcher.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const it = lab.it 6 | const expect = require('code').expect 7 | 8 | const Dispatcher = require('../lib/incoming-dispatcher') 9 | 10 | describe('incoming dispatcher', () => { 11 | let dispatcher 12 | 13 | it('can be created', done => { 14 | dispatcher = new Dispatcher({ maxPending: 10 }) 15 | done() 16 | }) 17 | 18 | it('can ask for next', done => { 19 | expect(dispatcher.next()).to.be.undefined() 20 | done() 21 | }) 22 | 23 | it('accepts new objects', done => { 24 | for (var i = 0; i < 10; i++) { 25 | dispatcher.write(i) 26 | } 27 | done() 28 | }) 29 | 30 | it('should keep inserted objects', done => { 31 | let i 32 | let prev = -1 33 | 34 | while (i = dispatcher.next()) { 35 | expect(i).to.equal(prev + 1) 36 | prev = i 37 | } 38 | done() 39 | }) 40 | 41 | it('should cap', done => { 42 | for (var i = 0; i < 20; i++) 
{ 43 | dispatcher.write(i) 44 | } 45 | 46 | let prev = 9 47 | 48 | while (i = dispatcher.next()) { 49 | expect(i).to.equal(prev + 1) 50 | prev = i 51 | } 52 | 53 | done() 54 | }) 55 | 56 | it('should emit when inserting', done => { 57 | dispatcher.once('readable', () => { 58 | expect(dispatcher.next()).to.equal('a') 59 | done() 60 | }) 61 | dispatcher.write('a') 62 | }) 63 | }) 64 | -------------------------------------------------------------------------------- /test/leveldown.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const Memdown = require('memdown') 12 | 13 | const Node = require('../') 14 | 15 | const A_BIT = 4000 16 | 17 | describe('leveldown', () => { 18 | let nodes, follower, leader, leveldown 19 | const nodeAddresses = [ 20 | '/ip4/127.0.0.1/tcp/9390', 21 | '/ip4/127.0.0.1/tcp/9391', 22 | '/ip4/127.0.0.1/tcp/9392' 23 | ] 24 | 25 | before(done => { 26 | nodes = nodeAddresses.map((address, index) => 27 | Node(address, { 28 | db: Memdown, 29 | peers: nodeAddresses.filter(addr => addr !== address) 30 | })) 31 | done() 32 | }) 33 | 34 | before(done => { 35 | async.each(nodes, (node, cb) => node.start(cb), done) 36 | }) 37 | 38 | after(done => { 39 | async.each(nodes, (node, cb) => node.stop(cb), done) 40 | }) 41 | 42 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 43 | 44 | before(done => { 45 | leader = nodes.find(node => node.is('leader')) 46 | follower = nodes.find(node => node.is('follower')) 47 | expect(follower).to.not.be.undefined() 48 | expect(leader).to.not.be.undefined() 49 | expect(leader === follower).to.not.be.true() 50 | done() 51 | }) 52 | 53 | it ('can be created', done => { 54 | leveldown = leader.leveldown() 55 | done() 56 | }) 57 | 58 | it ('can set bunch of keys', done => { 59 | async.each( 60 | ['a', 'b', 'c'], 61 | (key, cb) => { 62 | leveldown.put(`key ${key}`, `value ${key}`, cb) 63 | }, 64 | done) 65 | }) 66 | 67 | it ('can get a key', done => { 68 | async.each(['a', 'b', 'c'], (key, cb) => { 69 | leveldown.get(`key ${key}`, (err, values) => { 70 | expect(err).to.be.null() 71 | expect(values).to.equal(`value ${key}`) 72 | cb() 73 | }) 74 | }, done) 75 | }) 76 | 77 | it('key is there', done => { 78 | leveldown.get('key c', done) 79 | }) 80 | 81 | it('can del a key', done => { 82 | leveldown.del('key c', done) 83 | }) 84 | 85 | it('deleted key is no longer found', done => { 86 | leveldown.get('key c', err => { 87 | expect(err.message).to.equal('Key not found in database') 88 | done() 89 | }) 90 | }) 91 | 92 | it('accepts batch commands', done => { 93 | const batch = [ 94 | {type: 'put', key: 'key d', value: 'value d'}, 95 | {type: 'put', key: 'key e', value: 'value e'}, 96 | {type: 'del', key: 'key b'}, 97 | ] 98 | leveldown.batch(batch, done) 99 | }) 100 | 101 | it('batch puts were effective', done => { 102 | async.map(['key d', 'key e'], leveldown.get.bind(leveldown), 103 | (err, results) => { 104 | expect(err).to.be.null() 105 | expect(results).to.equal(['value d', 'value e']) 106 | done() 107 | }) 108 | }) 109 | 110 | it('batch dels were effective', done => { 111 | leveldown.get('key b', err => { 112 | expect(err.message).to.equal('Key not found in database') 113 | done() 114 | }) 115 | }) 116 | 117 | describe('iterator', () => { 118 
| let iterator 119 | 120 | it('can be created', done => { 121 | iterator = leveldown.iterator({ 122 | keyAsBuffer: false, 123 | valueAsBuffer: false 124 | }) 125 | done() 126 | }) 127 | 128 | it('can iterate through all the keys', done => { 129 | let stopped = false 130 | const expecteds = [ 131 | {key: 'key a', value: 'value a'}, 132 | {key: 'key d', value: 'value d'}, 133 | {key: 'key e', value: 'value e'} 134 | ] 135 | async.whilst( 136 | () => !stopped, 137 | (cb) => { 138 | iterator.next((err, key, value) => { 139 | if (!err && !key) { 140 | stopped = true 141 | return cb() 142 | } 143 | expect(err).to.be.null() 144 | expect({key, value}).to.equal(expecteds.shift()) 145 | cb(err) 146 | }) 147 | }, 148 | (err) => { 149 | expect(err).to.be.null() 150 | expect(expecteds.length).to.equal(0) 151 | done() 152 | } 153 | ) 154 | }) 155 | }) 156 | }) 157 | -------------------------------------------------------------------------------- /test/levelup.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const Memdown = require('memdown') 12 | 13 | const Node = require('../') 14 | 15 | const A_BIT = 4000 16 | 17 | describe('levelup', () => { 18 | let nodes, follower, leader, levelup 19 | const nodeAddresses = [ 20 | '/ip4/127.0.0.1/tcp/9390', 21 | '/ip4/127.0.0.1/tcp/9391', 22 | '/ip4/127.0.0.1/tcp/9392' 23 | ] 24 | 25 | before(done => { 26 | nodes = nodeAddresses.map((address, index) => 27 | Node(address, { 28 | db: Memdown, 29 | peers: nodeAddresses.filter(addr => addr !== address) 30 | })) 31 | done() 32 | }) 33 | 34 | before(done => { 35 | async.each(nodes, (node, cb) => node.start(cb), done) 36 | }) 37 | 38 | after(done => { 39 | async.each(nodes, (node, cb) => node.stop(cb), done) 40 | }) 41 | 42 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 43 | 44 | before(done => { 45 | leader = nodes.find(node => node.is('leader')) 46 | follower = nodes.find(node => node.is('follower')) 47 | expect(follower).to.not.be.undefined() 48 | expect(leader).to.not.be.undefined() 49 | expect(leader === follower).to.not.be.true() 50 | done() 51 | }) 52 | 53 | it ('can be created', done => { 54 | levelup = leader.levelup() 55 | done() 56 | }) 57 | 58 | it ('can set bunch of keys', done => { 59 | async.each( 60 | ['a', 'b', 'c'], 61 | (key, cb) => { 62 | levelup.put(`key ${key}`, `value ${key}`, cb) 63 | }, 64 | done) 65 | }) 66 | 67 | it ('can get a key', done => { 68 | async.each(['a', 'b', 'c'], (key, cb) => { 69 | levelup.get(`key ${key}`, (err, values) => { 70 | expect(err).to.be.null() 71 | expect(values).to.equal(`value ${key}`) 72 | cb() 73 | }) 74 | }, done) 75 | }) 76 | 77 | it('key is there', done => { 78 | levelup.get('key c', done) 79 | }) 80 | 81 | it('can del a key', done => { 82 | levelup.del('key c', done) 83 | }) 84 | 85 | it('deleted key is no longer found', done => { 86 | levelup.get('key c', err => { 87 | expect(err.message).to.equal('Key not found in database [key c]') 88 | done() 89 | }) 90 | }) 91 | 92 | it('accepts batch commands', done => { 93 | const batch = [ 94 | {type: 'put', key: 'key d', value: 'value d'}, 95 | {type: 'put', key: 'key e', value: 'value e'}, 96 | {type: 'del', key: 'key b'}, 97 | ] 98 | levelup.batch(batch, done) 99 | }) 100 | 101 | it('batch puts 
were effective', done => { 102 | async.map(['key d', 'key e'], levelup.get.bind(levelup), 103 | (err, results) => { 104 | expect(err).to.be.null() 105 | expect(results).to.equal(['value d', 'value e']) 106 | done() 107 | }) 108 | }) 109 | 110 | it('batch dels were effective', done => { 111 | levelup.get('key b', err => { 112 | expect(err.message).to.equal('Key not found in database [key b]') 113 | done() 114 | }) 115 | }) 116 | 117 | describe('read stream', () => { 118 | let rs 119 | 120 | it('can be created', done => { 121 | rs = levelup.createReadStream() 122 | done() 123 | }) 124 | 125 | it('can iterate through all the keys', done => { 126 | let stopped = false 127 | const expecteds = [ 128 | {key: 'key a', value: 'value a'}, 129 | {key: 'key d', value: 'value d'}, 130 | {key: 'key e', value: 'value e'} 131 | ] 132 | 133 | rs.on('data', (data) => { 134 | expect(data).to.equal(expecteds.shift()) 135 | if (expecteds.length === 0) { 136 | done() 137 | } 138 | }) 139 | }) 140 | }) 141 | }) 142 | -------------------------------------------------------------------------------- /test/log-compaction.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const Memdown = require('memdown') 12 | const leftPad = require('left-pad') 13 | 14 | const Node = require('../') 15 | 16 | const A_BIT = 4000 17 | 18 | describe('log compaction', () => { 19 | let nodes, follower, leader, leveldown 20 | const nodeAddresses = [ 21 | '/ip4/127.0.0.1/tcp/9490', 22 | '/ip4/127.0.0.1/tcp/9491', 23 | '/ip4/127.0.0.1/tcp/9492' 24 | ] 25 | const newNodeAddress = '/ip4/127.0.0.1/tcp/9493' 26 | 27 | before(done => { 28 | nodes = nodeAddresses.map((address, index) => 29 | Node(address, { 30 | db: Memdown, 31 | minLogRetention: 10, 32 | peers: nodeAddresses.filter(addr => addr !== address).concat(newNodeAddress) 33 | })) 34 | done() 35 | }) 36 | 37 | before(done => { 38 | async.each(nodes, (node, cb) => node.start(cb), done) 39 | }) 40 | 41 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 42 | 43 | before(done => { 44 | leader = nodes.find(node => node.is('leader')) 45 | follower = nodes.find(node => node.is('follower')) 46 | expect(follower).to.not.be.undefined() 47 | expect(leader).to.not.be.undefined() 48 | expect(leader === follower).to.not.be.true() 49 | leveldown = leader.leveldown() 50 | done() 51 | }) 52 | 53 | it ('can insert 30 items', {timeout: 10000}, done => { 54 | const items = [] 55 | for(var i = 0 ; i < 30 ; i++) { 56 | items.push(leftPad(i.toString(), 3, '0')) 57 | } 58 | async.each(items, (item, cb) => { 59 | leveldown.put(item, item, cb) 60 | }, 61 | done) 62 | }) 63 | 64 | it ('log length was capped', done => { 65 | expect(leader.logEntries().length).to.equal(10) 66 | done() 67 | }) 68 | 69 | it('waits a bit', {timeout: 5000}, done => setTimeout(done, A_BIT)) 70 | 71 | describe ('node that is late to the party', () => { 72 | let newNode 73 | 74 | before(done => { 75 | newNode = Node(newNodeAddress, { 76 | db: Memdown, 77 | minLogRetention: 10, 78 | peers: nodeAddresses 79 | }) 80 | newNode.start(done) 81 | }) 82 | 83 | after(done => { 84 | async.each(nodes.concat(newNode), (node, cb) => node.stop(cb), done) 85 | }) 86 | 87 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 88 | 89 | 
it('waits a bit', {timeout: 5000}, done => setTimeout(done, A_BIT)) 90 | 91 | it('catches up', done => { 92 | let nextEntry = 0 93 | newNode._db.state.createReadStream() 94 | .on('data', (entry) => { 95 | const expectedKey = leftPad(nextEntry, 3, '0') 96 | expect(entry.key).to.equal(expectedKey) 97 | nextEntry ++ 98 | }) 99 | .once('end', () => { 100 | expect(nextEntry).to.equal(30) 101 | done() 102 | }) 103 | }) 104 | 105 | it('accepts more entries', {timeout: 10000}, done => { 106 | leader = nodes.concat(newNode).find(node => node.is('leader')) 107 | leveldown = leader.leveldown() 108 | 109 | const items = [] 110 | for(var i = 30 ; i < 60 ; i++) { 111 | items.push(leftPad(i.toString(), 3, '0')) 112 | } 113 | async.each(items, (item, cb) => { 114 | leveldown.put(item, item, cb) 115 | }, 116 | done) 117 | }) 118 | 119 | it('waits a bit', {timeout: 5000}, done => setTimeout(done, A_BIT)) 120 | 121 | it('new node catches up', done => { 122 | let nextEntry = 0 123 | newNode._db.state.createReadStream() 124 | .on('data', (entry) => { 125 | const expectedKey = leftPad(nextEntry, 3, '0') 126 | expect(entry.key).to.equal(expectedKey) 127 | nextEntry ++ 128 | }) 129 | .once('end', () => { 130 | expect(nextEntry).to.equal(60) 131 | done() 132 | }) 133 | }) 134 | }) 135 | }) 136 | -------------------------------------------------------------------------------- /test/log-replication-catchup.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const memdown = require('memdown') 12 | 13 | const Node = require('../') 14 | 15 | const A_BIT = 4000 16 | 17 | describe('log replication catchup', () => { 18 | let nodes, follower, leader, newNode 19 | 20 | const nodeAddresses = [ 21 | '/ip4/127.0.0.1/tcp/9290', 22 | '/ip4/127.0.0.1/tcp/9291', 23 | '/ip4/127.0.0.1/tcp/9292' 24 | ] 25 | 26 | const newAddress = '/ip4/127.0.0.1/tcp/9293' 27 | 28 | before(done => { 29 | nodes = nodeAddresses.map((address, index) => 30 | Node(address, { 31 | db: memdown, 32 | peers: nodeAddresses.filter(addr => addr !== address) 33 | })) 34 | done() 35 | }) 36 | 37 | before(done => { 38 | async.each(nodes, (node, cb) => node.start(cb), done) 39 | }) 40 | 41 | after(done => { 42 | async.each(nodes.concat(newNode), (node, cb) => node.stop(cb), done) 43 | }) 44 | 45 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 46 | 47 | before(done => { 48 | leader = nodes.find(node => node.is('leader')) 49 | follower = nodes.find(node => node.is('follower')) 50 | expect(follower).to.not.be.undefined() 51 | expect(leader).to.not.be.undefined() 52 | expect(leader === follower).to.not.be.true() 53 | done() 54 | }) 55 | 56 | before(done => leader.command({type: 'put', key: 'a', value: '1'}, done)) 57 | 58 | before(done => leader.command({type: 'put', key: 'b', value: '2'}, done)) 59 | 60 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 61 | 62 | before(done => { 63 | newNode = Node(newAddress, { 64 | db: memdown, 65 | peers: nodeAddresses 66 | }) 67 | newNode.on('warning', (err) => { 68 | throw err 69 | }) 70 | newNode.start(done) 71 | }) 72 | 73 | before(done => { 74 | leader = nodes.find(node => node.is('leader')) 75 | leader.join(newAddress, done) 76 | }) 77 | 78 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 79 
| 80 | it('new node gets updated', done => { 81 | const db = newNode._db.db 82 | db.sublevel('state').get('a', (err, value) => { 83 | expect(err).to.be.null() 84 | expect(value).to.equal('1') 85 | 86 | db.sublevel('state').get('b', (err, value) => { 87 | expect(err).to.be.null() 88 | expect(value).to.equal('2') 89 | done() 90 | }) 91 | }) 92 | }) 93 | }) 94 | -------------------------------------------------------------------------------- /test/log-replication.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const memdown = require('memdown') 12 | 13 | const Node = require('../') 14 | 15 | const A_BIT = 4000 16 | 17 | describe('log replication', () => { 18 | let nodes, follower, leader 19 | const nodeAddresses = [ 20 | '/ip4/127.0.0.1/tcp/9190', 21 | '/ip4/127.0.0.1/tcp/9191', 22 | '/ip4/127.0.0.1/tcp/9192' 23 | ] 24 | 25 | before(done => { 26 | nodes = nodeAddresses.map((address, index) => 27 | Node(address, { 28 | db: memdown, 29 | peers: nodeAddresses.filter(addr => addr !== address) 30 | })) 31 | done() 32 | }) 33 | 34 | before(done => { 35 | async.each(nodes, (node, cb) => node.start(cb), done) 36 | }) 37 | 38 | after(done => { 39 | async.each(nodes, (node, cb) => node.stop(cb), done) 40 | }) 41 | 42 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 43 | 44 | before(done => { 45 | leader = nodes.find(node => node.is('leader')) 46 | follower = nodes.find(node => node.is('follower')) 47 | expect(follower).to.not.be.undefined() 48 | expect(leader).to.not.be.undefined() 49 | expect(leader === follower).to.not.be.true() 50 | done() 51 | }) 52 | 53 | it('leader accepts command', done => { 54 | leader.command({type: 'put', key: 'a', value: '1'}, err => { 55 | expect(err).to.be.undefined() 56 | done() 57 | }) 58 | }) 59 | 60 | it('leader accepts query command', done => { 61 | leader.command({type: 'get', key: 'a'}, (err, result) => { 62 | expect(err).to.be.null() 63 | expect(result).to.equal('1') 64 | done() 65 | }) 66 | }) 67 | 68 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 69 | }) 70 | -------------------------------------------------------------------------------- /test/passive-network.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const it = lab.it 6 | const expect = require('code').expect 7 | 8 | const net = require('net') 9 | const Msgpack = require('msgpack5') 10 | const async = require('async') 11 | 12 | const Network = require('../lib/network/passive') 13 | 14 | describe('passive network', () => { 15 | let network, clientOptions 16 | 17 | const to = '/ip4/127.0.0.1/tcp/9163' 18 | 19 | const clientAddresses = [ 20 | '/ip4/127.0.0.1/tcp/8080/p/abc', 21 | '/ip4/127.0.0.1/tcp/8081/p/abc', 22 | '/ip4/127.0.0.1/tcp/8082/p/abc' 23 | ] 24 | 25 | const clients = clientAddresses.map(address => { 26 | const addr = address.split('/').slice(0, 5).join('/') 27 | return { address: addr } 28 | }) 29 | 30 | it('can be created', done => { 31 | network = new Network() 32 | network.once('listening', (options) => { 33 | clientOptions = options 34 | done() 35 | }) 36 | }) 37 | 38 | it('accepts client connections', done => { 
39 | async.map(clients, setupClient, done) 40 | }) 41 | 42 | it('accepts a msgpack message from a client', done => { 43 | const expected = clients.reduce((messages, client) => { 44 | messages[client.address] = { from: client.address, to, what: 'hey' } 45 | return messages 46 | }, {}) 47 | 48 | const node = network.node(to) 49 | 50 | node.on('data', (message) => { 51 | const from = message.from 52 | const expectedMessage = expected[from] 53 | expect(expectedMessage).to.equal({ from: from, to, what: 'hey' }) 54 | delete expected[from] 55 | if (!Object.keys(expected).length) { 56 | node.removeAllListeners('data') 57 | done() 58 | } 59 | }) 60 | 61 | clients.forEach(client => { 62 | client.encoder.write({ from: client.address, to, what: 'hey' }) 63 | }) 64 | }) 65 | 66 | it('can send a message and reaches connected client', done => { 67 | async.each(clients, (client, cb) => { 68 | client.decoder.once('data', message => { 69 | expect(message).to.equal({to: client.address, beep: 'boop'}) 70 | cb() 71 | }) 72 | network.write({to: client.address, beep: 'boop'}) 73 | }, done) 74 | }) 75 | 76 | it('can try sending a message to an unconnected client', done => { 77 | network.write({to: 'does not exist', beep: 'nope'}, done) 78 | }) 79 | 80 | it('can receive sending a message with no from', done => { 81 | clients[0].encoder.write({ something: 'is wrong' }, done) 82 | }) 83 | 84 | it('can receive an invalid message', done => { 85 | network.once('warning', (warn) => { 86 | expect(warn.message).to.equal('not implemented yet') 87 | done() 88 | }) 89 | clients[0].conn.write(new Buffer([0xc1])) 90 | }) 91 | 92 | it('allows the peer to reconnect and send message', done => { 93 | const client = clients[0] 94 | const oldConn = client.conn 95 | setupClient(client, () => { 96 | oldConn.end() 97 | client.encoder.write({ from: client.address, to, the: 'new me' }) 98 | client.encoder.write({ from: client.address, to, the: 'new me again' }) 99 | 100 | const node = network.node(to) 101 | 102 | node.once('data', (message) => { 103 | expect(message).to.equal({ from: client.address, to, the: 'new me' }) 104 | client.decoder.once('data', message => { 105 | expect(message).to.equal({ to: client.address, hope: 'this reaches you' }) 106 | done() 107 | }) 108 | node.write({ to: client.address, hope: 'this reaches you' }) 109 | }) 110 | }) 111 | }) 112 | 113 | it('can finish', done => { 114 | network.end() 115 | network.once('closed', done) 116 | clients.forEach(client => client.conn.end()) 117 | }) 118 | 119 | function setupClient (client, cb) { 120 | const conn = net.connect(clientOptions, cb) 121 | const msgpack = Msgpack() 122 | const decoder = msgpack.decoder() 123 | const encoder = msgpack.encoder() 124 | 125 | client.conn = conn 126 | client.decoder = decoder 127 | client.encoder = encoder 128 | 129 | conn.pipe(decoder) 130 | encoder.pipe(conn) 131 | } 132 | }) 133 | -------------------------------------------------------------------------------- /test/persistence.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const Memdown = require('memdown') 12 | const leftPad = require('left-pad') 13 | 14 | const Node = require('../') 15 | 16 | const A_BIT = 4000 17 | 18 | describe('persistence', () => { 19 | 20 | let 
nodes, leader, leveldown, term, items 21 | const nodeAddresses = [ 22 | '/ip4/127.0.0.1/tcp/9490', 23 | '/ip4/127.0.0.1/tcp/9491', 24 | '/ip4/127.0.0.1/tcp/9492' 25 | ] 26 | 27 | before(done => { 28 | nodes = nodeAddresses.map((address) => 29 | Node(address, { 30 | db: Memdown, 31 | peers: nodeAddresses.filter(addr => addr !== address) 32 | })) 33 | done() 34 | }) 35 | 36 | before(done => { 37 | async.each(nodes, (node, cb) => node.start(cb), done) 38 | }) 39 | 40 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 41 | 42 | before(done => { 43 | leader = nodes.find(node => node.is('leader')) 44 | expect(leader).to.not.be.undefined() 45 | leveldown = leader.leveldown() 46 | term = leader.term() 47 | done() 48 | }) 49 | 50 | before({timeout: 10000}, done => { 51 | items = [] 52 | for(var i = 0 ; i < 30 ; i++) { 53 | items.push(leftPad(i.toString(), 3, '0')) 54 | } 55 | async.each(items, (item, cb) => { 56 | leveldown.put(item, item, cb) 57 | }, 58 | done) 59 | }) 60 | 61 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 62 | 63 | before({timeout: 4000}, done => async.each(nodes, (node, cb) => node.stop(cb), done)) 64 | 65 | before(done => { 66 | // restart nodes 67 | nodes = nodeAddresses.map((address) => 68 | Node(address, { 69 | db: Memdown, 70 | peers: nodeAddresses.filter(addr => addr !== address) 71 | })) 72 | done() 73 | }) 74 | 75 | before(done => { 76 | async.each(nodes, (node, cb) => node.start(cb), done) 77 | }) 78 | 79 | after(done => { 80 | async.each(nodes, (node, cb) => node.stop(cb), done) 81 | }) 82 | 83 | it('retains logs and other metadata', done => { 84 | const expected = items.map((item, index) => { 85 | return { 86 | t: term, 87 | i: index + 1, 88 | c: { 89 | type: 'put', 90 | key: item, 91 | value: item 92 | }} 93 | }) 94 | 95 | const snapshot = leader._node._getPersistableState() 96 | expect(typeof snapshot.currentTerm).to.equal('number') 97 | expect(snapshot.currentTerm >= 1).to.be.true() 98 | expect(snapshot.votedFor).to.equal(leader.id.toString()) 99 | 100 | nodes.forEach(node => { 101 | const entries = node.logEntries().map(entry => { 102 | return {i: entry.i, t: entry.t, c: { 103 | type: entry.c.type, 104 | key: entry.c.key, 105 | value: entry.c.value 106 | }} 107 | }) 108 | expect(entries).to.equal(expected) 109 | 110 | const nodeSnapshot = node._node._getPersistableState() 111 | expect(nodeSnapshot.currentTerm).to.equal(snapshot.currentTerm) 112 | expect(typeof nodeSnapshot.votedFor).to.equal('string') 113 | }) 114 | done() 115 | }) 116 | }) 117 | -------------------------------------------------------------------------------- /test/remote-commands.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const memdown = require('memdown') 12 | const leftpad = require('left-pad') 13 | 14 | const Node = require('../') 15 | 16 | const A_BIT = 4000 17 | 18 | describe('log replication', () => { 19 | let nodes, followers, leader 20 | const nodeAddresses = [ 21 | '/ip4/127.0.0.1/tcp/9700', 22 | '/ip4/127.0.0.1/tcp/9701', 23 | '/ip4/127.0.0.1/tcp/9702' 24 | ] 25 | 26 | before(done => { 27 | nodes = nodeAddresses.map((address, index) => 28 | Node(address, { 29 | db: memdown, 30 | peers: nodeAddresses.filter(addr => addr !== address) 31 | })) 32 | done() 
33 | }) 34 | 35 | before(done => { 36 | async.each(nodes, (node, cb) => node.start(cb), done) 37 | }) 38 | 39 | after(done => { 40 | async.each(nodes, (node, cb) => node.stop(cb), done) 41 | }) 42 | 43 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 44 | 45 | before(done => { 46 | leader = nodes.find(node => node.is('leader')) 47 | followers = nodes.filter(node => !node.is('leader')) 48 | expect(followers.length).to.equal(2) 49 | expect(leader).to.not.be.undefined() 50 | done() 51 | }) 52 | 53 | it('follower accepts command', done => { 54 | const commands = [] 55 | for(var i=0; i < 20; i++) { 56 | commands.push({type: 'put', key: leftpad(i.toString(), 3, '0'), value: i}) 57 | } 58 | async.eachSeries(commands, (command, cb) => { 59 | const index = command.value % followers.length 60 | const follower = followers[index] 61 | follower.command(command, cb) 62 | }, done) 63 | }) 64 | 65 | it('can query from followers', done => { 66 | const db = followers[0].levelup() 67 | let next = 0 68 | db.createReadStream() 69 | .on('data', entry => { 70 | expect(entry.key).to.equal(leftpad(next.toString(), 3, '0')) 71 | expect(entry.value).to.equal(next) 72 | next ++ 73 | }) 74 | .once('end', () => { 75 | expect(next).to.equal(20) 76 | done() 77 | }) 78 | }) 79 | 80 | it('can query one value from follower', done => { 81 | const db = followers[0].levelup() 82 | db.get('019', (err, value) => { 83 | expect(err).to.be.null() 84 | expect(value).to.equal(19) 85 | done() 86 | }) 87 | }) 88 | 89 | it('can query from leader', done => { 90 | expect(leader.is('leader')).to.equal(true) 91 | const db = leader.levelup() 92 | let next = 0 93 | db.createReadStream() 94 | .on('data', entry => { 95 | expect(entry.key).to.equal(leftpad(next.toString(), 3, '0')) 96 | expect(entry.value).to.equal(next) 97 | next ++ 98 | }) 99 | .once('end', () => { 100 | expect(next).to.equal(20) 101 | done() 102 | }) 103 | }) 104 | 105 | }) 106 | -------------------------------------------------------------------------------- /test/resilience/resilience-chaos-disk.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const timers = require('timers') 11 | 12 | const Setup = require('./setup') 13 | const Client = require('./setup/client') 14 | 15 | describe('resilience, no chaos, in memory', () => { 16 | const setup = Setup({chaos: true, persist: true}) 17 | before({timeout: 30000}, setup.before) 18 | after({timeout: 30000}, setup.after) 19 | 20 | it ('works', {timeout: 121000}, done => { 21 | let timeout 22 | const client = Client(setup.addresses, {duration : 120000}) 23 | const emitter = client(done) 24 | resetOperationTimeout() 25 | emitter.on('operation', resetOperationTimeout) 26 | 27 | function onOperationTimeout () { 28 | console.log('stats: %j', emitter.stats) 29 | done(new Error('no operation for more than 11 seconds')) 30 | } 31 | 32 | function resetOperationTimeout () { 33 | if (timeout) { 34 | timers.clearTimeout(timeout) 35 | } 36 | timeout = timers.setTimeout(onOperationTimeout, 11000) 37 | } 38 | }) 39 | }) 40 | -------------------------------------------------------------------------------- /test/resilience/resilience-chaos-memory.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = 
exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const timers = require('timers') 11 | 12 | const Setup = require('./setup') 13 | const Client = require('./setup/client') 14 | 15 | describe('resilience, chaos, in memory', () => { 16 | const setup = Setup() 17 | before({timeout: 30000}, setup.before) 18 | after({timeout: 30000}, setup.after) 19 | 20 | it ('works', {timeout: 121000}, done => { 21 | let timeout 22 | const client = Client(setup.addresses, {duration : 120000}) 23 | const emitter = client((err) => { 24 | timers.clearTimeout(timeout) 25 | console.log('stats: %j', emitter.stats) 26 | done(err) 27 | }) 28 | resetOperationTimeout() 29 | emitter.on('operation', resetOperationTimeout) 30 | 31 | function onOperationTimeout () { 32 | done(new Error('no operation for more than 11 seconds')) 33 | } 34 | 35 | function resetOperationTimeout () { 36 | if (timeout) { 37 | timers.clearTimeout(timeout) 38 | } 39 | timeout = timers.setTimeout(onOperationTimeout, 11000) 40 | } 41 | }) 42 | }) 43 | -------------------------------------------------------------------------------- /test/resilience/resilience-order-disk.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const timers = require('timers') 11 | 12 | const Setup = require('./setup') 13 | const Client = require('./setup/client') 14 | 15 | describe('resilience, no chaos, in memory', () => { 16 | const setup = Setup({chaos: false, persist: true}) 17 | before({timeout: 30000}, setup.before) 18 | after({timeout: 30000}, setup.after) 19 | 20 | it ('works', {timeout: 121000}, done => { 21 | let timeout 22 | const client = Client(setup.addresses, {duration : 120000}) 23 | const emitter = client(done) 24 | resetOperationTimeout() 25 | emitter.on('operation', resetOperationTimeout) 26 | 27 | function onOperationTimeout () { 28 | console.log('stats: %j', emitter.stats) 29 | done(new Error('no operation for more than 11 seconds')) 30 | } 31 | 32 | function resetOperationTimeout () { 33 | if (timeout) { 34 | timers.clearTimeout(timeout) 35 | } 36 | timeout = timers.setTimeout(onOperationTimeout, 11000) 37 | } 38 | }) 39 | }) 40 | -------------------------------------------------------------------------------- /test/resilience/resilience-order-memory.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const timers = require('timers') 11 | 12 | const Setup = require('./setup') 13 | const Client = require('./setup/client') 14 | 15 | describe('resilience, no chaos, in memory', () => { 16 | const setup = Setup({chaos: false}) 17 | before({timeout: 30000}, setup.before) 18 | after({timeout: 30000}, setup.after) 19 | 20 | it ('works', {timeout: 121000}, done => { 21 | let timeout 22 | const client = Client(setup.addresses, {duration : 120000}) 23 | const emitter = client(done) 24 | resetOperationTimeout() 25 | emitter.on('operation', resetOperationTimeout) 26 | 27 | 
function onOperationTimeout () { 28 | console.log('stats: %j', emitter.stats) 29 | done(new Error('no operation for more than 11 seconds')) 30 | } 31 | 32 | function resetOperationTimeout () { 33 | if (timeout) { 34 | timers.clearTimeout(timeout) 35 | } 36 | timeout = timers.setTimeout(onOperationTimeout, 11000) 37 | } 38 | }) 39 | }) 40 | -------------------------------------------------------------------------------- /test/resilience/setup/client.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | // const keys = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'x', 'y', 'z'] 4 | const keys = ['a'] 5 | const Multiaddr = require('multiaddr') 6 | const Wreck = require('wreck') 7 | const timers = require('timers') 8 | const once = require('once') 9 | const EventEmitter = require('events') 10 | 11 | const defaultOptions = { 12 | duration: 60000, 13 | retryTimeout: 500 14 | } 15 | 16 | const wreck = Wreck.defaults({ 17 | timeout: 8000 18 | }) 19 | 20 | function Client (nodes, _options) { 21 | 22 | const emitter = new EventEmitter() 23 | emitter.stats = { 24 | operationsStarted: 0, 25 | operationsCompleted: 0 26 | } 27 | 28 | const options = Object.assign({}, defaultOptions, _options) 29 | 30 | let timeout 31 | const started = Date.now() 32 | const endpoints = nodes.map(multiAddrToUrl) 33 | 34 | const values = {} 35 | for (var i=0 ; i < keys.length; i++) { 36 | values[keys[i]] = 0 37 | } 38 | let leader = undefined 39 | 40 | return function client (_done) { 41 | const done = once(callback) 42 | timeout = timers.setTimeout(done, options.duration) 43 | work(done) 44 | return emitter 45 | 46 | function callback (err) { 47 | if (err) { 48 | _done(err) 49 | } else { 50 | _done(null) 51 | } 52 | } 53 | } 54 | 55 | function work (done) { 56 | emitter.stats.operationsStarted ++ 57 | makeOneRequest (err => { 58 | emitter.stats.operationsCompleted ++ 59 | if (err) { 60 | clearTimeout(timeout) 61 | done(err) 62 | } else { 63 | emitter.emit('operation') 64 | const elapsed = Date.now() - started 65 | if (elapsed < options.duration) { 66 | work(done) 67 | } else { 68 | clearTimeout(timeout) 69 | done() 70 | } 71 | } 72 | }) 73 | emitter.emit('operation started') 74 | } 75 | 76 | function makeOneRequest (done) { 77 | if (Math.random() > 0.5) { 78 | makeOnePutRequest(done) 79 | } else { 80 | makeOneGetRequest(done) 81 | } 82 | } 83 | 84 | function makeOnePutRequest (done) { 85 | const key = randomKey() 86 | let value = values[key] 87 | value ++ 88 | values[key] = value 89 | 90 | tryPut() 91 | 92 | function tryPut () { 93 | const endpoint = pickEndpoint() 94 | const options = { payload: value.toString() } 95 | wreck.put(`${endpoint}/${key}`, options, parsingWreckReply(endpoint, 201, tryPut, err => { 96 | if (err) { 97 | done(err) 98 | } else { 99 | done() 100 | } 101 | })) 102 | } 103 | } 104 | 105 | function makeOneGetRequest (done) { 106 | const key = randomKey() 107 | const expectedValue = values[key] 108 | 109 | tryGet() 110 | 111 | function tryGet () { 112 | const endpoint = pickEndpoint() 113 | wreck.get(`${endpoint}/${key}`, parsingWreckReply(endpoint, 200, tryGet, (err, payload) => { 114 | if (err) { 115 | done(err) 116 | } else { 117 | const value = Number(payload) || 0 118 | if (value !== expectedValue) { 119 | done(new Error(`GET request to ${endpoint} returned unexpected value for key ${key}. 
Expected ${expectedValue} and returned ${value}`)) 120 | } else { 121 | done() 122 | } 123 | } 124 | })) 125 | } 126 | } 127 | 128 | function pickEndpoint () { 129 | let endpoint = leader 130 | if (!endpoint) { 131 | endpoint = randomEndpoint() 132 | } 133 | return endpoint 134 | } 135 | 136 | function randomEndpoint () { 137 | return endpoints[Math.floor(Math.random() * endpoints.length)] 138 | } 139 | 140 | function randomKey () { 141 | return keys[Math.floor(Math.random() * keys.length)] 142 | } 143 | 144 | function parsingWreckReply (address, expectedCode, retry, done) { 145 | return function (err, res, payload) { 146 | if (err) { 147 | if (err.code === 'ECONNREFUSED' || err.code === 'ECONNRESET' || err.code === 'ETIMEOUT') { 148 | leader = null 149 | timers.setTimeout(retry, 100) 150 | } else { 151 | done(err) 152 | } 153 | } else { 154 | if (res.statusCode !== expectedCode) { 155 | let error 156 | try { 157 | error = JSON.parse(payload).error 158 | } catch (er) { 159 | error = {} 160 | } 161 | if (error && (error.code === 'ENOTLEADER' || error.code === 'ENOMAJORITY' || error.code === 'EOUTDATEDTERM')) { 162 | if (error.leader) { 163 | leader = multiAddrToUrl(error.leader) 164 | } else { 165 | leader = undefined 166 | } 167 | timers.setImmediate(retry) 168 | } else if (error.code === 'ETIMEOUT') { 169 | timers.setImmediate(retry) 170 | } else { 171 | done (new Error(`response status code was ${res.statusCode}, response: ${payload}`)) 172 | } 173 | } else { 174 | done(null, payload) 175 | } 176 | } 177 | } 178 | } 179 | } 180 | 181 | function multiAddrToUrl (maddr) { 182 | const addr = Multiaddr(maddr.toString()) 183 | const url = `http://127.0.0.1:${Number(addr.nodeAddress().port) + 1}` 184 | return url 185 | } 186 | 187 | module.exports = Client 188 | -------------------------------------------------------------------------------- /test/resilience/setup/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const async = require('async') 4 | const timers = require('timers') 5 | const fork = require('child_process').fork 6 | const path = require('path') 7 | const rimraf = require('rimraf') 8 | const mkdirp = require('mkdirp') 9 | 10 | const Node = require('./node') 11 | 12 | const defaultOptions = { 13 | persist: false, 14 | chaos: true, 15 | nodeCount: 3, 16 | killerIntervalMS: 10000 17 | } 18 | 19 | function Setup(_options) { 20 | let killer, liveNodes 21 | const deadNodes = [] 22 | const allAddresses = [] 23 | const options = Object.assign({}, defaultOptions, _options) 24 | const maxDeadNodes = Math.ceil(options.nodeCount / 2) - 1 25 | const dataPath = path.join(__dirname, '..', 'resilience', 'data') 26 | 27 | let killing = true 28 | 29 | return { before, after, addresses: allAddresses} 30 | 31 | function before (done) { 32 | async.series([setupDirs, createNodes, startNodes, startKiller], done) 33 | } 34 | 35 | function after (done) { 36 | async.series([stopKiller, stopNodes], done) 37 | } 38 | 39 | function setupDirs (done) { 40 | rimraf.sync(dataPath) 41 | mkdirp.sync(dataPath) 42 | done() 43 | } 44 | 45 | function createNodes (done) { 46 | const ports = [] 47 | for (var i=0; i < options.nodeCount ; i++) { 48 | ports.push(5300 + i*2) 49 | } 50 | 51 | ports.map(portToAddress).forEach(address => allAddresses.push(address)) 52 | 53 | liveNodes = ports.map(port => new Node(port, { 54 | peers: ports.filter(p => p !== port).map(portToAddress), 55 | persist: options.persist 56 | })) 57 | 58 | done() 59 | } 60 | 61 | function 
startNodes (done) { 62 | async.each(liveNodes, (node, cb) => node.start(cb), done) 63 | } 64 | 65 | function startKiller (done) { 66 | if (options.chaos) { 67 | killer = timers.setTimeout(() => { 68 | killAndRevive(err => { 69 | if (err) { 70 | throw err 71 | } else { 72 | startKiller() 73 | } 74 | }) 75 | }, options.killerIntervalMS) 76 | } 77 | 78 | if (done) { 79 | done() 80 | } 81 | } 82 | 83 | function killAndRevive (cb) { 84 | if (deadNodes.length >= maxDeadNodes) { 85 | killing = false 86 | } else if (!deadNodes.length) { 87 | killing = true 88 | } 89 | if (killing) { 90 | killOne(cb) 91 | } else { 92 | reviveOne(cb) 93 | } 94 | } 95 | 96 | function killOne (cb) { 97 | const node = popRandomLiveNode() 98 | console.log('killing %s...', node._address) 99 | deadNodes.push(node._address) 100 | node.stop(cb) 101 | } 102 | 103 | function reviveOne (cb) { 104 | const address = randomDeadNode() 105 | console.log('reviving %s...', address) 106 | const node = new Node(address, { 107 | peers: allAddresses.filter(addr => addr !== address) 108 | }) 109 | liveNodes.push(node) 110 | node.start(cb) 111 | } 112 | 113 | function popRandomLiveNode () { 114 | const index = Math.floor(Math.random() * liveNodes.length) 115 | const node = liveNodes[index] 116 | liveNodes.splice(index, 1) 117 | return node 118 | } 119 | 120 | function randomDeadNode () { 121 | const index = Math.floor(Math.random() * deadNodes.length) 122 | const node = deadNodes[index] 123 | deadNodes.splice(index, 1) 124 | return node 125 | } 126 | 127 | function stopKiller (done) { 128 | timers.clearInterval(killer) 129 | done() 130 | } 131 | 132 | function stopNodes (done) { 133 | async.each(liveNodes, (node, cb) => node.stop(cb), done) 134 | } 135 | } 136 | 137 | function portToAddress (port) { 138 | return `/ip4/127.0.0.1/tcp/${port}` 139 | } 140 | 141 | module.exports = Setup 142 | -------------------------------------------------------------------------------- /test/resilience/setup/node.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fork = require('child_process').fork 4 | const path = require('path') 5 | const split = require('split') 6 | 7 | const channels = ['stdout', 'stderr'] 8 | 9 | class Node { 10 | constructor (address, options) { 11 | this._address = address 12 | this._options = options 13 | this._exiting = false 14 | } 15 | 16 | start (done) { 17 | const args = [this._address, JSON.stringify(this._options)] 18 | this._child = fork(path.join(__dirname, 'server.js'), args, { 19 | silent: true 20 | }); 21 | 22 | channels.forEach(channel => { 23 | this._child[channel].pipe(split()) 24 | .on('data', line => { 25 | line = line.trim() 26 | if (line) { 27 | process[channel].write(`${this._address} (${this._child.pid}): ${line}\n`) 28 | } 29 | }) 30 | }) 31 | 32 | this._child.stdout.pipe(split()).once('data', (line) => { 33 | if (line.match(/started/)) { 34 | done() 35 | } else if (!this._exiting) { 36 | done(new Error(`Could not start child, first line of output was ${line}`)) 37 | } else { 38 | done() 39 | } 40 | }) 41 | 42 | this._child.once('exit', (code, signal) => { 43 | if (!this._exiting) { 44 | throw new Error(`child exited without being asked to, code = ${code}, signal = ${signal}`) 45 | } 46 | }) 47 | } 48 | 49 | stop (done) { 50 | this._exiting = true 51 | this._child.once('exit', () => { 52 | done() 53 | }) 54 | this._child.kill() 55 | } 56 | } 57 | 58 | module.exports = Node 59 | 
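Note: the four resilience specs above differ only in the options they hand to Setup(). For reference, here is a minimal, hypothetical sketch of driving the same harness outside of lab, assuming it is saved in the test/resilience directory; the 10-second duration is arbitrary. Setup() forks one HTTP-fronted child per node (via the Node wrapper above and the server.js shown next), and Client() keeps retrying and following leader redirects, so the run only ends early on a genuine consistency or liveness failure.

'use strict'

// Hypothetical standalone driver for the resilience harness (not part of the
// test suite). Setup() boots the forked nodes, Client() issues random PUT/GET
// round-trips against them and verifies the values it reads back.
const Setup = require('./setup')
const Client = require('./setup/client')

const setup = Setup({ chaos: false, persist: false })

setup.before(err => {
  if (err) { throw err }
  const client = Client(setup.addresses, { duration: 10000 })
  const emitter = client(clientErr => {
    console.log('stats: %j', emitter.stats)
    setup.after(stopErr => {
      if (clientErr || stopErr) { throw clientErr || stopErr }
    })
  })
})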
-------------------------------------------------------------------------------- /test/resilience/setup/server.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const http = require('http') 4 | const timers = require('timers') 5 | const async = require('async') 6 | const Memdown = require('memdown') 7 | const Leveldown = require('leveldown') 8 | const Multiaddr = require('multiaddr') 9 | const join = require('path').join 10 | const Node = require('../../../') 11 | 12 | const port = Number(process.argv[2]) 13 | const address = `/ip4/127.0.0.1/tcp/${port}` 14 | const options = Object.assign({}, JSON.parse(process.argv[3]), { 15 | location: join(__dirname, '..', 'resilience', 'data') 16 | }) 17 | 18 | if (!options.persist) { 19 | options.db = Memdown 20 | } 21 | 22 | const node = new Node(address, options) 23 | node.on('warning', err => { throw err }) 24 | const db = node.leveldown() 25 | 26 | const server = http.createServer(function(req, res) { 27 | const key = req.url.substring(1) 28 | if (req.method === 'PUT') { 29 | let body = '' 30 | req.setEncoding('utf8') 31 | req 32 | .on('data', d => body += d) 33 | .once('end', () => { 34 | handleWriteRequest(key, Number(body), res) 35 | }) 36 | } else if (req.method === 'GET') { 37 | handleReadRequest(key, res) 38 | } else { 39 | res.statusCode = 404 40 | res.end(encodeError(new Error('Not found'))) 41 | } 42 | }) 43 | 44 | function handleWriteRequest(key, value, res) { 45 | db.put(key, value, handlingError(key, res, 201)) 46 | } 47 | 48 | function handleReadRequest (key, res) { 49 | db.get(key, handlingError(key, res)) 50 | } 51 | 52 | async.parallel([server.listen.bind(server, port + 1), node.start.bind(node)], err => { 53 | if (err) { 54 | throw err 55 | } else { 56 | console.log(`server ${address} started`) 57 | node.on('new state', state => console.log('new state: %j', state)) 58 | } 59 | }) 60 | 61 | function encodeError (err) { 62 | return JSON.stringify({ error: { message: err.message, code: err.code, leader: err.leader }}) 63 | } 64 | function handlingError (key, res, code) { 65 | return function (err, value) { 66 | if (err) { 67 | if (err.message.match(/not found/)) { 68 | res.statusCode = code || 200 69 | res.end(JSON.stringify({ok: true})) 70 | } else { 71 | res.statusCode = 500 72 | res.end(encodeError(err)) 73 | } 74 | } else { 75 | res.statusCode = code || 200 76 | if (value) { 77 | res.end(value.toString()) 78 | } else { 79 | res.end(JSON.stringify({ok: true})) 80 | } 81 | } 82 | } 83 | } -------------------------------------------------------------------------------- /test/weakening.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lab = exports.lab = require('lab').script() 4 | const describe = lab.experiment 5 | const before = lab.before 6 | const after = lab.after 7 | const it = lab.it 8 | const expect = require('code').expect 9 | 10 | const async = require('async') 11 | const memdown = require('memdown') 12 | 13 | const Node = require('../') 14 | 15 | const A_BIT = 4000 16 | 17 | describe('log replication', () => { 18 | let nodes, followers, leader, preferred, weakened 19 | const nodeAddresses = [ 20 | '/ip4/127.0.0.1/tcp/9710', 21 | '/ip4/127.0.0.1/tcp/9711', 22 | '/ip4/127.0.0.1/tcp/9712' 23 | ] 24 | 25 | before(done => { 26 | nodes = nodeAddresses.map((address, index) => 27 | Node(address, { 28 | db: memdown, 29 | peers: nodeAddresses.filter(addr => addr !== address) 30 | })) 31 | done() 32 | }) 33 | 
34 | before(done => { 35 | async.each(nodes, (node, cb) => node.start(cb), done) 36 | }) 37 | 38 | after(done => { 39 | async.each(nodes, (node, cb) => node.stop(cb), done) 40 | }) 41 | 42 | before({timeout: 5000}, done => setTimeout(done, A_BIT)) 43 | 44 | before(done => { 45 | leader = nodes.find(node => node.is('leader')) 46 | followers = nodes.filter(node => node.is('follower')) 47 | expect(followers.length).to.equal(2) 48 | expect(leader).to.not.be.undefined() 49 | done() 50 | }) 51 | 52 | it('can weaken all the nodes except the preferred', done => { 53 | preferred = followers[0] 54 | weakened = followers.filter(f => f !== preferred).concat(leader) 55 | weakened.forEach(w => w.weaken(1100)) 56 | done() 57 | }) 58 | 59 | it('waits a bit', {timeout: 5000}, done => setTimeout(done, A_BIT)) 60 | 61 | it('resulted in electing the preferred', done => { 62 | expect(preferred.is('leader')).to.be.true() 63 | expect(weakened.every(w => w.is('follower'))).to.be.true() 64 | done() 65 | }) 66 | 67 | }) 68 | --------------------------------------------------------------------------------
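A closing note on the pattern that recurs throughout these suites: each one boots a three-node cluster backed by memdown, waits roughly four seconds (A_BIT) for an election to settle, and only then locates the leader and followers before exercising the API under test. A minimal sketch of that bootstrap, factored into a standalone helper, is below; it is hypothetical: bootstrapCluster is not part of the library, and the package name in the require is assumed (the suites themselves use require('../')).

'use strict'

const async = require('async')
const memdown = require('memdown')
const Node = require('skiff') // npm name assumed; the suites use require('../')

// Hypothetical helper mirroring the before() hooks shared by the suites above:
// create one node per address, start them all, then wait for leader election.
function bootstrapCluster (addresses, cb) {
  const nodes = addresses.map(address => Node(address, {
    db: memdown,
    peers: addresses.filter(addr => addr !== address)
  }))

  async.each(nodes, (node, done) => node.start(done), err => {
    if (err) { return cb(err) }
    // give the cluster a moment to elect a leader, as the suites do (A_BIT)
    setTimeout(() => {
      cb(null, {
        nodes,
        leader: nodes.find(node => node.is('leader')),
        followers: nodes.filter(node => node.is('follower'))
      })
    }, 4000)
  })
}

// usage sketch:
// bootstrapCluster(['/ip4/127.0.0.1/tcp/9090', '/ip4/127.0.0.1/tcp/9091',
//   '/ip4/127.0.0.1/tcp/9092'], (err, cluster) => { /* cluster.leader, ... */ })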