├── .gitignore ├── README.md ├── index.js ├── package.json └── test.js /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | *.pid.lock 13 | 14 | # Directory for instrumented libs generated by jscoverage/JSCover 15 | lib-cov 16 | 17 | # Coverage directory used by tools like istanbul 18 | coverage 19 | 20 | # nyc test coverage 21 | .nyc_output 22 | 23 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 24 | .grunt 25 | 26 | # Bower dependency directory (https://bower.io/) 27 | bower_components 28 | 29 | # node-waf configuration 30 | .lock-wscript 31 | 32 | # Compiled binary addons (https://nodejs.org/api/addons.html) 33 | build/Release 34 | 35 | # Dependency directories 36 | node_modules/ 37 | jspm_packages/ 38 | 39 | # TypeScript v1 declaration files 40 | typings/ 41 | 42 | # Optional npm cache directory 43 | .npm 44 | 45 | # Optional eslint cache 46 | .eslintcache 47 | 48 | # Optional REPL history 49 | .node_repl_history 50 | 51 | # Output of 'npm pack' 52 | *.tgz 53 | 54 | # Yarn Integrity file 55 | .yarn-integrity 56 | 57 | # dotenv environment variables file 58 | .env 59 | .env.test 60 | 61 | # parcel-bundler cache (https://parceljs.org/) 62 | .cache 63 | 64 | # next.js build output 65 | .next 66 | 67 | # nuxt.js build output 68 | .nuxt 69 | 70 | # vuepress build output 71 | .vuepress/dist 72 | 73 | # Serverless directories 74 | .serverless/ 75 | 76 | # FuseBox cache 77 | .fusebox/ 78 | 79 | # DynamoDB Local files 80 | .dynamodb/ 81 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # @geut/saga 2 | 3 | > A helper module to share operations between peers built on top of 4 | > hyperdb. 
5 | 6 | --- 7 | ![npm version badge](https://badge.fury.io/js/%40geut%2Fsaga.svg) 8 | 9 | ## Install 10 | 11 | `npm install @geut/saga` 12 | 13 | ## Usage 14 | 15 | First, instantiate saga: 16 | 17 | `const saga = Saga(ram, publicKey, username);` 18 | 19 | Then, we will need a swarm of peers: 20 | 21 | ```javascript 22 | const sw = swarm({ 23 | id: username, 24 | stream: () => { 25 | return saga.replicate(); 26 | } 27 | }); 28 | 29 | sw.join(signalhub(discoveryKey, signalUrls), webrtcOpts); 30 | 31 | sw.on('connection', async peer => { 32 | try { 33 | await saga.connect(peer); 34 | } catch (err) { 35 | console.log(err); 36 | } 37 | }); 38 | ``` 39 | _Hint:_ You can use [`@geut/discovery-swarm-webrtc`](https://github.com/geut/discovery-swarm-webrtc). 40 | 41 | _Hint2:_ You will need a signal server, like [signalhubws](https://github.com/soyuka/signalhubws). 42 | 43 | After that, you are ready to use `saga`. This include, sending and receiving operations that you can apply on each peer. 44 | 45 | ## API 46 | 47 | ### initialize 48 | 49 | > Returns a `promise`. 50 | 51 | Used to trigger hyperdb creation and setup the watch for operations. 52 | 53 | ### connect 54 | 55 | > `Peer` | Required. The peer must have a `remoteUserData` property. 56 | 57 | This will **authorize** the peer to be a writer. Emits a `join` event 58 | along with peer data. Finally, if the peer leaves, it will also emit 59 | a `leave` event, indicating the peer. 60 | 61 | ### replicate 62 | 63 | It will replicate the stream. Uses hyperdb replicates method under the 64 | hood with some fixed values: 65 | 66 | ```javascript 67 | { 68 | live: true, 69 | userData: JSON.stringify({ 70 | key: this.db.local.key, 71 | username: this.username, 72 | timestamp: this.timestamp 73 | }) 74 | } 75 | ``` 76 | 77 | ### writeOperation 78 | 79 | > `Object` | Optional 80 | 81 | Use this method to send a new operation to every peer. It will also add 82 | the username of the sender and a timestamp. 
83 | 84 | ## Events 85 | 86 | `saga` inherits from node.js `EventEmitter`. It will emit the following 87 | events: 88 | 89 | ### join 90 | 91 | > `peerData: Object` 92 | 93 | After authorizing a new peer, saga will emit a join event with some peer 94 | data. This is triggered by the `connect` method (see above). 95 | 96 | ### leave 97 | 98 | > `peerData: Object` 99 | 100 | After a peer leaves, the leave event will be emitted with some peer data. 101 | 102 | ### operation 103 | 104 | > `Object` 105 | 106 | The operation event will be emitted after reading feed history changes 107 | (see hyperdb) caused by the arrival of new operations. You can listen to this 108 | event to retrieve the latest operations that you can apply to regenerate 109 | the distributed state between peers. The event will contain an object with 110 | the `username` of the sender, a `timestamp` and the `operation`. 111 | 112 | ## Motivation 113 | 114 | The idea came up after playing with [olaf](https://github.com/geut/olaf), 115 | a P2P Dat powered chat application. That is when `saga` first appeared. 116 | Later we were playing with the idea of a CRDT-based editor, also Dat 117 | powered. I wanted to re-use some parts and saga was my first option. 118 | I only needed to make some subtle changes, mostly renaming messages to 119 | operations in order to be more generic. 120 | 121 | When creating P2P apps, the absence of a centralized server empowers 122 | peers (former _clients_). Since now they can share data with each 123 | other, it is useful to have a way to re-create locally the changes that have 124 | happened in another peer; this is where you can use `saga`. Also keep in 125 | mind that libraries like 126 | [Automerge](https://github.com/automerge/automerge) are a great match! 
127 | 128 | 129 | --- 130 | Brought to you by **GEUT LABS ʘ** 131 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | const EventEmitter = require('events'); 2 | const { Writable } = require('stream'); 3 | const hyperdb = require('hyperdb'); 4 | const pump = require('pump'); 5 | const hyperid = require('hyperid'); 6 | const debug = require('debug'); 7 | 8 | const log = debug('geut:saga'); 9 | 10 | const uuid = hyperid(); 11 | 12 | class ForEachChunk extends Writable { 13 | constructor(opts, cb) { 14 | if (!cb) { 15 | cb = opts; 16 | opts = {}; 17 | } 18 | super(opts); 19 | 20 | this.cb = cb; 21 | } 22 | 23 | _write(chunk, enc, next) { 24 | this.cb(chunk, enc, next); 25 | } 26 | } 27 | 28 | const forEachChunk = (...args) => new ForEachChunk(...args); 29 | 30 | class Saga extends EventEmitter { 31 | constructor(storage, key, username) { 32 | super(); 33 | 34 | this.operations = new Map(); 35 | this.newestOperations = new Map(); 36 | this.users = new Map(); 37 | this.username = username; 38 | this.timestamp = Date.now(); 39 | 40 | this.db = hyperdb(storage, key, { valueEncoding: 'json' }); 41 | } 42 | 43 | async initialize() { 44 | await this._ready(); 45 | log('Initializing saga') 46 | this._updateHistory(this._watchForOperations.bind(this)); 47 | } 48 | 49 | writeOperation(operation) { 50 | const key = `operations/${uuid()}`; 51 | const data = { 52 | key, 53 | operation, 54 | username: this.username, 55 | timestamp: Date.now() 56 | }; 57 | 58 | return new Promise((resolve, reject) => { 59 | this.db.put(key, data, err => { 60 | if (err) { 61 | reject(err); 62 | } else { 63 | resolve(key); 64 | } 65 | }); 66 | }); 67 | } 68 | 69 | replicate() { 70 | return this.db.replicate({ 71 | live: true, 72 | userData: JSON.stringify({ 73 | key: this.db.local.key, 74 | username: this.username, 75 | timestamp: this.timestamp 76 | }) 77 | }); 78 | } 79 | 80 
| async connect(peer) { 81 | if (!peer.remoteUserData) { 82 | throw new Error('peer does not have userData'); 83 | } 84 | 85 | const data = JSON.parse(peer.remoteUserData); 86 | const key = Buffer.from(data.key); 87 | const username = data.username; 88 | 89 | await this._authorize(key); 90 | 91 | if (!this.users.has(username)) { 92 | this.users.set(username, new Date()); 93 | this.emit('join', data); 94 | log('A new peer joins') 95 | peer.on('close', () => { 96 | if (!this.users.has(username)) return; 97 | this.users.delete(username); 98 | this.emit('leave', data); 99 | log('A peer leaves') 100 | }); 101 | } 102 | } 103 | 104 | _authorize(key) { 105 | return new Promise((resolve, reject) => { 106 | this.db.authorized(key, (err, auth) => { 107 | if (err) return reject(err); 108 | 109 | if (auth) { 110 | return resolve(); 111 | } 112 | 113 | this.db.authorize(key, err => { 114 | if (err) return reject(err); 115 | resolve(); 116 | }); 117 | }); 118 | }); 119 | } 120 | 121 | _updateHistory(onFinish) { 122 | const h = this.db.createHistoryStream({ reverse: true }); 123 | 124 | const ws = forEachChunk({ objectMode: true }, (data, enc, next) => { 125 | const { key } = data; 126 | 127 | if (/operations/.test(key)) { 128 | log(`Check if operation ${key} has been applied`); 129 | if (this.operations.has(key)) { 130 | // TODO(dk): check this condition, if we destroy the stream nothing works. In the other hand 131 | // it would be cool to not process everything again and again. That was the purpose of maintaining 132 | // a newestOperations array. 
133 | // h.destroy() 134 | // return 135 | } else { 136 | log(`Adding new operation key ${key}`); 137 | this.newestOperations.set(data.key, data.value); 138 | } 139 | } 140 | 141 | next(); 142 | }); 143 | 144 | pump(h, ws, err => { 145 | // work with latest operations in the right order 146 | const values = [...this.newestOperations.values()]; 147 | const keys = [...this.newestOperations.keys()]; 148 | values.reverse().forEach((val, idx) => { 149 | log('Applying new operations'); 150 | const key = keys[idx]; 151 | this.emit('operation', { ...val }, key); 152 | log('A new operation has been sent') 153 | // update applied operations 154 | this.operations.set(key, val); 155 | }); 156 | 157 | // reset newestOperations 158 | this.newestOperations = new Map(); 159 | if (onFinish) onFinish(err); 160 | }); 161 | } 162 | 163 | _watchForOperations() { 164 | this.db.watch('operations', () => { 165 | this._updateHistory(); 166 | }); 167 | } 168 | 169 | _ready() { 170 | return new Promise(resolve => this.db.ready(resolve)); 171 | } 172 | } 173 | 174 | // export default (...args) => new Saga(...args); 175 | module.exports = (...args) => new Saga(...args); 176 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@geut/saga", 3 | "version": "1.0.0", 4 | "description": "A helper module to share operations between peers built on top of hyperdb.", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "jest" 8 | }, 9 | "keywords": [], 10 | "author": "GEUT", 11 | "license": "MIT", 12 | "dependencies": { 13 | "hyperdb": "^3.5.0", 14 | "hyperid": "^2.0.2", 15 | "pump": "^3.0.0" 16 | }, 17 | "devDependencies": { 18 | "debug": "^4.1.1", 19 | "jest": "^24.1.0" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /test.js: -------------------------------------------------------------------------------- 1 
| const Saga = require('./'); 2 | 3 | 4 | jest.mock('hyperdb', () => { 5 | return () => ({ 6 | ready: jest.fn((resolver) => resolver()), 7 | authorized: jest.fn((key, cb) => cb(null, true)), 8 | authorize: jest.fn(() => Promise.resolve()), 9 | createHistoryStream: jest.fn(), 10 | watch: jest.fn(() => ({ on: jest.fn() })) 11 | }); 12 | }); 13 | 14 | jest.mock('pump', (str1, str2, lastFn) => { 15 | return jest.fn() 16 | }); 17 | 18 | const validPeer = { 19 | remoteUserData: JSON.stringify({ 20 | username: 'test', 21 | key: 'k3y' 22 | }), 23 | on: jest.fn() 24 | }; 25 | 26 | test('connect and close', async () => { 27 | expect.assertions(2); 28 | const saga = Saga('/tmp', null, 'test'); 29 | expect(saga).toBeDefined(); 30 | await saga.initialize(); 31 | 32 | saga.on('join', data => { 33 | expect(data.username).toEqual(JSON.parse(validPeer.remoteUserData).username); 34 | }) 35 | 36 | saga.connect(validPeer); 37 | }) 38 | --------------------------------------------------------------------------------