├── .gitignore
├── .travis.yml
├── example.html
├── example.js
├── LICENSE
├── package.json
├── readme.md
├── index.js
└── test.js
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | package-lock.json
3 |
4 | example-bundle.js
5 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 |
3 | node_js:
4 | - "8"
5 | - "10"
6 |
7 | sudo: false
8 |
9 | script:
10 | - npm test
11 |
--------------------------------------------------------------------------------
/example.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
<head><meta charset="utf-8"><title>hyperdiscovery example</title></head>
3 | <body>
4 | Open dev tools to see output
<script src="example-bundle.js"></script>
5 | </body></html>
--------------------------------------------------------------------------------
/example.js:
--------------------------------------------------------------------------------
// Example: replicate hyperdrive archives over the discovery swarm.
// Pass an archive key as the first CLI argument to join an existing drive.
var hyperdrive = require('hyperdrive')
var ram = require('random-access-memory')
var Discovery = require('.')

// First archive: reads an existing drive if a key was given, otherwise a
// fresh writable one. A second, independent archive is added to the same
// swarm later via discovery.add().
var key = process.argv[2]
var archive = hyperdrive(ram, key)
var archive2 = hyperdrive(ram)
var discovery = Discovery(archive)

archive.ready(function (err) {
  if (err) throw err
  console.log('key', archive.key.toString('hex'))
})

const toWrite = 'console.log("Hello World!")'

// Write one file into the second archive, then announce it on the swarm.
archive2.ready(function (err) {
  if (err) throw err
  archive2.writeFile('example.js', toWrite, () => {})
  discovery.add(archive2)
  console.log('key', archive2.key.toString('hex'))
})

// Log swarm lifecycle events to the console.
discovery.on('connection', function (peer, type) {
  console.log('connection')
  peer.on('close', function () {
    console.log('peer disconnected')
  })
})
discovery.on('listening', () => {
  console.log('listening')
})
discovery.on('error', (err) => {
  console.log('error', err)
})
36 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Dat Project Contributors
4 | Copyright (c) 2019 Andrew Osheroff
5 | Copyright (c) 2018 Blue Link Labs
6 |
7 | Permission is hereby granted, free of charge, to any person obtaining a copy
8 | of this software and associated documentation files (the "Software"), to deal
9 | in the Software without restriction, including without limitation the rights
10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | copies of the Software, and to permit persons to whom the Software is
12 | furnished to do so, subject to the following conditions:
13 |
14 | The above copyright notice and this permission notice shall be included in all
15 | copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | SOFTWARE.
24 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "hyperdiscovery",
3 | "version": "10.2.2",
4 | "description": "Join the p2p swarm for hypercore and hyperdrive feeds.",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "standard && dependency-check . && node test.js",
8 | "build-example": "browserify example.js > example-bundle.js"
9 | },
10 | "repository": {
11 | "type": "git",
12 | "url": "git+https://github.com/datproject/hyperdiscovery.git"
13 | },
14 | "author": "Dat Project Contributors",
15 | "license": "MIT",
16 | "bugs": {
17 | "url": "https://github.com/datproject/hyperdiscovery/issues"
18 | },
19 | "browser": {
20 | "discovery-swarm": "discovery-swarm-web"
21 | },
22 | "homepage": "https://github.com/datproject/hyperdiscovery#readme",
23 | "dependencies": {
24 | "dat-encoding": "^5.0.1",
25 | "dat-swarm-defaults": "^1.0.2",
26 | "debug": "^4.1.1",
27 | "discovery-swarm": "^5.1.4",
28 | "discovery-swarm-web": "^2.0.0",
29 | "hypercore-protocol": "^6.9.0",
30 | "mutexify": "^1.2.0"
31 | },
32 | "devDependencies": {
33 | "browserify": "^16.2.3",
34 | "dependency-check": "^3.3.0",
35 | "hypercore": "^6.25.2",
36 | "hyperdb": "^3.5.0",
37 | "hyperdrive": "^9.14.3",
38 | "random-access-memory": "^3.1.1",
39 | "standard": "^12.0.1",
40 | "tape": "^4.10.1"
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 |
2 | **Deprecated.** See [hyperswarm replicator](https://github.com/hyperswarm/replicator) for similar functionality.
3 |
4 | More info on active projects and modules at [dat-ecosystem.org](https://dat-ecosystem.org/)
5 |
6 | ---
7 |
8 | # hyperdiscovery
9 |
10 | ### Old documentation below
11 |
12 | This library is compatible with hypercore<=v7, which is now out of date.
13 |
14 | [](http://travis-ci.org/datproject/hyperdiscovery)
15 |
16 | Join the p2p swarm for [hypercore][core] and [hyperdrive][drive]. Uses
17 | [discovery-swarm][swarm] under the hood. Also works in web browsers using [discovery-swarm-web](https://github.com/RangerMauve/discovery-swarm-web).
18 |
19 | This module only works with hypercore v7 and earlier.
20 |
21 | ```
22 | npm install hyperdiscovery
23 | ```
24 |
25 | ## Usage
26 |
27 | Run the following code in two different places and they will replicate the contents of the given `ARCHIVE_KEY`.
28 |
29 | ```js
30 | var hyperdrive = require('hyperdrive')
31 | var hypercore = require('hypercore')
32 | var Discovery = require('hyperdiscovery')
33 |
34 | var archive = hyperdrive('./database', 'ARCHIVE_KEY')
35 | var discovery = Discovery(archive)
36 | discovery.on('connection', function (peer, type) {
37 | console.log('got', peer, type)
38 | console.log('connected to', discovery.connections, 'peers')
39 | peer.on('close', function () {
40 | console.log('peer disconnected')
41 | })
42 | })
43 |
44 | // add another archive/feed later
45 | var feed = hypercore('./feed')
46 | discovery.add(feed) // adds this hypercore feed to the same discovery swarm
47 | ```
48 |
49 | Will use `discovery-swarm` to attempt to connect peers. Uses `dat-swarm-defaults` for peer introduction defaults on the server side, which can be overwritten (see below).
50 |
51 | The module can also create and join a swarm for a hypercore feed:
52 |
53 | ```js
54 | var hypercore = require('hypercore')
55 | var Discovery = require('hyperdiscovery')
56 |
57 | var feed = hypercore('/feed')
58 | var discovery = Discovery(feed)
59 | ```
60 |
61 | ## API
62 |
63 | ### `var discovery = Discovery(archive, opts)`
64 |
65 | Join the p2p swarm for the given feed. The return object, `discovery`, is an event emitter that will emit a `peer` event with the peer information when a peer is found.
66 |
67 | ### `discovery.add(archive, [opts])`
68 |
69 | Add an archive/feed to the discovery swarm. Options will be passed to `discovery-swarm`. If you pass `opts.announce` as a falsy value you don't announce your port (discover-only mode).
70 |
71 | ### `discovery.totalConnections`
72 |
73 | Get length of the list of total active connections, across all archives and feeds.
74 |
75 | ### `discovery.leave(discoveryKey)`
76 |
77 | Leave discovery for a specific discovery key.
78 |
79 | ### `discovery.rejoin(discoveryKey)`
80 |
81 | Rejoin discovery for a discovery key (*must be added first using `discovery.add`).
82 |
83 | ### `discovery.close()`
84 |
85 | Exit the swarm, close all replication streams.
86 |
87 | ##### Options
88 |
89 | * `stream`: function, replication stream for connection. Default is `archive.replicate({live, upload, download})`.
90 | * `upload`: bool, upload data to the other peer?
91 | * `download`: bool, download data from the other peer?
92 | * `port`: port for discovery swarm
93 | * `utp`: use utp in discovery swarm
94 | * `tcp`: use tcp in discovery swarm
95 | * `bootstrap`: [string], WebRTC bootstrap signal servers for web
96 | * `discovery`: string, discovery-swarm-stream server for web
97 |
98 | Defaults from dat-swarm-defaults can also be overwritten:
99 |
100 | * `dns.server`: DNS server
101 | * `dns.domain`: DNS domain
102 | * `dht.bootstrap`: distributed hash table bootstrapping nodes
103 |
104 | ## Debugging
105 |
106 | Set `DEBUG='*'` in the environment to enable debugging output inside discovery-swarm.
107 |
108 | ## See Also
109 | - [mafintosh/hypercore][core]
110 | - [mafintosh/hyperdrive][drive]
111 | - [mafintosh/hyperdb][db]
112 | - [mafintosh/discovery-swarm][swarm]
113 | - [discovery-swarm-web][swarm-web]
114 |
115 | ## License
116 | MIT
117 |
118 | [core]: https://github.com/mafintosh/hypercore
119 | [drive]: https://github.com/mafintosh/hyperdrive
120 | [db]: https://github.com/mafintosh/hyperdb
121 | [swarm]: https://github.com/mafintosh/discovery-swarm
122 | [swarm-web]: https://github.com/RangerMauve/discovery-swarm-web
123 |
--------------------------------------------------------------------------------
/index.js:
--------------------------------------------------------------------------------
1 | const crypto = require('crypto')
2 | const EventEmitter = require('events')
3 |
4 | const datEncoding = require('dat-encoding')
5 | const hypercoreProtocol = require('hypercore-protocol')
6 | const discoverySwarm = require('discovery-swarm')
7 | const swarmDefaults = require('dat-swarm-defaults')
8 |
9 | const debug = require('debug')('hyperdiscovery')
10 |
// Factory export: callers write `Discovery(feed, opts)` without `new`.
module.exports = (...args) => new Hyperdiscovery(...args)

// Candidate listen ports, tried in order when one is already in use.
const DEFAULT_PORTS = [3282, 3000, 3002, 3004, 2001, 2003, 2005]
14 |
class Hyperdiscovery extends EventEmitter {
  // Modified from Beaker Browser, copyright Blue Link Labs:
  // https://github.com/beakerbrowser/beaker-core/
  // https://github.com/beakerbrowser/dat-node
  // And Core Store, copyright Andrew Osheroff:
  // https://github.com/andrewosh/corestore

  /**
   * Join the p2p swarm for hypercore/hyperdrive feeds.
   *
   * @param {Object} [feed] - Feed/archive to add immediately (anything with a
   *   `.replicate()` method). If the first argument has no `.replicate`, it
   *   is treated as the options object instead.
   * @param {Object} [opts] - Forwarded to discovery-swarm(-web). Recognized
   *   here: `id`, `port` (number or array of candidates), `utp`, `tcp`,
   *   `dht`, `bootstrap`, `discovery`, `autoListen`, `extensions`.
   */
  constructor (feed, opts) {
    super()

    // Support Discovery(opts) with no initial feed.
    if (feed && !feed.replicate) {
      opts = feed
      feed = null
    }
    opts = opts || {}

    this._opts = opts
    this.id = opts.id || crypto.randomBytes(32)

    // Work on copies before shifting so we never mutate the module-level
    // DEFAULT_PORTS constant or the caller's opts.port array (previously a
    // second Hyperdiscovery instance saw a shrunken DEFAULT_PORTS and
    // silently started on a different default port).
    const defaults = DEFAULT_PORTS.slice()
    this._port = defaults.shift()
    this._portAlts = defaults
    if (opts.port) {
      if (Array.isArray(opts.port)) {
        const ports = opts.port.slice()
        this._port = ports.shift()
        this._portAlts = ports
      } else {
        this._port = opts.port
      }
    }

    this._swarm = discoverySwarm(swarmDefaults({
      // Discovery-swarm options
      hash: false,
      utp: defaultTrue(opts.utp),
      tcp: defaultTrue(opts.tcp),
      dht: defaultTrue(opts.dht),

      // Discovery-swarm-web options
      bootstrap: opts.bootstrap,
      discovery: opts.discovery,

      id: this.id,
      stream: this._createReplicationStream.bind(this)
    }))

    // bubble listening and errors
    this._swarm.on('listening', () => {
      this.port = this._swarm.address().port
      this.emit('listening', this.port)
      debug('swarm:listening', { port: this.port })
    })
    this._swarm.on('error', (err) => {
      // Anything other than a bind failure bubbles straight to the caller.
      if (err && err.code !== 'EADDRINUSE' && err.message !== 'Could not bind') return this.emit('error', err)
      const port = this._portAlts.shift()
      // Out of alternates: surface the bind error instead of retrying the
      // same busy port forever (listen(undefined) falls back to this._port).
      if (port === undefined) return this.emit('error', err)
      debug(`Port ${this._port} in use. Trying ${port}.`)
      this._port = port
      this.listen(port)
    })

    // re-emit a variety of events
    const reEmit = (event) => {
      this._swarm.on(event, (...args) => {
        this.emit(event, ...args)
        debug(`swarm:${event}`, ...args)
      })
    }
    reEmit('peer')
    reEmit('peer-banned')
    reEmit('peer-rejected')
    reEmit('drop')
    reEmit('connecting')
    reEmit('connect-failed')
    reEmit('handshaking')
    reEmit('handshake-timeout')
    reEmit('connection')
    reEmit('connection-closed')
    reEmit('redundant-connection')

    // discoveryKey (hex string) -> feed, for every feed added via add()
    this._replicatingFeeds = new Map()

    if (opts.autoListen !== false) {
      this.listen()
    }

    if (feed) {
      this.add(feed)
    }
  }

  /** Total number of active connections across all archives and feeds. */
  get totalConnections () {
    return this._swarm.connections.length
  }

  /**
   * Connections for one discovery key, or the total when no key is given.
   * NOTE(review): per-key this returns the feed's `peers` array (or
   * undefined), not a count — callers must take `.length` themselves.
   */
  connections (dKey) {
    if (!dKey) return this.totalConnections

    const feed = this._replicatingFeeds.get(dKey)
    return feed && feed.peers
  }

  /**
   * Build the hypercore-protocol stream that discovery-swarm uses for each
   * peer connection. Multiple feeds may replicate over one stream.
   */
  _createReplicationStream (info) {
    var self = this

    // create the protocol stream
    var streamKeys = [] // list of keys replicated over the stream
    var stream = hypercoreProtocol({
      id: this.id,
      live: true,
      encrypt: true,
      extensions: this._opts.extensions
    })
    stream.peerInfo = info

    // add the dat if the discovery network gave us any info
    if (info.channel) {
      add(info.channel)
    }

    // add any requested dats
    stream.on('feed', add)

    function add (dkey) {
      const dkeyStr = datEncoding.toStr(dkey)

      // lookup the archive
      try {
        var feed = self._replicatingFeeds.get(dkeyStr)
        if (!feed) return // TODO: error ?
      } catch (err) {
        if (!stream.destroyed) stream.destroy(err)
        // Previously fell through and clobbered the map entry with undefined.
        return
      }

      self._replicatingFeeds.set(dkeyStr, feed)

      if (!feed.isSwarming) {
        return
      }

      if (!feed.replicationStreams) {
        feed.replicationStreams = []
      }
      if (feed.replicationStreams.indexOf(stream) !== -1) {
        return // already replicating
      }

      // create the replication stream
      feed.replicate({ stream, live: true })
      if (stream.destroyed) return // in case the stream was destroyed during setup

      // track the stream
      var keyStr = datEncoding.toStr(feed.key)
      streamKeys.push(keyStr)
      feed.replicationStreams.push(stream)

      // Forget this stream once it ends in any way.
      function onend () {
        feed.replicationStreams = feed.replicationStreams.filter(s => (s !== stream))
      }
      stream.once('error', onend)
      stream.once('end', onend)
      stream.once('close', onend)
    }

    // debugging
    stream.on('error', err => {
      debug({
        event: 'connection-error',
        peer: `${info.host}:${info.port}`,
        connectionType: info.type,
        message: err.toString()
      })
    })

    return stream
  }

  /**
   * Add an archive/feed to the discovery swarm. Options are forwarded to
   * discovery-swarm's join(). Emits 'join' with { key, discoveryKey }.
   */
  add (feed, opts) {
    // Wait for the key, and forward opts on re-entry (they used to be lost).
    if (!feed.key) return feed.ready(() => { this.add(feed, opts) })
    const key = datEncoding.toStr(feed.key)
    const discoveryKey = datEncoding.toStr(feed.discoveryKey)
    this._replicatingFeeds.set(discoveryKey, feed)

    this.rejoin(feed.discoveryKey, opts)
    this.emit('join', { key, discoveryKey })
    feed.isSwarming = true
  }

  /** Rejoin discovery for a key previously registered via add(). */
  rejoin (discoveryKey, opts) {
    this._swarm.join(datEncoding.toBuf(discoveryKey), opts)
  }

  /**
   * Start listening on `port` (defaults to the configured port).
   * @returns {Promise} resolves on the next 'listening' event.
   */
  listen (port) {
    port = port || this._port
    this._swarm.listen(port)
    return new Promise(resolve => {
      this._swarm.once('listening', resolve)
    })
  }

  /** Leave discovery for one key and destroy its active replication streams. */
  leave (discoveryKey) {
    const dKeyStr = datEncoding.toStr(discoveryKey)
    const feed = this._replicatingFeeds.get(dKeyStr)
    if (!feed) return
    if (feed.replicationStreams) {
      feed.replicationStreams.forEach(stream => stream.destroy()) // stop all active replications
      feed.replicationStreams.length = 0
    }
    this._swarm.leave(feed.discoveryKey)
    this.emit('leave', { key: feed.key.toString('hex'), discoveryKey: dKeyStr })
  }

  /**
   * Leave every key and destroy the swarm.
   * @returns {Promise} resolves once the swarm is fully destroyed.
   */
  close () {
    const self = this
    return new Promise((resolve, reject) => {
      this._replicatingFeeds.forEach((val, key) => {
        this.leave(key)
      })
      this._swarm.destroy(err => {
        if (err) return reject(err)
        self.emit('close')
        resolve()
      })
    })
  }
}
246 |
// Treat a missing (undefined) option as true; any explicitly supplied value
// — including false, null, or 0 — is passed through unchanged.
function defaultTrue (x) {
  if (x === undefined) return true
  return x
}
250 |
--------------------------------------------------------------------------------
/test.js:
--------------------------------------------------------------------------------
1 | const tape = require('tape')
2 | const hypercore = require('hypercore')
3 | const hyperdrive = require('hyperdrive')
4 | // const hyperdb = require('hyperdb')
5 | const ram = require('random-access-memory')
6 | const Discovery = require('.')
7 |
// Build a writer/reader pair of hypercore feeds sharing one key and wrap
// each in its own Discovery swarm. Resolves with [writeSwarm, readSwarm].
function getHypercoreSwarms (opts) {
  return new Promise((resolve, reject) => {
    const feed1 = hypercore(ram)

    feed1.ready(() => {
      const feed2 = hypercore(ram, feed1.key)
      feed2.once('ready', () => {
        const write = Discovery(feed1, opts)
        const read = Discovery(feed2, opts)

        // Swarm errors are fatal for the test run.
        const rethrow = (err) => { throw err }
        write.on('error', rethrow)
        read.on('error', rethrow)

        // Release the underlying feed once each swarm shuts down.
        write.on('close', () => close(feed1))
        read.on('close', () => close(feed2))

        resolve([write, read])
      })
    })
  })
}
36 |
// Build a writer/reader pair of hyperdrive archives sharing one key and wrap
// each in its own Discovery swarm. Resolves with [writeSwarm, readSwarm].
function getHyperdriveSwarms (opts) {
  return new Promise((resolve, reject) => {
    const archive1 = hyperdrive(ram)

    archive1.ready(() => {
      const archive2 = hyperdrive(ram, archive1.key)
      archive2.once('ready', () => {
        const write = Discovery(archive1, opts)
        const read = Discovery(archive2, opts)

        // Swarm errors are fatal for the test run.
        const rethrow = (err) => { throw err }
        write.on('error', rethrow)
        read.on('error', rethrow)

        // Release the underlying archive once each swarm shuts down.
        write.on('close', () => close(archive1))
        read.on('close', () => close(archive2))

        resolve([write, read])
      })
    })
  })
}
65 |
66 | // function getDbSwarms (opts, cb) {
67 | // var db1 = hyperdb(ram, { valueEncoding: 'utf-8' })
68 | // db1.once('ready', function () {
69 | // var db2 = hyperdb(ram, db1.key, { valueEncoding: 'utf-8' })
70 | // db2.once('ready', function () {
71 | // var write = swarm(db1, opts)
72 | // var read = swarm(db2, opts)
73 | // var swarms = [write, read]
74 | // cb(swarms)
75 | // })
76 | // })
77 | // }
78 |
// Integration test: two swarms sharing one hypercore key should find each
// other, report one connection apiece, then shut down cleanly.
tape('hypercore: connect and close', async (t) => {
  const [write, read] = await getHypercoreSwarms({})
  let missing = 2

  write.once('connection', (peer, type) => {
    t.pass('write connected')
    t.equals(write.totalConnections, 1)
    done()
  })

  read.once('connection', (peer, type) => {
    t.pass('read connected')
    t.equals(read.totalConnections, 1)
    done()
  })

  // Runs once per side; only the second call tears everything down.
  async function done () {
    if (--missing) return
    try {
      await write.close()
      await read.close()
    } catch (err) {
      t.error(err)
    }
    t.pass('discovery closed')
    t.end()
  }
})
107 |
// Same as the connect-and-close test, but with utp disabled (tcp only).
tape('hypercore: connect without utp', async (t) => {
  const [write, read] = await getHypercoreSwarms({ utp: false })
  let missing = 2
  write.once('connection', (peer, type) => {
    t.pass('write connected')
    t.equals(write.totalConnections, 1)
    done()
  })

  read.once('connection', (peer, type) => {
    t.pass('read connected')
    t.equals(read.totalConnections, 1)
    done()
  })

  // Runs once per side; only the second call tears everything down.
  async function done () {
    if (--missing) return
    try {
      await write.close()
      await read.close()
    } catch (err) {
      t.error(err)
    }
    t.pass('discovery closed')
    t.end()
  }
})
135 |
// A swarm created for one feed should also discover feeds added to it later.
tape('hypercore: multiple in single swarm', async (t) => {
  const [disc1, disc2] = await getHypercoreSwarms({ utp: false })
  const feed1 = hypercore(ram)
  let feed2
  // Guard: the handler uses `on` (not `once`), so several matching
  // connections could arrive; without this flag done() would tear down and
  // call t.end() more than once, failing tape.
  let finished = false

  disc1.on('connection', (peer, type) => {
    if (finished) return
    const dKey = feed1.discoveryKey.toString('hex')
    if (dKey === peer.discoveryKey.toString('hex')) {
      finished = true
      t.pass('added feeds connected')
      // t.equals(disc1.connections(dKey), 1)
      done()
    }
  })

  feed1.ready(() => {
    feed2 = hypercore(ram, feed1.key)
    disc1.add(feed1)
    disc2.add(feed2)
  })

  // Close both swarms and both feeds, then finish the test.
  async function done () {
    try {
      await disc1.close()
      await disc2.close()
      await close(feed1)
      await close(feed2)
    } catch (err) {
      t.error(err)
    }
    t.pass('discovery closed')
    t.end()
  }
})
169 |
// Integration test: two swarms sharing one hyperdrive key should find each
// other, report one connection apiece, then shut down cleanly.
tape('hyperdrive: connect and close', async (t) => {
  const [write, read] = await getHyperdriveSwarms({ utp: false })
  let missing = 2

  write.once('connection', (peer, type) => {
    t.pass('write connected')
    t.equals(write.totalConnections, 1)
    done()
  })

  read.once('connection', (peer, type) => {
    t.pass('read connected')
    t.equals(read.totalConnections, 1)
    done()
  })

  // Runs once per side; only the second call tears everything down.
  async function done () {
    if (--missing) return
    try {
      await write.close()
      await read.close()
    } catch (err) {
      t.error(err)
    }
    t.pass('discovery closed')
    t.end()
  }
})
198 |
// Same as the hyperdrive connect-and-close test (both already run with
// utp disabled).
tape('hyperdrive: connect without utp', async (t) => {
  const [write, read] = await getHyperdriveSwarms({ utp: false })
  let missing = 2
  write.once('connection', (peer, type) => {
    t.pass('write connected')
    t.equals(write.totalConnections, 1)
    done()
  })

  read.once('connection', (peer, type) => {
    t.pass('read connected')
    t.equals(read.totalConnections, 1)
    done()
  })

  // Runs once per side; only the second call tears everything down.
  async function done () {
    if (--missing) return
    try {
      await write.close()
      await read.close()
    } catch (err) {
      t.error(err)
    }
    t.pass('discovery closed')
    t.end()
  }
})
226 |
// A swarm created for one archive should also discover archives added later.
tape('hyperdrive: multiple in single swarm', async (t) => {
  const [disc1, disc2] = await getHyperdriveSwarms({ utp: false })
  const archive1 = hyperdrive(ram)
  let archive2
  // Guard: the handler uses `on` (not `once`), so several matching
  // connections could arrive; without this flag done() would tear down and
  // call t.end() more than once, failing tape.
  let finished = false

  disc1.on('connection', (peer, type) => {
    if (finished) return
    const dKey = archive1.discoveryKey.toString('hex')
    if (dKey === peer.discoveryKey.toString('hex')) {
      finished = true
      t.pass('added feeds connected')
      // t.equals(disc1.connections(dKey), 1)
      done()
    }
  })

  archive1.ready(() => {
    archive2 = hyperdrive(ram, archive1.key)
    disc1.add(archive1)
    disc2.add(archive2)
  })

  // Close both swarms and both archives, then finish the test.
  async function done () {
    try {
      await disc1.close()
      await disc2.close()
      await close(archive1)
      await close(archive2)
    } catch (err) {
      t.error(err)
    }
    t.pass('discovery closed')
    t.end()
  }
})
260 |
261 | // tape('hyperdb connect and close', (t) => {
262 | // t.plan(6)
263 | // getDbSwarms({}, function (swarms) {
264 | // var write = swarms[0]
265 | // var read = swarms[1]
266 | // var missing = 2
267 |
268 | // write.once('connection', (peer, type) => {
269 | // t.ok(1, 'write connected')
270 | // t.equals(write.connections.length, 1)
271 | // done()
272 | // })
273 |
274 | // read.once('connection', (peer, type) => {
275 | // t.ok(1, 'read connected')
276 | // t.equals(read.connections.length, 1)
277 | // done()
278 | // })
279 |
280 | // function done () {
281 | // if (--missing) return
282 | // write.close(function () {
283 | // t.ok(1, 'write closed')
284 | // read.close(function () {
285 | // t.ok(1, 'read closed')
286 | // })
287 | // })
288 | // }
289 | // })
290 | // })
291 |
// Promise wrapper around a feed/archive's callback-style close().
function close (feed) {
  return new Promise((resolve) => feed.close(resolve))
}
297 |
--------------------------------------------------------------------------------