├── .github └── workflows │ └── test.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── index.js ├── package.json └── test.js /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Testing 2 | 3 | on: [ push, pull_request ] 4 | 5 | jobs: 6 | build: 7 | strategy: 8 | matrix: 9 | node: [ '24' ] 10 | os: [macos-latest, ubuntu-latest, windows-latest] 11 | runs-on: ${{ matrix.os }} 12 | 13 | name: Unit tests ${{ matrix.node }} ${{ matrix.os }} 14 | steps: 15 | - uses: actions/checkout@v2 16 | - uses: actions/setup-node@v2 17 | with: 18 | node-version: ${{ matrix.node }} 19 | - run: npm install 20 | - run: npm run lint 21 | - run: npm run test:node 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | *.pid.lock 13 | 14 | # Directory for instrumented libs generated by jscoverage/JSCover 15 | lib-cov 16 | 17 | # Coverage directory used by tools like istanbul 18 | coverage 19 | 20 | # nyc test coverage 21 | .nyc_output 22 | 23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 24 | .grunt 25 | 26 | # Bower dependency directory (https://bower.io/) 27 | bower_components 28 | 29 | # node-waf configuration 30 | .lock-wscript 31 | 32 | # Compiled binary addons (https://nodejs.org/api/addons.html) 33 | build/Release 34 | 35 | # Dependency directories 36 | node_modules/ 37 | jspm_packages/ 38 | 39 | # TypeScript v1 declaration files 40 | typings/ 41 | 42 | # Optional npm cache directory 43 | .npm 44 | 45 | # Optional eslint cache 46 | .eslintcache 47 | 48 | # Optional REPL history 49 | .node_repl_history 50 | 51 | # Output of 'npm pack' 52 | *.tgz 53 | 54 | # Yarn Integrity file 55 | 
.yarn-integrity 56 | 57 | # dotenv environment variables file 58 | .env 59 | 60 | # next.js build output 61 | .next 62 | 63 | package-lock.json 64 | test-bundle.js 65 | bundle.js 66 | dat-sdk-bundle.js 67 | 68 | # IDE 69 | .idea -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | 
behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at hi@datproject.org. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Dat Project 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hyper-sdk 2 | 3 | A Software Development Kit for the [hypercore-protocol](https://hypercore-protocol.org/) 4 | 5 | ## Why use this? 6 | 7 | Hypercore-protocol and it's ecosystem consists of a bunch of low level building blocks for working with data in distributed applications. Although this modularity makes it easy to mix and match pieces, it adds complexity when it comes to actually building something. 8 | 9 | The Hyper SDK combines the lower level pieces of the Hyper stack into high level APIs that you can use across platforms so that you can focus on your application rather than the gritty details of how it works. 10 | 11 | ## Goals 12 | 13 | - High level API 14 | - Cross-platform with same codebase 15 | - ✔ [Node.js](https://nodejs.org/en) 16 | - ✔ [Electron](https://www.electronjs.org/) 17 | - ✔ [Pear](https://docs.pears.com/) 18 | - 🏗️ Web (PRs welcome) 19 | 20 | ## Installation 21 | 22 | Make sure you've set up [Node.js](https://nodejs.org/). 
23 | 24 | ```shell 25 | npm install --save hyper-sdk 26 | # or yarn 27 | ``` 28 | 29 | ```js 30 | import * as SDK from "hyper-sdk" 31 | ``` 32 | 33 | ## API 34 | 35 | ### SDK.create() 36 | 37 | ```JavaScript 38 | const sdk = await SDK.create({ 39 | // This argument is mandatory since Hypercore no longer support in-memory 40 | // Check out the env-paths module for application specific path storage 41 | storage: './hyper-sdk', 42 | 43 | // This controls whether the SDK will automatically start swarming when loading a core via `get` 44 | // Set this to false if you want to have more fine control over peer discovery 45 | autoJoin: true, 46 | 47 | // Specify options to pass to the Corestore constructor 48 | // The storage will get derived from the `storage` parameter 49 | // https://github.com/hypercore-protocol/corestore/ 50 | corestoreOpts: {}, 51 | 52 | // Specify options to pass to the hyperswarm constructor 53 | // The keypair will get derived automatically from the corestore 54 | // https://github.com/hyperswarm/hyperswarm 55 | swarmOpts: {}, 56 | }) 57 | ``` 58 | 59 | ### sdk.publicKey 60 | 61 | The public key used for identifying this peer in the hyperswarm network. 62 | 63 | This is a 32 byte buffer which can be use in conjunction with `sdk.joinPeer()` to connect two peers directly together. 64 | 65 | ### sdk.connections 66 | 67 | The list of active connections to other peers, taken from hyperswarm. 68 | 69 | ### sdk.peers 70 | 71 | The list of active peers. 72 | 73 | Each peer has a `publicKey`, and list of `topics` 74 | 75 | You can find more docs in the [hyperswarm](https://github.com/hyperswarm/hyperswarm#peerinfo-api) repo. 76 | 77 | ### sdk.cores 78 | 79 | List of active Hypercores. 80 | 81 | ### sdk.on('peer-add', peerInfo) / sdk.on('peer-remove', peerInfo) 82 | 83 | You can listen on when a peer gets connected or disconnected with this event. 
84 | 85 | You can find more docs in the [hyperswarm](https://github.com/hyperswarm/hyperswarm#peerinfo-api) repo. 86 | 87 | ```JavaScript 88 | sdk.on('peer-add', (peerInfo) => { 89 | console.log('Connected to', peerInfo.publicKey, 'on', peerInfo.topics) 90 | }) 91 | sdk.on('peer-remove', (peerInfo) => { 92 | console.log('Disconnected from', peerInfo.publicKey) 93 | }) 94 | ``` 95 | 96 | ### sdk.get() 97 | 98 | You can initialize a [Hypercore](https://github.com/hypercore-protocol/hypercore) instance by passing in a key, a name to derive a key from, or a URL containing either a key or a DNS name. 99 | 100 | Unlike corestore, you may not initialize a hypercore from a `null` key since everything must be derivable or loadable. 101 | 102 | Unless `autoJoin` is set to `false`, the peer discovery will be automatically started for the core. 103 | 104 | ```JavaScript 105 | // Derive a key from a "name" 106 | const core = await sdk.get('example name') 107 | 108 | // Resolve DNS to a hypercore 109 | const core = await sdk.get('hyper://example.mauve.moe') 110 | 111 | // Buffer key, 32 bytes of 0's 112 | const core = await sdk.get(b4a.alloc(32, 0)) 113 | 114 | // Hex key, equivalent to 32 bytes of zeros 115 | const core = await sdk.get('hyper://0000000000000000000000000000000000000000000000000000000000000000') 116 | 117 | // z32 encoded, equivalent to 32 bytes of zeros 118 | const core = await sdk.get('hyper://yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy') 119 | 120 | // Don't auto-join the swarm for the core on init 121 | const core = await sdk.get('example', {autoJoin: false}) 122 | ``` 123 | 124 | ### sdk.getDrive() 125 | 126 | You can initialize a [Hyperdrive](https://github.com/holepunchto/hyperdrive-next) instance by passing in the same arguments as in `sdk.get()`. 127 | 128 | In addition to the usual `hyperdrive` properties, there's a new `url` property to get the `hyper://` URL for the drive to be used elsewhere.
129 | 130 | Note that the drive's metadata DB's discovery key will be used for replicating if `autoJoin` is `true`. 131 | 132 | Hyperdrive is mostly useful for storing and loading files since it splits the metadata representing the file system and the blob storage into separate cores. 133 | 134 | ```JavaScript 135 | const drive = await sdk.getDrive('hyper://blob.mauve.moe') 136 | for await (const path of drive.readdir('/')) { 137 | const stat = await drive.stat(path) 138 | } 139 | ``` 140 | 141 | ### sdk.getBee() 142 | 143 | You can initialize a [Hyperbee](https://github.com/holepunchto/hyperbee) instance by passing the same arguments as in `sdk.get()`. 144 | 145 | In addition to the usual `hyperbee` properties, there's a new `url` property to get the `hyper://` URL for the bee to be used elsewhere. 146 | 147 | Additionally, you should pass in a `keyEncoding` and a `valueEncoding` in order to control the encoding for data that's being written. 148 | 149 | Hyperbee is best used when you want to create database indexes. 150 | 151 | For an out of the box database with a proper query language, check out [HyperbeeDeeBee](https://github.com/RangerMauve/hyperbeedeebee/). 152 | 153 | ```JavaScript 154 | const db = await sdk.getBee('example db') 155 | 156 | const db = await sdk.getBee('example db', {keyEncoding: 'utf8', valueEncoding: 'json'}) 157 | await db.put('hello', 'world') 158 | 159 | for await (const entry of db.createReadStream()) { 160 | console.log(entry) 161 | } 162 | ``` 163 | 164 | ### sdk.resolveDNSToKey() 165 | 166 | You can manually resolve DNS addresses to hypercore keys on domains using the DNS Link spec with this method. 167 | 168 | However, it's not mandatory to use DNS since `sdk.get()` will automatically detect and perform resolutions of DNS for `hyper://` URLs. 169 | 170 | Hyper-SDK currently bypasses the OS DNS resolver and uses DNS Over HTTPS.
You can configure your own using the `dnsResolver` config option and any of the options [on this list](https://dnsprivacy.org/public_resolvers/#dns-over-https-doh). By default we use the one provided by [Mozilla](https://developers.cloudflare.com/1.1.1.1/commitment-to-privacy/privacy-policy/firefox/). 171 | 172 | ```JavaScript 173 | const key = await sdk.resolveDNSToKey('example.mauve.moe') 174 | ``` 175 | 176 | ### sdk.namespace() 177 | 178 | Get back a namespaced [Corestore](https://github.com/hypercore-protocol/corestore/) instance which can be passed to things like Hyperdrive. 179 | 180 | Note that cores initialized with a namespaced corestore will not be auto-joined and you will need to call `sdk.join(core.discoveryKey)` on said cores. 181 | 182 | ```JavaScript 183 | import Hyperdrive from "hyperdrive" 184 | 185 | const drive = new Hyperdrive(sdk.namespace('example')) 186 | 187 | // Wait for the drive to initialize 188 | await drive.ready() 189 | 190 | // Manually trigger peer lookup for this drive 191 | sdk.join(drive.discoveryKey) 192 | ``` 193 | 194 | ### sdk.join() / sdk.leave() 195 | 196 | You can manually trigger peer discovery of hypercores as well as stop peer discovery. 197 | This can be done by using the `discoveryKey` of a hypercore, or any 32 byte buffer. 198 | 199 | As well, you can use string names for topics in order to discover peers based on a human readable string. 200 | When using string topics, they are converted to 32 byte buffers using the [Hypercore Crypto namespace algorithm](https://github.com/mafintosh/hypercore-crypto#list--cryptonamespacename-count).
201 | 202 | ```JavaScript 203 | const core = await sdk.get('example', {autoJoin: false}) 204 | 205 | // Start finding peers without advertising 206 | sdk.join(core.discoveryKey, {server: false}) 207 | 208 | // Listen on a human readable topic 209 | sdk.join("cool cat videos") 210 | 211 | sdk.leave(core.discoveryKey) 212 | sdk.leave("cool cat videos") 213 | ``` 214 | 215 | ### sdk.joinPeer() / sdk.leavePeer() 216 | 217 | ```JavaScript 218 | const sdk1 = await SDK.create({storage: './sdk1'}) 219 | const sdk2 = await SDK.create({storage: './sdk2'}) 220 | 221 | sdk1.joinPeer(sdk2.publicKey) 222 | ``` 223 | 224 | ### sdk.close() 225 | 226 | This will gracefully close connections, remove advertisements from the DHT, and close any open file handles. 227 | 228 | Make sure you invoke this to keep the network fast and to avoid data corruption! 229 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | import HyperSwarm from 'hyperswarm' 2 | import CoreStore from 'corestore' 3 | import Hypercore from 'hypercore' 4 | import Hyperdrive from 'hyperdrive' 5 | import Hyperbee from 'hyperbee' 6 | import crypto from 'hypercore-crypto' 7 | import z32 from 'z32' 8 | import b4a from 'b4a' 9 | import { EventEmitter } from 'events' 10 | import { join } from 'path' 11 | import RocksDB from 'rocksdb-native' 12 | 13 | // TODO: Base36 encoding/decoding for URLs instead of hex 14 | 15 | export const HYPER_PROTOCOL_SCHEME = 'hyper://' 16 | export const DEFAULT_CORE_OPTS = { 17 | } 18 | export const DEFAULT_JOIN_OPTS = { 19 | server: true, 20 | client: true 21 | } 22 | export const DEFAULT_CORESTORE_OPTS = { 23 | } 24 | export const DEFAULT_SWARM_OPTS = { 25 | } 26 | 27 | // Monkey-patching with first class URL support 28 | Object.defineProperty(Hypercore.prototype, 'url', { 29 | get: function () { 30 | return `${HYPER_PROTOCOL_SCHEME}${this.id}/` 31 | } 32 | }) 33 | 
Object.defineProperty(Hyperdrive.prototype, 'url', { 34 | get: function () { 35 | return `${HYPER_PROTOCOL_SCHEME}${this.core.id}/` 36 | } 37 | }) 38 | Object.defineProperty(Hyperbee.prototype, 'url', { 39 | get: function () { 40 | return `${HYPER_PROTOCOL_SCHEME}${this.feed.id}/` 41 | } 42 | }) 43 | 44 | const DEFAULT_DNS_RESOLVER = 'https://mozilla.cloudflare-dns.com/dns-query' 45 | 46 | const DNSLINK_PREFIX = 'dnslink=/hyper/' 47 | 48 | export class SDK extends EventEmitter { 49 | #fetch 50 | #dnsCache 51 | #dnsMemoryCache 52 | #defaultCoreOpts 53 | #defaultJoinOpts 54 | #dnsResolver 55 | #swarm 56 | #corestore 57 | #coreCache 58 | #beeCache 59 | #driveCache 60 | 61 | constructor ({ 62 | swarm = throwMissing('swarm'), 63 | corestore = throwMissing('corestore'), 64 | dnsCache = throwMissing('dnsCache'), 65 | fetch = globalThis.fetch, 66 | defaultCoreOpts = DEFAULT_CORE_OPTS, 67 | defaultJoinOpts = DEFAULT_JOIN_OPTS, 68 | dnsResolver = DEFAULT_DNS_RESOLVER, 69 | autoJoin = true, 70 | doReplicate = true 71 | } = {}) { 72 | super() 73 | this.#swarm = swarm 74 | this.#corestore = corestore 75 | this.#dnsCache = dnsCache 76 | this.#fetch = fetch 77 | 78 | // These probably shouldn't be accessed 79 | this.#dnsMemoryCache = new Map() 80 | this.#coreCache = new Map() 81 | this.#beeCache = new Map() 82 | this.#driveCache = new Map() 83 | 84 | this.#defaultCoreOpts = defaultCoreOpts 85 | this.#defaultJoinOpts = defaultJoinOpts 86 | this.#dnsResolver = dnsResolver 87 | 88 | this.autoJoin = autoJoin 89 | 90 | if (doReplicate) { 91 | swarm.on('connection', (connection, peerInfo) => { 92 | this.emit('peer-add', peerInfo) 93 | connection.once('close', () => this.emit('peer-remove', peerInfo)) 94 | this.replicate(connection) 95 | }) 96 | } 97 | } 98 | 99 | get swarm () { 100 | return this.#swarm 101 | } 102 | 103 | get corestore () { 104 | return this.#corestore 105 | } 106 | 107 | get publicKey () { 108 | return this.swarm.keyPair.publicKey 109 | } 110 | 111 | get connections 
() { 112 | return this.swarm.connections 113 | } 114 | 115 | get peers () { 116 | return this.swarm.peers 117 | } 118 | 119 | get cores () { 120 | return [...this.#coreCache.values()] 121 | } 122 | 123 | async resolveDNSToKey (hostname) { 124 | // TODO: Check for TTL? 125 | if (this.#dnsMemoryCache.has(hostname)) { 126 | return this.#dnsMemoryCache.get(hostname) 127 | } 128 | 129 | const fetch = this.#fetch 130 | 131 | const subdomained = `_dnslink.${hostname}` 132 | 133 | const url = `${this.#dnsResolver}?name=${subdomained}&type=TXT` 134 | 135 | let answers = null 136 | try { 137 | const response = await fetch(url, { 138 | headers: { accept: 'application/dns-json' } 139 | }) 140 | 141 | if (!response.ok) { 142 | throw new Error(`Unable to resolve DoH for ${hostname} ${await response.text()}`) 143 | } 144 | 145 | const dnsResults = await response.json() 146 | answers = dnsResults.Answer 147 | await this.#dnsCache.put(hostname, JSON.stringify(dnsResults)) 148 | } catch (e) { 149 | const cached = await this.#dnsCache.get(hostname) 150 | if (cached) { 151 | answers = JSON.parse(cached).Answer 152 | } 153 | } 154 | 155 | for (let { name, data } of answers) { 156 | if (name !== subdomained || !data) { 157 | continue 158 | } 159 | if (data.startsWith('"')) { 160 | data = data.slice(1, -1) 161 | } 162 | if (!data.startsWith(DNSLINK_PREFIX)) { 163 | continue 164 | } 165 | const key = data.split('/')[2] 166 | this.#dnsMemoryCache.set(hostname, key) 167 | return key 168 | } 169 | 170 | throw new Error(`DNS-Link Record not found for TXT ${subdomained}`) 171 | } 172 | 173 | // Resolves a string to be a key or opts and resolves DNS 174 | // Useful for hypercore opts or Hyperdrive 175 | async resolveNameOrKeyToOpts (nameOrKeyOrURL) { 176 | // If a URL, use the hostname as either a key or a DNS to resolve 177 | // If not a URL, try to decode to a key 178 | // if not a key, use as name to generate a hypercore 179 | // Else it's an errorW 180 | 181 | const isKeyString = (typeof 
nameOrKeyOrURL === 'string') 182 | if (!isKeyString) { 183 | // If a 32 byte buffer, use it as the key 184 | if (nameOrKeyOrURL && nameOrKeyOrURL.length === 32) { 185 | return { key: nameOrKeyOrURL } 186 | } else { 187 | throw new Error('Must specify a name, url, or a 32 byte buffer with a key') 188 | } 189 | } 190 | 191 | if (nameOrKeyOrURL.startsWith(HYPER_PROTOCOL_SCHEME)) { 192 | const url = new URL(nameOrKeyOrURL) 193 | // probably a domain 194 | if (url.hostname.includes('.')) { 195 | const key = await this.resolveDNSToKey(url.hostname) 196 | 197 | return { key: stringToKey(key) } 198 | } else { 199 | // Try to parse the hostname to a key 200 | const key = stringToKey(url.hostname) 201 | if (!key) { 202 | // If not a key or a domain, throw an error 203 | throw new Error('URLs must have either an encoded key or a valid DNSlink domain') 204 | } 205 | return { key } 206 | } 207 | } else { 208 | const parsed = stringToKey(nameOrKeyOrURL) 209 | if (parsed) { 210 | return { key: parsed } 211 | } else { 212 | return { name: nameOrKeyOrURL } 213 | } 214 | } 215 | } 216 | 217 | async getBee (nameOrKeyOrURL, opts = {}) { 218 | const core = await this.get(nameOrKeyOrURL, opts) 219 | 220 | if (this.#beeCache.has(core.url)) { 221 | return this.#beeCache.get(core.url) 222 | } 223 | 224 | const bee = new Hyperbee(core, opts) 225 | 226 | core.once('close', () => { 227 | this.#beeCache.delete(core.url) 228 | }) 229 | 230 | this.#beeCache.set(core.url, bee) 231 | 232 | await bee.ready() 233 | 234 | return bee 235 | } 236 | 237 | async getDrive (nameOrKeyOrURL, opts = {}) { 238 | const coreOpts = { 239 | ...this.#defaultCoreOpts, 240 | autoJoin: this.autoJoin, 241 | ...opts 242 | } 243 | 244 | const resolvedOpts = await this.resolveNameOrKeyToOpts(nameOrKeyOrURL) 245 | 246 | const { key, name } = resolvedOpts 247 | let stringKey = key && key.toString('hex') 248 | 249 | if (this.#driveCache.has(name)) { 250 | return this.#driveCache.get(name) 251 | } else if 
(this.#driveCache.has(stringKey)) { 252 | return this.#driveCache.get(stringKey) 253 | } 254 | 255 | Object.assign(coreOpts, resolvedOpts) 256 | 257 | let corestore = this.corestore 258 | 259 | if (resolvedOpts.key) { 260 | corestore = this.namespace(stringKey) 261 | } else if (resolvedOpts.name) { 262 | corestore = this.namespace(name) 263 | } else { 264 | throw new Error('Unable to parse') 265 | } 266 | 267 | const drive = new Hyperdrive(corestore, key || null) 268 | 269 | await drive.ready() 270 | 271 | const core = drive.core 272 | stringKey = core.key.toString('hex') 273 | 274 | drive.once('close', () => { 275 | this.#driveCache.delete(stringKey) 276 | this.#driveCache.delete(name) 277 | }) 278 | 279 | this.#driveCache.set(stringKey, drive) 280 | if (name) this.#driveCache.set(name, drive) 281 | 282 | if (coreOpts.autoJoin && !core.discovery) { 283 | await this.joinCore(core, opts) 284 | } 285 | 286 | return drive 287 | } 288 | 289 | async get (nameOrKeyOrURL, opts = {}) { 290 | const coreOpts = { 291 | ...this.#defaultCoreOpts, 292 | autoJoin: this.autoJoin, 293 | ...opts 294 | } 295 | 296 | const resolvedOpts = await this.resolveNameOrKeyToOpts(nameOrKeyOrURL) 297 | 298 | const { key, name } = resolvedOpts 299 | let stringKey = key && key.toString('hex') 300 | 301 | if (this.#coreCache.has(name)) { 302 | return this.#coreCache.get(name) 303 | } else if (this.#coreCache.has(stringKey)) { 304 | return this.#coreCache.get(stringKey) 305 | } 306 | 307 | Object.assign(coreOpts, resolvedOpts) 308 | 309 | // There shouldn't be a way to pass null for the key 310 | const core = this.corestore.get(coreOpts) 311 | 312 | // Await for core to be ready 313 | await core.ready() 314 | 315 | core.once('close', () => { 316 | this.#coreCache.delete(stringKey) 317 | this.#coreCache.delete(name) 318 | }) 319 | 320 | stringKey = core.key.toString('hex') 321 | 322 | this.#coreCache.set(stringKey, core) 323 | if (name) this.#coreCache.set(name, core) 324 | 325 | if 
(coreOpts.autoJoin && !core.discovery) { 326 | await this.joinCore(core, opts) 327 | } 328 | 329 | return core 330 | } 331 | 332 | // Returns a corestore for a namespace 333 | namespace (namespace) { 334 | return this.corestore.namespace(namespace) 335 | } 336 | 337 | makeTopicKey (name) { 338 | const [key] = crypto.namespace(name, 1) 339 | return key 340 | } 341 | 342 | async joinCore (core, opts = {}) { 343 | if (core.discovery) return 344 | const discovery = this.join(core.discoveryKey, opts) 345 | core.discovery = discovery 346 | 347 | // If we're the owner, then we wait until is fully announced 348 | if (core.writable) { 349 | await discovery.flushed() 350 | } 351 | 352 | // Await for initial peer for new readable cores 353 | if (!core.writable && !core.length) { 354 | const done = core.findingPeers() 355 | this.swarm.flush().then(done) 356 | await core.update() 357 | } 358 | 359 | core.once('close', () => { 360 | discovery.destroy() 361 | }) 362 | } 363 | 364 | join (topic, opts = {}) { 365 | if (typeof topic === 'string') { 366 | return this.join(this.makeTopicKey(topic), opts) 367 | } 368 | const joinOpts = { ...this.defaultJoinOpts, ...opts } 369 | return this.swarm.join(topic, joinOpts) 370 | } 371 | 372 | leave (topic) { 373 | if (typeof topic === 'string') { 374 | return this.leave(this.makeTopicKey(topic)) 375 | } 376 | return this.swarm.leave(topic) 377 | } 378 | 379 | joinPeer (id) { 380 | return this.swarm.joinPeer(id) 381 | } 382 | 383 | leavePeer (id) { 384 | return this.swarm.leavePeer(id) 385 | } 386 | 387 | async ready () { 388 | // Wait for the network to be configured? 
389 | await this.corestore.ready() 390 | await this.swarm.listen() 391 | } 392 | 393 | async close () { 394 | await this.#dnsCache.flush() 395 | // Close corestore, close hyperswarm 396 | await Promise.all([ 397 | this.corestore.close(), 398 | this.swarm.destroy(), 399 | this.#dnsCache.close() 400 | ]) 401 | } 402 | 403 | replicate (connection) { 404 | this.corestore.replicate(connection) 405 | } 406 | } 407 | 408 | export async function create ({ 409 | storage, 410 | corestoreOpts = DEFAULT_CORESTORE_OPTS, 411 | swarmOpts = DEFAULT_SWARM_OPTS, 412 | fetch = globalThis.fetch, 413 | ...opts 414 | } = {}) { 415 | // TODO: Account for "random-access-application" style storage 416 | if (!storage) throw new Error('Storage parameter is required to be a valid file path') 417 | const corestore = opts.corestore || new CoreStore(storage, { ...corestoreOpts }) 418 | const dnsCache = opts.dnsCache || new RocksDB(join(storage, 'dnsCache')) 419 | 420 | const networkKeypair = await corestore.createKeyPair('noise') 421 | 422 | const swarm = opts.swarm || new HyperSwarm({ 423 | keyPair: networkKeypair, 424 | ...swarmOpts 425 | }) 426 | 427 | const sdk = new SDK({ 428 | ...opts, 429 | fetch: fetch || (await import('bare-fetch')).default, 430 | corestore, 431 | swarm, 432 | dnsCache 433 | }) 434 | 435 | await sdk.ready() 436 | 437 | return sdk 438 | } 439 | 440 | function stringToKey (string) { 441 | if (string.length === 52) { 442 | try { 443 | return z32.decode(string) 444 | } catch { 445 | // Not formatted properly, probs a name? 446 | } 447 | } else if (string.length === 64) { 448 | // Parse as hex key 449 | try { 450 | return b4a.from(string, 'hex') 451 | } catch { 452 | // Not formatted properly, probs a name? 
453 | } 454 | } 455 | return null 456 | } 457 | 458 | function throwMissing (name) { 459 | throw new TypeError(`Missing parameter ${name}`) 460 | } 461 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hyper-sdk", 3 | "version": "6.0.0", 4 | "description": "A Software Development Kit for the Hypercore-Protocol", 5 | "type": "module", 6 | "exports": { 7 | ".": { 8 | "default": "./index.js" 9 | } 10 | }, 11 | "imports": { 12 | "events": { 13 | "default": "bare-events" 14 | }, 15 | "stream": { 16 | "default": "bare-stream" 17 | }, 18 | "path": { 19 | "default": "bare-path" 20 | } 21 | }, 22 | "scripts": { 23 | "test": "npm run test:node && npm run test:bare", 24 | "test:bare": "bare test.js", 25 | "test:node": "node test.js", 26 | "lint": "standard --fix", 27 | "upgrade-hyper": "npm i --save corestore@latest hypercore@latest hyperswarm@latest hyperdrive@latest hyperbee@latest hypercore-protocol@latest z32@latest b4a@latest bare-events@latest bare-fetch@latest bare-os@latest bare-path@latest bare-stream@latest" 28 | }, 29 | "repository": { 30 | "type": "git", 31 | "url": "git+https://github.com/rangermauve/hyper-sdk.git" 32 | }, 33 | "keywords": [ 34 | "dat", 35 | "sdk", 36 | "hyperdrive", 37 | "hypercore", 38 | "hypercore-protocol", 39 | "p2p" 40 | ], 41 | "author": "RangerMauve", 42 | "license": "MIT", 43 | "bugs": { 44 | "url": "https://github.com/rangermauve/hyper-sdk/issues" 45 | }, 46 | "homepage": "https://github.com/rangermauve/hyper-sdk#readme", 47 | "dependencies": { 48 | "b4a": "^1.6.7", 49 | "bare-events": "^2.5.4", 50 | "bare-fetch": "^2.3.0", 51 | "bare-os": "^3.6.1", 52 | "bare-path": "^3.0.0", 53 | "bare-stream": "^2.6.5", 54 | "corestore": "^7.4.3", 55 | "dns-query": "^0.11.2", 56 | "hyperbee": "^2.24.2", 57 | "hypercore": "^11.8.3", 58 | "hypercore-protocol": "^8.0.7", 59 | "hyperdrive": "^12.3.0", 60 | 
"hyperswarm": "^4.11.7", 61 | "rocksdb-native": "^3.5.10", 62 | "test-tmp": "^1.4.0", 63 | "z32": "^1.1.0" 64 | }, 65 | "devDependencies": { 66 | "bare": "^1.17.6", 67 | "brittle": "^3.7.0", 68 | "standard": "^17.0.0", 69 | "tape": "^5.6.1", 70 | "tmp-promise": "^3.0.3" 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /test.js: -------------------------------------------------------------------------------- 1 | import { test } from 'brittle' 2 | import { once } from 'events' 3 | import { create } from './index.js' 4 | import b4a from 'b4a' 5 | import tmp from 'test-tmp' 6 | 7 | const NULL_KEY = 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy' 8 | const NULL_BUFFER = b4a.alloc(32, 0) 9 | const NULL_HEX_KEY = NULL_BUFFER.toString('hex') 10 | const NULL_URL = `hyper://${NULL_KEY}/` 11 | 12 | const timeout = 30000 13 | 14 | test('Specify storage for sdk', async (t) => { 15 | const storage = await tmp() 16 | const name = 'example' 17 | const data = 'Hello World!' 
18 | let sdk = await create({ storage }) 19 | let sdk2 = null 20 | 21 | try { 22 | try { 23 | sdk2 = await create({ storage }) 24 | t.fail(new Error('Should not be able to load SDK over existing dir')) 25 | } catch { 26 | t.pass('Threw error when opening same storage path twice') 27 | } finally { 28 | if (sdk2) await sdk2.close() 29 | } 30 | 31 | const core1 = await sdk.get(name) 32 | const url1 = core1.url 33 | await core1.append(data) 34 | 35 | await sdk.close() 36 | 37 | sdk = await create({ storage }) 38 | 39 | const core2 = await sdk.get(name) 40 | const url2 = core2.url 41 | 42 | t.is(url1, url2, 'Loaded core has same key') 43 | 44 | const contents = await core2.get(0) 45 | 46 | t.alike(contents.toString('utf8'), data, 'Got data back from disk') 47 | } finally { 48 | await sdk.close() 49 | } 50 | }) 51 | 52 | test('Support storage reuse by default', async (t) => { 53 | const storage = await tmp() 54 | 55 | const sdk = await create({ storage }) 56 | const core = await sdk.get('persist in memory') 57 | const key = core.key 58 | 59 | const data = b4a.from('beep') 60 | await core.append(data) 61 | await core.close() 62 | t.ok(core.closed, 'initial core was closed') 63 | 64 | const coreAgain = await sdk.get(key) 65 | t.alike(await coreAgain.get(0, { wait: false }), data, 'found persisted data') 66 | 67 | await sdk.close() 68 | }) 69 | 70 | test('Load hypercores by names and urls', async (t) => { 71 | const storage = await tmp() 72 | 73 | const sdk = await create({ storage }) 74 | const name = 'example' 75 | 76 | try { 77 | const core = await sdk.get(name) 78 | 79 | t.ok(core, 'Got core for name') 80 | 81 | const toTry = [ 82 | NULL_KEY, 83 | NULL_BUFFER, 84 | NULL_HEX_KEY, 85 | `hyper://${NULL_KEY}`, 86 | `hyper://${NULL_HEX_KEY}` 87 | ] 88 | 89 | for (const key of toTry) { 90 | const core = await sdk.get(key) 91 | 92 | t.ok(core, `Got core for ${key}`) 93 | t.is(core.url, NULL_URL, 'Correct URL got loaded') 94 | } 95 | } finally { 96 | await sdk.close() 97 | } 98 
| }) 99 | 100 | test('Loading same key twice results in same core', async (t) => { 101 | const storage = await tmp() 102 | 103 | const sdk = await create({ storage }) 104 | const name = 'example' 105 | 106 | try { 107 | const core1 = await sdk.get(name) 108 | const core2 = await sdk.get(core1.key) 109 | const core3 = await sdk.get(core1.url) 110 | t.is(core1, core2, 'Key loaded same core from memory') 111 | t.is(core1, core3, 'URL loaded same core from memory') 112 | 113 | const drive1 = await sdk.getDrive(name) 114 | const drive2 = await sdk.getDrive(drive1.key) 115 | const drive3 = await sdk.getDrive(drive1.url) 116 | t.is(drive1, drive2, 'Key loaded same drive from memory') 117 | t.is(drive1, drive3, 'URL loaded same drive from memory') 118 | 119 | const bee1 = await sdk.getBee(name) 120 | const bee2 = await sdk.getBee(bee1.key) 121 | const bee3 = await sdk.getBee(bee1.url) 122 | t.is(bee1, bee2, 'Key loaded same bee from memory') 123 | t.is(bee1, bee3, 'URL loaded same bee from memory') 124 | 125 | await core1.close() 126 | await drive1.close() 127 | const core4 = await sdk.get(name) 128 | t.not(core1, core4, 'New core after close') 129 | const drive4 = await sdk.getDrive(name) 130 | t.not(drive1, drive4, 'New drive after close') 131 | const bee4 = await sdk.getBee(name) 132 | t.not(bee1, bee4, 'New bee after close') 133 | } finally { 134 | await sdk.close() 135 | } 136 | }) 137 | 138 | test('Resolve DNS entries to keys', async (t) => { 139 | const storage = await tmp() 140 | 141 | const expected = NULL_KEY 142 | 143 | const sdk = await create({ storage }) 144 | 145 | try { 146 | const resolved = await sdk.resolveDNSToKey('example.mauve.moe') 147 | 148 | t.is(resolved, expected, 'Resolved to correct key') 149 | } finally { 150 | await sdk.close() 151 | } 152 | }) 153 | 154 | test('Resolve DNS in hyper URLs', async (t) => { 155 | const storage = await tmp() 156 | 157 | const expected = NULL_KEY 158 | 159 | const sdk = await create({ storage }) 160 | 161 | try { 
162 | const core = await sdk.get('hyper://example.mauve.moe') 163 | 164 | t.is(core.id, expected, 'Loaded correct core from DNSLink') 165 | } finally { 166 | await sdk.close() 167 | } 168 | }) 169 | 170 | test('Get hostname from cache when fetch fails', async (t) => { 171 | const storage = await tmp() 172 | 173 | const expected = NULL_KEY 174 | 175 | const fetch = globalThis.fetch || (await import('bare-fetch')).default 176 | 177 | let isFirst = true 178 | let hasFailed = false 179 | function testFetch (...args) { 180 | if (isFirst) { 181 | isFirst = false 182 | return fetch(...args) 183 | } 184 | hasFailed = true 185 | throw new Error('Simulated Network Fail') 186 | } 187 | 188 | let sdk = await create({ fetch: testFetch, storage }) 189 | 190 | try { 191 | const resolved = await sdk.resolveDNSToKey('example.mauve.moe') 192 | 193 | t.is(resolved, expected, 'Resolved to correct key') 194 | 195 | await sdk.close() 196 | sdk = await create({ fetch: testFetch, storage }) 197 | 198 | const resolved2 = await sdk.resolveDNSToKey('example.mauve.moe') 199 | 200 | t.is(resolved2, expected, 'Resolved to correct key, without network') 201 | t.is(hasFailed, true, 'Fetch was called and failed') 202 | } finally { 203 | await sdk.close() 204 | } 205 | }) 206 | 207 | test('Load a core between two peers', { timeout }, async (t) => { 208 | const storage1 = await tmp() 209 | const storage2 = await tmp() 210 | 211 | const sdk1 = await create({ storage: storage1 }) 212 | const sdk2 = await create({ storage: storage2 }) 213 | try { 214 | t.comment('Initializing core on first peer') 215 | 216 | const core1 = await sdk1.get('example') 217 | await core1.append('Hello World!') 218 | 219 | t.comment('Loading core on second peer') 220 | 221 | const core2 = await sdk2.get(core1.url) 222 | 223 | t.ok(core2.peers?.length, 'Found peer') 224 | t.is(core2.url, core1.url, 'Got expected URL') 225 | t.is(core2.length, 1, 'Not empty') 226 | 227 | const data = await core2.get(0) 228 | t.alike(data, 
Buffer.from('Hello World!'), 'Got block back out') 229 | } finally { 230 | await Promise.all([ 231 | sdk1.close(), 232 | sdk2.close() 233 | ]) 234 | } 235 | }) 236 | 237 | test('Connect directly between two peers', { timeout }, async (t) => { 238 | const storage1 = await tmp() 239 | const storage2 = await tmp() 240 | 241 | const sdk1 = await create({ storage: storage1 }) 242 | const sdk2 = await create({ storage: storage2 }) 243 | 244 | const onPeer = once(sdk2, 'peer-add') 245 | const onPeernt = once(sdk2, 'peer-remove') 246 | try { 247 | await sdk1.joinPeer(sdk2.publicKey) 248 | 249 | const [peerInfo] = await onPeer 250 | 251 | t.alike(peerInfo.publicKey, sdk1.publicKey, 'Connected to peer') 252 | } finally { 253 | await Promise.all([ 254 | sdk1.close(), 255 | sdk2.close() 256 | ]) 257 | } 258 | 259 | await onPeernt 260 | 261 | t.pass('Peer remove event detected') 262 | }) 263 | 264 | test('Get a hyperdrive and share a file', async (t) => { 265 | const storage1 = await tmp() 266 | const storage2 = await tmp() 267 | 268 | const sdk1 = await create({ storage: storage1 }) 269 | const sdk2 = await create({ storage: storage2 }) 270 | 271 | try { 272 | const drive1 = await sdk1.getDrive('example') 273 | 274 | const ws = drive1.createWriteStream('/blob.txt') 275 | const onWrote = once(ws, 'close') 276 | 277 | ws.write('Hello, ') 278 | ws.write('world!') 279 | ws.end() 280 | 281 | await onWrote 282 | 283 | const drive2 = await sdk2.getDrive(drive1.url) 284 | 285 | t.is(drive2.url, drive1.url, 'Loaded drive has same URL') 286 | 287 | const rs = drive2.createReadStream('/blob.txt') 288 | 289 | let data = '' 290 | for await (const chunk of rs) { 291 | data += chunk.toString('utf8') 292 | } 293 | 294 | t.is(data, 'Hello, world!', 'Loaded expected data') 295 | } finally { 296 | await Promise.all([ 297 | sdk1.close(), 298 | sdk2.close() 299 | ]) 300 | } 301 | }) 302 | 303 | test('Get a hyperbee and share a key value pair', async (t) => { 304 | const storage1 = await tmp() 305 
| const storage2 = await tmp() 306 | 307 | const sdk1 = await create({ storage: storage1 }) 308 | const sdk2 = await create({ storage: storage2 }) 309 | 310 | try { 311 | const encodingOpts = { keyEncoding: 'utf8', valueEncoding: 'utf8' } 312 | const db1 = await sdk1.getBee('example', encodingOpts) 313 | 314 | await db1.put('hello', 'world') 315 | 316 | const db2 = await sdk2.getBee(db1.url, encodingOpts) 317 | t.is(db2.url, db1.url, 'Loaded bee has same URL') 318 | 319 | const { value } = await db2.get('hello') 320 | 321 | t.is(value, 'world', 'Got value for key') 322 | } finally { 323 | await Promise.all([ 324 | sdk1.close(), 325 | sdk2.close() 326 | ]) 327 | } 328 | }) 329 | 330 | // test('', async (t) => {}) 331 | --------------------------------------------------------------------------------