├── .github └── workflows │ └── test-node.yml ├── .gitignore ├── LICENSE ├── README.md ├── UPGRADE.md ├── errors.js ├── examples ├── .gitignore ├── announce.js ├── basic.js ├── http.js └── lookup.js ├── index.js ├── lib ├── audit.js ├── bit-interlude.js ├── bitfield.js ├── caps.js ├── compat.js ├── copy-prologue.js ├── core.js ├── default-encryption.js ├── download.js ├── fully-remote-proof.js ├── hotswap-queue.js ├── info.js ├── merkle-tree.js ├── messages.js ├── multisig.js ├── mutex.js ├── receiver-queue.js ├── remote-bitfield.js ├── replicator.js ├── session-state.js ├── streams.js └── verifier.js ├── messages.js ├── package.json └── test ├── all.js ├── atomic.js ├── basic.js ├── batch.js ├── bench ├── networking.js ├── open-close.js ├── range-download.js ├── speedtest.js └── throughput.js ├── bit-interlude.js ├── bitfield.js ├── clear.js ├── compat.js ├── conflicts.js ├── core.js ├── encodings.js ├── encryption.js ├── extension.js ├── fixtures ├── abi │ ├── snapshot.js │ ├── v10.0.0-alpha.39 │ │ ├── bitfield │ │ ├── data │ │ ├── oplog │ │ └── tree │ ├── v10.4.1-partial │ │ ├── data │ │ └── oplog │ └── v10.4.1 │ │ ├── bitfield │ │ ├── data │ │ ├── oplog │ │ └── tree ├── basic.snapshot.cjs ├── encryption │ ├── generate.js │ └── v11.0.48.cjs └── storage.snapshot.cjs ├── fully-remote-proof.js ├── helpers ├── index.js └── networking.js ├── manifest.js ├── merkle-tree.js ├── move-to.js ├── mutex.js ├── preload.js ├── purge.js ├── remote-bitfield.js ├── remote-length.js ├── replicate.js ├── sessions.js ├── snapshots.js ├── streams.js ├── timeouts.js └── user-data.js /.github/workflows/test-node.yml: -------------------------------------------------------------------------------- 1 | name: Build Status 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - rocksdb 7 | tags: # To trigger the canary 8 | - '*' 9 | pull_request: 10 | branches: 11 | - main 12 | - rocksdb 13 | jobs: 14 | build: 15 | if: ${{ !startsWith(github.ref, 'refs/tags/')}} # Already runs for the push of the commit, no need to run again for the tag 16 | strategy: 17 | matrix: 18 | node-version: [lts/*] 19 | os: [ubuntu-latest, macos-latest, windows-latest] 20 | runs-on: ${{ matrix.os }} 21 | steps: 22 | - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 https://github.com/actions/checkout/releases/tag/v4.1.1 23 | - name: Use Node.js ${{ matrix.node-version }} 24 | uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 # v3.8.2 https://github.com/actions/setup-node/releases/tag/v3.8.2 25 | with: 26 | node-version: ${{ matrix.node-version }} 27 | - run: npm install 28 | - run: npm test 29 | - run: npm -g install bare 30 | - run: npm run test:bare 31 | trigger_canary: 32 | if: startsWith(github.ref, 'refs/tags/') # Only run when a new package is published (detects when a new tag is pushed) 33 | runs-on: ubuntu-latest 34 | steps: 35 | - name: trigger canary 36 | run: | 37 | curl -L -X POST \ 38 | -H "Accept: application/vnd.github+json" \ 39 | -H "Authorization: Bearer ${{ secrets.CANARY_DISPATCH_PAT }}" \ 40 | -H "X-GitHub-Api-Version: 2022-11-28" \ 41 | https://api.github.com/repos/holepunchto/canary-tests/dispatches \ 42 | -d '{"event_type":"triggered-by-${{ github.event.repository.name }}-${{ github.ref_name }}"}' 43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | package-lock.json 3 | .DS_Store 4 | sandbox 5 | coverage 6 | sandbox.js 7 | tmp 8 | 
*.log 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Mathias Buus 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /UPGRADE.md: -------------------------------------------------------------------------------- 1 | # Upgrade Notes 2 | 3 | Notes for downstream developers who are upgrading their modules to new, breaking versions of Hypercore. 4 | 5 | ## 11.0.0 6 | 7 | - `sparse` is no longer an option when creating a `Hypercore` instance. All hypercores are sparse. 8 | - `encryptionKey` will be deprecated in favor of the `encryption` option when creating a `Hypercore` instance. 9 | - Storage is now auto-migrated to [`hypercore-storage`](https://github.com/holepunchto/hypercore-storage) if a path `storage` argument was used. 10 | If you are getting a `TypeError: db.columnFamily is not a function` error, you 11 | are likely trying to use a legacy `random-access-storage` instance such as 12 | `random-access-memory` or `random-access-file`. 13 | - `core.indexedLength` is now `core.signedLength` 14 | 15 | ## 10.0.0 16 | 17 | - All number encodings are now little-endian (LE) 18 | - Introduces an "oplog" to atomically track changes locally 19 | - Updated merkle format that only requires a single signature (stored in the oplog) 20 | 21 | ## 9.0.0 22 | 23 | - The format of signatures [has been changed](https://github.com/holepunchto/hypercore/issues/260). This is backwards-compatible (v9 can read v8 signatures), but forward-incompatible (v8 cannot read v9 signatures). If a v8 peer replicates with a v9 peer, it will emit a "REMOTE SIGNATURE INVALID" error on the replication stream. 24 | - The encryption ([NOISE](https://github.com/emilbayes/noise-protocol)) handshake has been changed in a backwards- and forwards-incompatible way. v8 peers cannot handshake with v9 peers, and vice versa. A NOISE-related error is emitted on the replication stream. 25 | - There is no way (yet) to detect whether a peer is running an incompatible version of hypercore at the replication level. One workaround for downstream developers is to include their own application-level handshake before piping to the replication stream, to communicate an "app protocol version" (maybe "v8" and "v9") and abort the connection if the peer is running an incompatible version. A minimal sketch of such a handshake follows; it assumes a Hyperswarm-style duplex `socket`, and the `APP_VERSION` constant and framing are illustrative, not part of Hypercore's API:

```js
const APP_VERSION = 'v9' // hypothetical application-level version tag

swarm.on('connection', function (socket) {
  // send our version first, then wait for the peer's before replicating
  socket.write(Buffer.from(APP_VERSION))

  socket.once('data', function (remote) {
    if (remote.toString('utf-8', 0, APP_VERSION.length) !== APP_VERSION) {
      socket.destroy(new Error('incompatible app protocol version'))
      return
    }

    core.replicate(socket)
  })
})
```
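Note this sketch assumes the peer's version tag arrives alone in the first chunk; a real implementation should length-prefix the handshake so no replication bytes are consumed by accident.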
26 | -------------------------------------------------------------------------------- /errors.js: -------------------------------------------------------------------------------- 1 | // explicitly exposed as hypercore/errors 2 | module.exports = require('hypercore-errors') 3 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | source 2 | clone 3 | -------------------------------------------------------------------------------- /examples/announce.js: -------------------------------------------------------------------------------- 1 | const Hypercore = require('../') 2 | const Hyperswarm = require('hyperswarm') 3 | 4 | const core = new Hypercore('./source') 5 | 6 | start() 7 | 8 | async function start () { 9 | await core.ready() 10 | while (core.length < 1000) { 11 | await core.append('block #' + core.length) 12 | } 13 | 14 | const swarm = new Hyperswarm() 15 | swarm.on('connection', socket => core.replicate(socket)) 16 | swarm.join(core.discoveryKey, { server: true, client: false }) 17 | 18 | console.log('Core:', core.key.toString('hex')) 19 | } 20 | -------------------------------------------------------------------------------- /examples/basic.js: -------------------------------------------------------------------------------- 1 | const Hypercore = require('../') 2 | 3 | start() 4 | 5 | async function start () { 6 | const core = new Hypercore('/tmp/basic') 7 | await core.append(['Hello', 'World']) 8 | console.log(core) 9 | await core.close() 10 | } 11 | -------------------------------------------------------------------------------- /examples/http.js: -------------------------------------------------------------------------------- 1 | const http = require('http') 2 | const Hypercore = require('../') 3 | const Hyperswarm = require('hyperswarm') 4 | const rangeParser = require('range-parser') 5 | 6 | // Convert video into a core: node http.js import ./joker-scene.mp4 7 | // Later replicate so other peers can also watch it: node http.js 8 | // Other peers: node http.js <key> 9 | 10 | const key = process.argv[2] && process.argv[2] !== 'import' ? Buffer.from(process.argv[2], 'hex') : null 11 | const core = new Hypercore('/tmp/movie' + (key ?
'-peer' : ''), key) 12 | 13 | if (process.argv[2] === 'import') importData(process.argv[3]) 14 | else start() 15 | 16 | async function start () { 17 | await core.ready() 18 | if (core.writable) console.log('Share this core key:', core.key.toString('hex')) 19 | 20 | core.on('download', (index) => console.log('Downloaded block #' + index)) 21 | 22 | const swarm = new Hyperswarm() 23 | swarm.on('connection', (socket) => core.replicate(socket)) 24 | const discovery = swarm.join(core.discoveryKey) 25 | 26 | if (core.writable) { 27 | console.log('Announcing') 28 | await discovery.flushed() 29 | } else { 30 | console.log('Finding peers') 31 | const done = core.findingPeers() 32 | swarm.flush().then(done, done) 33 | await core.update() 34 | } 35 | 36 | http.createServer(function (req, res) { 37 | res.setHeader('Content-Type', 'video/mp4') 38 | res.setHeader('Accept-Ranges', 'bytes') 39 | 40 | let byteOffset = 0 41 | let byteLength = core.byteLength 42 | 43 | if (req.headers.range) { 44 | const ranges = rangeParser(core.byteLength, req.headers.range) 45 | 46 | if (ranges === -1 || ranges === -2) { 47 | res.statusCode = 206 48 | res.setHeader('Content-Length', 0) 49 | res.end() 50 | return 51 | } 52 | 53 | const range = ranges[0] 54 | byteOffset = range.start 55 | byteLength = range.end - range.start + 1 56 | 57 | res.statusCode = 206 58 | res.setHeader('Content-Range', 'bytes ' + range.start + '-' + range.end + '/' + core.byteLength) 59 | } 60 | 61 | res.setHeader('Content-Length', byteLength) 62 | 63 | if (req.method === 'HEAD') { 64 | res.end() 65 | return 66 | } 67 | 68 | const bs = core.createByteStream({ byteOffset, byteLength }) 69 | bs.pipe(res, noop) 70 | }).listen(function () { 71 | console.log('HTTP server on http://localhost:' + this.address().port) 72 | }) 73 | } 74 | 75 | async function importData (filename) { 76 | const fs = require('fs') 77 | const rs = fs.createReadStream(filename) 78 | 79 | for await (const data of rs) { 80 | await core.append(data) 81 | } 82 | 83 | console.log('done!', core) 84 | } 85 | 86 | function noop () {} 87 | -------------------------------------------------------------------------------- /examples/lookup.js: -------------------------------------------------------------------------------- 1 | const Hypercore = require('../') 2 | const Hyperswarm = require('hyperswarm') 3 | 4 | const core = new Hypercore('./clone', process.argv[2]) 5 | 6 | start() 7 | 8 | async function start () { 9 | await core.ready() 10 | 11 | const swarm = new Hyperswarm() 12 | swarm.on('connection', socket => core.replicate(socket)) 13 | swarm.join(core.discoveryKey, { server: false, client: true }) 14 | 15 | console.log((await core.get(42)).toString()) 16 | console.log((await core.get(142)).toString()) 17 | console.log((await core.get(511)).toString()) 18 | console.log((await core.get(512)).toString()) 19 | console.log((await core.get(513)).toString()) 20 | } 21 | -------------------------------------------------------------------------------- /lib/audit.js: -------------------------------------------------------------------------------- 1 | const crypto = require('hypercore-crypto') 2 | const flat = require('flat-tree') 3 | const b4a = require('b4a') 4 | const { MerkleTree } = require('./merkle-tree') 5 | 6 | module.exports = async function auditCore (core, { tree = true, blocks = true, bitfield = true, dryRun = false } = {}) { 7 | const length = core.state.length 8 | const stats = { treeNodes: 0, blocks: 0, bits: 0, droppedTreeNodes: 0, droppedBlocks: 0, droppedBits: 0, corrupt: 
false } 9 | 10 | // audit the tree 11 | if (tree) { 12 | let tx = null 13 | 14 | const roots = await MerkleTree.getRootsFromStorage(core.state.storage, length) 15 | const stack = [] 16 | 17 | for (const r of roots) { 18 | if (r === null) { 19 | if (!dryRun) { 20 | const storage = core.state.storage 21 | await storage.store.deleteCore(storage.core) 22 | return null 23 | } 24 | 25 | stats.corrupt = true 26 | continue 27 | } 28 | stack.push(r) 29 | } 30 | 31 | stats.treeNodes += roots.length 32 | 33 | while (stack.length > 0) { 34 | const node = stack.pop() 35 | 36 | if ((node.index & 1) === 0) continue 37 | 38 | const [left, right] = flat.children(node.index) 39 | 40 | const rx = core.state.storage.read() 41 | const leftNodePromise = rx.getTreeNode(left) 42 | const rightNodePromise = rx.getTreeNode(right) 43 | 44 | rx.tryFlush() 45 | 46 | const [leftNode, rightNode] = await Promise.all([leftNodePromise, rightNodePromise]) 47 | 48 | if (isBadTree(node, leftNode, rightNode)) { 49 | if (!tx && !stats.corrupt) tx = core.state.storage.write() 50 | const [l, r] = flat.spans(node.index) 51 | tx.deleteTreeNodeRange(l, r + 1) 52 | stats.droppedTreeNodes++ 53 | continue 54 | } 55 | 56 | if (!leftNode) continue 57 | 58 | stats.treeNodes += 2 59 | stack.push(leftNode, rightNode) 60 | } 61 | 62 | if (tx && !dryRun) await tx.flush() 63 | } 64 | 65 | // audit the blocks 66 | if (blocks) { 67 | let tx = null 68 | 69 | for await (const block of core.state.storage.createBlockStream()) { 70 | if (!core.bitfield.get(block.index)) { 71 | if (!tx && !stats.corrupt) tx = core.state.storage.write() 72 | tx.deleteBlock(block.index) 73 | stats.droppedBlocks++ 74 | continue 75 | } 76 | const rx = core.state.storage.read() 77 | const treeNodePromise = rx.getTreeNode(2 * block.index) 78 | 79 | rx.tryFlush() 80 | 81 | const treeNode = await treeNodePromise 82 | 83 | if (isBadBlock(treeNode, block.value)) { 84 | if (!tx && !stats.corrupt) tx = core.state.storage.write() 85 | tx.deleteBlock(block.index) 86 | stats.droppedBlocks++ 87 | continue 88 | } 89 | 90 | stats.blocks++ 91 | } 92 | 93 | if (tx && !dryRun) await tx.flush() 94 | } 95 | // audit the bitfield 96 | if (bitfield) { 97 | let tx = null 98 | 99 | for (const index of allBits(core.bitfield)) { 100 | const rx = core.state.storage.read() 101 | const blockPromise = rx.getBlock(index) 102 | 103 | rx.tryFlush() 104 | 105 | const block = await blockPromise 106 | if (!block) { 107 | stats.droppedBits++ 108 | if (dryRun) continue 109 | 110 | if (!tx && !stats.corrupt) tx = core.state.storage.write() 111 | 112 | core.bitfield.set(index, false) 113 | 114 | const page = core.bitfield.getBitfield(index) 115 | if (page.bitfield) tx.setBitfieldPage(page.index, page.bitfield) 116 | else tx.deleteBitfieldPage(page.index) 117 | continue 118 | } 119 | 120 | stats.bits++ 121 | } 122 | 123 | if (tx && !dryRun) await tx.flush() 124 | } 125 | 126 | return stats 127 | } 128 | 129 | function isBadBlock (node, block) { 130 | if (!node) return true 131 | const hash = crypto.data(block) 132 | return !b4a.equals(hash, node.hash) || node.size !== block.byteLength 133 | } 134 | 135 | function isBadTree (parent, left, right) { 136 | if (!left && !right) return false 137 | if (!left || !right) return true 138 | const hash = crypto.parent(left, right) 139 | return !b4a.equals(hash, parent.hash) || parent.size !== (left.size + right.size) 140 | } 141 | 142 | function * allBits (bitfield) { 143 | let i = 0 144 | if (bitfield.get(0)) yield 0 145 | while (true) { 146 | i = bitfield.findFirst(true, i + 1) 147 | if (i === -1) break 148 |
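// findFirst located the next set bit strictly after i, so emit it and keep scanning from there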
yield i 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /lib/bit-interlude.js: -------------------------------------------------------------------------------- 1 | const b4a = require('b4a') 2 | const quickbit = require('./compat').quickbit 3 | 4 | module.exports = class BitInterlude { 5 | constructor () { 6 | this.ranges = [] 7 | } 8 | 9 | contiguousLength (from) { 10 | for (const r of this.ranges) { 11 | if (r.start > from) break 12 | if (!r.value && r.start <= from) return r.start 13 | } 14 | 15 | // TODO: be smarter 16 | while (this.get(from) === true) from++ 17 | return from 18 | } 19 | 20 | get (index) { 21 | let start = 0 22 | let end = this.ranges.length 23 | 24 | while (start < end) { 25 | const mid = (start + end) >> 1 26 | const r = this.ranges[mid] 27 | 28 | if (index < r.start) { 29 | end = mid 30 | continue 31 | } 32 | 33 | if (index >= r.end) { 34 | if (mid === start) break 35 | start = mid 36 | continue 37 | } 38 | 39 | return r.value 40 | } 41 | 42 | return false 43 | } 44 | 45 | setRange (start, end, value) { 46 | if (start === end) return 47 | 48 | let r = null 49 | 50 | for (let i = 0; i < this.ranges.length; i++) { 51 | r = this.ranges[i] 52 | 53 | // if already inside, stop 54 | if (r.start <= start && end <= r.end) { 55 | if (value === r.value) return 56 | 57 | const ranges = mergeRanges(r, { start, end, value }) 58 | this.ranges.splice(i, 1, ...ranges) 59 | 60 | return 61 | } 62 | 63 | // we wanna overrun the interval 64 | if (start > r.end) { 65 | continue 66 | } 67 | 68 | // we overran but this interval is ending after us, move it back 69 | if (end >= r.start && end <= r.end) { 70 | r.start = r.value === value ? start : end 71 | if (r.value !== value) this.ranges.splice(i, 0, { start, end, value }) 72 | return 73 | } 74 | 75 | // we overran but our start is contained in this interval, move start back 76 | if (start >= r.start && start <= r.end) { 77 | if (r.value !== value) { 78 | this.ranges.splice(++i, 0, { start, end, value }) 79 | r.end = start 80 | return 81 | } 82 | 83 | start = r.start 84 | } 85 | 86 | let remove = 0 87 | 88 | for (let j = i; j < this.ranges.length; j++) { 89 | const n = this.ranges[j] 90 | if (n.start > end || n.value !== value) break 91 | if (n.start <= end && n.end > end) end = n.end 92 | remove++ 93 | } 94 | 95 | this.ranges.splice(i, remove, { start, end, value }) 96 | return 97 | } 98 | 99 | if (r !== null) { 100 | if (start <= r.end && end > r.end) { 101 | r.end = end 102 | return 103 | } 104 | 105 | // we never 106 | if (r.end > start) return 107 | } 108 | 109 | this.ranges.push({ start, end, value }) 110 | } 111 | 112 | flush (tx, bitfield) { 113 | if (!this.ranges.length) return [] 114 | 115 | let index = this.ranges[0].start 116 | const final = this.ranges[this.ranges.length - 1].end 117 | 118 | let i = 0 119 | 120 | while (index < final) { 121 | const page = bitfield.getBitfield(index) // read only 122 | const pageIndex = page ?
page.index : bitfield.getPageIndex(index) 123 | 124 | const buf = b4a.allocUnsafe(bitfield.getPageByteLength()) 125 | 126 | if (page) { 127 | const src = page.bitfield // Uint32Array 128 | buf.set(b4a.from(src.buffer, src.byteOffset, src.byteLength), 0) 129 | } else { 130 | b4a.fill(buf, 0) 131 | } 132 | 133 | const last = (pageIndex + 1) * (buf.byteLength << 3) 134 | const offset = pageIndex * (buf.byteLength << 3) 135 | 136 | let hasValue = false 137 | 138 | while (i < this.ranges.length) { 139 | const { start, end, value } = this.ranges[i] 140 | 141 | if (!hasValue && value) hasValue = true 142 | 143 | const from = start < index ? index : start 144 | const to = end < last ? end : last 145 | 146 | quickbit.fill(buf, value, from - offset, to - offset) 147 | 148 | index = to 149 | 150 | if (to === last) break 151 | 152 | i++ 153 | } 154 | 155 | if (page || hasValue) tx.putBitfieldPage(pageIndex, buf) 156 | } 157 | 158 | return this.ranges 159 | } 160 | } 161 | 162 | function mergeRanges (a, b) { 163 | const ranges = [] 164 | if (a.start < b.start) ranges.push({ start: a.start, end: b.start, value: a.value }) 165 | ranges.push({ start: b.start, end: b.end, value: b.value }) 166 | if (b.end < a.end) ranges.push({ start: b.end, end: a.end, value: a.value }) 167 | 168 | return ranges 169 | } 170 | -------------------------------------------------------------------------------- /lib/bitfield.js: -------------------------------------------------------------------------------- 1 | const BigSparseArray = require('big-sparse-array') 2 | const b4a = require('b4a') 3 | const quickbit = require('./compat').quickbit 4 | 5 | const BITS_PER_PAGE = 32768 6 | const BYTES_PER_PAGE = BITS_PER_PAGE / 8 7 | const WORDS_PER_PAGE = BYTES_PER_PAGE / 4 8 | const BITS_PER_SEGMENT = 2097152 9 | const BYTES_PER_SEGMENT = BITS_PER_SEGMENT / 8 10 | const WORDS_PER_SEGMENT = BYTES_PER_SEGMENT / 4 11 | const INITIAL_WORDS_PER_SEGMENT = 1024 12 | const PAGES_PER_SEGMENT = BITS_PER_SEGMENT / BITS_PER_PAGE 13 | const SEGMENT_GROWTH_FACTOR = 4 14 | 15 | class BitfieldPage { 16 | constructor (index, segment) { 17 | this.index = index 18 | this.offset = index * BYTES_PER_PAGE - segment.offset 19 | this.bitfield = null 20 | this.segment = segment 21 | 22 | segment.add(this) 23 | } 24 | 25 | get tree () { 26 | return this.segment.tree 27 | } 28 | 29 | get (index, dirty) { 30 | return quickbit.get(this.bitfield, index) 31 | } 32 | 33 | set (index, val) { 34 | if (quickbit.set(this.bitfield, index, val)) { 35 | this.tree.update(this.offset * 8 + index) 36 | } 37 | } 38 | 39 | setRange (start, end, val) { 40 | quickbit.fill(this.bitfield, val, start, end) 41 | 42 | let i = Math.floor(start / 128) 43 | const n = i + Math.ceil((end - start) / 128) 44 | 45 | while (i <= n) this.tree.update(this.offset * 8 + i++ * 128) 46 | } 47 | 48 | findFirst (val, position) { 49 | return quickbit.findFirst(this.bitfield, val, position) 50 | } 51 | 52 | findLast (val, position) { 53 | return quickbit.findLast(this.bitfield, val, position) 54 | } 55 | 56 | count (start, length, val) { 57 | const end = start + length 58 | 59 | let i = start 60 | let c = 0 61 | 62 | while (length > 0) { 63 | const l = this.findFirst(val, i) 64 | if (l === -1 || l >= end) return c 65 | 66 | const h = this.findFirst(!val, l + 1) 67 | if (h === -1 || h >= end) return c + end - l 68 | 69 | c += h - l 70 | length -= h - i 71 | i = h 72 | } 73 | 74 | return c 75 | } 76 | } 77 | 78 | class BitfieldSegment { 79 | constructor (index, bitfield) { 80 | this.index = index 81 | 
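// byte offset of this segment within the full bitfield; pages subtract it to compute their relative offset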
this.offset = index * BYTES_PER_SEGMENT 82 | this.tree = quickbit.Index.from(bitfield, BYTES_PER_SEGMENT) 83 | this.pages = new Array(PAGES_PER_SEGMENT) 84 | } 85 | 86 | get bitfield () { 87 | return this.tree.field 88 | } 89 | 90 | add (page) { 91 | const i = page.index - this.index * PAGES_PER_SEGMENT 92 | this.pages[i] = page 93 | 94 | const start = i * WORDS_PER_PAGE 95 | const end = start + WORDS_PER_PAGE 96 | 97 | if (end >= this.bitfield.length) this.reallocate(end) 98 | 99 | page.bitfield = this.bitfield.subarray(start, end) 100 | } 101 | 102 | reallocate (length) { 103 | let target = this.bitfield.length 104 | while (target < length) target *= SEGMENT_GROWTH_FACTOR 105 | 106 | const bitfield = new Uint32Array(target) 107 | bitfield.set(this.bitfield) 108 | 109 | this.tree = quickbit.Index.from(bitfield, BYTES_PER_SEGMENT) 110 | 111 | for (let i = 0; i < this.pages.length; i++) { 112 | const page = this.pages[i] 113 | if (!page) continue 114 | 115 | const start = i * WORDS_PER_PAGE 116 | const end = start + WORDS_PER_PAGE 117 | 118 | page.bitfield = bitfield.subarray(start, end) 119 | } 120 | } 121 | 122 | findFirst (val, position) { 123 | position = this.tree.skipFirst(!val, position) 124 | 125 | let j = position & (BITS_PER_PAGE - 1) 126 | let i = (position - j) / BITS_PER_PAGE 127 | 128 | if (i >= PAGES_PER_SEGMENT) return -1 129 | 130 | while (i < this.pages.length) { 131 | const p = this.pages[i] 132 | 133 | let index = -1 134 | 135 | if (p) index = p.findFirst(val, j) 136 | else if (!val) index = j 137 | 138 | if (index !== -1) return i * BITS_PER_PAGE + index 139 | 140 | j = 0 141 | i++ 142 | } 143 | 144 | return -1 145 | } 146 | 147 | findLast (val, position) { 148 | position = this.tree.skipLast(!val, position) 149 | 150 | let j = position & (BITS_PER_PAGE - 1) 151 | let i = (position - j) / BITS_PER_PAGE 152 | 153 | if (i >= PAGES_PER_SEGMENT) return -1 154 | 155 | while (i >= 0) { 156 | const p = this.pages[i] 157 | 158 | let index = -1 159 | 160 | if (p) index = p.findLast(val, j) 161 | else if (!val) index = j 162 | 163 | if (index !== -1) return i * BITS_PER_PAGE + index 164 | 165 | j = BITS_PER_PAGE - 1 166 | i-- 167 | } 168 | 169 | return -1 170 | } 171 | } 172 | 173 | module.exports = class Bitfield { 174 | static BITS_PER_PAGE = BITS_PER_PAGE 175 | static BYTES_PER_PAGE = BYTES_PER_PAGE 176 | 177 | constructor (buffer) { 178 | this.resumed = !!(buffer && buffer.byteLength >= 0) 179 | 180 | this._pages = new BigSparseArray() 181 | this._segments = new BigSparseArray() 182 | 183 | const view = this.resumed 184 | ? 
new Uint32Array( 185 | buffer.buffer, 186 | buffer.byteOffset, 187 | Math.floor(buffer.byteLength / 4) 188 | ) 189 | : new Uint32Array(INITIAL_WORDS_PER_SEGMENT) 190 | 191 | for (let i = 0; i < view.length; i += WORDS_PER_SEGMENT) { 192 | let bitfield = view.subarray(i, i + (WORDS_PER_SEGMENT)) 193 | let length = WORDS_PER_SEGMENT 194 | 195 | if (i === 0) { 196 | length = INITIAL_WORDS_PER_SEGMENT 197 | while (length < bitfield.length) length *= SEGMENT_GROWTH_FACTOR 198 | } 199 | 200 | if (bitfield.length !== length) { 201 | const copy = new Uint32Array(length) 202 | copy.set(bitfield, 0) 203 | bitfield = copy 204 | } 205 | 206 | const segment = new BitfieldSegment(i / (WORDS_PER_SEGMENT), bitfield) 207 | this._segments.set(segment.index, segment) 208 | 209 | for (let j = 0; j < bitfield.length; j += WORDS_PER_PAGE) { 210 | const page = new BitfieldPage((i + j) / WORDS_PER_PAGE, segment) 211 | this._pages.set(page.index, page) 212 | } 213 | } 214 | } 215 | 216 | static from (bitfield) { 217 | return new Bitfield(bitfield.toBuffer(bitfield._pages.maxLength * BITS_PER_PAGE)) 218 | } 219 | 220 | toBuffer (length) { 221 | const pages = Math.ceil(length / BITS_PER_PAGE) 222 | const buffer = b4a.allocUnsafe(pages * BYTES_PER_PAGE) 223 | 224 | for (let i = 0; i < pages; i++) { 225 | const page = this._pages.get(i) 226 | const offset = i * BYTES_PER_PAGE 227 | 228 | if (page) { 229 | const buf = b4a.from( 230 | page.bitfield.buffer, 231 | page.bitfield.byteOffset, 232 | page.bitfield.byteLength 233 | ) 234 | 235 | buffer.set(buf, offset) 236 | } else { 237 | buffer.fill(0, offset, offset + BYTES_PER_PAGE) 238 | } 239 | } 240 | 241 | return buffer 242 | } 243 | 244 | getBitfield (index) { 245 | const i = this.getPageIndex(index) 246 | 247 | const p = this._pages.get(i) 248 | return p || null 249 | } 250 | 251 | merge (bitfield, length) { 252 | let i = 0 253 | 254 | while (i < length) { 255 | const start = bitfield.firstSet(i) 256 | if (start === -1) break 257 | 258 | i = bitfield.firstUnset(start) 259 | 260 | if (i === -1 || i > length) i = length 261 | 262 | this.setRange(start, i, true) 263 | 264 | if (i >= length) break 265 | } 266 | } 267 | 268 | get (index) { 269 | const j = index & (BITS_PER_PAGE - 1) 270 | const i = (index - j) / BITS_PER_PAGE 271 | 272 | const p = this._pages.get(i) 273 | 274 | return p ? p.get(j) : false 275 | } 276 | 277 | getPageByteLength () { 278 | return BYTES_PER_PAGE 279 | } 280 | 281 | getPageIndex (index) { 282 | const j = index & (BITS_PER_PAGE - 1) 283 | return (index - j) / BITS_PER_PAGE 284 | } 285 | 286 | getPage (index, create) { 287 | const i = this.getPageIndex(index) 288 | 289 | let p = this._pages.get(i) 290 | 291 | if (p) return p 292 | 293 | if (!create) return null 294 | 295 | const k = Math.floor(i / PAGES_PER_SEGMENT) 296 | const s = this._segments.get(k) || this._segments.set(k, new BitfieldSegment(k, new Uint32Array(k === 0 ? INITIAL_WORDS_PER_SEGMENT : WORDS_PER_SEGMENT))) 297 | 298 | p = this._pages.set(i, new BitfieldPage(i, s)) 299 | 300 | return p 301 | } 302 | 303 | set (index, val) { 304 | const j = index & (BITS_PER_PAGE - 1) 305 | const i = (index - j) / BITS_PER_PAGE 306 | 307 | let p = this._pages.get(i) 308 | 309 | if (!p && val) { 310 | const k = Math.floor(i / PAGES_PER_SEGMENT) 311 | const s = this._segments.get(k) || this._segments.set(k, new BitfieldSegment(k, new Uint32Array(k === 0 ? 
INITIAL_WORDS_PER_SEGMENT : WORDS_PER_SEGMENT))) 312 | 313 | p = this._pages.set(i, new BitfieldPage(i, s)) 314 | } 315 | 316 | if (p) p.set(j, val) 317 | } 318 | 319 | setRange (start, end, val) { 320 | let j = start & (BITS_PER_PAGE - 1) 321 | let i = (start - j) / BITS_PER_PAGE 322 | 323 | while (start < end) { 324 | let p = this._pages.get(i) 325 | 326 | if (!p && val) { 327 | const k = Math.floor(i / PAGES_PER_SEGMENT) 328 | const s = this._segments.get(k) || this._segments.set(k, new BitfieldSegment(k, new Uint32Array(k === 0 ? INITIAL_WORDS_PER_SEGMENT : WORDS_PER_SEGMENT))) 329 | 330 | p = this._pages.set(i, new BitfieldPage(i, s)) 331 | } 332 | 333 | const offset = i * BITS_PER_PAGE 334 | const last = Math.min(end - offset, BITS_PER_PAGE) 335 | const range = last - j 336 | 337 | if (p) p.setRange(j, last, val) 338 | 339 | j = 0 340 | i++ 341 | start += range 342 | } 343 | } 344 | 345 | findFirst (val, position) { 346 | let j = position & (BITS_PER_SEGMENT - 1) 347 | let i = (position - j) / BITS_PER_SEGMENT 348 | 349 | while (i < this._segments.maxLength) { 350 | const s = this._segments.get(i) 351 | 352 | let index = -1 353 | 354 | if (s) index = s.findFirst(val, j) 355 | else if (!val) index = j 356 | 357 | if (index !== -1) return i * BITS_PER_SEGMENT + index 358 | 359 | j = 0 360 | i++ 361 | } 362 | 363 | return val ? -1 : this._segments.maxLength * BITS_PER_SEGMENT 364 | } 365 | 366 | firstSet (position) { 367 | return this.findFirst(true, position) 368 | } 369 | 370 | firstUnset (position) { 371 | return this.findFirst(false, position) 372 | } 373 | 374 | findLast (val, position) { 375 | let j = position & (BITS_PER_SEGMENT - 1) 376 | let i = (position - j) / BITS_PER_SEGMENT 377 | 378 | while (i >= 0) { 379 | const s = this._segments.get(i) 380 | 381 | let index = -1 382 | 383 | if (s) index = s.findLast(val, j) 384 | else if (!val) index = j 385 | 386 | if (index !== -1) return i * BITS_PER_SEGMENT + index 387 | 388 | j = BITS_PER_SEGMENT - 1 389 | i-- 390 | } 391 | 392 | return -1 393 | } 394 | 395 | lastSet (position) { 396 | return this.findLast(true, position) 397 | } 398 | 399 | lastUnset (position) { 400 | return this.findLast(false, position) 401 | } 402 | 403 | count (start, length, val) { 404 | let j = start & (BITS_PER_PAGE - 1) 405 | let i = (start - j) / BITS_PER_PAGE 406 | let c = 0 407 | 408 | while (length > 0) { 409 | const p = this._pages.get(i) 410 | 411 | const end = Math.min(j + length, BITS_PER_PAGE) 412 | const range = end - j 413 | 414 | if (p) c += p.count(j, range, val) 415 | else if (!val) c += range 416 | 417 | j = 0 418 | i++ 419 | length -= range 420 | } 421 | 422 | return c 423 | } 424 | 425 | countSet (start, length) { 426 | return this.count(start, length, true) 427 | } 428 | 429 | countUnset (start, length) { 430 | return this.count(start, length, false) 431 | } 432 | 433 | * want (start, length) { 434 | const j = start & (BITS_PER_SEGMENT - 1) 435 | let i = (start - j) / BITS_PER_SEGMENT 436 | 437 | while (length > 0) { 438 | const s = this._segments.get(i) 439 | 440 | if (s) { 441 | // We always send at least 4 KiB worth of bitfield in a want, rounding 442 | // to the nearest 4 KiB.
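// `length / 8` converts the remaining bits to bytes, clamped to one full segment; `end / 4` below converts bytes to Uint32 words for the subarray.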
443 | const end = ceilTo(clamp(length / 8, 4096, BYTES_PER_SEGMENT), 4096) 444 | 445 | yield { 446 | start: i * BITS_PER_SEGMENT, 447 | bitfield: s.bitfield.subarray(0, end / 4) 448 | } 449 | } 450 | 451 | i++ 452 | length -= BITS_PER_SEGMENT 453 | } 454 | } 455 | 456 | clear (tx) { 457 | return tx.deleteBitfieldPageRange(0, -1) 458 | } 459 | 460 | onupdate (ranges) { 461 | for (const { start, end, value } of ranges) { 462 | this.setRange(start, end, value) 463 | } 464 | } 465 | 466 | static async open (storage, length) { 467 | if (length === 0) return new Bitfield(null) 468 | 469 | const pages = Math.ceil(length / BITS_PER_PAGE) 470 | const buffer = b4a.alloc(pages * BYTES_PER_PAGE) 471 | const stream = storage.createBitfieldStream() 472 | 473 | for await (const { index, page } of stream) { 474 | buffer.set(page, index * BYTES_PER_PAGE) 475 | } 476 | 477 | return new Bitfield(buffer) 478 | } 479 | } 480 | 481 | function clamp (n, min, max) { 482 | return Math.min(Math.max(n, min), max) 483 | } 484 | 485 | function ceilTo (n, multiple = 1) { 486 | const remainder = n % multiple 487 | if (remainder === 0) return n 488 | return n + multiple - remainder 489 | } 490 | -------------------------------------------------------------------------------- /lib/caps.js: -------------------------------------------------------------------------------- 1 | const crypto = require('hypercore-crypto') 2 | const sodium = require('sodium-universal') 3 | const b4a = require('b4a') 4 | const c = require('compact-encoding') 5 | 6 | // TODO: rename this to "crypto" and move everything hashing related etc in here 7 | // Also let's move the tree stuff from hypercore-crypto here 8 | 9 | const [ 10 | TREE, 11 | REPLICATE_INITIATOR, 12 | REPLICATE_RESPONDER, 13 | MANIFEST, 14 | DEFAULT_NAMESPACE, 15 | DEFAULT_ENCRYPTION 16 | ] = crypto.namespace('hypercore', 6) 17 | 18 | exports.MANIFEST = MANIFEST 19 | exports.DEFAULT_NAMESPACE = DEFAULT_NAMESPACE 20 | exports.DEFAULT_ENCRYPTION = DEFAULT_ENCRYPTION 21 | 22 | exports.replicate = function (isInitiator, key, handshakeHash) { 23 | const out = b4a.allocUnsafe(32) 24 | sodium.crypto_generichash_batch(out, [isInitiator ? REPLICATE_INITIATOR : REPLICATE_RESPONDER, key], handshakeHash) 25 | return out 26 | } 27 | 28 | exports.treeSignable = function (manifestHash, treeHash, length, fork) { 29 | const state = { start: 0, end: 112, buffer: b4a.allocUnsafe(112) } 30 | c.fixed32.encode(state, TREE) 31 | c.fixed32.encode(state, manifestHash) 32 | c.fixed32.encode(state, treeHash) 33 | c.uint64.encode(state, length) 34 | c.uint64.encode(state, fork) 35 | return state.buffer 36 | } 37 | 38 | exports.treeSignableCompat = function (hash, length, fork, noHeader) { 39 | const end = noHeader ? 48 : 80
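// 80 bytes = 32 (TREE namespace) + 32 (tree hash) + 8 (length) + 8 (fork); noHeader drops the 32-byte namespace, leaving 48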
40 | const state = { start: 0, end, buffer: b4a.allocUnsafe(end) } 41 | if (!noHeader) c.fixed32.encode(state, TREE) // ultra legacy mode, kill in future major 42 | c.fixed32.encode(state, hash) 43 | c.uint64.encode(state, length) 44 | c.uint64.encode(state, fork) 45 | return state.buffer 46 | } 47 | -------------------------------------------------------------------------------- /lib/compat.js: -------------------------------------------------------------------------------- 1 | // Export the appropriate version of `quickbit-universal` as the plain import 2 | // may resolve to an older version in some environments 3 | let quickbit = require('quickbit-universal') 4 | if ( 5 | typeof quickbit.findFirst !== 'function' || 6 | typeof quickbit.findLast !== 'function' || 7 | typeof quickbit.clear !== 'function' 8 | ) { 9 | // This should always load the fallback from the locally installed version 10 | quickbit = require('quickbit-universal/fallback') 11 | } 12 | exports.quickbit = quickbit 13 | -------------------------------------------------------------------------------- /lib/copy-prologue.js: -------------------------------------------------------------------------------- 1 | const crypto = require('hypercore-crypto') 2 | const flat = require('flat-tree') 3 | const b4a = require('b4a') 4 | const quickbit = require('quickbit-universal') 5 | const Bitfield = require('./bitfield') 6 | 7 | const MAX_BATCH_USED = 4 * 1024 * 1024 8 | const MIN_BATCH_USED = 512 * 1024 9 | 10 | // just in its own file as it's a bit involved 11 | 12 | module.exports = copyPrologue 13 | 14 | async function copyPrologue (src, dst) { 15 | const prologue = dst.header.manifest.prologue 16 | 17 | if (src.length < prologue.length || prologue.length === 0) return 18 | 19 | const stack = [] 20 | const roots = flat.fullRoots(prologue.length * 2) 21 | const batch = { roots, first: true, last: false, contig: 0, used: 0, tree: [], blocks: [] } 22 | 23 | for (let i = 0; i < roots.length; i++) { 24 | const node = roots[i] 25 | batch.tree.push(node) 26 | stack.push(node) 27 | } 28 | 29 | let lastPage = -1 30 | let lastBlock = -1 31 | 32 | for await (const data of src.storage.createBlockStream({ gte: 0, lt: prologue.length, reverse: true })) { 33 | if (walkTree(stack, data.index * 2, batch) === false) { 34 | throw new Error('Missing block or tree node for ' + data.index) 35 | } 36 | 37 | batch.contig = data.index + 1 === lastBlock ? batch.contig + 1 : 1 38 | lastBlock = data.index 39 | 40 | const page = getBitfieldPage(data.index) 41 | batch.blocks.push(data) 42 | 43 | if (lastPage !== page) batch.used += 4096 44 | batch.used += Math.max(data.value.byteLength, 128) // 128 is just a sanity number to avoid mega batches 45 | 46 | // always safe to partially flush so we do that on demand to reduce memory usage...
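// i.e. flush once at least MIN_BATCH_USED (512 KiB) is buffered and we cross a bitfield page boundary, or unconditionally at MAX_BATCH_USED (4 MiB)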
47 | if ((batch.used >= MIN_BATCH_USED && page !== lastPage) || (batch.used >= MAX_BATCH_USED)) { 48 | await flushBatch(prologue, src, dst, batch) 49 | } 50 | 51 | lastPage = page 52 | } 53 | 54 | if (lastBlock !== 0) batch.contig = 0 55 | 56 | batch.last = true 57 | await flushBatch(prologue, src, dst, batch) 58 | } 59 | 60 | async function flushBatch (prologue, src, dst, batch) { 61 | const nodePromises = [] 62 | 63 | const srcReader = src.storage.read() 64 | for (const index of batch.tree) { 65 | nodePromises.push(srcReader.getTreeNode(index)) 66 | } 67 | srcReader.tryFlush() 68 | 69 | const nodes = await Promise.all(nodePromises) 70 | 71 | const pagePromises = [] 72 | const dstReader = dst.storage.read() 73 | 74 | const headPromise = batch.first ? dstReader.getHead() : null 75 | if (headPromise) headPromise.catch(noop) 76 | 77 | let lastPage = -1 78 | for (const { index } of batch.blocks) { 79 | const page = getBitfieldPage(index) 80 | if (page === lastPage) continue 81 | lastPage = page 82 | pagePromises.push(dstReader.getBitfieldPage(page)) 83 | } 84 | 85 | dstReader.tryFlush() 86 | 87 | const pages = await Promise.all(pagePromises) 88 | const head = headPromise === null ? null : await headPromise 89 | const userData = [] 90 | 91 | // reads done! 92 | 93 | if (batch.first) { 94 | const roots = nodes.slice(0, batch.roots.length) 95 | 96 | for (const node of roots) { 97 | if (!node) throw new Error('Missing nodes for prologue hash') 98 | } 99 | 100 | const treeHash = crypto.tree(roots) 101 | if (!b4a.equals(treeHash, prologue.hash)) throw new Error('Prologue does not match source') 102 | } 103 | 104 | if (batch.first) { 105 | for await (const data of src.storage.createUserDataStream()) userData.push(data) 106 | } 107 | 108 | for (let i = 0; i < pages.length; i++) { 109 | if (!pages[i]) pages[i] = b4a.alloc(4096) 110 | } 111 | 112 | const tx = dst.storage.write() 113 | 114 | for (const node of nodes) tx.putTreeNode(node) 115 | 116 | lastPage = -1 117 | let pageIndex = -1 118 | 119 | for (const { index, value } of batch.blocks) { 120 | const page = getBitfieldPage(index) 121 | 122 | if (page !== lastPage) { 123 | lastPage = page 124 | pageIndex++ 125 | // queue the page now, we mutate it below but it's the same ref 126 | tx.putBitfieldPage(pageIndex, pages[pageIndex]) 127 | } 128 | 129 | const pageBuffer = pages[pageIndex] 130 | quickbit.set(pageBuffer, getBitfieldOffset(index), true) 131 | tx.putBlock(index, value) 132 | } 133 | 134 | for (const { key, value } of userData) { 135 | tx.putUserData(key, value) 136 | } 137 | 138 | let upgraded = batch.first && !head 139 | if (upgraded) { 140 | tx.setHead(prologueToTree(prologue)) 141 | } 142 | 143 | await tx.flush() 144 | 145 | if (upgraded) { 146 | const roots = nodes.slice(0, batch.roots.length) 147 | dst.state.setRoots(roots) 148 | dst.header.tree = prologueToTree(prologue) 149 | } 150 | 151 | if (userData.length > 0) { 152 | dst.header.userData = userData.concat(dst.header.userData) 153 | } 154 | 155 | if (batch.contig) { 156 | // TODO: we need to persist this somehow 157 | dst.header.hints.contiguousLength = batch.contig 158 | } 159 | 160 | let start = 0 161 | let length = 0 162 | 163 | // update in-memory bitfield 164 | for (const { index } of batch.blocks) { 165 | if (start === 0 || start - 1 === index) { 166 | length++ 167 | } else { 168 | if (length > 0) signalReplicator(dst, upgraded, start, length) 169 | upgraded = false 170 | length = 1 171 | } 172 | 173 | start = index 174 | dst.bitfield.set(index, true) 175 | } 176 | 177 | if
(length > 0) signalReplicator(dst, upgraded, start, length) 178 | 179 | // unlink 180 | batch.tree = [] 181 | batch.blocks = [] 182 | batch.first = false 183 | batch.used = 0 184 | } 185 | 186 | function signalReplicator (core, upgraded, start, length) { 187 | if (upgraded) { 188 | core.replicator.cork() 189 | core.replicator.onhave(start, length, false) 190 | core.replicator.onupgrade() 191 | core.replicator.uncork() 192 | } else { 193 | core.replicator.onhave(start, length, false) 194 | } 195 | } 196 | 197 | function prologueToTree (prologue) { 198 | return { 199 | fork: 0, 200 | length: prologue.length, 201 | rootHash: prologue.hash, 202 | signature: null 203 | } 204 | } 205 | 206 | function getBitfieldPage (index) { 207 | return Math.floor(index / Bitfield.BITS_PER_PAGE) 208 | } 209 | 210 | function getBitfieldOffset (index) { 211 | return index & (Bitfield.BITS_PER_PAGE - 1) 212 | } 213 | 214 | function walkTree (stack, target, batch) { 215 | while (stack.length > 0) { 216 | const node = stack.pop() 217 | 218 | if ((node & 1) === 0) { 219 | if (node === target) return true 220 | continue 221 | } 222 | 223 | const ite = flat.iterator(node) 224 | if (!ite.contains(target)) continue 225 | 226 | while ((ite.index & 1) !== 0) { 227 | const left = ite.leftChild() 228 | const right = ite.sibling() // is right child 229 | 230 | batch.tree.push(left, right) 231 | 232 | if (ite.contains(target)) stack.push(left) 233 | else ite.sibling() 234 | } 235 | 236 | if (ite.index === target) return true 237 | } 238 | 239 | return false 240 | } 241 | 242 | function noop () {} 243 | -------------------------------------------------------------------------------- /lib/default-encryption.js: -------------------------------------------------------------------------------- 1 | const sodium = require('sodium-universal') 2 | const c = require('compact-encoding') 3 | const b4a = require('b4a') 4 | const { DEFAULT_ENCRYPTION } = require('./caps') 5 | 6 | const nonce = b4a.alloc(sodium.crypto_stream_NONCEBYTES) 7 | 8 | module.exports = class DefaultEncryption { 9 | static PADDING = 8 10 | 11 | constructor (encryptionKey, hypercoreKey, opts = {}) { 12 | this.key = encryptionKey 13 | this.compat = opts.compat === true 14 | 15 | const keys = DefaultEncryption.deriveKeys(encryptionKey, hypercoreKey, opts) 16 | 17 | this.blockKey = keys.block 18 | this.blindingKey = keys.blinding 19 | } 20 | 21 | static deriveKeys (encryptionKey, hypercoreKey, { block = false, compat = false } = {}) { 22 | const subKeys = b4a.alloc(2 * sodium.crypto_stream_KEYBYTES) 23 | 24 | const blockKey = block ? 
encryptionKey : subKeys.subarray(0, sodium.crypto_stream_KEYBYTES) 25 | const blindingKey = subKeys.subarray(sodium.crypto_stream_KEYBYTES) 26 | 27 | if (!block) { 28 | if (compat) sodium.crypto_generichash_batch(blockKey, [encryptionKey], hypercoreKey) 29 | else sodium.crypto_generichash_batch(blockKey, [DEFAULT_ENCRYPTION, hypercoreKey, encryptionKey]) 30 | } 31 | 32 | sodium.crypto_generichash(blindingKey, blockKey) 33 | 34 | return { 35 | blinding: blindingKey, 36 | block: blockKey 37 | } 38 | } 39 | 40 | static blockEncryptionKey (hypercoreKey, encryptionKey) { 41 | const blockKey = b4a.alloc(sodium.crypto_stream_KEYBYTES) 42 | sodium.crypto_generichash_batch(blockKey, [DEFAULT_ENCRYPTION, hypercoreKey, encryptionKey]) 43 | return blockKey 44 | } 45 | 46 | static encrypt (index, block, fork, blockKey, blindingKey) { 47 | const padding = block.subarray(0, DefaultEncryption.PADDING) 48 | block = block.subarray(DefaultEncryption.PADDING) 49 | 50 | c.uint64.encode({ start: 0, end: 8, buffer: padding }, fork) 51 | c.uint64.encode({ start: 0, end: 8, buffer: nonce }, index) 52 | 53 | // Zero out any previous padding. 54 | nonce.fill(0, 8, 8 + padding.byteLength) 55 | 56 | // Blind the fork ID, possibly risking reusing the nonce on a reorg of the 57 | // Hypercore. This is fine as the blinding is best-effort and the latest 58 | // fork ID is shared on replication anyway. 59 | sodium.crypto_stream_xor( 60 | padding, 61 | padding, 62 | nonce, 63 | blindingKey 64 | ) 65 | 66 | nonce.set(padding, 8) 67 | 68 | // The combination of a (blinded) fork ID and a block index is unique for a 69 | // given Hypercore and is therefore a valid nonce for encrypting the block. 70 | sodium.crypto_stream_xor( 71 | block, 72 | block, 73 | nonce, 74 | blockKey 75 | ) 76 | } 77 | 78 | static decrypt (index, block, blockKey) { 79 | const padding = block.subarray(0, DefaultEncryption.PADDING) 80 | block = block.subarray(DefaultEncryption.PADDING) 81 | 82 | c.uint64.encode({ start: 0, end: 8, buffer: nonce }, index) 83 | 84 | nonce.set(padding, 8) 85 | 86 | // Decrypt the block using the blinded fork ID.
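// crypto_stream_xor is symmetric: applying the same keystream again undoes the XOR performed in encrypt().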
87 | sodium.crypto_stream_xor( 88 | block, 89 | block, 90 | nonce, 91 | blockKey 92 | ) 93 | } 94 | 95 | encrypt (index, block, fork, core) { 96 | if (core.compat !== this.compat) this._reload(core) 97 | return DefaultEncryption.encrypt(index, block, fork, this.blockKey, this.blindingKey) 98 | } 99 | 100 | decrypt (index, block, core) { 101 | if (core.compat !== this.compat) this._reload(core) 102 | return DefaultEncryption.decrypt(index, block, this.blockKey) 103 | } 104 | 105 | padding () { 106 | return DefaultEncryption.PADDING 107 | } 108 | 109 | _reload (core) { 110 | const block = b4a.equals(this.key, this.blockKey) 111 | const keys = DefaultEncryption.deriveKeys(this.key, core.key, { block, compat: core.compat }) 112 | 113 | this.blockKey = keys.block 114 | this.blindingKey = keys.blinding 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /lib/download.js: -------------------------------------------------------------------------------- 1 | module.exports = class Download { 2 | constructor (session, range) { 3 | this.session = session 4 | this.range = range 5 | this.request = null 6 | this.opened = false 7 | this.opening = this._open() 8 | this.opening.catch(noop) 9 | } 10 | 11 | ready () { 12 | return this.opening 13 | } 14 | 15 | async _open () { 16 | if (this.session.opened === false) await this.session.opening 17 | this._download() 18 | this.opened = true 19 | } 20 | 21 | async done () { 22 | await this.ready() 23 | 24 | try { 25 | return await this.request.promise 26 | } catch (err) { 27 | if (isSessionMoved(err)) return this._download() 28 | throw err 29 | } 30 | } 31 | 32 | _download () { 33 | const activeRequests = (this.range && this.range.activeRequests) || this.session.activeRequests 34 | this.request = this.session.core.replicator.addRange(activeRequests, this.range) 35 | this.request.promise.catch(noop) 36 | return this.request.promise 37 | } 38 | 39 | /** 40 | * Deprecated. Use `range.done()`. 41 | */ 42 | downloaded () { 43 | return this.done() 44 | } 45 | 46 | destroy () { 47 | this._destroyBackground().catch(noop) 48 | } 49 | 50 | async _destroyBackground () { 51 | if (this.opened === false) await this.ready() 52 | if (this.request.context) this.request.context.detach(this.request) 53 | } 54 | } 55 | 56 | function noop () {} 57 | 58 | function isSessionMoved (err) { 59 | return err.code === 'SESSION_MOVED' 60 | } 61 | -------------------------------------------------------------------------------- /lib/fully-remote-proof.js: -------------------------------------------------------------------------------- 1 | // this helper is for fully remote proofs, e.g. in a push notification where no other context exists 2 | 3 | const { MerkleTree } = require('./merkle-tree.js') 4 | const messages = require('./messages.js') 5 | const b4a = require('b4a') 6 | const c = require('compact-encoding') 7 | const crypto = require('hypercore-crypto') 8 | const flat = require('flat-tree') 9 | 10 | class SlimSession { 11 | constructor (storage, auth, head, roots) { 12 | this.fork = head ? head.fork : 0 13 | this.roots = roots 14 | this.length = head ? head.length : 0 15 | this.signature = head ?
head.signature : null 16 | this.ancestors = this.length 17 | this.byteLength = 0 18 | this.prologue = auth.manifest.prologue 19 | this.storage = storage 20 | 21 | for (let i = 0; i < roots.length; i++) this.byteLength += roots[i].size 22 | } 23 | } 24 | 25 | module.exports = { verify, proof } 26 | 27 | async function verify (storage, buffer, { referrer = null } = {}) { 28 | const state = { buffer, start: 0, end: buffer.byteLength } 29 | 30 | const discoveryKey = c.fixed32.decode(state) 31 | const proof = messages.wire.data.decode(state) 32 | 33 | const result = { 34 | key: null, 35 | discoveryKey, 36 | newer: true, 37 | length: 0, 38 | proof, 39 | block: null 40 | } 41 | 42 | const core = await storage.resume(discoveryKey) 43 | if (core === null) return null 44 | 45 | let rx = core.read() 46 | const authPromise = rx.getAuth() 47 | const headPromise = rx.getHead() 48 | const referrerPromise = rx.getUserData('referrer') 49 | 50 | rx.tryFlush() 51 | 52 | const [auth, head, ref] = await Promise.all([authPromise, headPromise, referrerPromise]) 53 | 54 | if (auth === null) return null 55 | 56 | if (referrer && (!ref || !b4a.equals(ref, referrer))) return null 57 | 58 | rx = core.read() 59 | 60 | const rootPromises = [] 61 | 62 | for (const index of flat.fullRoots(head ? 2 * head.length : 0)) { 63 | rootPromises.push(rx.getTreeNode(index)) 64 | } 65 | 66 | rx.tryFlush() 67 | 68 | const roots = await Promise.all(rootPromises) 69 | const length = head ? head.length : 0 70 | 71 | if (!auth.manifest || !auth.manifest.signers.length) return null 72 | 73 | const batch = await MerkleTree.verifyFullyRemote(new SlimSession(core, auth, head, roots), proof) 74 | const publicKey = auth.manifest.signers[0].publicKey 75 | 76 | let signable = null 77 | let signature = null 78 | 79 | if (auth.manifest.version === 0) { 80 | signable = batch.signable(auth.manifest.signers[0].namespace) 81 | signature = batch.signature 82 | } else { 83 | if (batch.signature[0] !== 1) return null 84 | if (batch.signature[1] !== 0) return null 85 | 86 | signable = batch.signable(auth.key) 87 | signature = batch.signature.subarray(2, 66) 88 | } 89 | 90 | if (!crypto.verify(signable, signature, publicKey)) { 91 | return null 92 | } 93 | 94 | result.key = auth.key 95 | result.discoveryKey = discoveryKey 96 | result.newer = batch.length > length 97 | result.length = batch.length 98 | result.block = proof.block 99 | 100 | return result 101 | } 102 | 103 | async function proof (sender, { index, block = null } = {}) { 104 | const treeProof = await sender.proof({ 105 | block: block ? 
{ index, nodes: 0 } : null, 106 | upgrade: { start: 0, length: sender.length } 107 | }) 108 | 109 | const proof = await treeProof.settle() 110 | 111 | if (block) proof.block.value = block 112 | proof.manifest = sender.core.header.manifest 113 | 114 | const state = { buffer: null, start: 0, end: 0 } 115 | const data = { request: 0, ...proof } 116 | 117 | c.fixed32.preencode(state, sender.discoveryKey) 118 | messages.wire.data.preencode(state, data) 119 | 120 | state.buffer = b4a.allocUnsafe(state.end) 121 | 122 | c.fixed32.encode(state, sender.discoveryKey) 123 | messages.wire.data.encode(state, data) 124 | 125 | return state.buffer 126 | } 127 | -------------------------------------------------------------------------------- /lib/hotswap-queue.js: -------------------------------------------------------------------------------- 1 | const TICKS = 16 2 | 3 | module.exports = class HotswapQueue { 4 | constructor () { 5 | this.priorities = [[], [], []] 6 | } 7 | 8 | * pick (peer) { 9 | for (let i = 0; i < this.priorities.length; i++) { 10 | // try the first queue more often than the second one, etc. 11 | let ticks = (this.priorities.length - i) * TICKS 12 | const queue = this.priorities[i] 13 | 14 | for (let j = 0; j < queue.length; j++) { 15 | const r = j + Math.floor(Math.random() * (queue.length - j)) 16 | const a = queue[j] 17 | const b = queue[r] 18 | 19 | if (r !== j) { 20 | queue[(b.hotswap.index = j)] = b 21 | queue[(a.hotswap.index = r)] = a 22 | } 23 | 24 | if (hasInflight(b, peer)) continue 25 | 26 | yield b 27 | 28 | if (--ticks <= 0) break 29 | } 30 | } 31 | } 32 | 33 | add (block) { 34 | if (block.hotswap !== null) this.remove(block) 35 | if (block.inflight.length === 0 || block.inflight.length >= 3) return 36 | 37 | // TODO: also use other stuff to determine queue prio 38 | const queue = this.priorities[block.inflight.length - 1] 39 | 40 | const index = queue.push(block) - 1 41 | block.hotswap = { ref: this, queue, index } 42 | } 43 | 44 | remove (block) { 45 | const hotswap = block.hotswap 46 | if (hotswap === null) return 47 | 48 | block.hotswap = null 49 | const head = hotswap.queue.pop() 50 | if (head === block) return 51 | hotswap.queue[(head.hotswap.index = hotswap.index)] = head 52 | } 53 | } 54 | 55 | function hasInflight (block, peer) { 56 | for (let j = 0; j < block.inflight.length; j++) { 57 | if (block.inflight[j].peer === peer) return true 58 | } 59 | return false 60 | } 61 | -------------------------------------------------------------------------------- /lib/info.js: -------------------------------------------------------------------------------- 1 | module.exports = class Info { 2 | constructor (opts = {}) { 3 | this.key = opts.key 4 | this.discoveryKey = opts.discoveryKey 5 | this.length = opts.length || 0 6 | this.contiguousLength = opts.contiguousLength || 0 7 | this.byteLength = opts.byteLength || 0 8 | this.fork = opts.fork || 0 9 | this.padding = opts.padding || 0 10 | this.storage = opts.storage || null 11 | } 12 | 13 | static async from (session, opts = {}) { 14 | return new Info({ 15 | key: session.key, 16 | discoveryKey: session.discoveryKey, 17 | length: session.length, 18 | contiguousLength: session.contiguousLength, 19 | byteLength: session.byteLength, 20 | fork: session.fork, 21 | padding: session.padding, 22 | storage: opts.storage ?
await this.storage(session) : null 23 | }) 24 | } 25 | 26 | static async storage (session) { 27 | const { oplog, tree, blocks, bitfield } = session.core 28 | try { 29 | return { 30 | oplog: await Info.bytesUsed(oplog.storage), 31 | tree: await Info.bytesUsed(tree.storage), 32 | blocks: await Info.bytesUsed(blocks.storage), 33 | bitfield: await Info.bytesUsed(bitfield.storage) 34 | } 35 | } catch { 36 | return null 37 | } 38 | } 39 | 40 | static bytesUsed (file) { 41 | return new Promise((resolve, reject) => { 42 | file.stat((err, st) => { 43 | if (err) { 44 | resolve(0) // prob just file not found (TODO, improve) 45 | } else if (typeof st.blocks !== 'number') { 46 | reject(new Error('cannot determine bytes used')) 47 | } else { 48 | resolve(st.blocks * 512) 49 | } 50 | }) 51 | }) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /lib/multisig.js: -------------------------------------------------------------------------------- 1 | const c = require('compact-encoding') 2 | const b4a = require('b4a') 3 | const flat = require('flat-tree') 4 | const { MerkleTree } = require('./merkle-tree') 5 | const { multiSignature, multiSignaturev0 } = require('./messages') 6 | 7 | module.exports = { 8 | assemblev0, 9 | assemble, 10 | inflatev0, 11 | inflate, 12 | partialSignature, 13 | signableLength 14 | } 15 | 16 | function inflatev0 (data) { 17 | return c.decode(multiSignaturev0, data) 18 | } 19 | 20 | function inflate (data) { 21 | return c.decode(multiSignature, data) 22 | } 23 | 24 | async function partialSignature (core, signer, from, to = core.state.length, signature = core.state.signature) { 25 | if (from > core.state.length) return null 26 | const nodes = to <= from ? null : await upgradeNodes(core, from, to) 27 | 28 | if (signature.byteLength !== 64) signature = c.decode(multiSignature, signature).proofs[0].signature 29 | 30 | return { 31 | signer, 32 | signature, 33 | patch: nodes ? 
to - from : 0, 34 | nodes 35 | } 36 | } 37 | 38 | async function upgradeNodes (core, from, to) { 39 | const rx = core.state.storage.read() 40 | const p = await MerkleTree.proof(core.state, rx, { upgrade: { start: from, length: to - from } }) 41 | rx.tryFlush() 42 | return (await p.settle()).upgrade.nodes 43 | } 44 | 45 | function signableLength (lengths, quorum) { 46 | if (quorum <= 0) quorum = 1 47 | if (quorum > lengths.length) return 0 48 | 49 | return lengths.sort(cmp)[quorum - 1] 50 | } 51 | 52 | function cmp (a, b) { 53 | return b - a 54 | } 55 | 56 | function assemblev0 (inputs) { 57 | const proofs = [] 58 | const patch = [] 59 | 60 | for (const u of inputs) { 61 | proofs.push(compressProof(u, patch)) 62 | } 63 | 64 | return c.encode(multiSignaturev0, { proofs, patch }) 65 | } 66 | 67 | function assemble (inputs) { 68 | const proofs = [] 69 | const patch = [] 70 | const seen = new Set() 71 | 72 | for (const u of inputs) { 73 | if (u.nodes) { 74 | for (const node of u.nodes) { 75 | if (seen.has(node.index)) continue 76 | seen.add(node.index) 77 | patch.push(node) 78 | } 79 | } 80 | 81 | proofs.push({ 82 | signer: u.signer, 83 | signature: u.signature, 84 | patch: u.patch 85 | }) 86 | } 87 | 88 | return c.encode(multiSignature, { proofs, patch }) 89 | } 90 | 91 | function compareNode (a, b) { 92 | if (a.index !== b.index) return false 93 | if (a.size !== b.size) return false 94 | return b4a.equals(a.hash, b.hash) 95 | } 96 | 97 | function compressProof (proof, nodes) { 98 | return { 99 | signer: proof.signer, 100 | signature: proof.signature, 101 | patch: proof.patch ? compressUpgrade(proof, nodes) : null 102 | } 103 | } 104 | 105 | function compressUpgrade (p, nodes) { 106 | const u = { 107 | start: flat.rightSpan(p.nodes[p.nodes.length - 1].index) / 2 + 1, 108 | length: p.patch, 109 | nodes: [] 110 | } 111 | 112 | for (const node of p.nodes) { 113 | let present = false 114 | for (let i = 0; i < nodes.length; i++) { 115 | if (!compareNode(nodes[i], node)) continue 116 | 117 | u.nodes.push(i) 118 | present = true 119 | break 120 | } 121 | 122 | if (present) continue 123 | u.nodes.push(nodes.push(node) - 1) 124 | } 125 | 126 | return u 127 | } 128 | -------------------------------------------------------------------------------- /lib/mutex.js: -------------------------------------------------------------------------------- 1 | module.exports = class Mutex { 2 | constructor () { 3 | this.locked = false 4 | this.destroyed = false 5 | 6 | this._destroying = null 7 | this._destroyError = null 8 | this._queue = [] 9 | this._enqueue = (resolve, reject) => this._queue.push([resolve, reject]) 10 | } 11 | 12 | idle () { 13 | return this._queue.length === 0 && this.locked === false 14 | } 15 | 16 | lock () { 17 | if (this.destroyed) return Promise.reject(this._destroyError || new Error('Mutex has been destroyed')) 18 | if (this.locked) return new Promise(this._enqueue) 19 | this.locked = true 20 | return Promise.resolve() 21 | } 22 | 23 | unlock () { 24 | if (!this._queue.length) { 25 | this.locked = false 26 | return 27 | } 28 | this._queue.shift()[0]() 29 | } 30 | 31 | destroy (err) { 32 | if (!this._destroying) this._destroying = this.locked ? 
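// if currently locked, destroy() resolves once the lock can be re-acquired; the catch
// guards against this queued lock() being rejected when destroy is called with an error.
// A minimal usage sketch (hypothetical caller):
//
//   const mutex = new Mutex()
//   await mutex.lock()
//   try { /* critical section */ } finally { mutex.unlock() }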
this.lock().catch(() => {}) : Promise.resolve() 33 | 34 | this.destroyed = true 35 | if (err) this._destroyError = err 36 | 37 | if (err) { 38 | while (this._queue.length) this._queue.shift()[1](err) 39 | } 40 | 41 | return this._destroying 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /lib/receiver-queue.js: -------------------------------------------------------------------------------- 1 | const FIFO = require('fast-fifo') 2 | 3 | module.exports = class ReceiverQueue { 4 | constructor () { 5 | this.queue = new FIFO() 6 | this.priority = [] 7 | this.requests = new Map() 8 | this.length = 0 9 | } 10 | 11 | push (req) { 12 | // TODO: use a heap at some point if we wanna support multiple prios 13 | if (req.priority > 0) this.priority.push(req) 14 | else this.queue.push(req) 15 | 16 | this.requests.set(req.id, req) 17 | this.length++ 18 | } 19 | 20 | shift () { 21 | while (this.priority.length > 0) { 22 | const msg = this.priority.pop() 23 | const req = this._processRequest(msg) 24 | if (req !== null) return req 25 | } 26 | 27 | while (this.queue.length > 0) { 28 | const msg = this.queue.shift() 29 | const req = this._processRequest(msg) 30 | if (req !== null) return req 31 | } 32 | 33 | return null 34 | } 35 | 36 | _processRequest (req) { 37 | if (req.block || req.hash || req.seek || req.upgrade || req.manifest) { 38 | this.requests.delete(req.id) 39 | this.length-- 40 | return req 41 | } 42 | 43 | return null 44 | } 45 | 46 | clear () { 47 | this.queue.clear() 48 | this.priority = [] 49 | this.length = 0 50 | this.requests.clear() 51 | } 52 | 53 | delete (id) { 54 | const req = this.requests.get(id) 55 | if (!req) return 56 | 57 | req.block = null 58 | req.hash = null 59 | req.seek = null 60 | req.upgrade = null 61 | req.manifest = false 62 | 63 | this.requests.delete(id) 64 | this.length-- 65 | 66 | if (this.length === 0) { 67 | this.queue.clear() 68 | this.priority = [] 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /lib/remote-bitfield.js: -------------------------------------------------------------------------------- 1 | const BigSparseArray = require('big-sparse-array') 2 | const quickbit = require('./compat').quickbit 3 | 4 | const BITS_PER_PAGE = 32768 5 | const BYTES_PER_PAGE = BITS_PER_PAGE / 8 6 | const WORDS_PER_PAGE = BYTES_PER_PAGE / 4 7 | const BITS_PER_SEGMENT = 2097152 8 | const BYTES_PER_SEGMENT = BITS_PER_SEGMENT / 8 9 | const PAGES_PER_SEGMENT = BITS_PER_SEGMENT / BITS_PER_PAGE 10 | 11 | class RemoteBitfieldPage { 12 | constructor (index, bitfield, segment) { 13 | this.index = index 14 | this.offset = index * BYTES_PER_PAGE - segment.offset 15 | this.bitfield = bitfield 16 | this.segment = segment 17 | 18 | segment.add(this) 19 | } 20 | 21 | get tree () { 22 | return this.segment.tree 23 | } 24 | 25 | get (index) { 26 | return quickbit.get(this.bitfield, index) 27 | } 28 | 29 | set (index, val) { 30 | if (quickbit.set(this.bitfield, index, val)) { 31 | this.tree.update(this.offset * 8 + index) 32 | } 33 | } 34 | 35 | setRange (start, end, val) { 36 | quickbit.fill(this.bitfield, val, start, end) 37 | 38 | let i = Math.floor(start / 128) 39 | const n = i + Math.ceil((end - start) / 128) 40 | 41 | while (i <= n) this.tree.update(this.offset * 8 + i++ * 128) 42 | } 43 | 44 | findFirst (val, position) { 45 | return quickbit.findFirst(this.bitfield, val, position) 46 | } 47 | 48 | findLast (val, position) { 49 | return quickbit.findLast(this.bitfield, val, 
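// search backwards from this position within the page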
position) 50 | } 51 | 52 | insert (start, bitfield) { 53 | this.bitfield.set(bitfield, start / 32) 54 | this.segment.refresh() 55 | } 56 | 57 | clear (start, bitfield) { 58 | quickbit.clear(this.bitfield, { field: bitfield, offset: start }) 59 | } 60 | } 61 | 62 | class RemoteBitfieldSegment { 63 | constructor (index) { 64 | this.index = index 65 | this.offset = index * BYTES_PER_SEGMENT 66 | this.tree = quickbit.Index.from([], BYTES_PER_SEGMENT) 67 | this.pages = new Array(PAGES_PER_SEGMENT) 68 | this.pagesLength = 0 69 | } 70 | 71 | get chunks () { 72 | return this.tree.chunks 73 | } 74 | 75 | refresh () { 76 | this.tree = quickbit.Index.from(this.tree.chunks, BYTES_PER_SEGMENT) 77 | } 78 | 79 | add (page) { 80 | const pageIndex = page.index - this.index * PAGES_PER_SEGMENT 81 | if (pageIndex >= this.pagesLength) this.pagesLength = pageIndex + 1 82 | 83 | this.pages[pageIndex] = page 84 | 85 | const chunk = { field: page.bitfield, offset: page.offset } 86 | 87 | this.chunks.push(chunk) 88 | 89 | for (let i = this.chunks.length - 2; i >= 0; i--) { 90 | const prev = this.chunks[i] 91 | if (prev.offset <= chunk.offset) break 92 | this.chunks[i] = chunk 93 | this.chunks[i + 1] = prev 94 | } 95 | } 96 | 97 | findFirst (val, position) { 98 | position = this.tree.skipFirst(!val, position) 99 | 100 | let j = position & (BITS_PER_PAGE - 1) 101 | let i = (position - j) / BITS_PER_PAGE 102 | 103 | if (i >= PAGES_PER_SEGMENT) return -1 104 | 105 | while (i < this.pagesLength) { 106 | const p = this.pages[i] 107 | 108 | let index = -1 109 | 110 | if (p) index = p.findFirst(val, j) 111 | else if (!val) index = j 112 | 113 | if (index !== -1) return i * BITS_PER_PAGE + index 114 | 115 | j = 0 116 | i++ 117 | } 118 | 119 | return (val || this.pagesLength === PAGES_PER_SEGMENT) ? -1 : this.pagesLength * BITS_PER_PAGE 120 | } 121 | 122 | findLast (val, position) { 123 | position = this.tree.skipLast(!val, position) 124 | 125 | let j = position & (BITS_PER_PAGE - 1) 126 | let i = (position - j) / BITS_PER_PAGE 127 | 128 | if (i >= PAGES_PER_SEGMENT) return -1 129 | 130 | while (i >= 0) { 131 | const p = this.pages[i] 132 | 133 | let index = -1 134 | 135 | if (p) index = p.findLast(val, j) 136 | else if (!val) index = j 137 | 138 | if (index !== -1) return i * BITS_PER_PAGE + index 139 | 140 | j = BITS_PER_PAGE - 1 141 | i-- 142 | } 143 | 144 | return -1 145 | } 146 | } 147 | 148 | module.exports = class RemoteBitfield { 149 | static BITS_PER_PAGE = BITS_PER_PAGE 150 | 151 | constructor () { 152 | this._pages = new BigSparseArray() 153 | this._segments = new BigSparseArray() 154 | this._maxSegments = 0 155 | } 156 | 157 | getBitfield (index) { 158 | const j = index & (BITS_PER_PAGE - 1) 159 | const i = (index - j) / BITS_PER_PAGE 160 | 161 | const p = this._pages.get(i) 162 | return p || null 163 | } 164 | 165 | get (index) { 166 | const j = index & (BITS_PER_PAGE - 1) 167 | const i = (index - j) / BITS_PER_PAGE 168 | 169 | const p = this._pages.get(i) 170 | 171 | return p ? 
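// pages are allocated lazily, so a missing page is implicitly all-zero. E.g. (sketch):
//
//   const bits = new RemoteBitfield()
//   bits.get(0) // false, without allocating anything
//   bits.set(1000000, true)
//   bits.get(1000000) // true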
p.get(j) : false 172 | } 173 | 174 | set (index, val) { 175 | const j = index & (BITS_PER_PAGE - 1) 176 | const i = (index - j) / BITS_PER_PAGE 177 | 178 | let p = this._pages.get(i) 179 | 180 | if (!p && val) { 181 | const k = Math.floor(i / PAGES_PER_SEGMENT) 182 | const s = this._segments.get(k) || this._segments.set(k, new RemoteBitfieldSegment(k)) 183 | if (this._maxSegments <= k) this._maxSegments = k + 1 184 | 185 | p = this._pages.set(i, new RemoteBitfieldPage(i, new Uint32Array(WORDS_PER_PAGE), s)) 186 | } 187 | 188 | if (p) p.set(j, val) 189 | } 190 | 191 | setRange (start, end, val) { 192 | let j = start & (BITS_PER_PAGE - 1) 193 | let i = (start - j) / BITS_PER_PAGE 194 | 195 | while (start < end) { 196 | let p = this._pages.get(i) 197 | 198 | if (!p && val) { 199 | const k = Math.floor(i / PAGES_PER_SEGMENT) 200 | const s = this._segments.get(k) || this._segments.set(k, new RemoteBitfieldSegment(k)) 201 | if (this._maxSegments <= k) this._maxSegments = k + 1 202 | 203 | p = this._pages.set(i, new RemoteBitfieldPage(i, new Uint32Array(WORDS_PER_PAGE), s)) 204 | } 205 | 206 | const offset = i * BITS_PER_PAGE 207 | const last = Math.min(end - offset, BITS_PER_PAGE) 208 | const range = last - j 209 | 210 | if (p) p.setRange(j, last, val) 211 | 212 | j = 0 213 | i++ 214 | start += range 215 | } 216 | } 217 | 218 | findFirst (val, position) { 219 | let j = position & (BITS_PER_SEGMENT - 1) 220 | let i = (position - j) / BITS_PER_SEGMENT 221 | 222 | while (i < this._maxSegments) { 223 | const s = this._segments.get(i) 224 | 225 | let index = -1 226 | 227 | if (s) index = s.findFirst(val, j) 228 | else if (!val) index = j 229 | 230 | if (index !== -1) return i * BITS_PER_SEGMENT + index 231 | 232 | j = 0 233 | i++ 234 | } 235 | 236 | // For the val === false case, we always return at least 237 | // the 'position', also if nothing was found 238 | return val 239 | ? 
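// (never -1 here: every bit past the last allocated segment is implicitly unset)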
-1 240 | : Math.max(position, this._maxSegments * BITS_PER_SEGMENT) 241 | } 242 | 243 | firstSet (position) { 244 | return this.findFirst(true, position) 245 | } 246 | 247 | firstUnset (position) { 248 | return this.findFirst(false, position) 249 | } 250 | 251 | findLast (val, position) { 252 | let j = position & (BITS_PER_SEGMENT - 1) 253 | let i = (position - j) / BITS_PER_SEGMENT 254 | 255 | while (i >= 0) { 256 | const s = this._segments.get(i) 257 | 258 | let index = -1 259 | 260 | if (s) index = s.findLast(val, j) 261 | else if (!val) index = j 262 | 263 | if (index !== -1) return i * BITS_PER_SEGMENT + index 264 | 265 | j = BITS_PER_SEGMENT - 1 266 | i-- 267 | } 268 | 269 | return -1 270 | } 271 | 272 | lastSet (position) { 273 | return this.findLast(true, position) 274 | } 275 | 276 | lastUnset (position) { 277 | return this.findLast(false, position) 278 | } 279 | 280 | insert (start, bitfield) { 281 | if (start % 32 !== 0) return false 282 | 283 | let length = bitfield.byteLength * 8 284 | 285 | let j = start & (BITS_PER_PAGE - 1) 286 | let i = (start - j) / BITS_PER_PAGE 287 | 288 | while (length > 0) { 289 | let p = this._pages.get(i) 290 | 291 | if (!p) { 292 | const k = Math.floor(i / PAGES_PER_SEGMENT) 293 | const s = this._segments.get(k) || this._segments.set(k, new RemoteBitfieldSegment(k)) 294 | if (this._maxSegments <= k) this._maxSegments = k + 1 295 | 296 | p = this._pages.set(i, new RemoteBitfieldPage(i, new Uint32Array(WORDS_PER_PAGE), s)) 297 | } 298 | 299 | const end = Math.min(j + length, BITS_PER_PAGE) 300 | const range = end - j 301 | 302 | p.insert(j, bitfield.subarray(0, range / 32)) 303 | 304 | bitfield = bitfield.subarray(range / 32) 305 | 306 | j = 0 307 | i++ 308 | length -= range 309 | } 310 | 311 | return true 312 | } 313 | 314 | clear (start, bitfield) { 315 | if (start % 32 !== 0) return false 316 | 317 | let length = bitfield.byteLength * 8 318 | 319 | let j = start & (BITS_PER_PAGE - 1) 320 | let i = (start - j) / BITS_PER_PAGE 321 | 322 | while (length > 0) { 323 | let p = this._pages.get(i) 324 | 325 | if (!p) { 326 | const k = Math.floor(i / PAGES_PER_SEGMENT) 327 | const s = this._segments.get(k) || this._segments.set(k, new RemoteBitfieldSegment(k)) 328 | if (this._maxSegments <= k) this._maxSegments = k + 1 329 | 330 | p = this._pages.set(i, new RemoteBitfieldPage(i, new Uint32Array(WORDS_PER_PAGE), s)) 331 | } 332 | 333 | const end = Math.min(j + length, BITS_PER_PAGE) 334 | const range = end - j 335 | 336 | p.clear(j, bitfield.subarray(0, range / 32)) 337 | 338 | bitfield = bitfield.subarray(range / 32) 339 | 340 | j = 0 341 | i++ 342 | length -= range 343 | } 344 | 345 | return true 346 | } 347 | } 348 | -------------------------------------------------------------------------------- /lib/streams.js: -------------------------------------------------------------------------------- 1 | const { Writable, Readable } = require('streamx') 2 | 3 | class ReadStream extends Readable { 4 | constructor (core, opts = {}) { 5 | super() 6 | 7 | this.core = core 8 | this.start = opts.start || 0 9 | this.end = typeof opts.end === 'number' ? opts.end : -1 10 | this.snapshot = !opts.live && opts.snapshot !== false 11 | this.live = this.end === -1 ? 
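// an explicit end always bounds the stream, so live mode only applies to open-ended reads.
// A minimal usage sketch (assuming an existing core):
//
//   for await (const block of new ReadStream(core, { start: 0 })) {
//     console.log(block)
//   }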
!!opts.live : false 12 | } 13 | 14 | _open (cb) { 15 | this._openP().then(cb, cb) 16 | } 17 | 18 | _read (cb) { 19 | this._readP().then(cb, cb) 20 | } 21 | 22 | async _openP () { 23 | if (this.end === -1) await this.core.update() 24 | else await this.core.ready() 25 | if (this.snapshot && this.end === -1) this.end = this.core.length 26 | } 27 | 28 | async _readP () { 29 | const end = this.live ? -1 : (this.end === -1 ? this.core.length : this.end) 30 | if (end >= 0 && this.start >= end) { 31 | this.push(null) 32 | return 33 | } 34 | 35 | this.push(await this.core.get(this.start++)) 36 | } 37 | } 38 | 39 | exports.ReadStream = ReadStream 40 | 41 | class WriteStream extends Writable { 42 | constructor (core) { 43 | super() 44 | this.core = core 45 | } 46 | 47 | _writev (batch, cb) { 48 | this._writevP(batch).then(cb, cb) 49 | } 50 | 51 | async _writevP (batch) { 52 | await this.core.append(batch) 53 | } 54 | } 55 | 56 | exports.WriteStream = WriteStream 57 | 58 | class ByteStream extends Readable { 59 | constructor (core, opts = {}) { 60 | super() 61 | 62 | this._core = core 63 | this._index = 0 64 | this._range = null 65 | 66 | this._byteOffset = opts.byteOffset || 0 67 | this._byteLength = typeof opts.byteLength === 'number' ? opts.byteLength : -1 68 | this._prefetch = typeof opts.prefetch === 'number' ? opts.prefetch : 32 69 | 70 | this._applyOffset = this._byteOffset > 0 71 | } 72 | 73 | _open (cb) { 74 | this._openp().then(cb, cb) 75 | } 76 | 77 | _read (cb) { 78 | this._readp().then(cb, cb) 79 | } 80 | 81 | async _openp () { 82 | if (this._byteLength === -1) { 83 | await this._core.update() 84 | this._byteLength = Math.max(this._core.byteLength - this._byteOffset, 0) 85 | } 86 | } 87 | 88 | async _readp () { 89 | let data = null 90 | 91 | if (this._byteLength === 0) { 92 | this.push(null) 93 | return 94 | } 95 | 96 | let relativeOffset = 0 97 | 98 | if (this._applyOffset) { 99 | this._applyOffset = false 100 | 101 | const [block, byteOffset] = await this._core.seek(this._byteOffset) 102 | 103 | this._index = block 104 | relativeOffset = byteOffset 105 | } 106 | 107 | this._predownload(this._index + 1) 108 | data = await this._core.get(this._index++, { valueEncoding: 'binary' }) 109 | 110 | if (relativeOffset > 0) data = data.subarray(relativeOffset) 111 | 112 | if (data.byteLength > this._byteLength) data = data.subarray(0, this._byteLength) 113 | this._byteLength -= data.byteLength 114 | 115 | this.push(data) 116 | if (this._byteLength === 0) this.push(null) 117 | } 118 | 119 | _predownload (index) { 120 | if (this._range) this._range.destroy() 121 | this._range = this._core.download({ start: index, end: index + this._prefetch, linear: true }) 122 | } 123 | 124 | _destroy (cb) { 125 | if (this._range) this._range.destroy() 126 | cb(null) 127 | } 128 | } 129 | 130 | exports.ByteStream = ByteStream 131 | -------------------------------------------------------------------------------- /lib/verifier.js: -------------------------------------------------------------------------------- 1 | const crypto = require('hypercore-crypto') 2 | const b4a = require('b4a') 3 | const c = require('compact-encoding') 4 | const flat = require('flat-tree') 5 | const { BAD_ARGUMENT } = require('hypercore-errors') 6 | const unslab = require('unslab') 7 | 8 | const m = require('./messages') 9 | const multisig = require('./multisig') 10 | const caps = require('./caps') 11 | 12 | class Signer { 13 | constructor (manifestHash, version, index, { signature = 'ed25519', publicKey, namespace = caps.DEFAULT_NAMESPACE 
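// these defaults describe a plain ed25519 signer in the default namespace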
} = {}) { 14 | if (!publicKey) throw BAD_ARGUMENT('public key is required for a signer') 15 | if (signature !== 'ed25519') throw BAD_ARGUMENT('Only Ed25519 signatures are supported') 16 | 17 | this.manifestHash = manifestHash 18 | this.version = version 19 | this.signer = index 20 | this.signature = signature 21 | this.publicKey = publicKey 22 | this.namespace = namespace 23 | } 24 | 25 | _ctx () { 26 | return this.version === 0 ? this.namespace : this.manifestHash 27 | } 28 | 29 | verify (batch, signature) { 30 | return crypto.verify(batch.signable(this._ctx()), signature, this.publicKey) 31 | } 32 | 33 | sign (batch, keyPair) { 34 | return crypto.sign(batch.signable(this._ctx()), keyPair.secretKey) 35 | } 36 | } 37 | 38 | class CompatSigner extends Signer { 39 | constructor (index, signer, legacy) { 40 | super(null, 0, index, signer) 41 | this.legacy = legacy 42 | } 43 | 44 | verify (batch, signature) { 45 | return crypto.verify(batch.signableCompat(this.legacy), signature, this.publicKey) 46 | } 47 | 48 | sign (batch, keyPair) { 49 | return crypto.sign(batch.signableCompat(this.legacy), keyPair.secretKey) 50 | } 51 | } 52 | 53 | module.exports = class Verifier { 54 | constructor (manifestHash, manifest, { compat = isCompat(manifestHash, manifest), legacy = false } = {}) { 55 | const self = this 56 | 57 | this.manifestHash = manifestHash 58 | this.compat = compat || manifest === null 59 | this.version = this.compat ? 0 : typeof manifest.version === 'number' ? manifest.version : 1 60 | this.hash = manifest.hash || 'blake2b' 61 | this.allowPatch = !this.compat && !!manifest.allowPatch 62 | this.quorum = this.compat ? 1 : defaultQuorum(manifest) 63 | 64 | this.signers = manifest.signers ? manifest.signers.map(createSigner) : [] 65 | this.prologue = this.compat ? null : (manifest.prologue || null) 66 | 67 | function createSigner (signer, index) { 68 | return self.compat 69 | ? new CompatSigner(index, signer, legacy) 70 | : new Signer(manifestHash, self.version, index, signer) 71 | } 72 | } 73 | 74 | _verifyCompat (batch, signature) { 75 | if (!signature) return false 76 | 77 | if (this.compat || (!this.allowPatch && this.signers.length === 1)) { 78 | return !!signature && this.signers[0].verify(batch, signature) 79 | } 80 | 81 | return this._verifyMulti(batch, signature) 82 | } 83 | 84 | _inflate (signature) { 85 | if (this.version >= 1) return multisig.inflate(signature) 86 | const { proofs, patch } = multisig.inflatev0(signature) 87 | 88 | return { 89 | proofs: proofs.map(proofToVersion1), 90 | patch 91 | } 92 | } 93 | 94 | _verifyMulti (batch, signature) { 95 | if (!signature || this.quorum === 0) return false 96 | 97 | const { proofs, patch } = this._inflate(signature) 98 | if (proofs.length < this.quorum) return false 99 | 100 | const tried = new Uint8Array(this.signers.length) 101 | const nodes = this.allowPatch && patch.length ? 
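// index the shared patch nodes by tree index so each proof can look up the nodes its upgrade references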
toMap(patch) : null 102 | 103 | for (let i = 0; i < this.quorum; i++) { 104 | const inp = proofs[i] 105 | 106 | let tree = batch 107 | 108 | if (inp.patch && this.allowPatch) { 109 | tree = batch.clone() 110 | 111 | const upgrade = generateUpgrade(nodes, batch.length, inp.patch) 112 | const proof = { fork: tree.fork, block: null, hash: null, seek: null, upgrade, manifest: null } 113 | 114 | try { 115 | if (!tree.verifyUpgrade(proof)) return false 116 | } catch { 117 | return false 118 | } 119 | } 120 | 121 | if (inp.signer >= this.signers.length || tried[inp.signer]) return false 122 | tried[inp.signer] = 1 123 | 124 | const s = this.signers[inp.signer] 125 | if (!s.verify(tree, inp.signature)) return false 126 | } 127 | 128 | return true 129 | } 130 | 131 | verify (batch, signature) { 132 | if (this.version === 0) { 133 | return this._verifyCompat(batch, signature) 134 | } 135 | 136 | if (this.prologue !== null && batch.length <= this.prologue.length) { 137 | return batch.length === this.prologue.length && b4a.equals(batch.hash(), this.prologue.hash) 138 | } 139 | 140 | return this._verifyMulti(batch, signature) 141 | } 142 | 143 | // TODO: better api for this that is more ... multisig-ey 144 | sign (batch, keyPair) { 145 | if (!keyPair || !keyPair.secretKey) throw BAD_ARGUMENT('No key pair was passed') 146 | 147 | for (const s of this.signers) { 148 | if (b4a.equals(s.publicKey, keyPair.publicKey)) { 149 | const signature = s.sign(batch, keyPair) 150 | if (this.signers.length !== 1 || this.version === 0) return signature 151 | return this.assemble([{ signer: 0, signature, patch: 0, nodes: null }]) 152 | } 153 | } 154 | 155 | throw BAD_ARGUMENT('Public key is not a declared signer') 156 | } 157 | 158 | assemble (inputs) { 159 | return this.version === 0 ? multisig.assemblev0(inputs) : multisig.assemble(inputs) 160 | } 161 | 162 | static manifestHash (manifest) { 163 | return manifestHash(manifest) 164 | } 165 | 166 | static encodeManifest (manifest) { 167 | return c.encode(m.manifest, manifest) 168 | } 169 | 170 | static decodeManifest (manifest) { 171 | return c.decode(m.manifest, manifest) 172 | } 173 | 174 | static defaultSignerManifest (publicKey) { 175 | return { 176 | version: 1, 177 | hash: 'blake2b', 178 | allowPatch: false, 179 | quorum: 1, 180 | signers: [{ 181 | signature: 'ed25519', 182 | namespace: caps.DEFAULT_NAMESPACE, 183 | publicKey 184 | }], 185 | prologue: null, 186 | linked: null, 187 | userData: null 188 | } 189 | } 190 | 191 | static fromManifest (manifest, opts) { 192 | const m = this.createManifest(manifest) 193 | return new this(manifestHash(m), m, opts) 194 | } 195 | 196 | static createManifest (inp) { 197 | if (!inp) return null 198 | 199 | const manifest = { 200 | version: getManifestVersion(inp), // defaults to v1 201 | hash: 'blake2b', 202 | allowPatch: !!inp.allowPatch, 203 | quorum: defaultQuorum(inp), 204 | signers: inp.signers ? 
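// parseSigner validates each entry and throws for anything other than ed25519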
inp.signers.map(parseSigner) : [], 205 | prologue: null, 206 | linked: null, 207 | userData: inp.userData || null 208 | } 209 | 210 | if (inp.hash && inp.hash !== 'blake2b') throw BAD_ARGUMENT('Only Blake2b hashes are supported') 211 | 212 | if (inp.prologue) { 213 | if (!(b4a.isBuffer(inp.prologue.hash) && inp.prologue.hash.byteLength === 32) || !(inp.prologue.length >= 0)) { 214 | throw BAD_ARGUMENT('Invalid prologue') 215 | } 216 | 217 | manifest.prologue = inp.prologue 218 | manifest.prologue.hash = unslab(manifest.prologue.hash) 219 | } 220 | 221 | if (manifest.userData !== null && manifest.version < 2) { 222 | throw BAD_ARGUMENT('Invalid field: userData') 223 | } 224 | 225 | if (inp.linked && inp.linked.length) { 226 | if (manifest.version < 2) throw BAD_ARGUMENT('Invalid field: linked') 227 | 228 | for (const key of inp.linked) { 229 | if (!(b4a.isBuffer(key) && key.byteLength === 32)) { 230 | throw BAD_ARGUMENT('Invalid key') 231 | } 232 | } 233 | 234 | manifest.linked = inp.linked 235 | } 236 | 237 | return manifest 238 | } 239 | 240 | static isValidManifest (key, manifest) { 241 | return b4a.equals(key, manifestHash(manifest)) 242 | } 243 | 244 | static isCompat (key, manifest) { 245 | return isCompat(key, manifest) 246 | } 247 | 248 | static sign (manifest, batch, keyPair, opts) { 249 | return Verifier.fromManifest(manifest, opts).sign(batch, keyPair) 250 | } 251 | } 252 | 253 | function toMap (nodes) { 254 | const m = new Map() 255 | for (const node of nodes) m.set(node.index, node) 256 | return m 257 | } 258 | 259 | function isCompat (key, manifest) { 260 | return !!(manifest && manifest.signers.length === 1 && b4a.equals(key, manifest.signers[0].publicKey)) 261 | } 262 | 263 | function defaultQuorum (man) { 264 | if (typeof man.quorum === 'number') return man.quorum 265 | if (!man.signers || !man.signers.length) return 0 266 | return (man.signers.length >> 1) + 1 267 | } 268 | 269 | function generateUpgrade (patch, start, length) { 270 | const upgrade = { start, length, nodes: null, additionalNodes: [], signature: null } 271 | 272 | const from = start * 2 273 | const to = from + length * 2 274 | 275 | for (const ite = flat.iterator(0); ite.fullRoot(to); ite.nextTree()) { 276 | if (ite.index + ite.factor / 2 < from) continue 277 | 278 | if (upgrade.nodes === null && ite.contains(from - 2)) { 279 | upgrade.nodes = [] 280 | 281 | const root = ite.index 282 | const target = from - 2 283 | 284 | ite.seek(target) 285 | 286 | while (ite.index !== root) { 287 | ite.sibling() 288 | if (ite.index > target) upgrade.nodes.push(patch.get(ite.index)) 289 | ite.parent() 290 | } 291 | 292 | continue 293 | } 294 | 295 | if (upgrade.nodes === null) upgrade.nodes = [] 296 | upgrade.nodes.push(patch.get(ite.index)) 297 | } 298 | 299 | if (upgrade.nodes === null) upgrade.nodes = [] 300 | return upgrade 301 | } 302 | 303 | function parseSigner (signer) { 304 | validateSigner(signer) 305 | return { 306 | signature: 'ed25519', 307 | namespace: unslab(signer.namespace || caps.DEFAULT_NAMESPACE), 308 | publicKey: unslab(signer.publicKey) 309 | } 310 | } 311 | 312 | function validateSigner (signer) { 313 | if (!signer || !signer.publicKey) throw BAD_ARGUMENT('Signer missing public key') 314 | if (signer.signature && signer.signature !== 'ed25519') throw BAD_ARGUMENT('Only Ed25519 signatures are supported') 315 | } 316 | 317 | function manifestHash (manifest) { 318 | const state = { start: 0, end: 32, buffer: null } 319 | m.manifest.preencode(state, manifest) 320 | state.buffer = 
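// state.end was computed by the preencode pass above (32-byte MANIFEST cap plus the encoded manifest), so the buffer is sized exactly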
b4a.allocUnsafe(state.end) 321 | c.raw.encode(state, caps.MANIFEST) 322 | m.manifest.encode(state, manifest) 323 | return crypto.hash(state.buffer) 324 | } 325 | 326 | function proofToVersion1 (proof) { 327 | return { 328 | signer: proof.signer, 329 | signature: proof.signature, 330 | patch: proof.patch ? proof.patch.length : 0 331 | } 332 | } 333 | 334 | function getManifestVersion (inp) { 335 | if (typeof inp.version === 'number') return inp.version 336 | if (inp.linked && inp.linked.length) return 2 337 | if (inp.userData) return 2 338 | return 1 339 | } 340 | -------------------------------------------------------------------------------- /messages.js: -------------------------------------------------------------------------------- 1 | // explicitly exposed as hypercore/messages 2 | module.exports = require('./lib/messages') 3 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hypercore", 3 | "version": "11.8.3", 4 | "description": "Hypercore is a secure, distributed append-only log", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "standard && brittle test/all.js", 8 | "test:bare": "bare test/all.js", 9 | "test:generate": "brittle -r test/all.js test/*.js" 10 | }, 11 | "repository": { 12 | "type": "git", 13 | "url": "git+https://github.com/holepunchto/hypercore.git" 14 | }, 15 | "contributors": [ 16 | { 17 | "name": "Mathias Buus", 18 | "email": "mathiasbuus@gmail.com", 19 | "url": "https://mafinto.sh" 20 | }, 21 | { 22 | "name": "Andrew Osheroff", 23 | "email": "andrewosh@gmail.com", 24 | "url": "https://andrewosh.com" 25 | } 26 | ], 27 | "license": "MIT", 28 | "bugs": { 29 | "url": "https://github.com/holepunchto/hypercore/issues" 30 | }, 31 | "homepage": "https://github.com/holepunchto/hypercore#readme", 32 | "files": [ 33 | "index.js", 34 | "errors.js", 35 | "messages.js", 36 | "lib/**.js" 37 | ], 38 | "imports": { 39 | "events": { 40 | "bare": "bare-events", 41 | "default": "events" 42 | } 43 | }, 44 | "dependencies": { 45 | "@hyperswarm/secret-stream": "^6.0.0", 46 | "b4a": "^1.1.0", 47 | "bare-events": "^2.2.0", 48 | "big-sparse-array": "^1.0.3", 49 | "compact-encoding": "^2.11.0", 50 | "fast-fifo": "^1.3.0", 51 | "flat-tree": "^1.9.0", 52 | "hypercore-crypto": "^3.2.1", 53 | "hypercore-errors": "^1.2.0", 54 | "hypercore-id-encoding": "^1.2.0", 55 | "hypercore-storage": "^1.0.0", 56 | "is-options": "^1.0.1", 57 | "nanoassert": "^2.0.0", 58 | "protomux": "^3.5.0", 59 | "quickbit-universal": "^2.2.0", 60 | "random-array-iterator": "^1.0.0", 61 | "safety-catch": "^1.0.1", 62 | "sodium-universal": "^5.0.1", 63 | "streamx": "^2.12.4", 64 | "unslab": "^1.3.0", 65 | "z32": "^1.0.0" 66 | }, 67 | "devDependencies": { 68 | "brittle": "^3.0.0", 69 | "debugging-stream": "^3.1.0", 70 | "hyperswarm": "^4.3.6", 71 | "rache": "^1.0.0", 72 | "range-parser": "^1.2.1", 73 | "speedometer": "^1.1.0", 74 | "standard": "^17.0.0", 75 | "test-tmp": "^1.0.2", 76 | "tiny-byte-size": "^1.1.0", 77 | "udx-native": "^1.6.1", 78 | "uncaughts": "^1.1.0" 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /test/all.js: -------------------------------------------------------------------------------- 1 | // This runner is auto-generated by Brittle 2 | 3 | runTests() 4 | 5 | async function runTests () { 6 | const test = (await import('brittle')).default 7 | 8 | test.pause() 9 | 10 | await import('./basic.js') // todo: 
implement storageInfo API 11 | await import('./batch.js') 12 | await import('./bitfield.js') 13 | await import('./clear.js') // todo: replace Info.bytesUsed API 14 | // await import('./compat.js') // todo: how to test compat? 15 | await import('./conflicts.js') 16 | await import('./core.js') 17 | await import('./encodings.js') 18 | await import('./encryption.js') 19 | await import('./extension.js') 20 | await import('./manifest.js') 21 | await import('./merkle-tree.js') 22 | await import('./move-to.js') 23 | await import('./mutex.js') 24 | await import('./preload.js') 25 | // await import('./purge.js') // todo: implement purge 26 | await import('./remote-bitfield.js') 27 | await import('./remote-length.js') 28 | await import('./replicate.js') 29 | await import('./sessions.js') 30 | await import('./snapshots.js') 31 | await import('./streams.js') 32 | await import('./timeouts.js') 33 | await import('./user-data.js') 34 | await import('./atomic.js') 35 | 36 | test.resume() 37 | } 38 | -------------------------------------------------------------------------------- /test/atomic.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | 4 | const Hypercore = require('../') 5 | const { create, createStorage } = require('./helpers') 6 | 7 | test('atomic - session', async function (t) { 8 | const core = await create(t) 9 | 10 | await core.append('hello') 11 | await core.append('world') 12 | 13 | const atom = core.state.storage.createAtom() 14 | 15 | const atomic = core.session({ atom }) 16 | 17 | await atomic.append('edits!') 18 | 19 | t.alike(await atomic.get(0), b4a.from('hello')) 20 | t.alike(await atomic.get(1), b4a.from('world')) 21 | t.alike(await atomic.get(2), b4a.from('edits!')) 22 | t.alike(await atomic.seek(11), [2, 1]) 23 | t.alike(atomic.byteLength, 16) 24 | t.alike(atomic.length, 3) 25 | 26 | await atomic.close() 27 | 28 | // nothing changed as it was atomic session 29 | t.alike(core.byteLength, 10) 30 | t.alike(core.length, 2) 31 | 32 | await core.close() 33 | }) 34 | 35 | test('atomic - checkout session', async function (t) { 36 | const core = await create(t) 37 | 38 | await core.append('hello') 39 | await core.append('world') 40 | 41 | let truncates = 0 42 | let appends = 0 43 | 44 | core.on('append', () => appends++) 45 | core.on('truncate', () => truncates++) 46 | 47 | const atom = core.state.storage.createAtom() 48 | 49 | const atomic = core.session({ atom, checkout: 1 }) 50 | await atomic.ready() 51 | 52 | await atomic.append('edits!') 53 | 54 | t.alike(await atomic.get(0), b4a.from('hello')) 55 | t.alike(await atomic.get(1), b4a.from('edits!')) 56 | t.alike(await atomic.seek(11), [2, 0]) 57 | t.alike(atomic.byteLength, 11) 58 | t.alike(atomic.length, 2) 59 | 60 | // nothing changed as it was atomic session 61 | t.alike(core.byteLength, 10) 62 | t.alike(core.length, 2) 63 | 64 | t.is(appends, 0) 65 | t.is(truncates, 0) 66 | 67 | await atom.flush() 68 | 69 | t.alike(core.byteLength, 11) 70 | t.alike(core.length, 2) 71 | 72 | t.is(appends, 1) 73 | t.is(truncates, 1) 74 | 75 | await atomic.close() 76 | await core.close() 77 | }) 78 | 79 | test('atomic - append', async function (t) { 80 | const core = await create(t) 81 | 82 | await core.append('hello') 83 | await core.append('world') 84 | 85 | const atom = core.state.storage.createAtom() 86 | 87 | const atomic = core.session({ atom }) 88 | 89 | await atomic.append('edits!') 90 | 91 | t.alike(atomic.byteLength, 16) 92 | 
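// the atomic session observes the staged append immediately, while the underlying core stays unchanged until atom.flush()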
t.alike(atomic.length, 3) 93 | 94 | t.alike(core.byteLength, 10) 95 | t.alike(core.length, 2) 96 | 97 | await atom.flush() 98 | 99 | t.alike(core.byteLength, 16) 100 | t.alike(core.length, 3) 101 | 102 | await atomic.close() 103 | await core.close() 104 | }) 105 | 106 | test('atomic - multiple flushes', async function (t) { 107 | const core = await create(t) 108 | 109 | await core.append('hello') 110 | await core.append('world') 111 | 112 | const atom = core.state.storage.createAtom() 113 | 114 | const atomic = core.session({ atom }) 115 | 116 | await atomic.append('edits!') 117 | 118 | t.alike(atomic.byteLength, 16) 119 | t.alike(atomic.length, 3) 120 | 121 | t.alike(core.byteLength, 10) 122 | t.alike(core.length, 2) 123 | 124 | await atom.flush() 125 | 126 | t.alike(core.byteLength, 16) 127 | t.alike(core.length, 3) 128 | 129 | await atomic.append('more') 130 | 131 | t.alike(atomic.byteLength, 20) 132 | t.alike(atomic.length, 4) 133 | 134 | t.alike(core.byteLength, 16) 135 | t.alike(core.length, 3) 136 | 137 | await atom.flush() 138 | 139 | t.alike(core.byteLength, 20) 140 | t.alike(core.length, 4) 141 | 142 | await atomic.close() 143 | await core.close() 144 | }) 145 | 146 | test('atomic - across cores', async function (t) { 147 | const core = await create(t) 148 | const core2 = await create(t) 149 | 150 | let appends = 0 151 | 152 | t.is(core.length, 0) 153 | t.is(core.writable, true) 154 | t.is(core.readable, true) 155 | 156 | core.on('append', function () { 157 | appends++ 158 | }) 159 | 160 | const atom = core.state.storage.createAtom() 161 | 162 | const a1 = core.session({ atom }) 163 | const a2 = core2.session({ atom }) 164 | 165 | await a1.append('1.1') 166 | await a1.append('1.2') 167 | await a2.append('2.2') 168 | 169 | t.is(a1.length, 2) 170 | t.is(a2.length, 1) 171 | 172 | t.is(core.length, 0) 173 | t.is(core2.length, 0) 174 | 175 | t.is(core.core.bitfield.get(0), false) 176 | t.is(core2.core.bitfield.get(0), false) 177 | 178 | t.is(appends, 0) 179 | 180 | await atom.flush() 181 | 182 | t.is(core.length, 2) 183 | t.is(core2.length, 1) 184 | 185 | t.is(core.core.bitfield.get(0), true) 186 | t.is(core2.core.bitfield.get(0), true) 187 | 188 | t.is(appends, 1) 189 | 190 | await a1.close() 191 | await a2.close() 192 | 193 | await core.close() 194 | await core2.close() 195 | }) 196 | 197 | test('atomic - overwrite', async function (t) { 198 | const core = await create(t) 199 | const core2 = await create(t) 200 | 201 | await core.append('hello') 202 | await core.append('world') 203 | 204 | await core2.append('hello') 205 | 206 | t.is(core.length, 2) 207 | t.is(core2.length, 1) 208 | 209 | const draft = core.session({ name: 'writer' }) 210 | const draft2 = core2.session({ name: 'writer' }) 211 | 212 | await draft.append('all the way') 213 | 214 | await draft2.append('back') 215 | await draft2.append('to the') 216 | await draft2.append('beginning') 217 | 218 | const atom = core.state.storage.createAtom() 219 | 220 | const a1 = core.session({ atom }) 221 | const a2 = core2.session({ atom }) 222 | 223 | await a1.commit(draft, { treeLength: core.length }) 224 | await a2.commit(draft2, { treeLength: core2.length }) 225 | 226 | t.is(a1.length, 3) 227 | t.is(a2.length, 4) 228 | 229 | t.is(core.length, 2) 230 | t.is(core2.length, 1) 231 | 232 | await atom.flush() 233 | 234 | t.is(core.length, 3) 235 | t.is(core2.length, 4) 236 | 237 | await draft.close() 238 | await draft2.close() 239 | 240 | await a1.close() 241 | await a2.close() 242 | 243 | await core.close() 244 | await core2.close() 245 
| }) 246 | 247 | test('atomic - user data', async function (t) { 248 | const core = await create(t) 249 | 250 | await core.setUserData('hello', 'world') 251 | 252 | t.alike(await core.getUserData('hello'), b4a.from('world')) 253 | 254 | const atom = core.state.storage.createAtom() 255 | 256 | const atomic = core.session({ atom }) 257 | await atomic.setUserData('hello', 'done') 258 | 259 | t.alike(await atomic.getUserData('hello'), b4a.from('done')) 260 | t.alike(await core.getUserData('hello'), b4a.from('world')) 261 | 262 | await atom.flush() 263 | 264 | t.alike(await core.getUserData('hello'), b4a.from('done')) 265 | 266 | await atomic.close() 267 | await core.close() 268 | }) 269 | 270 | test('atomic - append and user data', async function (t) { 271 | const core = await create(t) 272 | 273 | await core.setUserData('hello', 'world') 274 | 275 | t.is(core.length, 0) 276 | t.alike(await core.getUserData('hello'), b4a.from('world')) 277 | 278 | const atom = core.state.storage.createAtom() 279 | 280 | const atomic = core.session({ atom }) 281 | 282 | await atomic.setUserData('hello', 'done') 283 | await atomic.append('append') 284 | 285 | t.alike(await core.getUserData('hello'), b4a.from('world')) 286 | t.alike(await atomic.getUserData('hello'), b4a.from('done')) 287 | 288 | t.is(core.length, 0) 289 | t.is(atomic.length, 1) 290 | 291 | await atom.flush() 292 | 293 | t.is(core.length, 1) 294 | t.alike(await core.getUserData('hello'), b4a.from('done')) 295 | 296 | await atomic.close() 297 | await core.close() 298 | }) 299 | 300 | test('atomic - overwrite and user data', async function (t) { 301 | const storage = await createStorage(t) 302 | 303 | const core = new Hypercore(storage) 304 | const core2 = new Hypercore(storage) 305 | 306 | await core.ready() 307 | await core2.ready() 308 | 309 | await core.append('hello') 310 | await core.append('world') 311 | 312 | await core2.append('hello') 313 | 314 | t.is(core.length, 2) 315 | t.is(core2.length, 1) 316 | t.alike(await core.getUserData('hello'), null) 317 | t.alike(await core.getUserData('goodbye'), null) 318 | 319 | const draft = core.session({ name: 'writer' }) 320 | const draft2 = core2.session({ name: 'writer' }) 321 | 322 | await draft.append('all the way') 323 | 324 | await draft2.append('back') 325 | await draft2.append('to the') 326 | await draft2.append('beginning') 327 | 328 | const atom = core.state.storage.createAtom() 329 | 330 | const a1 = core.session({ atom }) 331 | const a2 = core2.session({ atom }) 332 | 333 | await a1.commit(draft, { treeLength: core.length, atom }) 334 | await a2.commit(draft2, { treeLength: core2.length, atom }) 335 | 336 | await a1.setUserData('hello', 'world', { atom }) 337 | await a2.setUserData('goodbye', 'everybody', { atom }) 338 | 339 | t.is(core.length, 2) 340 | t.is(core2.length, 1) 341 | 342 | t.is(a1.length, 3) 343 | t.is(a2.length, 4) 344 | 345 | t.alike(await core.getUserData('hello'), null) 346 | t.alike(await core.getUserData('goodbye'), null) 347 | 348 | t.alike(await a1.getUserData('hello'), b4a.from('world')) 349 | t.alike(await a2.getUserData('goodbye'), b4a.from('everybody')) 350 | 351 | await atom.flush() 352 | 353 | t.is(core.length, 3) 354 | t.is(core2.length, 4) 355 | 356 | t.alike(await core.getUserData('hello'), b4a.from('world')) 357 | t.alike(await core2.getUserData('goodbye'), b4a.from('everybody')) 358 | 359 | await a1.close() 360 | await a2.close() 361 | 362 | await draft.close() 363 | await draft2.close() 364 | 365 | await core.close() 366 | await core2.close() 367 | }) 368 
| 369 | test('atomic - truncate', async function (t) { 370 | const core = await create(t) 371 | 372 | await core.append('hello') 373 | await core.append('world') 374 | 375 | const atom = core.state.storage.createAtom() 376 | 377 | const atomic = core.session({ atom }) 378 | 379 | await atomic.truncate(1) 380 | 381 | t.alike(core.byteLength, 10) 382 | t.alike(core.length, 2) 383 | 384 | t.alike(atomic.byteLength, 5) 385 | t.alike(atomic.length, 1) 386 | 387 | t.alike(await atomic.get(0), b4a.from('hello')) 388 | t.alike(await atomic.get(1, { wait: false }), null) 389 | t.alike(await atomic.seek(6, { wait: false }), null) 390 | 391 | await atom.flush() 392 | 393 | t.alike(core.byteLength, 5) 394 | t.alike(core.length, 1) 395 | 396 | await atomic.close() 397 | await core.close() 398 | }) 399 | 400 | // not supported yet 401 | test.skip('draft truncate then append', async function (t) { 402 | const core = await create(t) 403 | 404 | await core.append('hello') 405 | await core.append('world') 406 | 407 | const atom = core.state.storage.createAtom() 408 | 409 | const atomic = core.session({ atom }) 410 | 411 | await atomic.truncate(1) 412 | await atomic.append('other') 413 | await atomic.append('data') 414 | 415 | t.alike(core.byteLength, 10) 416 | t.alike(core.length, 2) 417 | t.alike(await core.get(2, { wait: false }), null) 418 | 419 | t.alike(atomic.byteLength, 14) 420 | t.alike(atomic.length, 3) 421 | 422 | t.alike(await atomic.get(0), b4a.from('hello')) 423 | t.alike(await atomic.get(1), b4a.from('other')) 424 | t.alike(await atomic.get(2), b4a.from('data')) 425 | t.alike(await atomic.seek(11), [2, 1]) 426 | 427 | await atom.flush() 428 | 429 | // nothing changed as it was a draft 430 | t.alike(core.byteLength, 14) 431 | t.alike(core.length, 3) 432 | t.alike(await core.get(2), b4a.from('data')) 433 | 434 | await atomic.close() 435 | await core.close() 436 | }) 437 | -------------------------------------------------------------------------------- /test/bench/networking.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const speedometer = require('speedometer') 3 | const byteSize = require('tiny-byte-size') 4 | const b4a = require('b4a') 5 | const { create } = require('../helpers') 6 | const { makeStreamPair } = require('../helpers/networking.js') 7 | 8 | async function setup (t, opts = {}) { 9 | t.timeout(60 * 1000) 10 | 11 | const a = await create() 12 | const b = await create(a.key) 13 | 14 | await a.append(new Array(opts.append).fill().map(() => b4a.alloc(16).fill('a'))) 15 | 16 | // Note: stream.rtt will be around double this latency value 17 | const [n1, n2] = makeStreamPair(t, { latency: opts.latency }) 18 | a.replicate(n1) 19 | b.replicate(n2) 20 | 21 | const info = track(b) 22 | let started = Date.now() 23 | 24 | t.comment('Starting to download') 25 | b.on('download', onchange) 26 | b.on('upload', onchange) 27 | await b.download({ start: 0, end: a.length }).done() 28 | 29 | return [a, b] 30 | 31 | function onchange () { 32 | if (b.replicator.peers.length !== 1) throw new Error('Different number of peers') 33 | 34 | if (Date.now() - started < 500) return 35 | started = Date.now() 36 | 37 | const peer = b.replicator.peers[0] 38 | const rtt = peer.stream.rawStream.rtt 39 | t.comment('Blocks', Math.ceil(info.blocks.down()), '(' + byteSize(info.network.down()) + ' bytes)', 'RTT', rtt, 'Max inflight', peer.getMaxInflight()) 40 | } 41 | } 42 | 43 | test('replication speed - localhost', async function (t) { 44 | 
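// zero added latency approximates same-host replication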
await setup(t, { append: 5000, latency: [0, 0] }) 45 | }) 46 | 47 | test('replication speed - nearby', async function (t) { 48 | await setup(t, { append: 5000, latency: [25, 25] }) 49 | }) 50 | 51 | test('replication speed - different country', async function (t) { 52 | await setup(t, { append: 5000, latency: [75, 75] }) 53 | }) 54 | 55 | test('replication speed - far away', async function (t) { 56 | await setup(t, { append: 5000, latency: [250, 250] }) 57 | }) 58 | 59 | test('replication speed - orbit', async function (t) { 60 | await setup(t, { append: 5000, latency: [500, 500] }) 61 | }) 62 | 63 | function track (core) { 64 | const info = { 65 | blocks: { down: speedometer(), up: speedometer() }, 66 | network: { down: speedometer(), up: speedometer() } 67 | } 68 | 69 | core.on('download', onspeed.bind(null, 'down', info)) 70 | core.on('upload', onspeed.bind(null, 'up', info)) 71 | 72 | return info 73 | } 74 | 75 | function onspeed (eventName, info, index, byteLength, from) { 76 | const block = info.blocks[eventName] 77 | const network = info.network[eventName] 78 | 79 | const blocks = block(1) 80 | const networks = network(byteLength) 81 | 82 | if (block.max === undefined || blocks > block.max) block.max = blocks 83 | if (network.max === undefined || networks > network.max) network.max = networks 84 | } 85 | -------------------------------------------------------------------------------- /test/bench/open-close.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const tmp = require('test-tmp') 3 | const b4a = require('b4a') 4 | const Hypercore = require('../../index.js') 5 | 6 | test('open and close', async function (t) { 7 | const tmpDir = await tmp(t) 8 | 9 | const core = new Hypercore(tmpDir) 10 | for (let i = 0; i < 100; i++) { 11 | await core.append(b4a.from([0])) 12 | } 13 | await core.close() 14 | 15 | const elapsed = await t.execution(async function () { 16 | for (let i = 0; i < 100; i++) { 17 | const core = new Hypercore(tmpDir) 18 | await core.ready() 19 | await core.close() 20 | } 21 | }) 22 | 23 | t.comment(elapsed) 24 | }) 25 | -------------------------------------------------------------------------------- /test/bench/range-download.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | 4 | const { create, replicate } = require('../helpers') 5 | 6 | test('range download, single block missing', async function (t) { 7 | const a = await create() 8 | const b = await create(a.key) 9 | 10 | const n = 100000 11 | 12 | for (let i = 0; i < n; i++) await a.append(b4a.from([0])) 13 | 14 | replicate(a, b, t) 15 | 16 | await b.download({ start: 0, end: n }).done() 17 | await b.clear(n - 1) 18 | 19 | const elapsed = await t.execution(async function () { 20 | await b.download({ start: 0, end: n }).done() 21 | }) 22 | 23 | t.comment(elapsed) 24 | }) 25 | 26 | test('range download, repeated', async function (t) { 27 | const a = await create() 28 | const b = await create(a.key) 29 | 30 | const n = 100000 31 | 32 | for (let i = 0; i < n; i++) await a.append(b4a.from([0])) 33 | 34 | replicate(a, b, t) 35 | 36 | await b.download({ start: 0, end: n }).done() 37 | 38 | const elapsed = await t.execution(async function () { 39 | for (let i = 0; i < 1000; i++) { 40 | await b.download({ start: 0, end: n }).done() 41 | } 42 | }) 43 | 44 | t.comment(elapsed) 45 | }) 46 | 
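// note: t.execution resolves with the elapsed time, which is what these benches report via t.comment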
-------------------------------------------------------------------------------- /test/bench/speedtest.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const Hypercore = require('../../index.js') 3 | const RAM = require('random-access-memory') 4 | const { replicate } = require('../helpers') 5 | 6 | test('speedtest replication with many peers', { timeout: 120000 }, async function (t) { 7 | const core = new Hypercore(RAM) 8 | await core.ready() 9 | 10 | const clone1 = new Hypercore(RAM, core.key) 11 | const clone2 = new Hypercore(RAM, core.key) 12 | const clone3 = new Hypercore(RAM, core.key) 13 | 14 | for (let i = 0; i < 100000; i++) { 15 | await core.append('#' + i) 16 | if (i % 10000 === 0) t.comment('Append ' + i) 17 | } 18 | 19 | t.comment('Writer complete') 20 | 21 | replicate(core, clone1, t) 22 | replicate(core, clone2, t) 23 | replicate(core, clone3, t) 24 | replicate(clone1, clone2, t) 25 | replicate(clone1, clone3, t) 26 | 27 | const started = Date.now() 28 | let count = 0 29 | 30 | clone1.on('download', ondownload) 31 | clone2.on('download', ondownload) 32 | 33 | function ondownload () { 34 | if (++count % 10000 === 0) t.comment('Downloaded ' + count + ' blocks after ' + (Date.now() - started) + ' ms') 35 | } 36 | 37 | const d1 = clone1.download({ start: 0, end: core.length }) 38 | const d2 = clone2.download({ start: 0, end: core.length }) 39 | 40 | await d1.done() 41 | await d2.done() 42 | 43 | t.comment('Done in ' + (Date.now() - started) + ' ms') 44 | }) 45 | -------------------------------------------------------------------------------- /test/bench/throughput.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const tmp = require('test-tmp') 3 | const b4a = require('b4a') 4 | const { create, replicate } = require('../helpers') 5 | const Hypercore = require('../../index.js') 6 | 7 | test('throughput from disk', async function (t) { 8 | const dir = await tmp(t) 9 | 10 | const a = new Hypercore(dir) 11 | await a.append(new Array(20000).fill().map(() => b4a.alloc(1))) 12 | 13 | const b = await create(a.key) 14 | replicate(a, b, t) 15 | 16 | const elapsed = await t.execution(async function () { 17 | await b.download({ start: 0, end: a.length }).done() 18 | }) 19 | 20 | t.comment(elapsed) 21 | 22 | await a.close() 23 | await b.close() 24 | }) 25 | -------------------------------------------------------------------------------- /test/bit-interlude.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const BitInterlude = require('../lib/bit-interlude') 3 | 4 | const bitfield = (val = false) => ({ get () { return val } }) 5 | 6 | test('bit-interlude - basic', t => { 7 | const bits = new BitInterlude(bitfield()) 8 | 9 | bits.setRange(0, 5, true) 10 | bits.setRange(10, 15, true) 11 | bits.setRange(16, 20, true) 12 | 13 | t.is(bits.get(3), true) 14 | t.is(bits.get(7), false) 15 | t.is(bits.get(10), true) 16 | t.is(bits.get(15), false) 17 | t.is(bits.get(18), true) 18 | 19 | t.is(bits.contiguousLength(0), 5) 20 | t.is(bits.contiguousLength(10), 15) 21 | t.is(bits.contiguousLength(16), 20) 22 | }) 23 | 24 | test('bit-interlude - drop', t => { 25 | const bits = new BitInterlude(bitfield(true)) 26 | 27 | bits.setRange(15, 20, false) 28 | 29 | t.is(bits.get(7), true) 30 | t.is(bits.get(15), false) 31 | t.is(bits.get(18), false) 32 | 33 | t.is(bits.contiguousLength(0), 15) 34 
| t.is(bits.contiguousLength(16), 15) 35 | }) 36 | 37 | test('bit-interlude - drop multiple', t => { 38 | const bits = new BitInterlude(bitfield(true)) 39 | 40 | bits.setRange(0, 10, false) 41 | bits.setRange(15, 20, false) 42 | 43 | t.is(bits.get(7), false) 44 | t.is(bits.get(12), true) 45 | t.is(bits.get(15), false) 46 | t.is(bits.get(18), false) 47 | 48 | t.is(bits.contiguousLength(8), 0) 49 | t.is(bits.contiguousLength(12), 0) 50 | t.is(bits.contiguousLength(16), 0) 51 | }) 52 | 53 | test('bit-interlude - set & drop', t => { 54 | const bits = new BitInterlude(bitfield()) 55 | 56 | bits.setRange(0, 10, true) 57 | bits.setRange(7, 12, false) 58 | bits.setRange(15, 20, true) 59 | bits.setRange(2, 3, false) 60 | 61 | t.is(bits.get(0), true) 62 | t.is(bits.get(2), false) 63 | t.is(bits.get(3), true) 64 | t.is(bits.get(7), false) 65 | t.is(bits.get(12), false) 66 | t.is(bits.get(15), true) 67 | t.is(bits.get(18), true) 68 | 69 | t.is(bits.contiguousLength(8), 2) 70 | t.is(bits.contiguousLength(12), 2) 71 | t.is(bits.contiguousLength(16), 2) 72 | }) 73 | -------------------------------------------------------------------------------- /test/bitfield.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | const createTempDir = require('test-tmp') 4 | const CoreStorage = require('hypercore-storage') 5 | const Bitfield = require('../lib/bitfield') 6 | const BitInterlude = require('../lib/bit-interlude') 7 | 8 | test('bitfield - set and get', async function (t) { 9 | const storage = await createStorage(t) 10 | const b = await Bitfield.open(storage, 0) 11 | 12 | t.absent(b.get(42)) 13 | b.set(42, true) 14 | t.ok(b.get(42)) 15 | 16 | // bigger offsets 17 | t.absent(b.get(42000000)) 18 | b.set(42000000, true) 19 | t.ok(b.get(42000000, true)) 20 | b.set(42000000, false) 21 | t.absent(b.get(42000000, true)) 22 | }) 23 | 24 | test('bitfield - random set and gets', async function (t) { 25 | const b = await Bitfield.open(await createStorage(t), 0) 26 | const set = new Set() 27 | 28 | for (let i = 0; i < 200; i++) { 29 | const idx = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER) 30 | b.set(idx, true) 31 | set.add(idx) 32 | } 33 | 34 | for (let i = 0; i < 500; i++) { 35 | const idx = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER) 36 | const expected = set.has(idx) 37 | const val = b.get(idx, true) 38 | if (val !== expected) { 39 | t.fail('expected ' + expected + ' but got ' + val + ' at ' + idx) 40 | return 41 | } 42 | } 43 | 44 | for (const idx of set) { 45 | const val = b.get(idx, true) 46 | if (val !== true) { 47 | t.fail('expected true but got ' + val + ' at ' + idx) 48 | return 49 | } 50 | } 51 | 52 | t.pass('all random set and gets pass') 53 | }) 54 | 55 | test('bitfield - reload', async function (t) { 56 | const dir = await createTempDir(t) 57 | 58 | { 59 | const storage = await createStorage(t, dir) 60 | const bitfield = await Bitfield.open(storage, 0) 61 | const b = new BitInterlude() 62 | b.setRange(142, 143, true) 63 | b.setRange(40000, 40001, true) 64 | b.setRange(1424242424, 1424242425, true) 65 | await flush(storage, b, bitfield) 66 | 67 | // fully close db 68 | await storage.db.close({ force: true }) 69 | } 70 | 71 | { 72 | const b = await Bitfield.open(await createStorage(t, dir), 1424242425) 73 | t.ok(b.get(142)) 74 | t.ok(b.get(40000)) 75 | t.ok(b.get(1424242424)) 76 | } 77 | }) 78 | 79 | test('bitfield - want', async function (t) { 80 | // This test will likely break when 
bitfields are optimised to not actually 81 | // store pages of all set or unset bits. 82 | 83 | const b = new Bitfield(b4a.alloc(1024 * 512) /* 512 KiB */) 84 | 85 | t.alike([...b.want(0, 0)], []) 86 | 87 | t.alike([...b.want(0, 1)], [ 88 | { 89 | start: 0, 90 | bitfield: new Uint32Array(1024 /* 4 KiB */) 91 | } 92 | ]) 93 | 94 | t.alike([...b.want(0, 1024 * 4 * 8 /* 4 KiB */)], [ 95 | { 96 | start: 0, 97 | bitfield: new Uint32Array(1024 /* 4 KiB */) 98 | } 99 | ]) 100 | 101 | t.alike([...b.want(0, 1024 * 13 * 8 /* 13 KiB */)], [ 102 | { 103 | start: 0, 104 | bitfield: new Uint32Array(1024 * 16 / 4 /* 16 KiB */) 105 | } 106 | ]) 107 | 108 | t.alike([...b.want(0, 1024 * 260 * 8 /* 260 KiB */)], [ 109 | { 110 | start: 0, 111 | bitfield: new Uint32Array(1024 * 256 / 4 /* 256 KiB */) 112 | }, 113 | { 114 | start: 2 ** 18 * 8, 115 | bitfield: new Uint32Array(1024 /* 4 KiB */) 116 | } 117 | ]) 118 | }) 119 | 120 | test('bitfield - sparse array overflow', async function (t) { 121 | const b = await Bitfield.open(await createStorage(t), 0) 122 | 123 | // Previously bugged due to missing bounds check in sparse array 124 | b.set(7995511118690925, true) 125 | }) 126 | 127 | test('bitfield - count', async function (t) { 128 | const s = await createStorage(t) 129 | const b = await Bitfield.open(s, 0) 130 | 131 | for (const [start, end] of [[0, 2], [5, 6], [7, 9], [13, 14], [16, 19], [20, 25]]) { 132 | b.setRange(start, end, true) 133 | } 134 | 135 | t.is(b.count(3, 18, true), 8) 136 | t.is(b.count(3, 18, false), 10) 137 | }) 138 | 139 | test('bitfield - find first, all zeroes', async function (t) { 140 | const b = await Bitfield.open(await createStorage(t), 0) 141 | 142 | t.is(b.findFirst(false, 0), 0) 143 | t.is(b.findFirst(true, 0), -1) 144 | 145 | t.comment('Page boundaries') 146 | t.is(b.findFirst(false, 2 ** 15), 2 ** 15) 147 | t.is(b.findFirst(false, 2 ** 15 - 1), 2 ** 15 - 1) 148 | t.is(b.findFirst(false, 2 ** 15 + 1), 2 ** 15 + 1) 149 | t.is(b.findFirst(false, 2 ** 16), 2 ** 16) 150 | t.is(b.findFirst(false, 2 ** 16 - 1), 2 ** 16 - 1) 151 | t.is(b.findFirst(false, 2 ** 16 + 1), 2 ** 16 + 1) 152 | 153 | t.comment('Segment boundaries') 154 | t.is(b.findFirst(false, 2 ** 21), 2 ** 21) 155 | t.is(b.findFirst(false, 2 ** 21 - 1), 2 ** 21 - 1) 156 | t.is(b.findFirst(false, 2 ** 21 + 1), 2 ** 21 + 1) 157 | t.is(b.findFirst(false, 2 ** 22), 2 ** 22) 158 | t.is(b.findFirst(false, 2 ** 22 - 1), 2 ** 22 - 1) 159 | t.is(b.findFirst(false, 2 ** 22 + 1), 2 ** 22 + 1) 160 | }) 161 | 162 | test('bitfield - find first, all ones', async function (t) { 163 | const s = await createStorage(t) 164 | const b = await Bitfield.open(s, 0) 165 | 166 | b.setRange(0, 2 ** 24, true) 167 | 168 | t.is(b.findFirst(true, 0), 0) 169 | t.is(b.findFirst(true, 2 ** 24), -1) 170 | t.is(b.findFirst(false, 0), 2 ** 24) 171 | t.is(b.findFirst(false, 2 ** 24), 2 ** 24) 172 | 173 | t.comment('Page boundaries') 174 | t.is(b.findFirst(true, 2 ** 15), 2 ** 15) 175 | t.is(b.findFirst(true, 2 ** 15 - 1), 2 ** 15 - 1) 176 | t.is(b.findFirst(true, 2 ** 15 + 1), 2 ** 15 + 1) 177 | t.is(b.findFirst(true, 2 ** 16), 2 ** 16) 178 | t.is(b.findFirst(true, 2 ** 16 - 1), 2 ** 16 - 1) 179 | t.is(b.findFirst(true, 2 ** 16 + 1), 2 ** 16 + 1) 180 | 181 | t.comment('Segment boundaries') 182 | t.is(b.findFirst(true, 2 ** 21), 2 ** 21) 183 | t.is(b.findFirst(true, 2 ** 21 - 1), 2 ** 21 - 1) 184 | t.is(b.findFirst(true, 2 ** 21 + 1), 2 ** 21 + 1) 185 | t.is(b.findFirst(true, 2 ** 22), 2 ** 22) 186 | t.is(b.findFirst(true, 2 ** 22 - 1), 2 ** 22 - 1) 187 | 
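// pages are 2 ** 15 bits and segments 2 ** 21 bits, so these probes exercise
// the search crossover on both sides of each boundary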
t.is(b.findFirst(true, 2 ** 22 + 1), 2 ** 22 + 1) 188 | }) 189 | 190 | test('bitfield - find last, all zeroes', async function (t) { 191 | const b = await Bitfield.open(await createStorage(t), 0) 192 | 193 | t.is(b.findLast(false, 0), 0) 194 | t.is(b.findLast(true, 0), -1) 195 | 196 | t.comment('Page boundaries') 197 | t.is(b.findLast(false, 2 ** 15), 2 ** 15) 198 | t.is(b.findLast(false, 2 ** 15 - 1), 2 ** 15 - 1) 199 | t.is(b.findLast(false, 2 ** 15 + 1), 2 ** 15 + 1) 200 | t.is(b.findLast(false, 2 ** 16), 2 ** 16) 201 | t.is(b.findLast(false, 2 ** 16 - 1), 2 ** 16 - 1) 202 | t.is(b.findLast(false, 2 ** 16 + 1), 2 ** 16 + 1) 203 | 204 | t.comment('Segment boundaries') 205 | t.is(b.findLast(false, 2 ** 21), 2 ** 21) 206 | t.is(b.findLast(false, 2 ** 21 - 1), 2 ** 21 - 1) 207 | t.is(b.findLast(false, 2 ** 21 + 1), 2 ** 21 + 1) 208 | t.is(b.findLast(false, 2 ** 22), 2 ** 22) 209 | t.is(b.findLast(false, 2 ** 22 - 1), 2 ** 22 - 1) 210 | t.is(b.findLast(false, 2 ** 22 + 1), 2 ** 22 + 1) 211 | }) 212 | 213 | test('bitfield - find last, all ones', async function (t) { 214 | const s = await createStorage(t) 215 | const b = await Bitfield.open(s, 0) 216 | 217 | b.setRange(0, 2 ** 24, true) 218 | 219 | t.is(b.findLast(false, 0), -1) 220 | t.is(b.findLast(false, 2 ** 24), 2 ** 24) 221 | t.is(b.findLast(true, 0), 0) 222 | t.is(b.findLast(true, 2 ** 24), 2 ** 24 - 1) 223 | 224 | t.comment('Page boundaries') 225 | t.is(b.findLast(true, 2 ** 15), 2 ** 15) 226 | t.is(b.findLast(true, 2 ** 15 - 1), 2 ** 15 - 1) 227 | t.is(b.findLast(true, 2 ** 15 + 1), 2 ** 15 + 1) 228 | t.is(b.findLast(true, 2 ** 16), 2 ** 16) 229 | t.is(b.findLast(true, 2 ** 16 - 1), 2 ** 16 - 1) 230 | t.is(b.findLast(true, 2 ** 16 + 1), 2 ** 16 + 1) 231 | 232 | t.comment('Segment boundaries') 233 | t.is(b.findLast(true, 2 ** 21), 2 ** 21) 234 | t.is(b.findLast(true, 2 ** 21 - 1), 2 ** 21 - 1) 235 | t.is(b.findLast(true, 2 ** 21 + 1), 2 ** 21 + 1) 236 | t.is(b.findLast(true, 2 ** 22), 2 ** 22) 237 | t.is(b.findLast(true, 2 ** 22 - 1), 2 ** 22 - 1) 238 | t.is(b.findLast(true, 2 ** 22 + 1), 2 ** 22 + 1) 239 | }) 240 | 241 | test('bitfield - find last, ones around page boundary', async function (t) { 242 | const s = await createStorage(t) 243 | const b = await Bitfield.open(s, 0) 244 | 245 | b.set(32767, true) 246 | b.set(32768, true) 247 | 248 | t.is(b.lastUnset(32768), 32766) 249 | t.is(b.lastUnset(32769), 32769) 250 | }) 251 | 252 | test('bitfield - set range on page boundary', async function (t) { 253 | const s = await createStorage(t) 254 | const b = await Bitfield.open(s, 0) 255 | 256 | b.setRange(2032, 2058, true) 257 | 258 | t.is(b.findFirst(true, 2048), 2048) 259 | }) 260 | 261 | test('set last bits in segment and findFirst', async function (t) { 262 | const s = await createStorage(t) 263 | const b = await Bitfield.open(s, 0) 264 | 265 | b.set(2097150, true) 266 | 267 | t.is(b.findFirst(false, 2097150), 2097151) 268 | 269 | b.set(2097151, true) 270 | 271 | t.is(b.findFirst(false, 2097150), 2097152) 272 | t.is(b.findFirst(false, 2097151), 2097152) 273 | }) 274 | 275 | test('bitfield - setRange over multiple pages', async function (t) { 276 | const storage = await createStorage(t) 277 | const b = await Bitfield.open(storage, 0) 278 | 279 | b.setRange(32768, 32769, true) 280 | 281 | t.is(b.get(0), false) 282 | t.is(b.get(32768), true) 283 | t.is(b.get(32769), false) 284 | 285 | b.setRange(0, 32768 * 2, false) 286 | b.setRange(32768, 32768 * 2 + 1, true) 287 | 288 | t.is(b.get(0), false) 289 | t.is(b.get(32768), true) 290 | 
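// 32768 * 2 is 2 ** 16: setRange(32768, 32768 * 2 + 1, true) ends one bit past
// a page boundary, so the asserts below check that the last bit inside the
// range (2 ** 16) was set while the first bit past it (2 ** 16 + 1) was not.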
t.is(b.get(32768 * 2), true) 291 | t.is(b.get(32768 * 2 + 1), false) 292 | }) 293 | 294 | async function createStorage (t, dir) { 295 | if (!dir) dir = await createTempDir(t) 296 | 297 | const db = new CoreStorage(dir) 298 | 299 | t.teardown(() => db.close()) 300 | 301 | const dkey = b4a.alloc(32) 302 | 303 | return (await db.resume(dkey)) || (await db.create({ key: dkey, discoveryKey: dkey })) 304 | } 305 | 306 | async function flush (s, b, bitfield) { 307 | const tx = s.write() 308 | b.flush(tx, bitfield) 309 | await tx.flush() 310 | } 311 | -------------------------------------------------------------------------------- /test/clear.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const tmp = require('test-tmp') 3 | const b4a = require('b4a') 4 | const CoreStorage = require('hypercore-storage') 5 | const { create, createStorage, replicate, eventFlush } = require('./helpers') 6 | 7 | const Hypercore = require('../') 8 | 9 | test('clear', async function (t) { 10 | const a = await create(t) 11 | await a.append(['a', 'b', 'c']) 12 | 13 | t.is(a.contiguousLength, 3) 14 | 15 | await a.clear(1) 16 | 17 | t.is(a.contiguousLength, 1, 'contig updated') 18 | 19 | t.ok(await a.has(0), 'has 0') 20 | t.absent(await a.has(1), 'has not 1') 21 | t.ok(await a.has(2), 'has 2') 22 | 23 | await a.close() 24 | }) 25 | 26 | test('clear + replication', async function (t) { 27 | const a = await create(t) 28 | const b = await create(t, a.key) 29 | 30 | replicate(a, b, t) 31 | 32 | await a.append(['a', 'b', 'c']) 33 | await b.download({ start: 0, end: 3 }).done() 34 | 35 | await a.clear(1) 36 | 37 | t.absent(await a.has(1), 'a cleared') 38 | t.ok(await b.has(1), 'b not cleared') 39 | 40 | t.alike(await a.get(1), b4a.from('b'), 'a downloaded from b') 41 | 42 | await a.close() 43 | await b.close() 44 | }) 45 | 46 | test('clear + replication, gossip', async function (t) { 47 | const a = await create(t) 48 | const b = await create(t, a.key) 49 | const c = await create(t, a.key) 50 | 51 | replicate(a, b, t) 52 | replicate(b, c, t) 53 | 54 | await a.append(['a', 'b', 'c']) 55 | await b.download({ start: 0, end: 3 }).done() 56 | await c.update() 57 | 58 | await b.clear(1) 59 | 60 | t.ok(await a.has(1), 'a not cleared') 61 | t.absent(await b.has(1), 'b cleared') 62 | 63 | let resolved = false 64 | 65 | const req = c.get(1) 66 | req.then(() => (resolved = true)) 67 | 68 | await eventFlush() 69 | t.absent(resolved, 'c not downloaded') 70 | 71 | t.alike(await b.get(1), b4a.from('b'), 'b downloaded from a') 72 | t.alike(await req, b4a.from('b'), 'c downloaded from b') 73 | }) 74 | 75 | test('incorrect clear', async function (t) { 76 | const core = await create(t) 77 | 78 | const blocks = [] 79 | while (blocks.length < 129) { 80 | blocks.push(b4a.from('tick')) 81 | } 82 | 83 | await core.append(blocks) 84 | await core.clear(127, 128) 85 | 86 | t.absent(await core.has(127)) 87 | t.ok(await core.has(128)) 88 | t.alike(await core.get(128), b4a.from('tick')) 89 | }) 90 | 91 | test('clear blocks with diff option', async function (t) { 92 | const storage = await createStorage(t) 93 | const core = new Hypercore(storage) 94 | await core.append(b4a.alloc(128)) 95 | 96 | const cleared = await core.clear(1337) 97 | t.is(cleared, null) 98 | 99 | // todo: reenable bytes use api 100 | 101 | // const cleared2 = await core.clear(0, { diff: true }) 102 | // t.ok(cleared2.blocks > 0) 103 | 104 | // const cleared3 = await core.clear(0, { diff: true }) 105 | // 
t.is(cleared3.blocks, 0) 106 | 107 | await core.close() 108 | }) 109 | 110 | test('clear - no side effect from clearing unknown nodes', async function (t) { 111 | const storageWriter = await tmp(t) 112 | const storageReader = await tmp(t) 113 | 114 | const writer1 = new Hypercore(storageWriter) 115 | await writer1.append(['a', 'b', 'c', 'd']) // => 'Error: Could not load node: 1' 116 | 117 | const clone = new Hypercore(storageReader, writer1.key) 118 | await clone.ready() 119 | 120 | // Needs replicate and the three clears for error to happen 121 | replicate(writer1, clone, t) 122 | await clone.clear(0) 123 | await clone.clear(1) 124 | await clone.clear(2) 125 | 126 | await writer1.close() 127 | await clone.close() 128 | 129 | t.pass('did not crash') 130 | }) 131 | 132 | test('clear - large cores', async function (t) { 133 | t.timeout(100000) 134 | const dir = await t.tmp() 135 | 136 | const db = new CoreStorage(dir) 137 | const a = new Hypercore(db) 138 | await a.ready() 139 | t.teardown(() => a.close(), { order: 1 }) 140 | 141 | const blocks = [] 142 | for (let i = 0; i < 300_000; i++) blocks.push(`Block-${i}`) 143 | await a.append(blocks) 144 | 145 | t.is(a.contiguousLength, 300_000, 'sanity check') 146 | { 147 | const storageBlocks = await consumeStream(a.state.storage.createBlockStream()) 148 | t.is(storageBlocks.length, 300_000, 'storage-level sanity check') 149 | } 150 | 151 | await a.clear(100, 1000) 152 | await a.clear(2 ** 16 - 10, 2 ** 16 + 10) // 2 ** 16 is when the bitfield first changes pages, so an interesting area to test 153 | await a.clear(290000, 299998) 154 | 155 | t.is(b4a.toString(await a.get(99)), 'Block-99') 156 | t.is(await a.get(100, { wait: false }), null) 157 | t.is(await a.get(999, { wait: false }), null) 158 | t.is(b4a.toString(await a.get(1000)), 'Block-1000') 159 | { 160 | const storageBlocks = await consumeStream(a.state.storage.createBlockStream({ gte: 99, lte: 1000 })) 161 | t.alike(storageBlocks.map(b => b.index), [99, 1000], 'correct state in hypercore storage') 162 | } 163 | 164 | t.is(b4a.toString(await a.get(2 ** 16 - 11)), 'Block-65525') 165 | t.is(await a.get(2 ** 16 - 10, { wait: false }), null) 166 | t.is(await a.get(2 ** 16 + 9, { wait: false }), null) 167 | t.is(b4a.toString(await a.get(2 ** 16 + 10)), 'Block-65546') 168 | { 169 | const storageBlocks = await consumeStream(a.state.storage.createBlockStream({ gte: 2 ** 16 - 11, lte: 2 ** 16 + 10 })) 170 | t.alike(storageBlocks.map(b => b.index), [65525, 65546], 'correct state in hypercore storage') 171 | } 172 | 173 | t.is(b4a.toString(await a.get(290000 - 1)), 'Block-289999') 174 | t.is(await a.get(290000, { wait: false }), null) 175 | t.is(await a.get(299997, { wait: false }), null) 176 | t.is(b4a.toString(await a.get(299998)), 'Block-299998') 177 | { 178 | const storageBlocks = await consumeStream(a.state.storage.createBlockStream({ gte: 289999, lte: 299998 })) 179 | t.alike(storageBlocks.map(b => b.index), [289999, 299998], 'correct state in hypercore storage') 180 | } 181 | }) 182 | 183 | async function consumeStream (rx) { 184 | const res = [] 185 | for await (const b of rx) res.push(b) 186 | return res 187 | } 188 | -------------------------------------------------------------------------------- /test/compat.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const path = require('path') 3 | const RAF = require('random-access-file') 4 | const RAO = require('random-access-memory-overlay') 5 | const b4a = require('b4a') 6 |
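// The ABI fixtures are opened through random-access-memory-overlay wrapped
// around random-access-file, presumably so that any writes made while reading
// the legacy on-disk format stay in memory and the checked-in fixture files
// are never mutated. Note that v10.4.1-partial ships only data and oplog
// files, which appears to exercise opening a core without bitfield and tree.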
const Hypercore = require('..') 7 | 8 | const abis = [ 9 | 'v10.0.0-alpha.39', 10 | 'v10.4.1', 11 | 'v10.4.1-partial' 12 | ] 13 | 14 | for (const abi of abis) { 15 | const root = path.join(__dirname, 'fixtures', 'abi', abi) 16 | 17 | test(abi, async function (t) { 18 | const core = new Hypercore((file) => new RAO(new RAF(path.join(root, file)))) 19 | await core.ready() 20 | 21 | t.is(core.length, 1000, 'lengths match') 22 | t.is(core.contiguousLength, 1000, 'contiguous lengths match') 23 | 24 | for (let i = 0; i < 1000; i++) { 25 | const block = await core.get(i) 26 | 27 | if (!b4a.equals(block, b4a.from([i]))) { 28 | return t.fail(`block ${i} diverges`) 29 | } 30 | } 31 | 32 | t.pass('blocks match') 33 | 34 | await core.close() 35 | }) 36 | } 37 | -------------------------------------------------------------------------------- /test/conflicts.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const { create, replicate, unreplicate } = require('./helpers') 3 | 4 | test.skip('one forks', async function (t) { 5 | // NOTE: skipped because this test occasionally (~1/100) flakes 6 | // because one of the 'conflict' events never emits 7 | // due to a lifecycle issue (when closing all sessions 8 | // on a core in reaction to the conflict) 9 | t.plan(3) 10 | 11 | const a = await create(t) 12 | await a.append(['a', 'b', 'c', 'd', 'e']) 13 | 14 | a.core.name = 'a' 15 | 16 | const b = await create(t, a.key) 17 | b.core.name = 'b' 18 | 19 | const c = await create(t, { keyPair: a.core.header.keyPair }) 20 | await c.append(['a', 'b', 'c', 'd', 'f', 'e']) 21 | c.core.name = 'c' 22 | 23 | const streams = replicate(a, b, t) 24 | 25 | // Note: 'conflict' can be emitted more than once (no guarantees on that) 26 | c.once('conflict', function (length) { 27 | t.is(length, 5, 'conflict at 5 seen by c') 28 | }) 29 | 30 | b.once('conflict', function (length) { 31 | t.is(length, 5, 'conflict at 5 seen by b') 32 | }) 33 | 34 | await b.get(2) 35 | 36 | await unreplicate(streams) 37 | 38 | replicate(c, b, t) 39 | 40 | await t.exception(b.get(4)) 41 | }) 42 | -------------------------------------------------------------------------------- /test/core.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | const createTempDir = require('test-tmp') 4 | const CoreStorage = require('hypercore-storage') 5 | const { MerkleTree } = require('../lib/merkle-tree') 6 | const Core = require('../lib/core') 7 | 8 | test('core - append', async function (t) { 9 | const { core } = await create(t) 10 | 11 | { 12 | const info = await core.state.append([ 13 | b4a.from('hello'), 14 | b4a.from('world') 15 | ]) 16 | 17 | t.alike(info, { length: 2, byteLength: 10 }) 18 | t.is(core.state.length, 2) 19 | t.is(core.state.byteLength, 10) 20 | t.alike([ 21 | await getBlock(core, 0), 22 | await getBlock(core, 1) 23 | ], [ 24 | b4a.from('hello'), 25 | b4a.from('world') 26 | ]) 27 | } 28 | 29 | { 30 | const info = await core.state.append([ 31 | b4a.from('hej') 32 | ]) 33 | 34 | t.alike(info, { length: 3, byteLength: 13 }) 35 | t.is(core.state.length, 3) 36 | t.is(core.state.byteLength, 13) 37 | t.alike([ 38 | await getBlock(core, 0), 39 | await getBlock(core, 1), 40 | await getBlock(core, 2) 41 | ], [ 42 | b4a.from('hello'), 43 | b4a.from('world'), 44 | b4a.from('hej') 45 | ]) 46 | } 47 | }) 48 | 49 | test('core - append and truncate', async function (t) { 50 | const { core, 
reopen } = await create(t) 51 | 52 | await core.state.append([ 53 | b4a.from('hello'), 54 | b4a.from('world'), 55 | b4a.from('fo'), 56 | b4a.from('ooo') 57 | ]) 58 | 59 | t.is(core.state.lastTruncation, null) 60 | 61 | await core.state.truncate(3, 1) 62 | 63 | t.is(core.state.lastTruncation.from, 4) 64 | t.is(core.state.lastTruncation.to, 3) 65 | 66 | t.is(core.state.length, 3) 67 | t.is(core.state.byteLength, 12) 68 | t.is(core.state.fork, 1) 69 | 70 | await core.state.append([ 71 | b4a.from('a'), 72 | b4a.from('b'), 73 | b4a.from('c'), 74 | b4a.from('d') 75 | ]) 76 | 77 | await core.state.truncate(3, 2) 78 | 79 | t.is(core.state.lastTruncation.from, 7) 80 | t.is(core.state.lastTruncation.to, 3) 81 | 82 | t.is(core.state.length, 3) 83 | t.is(core.state.byteLength, 12) 84 | t.is(core.state.fork, 2) 85 | 86 | await core.state.truncate(2, 3) 87 | t.is(core.state.lastTruncation.from, 3) 88 | t.is(core.state.lastTruncation.to, 2) 89 | 90 | await core.state.append([b4a.from('a')]) 91 | t.is(core.state.lastTruncation, null) 92 | 93 | await core.state.truncate(2, 4) 94 | t.is(core.state.lastTruncation.from, 3) 95 | t.is(core.state.lastTruncation.to, 2) 96 | 97 | await core.state.append([b4a.from('a')]) 98 | t.is(core.state.lastTruncation, null) 99 | 100 | await core.state.truncate(2, 5) 101 | t.is(core.state.lastTruncation.from, 3) 102 | t.is(core.state.lastTruncation.to, 2) 103 | 104 | await core.state.append([b4a.from('a')]) 105 | t.is(core.state.lastTruncation, null) 106 | 107 | await core.state.truncate(2, 6) 108 | t.is(core.state.lastTruncation.from, 3) 109 | t.is(core.state.lastTruncation.to, 2) 110 | 111 | await core.state.append([b4a.from('a')]) 112 | t.is(core.state.lastTruncation, null) 113 | 114 | await core.state.truncate(2, 7) 115 | t.is(core.state.lastTruncation.from, 3) 116 | t.is(core.state.lastTruncation.to, 2) 117 | 118 | // check that it was persisted 119 | const coreReopen = await reopen() 120 | 121 | t.is(coreReopen.state.length, 2) 122 | t.is(coreReopen.state.byteLength, 10) 123 | t.is(coreReopen.state.fork, 7) 124 | t.is(coreReopen.state.lastTruncation, null) 125 | // t.is(coreReopen.header.hints.reorgs.length, 4) 126 | }) 127 | 128 | test('core - user data', async function (t) { 129 | const { core, reopen } = await create(t) 130 | 131 | await putUserData(core.storage, 'hello', b4a.from('world')) 132 | 133 | for await (const { key, value } of core.createUserDataStream()) { 134 | t.alike(key, 'hello') 135 | t.alike(value, b4a.from('world')) 136 | } 137 | 138 | t.is(await countEntries(core.createUserDataStream({ gte: 'x', lt: 'z' })), 0) 139 | 140 | await putUserData(core.storage, 'hej', b4a.from('verden')) 141 | 142 | t.is(await countEntries(core.createUserDataStream()), 2) 143 | 144 | for await (const { key, value } of core.createUserDataStream({ gte: 'hello' })) { 145 | t.alike(key, 'hello') 146 | t.alike(value, b4a.from('world')) 147 | } 148 | 149 | await putUserData(core.storage, 'hello', null) 150 | 151 | t.is(await countEntries(core.createUserDataStream()), 1) 152 | t.is(await countEntries(core.createUserDataStream({ gte: 'hello' })), 0) 153 | 154 | await putUserData(core.storage, 'hej', b4a.from('world')) 155 | 156 | // check that it was persisted 157 | const coreReopen = await reopen() 158 | 159 | for await (const { key, value } of coreReopen.createUserDataStream()) { 160 | t.alike(key, 'hej') 161 | t.alike(value, b4a.from('world')) 162 | } 163 | 164 | t.is(await countEntries(coreReopen.createUserDataStream({ gte: 'hello' })), 0) 165 | 166 | function putUserData 
(storage, key, value) { 167 | const tx = storage.write() 168 | tx.putUserData(key, value) 169 | return tx.flush() 170 | } 171 | 172 | async function countEntries (stream) { 173 | let count = 0 174 | // eslint-disable-next-line no-unused-vars 175 | for await (const entry of stream) count++ 176 | return count 177 | } 178 | }) 179 | 180 | test('core - header does not retain slabs', async function (t) { 181 | const { core, reopen } = await create(t) 182 | 183 | t.is(core.header.key.buffer.byteLength, 32, 'unslabbed key') 184 | t.is(core.header.keyPair.publicKey.buffer.byteLength, 32, 'unslabbed public key') 185 | t.is(core.header.keyPair.secretKey.buffer.byteLength, 64, 'unslabbed private key') 186 | t.is(core.header.manifest.signers[0].namespace.buffer.byteLength, 32, 'unslabbed signers namespace') 187 | t.is(core.header.manifest.signers[0].publicKey.buffer.byteLength, 32, 'unslabbed signers publicKey') 188 | 189 | // check the different code path when re-opening 190 | const coreReopen = await reopen() 191 | 192 | t.is(coreReopen.header.key.buffer.byteLength, 32, 'reopen unslabbed key') 193 | t.is(coreReopen.header.keyPair.publicKey.buffer.byteLength, 32, 'reopen unslabbed public key') 194 | t.is(coreReopen.header.keyPair.secretKey.buffer.byteLength, 64, 'reopen unslabbed secret key') 195 | t.is(coreReopen.header.manifest.signers[0].namespace.buffer.byteLength, 32, 'reopen unslabbed signers namespace') 196 | t.is(coreReopen.header.manifest.signers[0].publicKey.buffer.byteLength, 32, 'reopen unslabbed signers publicKey') 197 | 198 | await coreReopen.close() 199 | }) 200 | 201 | test('core - verify', async function (t) { 202 | const { core } = await create(t) 203 | const { core: clone } = await create(t, { keyPair: { publicKey: core.header.keyPair.publicKey } }) 204 | 205 | t.is(clone.header.keyPair.publicKey, core.header.keyPair.publicKey) 206 | 207 | await core.state.append([b4a.from('a'), b4a.from('b')]) 208 | 209 | { 210 | const p = await getProof(core, { upgrade: { start: 0, length: 2 } }) 211 | await clone.verify(p) 212 | } 213 | 214 | const tree1 = await getCoreHead(core.storage) 215 | const tree2 = await getCoreHead(clone.storage) 216 | 217 | t.is(tree1.length, 2) 218 | t.alike(tree1.signature, tree2.signature) 219 | 220 | { 221 | const nodes = await MerkleTree.missingNodes(clone.state, 2, clone.state.length) 222 | const p = await getProof(core, { block: { index: 1, nodes, value: true } }) 223 | await clone.verify(p) 224 | } 225 | }) 226 | 227 | test('core - verify parallel upgrades', async function (t) { 228 | const { core } = await create(t) 229 | const { core: clone } = await create(t, { keyPair: { publicKey: core.header.keyPair.publicKey } }) 230 | 231 | t.is(clone.header.keyPair.publicKey, core.header.keyPair.publicKey) 232 | 233 | await core.state.append([b4a.from('a'), b4a.from('b'), b4a.from('c'), b4a.from('d')]) 234 | 235 | { 236 | const p1 = await getProof(core, { upgrade: { start: 0, length: 2 } }) 237 | const p2 = await getProof(core, { upgrade: { start: 0, length: 3 } }) 238 | 239 | const v1 = clone.verify(p1) 240 | const v2 = clone.verify(p2) 241 | 242 | await v1 243 | await v2 244 | } 245 | 246 | const tree1 = await getCoreHead(core.storage) 247 | const tree2 = await getCoreHead(clone.storage) 248 | 249 | t.is(tree2.length, tree1.length) 250 | t.alike(tree2.signature, tree1.signature) 251 | }) 252 | 253 | test('core - clone', async function (t) { 254 | const { core } = await create(t) 255 | 256 | await core.state.append([ 257 | b4a.from('hello'), 258 | b4a.from('world') 
259 | ]) 260 | 261 | const manifest = { prologue: { hash: await core.state.hash(), length: core.state.length } } 262 | const { core: copy } = (await create(t, { manifest })) 263 | 264 | await copy.copyPrologue(core.state) 265 | 266 | t.alike([ 267 | await getBlock(copy, 0), 268 | await getBlock(copy, 1) 269 | ], [ 270 | b4a.from('hello'), 271 | b4a.from('world') 272 | ]) 273 | 274 | const signature = copy.state.signature 275 | const roots = copy.state.roots.map(r => r.index) 276 | 277 | for (let i = 0; i <= core.state.length * 2; i++) { 278 | t.alike( 279 | await MerkleTree.get(copy.state, i, false), 280 | await MerkleTree.get(core.state, i, false) 281 | ) 282 | } 283 | 284 | await core.state.append([b4a.from('c')]) 285 | 286 | // copy should be independent 287 | t.alike(copy.state.signature, signature) 288 | t.alike(copy.state.roots.map(r => r.index), roots) 289 | t.is(copy.header.hints.contiguousLength, 2) 290 | }) 291 | 292 | test('core - clone verify', async function (t) { 293 | const { core } = await create(t) 294 | 295 | await core.state.append([b4a.from('a'), b4a.from('b')]) 296 | 297 | const manifest = { prologue: { hash: await core.state.hash(), length: core.state.length } } 298 | const { core: copy } = await create(t, { manifest }) 299 | const { core: clone } = await create(t, { manifest }) 300 | 301 | await copy.copyPrologue(core.state) 302 | 303 | // copy should be independent 304 | await core.state.append([b4a.from('c')]) 305 | 306 | { 307 | const p = await getProof(copy, { upgrade: { start: 0, length: 2 } }) 308 | t.ok(await clone.verify(p)) 309 | } 310 | 311 | t.is(clone.header.tree.length, 2) 312 | 313 | { 314 | const nodes = await MerkleTree.missingNodes(clone.state, 2, clone.state.length) 315 | const p = await getProof(copy, { block: { index: 1, nodes, value: true } }) 316 | p.block.value = await getBlock(copy, 1) 317 | await clone.verify(p) 318 | } 319 | 320 | t.is(core.header.hints.contiguousLength, 3) 321 | t.is(copy.header.hints.contiguousLength, 2) 322 | t.is(clone.header.hints.contiguousLength, 0) 323 | 324 | t.pass('verified') 325 | }) 326 | 327 | test('core - partial clone', async function (t) { 328 | const { core } = await create(t) 329 | 330 | await core.state.append([b4a.from('0')]) 331 | await core.state.append([b4a.from('1')]) 332 | 333 | const manifest = { prologue: { hash: await core.state.hash(), length: core.state.length } } 334 | 335 | await core.state.append([b4a.from('2')]) 336 | await core.state.append([b4a.from('3')]) 337 | 338 | const { core: copy } = (await create(t, { manifest })) 339 | 340 | await copy.copyPrologue(core.state) 341 | 342 | t.is(core.state.length, 4) 343 | t.is(copy.state.length, 2) 344 | 345 | t.is(core.header.hints.contiguousLength, 4) 346 | t.is(copy.header.hints.contiguousLength, 2) 347 | 348 | t.alike([ 349 | await getBlock(copy, 0), 350 | await getBlock(copy, 1), 351 | await getBlock(copy, 2) 352 | ], [ 353 | b4a.from('0'), 354 | b4a.from('1'), 355 | null 356 | ]) 357 | }) 358 | 359 | test('core - copyPrologue bails if core is not the same', async function (t) { 360 | const { core } = await create(t) 361 | const { core: copy } = await create(t, { manifest: { prologue: { hash: b4a.alloc(32), length: 1 } } }) 362 | 363 | // copy should be independent 364 | await core.state.append([b4a.from('a')]) 365 | 366 | await t.exception(copy.copyPrologue(core.state)) 367 | 368 | t.is(copy.header.hints.contiguousLength, 0) 369 | }) 370 | 371 | test('core - copyPrologue many', async function (t) { 372 | const { core } = await create(t, { 
compat: false, version: 1 }) 373 | await core.state.append([b4a.from('a'), b4a.from('b')]) 374 | 375 | const manifest = { ...core.header.manifest } 376 | manifest.prologue = { length: core.state.length, hash: core.state.hash() } 377 | 378 | const { core: copy } = await create(t, { manifest }) 379 | const { core: copy2 } = await create(t, { manifest }) 380 | const { core: copy3 } = await create(t, { manifest }) 381 | 382 | await copy.copyPrologue(core.state) 383 | 384 | t.alike(copy.header.manifest.signers[0].publicKey, core.header.manifest.signers[0].publicKey) 385 | 386 | t.is(copy.state.length, core.state.length) 387 | t.is(copy.state.byteLength, core.state.byteLength) 388 | 389 | // copy should be independent 390 | await core.state.append([b4a.from('c')]) 391 | 392 | // upgrade clone 393 | { 394 | const batch = core.state.createTreeBatch() 395 | const p = await getProof(core, { upgrade: { start: 0, length: 3 } }) 396 | p.upgrade.signature = copy2.verifier.sign(batch, core.header.keyPair) 397 | t.ok(await copy2.verify(p)) 398 | } 399 | 400 | await t.execution(copy2.copyPrologue(core.state)) 401 | await t.execution(copy3.copyPrologue(core.state)) 402 | 403 | t.is(copy2.state.length, core.state.length) 404 | t.is(copy.state.length, copy3.state.length) 405 | 406 | t.is(copy2.header.tree.length, core.header.tree.length) 407 | t.is(copy.header.tree.length, copy3.header.tree.length) 408 | 409 | t.is(copy2.state.byteLength, core.state.byteLength) 410 | t.is(copy.state.byteLength, copy3.state.byteLength) 411 | 412 | manifest.prologue = { length: core.state.length, hash: core.state.hash() } 413 | const { core: copy4 } = await create(t, { manifest }) 414 | await copy4.copyPrologue(copy2.state) 415 | 416 | t.is(copy4.state.length, 3) 417 | t.is(copy4.header.tree.length, 3) 418 | 419 | t.is(core.header.hints.contiguousLength, 3) 420 | t.is(copy.header.hints.contiguousLength, 2) 421 | t.is(copy2.header.hints.contiguousLength, 2) 422 | t.is(copy3.header.hints.contiguousLength, 2) 423 | t.is(copy4.header.hints.contiguousLength, 2) 424 | 425 | t.alike(await getBlock(copy4, 0), b4a.from('a')) 426 | t.alike(await getBlock(copy4, 1), b4a.from('b')) 427 | }) 428 | 429 | async function create (t, opts = {}) { 430 | const dir = opts.dir || await createTempDir(t) 431 | 432 | let db = null 433 | 434 | t.teardown(teardown, { order: 1 }) 435 | 436 | const reopen = async () => { 437 | if (db) await db.close() 438 | 439 | db = new CoreStorage(dir) 440 | 441 | const core = new Core(db, opts) 442 | await core.ready() 443 | t.teardown(() => core.close()) 444 | return core 445 | } 446 | 447 | const core = await reopen() 448 | 449 | return { core, reopen } 450 | 451 | async function teardown () { 452 | if (db) await db.close() 453 | } 454 | } 455 | 456 | async function getBlock (core, i) { 457 | const r = core.storage.read() 458 | const p = r.getBlock(i) 459 | r.tryFlush() 460 | return p 461 | } 462 | 463 | async function getProof (core, req) { 464 | const batch = core.storage.read() 465 | const p = await MerkleTree.proof(core.state, batch, req) 466 | const block = req.block ? 
batch.getBlock(req.block.index) : null 467 | batch.tryFlush() 468 | const proof = await p.settle() 469 | if (block) proof.block.value = await block 470 | return proof 471 | } 472 | 473 | function getCoreHead (storage) { 474 | const b = storage.read() 475 | const p = b.getHead() 476 | b.tryFlush() 477 | return p 478 | } 479 | -------------------------------------------------------------------------------- /test/encodings.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | const { create } = require('./helpers') 4 | 5 | test('encodings - supports built ins', async function (t) { 6 | const a = await create(t, null, { valueEncoding: 'json' }) 7 | 8 | await a.append({ hello: 'world' }) 9 | t.alike(await a.get(0), { hello: 'world' }) 10 | t.alike(await a.get(0, { valueEncoding: 'utf-8' }), '{"hello":"world"}') 11 | }) 12 | 13 | test('encodings - supports custom encoding', async function (t) { 14 | const a = await create(t, null, { valueEncoding: { encode () { return b4a.from('foo') }, decode () { return 'bar' } } }) 15 | 16 | await a.append({ hello: 'world' }) 17 | t.is(await a.get(0), 'bar') 18 | t.alike(await a.get(0, { valueEncoding: 'utf-8' }), 'foo') 19 | }) 20 | 21 | test('encodings - supports custom batch encoding', async function (t) { 22 | const a = await create(t, null, { 23 | encodeBatch: batch => { 24 | return [b4a.from(batch.join('-'))] 25 | }, 26 | valueEncoding: 'utf-8' 27 | }) 28 | await a.append(['a', 'b', 'c']) 29 | await a.append(['d', 'e']) 30 | await a.append('f') 31 | 32 | t.is(await a.get(0), 'a-b-c') 33 | t.is(await a.get(1), 'd-e') 34 | t.is(await a.get(2), 'f') 35 | }) 36 | -------------------------------------------------------------------------------- /test/encryption.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | const crypto = require('hypercore-crypto') 4 | const Hypercore = require('..') 5 | const { create, createStorage, replicate } = require('./helpers') 6 | 7 | const fixturesRaw = require('./fixtures/encryption/v11.0.48.cjs') 8 | 9 | const encryptionKey = b4a.alloc(32, 'hello world') 10 | 11 | test('encrypted append and get', async function (t) { 12 | const a = await create(t, { encryption: { key: encryptionKey } }) 13 | 14 | t.ok(a.encryption) 15 | 16 | await a.append(['hello']) 17 | 18 | const info = await a.info() 19 | t.is(info.byteLength, 5) 20 | t.is(a.core.state.byteLength, 5 + a.padding) 21 | 22 | const unencrypted = await a.get(0) 23 | t.alike(unencrypted, b4a.from('hello')) 24 | 25 | const encrypted = await getBlock(a, 0) 26 | t.absent(encrypted.includes('hello')) 27 | }) 28 | 29 | test('get with decrypt option', async function (t) { 30 | const a = await create(t, { encryption: { key: encryptionKey } }) 31 | 32 | await a.append('hello') 33 | 34 | const unencrypted = await a.get(0, { decrypt: true }) 35 | t.alike(unencrypted, b4a.from('hello')) 36 | 37 | const encrypted = await a.get(0, { decrypt: false }) 38 | t.absent(encrypted.includes('hello')) 39 | }) 40 | 41 | test('encrypted seek', async function (t) { 42 | const a = await create(t, { encryption: { key: encryptionKey } }) 43 | 44 | await a.append(['hello', 'world', '!']) 45 | 46 | t.alike(await a.seek(0), [0, 0]) 47 | t.alike(await a.seek(4), [0, 4]) 48 | t.alike(await a.seek(5), [1, 0]) 49 | t.alike(await a.seek(6), [1, 1]) 50 | t.alike(await a.seek(6), [1, 1]) 51 | t.alike(await 
a.seek(9), [1, 4]) 52 | t.alike(await a.seek(10), [2, 0]) 53 | t.alike(await a.seek(11), [3, 0]) 54 | }) 55 | 56 | test('encrypted replication', async function (t) { 57 | const a = await create(t, { encryption: { key: encryptionKey } }) 58 | 59 | await a.append(['a', 'b', 'c', 'd', 'e']) 60 | 61 | await t.test('with encryption key', async function (t) { 62 | const b = await create(t, a.key, { encryption: { key: encryptionKey } }) 63 | 64 | replicate(a, b, t) 65 | 66 | await t.test('through direct download', async function (t) { 67 | const r = b.download({ start: 0, length: a.length }) 68 | await r.done() 69 | 70 | for (let i = 0; i < 5; i++) { 71 | t.alike(await b.get(i), await a.get(i)) 72 | } 73 | }) 74 | 75 | await t.test('through indirect download', async function (t) { 76 | await a.append(['f', 'g', 'h', 'i', 'j']) 77 | 78 | for (let i = 5; i < 10; i++) { 79 | t.alike(await b.get(i), await a.get(i)) 80 | } 81 | 82 | await a.truncate(5) 83 | }) 84 | }) 85 | 86 | await t.test('without encryption key', async function (t) { 87 | const b = await create(t, a.key) 88 | 89 | replicate(a, b, t) 90 | 91 | await t.test('through direct download', async function (t) { 92 | const r = b.download({ start: 0, length: a.length }) 93 | await r.done() 94 | 95 | for (let i = 0; i < 5; i++) { 96 | t.alike(await b.get(i), await getBlock(a, i)) 97 | } 98 | }) 99 | 100 | await t.test('through indirect download', async function (t) { 101 | await a.append(['f', 'g', 'h', 'i', 'j']) 102 | 103 | for (let i = 5; i < 10; i++) { 104 | t.alike(await b.get(i), await getBlock(a, i)) 105 | } 106 | 107 | await a.truncate(5) 108 | }) 109 | }) 110 | }) 111 | 112 | test('encrypted seek via replication', async function (t) { 113 | const a = await create(t, { encryption: { key: encryptionKey } }) 114 | const b = await create(t, a.key, { encryption: { key: encryptionKey } }) 115 | 116 | await a.append(['hello', 'world', '!']) 117 | 118 | replicate(a, b, t) 119 | 120 | t.alike(await b.seek(0), [0, 0]) 121 | t.alike(await b.seek(4), [0, 4]) 122 | t.alike(await b.seek(5), [1, 0]) 123 | t.alike(await b.seek(6), [1, 1]) 124 | t.alike(await b.seek(6), [1, 1]) 125 | t.alike(await b.seek(9), [1, 4]) 126 | t.alike(await b.seek(10), [2, 0]) 127 | t.alike(await b.seek(11), [3, 0]) 128 | }) 129 | 130 | test('encrypted session', async function (t) { 131 | const a = await create(t, { encryption: { key: encryptionKey } }) 132 | 133 | await a.append(['hello']) 134 | 135 | const s = a.session() 136 | 137 | t.alike(a.encryptionKey, s.encryptionKey) 138 | t.alike(await s.get(0), b4a.from('hello')) 139 | 140 | await s.append(['world']) 141 | 142 | const unencrypted = await s.get(1) 143 | t.alike(unencrypted, b4a.from('world')) 144 | t.alike(await a.get(1), unencrypted) 145 | 146 | const encrypted = await getBlock(s, 1) 147 | t.absent(encrypted.includes('world')) 148 | t.alike(await getBlock(a, 1), encrypted) 149 | 150 | await s.close() 151 | }) 152 | 153 | test('encrypted session before ready core', async function (t) { 154 | const storage = await createStorage(t) 155 | 156 | const a = new Hypercore(storage, { encryption: { key: encryptionKey } }) 157 | const s = a.session() 158 | 159 | await a.ready() 160 | 161 | t.alike(a.encryptionKey, s.encryptionKey) 162 | 163 | await a.append(['hello']) 164 | t.alike(await s.get(0), b4a.from('hello')) 165 | 166 | await s.close() 167 | await a.close() 168 | }) 169 | 170 | test('encrypted session on unencrypted core', async function (t) { 171 | const a = await create(t) 172 | 173 | const s = a.session({ 
encryption: { key: encryptionKey }, debug: 'debug' }) 174 | 175 | t.ok(s.encryption) 176 | t.absent(a.encryption) 177 | 178 | await s.append(['hello']) 179 | 180 | const unencrypted = await s.get(0) 181 | t.alike(unencrypted, b4a.from('hello')) 182 | 183 | const encrypted = await a.get(0) 184 | t.absent(encrypted.includes('hello')) 185 | 186 | await s.close() 187 | }) 188 | 189 | test('encrypted session on encrypted core, same key', async function (t) { 190 | const a = await create(t, { encryption: { key: encryptionKey } }) 191 | const s = a.session({ encryption: { key: encryptionKey } }) 192 | 193 | t.alike(s.encryptionKey, a.encryptionKey) 194 | 195 | await s.append(['hello']) 196 | 197 | const unencrypted = await s.get(0) 198 | t.alike(unencrypted, b4a.from('hello')) 199 | t.alike(unencrypted, await a.get(0)) 200 | 201 | await s.close() 202 | }) 203 | 204 | test('multiple gets to replicated, encrypted block', async function (t) { 205 | const a = await create(t, { encryption: { key: encryptionKey } }) 206 | await a.append('a') 207 | 208 | const b = await create(t, a.key, { encryption: { key: encryptionKey } }) 209 | 210 | replicate(a, b, t) 211 | 212 | const p = b.get(0) 213 | const q = b.get(0) 214 | 215 | t.alike(await p, await q) 216 | t.alike(await p, b4a.from('a')) 217 | }) 218 | 219 | test('encrypted core from existing unencrypted core', async function (t) { 220 | const a = await create(t, { encryptionKey: null }) 221 | const b = new Hypercore({ core: a.core, encryption: { key: encryptionKey } }) 222 | 223 | t.alike(b.key, a.key) 224 | 225 | await b.append(['hello']) 226 | 227 | const unencrypted = await b.get(0) 228 | t.alike(unencrypted, b4a.from('hello')) 229 | 230 | await b.close() 231 | }) 232 | 233 | test('from session sessions pass encryption', async function (t) { 234 | const storage = await createStorage(t) 235 | 236 | const a = new Hypercore(storage) 237 | const b = new Hypercore({ core: a.core, encryption: { key: encryptionKey } }) 238 | const c = b.session() 239 | 240 | await a.ready() 241 | await b.ready() 242 | await c.ready() 243 | 244 | t.absent(a.encryption) 245 | t.ok(b.encryption) 246 | t.ok(c.encryption) 247 | 248 | await c.close() 249 | await b.close() 250 | await a.close() 251 | }) 252 | 253 | test('session keeps encryption', async function (t) { 254 | const storage = await createStorage(t) 255 | 256 | const a = new Hypercore(storage) 257 | const b = a.session({ encryption: { key: encryptionKey } }) 258 | await b.ready() 259 | 260 | await b.close() 261 | await a.close() 262 | }) 263 | 264 | // block encryption module is only available after bumping the manifest version 265 | test('block encryption module', async function (t) { 266 | class XOREncryption { 267 | padding () { 268 | return 0 269 | } 270 | 271 | async encrypt (index, block) { 272 | await new Promise(setImmediate) 273 | 274 | for (let i = 0; i < block.byteLength; i++) { 275 | block[i] ^= ((index + 1) & 0xff) // +1 so no 0 xor in test 276 | } 277 | } 278 | 279 | async decrypt (index, block) { 280 | await new Promise(setImmediate) 281 | 282 | for (let i = 0; i < block.byteLength; i++) { 283 | block[i] ^= ((index + 1) & 0xff) 284 | } 285 | } 286 | } 287 | 288 | const core = await create(t, null, { encryption: new XOREncryption() }) 289 | await core.ready() 290 | 291 | await core.append('0') 292 | await core.append('1') 293 | await core.append('2') 294 | 295 | t.unlike(await core.get(0, { raw: true }), b4a.from('0')) 296 | t.unlike(await core.get(1, { raw: true }), b4a.from('1')) 297 | t.unlike(await core.get(2, { raw: true }), b4a.from('2')) 298 | 299 | t.alike(await core.get(0), b4a.from('0')) 300 | t.alike(await core.get(1), b4a.from('1')) 301 | t.alike(await core.get(2), b4a.from('2')) 302 | }) 303 | 304 | test('encryption backwards compatibility', async function (t) { 305 | const encryptionKey = b4a.alloc(32).fill('encryption key') 306 | 307 | const compatKey = crypto.keyPair(b4a.alloc(32, 0)) 308 | const defaultKey = crypto.keyPair(b4a.alloc(32, 1)) 309 | const blockKey = crypto.keyPair(b4a.alloc(32, 2)) 310 | 311 | const fixtures = [ 312 | getFixture('compat'), 313 | getFixture('default'), 314 | getFixture('default'), 315 | getFixture('block') 316 | ] 317 | 318 | const compat = await create(t, null, { keyPair: compatKey, encryptionKey, compat: true }) 319 | const def = await create(t, null, { keyPair: defaultKey, encryptionKey, isBlockKey: false }) 320 | const notBlock = await create(t, null, { keyPair: defaultKey, encryptionKey, isBlockKey: false }) 321 | const block = await create(t, null, { keyPair: blockKey, encryptionKey, isBlockKey: true }) 322 | 323 | await compat.ready() 324 | await def.ready() 325 | await notBlock.ready() 326 | await block.ready() 327 | 328 | const largeBlock = Buffer.alloc(512) 329 | for (let i = 0; i < largeBlock.byteLength; i++) largeBlock[i] = i & 0xff 330 | 331 | for (let i = 0; i < 10; i++) { 332 | await compat.append('compat test: ' + i.toString()) 333 | await def.append('default test: ' + i.toString()) 334 | await notBlock.append('default test: ' + i.toString()) 335 | await block.append('block test: ' + i.toString()) 336 | } 337 | 338 | await compat.append(largeBlock.toString('hex')) 339 | await def.append(largeBlock.toString('hex')) 340 | await notBlock.append(largeBlock.toString('hex')) 341 | await block.append(largeBlock.toString('hex')) 342 | 343 | // compat 344 | t.comment('test compat mode') 345 | t.is(compat.length, fixtures[0].length) 346 | 347 | for (let i = 0; i < compat.length; i++) { 348 | t.alike(await compat.get(i, { raw: true }), fixtures[0][i]) 349 | } 350 | 351 | // default 352 | t.comment('test default mode') 353 | t.is(def.length, fixtures[1].length) 354 | 355 | for (let i = 0; i < def.length; i++) { 356 | t.alike(await def.get(i, { raw: true }), fixtures[1][i]) 357 | } 358 | 359 | // not block 360 | t.comment('test block false') 361 | t.is(notBlock.length, fixtures[2].length) 362 | 363 | for (let i = 0; i < notBlock.length; i++) { 364 | t.alike(await notBlock.get(i, { raw: true }), fixtures[2][i]) 365 | } 366 | 367 | // block 368 | t.comment('test block mode') 369 | t.is(block.length, fixtures[3].length) 370 | 371 | for (let i = 0; i < block.length; i++) { 372 | t.alike(await block.get(i, { raw: true }), fixtures[3][i]) 373 | } 374 | }) 375 | 376 | function getBlock (core, index) { 377 | const batch = core.core.storage.read() 378 | const b = batch.getBlock(index) 379 | batch.tryFlush() 380 | return b 381 | } 382 | 383 | function getFixture (name) { 384 | const blocks = fixturesRaw[name] 385 | return blocks.map(b => b4a.from(b, 'base64')) 386 | } 387 | -------------------------------------------------------------------------------- /test/extension.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const { create, replicate, eventFlush } = require('./helpers') 3 | 4 | test('basic extension', async function (t) { 5 | const messages = ['world', 'hello'] 6 | 7 | const a = await create(t) 8 | a.registerExtension('test-extension', { 9 | encoding: 'utf-8', 10
| onmessage: (message, peer) => { 11 | t.ok(peer === a.peers[0]) 12 | t.is(message, messages.pop()) 13 | } 14 | }) 15 | 16 | const b = await create(t, a.key) 17 | const bExt = b.registerExtension('test-extension', { 18 | encoding: 'utf-8' 19 | }) 20 | 21 | replicate(a, b, t) 22 | 23 | await eventFlush() 24 | t.is(b.peers.length, 1) 25 | 26 | bExt.send('hello', b.peers[0]) 27 | bExt.send('world', b.peers[0]) 28 | 29 | await eventFlush() 30 | t.absent(messages.length) 31 | }) 32 | 33 | test('two extensions', async function (t) { 34 | const messages = ['world', 'hello'] 35 | 36 | const a = await create(t) 37 | const b = await create(t, a.key) 38 | 39 | replicate(a, b, t) 40 | 41 | b.registerExtension('test-extension-1', { 42 | encoding: 'utf-8' 43 | }) 44 | const bExt2 = b.registerExtension('test-extension-2', { 45 | encoding: 'utf-8' 46 | }) 47 | 48 | await eventFlush() 49 | t.is(b.peers.length, 1) 50 | 51 | bExt2.send('world', b.peers[0]) 52 | 53 | await eventFlush() 54 | 55 | a.registerExtension('test-extension-2', { 56 | encoding: 'utf-8', 57 | onmessage: (message, peer) => { 58 | t.ok(peer === a.peers[0]) 59 | t.is(message, messages.pop()) 60 | } 61 | }) 62 | 63 | bExt2.send('hello', b.peers[0]) 64 | 65 | await eventFlush() 66 | t.is(messages.length, 1) // First message gets ignored 67 | }) 68 | -------------------------------------------------------------------------------- /test/fixtures/abi/snapshot.js: -------------------------------------------------------------------------------- 1 | // Generate an ABI snapshot for the current version of Hypercore. 2 | 3 | const path = require('path') 4 | const crypto = require('hypercore-crypto') 5 | const b4a = require('b4a') 6 | const Hypercore = require('../../../') 7 | 8 | const { version } = require('../../../package.json') 9 | 10 | const core = new Hypercore(path.join(__dirname, `v${version}`), { 11 | keyPair: crypto.keyPair() // Use an ephemeral key pair 12 | }) 13 | 14 | core.ready().then( 15 | async () => { 16 | for (let i = 0; i < 1000; i++) { 17 | await core.append(b4a.from([i])) 18 | } 19 | }, 20 | (err) => { 21 | console.error(err) 22 | process.exit(1) 23 | } 24 | ) 25 | -------------------------------------------------------------------------------- /test/fixtures/abi/v10.0.0-alpha.39/bitfield: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.0.0-alpha.39/bitfield -------------------------------------------------------------------------------- /test/fixtures/abi/v10.0.0-alpha.39/data: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.0.0-alpha.39/data -------------------------------------------------------------------------------- /test/fixtures/abi/v10.0.0-alpha.39/oplog: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.0.0-alpha.39/oplog -------------------------------------------------------------------------------- /test/fixtures/abi/v10.0.0-alpha.39/tree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.0.0-alpha.39/tree 
-------------------------------------------------------------------------------- /test/fixtures/abi/v10.4.1-partial/data: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.4.1-partial/data -------------------------------------------------------------------------------- /test/fixtures/abi/v10.4.1-partial/oplog: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.4.1-partial/oplog -------------------------------------------------------------------------------- /test/fixtures/abi/v10.4.1/bitfield: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.4.1/bitfield -------------------------------------------------------------------------------- /test/fixtures/abi/v10.4.1/data: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.4.1/data -------------------------------------------------------------------------------- /test/fixtures/abi/v10.4.1/oplog: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.4.1/oplog -------------------------------------------------------------------------------- /test/fixtures/abi/v10.4.1/tree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holepunchto/hypercore/46975c3726af3c15366b3dd807a39fafc3383a59/test/fixtures/abi/v10.4.1/tree -------------------------------------------------------------------------------- /test/fixtures/basic.snapshot.cjs: -------------------------------------------------------------------------------- 1 | /* eslint-disable */ 2 | 3 | exports['storage info - should match snapshot - 0'] = { 4 | "oplog": 1048576, 5 | "tree": 1048576, 6 | "blocks": 1048576, 7 | "bitfield": 1048576 8 | } 9 | 10 | /* eslint-enable */ 11 | -------------------------------------------------------------------------------- /test/fixtures/encryption/generate.js: -------------------------------------------------------------------------------- 1 | // Generate encryption fixtures 2 | 3 | const fs = require('fs') 4 | const path = require('path') 5 | const crypto = require('hypercore-crypto') 6 | const tmpDir = require('test-tmp') 7 | 8 | const Hypercore = require('../../../') 9 | const { version } = require('../../../package.json') 10 | 11 | main() 12 | 13 | async function main () { 14 | const encryptionKey = Buffer.alloc(32).fill('encryption key') 15 | 16 | const compatKey = crypto.keyPair(Buffer.alloc(32, 0)) 17 | const defaultKey = crypto.keyPair(Buffer.alloc(32, 1)) 18 | const blockKey = crypto.keyPair(Buffer.alloc(32, 2)) 19 | 20 | const closing = [] 21 | 22 | const compat = new Hypercore(await tmpDir({ teardown }), { keyPair: compatKey, encryptionKey, compat: true }) 23 | const def = new Hypercore(await tmpDir({ teardown }), { keyPair: defaultKey, encryptionKey, isBlockKey: false }) 24 | const block = new Hypercore(await tmpDir({ teardown }), { keyPair: blockKey, 
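// isBlockKey appears to toggle the alternative block-key encryption layout;
// together with the compat and default cores above this covers the three
// historical encryption formats that test/encryption.js pins via the
// generated base64 fixture arrays.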
encryptionKey, isBlockKey: true }) 25 | 26 | await compat.ready() 27 | await def.ready() 28 | await block.ready() 29 | 30 | const largeBlock = Buffer.alloc(512) 31 | for (let i = 0; i < largeBlock.byteLength; i++) largeBlock[i] = i & 0xff 32 | 33 | for (let i = 0; i < 10; i++) { 34 | await compat.append('compat test: ' + i.toString()) 35 | await def.append('default test: ' + i.toString()) 36 | await block.append('block test: ' + i.toString()) 37 | } 38 | 39 | await compat.append(largeBlock.toString('hex')) 40 | await def.append(largeBlock.toString('hex')) 41 | await block.append(largeBlock.toString('hex')) 42 | 43 | const fixture = fs.createWriteStream(path.join(__dirname, `v${version}`)) 44 | 45 | fixture.write('/* eslint-disable */\n\n') 46 | 47 | await writeFixture('compat', compat) 48 | await writeFixture('default', def) 49 | await writeFixture('block', block) 50 | 51 | fixture.write('/* eslint-enable */\n') 52 | 53 | fixture.end() 54 | await new Promise(resolve => fixture.on('close', resolve)) 55 | 56 | await compat.close() 57 | await def.close() 58 | await block.close() 59 | 60 | await shutdown() 61 | 62 | function teardown (fn) { 63 | closing.push(fn) 64 | } 65 | 66 | function shutdown () { 67 | return Promise.all(closing.map(fn => fn())) 68 | } 69 | 70 | async function writeFixture (name, core) { 71 | fixture.write(`exports['${name}'] = [\n`) 72 | for (let i = 0; i < core.length; i++) { 73 | const b64 = (await core.get(i, { raw: true })).toString('base64') 74 | fixture.write(` '${b64}'${(i === core.length - 1) ? '' : ','}\n`) 75 | } 76 | fixture.write(']\n\n') 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /test/fixtures/encryption/v11.0.48.cjs: -------------------------------------------------------------------------------- 1 | /* eslint-disable */ 2 | 3 | exports['compat'] = [ 4 | 'nDwlg9HRVVn48l5C+beY4f1hlxif8A==', 5 | 'PLfFcX+Z9TFJS5viqhMYATUyhMk7rw==', 6 | 'QbLGoZSGCQtO/U4DJ2cIN7Kk4FzAZw==', 7 | '8pnkYgewV1dENUgeQs6YY+0CsV6ctA==', 8 | 'vP8WwW71BLHHs3fcHmTtSxmmV3tvpQ==', 9 | '8LO53CW0Mn0BOGjPjwASq8ZycaRWiA==', 10 | 'OJql22gF2kO7o6IRjLHFVqL2413PAQ==', 11 | 'CohERKsAMKtiI3LwPo+ELVMraGMKtw==', 12 | '5gxIRLBm/q+6Pc0ww3+AxCi41RcudA==', 13 | 'mBr2B3XnMnSYo2KzC6MNBOtchY2EUw==', 14 | 
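// the ten short entries above are the raw encrypted forms of the small
// appends from generate.js; the long entry below is the hex-encoded
// 512-byte largeBlock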
'w9PUVzvOSjYZdzL3n/3Yjyk8Pm3prIfc/MkRNCtSvVRT1rK86nuLfb3YkKWgY7Ed0YD9X+DB680VxIBXneRACZCP9SUtkQ5iLhaD9/riG0Fw+penagGC9X47108EJAd1gMR1O1G2WH7nVRVb3y/myo98anwkG+eKi8xGIn+UARBdQtNkzz3l+Y/QMZ1wvDOEaZh/Ffp0AVNj66yvDlnHINFc5q7VRKNrddXa70rNPlD1zqYNmqbvAV8QBdPLUjPjWGCCi1Y8gWC74ocYSUT9Z8iS8DQ/SzUplQJIvsBgCJrIn7zlldOTL7EMw7O3564Ojl93GRHbDregmXkyiNrnlJRkk8i3loHXoW3kJ1DDLOCS00Se3w2xOqOOLtd4XhRR9kr7qiQ4uHJv+duNdfrVhNiicAVXe6TW4RCPVIlUkOMv5rKAO5KGqIOAD34nQgyeSoIhKxitKXwgadgm+g9ahbSXufu0EPr4bh4CuQ95bIWsTaXFn76HajRDwQMgNuNJPjCD/SE4EFyTjZaPwKuPE5FFodBPHpW43S1+UBwN5Vh+NO2WVwL9neVQKe9FvLBAk44ndMzZLebGJVwEFBogREBTxoLU0AreCjkpS0pzmIzk5byHJrcsm0GFEH+e+zKmfIEYwhUdpNPW3wrM0J/PL9rfD2N/GbCOdc58D5FwOgJcL1fK37lly6iT7XRXUrGgIOED9VfQYrWWnvnxiHLAQQ+poy51IldzsHR5pySdhyim3/+pezJuEcm3V0Vx8+Xxf7ciZ0xbueLVMvvqQ3MLCc6ZSA7az7HvJw8jqeq5/miIcEVVclCD0EGiDa32erGwsoFCu5D0PdV9+emmEg6X47WwPiorDrdrAEZlZwxYR/TTztU+A+e/Y1gahY0jjiSObIBr2UvIoeFae/QxDYmzvwAkeKCOWAStYXso0hWJVeI72oYAnmIiGA9yBXJhdWJY9IewS77oFnOBjKKmLJUbeoACEBxqbebDizE9562666Netqo6HyuAu+0jcuea3ZKjvh0ZwD6Rpv+7VjGL7R633fQwdrmL5iWrXO5wY/mOcq86+g3yVOvZr+sfbL9VAdACYut/Himzak980xfQraX1omDxg03wuQFYvbKJ04Uzca+FbYxnl4YzbVfuZt8wLoBTPssTFFWhQxkiYoqHmhbklIU83nMiNH+U8T7nrtNCBXgCmQEImAiroMIPGaP0v+A3skAI6rgVtL2IZ6mf6LVDDWVhXgPu5uHi3QHiuqv/rGF8ongWEIqQqRgt7sRAK7mwluKzceDEZuU+zhdYID3P1iVvzHo2QEOCWfN95j9+GRyFUMCjc/3e+BOvrtNu2gloCAaZ3E0p7kQt7zTAX3qMs/UvB9LzVD2d' 15 | ] 16 | 17 | exports['default'] = [ 18 | '/UFE+5IFU2U3o/jwybXF9Yf+0JlwfsM=', 19 | 'no9bWiyQvlaabzLKKfbvfkqW9IcTKc8=', 20 | 'VPg3K5hmg+BBSn+xPmUrZcjp3s2vLG8=', 21 | 'WjF137E0suunTmbtcyJMaUkMISpYOoU=', 22 | '1wsLIjbEqsD6Pmef9irDIjUjshi+Upo=', 23 | 'GJWnyuKtIaq9dqIWv6hd4RLotToIH6g=', 24 | 'Vsm2xOl1aGgI2vPmAj8ZKs7/xgaa8R4=', 25 | 'Vxy1Q524Zke8RSu62qYViSymsFPwgh4=', 26 | 'Y8QlWTKAqUcfiR1ozb9uj7nzsG19Pfw=', 27 | 'HJqWtozv1C7wQjWnwVyL+wP9/7obVcs=', 28 | 'rOi0T3JF6VeEQ/eSOyaRsMaGBXWi1bbEAu8G9a0A6Zesg1lhfRYG5N7du/aIF4WVMjwpMhTU+dQq4r/j/KmF/SCfnthQeyytJl1yxvYNDOSXacTc/D8DBKjU7IQqpl6bRkmpJDJwUlBOTLMD/daYa0sXpnYg1jyhlqmi4genCPUx9pkER/6vAPrdHq1zkwuVMYT3aoinQrco+HrhlHxFAAqxoPElAUJE1+VFn5xKe77pkoUwHsj7vjqG/r4s9qDjtYxwc2ZNo90o2FmTJ+KnL9IvGYcwNyBP3ZDsPspcPyQdtK1rS6zQJHGf4qLpg+1NnbQ3BHWJ64BWN9y3hLWTzH4Xau2u8S2ipCmrusHLeIJQAUXOmPKYFYFuSPOSIvAGpHNYWpDvyPFbCs0ZC2ttD/GIMaJLWT8XYL6isY4ns8HYOB0gCndT9RZLdVW0VT1jkYhtbVqlQD1LHaGhhXxJnucVJ1PI8ZbT+CiygmxqCMeG0C76hKjbTEcTiQuhH3B9Tk8s3Wifj8YlElyGkOTSHtVM2x6pEei0EP9kqtB8mt9wGwVd3RjUYJoNcZt7r3X6ZsGjFmYIIMmZmilFloB7uIUABtIecukrvlbRzpy5RzlRr0cywPMW+MO6qY1SPgeAdtMHoqj2fW/RNUZMT7exrKGVetWuCqaTiPeR1gC4qAdBtc5DHwoBhxo4meyp876E9/BRpFUnw3E5HLeadu6jTnC7zdfveUk7+HJx7iLxaVPDxVQqNQpB/xjBCHtDMWdJ0FxWbS55TvCbK+s8QMPN+PtIzT3WWD9/6y2DvxdNqjQDVBZHxJidS3HPkpq8nUrfDGQhzlZ3qDLeRrASxl2rODVO6JU6kxLYXTpMFsqj4pUgJNMPnt5d2klXS0qQD8ZE/HlRNB4jXmJ8dbGOC/izFtmMhene4Np7n5sEYiJgfvsXk7TAR7/d/nRLIx7drEqwyoHzQtISez9SxF2d79/tNfE3QVi/wKIjo72tGw+3jaryYEuuZkx+tvaXIbBDWKglHemD0orNCRuuf+ZPyNjb8WlMrZ5J4HP6cI/uz8xHc4owypdqxEt2Ne2QTq/Z3XaT/sOQhiF1WM1BlZ6pstX1/Es6g9k4fHvXJMvgGn13LRBtSliUKN08GeCVv5ozs/0kUYsHe1SFX0tCoA2S2yU5V6aUTa1+bC2gnhxO4mYoGhwyqny7vjFg2hwVYLaDg4JgaGf9ePYLKj/M5Xj5Q+Q44ZKgjcuXRA10buIcXeR1EsScBiqwjzR/8D0hHACoJ81utNfN9L7OtIUh6OIYRYnMxcNx5Y3svgxb7STTmrLkorD2UdNZjSo7sKLXIMaJSY5n0p+kZQjOsgHxMyfUNkZVvRNZTfbc60wd' 29 | ] 30 | 31 | exports['block'] = [ 32 | '87VJ77lmAFwJzx9vlrKaB+XB/REc', 33 | 'G0JfCfdn1m9FQlqjfexPK8xJQD4d', 34 | 'C8Zuffa7K1Kf+Gvw7GbgSN5E+3zd', 35 | '8MF5UefDZkf93UzRenq3ebfyQ02A', 36 | '0OLCgve744BDosP4g+RQrUbeZdHh', 37 | '93gFjJH1tzvHdyad1S6+1iF1rJVn', 38 | 'VxnSlCE/vgI1aB/d6tAaGJpawQfi', 39 | 'neKG5Nl77jFCQFTvFvsxp+1pvoFA', 40 | 
'r96xMtHumU388zQETcHcUkU//wyT', 41 | 'YH44ccE3PF57SMPbh7G4TVFOLn+/', 42 | 'e33cPYNqfjwsRDQjjwcdtSiffKXFu+j82ffRn7TUsaZ09Ms6gug4KVb5ky/OFAz1VRVab9Mp+tlZHxfmmdUC9KCQMcFf677CsxZwyfvh22JtFWK4cDPLdJog+9Aed4iJaZ6DSK/QXm6x/QPTrX3aXTWq/U0z/yU1NEg2i1i4yMjfWU/+3FAa0XwF+9xmjpLrFKKfOrny6QLgZGlYwjtRJqJpbHIF5FpJtzWaY7l6ya1shzNv2gZ88RUKag2bwunvcC9KuT5wnfhkDjFv/oVn6nvqjKXxKpvyEZ5MnzOkZNL9zSxFj8vXM+2hcwItwzSIHKsnoCmZVgePsWkZOQ3MlEW8fMsS2S4wtFeiBQFnuELGfZXgH+6Iv0UuXc+bv5DAMnwLHdpQgFYNLVqptRlbbmY+EKkxbyLjcp9Z3b5zP1JuV2136ZT1CUTZEUTQ/e4hj582TmcmUKYORirzoImBPHR112UCeBn9yRR/BsLSp+po0VamcxRjvp4mgJAJj5OGA/70nSU30Nd71JaogtirUv8yt10gYRb1FK1I1Uh2R8/SCKm34I/XZQXIB+1R0SVp/qLwmGaI8XyoOWJXHkYqTc3K5IzAr762WA3xDtMc8e2eUSFaVLuOcXGCBut2jzyeeWDUR6Ge5UpWeT8NAHg+zWQKk4CRN4U/PoQwOJB/qRdUg9G5R6c5W+pGHmQKv6qSBeKVlL3BjzG4c6f1mGe6n4t7YHZYV3ZNlSxMOtGhpWLKUAasWmRvpFtyouMAbYh0yNCsbc8VE5CnZs7YoebtSjvCoDEf/k7cEEH9bsMgs0IG4MFZ58Th0LHZGXdQWNwhS4Zh8eYukWF2CUU5QyUZxNSn2f84kRqlb6DQ7ps7xSzPXBEIFMAv5RTxRPk5EexT8Tnc7JQpssLKx2RMTUYBV/ZqzJeQ/k/HEMgkf7biwhiKKZ5/PbiDbO3oqHmDlIx7XVxp+svHgqIsZldJ03LfX31aKnASE3YBbgxwJHeaYLWV64xFfbOo+VtqP+F5ZjVUQtf/FjYA4h1ATE2GkNlqlC7dwgJzuUqC1IkvONNUfCHSUVKou4/LKs/O8ncGkhhjJzbbg9OGKmGl2T9Hcfgq42uloWhiiN0mOaIJxt9ZT0VoAox8PQ5783JnNFeVkgJ0H+nlOY8iy4DcaoOKzGsuZFLWvG1uhhktcx3pUlX9ufdAhiPlaVmpR2UfRESMf8QTIzCs0VqfbSyxxrESGoVcEgICXYDW5HCjzl5O8e18NqnreiuCYMqTjPGY0u0aOm2XIemSU6rqJ0SVknZdoDuF0KI8no65z0iPeePK+Z7odBme5XvErxKLN0VFNPm4sGhTrC1Gh8sH1n6XWsmd+VnnV0/wJ1BdMlFe' 43 | ] 44 | 45 | /* eslint-enable */ 46 | -------------------------------------------------------------------------------- /test/fully-remote-proof.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | const remote = require('../lib/fully-remote-proof.js') 4 | 5 | const { create } = require('./helpers') 6 | 7 | test('fully remote proof - proof and verify', async function (t) { 8 | const core = await create(t) 9 | 10 | await core.append('hello') 11 | await core.append('world') 12 | 13 | { 14 | const proof = await remote.proof(core) 15 | t.ok(await remote.verify(core.state.storage.store, proof)) 16 | } 17 | 18 | { 19 | const proof = await remote.proof(core, { index: 0, block: b4a.from('hello') }) 20 | const p = await remote.verify(core.state.storage.store, proof) 21 | t.is(p.block.index, 0) 22 | t.alike(p.block.value, b4a.from('hello')) 23 | } 24 | 25 | { 26 | const proof = await remote.proof(core, { index: 0, block: b4a.from('hello') }) 27 | const p = await remote.verify(core.state.storage.store, proof, { referrer: b4a.alloc(32) }) 28 | t.is(p, null) 29 | } 30 | }) 31 | -------------------------------------------------------------------------------- /test/helpers/index.js: -------------------------------------------------------------------------------- 1 | const Hypercore = require('../../') 2 | const createTempDir = require('test-tmp') 3 | const CoreStorage = require('hypercore-storage') 4 | const safetyCatch = require('safety-catch') 5 | const DebuggingStream = require('debugging-stream') 6 | 7 | exports.create = async function (t, ...args) { 8 | const dir = await createTempDir(t) 9 | 10 | const db = new CoreStorage(dir) 11 | 12 | const core = new Hypercore(db, ...args) 13 | await core.ready() 14 | 15 | t.teardown(() => core.close().catch(safetyCatch), { order: 1 }) 16 | 17 | return core 18 | } 19 | 20 | const createStorage = exports.createStorage = async function (t, dir) { 21 | if (!dir) dir = await createTempDir(t) 22 | return new CoreStorage(dir) 23 | } 24 | 
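// A minimal sketch of how these helpers compose in a test. Illustrative only:
// it assumes the brittle `test` harness and `b4a`, used throughout this suite,
// are in scope alongside the helpers.
//
//   const { create, replicate, eventFlush } = require('./helpers')
//
//   test('writer syncs to reader', async function (t) {
//     const a = await create(t)                  // writable core on temp storage
//     const b = await create(t, a.key)           // read-only clone of a
//     replicate(a, b, t)                         // wire up the two replication streams
//     await a.append(['hello'])
//     t.alike(await b.get(0), b4a.from('hello')) // get() waits for the block to arrive
//     await eventFlush()                         // let pending events settle
//   })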
25 | exports.createStored = async function (t) { 26 | const dir = await createTempDir(t) 27 | let db = null 28 | 29 | return async function (...args) { 30 | if (db) await db.close() 31 | db = await createStorage(t, dir) 32 | return new Hypercore(db, ...args) 33 | } 34 | } 35 | 36 | exports.replicate = function replicate (a, b, t, opts = {}) { 37 | const s1 = a.replicate(true, { keepAlive: false, ...opts }) 38 | const s2 = b.replicate(false, { keepAlive: false, ...opts }) 39 | 40 | const closed1 = new Promise(resolve => s1.once('close', resolve)) 41 | const closed2 = new Promise(resolve => s2.once('close', resolve)) 42 | 43 | s1.on('error', err => { 44 | safetyCatch(err) 45 | t.comment(`replication stream error (initiator): ${err}`) 46 | }) 47 | s2.on('error', err => { 48 | safetyCatch(err) 49 | t.comment(`replication stream error (responder): ${err}`) 50 | }) 51 | 52 | if (opts.teardown !== false) { 53 | t.teardown(async function () { 54 | s1.destroy() 55 | s2.destroy() 56 | await closed1 57 | await closed2 58 | }) 59 | } 60 | 61 | s1.pipe(s2).pipe(s1) 62 | 63 | return [s1, s2] 64 | } 65 | 66 | exports.unreplicate = function unreplicate (streams) { 67 | return Promise.all(streams.map((s) => { 68 | return new Promise((resolve) => { 69 | s.on('error', () => {}) 70 | s.on('close', resolve) 71 | s.destroy() 72 | }) 73 | })) 74 | } 75 | 76 | exports.replicateDebugStream = function replicate (a, b, t, opts = {}) { 77 | const { latency, speed, jitter } = opts 78 | 79 | const s1 = a.replicate(true, { keepAlive: false, ...opts }) 80 | const s2Base = b.replicate(false, { keepAlive: false, ...opts }) 81 | const s2 = new DebuggingStream(s2Base, { latency, speed, jitter }) 82 | 83 | s1.on('error', err => t.comment(`replication stream error (initiator): ${err}`)) 84 | s2.on('error', err => t.comment(`replication stream error (responder): ${err}`)) 85 | 86 | if (opts.teardown !== false) { 87 | t.teardown(async function () { 88 | let missing = 2 89 | await new Promise(resolve => { 90 | s1.on('close', onclose) 91 | s1.destroy() 92 | 93 | s2.on('close', onclose) 94 | s2.destroy() 95 | 96 | function onclose () { 97 | if (--missing === 0) resolve() 98 | } 99 | }) 100 | }) 101 | } 102 | 103 | s1.pipe(s2).pipe(s1) 104 | 105 | return [s1, s2] 106 | } 107 | 108 | exports.eventFlush = async function eventFlush () { 109 | await new Promise(resolve => setImmediate(resolve)) 110 | } 111 | -------------------------------------------------------------------------------- /test/helpers/networking.js: -------------------------------------------------------------------------------- 1 | const UDX = require('udx-native') 2 | const safetyCatch = require('safety-catch') 3 | const NoiseStream = require('@hyperswarm/secret-stream') 4 | 5 | module.exports = { 6 | makeStreamPair 7 | } 8 | 9 | function makeStreamPair (t, opts = {}) { 10 | const u = new UDX() 11 | const a = u.createSocket() 12 | const b = u.createSocket() 13 | 14 | t.teardown(() => a.close()) 15 | t.teardown(() => b.close()) 16 | 17 | a.bind(0, '127.0.0.1') 18 | b.bind(0, '127.0.0.1') 19 | 20 | const p = proxy({ from: a, to: b }, async function () { 21 | const delay = opts.latency[0] + Math.round(Math.random() * (opts.latency[1] - opts.latency[0])) 22 | if (delay) await new Promise((resolve) => setTimeout(resolve, delay)) 23 | return false 24 | }) 25 | 26 | t.teardown(() => p.close()) 27 | 28 | const s1 = u.createStream(1) 29 | const s2 = u.createStream(2) 30 | 31 | s1.connect(a, 2, p.address().port) 32 | s2.connect(b, 1, p.address().port) 33 | 34 | t.teardown(() => 
s1.destroy()) 35 | t.teardown(() => s2.destroy()) 36 | 37 | const n1 = new NoiseStream(true, s1) 38 | const n2 = new NoiseStream(false, s2) 39 | 40 | return [n1, n2] 41 | } 42 | 43 | function proxy ({ from, to, bind } = {}, handler) { 44 | from = from.address().port 45 | to = to.address().port 46 | 47 | const u = new UDX() 48 | const socket = u.createSocket() 49 | 50 | socket.on('message', function (buf, rinfo) { 51 | const forwarding = handler() 52 | const port = rinfo.port === to ? from : to 53 | 54 | if (forwarding && forwarding.then) forwarding.then(fwd).catch(safetyCatch) 55 | else fwd(forwarding) 56 | 57 | function fwd () { 58 | socket.trySend(buf, port, '127.0.0.1') 59 | } 60 | }) 61 | 62 | socket.bind(bind || 0, '127.0.0.1') 63 | 64 | return socket 65 | } 66 | -------------------------------------------------------------------------------- /test/move-to.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | const crypto = require('hypercore-crypto') 4 | const { create } = require('./helpers') 5 | 6 | test('move - basic', async function (t) { 7 | t.plan(9) 8 | 9 | const core = await create(t) 10 | 11 | const sess = core.session({ name: 'session' }) 12 | 13 | await sess.append('1') 14 | await sess.append('2') 15 | await sess.append('3') 16 | 17 | await core.commit(sess) 18 | 19 | t.is(core.length, 3) 20 | t.is(sess.length, 3) 21 | 22 | const keyPair = crypto.keyPair() 23 | 24 | const manifest = { 25 | prologue: { 26 | length: core.length, 27 | hash: core.state.hash() 28 | }, 29 | signers: [{ 30 | publicKey: keyPair.publicKey 31 | }] 32 | } 33 | 34 | const core2 = await create(t, { manifest, keyPair }) 35 | await core2.core.copyPrologue(core.state) 36 | 37 | t.is(core2.length, 3) 38 | 39 | sess.once('migrate', key => { t.alike(key, core2.key) }) 40 | 41 | await sess.state.moveTo(core2, core2.length) 42 | await sess.append('4') 43 | 44 | await core2.commit(sess) 45 | 46 | t.alike(await sess.get(0), b4a.from('1')) 47 | t.alike(await sess.get(1), b4a.from('2')) 48 | t.alike(await sess.get(2), b4a.from('3')) 49 | t.alike(await sess.get(3), b4a.from('4')) 50 | 51 | t.alike(await core2.get(3), b4a.from('4')) 52 | 53 | await core.close() 54 | await core2.close() 55 | await sess.close() 56 | }) 57 | 58 | test('move - snapshots', async function (t) { 59 | const core = await create(t) 60 | 61 | await core.append('hello') 62 | await core.append('world') 63 | await core.append('again') 64 | 65 | const sess = core.session({ name: 'snapshot' }) 66 | 67 | const snap = sess.snapshot() 68 | await snap.ready() 69 | 70 | await sess.close() 71 | await core.truncate(1) 72 | 73 | await core.append('break') 74 | 75 | t.is(snap.length, 3) 76 | t.is(core.length, 2) 77 | 78 | const keyPair = crypto.keyPair() 79 | 80 | const manifest = { 81 | prologue: { 82 | length: core.length, 83 | hash: core.state.hash() 84 | }, 85 | signers: [{ 86 | publicKey: keyPair.publicKey 87 | }] 88 | } 89 | 90 | const core2 = await create(t, { manifest, keyPair }) 91 | await core2.core.copyPrologue(core.state) 92 | 93 | t.is(core2.length, 2) 94 | 95 | await snap.state.moveTo(core2, core2.length) 96 | 97 | t.is(snap.length, 3) 98 | 99 | t.alike(await snap.get(0), b4a.from('hello')) 100 | t.alike(await snap.get(1), b4a.from('world')) 101 | t.alike(await snap.get(2), b4a.from('again')) 102 | 103 | await snap.close() 104 | await core.close() 105 | await core2.close() 106 | }) 107 | 
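// Condensed from the two tests above, the migration recipe they both follow
// (same names as above; an illustrative sketch, not a separate API): pin a
// new core to the old core's current tree via a prologue manifest, port the
// existing blocks over with copyPrologue, then re-home the session state.
//
//   const keyPair = crypto.keyPair()
//   const manifest = {
//     prologue: { length: core.length, hash: core.state.hash() },
//     signers: [{ publicKey: keyPair.publicKey }]
//   }
//   const core2 = await create(t, { manifest, keyPair })
//   await core2.core.copyPrologue(core.state)
//   await sess.state.moveTo(core2, core2.length)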
-------------------------------------------------------------------------------- /test/mutex.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const Mutex = require('../lib/mutex') 3 | 4 | test('mutex - basic', async function (t) { 5 | const mutex = new Mutex() 6 | 7 | let count = 0 8 | 9 | const locks = [] 10 | 11 | for (let i = 0; i < 5; i++) locks.push(counter(i)) 12 | 13 | await Promise.all(locks) 14 | 15 | t.is(count, 5) 16 | 17 | async function counter (i) { 18 | await mutex.lock() 19 | t.is(count++, i) 20 | setImmediate(() => mutex.unlock()) 21 | } 22 | }) 23 | 24 | test('mutex - lock after destroy', async function (t) { 25 | const mutex = new Mutex() 26 | mutex.destroy() 27 | try { 28 | await mutex.lock() 29 | t.fail('should not be able to lock after destroy') 30 | } catch { 31 | t.pass('lock threw after destroy') 32 | } 33 | }) 34 | 35 | test('mutex - graceful destroy', async function (t) { 36 | t.plan(1) 37 | 38 | const mutex = new Mutex() 39 | const promises = [] 40 | let resolveCount = 0 41 | 42 | for (let i = 0; i < 5; i++) { 43 | promises.push(mutex.lock().then(() => resolveCount++)) 44 | } 45 | 46 | const destroyed = mutex.destroy() 47 | 48 | for (let i = 0; i < 5; i++) mutex.unlock() 49 | 50 | await destroyed 51 | 52 | t.is(resolveCount, 5) 53 | }) 54 | 55 | test('mutex - quick destroy', async function (t) { 56 | t.plan(2) 57 | 58 | const mutex = new Mutex() 59 | const promises = [] 60 | let rejectCount = 0 61 | let resolveCount = 0 62 | 63 | for (let i = 0; i < 5; i++) { 64 | promises.push(mutex.lock().then(() => resolveCount++, () => rejectCount++)) 65 | } 66 | 67 | const destroyed = mutex.destroy(new Error('Test error')) 68 | 69 | for (let i = 0; i < 5; i++) mutex.unlock() 70 | 71 | await destroyed 72 | 73 | t.is(resolveCount, 1) 74 | t.is(rejectCount, 4) 75 | }) 76 | 77 | test('mutex - graceful then quick destroy', async function (t) { 78 | t.plan(2) 79 | 80 | const mutex = new Mutex() 81 | const promises = [] 82 | let rejectCount = 0 83 | let resolveCount = 0 84 | 85 | for (let i = 0; i < 5; i++) { 86 | promises.push(mutex.lock().then(() => resolveCount++, () => rejectCount++)) 87 | } 88 | 89 | const destroyed = mutex.destroy() 90 | mutex.destroy(new Error('Test error')) 91 | 92 | for (let i = 0; i < 5; i++) mutex.unlock() 93 | 94 | await destroyed 95 | 96 | t.is(resolveCount, 1) 97 | t.is(rejectCount, 4) 98 | }) 99 | 100 | test('mutex - quick destroy with re-entry', async function (t) { 101 | t.plan(2) 102 | 103 | const mutex = new Mutex() 104 | const promises = [] 105 | let rejectCount = 0 106 | let resolveCount = 0 107 | 108 | for (let i = 0; i < 5; i++) { 109 | promises.push(lock()) 110 | } 111 | 112 | const destroyed = mutex.destroy(new Error('Test error')) 113 | 114 | for (let i = 0; i < 5; i++) mutex.unlock() 115 | 116 | await destroyed 117 | 118 | t.is(resolveCount, 1) 119 | t.is(rejectCount, 4) 120 | 121 | async function lock () { 122 | try { 123 | await mutex.lock() 124 | resolveCount++ 125 | } catch { 126 | try { 127 | await mutex.lock() 128 | t.fail('should never acquire it after failing') 129 | } catch { 130 | rejectCount++ 131 | } 132 | } 133 | } 134 | }) 135 | 136 | test('mutex - error propagates', async function (t) { 137 | const mutex = new Mutex() 138 | 139 | let resolveCount = 0 140 | const rejectErrors = [] 141 | const err = new Error('Stop') 142 | 143 | for (let i = 0; i < 5; i++) { 144 | mutex.lock().then(() => resolveCount++, err => rejectErrors.push(err)) 145 | } 146 | 147 
| await mutex.destroy(err) 148 | 149 | try { 150 | await mutex.lock() 151 | } catch (e) { 152 | t.ok(e === err) 153 | } 154 | 155 | t.is(resolveCount, 1) 156 | t.alike(rejectErrors, [err, err, err, err]) 157 | }) 158 | -------------------------------------------------------------------------------- /test/preload.js: -------------------------------------------------------------------------------- 1 | const crypto = require('hypercore-crypto') 2 | const test = require('brittle') 3 | const Hypercore = require('../') 4 | const { createStorage } = require('./helpers') 5 | 6 | test('preload - custom keypair', async function (t) { 7 | const keyPair = crypto.keyPair() 8 | const storage = await createStorage(t) 9 | 10 | const preload = new Promise((resolve) => { 11 | resolve({ keyPair }) 12 | }) 13 | 14 | const core = new Hypercore(storage, keyPair.publicKey, { preload }) 15 | await core.ready() 16 | 17 | t.ok(core.writable) 18 | t.alike(core.key, keyPair.publicKey) 19 | 20 | await core.close() 21 | }) 22 | -------------------------------------------------------------------------------- /test/purge.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const tmp = require('test-tmp') 3 | const fs = require('fs') 4 | const Path = require('path') 5 | 6 | const Hypercore = require('..') 7 | 8 | test('basic purge', async function (t) { 9 | const dir = await tmp(t) 10 | const core = new Hypercore(dir) 11 | await core.append(['a', 'b', 'c']) 12 | 13 | const oplogLoc = Path.join(dir, 'oplog') 14 | const treeLoc = Path.join(dir, 'tree') 15 | const bitfieldLoc = Path.join(dir, 'bitfield') 16 | const dataLoc = Path.join(dir, 'data') 17 | 18 | t.is(fs.existsSync(oplogLoc), true) 19 | t.is(fs.existsSync(treeLoc), true) 20 | t.is(fs.existsSync(bitfieldLoc), true) 21 | t.is(fs.existsSync(dataLoc), true) 22 | t.is(fs.readdirSync(dir).length, 4) // Sanity check 23 | 24 | await core.purge() 25 | 26 | t.is(core.closed, true) 27 | t.is(fs.existsSync(oplogLoc), false) 28 | t.is(fs.existsSync(treeLoc), false) 29 | t.is(fs.existsSync(bitfieldLoc), false) 30 | t.is(fs.existsSync(dataLoc), false) 31 | t.is(fs.readdirSync(dir).length, 0) // Nothing remains 32 | }) 33 | 34 | test('purge closes all sessions', async function (t) { 35 | const dir = await tmp(t) 36 | const core = new Hypercore(dir) 37 | await core.append(['a', 'b', 'c']) 38 | const otherSession = core.session() 39 | await otherSession.ready() 40 | 41 | await core.purge() 42 | 43 | t.is(core.closed, true) 44 | t.is(otherSession.closed, true) 45 | }) 46 | -------------------------------------------------------------------------------- /test/remote-bitfield.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | const RemoteBitfield = require('../lib/remote-bitfield') 4 | const { create, replicate } = require('./helpers') 5 | 6 | test('remote bitfield - findFirst', function (t) { 7 | const b = new RemoteBitfield() 8 | 9 | b.set(1000000, true) 10 | 11 | t.is(b.findFirst(true, 0), 1000000) 12 | }) 13 | 14 | test('remote bitfield - set range on page boundary', function (t) { 15 | const b = new RemoteBitfield() 16 | 17 | b.setRange(2032, 2058, true) 18 | 19 | t.is(b.findFirst(true, 2048), 2048) 20 | }) 21 | 22 | test('remote bitfield - set range to false', function (t) { 23 | const b = new RemoteBitfield() 24 | 25 | b.setRange(0, 5000, false) 26 | 27 | t.is(b.findFirst(true, 0), -1) 28 | }) 29 | 30 | 
test('set last bits in segment and findFirst', function (t) { 31 | const b = new RemoteBitfield() 32 | 33 | b.set(32766, true) 34 | t.is(b.findFirst(false, 32766), 32767) 35 | 36 | b.set(32767, true) 37 | t.is(b.findFirst(false, 32766), 32768) 38 | t.is(b.findFirst(false, 32767), 32768) 39 | }) 40 | 41 | test('remote contiguous length consistency (remote-bitfield findFirst edge case)', async function (t) { 42 | // Indirectly tests the findFirst method for the case where 43 | // a position > 0 is passed in, while _maxSegments is still 0 44 | // because nothing was set. 45 | const a = await create(t) 46 | const b = await create(t, a.key) 47 | const c = await create(t, a.key) 48 | 49 | replicate(a, b, t) 50 | replicate(b, c, t) 51 | 52 | await a.append('block0') 53 | await a.append('block1') 54 | 55 | await b.get(0) 56 | await new Promise(resolve => setTimeout(resolve, 500)) 57 | 58 | const peer = getPeer(c, b) 59 | 60 | t.is(peer._remoteContiguousLength, 1, 'Sanity check') 61 | 62 | t.is( 63 | peer._remoteContiguousLength <= peer.remoteContiguousLength, 64 | true, 65 | 'invariant holds: remoteContiguousLength at least _remoteContiguousLength' 66 | ) 67 | }) 68 | 69 | test('bitfield messages sent on cache miss', async function (t) { 70 | const original = await create(t) 71 | const sparse = await create(t, original.key) 72 | const empty = await create(t, original.key) 73 | 74 | await original.append(['a', 'b', 'c', 'd', 'e']) 75 | 76 | replicate(original, sparse, t) 77 | await original.get(2) 78 | await original.get(3) 79 | 80 | replicate(sparse, empty, t) 81 | await new Promise(resolve => setTimeout(resolve, 1000)) 82 | 83 | t.is(empty.replicator.peers.length, 1, 'Sanity check') 84 | const stats = empty.replicator.peers[0].stats 85 | t.is(stats.wireBitfield.rx, 0, 'initially no bitfields sent (sanity check)') 86 | 87 | await t.exception( 88 | async () => { 89 | await empty.get(1, { timeout: 100 }) 90 | }, 91 | /REQUEST_TIMEOUT/, 92 | 'request on unavailable block times out (sanity check)' 93 | ) 94 | t.is(stats.wireBitfield.rx, 1, 'Requests bitfield on cache miss') 95 | }) 96 | 97 | // Peer b as seen by peer a (b is the remote peer) 98 | function getPeer (a, b) { 99 | for (const aPeer of a.core.replicator.peers) { 100 | for (const bPeer of b.core.replicator.peers) { 101 | if (b4a.equals(aPeer.stream.remotePublicKey, bPeer.stream.publicKey)) { 102 | return aPeer 103 | } 104 | } 105 | } 106 | 107 | throw new Error('Error in test: peer not found') 108 | } 109 | -------------------------------------------------------------------------------- /test/remote-length.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | const RemoteBitfield = require('../lib/remote-bitfield') 4 | const { create, replicate } = require('./helpers') 5 | 6 | test('when the writer appends it broadcasts the new contiguous length', async function (t) { 7 | const a = await create(t) 8 | const b = await create(t, a.key) 9 | 10 | replicate(a, b, t) 11 | await new Promise(resolve => setTimeout(resolve, 100)) 12 | 13 | t.is(getPeer(b, a).remoteContiguousLength, 0, 'Sanity check') 14 | 15 | await a.append('a') 16 | await new Promise(resolve => setTimeout(resolve, 100)) 17 | t.is(getPeer(b, a).remoteContiguousLength, 1, 'Broadcast new length to other peers') 18 | 19 | await a.append('b') 20 | await new Promise(resolve => setTimeout(resolve, 100)) 21 | t.is(getPeer(b, a).remoteContiguousLength, 2, 'Broadcast new length to other 
peers') 22 | }) 23 | 24 | test('contiguous-length announce-on-update flow', async function (t) { 25 | const a = await create(t) 26 | const b = await create(t, a.key) 27 | const c = await create(t, a.key) 28 | 29 | replicate(a, b, t) 30 | replicate(b, c, t) 31 | 32 | await a.append('a') 33 | await new Promise(resolve => setTimeout(resolve, 100)) 34 | t.is(getPeer(c, b).remoteContiguousLength, 0, 'Sanity check: c knows nothing yet') 35 | t.is(getPeer(b, a).remoteContiguousLength, 1, 'Sanity check: b knows about a') 36 | 37 | await b.get(0) 38 | await new Promise(resolve => setTimeout(resolve, 100)) 39 | t.is(getPeer(c, b).remoteContiguousLength, 1, 'b broadcast its new contiguous length to the other peers') 40 | t.is(getPeer(a, b).remoteContiguousLength, 0, 'b did not notify peers it already knows own that block') 41 | }) 42 | 43 | test('announce-range-on-update flow with big core (multiple bitfield pages)', async function (t) { 44 | t.timeout(1000 * 60 * 5) // Expected to take around 15s. Additional headroom in case of slow CI machine 45 | 46 | const a = await create(t) 47 | const b = await create(t, a.key) 48 | const c = await create(t, a.key) 49 | 50 | replicate(a, b, t) 51 | replicate(b, c, t) 52 | 53 | const nrBlocks = RemoteBitfield.BITS_PER_PAGE + 10 54 | 55 | const blocks = [] 56 | for (let i = 0; i < nrBlocks; i++) { 57 | blocks.push(`block-${i}`) 58 | } 59 | await a.append(blocks) 60 | 61 | await new Promise(resolve => setTimeout(resolve, 500)) 62 | 63 | const lastBlock = nrBlocks - 1 64 | 65 | t.is( 66 | getPeer(c, b)._remoteHasBlock(lastBlock), 67 | false, 68 | 'Sanity check: c knows nothing yet' 69 | ) 70 | t.is( 71 | getPeer(b, a)._remoteHasBlock(lastBlock), 72 | true, 73 | 'Sanity check: b knows about a' 74 | ) 75 | 76 | await b.get(nrBlocks - 1) 77 | await new Promise(resolve => setTimeout(resolve, 500)) 78 | 79 | t.is( 80 | getPeer(c, b)._remoteHasBlock(lastBlock), 81 | true, 82 | 'b broadcast its new block to the other peers') 83 | t.is( 84 | getPeer(a, b)._remoteHasBlock(lastBlock), 85 | false, 86 | 'b did not notify peers it already knows own that block' 87 | ) 88 | 89 | // Some sanity checks on the actual public api 90 | 91 | const getOpts = { 92 | timeout: 500, 93 | valueEncoding: 'utf-8' 94 | } 95 | 96 | // Note: This check is expected to fail if BITS_PER_PAGE changes; just update it then 97 | t.is( 98 | await c.get(nrBlocks - 1, getOpts), 99 | 'block-32777', 100 | 'Peer c can get the block peer b also has' 101 | ) 102 | 103 | await t.exception( 104 | async () => await c.get(nrBlocks - 2, getOpts), 105 | /REQUEST_TIMEOUT/, 106 | 'Sanity check: peer c can not get blocks peer b does not have') 107 | }) 108 | 109 | test('truncations by the writer result in the updated contiguous length being announced', async function (t) { 110 | const a = await create(t) 111 | const b = await create(t, a.key) 112 | 113 | replicate(a, b, t) 114 | await new Promise(resolve => setTimeout(resolve, 100)) 115 | 116 | t.is(getPeer(b, a).remoteContiguousLength, 0, 'Sanity check') 117 | 118 | await a.append(['a', 'b']) 119 | await new Promise(resolve => setTimeout(resolve, 100)) 120 | t.is(getPeer(b, a).remoteContiguousLength, 2, 'updated length broadcast to other peers') 121 | 122 | await a.truncate(1) 123 | await new Promise(resolve => setTimeout(resolve, 100)) 124 | t.is(getPeer(b, a).remoteContiguousLength, 1, 'truncate broadcast to other peers') 125 | }) 126 | 127 | // Get peer b as seen by peer a (b is the remote peer). 
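// For example, in the assertions above (an illustrative reading of the
// helper below, not extra API surface):
//   getPeer(b, a).remoteContiguousLength  // b's view of a's contiguous length
//   getPeer(c, b)._remoteHasBlock(i)      // c's view of whether b has block i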
128 | function getPeer (a, b) { 129 | for (const aPeer of a.core.replicator.peers) { 130 | for (const bPeer of b.core.replicator.peers) { 131 | if (b4a.equals(aPeer.stream.remotePublicKey, bPeer.stream.publicKey)) return aPeer 132 | } 133 | } 134 | 135 | throw new Error('Error in test: peer not found') 136 | } 137 | -------------------------------------------------------------------------------- /test/sessions.js: -------------------------------------------------------------------------------- 1 | const uncaughts = require('uncaughts') 2 | const test = require('brittle') 3 | const crypto = require('hypercore-crypto') 4 | const c = require('compact-encoding') 5 | const b4a = require('b4a') 6 | const { create, createStorage } = require('./helpers') 7 | 8 | const Hypercore = require('../') 9 | 10 | test('sessions - can create writable sessions from a read-only core', async function (t) { 11 | t.plan(5) 12 | 13 | const storage = await createStorage(t) 14 | const keyPair = crypto.keyPair() 15 | const core = new Hypercore(storage, keyPair.publicKey, { 16 | valueEncoding: 'utf-8' 17 | }) 18 | await core.ready() 19 | t.absent(core.writable) 20 | 21 | const session = core.session({ keyPair }) 22 | await session.ready() 23 | 24 | t.ok(session.writable) 25 | 26 | try { 27 | await core.append('hello') 28 | t.fail('should not have appended to the read-only core') 29 | } catch { 30 | t.pass('read-only core append threw correctly') 31 | } 32 | 33 | try { 34 | await session.append('world') 35 | t.pass('session append did not throw') 36 | } catch { 37 | t.fail('session append should not have thrown') 38 | } 39 | 40 | t.is(core.length, 1) 41 | 42 | await session.close() 43 | await core.close() 44 | }) 45 | 46 | test('sessions - custom valueEncoding on session', async function (t) { 47 | const storage = await createStorage(t) 48 | const core1 = new Hypercore(storage) 49 | await core1.append(c.encode(c.raw.json, { a: 1 })) 50 | 51 | const core2 = core1.session({ valueEncoding: 'json' }) 52 | await core2.append({ b: 2 }) 53 | 54 | t.alike(await core2.get(0), { a: 1 }) 55 | t.alike(await core2.get(1), { b: 2 }) 56 | 57 | await core2.close() 58 | await core1.close() 59 | }) 60 | 61 | test('sessions - truncate a checkout session', async function (t) { 62 | const storage = await createStorage(t) 63 | const core = new Hypercore(storage) 64 | 65 | for (let i = 0; i < 10; i++) await core.append(b4a.from([i])) 66 | 67 | const atom = storage.createAtom() 68 | 69 | const session = core.session({ checkout: 7, atom }) 70 | await session.ready() 71 | 72 | t.is(session.length, 7) 73 | 74 | await session.truncate(5, session.fork) 75 | 76 | t.is(session.length, 5) 77 | 78 | await session.append(b4a.from('hello')) 79 | 80 | await session.close() 81 | await core.close() 82 | }) 83 | 84 | test.skip('session on a from instance does not inject itself to other sessions', async function (t) { 85 | const a = await create(t, { }) 86 | 87 | const b = new Hypercore({ core: a.core, encryptionKey: null }) 88 | await b.ready() 89 | 90 | const c = new Hypercore({ core: a.core, encryptionKey: null }) 91 | await c.ready() 92 | await c.setEncryptionKey(b4a.alloc(32)) 93 | 94 | const d = new Hypercore({ core: a.core, encryptionKey: null }) 95 | await d.ready() 96 | 97 | t.absent(a.encryption) 98 | t.absent(b.encryption) 99 | t.ok(c.encryption) 100 | t.absent(d.encryption) 101 | 102 | await b.close() 103 | await c.close() 104 | await d.close() 105 | }) 106 | 107 | test('sessions - cannot set checkout if name not set', async function (t) { 108 | 
const storage = await createStorage(t) 109 | const core = new Hypercore(storage) 110 | await core.append('Block0') 111 | 112 | t.exception( 113 | () => core.session({ checkout: 0 }), 114 | /Checkouts are only supported on atoms or named sessions/ 115 | ) 116 | 117 | t.execution(() => core.session({ checkout: 0, name: 'named' }), 'sanity check on happy path') 118 | 119 | await core.close() 120 | }) 121 | 122 | test('sessions - checkout breaks prologue', async function (t) { 123 | const storage = await createStorage(t) 124 | const storage2 = await createStorage(t) 125 | 126 | uncaughts.on(noop) 127 | 128 | const core = new Hypercore(storage) 129 | 130 | for (let i = 0; i < 10; i++) await core.append(b4a.from([i])) 131 | 132 | const prologued = new Hypercore(storage2, { 133 | manifest: { 134 | ...core.manifest, 135 | prologue: { 136 | hash: await core.treeHash(), 137 | length: core.length 138 | } 139 | } 140 | }) 141 | 142 | await prologued.ready() 143 | await prologued.core.copyPrologue(core.state) 144 | 145 | let session 146 | try { 147 | session = prologued.session({ name: 'fail', checkout: 7 }) 148 | await session.ready() 149 | t.fail() 150 | } catch (err) { 151 | t.pass() 152 | } 153 | 154 | await session.close() 155 | await prologued.close() 156 | await core.close() 157 | 158 | uncaughts.off(noop) 159 | }) 160 | 161 | function noop () {} 162 | -------------------------------------------------------------------------------- /test/snapshots.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const createTempDir = require('test-tmp') 3 | const b4a = require('b4a') 4 | const Hypercore = require('../') 5 | const { replicate, unreplicate, create, createStorage } = require('./helpers') 6 | 7 | test('snapshot does not change when original gets modified', async function (t) { 8 | const core = await create(t) 9 | 10 | await core.append('block0') 11 | await core.append('block1') 12 | await core.append('block2') 13 | 14 | const snap = core.snapshot() 15 | await snap.ready() 16 | 17 | t.is(snap.length, 3, 'correct length') 18 | t.is(snap.signedLength, 3, 'correct signed length') 19 | t.is(b4a.toString(await snap.get(2)), 'block2', 'block exists') 20 | 21 | await core.append('Block3') 22 | t.is(snap.length, 3, 'correct length') 23 | t.is(snap.signedLength, 3, 'correct signed length') 24 | t.is(b4a.toString(await snap.get(2)), 'block2', 'block exists') 25 | 26 | await core.truncate(3) 27 | t.is(snap.length, 3, 'correct length') 28 | t.is(snap.signedLength, 3, 'correct signed length') 29 | t.is(b4a.toString(await snap.get(2)), 'block2', 'block exists') 30 | 31 | await core.truncate(2) 32 | t.is(snap.length, 3, 'correct length') 33 | t.is(snap.signedLength, 2, 'signed length now lower since it truncated below snap') 34 | t.is(b4a.toString(await snap.get(2)), 'block2', 'block exists') 35 | 36 | await core.append('new Block2') 37 | t.is(snap.length, 3, 'correct length') 38 | t.is(snap.signedLength, 2, 'signed length remains at lowest value after appending again to the original') 39 | t.is(b4a.toString(await snap.get(2)), 'block2', 'Old block still (snapshot did not change)') 40 | 41 | { 42 | const res = [] 43 | for await (const b of snap.createReadStream()) { 44 | res.push(b4a.toString(b)) 45 | } 46 | t.alike(res, ['block0', 'block1', 'block2']) 47 | } 48 | 49 | await snap.close() 50 | }) 51 | 52 | test('implicit snapshot - gets are snapshotted at call time', async function (t) { 53 | t.plan(8) 54 | 55 | const core = await create(t) 
56 | const clone = await create(t, core.key, { valueEncoding: 'utf-8' }) 57 | 58 | clone.on('truncate', function (len) { 59 | t.is(len, 2, 'remote truncation') 60 | }) 61 | 62 | core.on('truncate', function (len) { 63 | t.is(len, 2, 'local truncation') 64 | }) 65 | 66 | await core.append('block #0.0') 67 | await core.append('block #1.0') 68 | await core.append('block #2.0') 69 | 70 | const r1 = replicate(core, clone, t) 71 | 72 | t.is(await clone.get(0), 'block #0.0') 73 | 74 | await unreplicate(r1) 75 | 76 | const range1 = core.download({ start: 0, end: 4 }) 77 | const range2 = clone.download({ start: 0, end: 4 }) 78 | 79 | const p2 = clone.get(1) 80 | const p3 = clone.get(2) 81 | 82 | const exception = t.exception(p3, 'should fail cause snapshot not available') 83 | 84 | await core.truncate(2) 85 | 86 | await core.append('block #2.1') 87 | await core.append('block #3.1') 88 | 89 | replicate(core, clone, t) 90 | 91 | t.is(await p2, 'block #1.0') 92 | await exception 93 | 94 | t.is(await clone.get(2), 'block #2.1') 95 | 96 | await range1.done() 97 | t.pass('local range finished') 98 | 99 | await range2.done() 100 | t.pass('remote range finished') 101 | }) 102 | 103 | test('snapshots wait for ready', async function (t) { 104 | t.plan(8) 105 | 106 | const dir = await createTempDir(t) 107 | const db = await createStorage(t, dir) 108 | 109 | const core = new Hypercore(db) 110 | await core.ready() 111 | 112 | const s1 = core.snapshot() 113 | 114 | await core.append('block #0.0') 115 | await core.append('block #1.0') 116 | 117 | const s2 = core.snapshot() 118 | 119 | await core.append('block #2.0') 120 | 121 | t.is(s1.length, 0, 'empty snapshot') 122 | t.is(s2.length, 2, 'set after ready') 123 | 124 | await core.append('block #3.0') 125 | 126 | // check that they are static 127 | t.is(s1.length, 0, 'is static') 128 | t.is(s2.length, 2, 'is static') 129 | 130 | await core.close() 131 | await s1.close() 132 | await s2.close() 133 | await db.close() 134 | 135 | const db2 = await createStorage(t, dir) 136 | const coreCopy = new Hypercore(db2) 137 | 138 | // if a snapshot is made on an opening core, it should wait until opened 139 | const s3 = coreCopy.snapshot() 140 | 141 | await s3.ready() 142 | 143 | t.is(s3.length, 4, 'waited for ready') 144 | 145 | const s4 = coreCopy.snapshot() 146 | await s4.ready() 147 | 148 | t.is(s4.length, 4) 149 | 150 | await s3.update() 151 | await s4.update() 152 | 153 | t.is(s3.length, 4, 'no changes') 154 | t.is(s4.length, 4, 'no changes') 155 | 156 | await coreCopy.close() 157 | await s3.close() 158 | await s4.close() 159 | }) 160 | 161 | test('snapshots are consistent', async function (t) { 162 | t.plan(6) 163 | 164 | const core = await create(t) 165 | const clone = await create(t, core.key) 166 | 167 | await core.append('block #0.0') 168 | await core.append('block #1.0') 169 | await core.append('block #2.0') 170 | 171 | replicate(clone, core, t) 172 | 173 | await clone.update({ wait: true }) 174 | 175 | const snapshot = clone.snapshot({ valueEncoding: 'utf-8' }) 176 | await snapshot.ready() 177 | 178 | t.is(snapshot.length, 3) 179 | 180 | t.is(await snapshot.get(1), 'block #1.0') 181 | 182 | const promise = new Promise(resolve => clone.once('truncate', resolve)) 183 | 184 | await core.truncate(1) 185 | await core.append('block #1.1') 186 | await core.append('block #2.1') 187 | 188 | // wait for clone to update 189 | await promise 190 | 191 | t.is(clone.fork, 1, 'clone updated') 192 | 193 | const b = snapshot.get(0) 194 | t.exception(snapshot.get(1)) 195 | 
t.exception(snapshot.get(2)) 196 | t.is(await b, 'block #0.0') 197 | 198 | await snapshot.close() 199 | }) 200 | 201 | test('snapshot over named batch persists after truncate', async function (t) { 202 | t.plan(8) 203 | 204 | const core = await create(t) 205 | 206 | await core.append('block #0.0') 207 | await core.append('block #1.0') 208 | await core.append('block #2.0') 209 | 210 | const session = core.session({ name: 'session' }) 211 | 212 | const snapshot = session.snapshot({ valueEncoding: 'utf-8' }) 213 | await snapshot.ready() 214 | 215 | await session.close() 216 | 217 | t.is(snapshot.length, 3) 218 | 219 | t.is(await snapshot.get(1), 'block #1.0') 220 | 221 | await core.truncate(1) 222 | await core.append('block #1.1') 223 | 224 | t.is(core.fork, 1, 'clone updated') 225 | t.is(core.length, 2, 'core updated') 226 | 227 | // t.is(snapshot.fork, 0, 'snapshot remains') 228 | t.is(snapshot.length, 3, 'snapshot remains') 229 | 230 | t.is(await snapshot.get(0), 'block #0.0') 231 | t.is(await snapshot.get(1), 'block #1.0') 232 | t.is(await snapshot.get(2), 'block #2.0') 233 | 234 | await core.close() 235 | await snapshot.close() 236 | }) 237 | -------------------------------------------------------------------------------- /test/streams.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | 4 | const { create } = require('./helpers') 5 | 6 | test('basic read stream', async function (t) { 7 | const core = await create(t) 8 | 9 | const expected = [ 10 | 'hello', 11 | 'world', 12 | 'verden', 13 | 'welt' 14 | ] 15 | 16 | await core.append(expected) 17 | 18 | for await (const data of core.createReadStream()) { 19 | t.alike(b4a.toString(data), expected.shift()) 20 | } 21 | 22 | t.is(expected.length, 0) 23 | }) 24 | 25 | test('read stream with start / end', async function (t) { 26 | const core = await create(t) 27 | 28 | const datas = [ 29 | 'hello', 30 | 'world', 31 | 'verden', 32 | 'welt' 33 | ] 34 | 35 | await core.append(datas) 36 | 37 | { 38 | const expected = datas.slice(1) 39 | 40 | for await (const data of core.createReadStream({ start: 1 })) { 41 | t.alike(b4a.toString(data), expected.shift()) 42 | } 43 | 44 | t.is(expected.length, 0) 45 | } 46 | 47 | { 48 | const expected = datas.slice(2, 3) 49 | 50 | for await (const data of core.createReadStream({ start: 2, end: 3 })) { 51 | t.alike(b4a.toString(data), expected.shift()) 52 | } 53 | 54 | t.is(expected.length, 0) 55 | } 56 | }) 57 | 58 | test('read stream with end and live (live should be ignored)', async function (t) { 59 | const core = await create(t) 60 | 61 | const initial = [ 62 | 'alpha', 63 | 'beta', 64 | 'gamma', 65 | 'delta', 66 | 'epsilon' 67 | ] 68 | 69 | await core.append(initial) 70 | 71 | const expected = [ 72 | 'alpha', 73 | 'beta', 74 | 'gamma' 75 | ] 76 | 77 | const stream = core.createReadStream({ end: 3, live: true }) 78 | const collected = [] 79 | 80 | for await (const data of stream) { 81 | collected.push(b4a.toString(data)) 82 | } 83 | 84 | t.alike(collected, expected) 85 | }) 86 | 87 | test('basic write+read stream', async function (t) { 88 | const core = await create(t) 89 | 90 | const expected = [ 91 | 'hello', 92 | 'world', 93 | 'verden', 94 | 'welt' 95 | ] 96 | 97 | const ws = core.createWriteStream() 98 | 99 | for (const data of expected) ws.write(data) 100 | ws.end() 101 | 102 | await new Promise(resolve => ws.on('finish', resolve)) 103 | 104 | for await (const data of core.createReadStream()) { 105 | 
t.alike(b4a.toString(data), expected.shift()) 106 | } 107 | 108 | t.is(expected.length, 0) 109 | }) 110 | 111 | test('basic byte stream', async function (t) { 112 | const core = await create(t) 113 | 114 | const expected = [ 115 | 'hello', 116 | 'world', 117 | 'verden', 118 | 'welt' 119 | ] 120 | 121 | await core.append(expected) 122 | 123 | for await (const data of core.createByteStream()) { 124 | t.alike(b4a.toString(data), expected.shift()) 125 | } 126 | 127 | t.is(expected.length, 0) 128 | }) 129 | 130 | test('basic byte stream with byteOffset / byteLength', async function (t) { 131 | const core = await create(t) 132 | 133 | await core.append([ 134 | 'hello', 135 | 'world', 136 | 'verden', 137 | 'welt' 138 | ]) 139 | 140 | const opts = { byteOffset: 5, byteLength: 11 } 141 | const expected = [ 142 | 'world', 143 | 'verden' 144 | ] 145 | 146 | for await (const data of core.createByteStream(opts)) { 147 | t.alike(b4a.toString(data), expected.shift()) 148 | } 149 | 150 | t.is(expected.length, 0) 151 | }) 152 | 153 | test('basic byte stream with byteOffset / byteLength of a core that has valueEncoding', async function (t) { 154 | const core = await create(t, { valueEncoding: 'utf8' }) 155 | 156 | await core.append([ 157 | 'hello', 158 | 'world', 159 | 'verden', 160 | 'welt' 161 | ]) 162 | 163 | const opts = { byteOffset: 5, byteLength: 11 } 164 | const expected = [ 165 | 'world', 166 | 'verden' 167 | ] 168 | 169 | for await (const data of core.createByteStream(opts)) { 170 | t.ok(b4a.isBuffer(data)) 171 | t.alike(b4a.toString(data), expected.shift()) 172 | } 173 | 174 | t.is(expected.length, 0) 175 | }) 176 | 177 | test('byte stream with lower byteLength than byteOffset', async function (t) { 178 | const core = await create(t) 179 | 180 | await core.append([ 181 | 'hello', 182 | 'world', 183 | 'verden', 184 | 'welt' 185 | ]) 186 | 187 | const opts = { byteOffset: 10, byteLength: 6 } 188 | const expected = [ 189 | 'verden' 190 | ] 191 | 192 | for await (const data of core.createByteStream(opts)) { 193 | t.alike(b4a.toString(data), expected.shift()) 194 | } 195 | 196 | t.is(expected.length, 0) 197 | }) 198 | 199 | test('basic byte stream with custom byteOffset but default byteLength', async function (t) { 200 | const core = await create(t) 201 | 202 | await core.append([ 203 | 'hello', 204 | 'world', 205 | 'verden', 206 | 'welt' 207 | ]) 208 | 209 | const opts = { byteOffset: 10 } 210 | const expected = [ 211 | 'verden', 212 | 'welt' 213 | ] 214 | 215 | for await (const data of core.createByteStream(opts)) { 216 | t.alike(b4a.toString(data), expected.shift()) 217 | } 218 | 219 | t.is(expected.length, 0) 220 | }) 221 | -------------------------------------------------------------------------------- /test/timeouts.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const { create, createStorage } = require('./helpers') 3 | const Hypercore = require('../') 4 | const b4a = require('b4a') 5 | 6 | test('core and session timeout property', async function (t) { 7 | t.plan(3) 8 | 9 | const storage = await createStorage(t) 10 | const core = new Hypercore(storage) 11 | t.is(core.timeout, 0) 12 | 13 | const a = core.session() 14 | t.is(a.timeout, 0) 15 | 16 | const b = core.session({ timeout: 50 }) 17 | t.is(b.timeout, 50) 18 | 19 | await new Promise(resolve => setTimeout(resolve, 100)) 20 | 21 | await core.close() 22 | await a.close() 23 | await b.close() 24 | }) 25 | 26 | test('core session inherits timeout property', async 
function (t) { 27 | t.plan(3) 28 | 29 | const storage = await createStorage(t) 30 | const core = new Hypercore(storage, { timeout: 50 }) 31 | t.is(core.timeout, 50) 32 | 33 | const a = core.session() 34 | t.is(a.timeout, 50) 35 | 36 | const b = core.session({ timeout: 0 }) 37 | t.is(b.timeout, 0) 38 | 39 | await new Promise(resolve => setTimeout(resolve, 100)) 40 | 41 | await core.close() 42 | await a.close() 43 | await b.close() 44 | }) 45 | 46 | test('get before timeout', async function (t) { 47 | t.plan(1) 48 | 49 | const core = await create(t) 50 | 51 | const get = core.get(0, { timeout: 30000 }) 52 | setTimeout(() => core.append('hi'), 100) 53 | t.alike(await get, b4a.from('hi')) 54 | }) 55 | 56 | test('get after timeout', async function (t) { 57 | t.plan(1) 58 | 59 | const core = await create(t) 60 | 61 | try { 62 | await core.get(0, { timeout: 1 }) 63 | t.fail('should not get a block') 64 | } catch (err) { 65 | t.is(err.code, 'REQUEST_TIMEOUT') 66 | } 67 | }) 68 | 69 | test('get after timeout with constructor', async function (t) { 70 | t.plan(1) 71 | 72 | const core = await create(t, { timeout: 1 }) 73 | 74 | try { 75 | await core.get(0) 76 | t.fail('should not get a block') 77 | } catch (err) { 78 | t.is(err.code, 'REQUEST_TIMEOUT') 79 | } 80 | }) 81 | 82 | test('session get after timeout', async function (t) { 83 | t.plan(1) 84 | 85 | const core = await create(t) 86 | const session = core.session({ timeout: 1 }) 87 | 88 | try { 89 | await session.get(0) 90 | t.fail('should not get a block') 91 | } catch (err) { 92 | t.is(err.code, 'REQUEST_TIMEOUT') 93 | } 94 | 95 | await session.close() 96 | }) 97 | 98 | test('session get after inherited timeout', async function (t) { 99 | t.plan(1) 100 | 101 | const core = await create(t, { timeout: 1 }) 102 | const session = core.session() 103 | 104 | try { 105 | await session.get(0) 106 | t.fail('should not get a block') 107 | } catch (err) { 108 | t.is(err.code, 'REQUEST_TIMEOUT') 109 | } 110 | 111 | await session.close() 112 | }) 113 | 114 | test('core constructor timeout but disable on get', async function (t) { 115 | t.plan(1) 116 | 117 | const core = await create(t, { timeout: 1 }) 118 | 119 | const get = core.get(0, { timeout: 0 }) 120 | setTimeout(() => core.append('hi'), 100) 121 | t.alike(await get, b4a.from('hi')) 122 | }) 123 | 124 | test('core constructor timeout but increase on get', async function (t) { 125 | t.plan(1) 126 | 127 | const core = await create(t, { timeout: 1 }) 128 | 129 | const get = core.get(0, { timeout: 30000 }) 130 | setTimeout(() => core.append('hi'), 100) 131 | t.alike(await get, b4a.from('hi')) 132 | }) 133 | 134 | test('block request gets cancelled before timeout', async function (t) { 135 | t.plan(1) 136 | 137 | const core = await create(t) 138 | 139 | const a = core.session() 140 | const promise = a.get(0, { timeout: 1 }) 141 | const close = a.close() 142 | 143 | try { 144 | await promise 145 | t.fail('should have failed') 146 | } catch (err) { 147 | t.is(err.code, 'SESSION_CLOSED') 148 | } 149 | 150 | await close 151 | }) 152 | -------------------------------------------------------------------------------- /test/user-data.js: -------------------------------------------------------------------------------- 1 | const test = require('brittle') 2 | const b4a = require('b4a') 3 | const { create, createStored } = require('./helpers') 4 | 5 | test('userdata - can set through setUserData', async function (t) { 6 | const core = await create(t) 7 | await core.setUserData('hello', b4a.from('world')) 8 | 9 | 
t.alike(await core.getUserData('hello'), b4a.from('world')) 10 | }) 11 | 12 | test('userdata - can set through constructor option', async function (t) { 13 | const core = await create(t, { 14 | userData: { 15 | hello: b4a.from('world') 16 | } 17 | }) 18 | 19 | t.alike(await core.getUserData('hello'), b4a.from('world')) 20 | }) 21 | 22 | test('userdata - persists across restarts', async function (t) { 23 | const create = await createStored(t) 24 | 25 | let core = await create({ 26 | userData: { 27 | hello: b4a.from('world') 28 | } 29 | }) 30 | await core.ready() 31 | 32 | await core.close() 33 | core = await create({ 34 | userData: { 35 | other: b4a.from('another') 36 | } 37 | }) 38 | 39 | t.alike(await core.getUserData('hello'), b4a.from('world')) 40 | t.alike(await core.getUserData('other'), b4a.from('another')) 41 | 42 | await core.close() 43 | }) 44 | 45 | test('userdata - big userdata gets swapped to external header', async function (t) { 46 | const core = await create(t) 47 | await core.setUserData('hello', b4a.alloc(20000)) 48 | await core.setUserData('world', b4a.alloc(20000)) 49 | await core.setUserData('world2', b4a.alloc(20000)) 50 | 51 | t.alike(await core.getUserData('hello'), b4a.alloc(20000)) 52 | t.alike(await core.getUserData('world'), b4a.alloc(20000)) 53 | t.alike(await core.getUserData('world2'), b4a.alloc(20000)) 54 | }) 55 | --------------------------------------------------------------------------------
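A closing usage sketch that ties the user-data tests above together as a
stand-alone program. It constructs Hypercore with a plain directory path, as
test/purge.js does; the package name and storage path here are assumptions
made for the sake of the example:

const Hypercore = require('hypercore')
const b4a = require('b4a')

async function main () {
  const core = new Hypercore('./example-storage')
  await core.ready()

  // user data is a small persistent key/value store kept next to the log
  await core.setUserData('hello', b4a.from('world'))
  console.log(b4a.toString(await core.getUserData('hello'))) // -> 'world'

  await core.close()
}

main().catch(console.error)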