├── test ├── fixtures │ ├── keys │ │ ├── LOCK │ │ ├── 000008.log │ │ ├── identity-keys │ │ │ ├── LOCK │ │ │ ├── 000855.log │ │ │ ├── CURRENT │ │ │ ├── 000005.ldb │ │ │ ├── 000010.ldb │ │ │ ├── 000015.ldb │ │ │ ├── 000020.ldb │ │ │ ├── MANIFEST-000854 │ │ │ ├── LOG │ │ │ ├── LOG.old │ │ │ ├── 02a38336e3a47f545a172c9f77674525471ebeda7d6c86140e7a778f67ded92260.json │ │ │ ├── 032f7b6ef0432b572b45fcaf27e7f6757cd4123ff5c5266365bec82129b8c5f214.json │ │ │ ├── 0358df8eb5def772917748fdf8a8b146581ad2041eae48d66cc6865f11783499a6.json │ │ │ └── 03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c.json │ │ ├── signing-keys │ │ │ ├── LOCK │ │ │ ├── 000041.log │ │ │ ├── CURRENT │ │ │ ├── 000007.ldb │ │ │ ├── 000014.ldb │ │ │ ├── 000021.ldb │ │ │ ├── 000028.ldb │ │ │ ├── MANIFEST-000040 │ │ │ ├── userA.json │ │ │ ├── userB.json │ │ │ ├── userC.json │ │ │ ├── userD.json │ │ │ ├── LOG │ │ │ └── LOG.old │ │ ├── CURRENT │ │ ├── 000007.ldb │ │ ├── MANIFEST-000006 │ │ ├── LOG.old │ │ └── LOG │ ├── big-log.fixture.js │ ├── v0-entries.fixture.js │ └── v1-entries.fixture.js ├── .eslintrc ├── mocha.opts ├── browser │ └── index.html ├── utils │ └── log-creator.js ├── browser.spec.js ├── log-join-concurrent.spec.js ├── log-append.spec.js ├── replicate.spec.js ├── log-references.spec.js ├── entry-io.spec.js └── signed-log.spec.js ├── circle.yml ├── .eslintrc ├── .eslintignore ├── src ├── utils │ ├── is-defined.js │ ├── index.js │ ├── find-uniques.js │ └── difference.js ├── default-access-controller.js ├── g-set.js ├── entry-index.js ├── log-errors.js ├── lamport-clock.js ├── log-sorting.js ├── log-io.js ├── entry-io.js └── entry.js ├── docs ├── fonts │ ├── OpenSans-Bold-webfont.eot │ ├── OpenSans-Bold-webfont.woff │ ├── OpenSans-Italic-webfont.eot │ ├── OpenSans-Italic-webfont.woff │ ├── OpenSans-Light-webfont.eot │ ├── OpenSans-Light-webfont.woff │ ├── OpenSans-Regular-webfont.eot │ ├── OpenSans-Regular-webfont.woff │ ├── OpenSans-BoldItalic-webfont.eot │ ├── OpenSans-BoldItalic-webfont.woff │ ├── OpenSans-LightItalic-webfont.eot │ └── OpenSans-LightItalic-webfont.woff ├── scripts │ ├── linenumber.js │ └── prettify │ │ └── lang-css.js ├── styles │ ├── prettify-jsdoc.css │ ├── prettify-tomorrow.css │ └── jsdoc-default.css ├── g-set.js.html ├── GSet.html ├── log-sorting.js.html └── entry-io.js.html ├── .npmignore ├── examples ├── browser │ ├── index.html │ ├── index.js │ └── browser.html ├── entry.js └── log.js ├── dist └── ipfslog.min.js.LICENSE.txt ├── .gitignore ├── CONTRIBUTING.md ├── .github ├── workflows │ ├── npm-publish.yml │ ├── npm-publish-next.yml │ └── run-tests.yml ├── FUNDING.yml └── PULL_REQUEST_TEMPLATE.md ├── Makefile ├── conf ├── webpack.config.js ├── webpack.example.config.js └── webpack.tests.config.js ├── benchmarks ├── utils │ └── create-log.js ├── append.js ├── README.md ├── index.js ├── join.js ├── heads.js ├── tails.js ├── values.js ├── tail-hashes.js ├── to-multihash.js ├── traverse.js ├── to-string.js ├── has.js ├── get.js ├── find-heads.js ├── from-entry.js ├── from-entry-hash.js ├── from-multihash.js └── legacy │ ├── benchmark-append.js │ ├── benchmark-join.js │ ├── benchmark-join2.js │ ├── browser │ ├── benchmark-append-signed.html │ └── benchmark-join-signed.html │ └── benchmark-from-entry-hash.js ├── LICENSE ├── package.json ├── CODE_OF_CONDUCT.md ├── .circleci └── config.yml ├── API.md └── README.md /test/fixtures/keys/LOCK: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /test/fixtures/keys/000008.log: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/LOCK: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/LOCK: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/000855.log: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/000041.log: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/fixtures/keys/CURRENT: -------------------------------------------------------------------------------- 1 | MANIFEST-000006 2 | -------------------------------------------------------------------------------- /circle.yml: -------------------------------------------------------------------------------- 1 | machine: 2 | node: 3 | version: 10.15.0 4 | -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/CURRENT: -------------------------------------------------------------------------------- 1 | MANIFEST-000854 2 | -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/CURRENT: -------------------------------------------------------------------------------- 1 | MANIFEST-000040 2 | -------------------------------------------------------------------------------- /.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ["@orbitdb/eslint-config-orbitdb"] 3 | } 4 | -------------------------------------------------------------------------------- /test/.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "node": true, 4 | "mocha": true 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | dist/** 2 | lib/** 3 | examples/browser/bundle.js 4 | examples/browser/lib/** 5 | -------------------------------------------------------------------------------- /test/mocha.opts: -------------------------------------------------------------------------------- 1 | --reporter spec 2 | --colors 3 | --recursive 4 | --exit 5 | --exclude test/browser/**/*.js -------------------------------------------------------------------------------- /test/fixtures/keys/000007.ldb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/000007.ldb -------------------------------------------------------------------------------- /src/utils/is-defined.js: -------------------------------------------------------------------------------- 1 | const isDefined = (arg) => arg !== undefined && arg !== null 2 | 3 | export default isDefined 4 | 
-------------------------------------------------------------------------------- /test/fixtures/keys/MANIFEST-000006: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/MANIFEST-000006 -------------------------------------------------------------------------------- /docs/fonts/OpenSans-Bold-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-Bold-webfont.eot -------------------------------------------------------------------------------- /docs/fonts/OpenSans-Bold-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-Bold-webfont.woff -------------------------------------------------------------------------------- /docs/fonts/OpenSans-Italic-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-Italic-webfont.eot -------------------------------------------------------------------------------- /docs/fonts/OpenSans-Italic-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-Italic-webfont.woff -------------------------------------------------------------------------------- /docs/fonts/OpenSans-Light-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-Light-webfont.eot -------------------------------------------------------------------------------- /docs/fonts/OpenSans-Light-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-Light-webfont.woff -------------------------------------------------------------------------------- /docs/fonts/OpenSans-Regular-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-Regular-webfont.eot -------------------------------------------------------------------------------- /docs/fonts/OpenSans-Regular-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-Regular-webfont.woff -------------------------------------------------------------------------------- /docs/fonts/OpenSans-BoldItalic-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-BoldItalic-webfont.eot -------------------------------------------------------------------------------- /docs/fonts/OpenSans-BoldItalic-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-BoldItalic-webfont.woff -------------------------------------------------------------------------------- /docs/fonts/OpenSans-LightItalic-webfont.eot: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-LightItalic-webfont.eot -------------------------------------------------------------------------------- /docs/fonts/OpenSans-LightItalic-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/docs/fonts/OpenSans-LightItalic-webfont.woff -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/000005.ldb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/identity-keys/000005.ldb -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/000010.ldb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/identity-keys/000010.ldb -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/000015.ldb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/identity-keys/000015.ldb -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/000020.ldb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/identity-keys/000020.ldb -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/000007.ldb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/signing-keys/000007.ldb -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/000014.ldb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/signing-keys/000014.ldb -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/000021.ldb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/signing-keys/000021.ldb -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/000028.ldb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/signing-keys/000028.ldb -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/MANIFEST-000854: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/identity-keys/MANIFEST-000854 -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/MANIFEST-000040: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/orbitdb-archive/ipfs-log/HEAD/test/fixtures/keys/signing-keys/MANIFEST-000040 -------------------------------------------------------------------------------- /src/default-access-controller.js: -------------------------------------------------------------------------------- 1 | class AccessController { 2 | async canAppend (entry, identityProvider) { 3 | return true 4 | } 5 | } 6 | 7 | export default AccessController 8 | -------------------------------------------------------------------------------- /test/fixtures/keys/LOG.old: -------------------------------------------------------------------------------- 1 | 2019/05/29-12:52:06.991247 7fca7e369700 Recovering log #3 2 | 2019/05/29-12:52:06.996822 7fca7e369700 Delete type=3 #2 3 | 2019/05/29-12:52:06.996849 7fca7e369700 Delete type=0 #3 4 | -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/userA.json: -------------------------------------------------------------------------------- 1 | {"publicKey":"03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c","privateKey":"0a135ce157a9ccb8375c2fae0d472f1eade4b40b37704c02df923b78ca03c627"} 2 | -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/userB.json: -------------------------------------------------------------------------------- 1 | {"publicKey":"0358df8eb5def772917748fdf8a8b146581ad2041eae48d66cc6865f11783499a6","privateKey":"855f70d3b5224e5af76c23db0792339ca8d968a5a802ff0c5b54d674ef01aaad"} 2 | -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/userC.json: -------------------------------------------------------------------------------- 1 | {"publicKey":"032f7b6ef0432b572b45fcaf27e7f6757cd4123ff5c5266365bec82129b8c5f214","privateKey":"291d4dc915d81e9ebe5627c3f5e7309e819e721ee75e63286baa913497d61c78"} 2 | -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/userD.json: -------------------------------------------------------------------------------- 1 | {"publicKey":"02a38336e3a47f545a172c9f77674525471ebeda7d6c86140e7a778f67ded92260","privateKey":"faa2d697318a6f8daeb8f4189fc657e7ae1b24e18c91c3bb9b95ad3c0cc050f8"} 2 | -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/LOG: -------------------------------------------------------------------------------- 1 | 2019/03/27-11:43:50.355576 7f06127fc700 Recovering log #39 2 | 2019/03/27-11:43:50.370199 7f06127fc700 Delete type=0 #39 3 | 2019/03/27-11:43:50.370296 7f06127fc700 Delete type=3 #38 4 | -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/LOG: -------------------------------------------------------------------------------- 1 | 2019/03/27-11:44:01.835874 7f06117fa700 Recovering log #853 2 | 2019/03/27-11:44:01.849182 7f06117fa700 Delete type=3 #852 3 | 2019/03/27-11:44:01.849215 7f06117fa700 Delete type=0 #853 4 | -------------------------------------------------------------------------------- /test/fixtures/keys/signing-keys/LOG.old: -------------------------------------------------------------------------------- 1 | 2019/03/27-11:43:50.315665 7f0610ff9700 Recovering log #37 2 | 2019/03/27-11:43:50.331030 7f0610ff9700 Delete type=3 #36 3 | 
2019/03/27-11:43:50.331192 7f0610ff9700 Delete type=0 #37 4 | -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/LOG.old: -------------------------------------------------------------------------------- 1 | 2019/03/27-11:44:01.798320 7f06117fa700 Recovering log #851 2 | 2019/03/27-11:44:01.813702 7f06117fa700 Delete type=0 #851 3 | 2019/03/27-11:44:01.813995 7f06117fa700 Delete type=3 #850 4 | -------------------------------------------------------------------------------- /src/utils/index.js: -------------------------------------------------------------------------------- 1 | import difference from './difference.js' 2 | import findUniques from './find-uniques.js' 3 | import isDefined from './is-defined.js' 4 | 5 | export { 6 | difference, 7 | findUniques, 8 | isDefined 9 | } 10 | -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/02a38336e3a47f545a172c9f77674525471ebeda7d6c86140e7a778f67ded92260.json: -------------------------------------------------------------------------------- 1 | {"publicKey":"030f4141da9bb4bc8d9cc9a6a01cdf0e8bc0c0f90fd28646f93d0de4e93b723e31","privateKey":"7c6140e9ae4c70eb11600b3d550cc6aac45511b5a660f4e75fe9a7c4e6d1c7b7"} 2 | -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/032f7b6ef0432b572b45fcaf27e7f6757cd4123ff5c5266365bec82129b8c5f214.json: -------------------------------------------------------------------------------- 1 | {"publicKey":"0208290bc83e02be25a65be2e067e4d2ecc55ae88e0c073b5d48887d45e7e0e393","privateKey":"2b487a932233c8691024c951faaeac207be161797bdda7bd934c0125012a5551"} 2 | -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/0358df8eb5def772917748fdf8a8b146581ad2041eae48d66cc6865f11783499a6.json: -------------------------------------------------------------------------------- 1 | {"publicKey":"0276b51c36dc6a117aef6f8ecaa49c27c309b29bbc97218e21cc0d7c903a21f376","privateKey":"1cd65d23d72932f5ca2328988d19a5b11fbab1f4c921ef2471768f1773bd56de"} 2 | -------------------------------------------------------------------------------- /test/fixtures/keys/identity-keys/03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c.json: -------------------------------------------------------------------------------- 1 | {"publicKey":"038bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711ce","privateKey":"97f64ca2bf7bd6aa2136eb0aa3ce512433bd903b91d48b2208052d6ff286d080"} 2 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | benchmarks 2 | examples 3 | test 4 | lib 5 | ipfs 6 | .github 7 | .circleci 8 | .eslintignore 9 | .eslintrc 10 | webpack.config.js 11 | webpack.example.config.js 12 | webpack.tests.config.js 13 | dist/ipfslog.min.js.map 14 | docs 15 | 16 | Makefile 17 | circle.yml 18 | -------------------------------------------------------------------------------- /examples/browser/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 |
[markup lost in extraction: page titled "ipfs-log example", containing the output element and the bundle.js script used by examples/browser/index.js]
--------------------------------------------------------------------------------
/test/fixtures/keys/LOG:
--------------------------------------------------------------------------------
1 | 2019/05/29-12:52:07.013848 7fca7db68700 Recovering log #5
2 | 2019/05/29-12:52:07.013874 7fca7db68700 Level-0 table #7: started
3 | 2019/05/29-12:52:07.015813 7fca7db68700 Level-0 table #7: 408 bytes OK
4 | 2019/05/29-12:52:07.020773 7fca7db68700 Delete type=3 #4
5 | 2019/05/29-12:52:07.020800 7fca7db68700 Delete type=0 #5
6 | 


--------------------------------------------------------------------------------
/src/utils/find-uniques.js:
--------------------------------------------------------------------------------
 1 | function findUniques (value, key) {
 2 |   // Create an index of the collection
 3 |   const uniques = {}
 4 |   const get = e => uniques[e]
 5 |   const addToIndex = e => (uniques[key ? e[key] : e] = e)
 6 |   value.forEach(addToIndex)
 7 |   return Object.keys(uniques).map(get)
 8 | }
 9 | 
10 | export default findUniques
11 | 

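A quick usage sketch (hypothetical data, not a file in this repository): `findUniques` deduplicates a collection, optionally keyed by a field; when several items share a key, the one written to the index last wins.

```js
import findUniques from './find-uniques.js'

// Primitives: the values themselves are the index keys
findUniques([1, 2, 2, 3, 1]) // => [1, 2, 3]

// Objects: deduplicate by a field; the last entry with a given key wins
const entries = [
  { hash: 'a', seq: 1 },
  { hash: 'b', seq: 2 },
  { hash: 'a', seq: 3 }
]
findUniques(entries, 'hash') // => [{ hash: 'a', seq: 3 }, { hash: 'b', seq: 2 }]
```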

--------------------------------------------------------------------------------
/examples/entry.js:
--------------------------------------------------------------------------------
 1 | const Log = require('../src/log')
 2 | const EntryIO = require('../src/entry-io')
 3 | const Ipfs = require('ipfs')
 4 | const { MemStore } = require('orbit-db-test-utils')
 5 | const IdentityProvider = require('orbit-db-identity-provider')
 6 | 
 7 | module.exports = {
 8 |   Log,
 9 |   EntryIO,
10 |   Ipfs,
11 |   MemStore,
12 |   IdentityProvider
13 | }
14 | 

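For context, `conf/webpack.example.config.js` uses this module as its entry and bundles it into `examples/browser/bundle.js` with `libraryTarget: 'global'`, so a page that loads the bundle can reach these exports globally. A hypothetical sketch of a consuming page's script, mirroring the pattern in `examples/browser/index.js` (repo path and id are assumptions):

```js
// Assumes bundle.js is built and loaded; Log, Ipfs and IdentityProvider
// are the exports above, exposed globally by the webpack config.
const ipfs = new Ipfs({ repo: './ipfs-log/examples/browser', start: false })
ipfs.on('ready', async () => {
  const identity = await IdentityProvider.createIdentity({ id: 'exampleUser' })
  const log = new Log(ipfs, identity, { logId: 'example-log' })
  await log.append('hello from the bundle')
  console.log(log.toString())
})
```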

--------------------------------------------------------------------------------
/dist/ipfslog.min.js.LICENSE.txt:
--------------------------------------------------------------------------------
 1 | /**
 2 |  * [js-sha3]{@link https://github.com/emn178/js-sha3}
 3 |  *
 4 |  * @version 0.8.0
 5 |  * @author Chen, Yi-Cyuan [emn178@gmail.com]
 6 |  * @copyright Chen, Yi-Cyuan 2015-2018
 7 |  * @license MIT
 8 |  */
 9 | 
10 | //! stable.js 0.1.8, https://github.com/Two-Screen/stable
11 | 
12 | //! © 2018 Angry Bytes and contributors. MIT licensed.
13 | 


--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | node_modules/
 2 | coverage/
 3 | ipfs/
 4 | ipfs-log/
 5 | orbitdb/
 6 | dist/*.js.map
 7 | examples/browser/bundle.js
 8 | test/keystore/
 9 | test-keys/
10 | test/browser/*bundle.js*
11 | test/browser/bundle.js.map
12 | ipfs-log-benchmarks/
13 | examples/browser/bundle.js.map
14 | examples/browser/ipfs.min.js
15 | examples/browser/ipfslog.min.js
16 | .nyc_output
17 | lib/es5
18 | 


--------------------------------------------------------------------------------
/src/g-set.js:
--------------------------------------------------------------------------------
 1 | /**
 2 |  * Interface for G-Set CRDT
 3 |  *
 4 |  * From:
 5 |  * "A comprehensive study of Convergent and Commutative Replicated Data Types"
 6 |  * https://hal.inria.fr/inria-00555588
 7 |  */
 8 | class GSet {
 9 |   constructor (values) {} // eslint-disable-line
10 |   append (value) {}
11 |   merge (set) {}
12 |   get (value) {}
13 |   has (value) {}
14 |   get values () {}
15 |   get length () {}
16 | }
17 | 
18 | export default GSet
19 | 

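Because every method above is an empty stub, a concrete implementation can make the contract easier to see. The following is a hypothetical in-memory sketch (`MemoryGSet` is not a file in this repository): state only ever grows, and `merge` is a set union, which is what makes the type convergent.

```js
import GSet from './g-set.js'

class MemoryGSet extends GSet {
  constructor (values = []) {
    super(values)
    this._values = new Set(values)
  }

  append (value) { this._values.add(value) }

  // Union with another set; merge order never changes the result
  merge (set) { set.values.forEach(v => this._values.add(v)) }

  get (value) { return this.has(value) ? value : undefined }
  has (value) { return this._values.has(value) }

  get values () { return Array.from(this._values) }
  get length () { return this._values.size }
}
```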

--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
 1 | # Contribute
 2 | 
 3 | Please contribute! Here are some things that would be great:
 4 | 
 5 | - [Open an issue!](https://github.com/orbitdb/ipfs-log/issues/new)
 6 | - Open a pull request!
 7 | - Say hi! :wave:
 8 | 
 9 | Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md).
10 | 
11 | ## Making a PR
12 | 
13 | When making a PR, please run `make build` first and include the build output in your PR. Thanks!
14 | 
15 | ## Something missing in this document?
16 | 
17 | Add it! :) PRs encouraged.
18 | 


--------------------------------------------------------------------------------
/src/entry-index.js:
--------------------------------------------------------------------------------
 1 | class EntryIndex {
 2 |   constructor (entries = {}) {
 3 |     this._cache = entries
 4 |   }
 5 | 
 6 |   set (k, v) {
 7 |     this._cache[k] = v
 8 |   }
 9 | 
10 |   get (k) {
11 |     return this._cache[k]
12 |   }
13 | 
14 |   delete (k) {
15 |     return delete this._cache[k]
16 |   }
17 | 
18 |   add (newItems) {
19 |     this._cache = Object.assign(this._cache, newItems)
20 |   }
21 | 
22 |   get length () {
23 |     return Object.values(this._cache).length
24 |   }
25 | }
26 | 
27 | export default EntryIndex
28 | 

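A short usage sketch (hypothetical hashes, not a file in this repository): the index is a plain hash-to-entry cache, `add` merges a whole batch of entries in place, and `length` counts the cached entries.

```js
import EntryIndex from './entry-index.js'

const index = new EntryIndex()
index.set('hashOne', { payload: 'one' })      // cache a single entry
index.add({ hashTwo: { payload: 'two' } })    // merge a batch of entries
console.log(index.get('hashOne'))             // => { payload: 'one' }
console.log(index.length)                     // => 2
index.delete('hashTwo')                       // => true
```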

--------------------------------------------------------------------------------
/.github/workflows/npm-publish.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | name: Node.js Package
 3 | 
 4 | on:
 5 |   push:
 6 |     tags:
 7 |       - 'v*'
 8 | 
 9 | jobs:
10 |   publish-npm:
11 |     runs-on: ubuntu-latest
12 |     steps:
13 |       - uses: actions/checkout@v2
14 |       - uses: actions/setup-node@v2
15 |         with:
16 |           node-version: 'lts/*'
17 |           registry-url: https://registry.npmjs.org/
18 |       - run: npm ci
19 |       - run: npm test
20 |       - run: npm publish
21 |         env:
22 |           NODE_AUTH_TOKEN: ${{secrets.npm_token}}
23 | 

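In practice this means a release is published by pushing a matching version tag, for example `git tag v1.2.3 && git push origin v1.2.3` (version number hypothetical); the workflow then installs dependencies, runs the tests, and publishes to npm.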

--------------------------------------------------------------------------------
/test/browser/index.html:
--------------------------------------------------------------------------------
1 | [markup lost in extraction: Mocha browser test page titled "Mocha Tests", loading the mocha assets and the browser test bundle, then running the suite]
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | all: build 2 | 3 | deps: 4 | npm install 5 | 6 | test: deps 7 | npm run test 8 | npm run test:browser 9 | 10 | build: test 11 | npm run build 12 | @echo "Build success!" 13 | @echo "Built: 'dist/', 'examples/browser/'" 14 | 15 | clean: 16 | rm -rf ipfs/ 17 | rm -rf ipfs-log-benchmarks/ 18 | rm -rf orbitdb/ 19 | rm -rf node_modules/ 20 | rm -rf coverage/ 21 | rm -rf test/keystore/ 22 | rm test/browser/*bundle* 23 | 24 | clean-dependencies: clean 25 | rm -f package-lock.json 26 | 27 | rebuild: | clean-dependencies build 28 | 29 | .PHONY: test 30 |
--------------------------------------------------------------------------------
/src/log-errors.js:
--------------------------------------------------------------------------------
1 | const IPFSNotDefinedError = () => new Error('IPFS instance not defined') 2 | const LogNotDefinedError = () => new Error('Log instance not defined') 3 | const NotALogError = () => new Error('Given argument is not an instance of Log') 4 | const CannotJoinWithDifferentId = () => new Error('Can\'t join logs with different IDs') 5 | const LtOrLteMustBeStringOrArray = () => new Error('lt or lte must be a string or array of Entries') 6 | 7 | export { 8 | IPFSNotDefinedError, 9 | LogNotDefinedError, 10 | NotALogError, 11 | CannotJoinWithDifferentId, 12 | LtOrLteMustBeStringOrArray 13 | } 14 |
--------------------------------------------------------------------------------
/conf/webpack.config.js:
--------------------------------------------------------------------------------
1 | import path from 'path' 2 | import { fileURLToPath } from 'url' 3 | 4 | export default (env, argv) => { 5 | const __filename = fileURLToPath(import.meta.url) 6 | const __dirname = path.dirname(__filename) 7 | 8 | return { 9 | entry: './src/log.js', 10 | output: { 11 | libraryTarget: 'var', 12 | library: 'Log', 13 | filename: 'ipfslog.min.js' 14 | }, 15 | mode: 'development', 16 | target: 'web', 17 | plugins: [ 18 | ], 19 | resolve: { 20 | modules: [ 21 | 'node_modules', 22 | path.resolve(__dirname, '../node_modules') 23 | ] 24 | } 25 | } 26 | } 27 |
--------------------------------------------------------------------------------
/.github/workflows/npm-publish-next.yml:
--------------------------------------------------------------------------------
1 | --- 2 | name: Node.js Package (next tag) 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | publish-npm: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | - uses: actions/setup-node@v2 15 | with: 16 | node-version: 'lts/*' 17 | registry-url: https://registry.npmjs.org/ 18 | - run: npm ci 19 | - run: npm test 20 | - run: | 21 | npm version prerelease --no-git-tag-version \ 22 | --preid=`git rev-parse --short HEAD` 23 | npm publish --tag next 24 | env: 25 | NODE_AUTH_TOKEN: ${{secrets.npm_token}} 26 |
--------------------------------------------------------------------------------
/docs/scripts/linenumber.js:
--------------------------------------------------------------------------------
1 | /* global document */ 2 | (() => { 3 | const source = document.getElementsByClassName('prettyprint source linenums') 4 | let i = 0 5 | let lineNumber = 0 6 | let lineId 7 | let lines 8 | let totalLines 9 | let anchorHash 10 | 11 | if (source && source[0]) { 12 | anchorHash =
document.location.hash.substring(1) 13 | lines = source[0].getElementsByTagName('li') 14 | totalLines = lines.length 15 | 16 | for (; i < totalLines; i++) { 17 | lineNumber++ 18 | lineId = `line${lineNumber}` 19 | lines[i].id = lineId 20 | if (lineId === anchorHash) { 21 | lines[i].className += ' selected' 22 | } 23 | } 24 | } 25 | })() 26 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: orbitdb 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 13 | -------------------------------------------------------------------------------- /src/utils/difference.js: -------------------------------------------------------------------------------- 1 | function difference (a, b, key) { 2 | // Indices for quick lookups 3 | const processed = {} 4 | const existing = {} 5 | 6 | // Create an index of the first collection 7 | const addToIndex = e => (existing[key ? e[key] : e] = true) 8 | a.forEach(addToIndex) 9 | 10 | // Reduce to entries that are not in the first collection 11 | const reducer = (res, entry) => { 12 | const isInFirst = existing[key ? entry[key] : entry] !== undefined 13 | const hasBeenProcessed = processed[key ? entry[key] : entry] !== undefined 14 | if (!isInFirst && !hasBeenProcessed) { 15 | res.push(entry) 16 | processed[key ? entry[key] : entry] = true 17 | } 18 | return res 19 | } 20 | 21 | return b.reduce(reducer, []) 22 | } 23 | 24 | export default difference 25 | -------------------------------------------------------------------------------- /src/lamport-clock.js: -------------------------------------------------------------------------------- 1 | class LamportClock { 2 | constructor (id, time) { 3 | this.id = id 4 | this.time = time || 0 5 | } 6 | 7 | tick () { 8 | return new LamportClock(this.id, ++this.time) 9 | } 10 | 11 | merge (clock) { 12 | this.time = Math.max(this.time, clock.time) 13 | return new LamportClock(this.id, this.time) 14 | } 15 | 16 | clone () { 17 | return new LamportClock(this.id, this.time) 18 | } 19 | 20 | static compare (a, b) { 21 | // Calculate the "distance" based on the clock, ie. lower or greater 22 | const dist = a.time - b.time 23 | 24 | // If the sequence number is the same (concurrent events), 25 | // and the IDs are different, take the one with a "lower" id 26 | if (dist === 0 && a.id !== b.id) return a.id < b.id ? 
-1 : 1 27 | 28 | return dist 29 | } 30 | } 31 | 32 | export default LamportClock 33 | -------------------------------------------------------------------------------- /benchmarks/utils/create-log.js: -------------------------------------------------------------------------------- 1 | const Keystore = require('orbit-db-keystore') 2 | const IdentityProvider = require('orbit-db-identity-provider') 3 | const leveldown = require('leveldown') 4 | const storage = require('orbit-db-storage-adapter')(leveldown) 5 | 6 | const Log = require('../../src/log') 7 | const AccessController = Log.AccessController 8 | 9 | let store 10 | 11 | const createLog = async (ipfs, logId) => { 12 | if (!store) { 13 | const keysPath = (await ipfs.repo.stat()).repoPath + '/keys' 14 | store = await storage.createStore(keysPath) 15 | } 16 | 17 | const access = new AccessController() 18 | const keystore = new Keystore(store) 19 | const identity = await IdentityProvider.createIdentity({ id: 'userA', keystore }) 20 | const log = new Log(ipfs, identity, { logId: 'A', access }) 21 | return { log, access, identity } 22 | } 23 | 24 | module.exports = createLog 25 | -------------------------------------------------------------------------------- /docs/scripts/prettify/lang-css.js: -------------------------------------------------------------------------------- 1 | PR.registerLangHandler(PR.createSimpleLexer([['pln', /^[\t\n\f\r ]+/, null, ' \t\r\n ']], [['str', /^"(?:[^\n\f\r"\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*"/, null], ['str', /^'(?:[^\n\f\r'\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*'/, null], ['lang-css-str', /^url\(([^"')]*)\)/i], ['kwd', /^(?:url|rgb|!important|@import|@page|@media|@charset|inherit)(?=[^\w-]|$)/i, null], ['lang-css-kw', /^(-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*)\s*:/i], ['com', /^\/\*[^*]*\*+(?:[^*/][^*]*\*+)*\//], ['com', 2 | /^(?:<\!--|--\>)/], ['lit', /^(?:\d+|\d*\.\d+)(?:%|[a-z]+)?/i], ['lit', /^#[\da-f]{3,6}/i], ['pln', /^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i], ['pun', /^[^\s\w"']+/]]), ['css']); PR.registerLangHandler(PR.createSimpleLexer([], [['kwd', /^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i]]), ['css-kw']); PR.registerLangHandler(PR.createSimpleLexer([], [['str', /^[^"')]+/]]), ['css-str']) 3 | -------------------------------------------------------------------------------- /test/fixtures/big-log.fixture.js: -------------------------------------------------------------------------------- 1 | export default `DONE 2 | └─EOF 3 | └─entryC10 4 | └─entryB10 5 | └─entryA10 6 | └─entryC9 7 | └─entryB9 8 | └─entryA9 9 | └─entryC8 10 | └─entryB8 11 | └─entryA8 12 | └─entryC7 13 | └─entryB7 14 | └─entryA7 15 | └─entryC6 16 | └─entryB6 17 | └─entryA6 18 | └─entryC5 19 | └─entryB5 20 | └─entryA5 21 | └─entryC4 22 | └─entryB4 23 | └─entryA4 24 | └─3 25 | └─entryC3 26 | └─entryB3 27 | └─entryA3 28 | └─2 29 | └─entryC2 30 | └─entryB2 31 | └─entryA2 32 | └─1 33 | └─entryC1 34 | └─entryB1 35 | └─entryA1` 36 | -------------------------------------------------------------------------------- /.github/workflows/run-tests.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Run Tests 3 | 4 | on: push 5 | 6 | jobs: 7 | test-node: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v3 11 | - uses: actions/setup-node@v3 12 | with: 13 | node-version: 'lts/*' 14 | registry-url: https://registry.npmjs.org/ 15 | - name: Install dependencies 16 | run: npm ci 17 | - name: Run linter 18 | run: npm run lint 19 | - name: Run tests 20 | run: 
npm run test 21 | test-browser: 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v3 25 | - uses: actions/setup-node@v3 26 | with: 27 | node-version: 'lts/*' 28 | registry-url: https://registry.npmjs.org/ 29 | - name: Install dependencies 30 | run: npm ci 31 | - name: Run linter 32 | run: npm run lint 33 | - name: Run tests 34 | run: npm run test:browser 35 | -------------------------------------------------------------------------------- /benchmarks/append.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | return { log, ipfsd } 9 | }, 10 | cycle: async function ({ log }) { 11 | await log.append('Hello', 32) 12 | }, 13 | teardown: async function ({ ipfsd }) { 14 | await stopIpfs(ipfsd) 15 | } 16 | } 17 | 18 | const baseline = { 19 | while: ({ stats, startTime, baselineLimit }) => { 20 | return stats.count < baselineLimit 21 | } 22 | } 23 | 24 | const stress = { 25 | while: ({ stats, startTime, stressLimit }) => { 26 | return process.hrtime(startTime)[0] < stressLimit 27 | } 28 | } 29 | 30 | module.exports = [ 31 | { name: 'append-baseline', ...base, ...baseline }, 32 | { name: 'append-stress', ...base, ...stress } 33 | ] 34 | -------------------------------------------------------------------------------- /benchmarks/README.md: -------------------------------------------------------------------------------- 1 | # IPFS Log benchmark runner 2 | 3 | ## Usage: 4 | 5 | From the project root, run: 6 | 7 | ``` 8 | node --expose-gc benchmarks/runner/index.js [options] 9 | ``` 10 | 11 | ## Options 12 | 13 | - `--help, -h` [boolean] Show this help 14 | - `--baseline, -b` [boolean] Run baseline benchmarks only 15 | - `--report, -r` [boolean] Output report (Default: false) 16 | - `--list, -l` [boolean] List all benchmarks 17 | - `--grep, -g` Regular expression used to match benchmarks (Default: /.*/) 18 | - `--stressLimit` seconds to run a stress benchmark (Default: 300) 19 | - `--baselineLimit` benchmark cycle limit for baseline benchmarks (Default: 1000) 20 | - `--logLimit` max log size used for baseline benchmarks (inclusive) (Default: 10000) 21 | 22 | ## Examples: 23 | 24 | ```JavaScript 25 | index.js -r -g append-baseline Run a single benchmark (append-baseline) 26 | index.js -r -g values-.*-baseline Run all of the values baseline benchmarks 27 | ``` 28 | -------------------------------------------------------------------------------- /benchmarks/index.js: -------------------------------------------------------------------------------- 1 | const append = require('./append.js') 2 | const findHeads = require('./find-heads.js') 3 | const fromEntry = require('./from-entry.js') 4 | const fromEntryHash = require('./from-entry-hash.js') 5 | const fromMultihash = require('./from-multihash.js') 6 | const get = require('./get.js') 7 | const has = require('./has.js') 8 | const heads = require('./heads.js') 9 | const join = require('./join.js') 10 | const tailHashes = require('./tail-hashes.js') 11 | const tails = require('./tails.js') 12 | const toMultihash = require('./to-multihash.js') 13 | const toString = require('./to-string.js') 14 | const traverse = require('./traverse.js') 15 | const values = require('./values.js') 16 | 17 | module.exports = [ 18 | ...append, 
19 | ...findHeads, 20 | ...fromEntry, 21 | ...fromEntryHash, 22 | ...fromMultihash, 23 | ...get, 24 | ...has, 25 | ...heads, 26 | ...join, 27 | ...tailHashes, 28 | ...tails, 29 | ...toMultihash, 30 | ...toString, 31 | ...traverse, 32 | ...values 33 | ] 34 | -------------------------------------------------------------------------------- /examples/browser/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const IPFS = require('ipfs') 4 | const IdentityProvider = require('orbit-db-identity-provider') 5 | const Log = require('../../src/log') 6 | 7 | const dataPath = './ipfs-log/examples/browser/ipfs/index.html' 8 | const ipfs = new IPFS({ 9 | repo: dataPath, 10 | start: false, 11 | EXPERIMENTAL: { 12 | pubsub: true 13 | } 14 | }) 15 | 16 | ipfs.on('error', (e) => console.error(e)) 17 | 18 | ipfs.on('ready', async () => { 19 | const identity = await IdentityProvider.createIdentity({ id: 'exampleUser' }) 20 | const outputElm = document.getElementById('output') 21 | 22 | // When IPFS is ready, add some log entries 23 | const log = new Log(ipfs, identity, { logId: 'example-log' }) 24 | 25 | await log.append('one') 26 | const values = JSON.stringify(log.values, null, 2) 27 | console.log('\n', values) 28 | outputElm.innerHTML += values + '
<br><br>' 29 | 30 | await log.append({ two: 'hello' }) 31 | const values2 = JSON.stringify(log.values, null, 2) 32 | console.log('\n', values2) 33 | outputElm.innerHTML += values2 + '<br><br>
' 34 | }) 35 |
--------------------------------------------------------------------------------
/benchmarks/join.js:
--------------------------------------------------------------------------------
1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log: logA } = await createLog(ipfsd.api, 'A') 8 | const { log: logB } = await createLog(ipfsd.api, 'B') 9 | return { logA, logB, ipfsd } 10 | }, 11 | cycle: async function ({ logA, logB }) { 12 | const add1 = await logA.append('Hello1') 13 | const add2 = await logB.append('Hello2') 14 | 15 | await Promise.all([add1, add2]) 16 | logA.join(logB) 17 | logB.join(logA) 18 | }, 19 | teardown: async function ({ ipfsd }) { 20 | await stopIpfs(ipfsd) 21 | } 22 | } 23 | 24 | const baseline = { 25 | while: ({ stats, startTime, baselineLimit }) => { 26 | return stats.count < baselineLimit 27 | } 28 | } 29 | 30 | const stress = { 31 | while: ({ stats, startTime, stressLimit }) => { 32 | return process.hrtime(startTime)[0] < stressLimit 33 | } 34 | } 35 | 36 | module.exports = [ 37 | { name: 'join-baseline', ...base, ...baseline }, 38 | { name: 'join-stress', ...base, ...stress } 39 | ] 40 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | > ⚠️ NOTE: Please don't use the PR description for communication, use comments instead. 2 | 3 | ## Description (Required) 4 | 5 | _ 6 | A short description of the overall goals of the pull request. 7 | Optional: Is it a fix? maintenance? new feature? 8 | _ 9 | 10 | ## Other changes (e.g. bug fixes, UI tweaks, refactors) 11 | 12 | _ 13 | Minor changes go here. In general we should avoid these, but sometimes it's much easier / faster to add a quick fix for a bug we noticed during development. 14 | _ 15 | 16 | ## TODO 17 | 18 | > ⚠️ NOTE: Please make sure all items are checked or remove the TODO list before closing the PR 19 | 20 | - [ ] Add unit test 21 | - [ ] Add super cool feature 22 | - [ ] Add documentation 23 | - [ ] Fix comment 24 | 25 | ## Scripts 26 | **New scripts**: 27 | 28 | - `script` : script details 29 | 30 | ## Deps 31 | 32 | **New dependencies**: 33 | 34 | - `dependency` : dependency details 35 | 36 | **New dev dependencies**: 37 | 38 | - `devDependency` : dependency details 39 | 40 | 41 | > ⚠️ NOTE: Use special keywords to close / connect the PR with the issue(s) it solves or contributes to 42 | 43 | (Resolves | Closes | Contributes to) #1234 44 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016-2018 Protocol Labs Inc.
4 | Copyright (c) 2018-2019 Haja Networks Oy 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /conf/webpack.example.config.js: -------------------------------------------------------------------------------- 1 | import path from 'path' 2 | import { createRequire } from 'module' 3 | import { fileURLToPath } from 'url' 4 | 5 | export default (env, argv) => { 6 | const require = createRequire(import.meta.url) 7 | const __filename = fileURLToPath(import.meta.url) 8 | const __dirname = path.dirname(__filename) 9 | 10 | return { 11 | devtool: 'source-map', 12 | entry: './examples/entry.js', 13 | output: { 14 | libraryTarget: 'global', 15 | filename: '../examples/browser/bundle.js' 16 | }, 17 | target: 'web', 18 | externals: { 19 | fs: '{}', 20 | fatfs: '{}', 21 | 'fs-extra': '{ copy: () => {} }', 22 | rimraf: '{ sync: () => {} }', 23 | 'idb-readable-stream': '{}', 24 | runtimejs: '{}', 25 | net: '{}', 26 | child_process: {}, 27 | dns: '{}', 28 | tls: '{}', 29 | bindings: '{}' 30 | }, 31 | resolve: { 32 | modules: [ 33 | 'node_modules', 34 | path.resolve(__dirname, '../node_modules') 35 | ], 36 | fallback: { 37 | path: require.resolve('path-browserify'), 38 | stream: require.resolve('stream-browserify') 39 | } 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /benchmarks/heads.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | 9 | process.stdout.clearLine() 10 | for (let i = 1; i < this.count + 1; i++) { 11 | process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`) 12 | await log.append(`Hello World: ${i}`) 13 | } 14 | 15 | return { log, ipfsd } 16 | }, 17 | cycle: async function ({ log }) { 18 | return log.heads 19 | }, 20 | teardown: async function ({ ipfsd }) { 21 | await stopIpfs(ipfsd) 22 | } 23 | } 24 | 25 | const baseline = { 26 | while: ({ stats, startTime, baselineLimit }) => { 27 | return stats.count < baselineLimit 28 | } 29 | } 30 | 31 | const stress = { 32 | while: ({ stats, startTime, stressLimit }) => { 33 | return process.hrtime(startTime)[0] < stressLimit 34 | 
} 35 | } 36 | 37 | const counts = [1, 100, 1000] 38 | const benchmarks = [] 39 | for (const count of counts) { 40 | const c = { count } 41 | benchmarks.push({ name: `heads-${count}-baseline`, ...base, ...c, ...baseline }) 42 | benchmarks.push({ name: `heads-${count}-stress`, ...base, ...c, ...stress }) 43 | } 44 | 45 | module.exports = benchmarks 46 | -------------------------------------------------------------------------------- /benchmarks/tails.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | 9 | process.stdout.clearLine() 10 | for (let i = 1; i < this.count + 1; i++) { 11 | process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`) 12 | await log.append(`Hello World: ${i}`) 13 | } 14 | 15 | return { log, ipfsd } 16 | }, 17 | cycle: async function ({ log }) { 18 | return log.tails 19 | }, 20 | teardown: async function ({ ipfsd }) { 21 | await stopIpfs(ipfsd) 22 | } 23 | } 24 | 25 | const baseline = { 26 | while: ({ stats, startTime, baselineLimit }) => { 27 | return stats.count < baselineLimit 28 | } 29 | } 30 | 31 | const stress = { 32 | while: ({ stats, startTime, stressLimit }) => { 33 | return process.hrtime(startTime)[0] < stressLimit 34 | } 35 | } 36 | 37 | const counts = [1, 100, 1000, 5000] 38 | const benchmarks = [] 39 | for (const count of counts) { 40 | const c = { count } 41 | benchmarks.push({ name: `tails-${count}-baseline`, ...base, ...c, ...baseline }) 42 | benchmarks.push({ name: `tails-${count}-stress`, ...base, ...c, ...stress }) 43 | } 44 | 45 | module.exports = benchmarks 46 | -------------------------------------------------------------------------------- /benchmarks/values.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | 9 | process.stdout.clearLine() 10 | for (let i = 1; i < this.count + 1; i++) { 11 | process.stdout.write(`\r${this.name} - Preparing - Writing: ${i}/${this.count}`) 12 | await log.append(`Hello World: ${i}`) 13 | } 14 | 15 | return { log, ipfsd } 16 | }, 17 | cycle: async function ({ log }) { 18 | return log.values 19 | }, 20 | teardown: async function ({ ipfsd }) { 21 | await stopIpfs(ipfsd) 22 | } 23 | } 24 | 25 | const baseline = { 26 | while: ({ stats, startTime, baselineLimit }) => { 27 | return stats.count < baselineLimit 28 | } 29 | } 30 | 31 | const stress = { 32 | while: ({ stats, startTime, stressLimit }) => { 33 | return process.hrtime(startTime)[0] < stressLimit 34 | } 35 | } 36 | 37 | const counts = [1, 100, 1000] 38 | const benchmarks = [] 39 | for (const count of counts) { 40 | const c = { count } 41 | benchmarks.push({ name: `values-${count}-baseline`, ...base, ...c, ...baseline }) 42 | benchmarks.push({ name: `values-${count}-stress`, ...base, ...c, ...stress }) 43 | } 44 | 45 | module.exports = benchmarks 46 | -------------------------------------------------------------------------------- /benchmarks/tail-hashes.js: 
-------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | 9 | process.stdout.clearLine() 10 | for (let i = 1; i < this.count + 1; i++) { 11 | process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`) 12 | await log.append(`Hello World: ${i}`) 13 | } 14 | 15 | return { log, ipfsd } 16 | }, 17 | cycle: async function ({ log }) { 18 | return log.tailHashes 19 | }, 20 | teardown: async function ({ ipfsd }) { 21 | await stopIpfs(ipfsd) 22 | } 23 | } 24 | 25 | const baseline = { 26 | while: ({ stats, startTime, baselineLimit }) => { 27 | return stats.count < baselineLimit 28 | } 29 | } 30 | 31 | const stress = { 32 | while: ({ stats, startTime, stressLimit }) => { 33 | return process.hrtime(startTime)[0] < stressLimit 34 | } 35 | } 36 | 37 | const counts = [1, 100, 1000] 38 | const benchmarks = [] 39 | for (const count of counts) { 40 | const c = { count } 41 | benchmarks.push({ name: `tailHashes-${count}-baseline`, ...base, ...c, ...baseline }) 42 | benchmarks.push({ name: `tailHashes-${count}-stress`, ...base, ...c, ...stress }) 43 | } 44 | 45 | module.exports = benchmarks 46 | -------------------------------------------------------------------------------- /benchmarks/to-multihash.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | 9 | process.stdout.clearLine() 10 | for (let i = 1; i < this.count + 1; i++) { 11 | process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`) 12 | await log.append(`Hello World: ${i}`) 13 | } 14 | 15 | return { log, ipfsd } 16 | }, 17 | cycle: async function ({ log }) { 18 | await log.toMultihash() 19 | }, 20 | teardown: async function ({ ipfsd }) { 21 | await stopIpfs(ipfsd) 22 | } 23 | } 24 | 25 | const baseline = { 26 | while: ({ stats, startTime, baselineLimit }) => { 27 | return stats.count < baselineLimit 28 | } 29 | } 30 | 31 | const stress = { 32 | while: ({ stats, startTime, stressLimit }) => { 33 | return process.hrtime(startTime)[0] < stressLimit 34 | } 35 | } 36 | 37 | const counts = [1, 100, 1000] 38 | const benchmarks = [] 39 | for (const count of counts) { 40 | const c = { count } 41 | benchmarks.push({ name: `toMultihash-${count}-baseline`, ...base, ...c, ...baseline }) 42 | benchmarks.push({ name: `toMultihash-${count}-stress`, ...base, ...c, ...stress }) 43 | } 44 | 45 | module.exports = benchmarks 46 | -------------------------------------------------------------------------------- /benchmarks/traverse.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | 9 | process.stdout.clearLine() 10 | for (let i = 1; i < this.count + 1; i++) { 11 | 
process.stdout.write(`\r${this.name} - Preparing - Writing: ${i}/${this.count}`) 12 | await log.append(`Hello World: ${i}`) 13 | } 14 | 15 | return { log, ipfsd } 16 | }, 17 | cycle: async function ({ log }) { 18 | return log.traverse(log.heads) 19 | }, 20 | teardown: async function ({ ipfsd }) { 21 | await stopIpfs(ipfsd) 22 | } 23 | } 24 | 25 | const baseline = { 26 | while: ({ stats, startTime, baselineLimit }) => { 27 | return stats.count < baselineLimit 28 | } 29 | } 30 | 31 | const stress = { 32 | while: ({ stats, startTime, stressLimit }) => { 33 | return process.hrtime(startTime)[0] < stressLimit 34 | } 35 | } 36 | 37 | const counts = [1, 100, 1000] 38 | const benchmarks = [] 39 | for (const count of counts) { 40 | const c = { count } 41 | benchmarks.push({ name: `traverse-${count}-baseline`, ...base, ...c, ...baseline }) 42 | benchmarks.push({ name: `traverse-${count}-stress`, ...base, ...c, ...stress }) 43 | } 44 | 45 | module.exports = benchmarks 46 | -------------------------------------------------------------------------------- /benchmarks/to-string.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | 9 | process.stdout.clearLine() 10 | for (let i = 1; i < this.count + 1; i++) { 11 | process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`) 12 | await log.append(`Hello World: ${i}`) 13 | } 14 | 15 | return { log, ipfsd } 16 | }, 17 | cycle: async function ({ log }) { 18 | return log.toString() 19 | }, 20 | teardown: async function ({ ipfsd }) { 21 | await stopIpfs(ipfsd) 22 | } 23 | } 24 | 25 | const baseline = { 26 | while: ({ stats, startTime, baselineLimit }) => { 27 | return stats.count < baselineLimit 28 | } 29 | } 30 | 31 | const stress = { 32 | while: ({ stats, startTime, stressLimit }) => { 33 | return process.hrtime(startTime)[0] < stressLimit 34 | } 35 | } 36 | 37 | const counts = [1, 100, 1000] 38 | const benchmarks = [] 39 | for (const count of counts) { 40 | const c = { count } 41 | if (count < 1000) benchmarks.push({ name: `toString-${count}-baseline`, ...base, ...c, ...baseline }) 42 | benchmarks.push({ name: `toString-${count}-stress`, ...base, ...c, ...stress }) 43 | } 44 | 45 | module.exports = benchmarks 46 | -------------------------------------------------------------------------------- /benchmarks/has.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | 9 | process.stdout.clearLine() 10 | let entry 11 | for (let i = 1; i < this.count + 1; i++) { 12 | process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`) 13 | entry = await log.append(`Hello World: ${i}`) 14 | } 15 | 16 | return { log, ipfsd, entry } 17 | }, 18 | cycle: async function ({ log, entry }) { 19 | return log.has(entry) 20 | }, 21 | teardown: async function ({ ipfsd }) { 22 | await stopIpfs(ipfsd) 23 | } 24 | } 25 | 26 | const baseline = { 27 | while: ({ stats, startTime, baselineLimit }) => { 28 | return 
stats.count < baselineLimit 29 | } 30 | } 31 | 32 | const stress = { 33 | while: ({ stats, startTime, stressLimit }) => { 34 | return process.hrtime(startTime)[0] < stressLimit 35 | } 36 | } 37 | 38 | const counts = [1, 100, 1000] 39 | const benchmarks = [] 40 | for (const count of counts) { 41 | const c = { count } 42 | benchmarks.push({ name: `has-${count}-baseline`, ...base, ...c, ...baseline }) 43 | benchmarks.push({ name: `has-${count}-stress`, ...base, ...c, ...stress }) 44 | } 45 | 46 | module.exports = benchmarks 47 | -------------------------------------------------------------------------------- /benchmarks/get.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | 4 | const base = { 5 | prepare: async function () { 6 | const ipfsd = await startIpfs('js-ipfs', config) 7 | const { log } = await createLog(ipfsd.api, 'A') 8 | 9 | process.stdout.clearLine() 10 | let entry 11 | for (let i = 1; i < this.count + 1; i++) { 12 | process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`) 13 | entry = await log.append(`Hello World: ${i}`) 14 | } 15 | 16 | return { log, ipfsd, entry } 17 | }, 18 | cycle: async function ({ log, entry }) { 19 | return log.get(entry.hash) 20 | }, 21 | teardown: async function ({ ipfsd }) { 22 | await stopIpfs(ipfsd) 23 | } 24 | } 25 | 26 | const baseline = { 27 | while: ({ stats, startTime, baselineLimit }) => { 28 | return stats.count < baselineLimit 29 | } 30 | } 31 | 32 | const stress = { 33 | while: ({ stats, startTime, stressLimit }) => { 34 | return process.hrtime(startTime)[0] < stressLimit 35 | } 36 | } 37 | 38 | const counts = [1, 100, 1000] 39 | const benchmarks = [] 40 | for (const count of counts) { 41 | const c = { count } 42 | benchmarks.push({ name: `get-${count}-baseline`, ...base, ...c, ...baseline }) 43 | benchmarks.push({ name: `get-${count}-stress`, ...base, ...c, ...stress }) 44 | } 45 | 46 | module.exports = benchmarks 47 | -------------------------------------------------------------------------------- /benchmarks/find-heads.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | const Log = require('../src/log') 4 | 5 | const base = { 6 | prepare: async function () { 7 | const ipfsd = await startIpfs('js-ipfs', config) 8 | const { log } = await createLog(ipfsd.api, 'A') 9 | 10 | process.stdout.clearLine() 11 | const entries = [] 12 | for (let i = 1; i < this.count + 1; i++) { 13 | process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`) 14 | const entry = await log.append(`Hello World: ${i}`) 15 | entries.push(entry) 16 | } 17 | 18 | return { log, entries, ipfsd } 19 | }, 20 | cycle: async function ({ log, entries }) { 21 | return Log.findHeads(entries) 22 | }, 23 | teardown: async function ({ ipfsd }) { 24 | await stopIpfs(ipfsd) 25 | } 26 | } 27 | 28 | const baseline = { 29 | while: ({ stats, startTime, baselineLimit }) => { 30 | return stats.count < baselineLimit 31 | } 32 | } 33 | 34 | const stress = { 35 | while: ({ stats, startTime, stressLimit }) => { 36 | return process.hrtime(startTime)[0] < stressLimit 37 | } 38 | } 39 | 40 | const counts = [1, 100, 1000] 41 | const benchmarks = [] 42 | for (const count of counts) { 43 | const c = { count } 44 | benchmarks.push({ 
name: `findHeads-${count}-baseline`, ...base, ...c, ...baseline })
45 |   benchmarks.push({ name: `findHeads-${count}-stress`, ...base, ...c, ...stress })
46 | }
47 | 
48 | module.exports = benchmarks
49 | 
--------------------------------------------------------------------------------
/benchmarks/from-entry.js:
--------------------------------------------------------------------------------
 1 | const Log = require('../src/log')
 2 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils')
 3 | const createLog = require('./utils/create-log')
 4 | 
 5 | const base = {
 6 |   prepare: async function () {
 7 |     const ipfsd = await startIpfs('js-ipfs', config)
 8 |     const { log, access, identity } = await createLog(ipfsd.api, 'A')
 9 |     const refCount = 64
10 |     process.stdout.clearLine()
11 |     for (let i = 1; i < this.count + 1; i++) {
12 |       process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`)
13 |       await log.append('hello' + i, refCount)
14 |     }
15 | 
16 |     return { log, ipfsd, access, identity }
17 |   },
18 |   cycle: async function ({ log, ipfsd, access, identity }) {
19 |     await Log.fromEntry(ipfsd.api, identity, log.heads, { access })
20 |   },
21 |   teardown: async function ({ ipfsd }) {
22 |     await stopIpfs(ipfsd)
23 |   }
24 | }
25 | 
26 | const baseline = {
27 |   while: ({ stats, startTime, baselineLimit }) => {
28 |     return stats.count < baselineLimit
29 |   }
30 | }
31 | 
32 | const stress = {
33 |   while: ({ stats, startTime, stressLimit }) => {
34 |     return process.hrtime(startTime)[0] < stressLimit
35 |   }
36 | }
37 | 
38 | const counts = [1, 100, 1000]
39 | const benchmarks = []
40 | for (const count of counts) {
41 |   const c = { count }
42 |   if (count < 1000) benchmarks.push({ name: `fromEntry-${count}-baseline`, ...base, ...c, ...baseline })
43 |   benchmarks.push({ name: `fromEntry-${count}-stress`, ...base, ...c, ...stress })
44 | }
45 | 
46 | module.exports = benchmarks
47 | 
--------------------------------------------------------------------------------
/examples/log.js:
--------------------------------------------------------------------------------
 1 | 'use strict'
 2 | 
 3 | const IPFS = require('ipfs')
 4 | const Log = require('../src/log')
 5 | const IdentityProvider = require('orbit-db-identity-provider')
 6 | const Keystore = require('orbit-db-keystore')
 7 | 
 8 | const dataPath = './ipfs/examples/log'
 9 | 
10 | const ipfs = new IPFS({
11 |   repo: dataPath + '/ipfs',
12 |   start: false,
13 |   EXPERIMENTAL: {
14 |     pubsub: true
15 |   }
16 | })
17 | 
18 | ipfs.on('error', (err) => console.error(err))
19 | ipfs.on('ready', async () => {
20 |   let identityA, identityB, identityC
21 |   try {
22 |     const keystore = new Keystore()
23 |     identityA = await IdentityProvider.createIdentity({ id: 'identityA', keystore })
24 |     identityB = await IdentityProvider.createIdentity({ id: 'identityB', keystore })
25 |     identityC = await IdentityProvider.createIdentity({ id: 'identityC', keystore })
26 |   } catch (e) {
27 |     console.error(e)
28 |   }
29 | 
30 |   const log1 = new Log(ipfs, identityA, { logId: 'A' })
31 |   const log2 = new Log(ipfs, identityB, { logId: 'A' })
32 |   const log3 = new Log(ipfs, identityC, { logId: 'A' })
33 | 
34 |   try {
35 |     await log1.append('one')
36 |     await log1.append('two')
37 |     await log2.append('three')
38 |     // Join the logs
39 |     await log3.join(log1)
40 |     await log3.join(log2)
41 |     // Add one more
42 |     await log3.append('four')
43 |     console.log(log3.values)
44 |   } catch (e) {
45 |     console.error(e)
46 |     process.exit(1)
47 |   }
48 |   console.log(log3.toString())
49 |   // four
50 |   // 
└─two 51 | // └─one 52 | // └─three 53 | process.exit(0) 54 | }) 55 | -------------------------------------------------------------------------------- /conf/webpack.tests.config.js: -------------------------------------------------------------------------------- 1 | import glob from 'glob' 2 | import webpack from 'webpack' 3 | import { createRequire } from 'module' 4 | 5 | export default (env, argv) => { 6 | const require = createRequire(import.meta.url) 7 | return { 8 | // TODO: put all tests in a .js file that webpack can use as entry point 9 | entry: glob.sync('./test/*.spec.js', { ignore: ['./test/replicate.spec.js'] }), 10 | output: { 11 | filename: '../test/browser/bundle.js' 12 | }, 13 | target: 'web', 14 | mode: 'production', 15 | devtool: 'source-map', 16 | plugins: [ 17 | new webpack.ProvidePlugin({ 18 | process: 'process/browser.js', 19 | Buffer: ['buffer', 'Buffer'] 20 | }) 21 | ], 22 | experiments: { 23 | topLevelAwait: true 24 | }, 25 | resolve: { 26 | modules: [ 27 | 'node_modules' 28 | ], 29 | fallback: { 30 | path: require.resolve('path-browserify'), 31 | os: false, 32 | fs: false, 33 | constants: false, 34 | stream: false 35 | } 36 | }, 37 | externals: { 38 | fs: '{ existsSync: () => true }', 39 | 'fs-extra': '{ copy: () => {} }', 40 | rimraf: '{ sync: () => {} }' 41 | }, 42 | module: { 43 | rules: [ 44 | { 45 | test: /\.m?js$/, 46 | exclude: /node_modules/, 47 | use: { 48 | loader: 'babel-loader', 49 | options: { 50 | presets: ['@babel/preset-env'], 51 | plugins: ['@babel/plugin-syntax-import-assertions'] 52 | } 53 | } 54 | } 55 | ] 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /benchmarks/from-entry-hash.js: -------------------------------------------------------------------------------- 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils') 2 | const createLog = require('./utils/create-log') 3 | const Log = require('../src/log') 4 | 5 | const base = { 6 | prepare: async function () { 7 | const ipfsd = await startIpfs('js-ipfs', config) 8 | const { log, access, identity } = await createLog(ipfsd.api, 'A') 9 | const refCount = 64 10 | process.stdout.clearLine() 11 | for (let i = 1; i < this.count + 1; i++) { 12 | process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`) 13 | await log.append('hello' + i, refCount) 14 | } 15 | return { ipfsd, log, access, identity } 16 | }, 17 | cycle: async function ({ log, ipfsd, access, identity }) { 18 | await Log.fromEntryHash(ipfsd.api, identity, log.heads.map(e => e.hash), { 19 | access, 20 | logId: log._id 21 | }) 22 | }, 23 | teardown: async function ({ ipfsd }) { 24 | await stopIpfs(ipfsd) 25 | } 26 | } 27 | 28 | const baseline = { 29 | while: ({ stats, startTime, baselineLimit }) => { 30 | return stats.count < baselineLimit 31 | } 32 | } 33 | 34 | const stress = { 35 | while: ({ stats, startTime, stressLimit }) => { 36 | return process.hrtime(startTime)[0] < stressLimit 37 | } 38 | } 39 | 40 | const counts = [1, 100, 1000] 41 | const benchmarks = [] 42 | for (const count of counts) { 43 | const c = { count } 44 | if (count < 1000) benchmarks.push({ name: `fromEntryHash-${count}-baseline`, ...base, ...c, ...baseline }) 45 | benchmarks.push({ name: `fromEntryHash-${count}-stress`, ...base, ...c, ...stress }) 46 | } 47 | 48 | module.exports = benchmarks 49 | -------------------------------------------------------------------------------- /examples/browser/browser.html: 
--------------------------------------------------------------------------------
[HTML page; markup was stripped during extraction and the body (original lines 13-43) was lost. Recoverable content: the page title, "ipfs-log example".]


--------------------------------------------------------------------------------
/benchmarks/from-multihash.js:
--------------------------------------------------------------------------------
 1 | const { startIpfs, stopIpfs, config } = require('orbit-db-test-utils')
 2 | const createLog = require('./utils/create-log')
 3 | const Log = require('../src/log')
 4 | 
 5 | const base = {
 6 |   prepare: async function () {
 7 |     const ipfsd = await startIpfs('js-ipfs', config)
 8 |     const { log, access, identity } = await createLog(ipfsd.api, 'A')
 9 |     const refCount = 64
10 |     process.stdout.clearLine()
11 |     for (let i = 1; i < this.count + 1; i++) {
12 |       process.stdout.write(`\r${this.name} / Preparing / Writing: ${i}/${this.count}`)
13 |       await log.append('hello' + i, refCount)
14 |     }
15 | 
16 |     const multihash = await log.toMultihash()
17 |     return { ipfsd, access, identity, log, multihash }
18 |   },
19 |   cycle: async function ({ log, access, identity, ipfsd, multihash }) {
20 |     await Log.fromMultihash(ipfsd.api, identity, multihash, { access })
21 |   },
22 |   teardown: async function ({ ipfsd }) {
23 |     await stopIpfs(ipfsd)
24 |   }
25 | }
26 | 
27 | const baseline = {
28 |   while: ({ stats, startTime, baselineLimit }) => {
29 |     return stats.count < baselineLimit
30 |   }
31 | }
32 | 
33 | const stress = {
34 |   while: ({ stats, startTime, stressLimit }) => {
35 |     return process.hrtime(startTime)[0] < stressLimit
36 |   }
37 | }
38 | 
39 | const counts = [1, 100, 1000]
40 | const benchmarks = []
41 | for (const count of counts) {
42 |   const c = { count }
43 |   if (count < 1000) benchmarks.push({ name: `fromMultihash-${count}-baseline`, ...base, ...c, ...baseline })
44 |   benchmarks.push({ name: `fromMultihash-${count}-stress`, ...base, ...c, ...stress })
45 | }
46 | 
47 | module.exports = benchmarks
48 | 
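
[Editor's sketch] Every file under benchmarks/ follows the shape above: a base object with prepare/cycle/teardown, mixed with a count and a baseline or stress while-predicate. The repo runs these through orbit-db-benchmark-runner (see the benchmark scripts in package.json); that runner's real API is not shown in this dump, so the loop below is only a minimal sketch of how such definitions could be driven, with the limit values invented for illustration.

    // Hypothetical driver for the benchmark definitions above; the real
    // orbit-db-benchmark-runner may differ. Limit values are illustrative.
    const runBenchmark = async (benchmark, limits = { baselineLimit: 1000, stressLimit: 300 }) => {
      const context = await benchmark.prepare()        // e.g. { log, ipfsd }
      const stats = { count: 0 }
      const startTime = process.hrtime()
      while (benchmark.while({ stats, startTime, ...limits })) {
        await benchmark.cycle(context)                 // the measured operation
        stats.count++
      }
      await benchmark.teardown(context)
      console.log(`\n${benchmark.name}: ${stats.count} cycles`)
    }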


--------------------------------------------------------------------------------
/test/fixtures/v0-entries.fixture.js:
--------------------------------------------------------------------------------
1 | export const hello = JSON.parse('{"hash":"Qmc2DEiLirMH73kHpuFPbt3V65sBrnDWkJYSjUQHXXvghT","id":"A","payload":"hello","next":[],"v":0,"clock":{"id":"0411a0d38181c9374eca3e480ecada96b1a4db9375c5e08c3991557759d22f6f2f902d0dc5364a948035002504d825308b0c257b7cbb35229c2076532531f8f4ef","time":0},"key":"0411a0d38181c9374eca3e480ecada96b1a4db9375c5e08c3991557759d22f6f2f902d0dc5364a948035002504d825308b0c257b7cbb35229c2076532531f8f4ef","sig":"3044022062f4cfc8b8f3cc01283b25eab3eeb295614bb0faa8bd20f026c1487ae663121102207ce415bd7423b66d695338c17122e937259f77d1e86494d3146436f0959fccc6"}')
2 | export const helloWorld = JSON.parse('{"hash":"QmUKMoRrmsYAzQg1nQiD7Fzgpo24zXky7jVJNcZGiSAdhc","id":"A","payload":"hello world","next":[],"v":0,"clock":{"id":"0411a0d38181c9374eca3e480ecada96b1a4db9375c5e08c3991557759d22f6f2f902d0dc5364a948035002504d825308b0c257b7cbb35229c2076532531f8f4ef","time":0},"key":"0411a0d38181c9374eca3e480ecada96b1a4db9375c5e08c3991557759d22f6f2f902d0dc5364a948035002504d825308b0c257b7cbb35229c2076532531f8f4ef","sig":"3044022062f4cfc8b8f3cc01283b25eab3eeb295614bb0faa8bd20f026c1487ae663121102207ce415bd7423b66d695338c17122e937259f77d1e86494d3146436f0959fccc6"}')
3 | export const helloAgain = JSON.parse('{"hash":"QmZ8va2fSjRufV1sD6x5mwi6E5GrSjXHx7RiKFVBzkiUNZ","id":"A","payload":"hello again","next":["QmUKMoRrmsYAzQg1nQiD7Fzgpo24zXky7jVJNcZGiSAdhc"],"v":0,"clock":{"id":"0411a0d38181c9374eca3e480ecada96b1a4db9375c5e08c3991557759d22f6f2f902d0dc5364a948035002504d825308b0c257b7cbb35229c2076532531f8f4ef","time":0},"key":"0411a0d38181c9374eca3e480ecada96b1a4db9375c5e08c3991557759d22f6f2f902d0dc5364a948035002504d825308b0c257b7cbb35229c2076532531f8f4ef","sig":"3044022062f4cfc8b8f3cc01283b25eab3eeb295614bb0faa8bd20f026c1487ae663121102207ce415bd7423b66d695338c17122e937259f77d1e86494d3146436f0959fccc6"}')
4 | 
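
[Editor's sketch] The fixtures above capture the legacy v0 entry format: hash, id, payload, next, v, clock (id/time), and a bare key/sig pair. The assertions below are not an existing test; they only spell out the shape visible in the fixture JSON.

    // Illustrative only: checks the v0 entry shape seen in the fixtures above.
    import { strictEqual, deepStrictEqual } from 'assert'
    import { hello } from './v0-entries.fixture.js'

    strictEqual(hello.v, 0)                   // version marker for legacy entries
    strictEqual(hello.payload, 'hello')
    deepStrictEqual(hello.next, [])           // no parents: a first entry
    strictEqual(hello.clock.time, 0)          // v0 clocks start at time 0
    strictEqual(typeof hello.key, 'string')   // v0 signs with a bare key/sig pair
    strictEqual(typeof hello.sig, 'string')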


--------------------------------------------------------------------------------
/test/utils/log-creator.js:
--------------------------------------------------------------------------------
 1 | class LogCreator {
 2 |   static async createLogWithSixteenEntries (Log, ipfs, identities) {
 3 |     const create = async () => {
 4 |       const logA = new Log(ipfs, identities[0], { logId: 'X' })
 5 |       const logB = new Log(ipfs, identities[1], { logId: 'X' })
 6 |       const log3 = new Log(ipfs, identities[2], { logId: 'X' })
 7 |       const log = new Log(ipfs, identities[3], { logId: 'X' })
 8 | 
 9 |       for (let i = 1; i <= 5; i++) {
10 |         await logA.append('entryA' + i)
11 |       }
12 |       for (let i = 1; i <= 5; i++) {
13 |         await logB.append('entryB' + i)
14 |       }
15 |       await log3.join(logA)
16 |       await log3.join(logB)
17 |       for (let i = 6; i <= 10; i++) {
18 |         await logA.append('entryA' + i)
19 |       }
20 |       await log.join(log3)
21 |       await log.append('entryC0')
22 |       await log.join(logA)
23 |       return log
24 |     }
25 | 
26 |     const expectedData = [
27 |       'entryA1', 'entryB1', 'entryA2', 'entryB2', 'entryA3', 'entryB3',
28 |       'entryA4', 'entryB4', 'entryA5', 'entryB5',
29 |       'entryA6',
30 |       'entryC0',
31 |       'entryA7', 'entryA8', 'entryA9', 'entryA10'
32 |     ]
33 | 
34 |     const log = await create()
35 |     return { log, expectedData, json: log.toJSON() }
36 |   }
37 | 
38 |   static async createLogWithTwoHundredEntries (Log, ipfs, identities) {
39 |     const amount = 100
40 | 
41 |     const expectedData = []
42 | 
43 |     const create = async () => {
44 |       const logA = new Log(ipfs, identities[0], { logId: 'X' })
45 |       const logB = new Log(ipfs, identities[1], { logId: 'X' })
46 |       for (let i = 1; i <= amount; i++) {
47 |         await logA.append('entryA' + i)
48 |         await logB.join(logA)
49 |         await logB.append('entryB' + i)
50 |         await logA.join(logB)
51 |         expectedData.push('entryA' + i)
52 |         expectedData.push('entryB' + i)
53 |       }
54 |       return logA
55 |     }
56 | 
57 |     const log = await create()
58 |     return { log, expectedData }
59 |   }
60 | }
61 | 
62 | export default LogCreator
63 | 
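
[Editor's sketch] A hedged example of how a spec consumes LogCreator; the Log, ipfs and identities handles are assumed to be set up as in the surrounding spec files (startIpfs, createIdentity), so this is illustrative rather than an excerpt.

    // Illustrative usage, assuming Log/ipfs/identities from the usual test setup.
    import { deepStrictEqual } from 'assert'
    import LogCreator from './utils/log-creator.js'

    const { log, expectedData } = await LogCreator.createLogWithSixteenEntries(Log, ipfs, identities)
    // The joined log should traverse its sixteen entries in the documented order.
    deepStrictEqual(log.values.map(e => e.payload), expectedData)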


--------------------------------------------------------------------------------
/docs/styles/prettify-jsdoc.css:
--------------------------------------------------------------------------------
  1 | /* JSDoc prettify.js theme */
  2 | 
  3 | /* plain text */
  4 | .pln {
  5 |   color: #000000;
  6 |   font-weight: normal;
  7 |   font-style: normal;
  8 | }
  9 | 
 10 | /* string content */
 11 | .str {
 12 |   color: #006400;
 13 |   font-weight: normal;
 14 |   font-style: normal;
 15 | }
 16 | 
 17 | /* a keyword */
 18 | .kwd {
 19 |   color: #000000;
 20 |   font-weight: bold;
 21 |   font-style: normal;
 22 | }
 23 | 
 24 | /* a comment */
 25 | .com {
 26 |   font-weight: normal;
 27 |   font-style: italic;
 28 | }
 29 | 
 30 | /* a type name */
 31 | .typ {
 32 |   color: #000000;
 33 |   font-weight: normal;
 34 |   font-style: normal;
 35 | }
 36 | 
 37 | /* a literal value */
 38 | .lit {
 39 |   color: #006400;
 40 |   font-weight: normal;
 41 |   font-style: normal;
 42 | }
 43 | 
 44 | /* punctuation */
 45 | .pun {
 46 |   color: #000000;
 47 |   font-weight: bold;
 48 |   font-style: normal;
 49 | }
 50 | 
 51 | /* lisp open bracket */
 52 | .opn {
 53 |   color: #000000;
 54 |   font-weight: bold;
 55 |   font-style: normal;
 56 | }
 57 | 
 58 | /* lisp close bracket */
 59 | .clo {
 60 |   color: #000000;
 61 |   font-weight: bold;
 62 |   font-style: normal;
 63 | }
 64 | 
 65 | /* a markup tag name */
 66 | .tag {
 67 |   color: #006400;
 68 |   font-weight: normal;
 69 |   font-style: normal;
 70 | }
 71 | 
 72 | /* a markup attribute name */
 73 | .atn {
 74 |   color: #006400;
 75 |   font-weight: normal;
 76 |   font-style: normal;
 77 | }
 78 | 
 79 | /* a markup attribute value */
 80 | .atv {
 81 |   color: #006400;
 82 |   font-weight: normal;
 83 |   font-style: normal;
 84 | }
 85 | 
 86 | /* a declaration */
 87 | .dec {
 88 |   color: #000000;
 89 |   font-weight: bold;
 90 |   font-style: normal;
 91 | }
 92 | 
 93 | /* a variable name */
 94 | .var {
 95 |   color: #000000;
 96 |   font-weight: normal;
 97 |   font-style: normal;
 98 | }
 99 | 
100 | /* a function name */
101 | .fun {
102 |   color: #000000;
103 |   font-weight: bold;
104 |   font-style: normal;
105 | }
106 | 
107 | /* Specify class=linenums on a pre to get line numbering */
108 | ol.linenums {
109 |   margin-top: 0;
110 |   margin-bottom: 0;
111 | }
112 | 


--------------------------------------------------------------------------------
/docs/g-set.js.html:
--------------------------------------------------------------------------------
[JSDoc-generated page "Source: g-set.js"; page chrome, navigation and markup were stripped during extraction. The recoverable content is the embedded source listing below and the footer "Documentation generated by JSDoc 3.6.6 on Fri Dec 11 2020 17:11:17 GMT-0500 (Eastern Standard Time)".]

    'use strict'

    /**
     * Interface for G-Set CRDT
     *
     * From:
     * "A comprehensive study of Convergent and Commutative Replicated Data Types"
     * https://hal.inria.fr/inria-00555588
     */
    class GSet {
      constructor (values) {} // eslint-disable-line
      append (value) {}
      merge (set) {}
      get (value) {}
      has (value) {}
      get values () {}
      get length () {}
    }

    module.exports = GSet
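
[Editor's sketch] The interface leaves every method abstract; a minimal grow-only implementation, written here purely to make the contract concrete (it is not part of the repo), could look like:

    // Illustrative grow-only set satisfying the GSet interface above.
    class SimpleGSet {
      constructor (values) { this._values = new Set(values || []) }
      append (value) { this._values.add(value) }                    // grow-only: no remove
      merge (set) { set.values.forEach(v => this._values.add(v)) }  // union with another set
      get (value) { return this.has(value) ? value : undefined }
      has (value) { return this._values.has(value) }
      get values () { return Array.from(this._values) }
      get length () { return this._values.size }
    }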
67 | 68 | 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /benchmarks/legacy/benchmark-append.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const IPFS = require('ipfs') 4 | const IPFSRepo = require('ipfs-repo') 5 | const DatastoreLevel = require('datastore-level') 6 | const Log = require('../src/log') 7 | const IdentityProvider = require('orbit-db-identity-provider') 8 | const Keystore = require('orbit-db-keystore') 9 | 10 | // State 11 | let ipfs 12 | let log 13 | 14 | // Metrics 15 | let totalQueries = 0 16 | let seconds = 0 17 | let queriesPerSecond = 0 18 | let lastTenSeconds = 0 19 | 20 | const queryLoop = async () => { 21 | await log.append(totalQueries.toString(), 1, false) 22 | totalQueries++ 23 | lastTenSeconds++ 24 | queriesPerSecond++ 25 | setImmediate(queryLoop) 26 | } 27 | 28 | const run = (() => { 29 | console.log('Starting benchmark...') 30 | 31 | const repoConf = { 32 | storageBackends: { 33 | blocks: DatastoreLevel 34 | } 35 | } 36 | 37 | ipfs = new IPFS({ 38 | repo: new IPFSRepo('./ipfs-log-benchmarks/ipfs', repoConf), 39 | start: false, 40 | EXPERIMENTAL: { 41 | pubsub: false, 42 | sharding: false, 43 | dht: false 44 | } 45 | }) 46 | 47 | ipfs.on('error', (err) => { 48 | console.error(err) 49 | }) 50 | 51 | ipfs.on('ready', async () => { 52 | // Use memory store to test without disk IO 53 | // const memstore = new MemStore() 54 | // ipfs.dag.put = memstore.put.bind(memstore) 55 | // ipfs.dag.get = memstore.get.bind(memstore) 56 | const keystore = new Keystore('./ipfs-log-benchmarks/keys/') 57 | const identity = await IdentityProvider.createIdentity({ id: 'userA', keystore }) 58 | 59 | log = new Log(ipfs, identity, { logId: 'A' }) 60 | 61 | // Output metrics at 1 second interval 62 | setInterval(() => { 63 | seconds++ 64 | if (seconds % 10 === 0) { 65 | console.log(`--> Average of ${lastTenSeconds / 10} q/s in the last 10 seconds`) 66 | if (lastTenSeconds === 0) throw new Error('Problems!') 67 | lastTenSeconds = 0 68 | } 69 | console.log(`${queriesPerSecond} queries per second, ${totalQueries} queries in ${seconds} seconds (Entry count: ${log.values.length})`) 70 | queriesPerSecond = 0 71 | }, 1000) 72 | 73 | setImmediate(queryLoop) 74 | }) 75 | })() 76 | 77 | module.exports = run 78 | -------------------------------------------------------------------------------- /test/browser.spec.js: -------------------------------------------------------------------------------- 1 | import { Level } from 'level' 2 | 3 | const isBrowser = () => typeof window !== 'undefined' 4 | 5 | // This file will be picked up by webpack into the 6 | // tests bundle and the code here gets run when imported 7 | // into the browser tests index through browser/run.js 8 | before(async () => { 9 | if (isBrowser()) { 10 | const keyA = (await import('./fixtures/keys/signing-keys/userA.json')).default 11 | const keyB = (await import('./fixtures/keys/signing-keys/userB.json')).default 12 | const keyC = (await import('./fixtures/keys/signing-keys/userC.json')).default 13 | const keyD = (await import('./fixtures/keys/signing-keys/userD.json')).default 14 | const keyE = (await import('./fixtures/keys/identity-keys/0358df8eb5def772917748fdf8a8b146581ad2041eae48d66cc6865f11783499a6.json')).default 15 | const keyF = (await import('./fixtures/keys/identity-keys/032f7b6ef0432b572b45fcaf27e7f6757cd4123ff5c5266365bec82129b8c5f214.json')).default 16 | const keyG = (await 
import('./fixtures/keys/identity-keys/02a38336e3a47f545a172c9f77674525471ebeda7d6c86140e7a778f67ded92260.json')).default 17 | const keyH = (await import('./fixtures/keys/identity-keys/03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c.json')).default 18 | 19 | // If in browser, put the fixture keys in local storage 20 | // so that Keystore can find them 21 | const signingStore = new Level('./orbitdb/identity/signingkeys') 22 | const identityStore = new Level('./orbitdb/identity/identitykeys') 23 | 24 | const copyFixtures = [] 25 | 26 | copyFixtures.push(signingStore.put('userA', JSON.stringify(keyA))) 27 | copyFixtures.push(signingStore.put('userB', JSON.stringify(keyB))) 28 | copyFixtures.push(signingStore.put('userC', JSON.stringify(keyC))) 29 | copyFixtures.push(signingStore.put('userD', JSON.stringify(keyD))) 30 | 31 | copyFixtures.push(identityStore.put('0358df8eb5def772917748fdf8a8b146581ad2041eae48d66cc6865f11783499a6', JSON.stringify(keyE))) 32 | copyFixtures.push(identityStore.put('032f7b6ef0432b572b45fcaf27e7f6757cd4123ff5c5266365bec82129b8c5f214', JSON.stringify(keyF))) 33 | copyFixtures.push(identityStore.put('02a38336e3a47f545a172c9f77674525471ebeda7d6c86140e7a778f67ded92260', JSON.stringify(keyG))) 34 | copyFixtures.push(identityStore.put('03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c', JSON.stringify(keyH))) 35 | 36 | copyFixtures.push(signingStore.close()) 37 | copyFixtures.push(identityStore.close()) 38 | 39 | await Promise.all(copyFixtures) 40 | } 41 | }) 42 | -------------------------------------------------------------------------------- /benchmarks/legacy/benchmark-join.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const IPFS = require('ipfs') 4 | const IPFSRepo = require('ipfs-repo') 5 | const DatastoreLevel = require('datastore-level') 6 | const Log = require('../src/log') 7 | const IdentityProvider = require('orbit-db-identity-provider') 8 | const Keystore = require('orbit-db-keystore') 9 | 10 | // State 11 | let ipfs 12 | let log1, log2 13 | 14 | // Metrics 15 | let totalQueries = 0 16 | let seconds = 0 17 | let queriesPerSecond = 0 18 | let lastTenSeconds = 0 19 | 20 | const queryLoop = async () => { 21 | try { 22 | await Promise.all([ 23 | log1.append('a' + totalQueries), 24 | log2.append('b' + totalQueries) 25 | ]) 26 | 27 | await log1.join(log2) 28 | await log2.join(log1) 29 | totalQueries++ 30 | lastTenSeconds++ 31 | queriesPerSecond++ 32 | setImmediate(queryLoop) 33 | } catch (e) { 34 | console.error(e) 35 | process.exit(0) 36 | } 37 | } 38 | 39 | const run = (() => { 40 | console.log('Starting benchmark...') 41 | 42 | const repoConf = { 43 | storageBackends: { 44 | blocks: DatastoreLevel 45 | } 46 | } 47 | 48 | ipfs = new IPFS({ 49 | repo: new IPFSRepo('./ipfs-log-benchmarks/ipfs', repoConf), 50 | start: false, 51 | EXPERIMENTAL: { 52 | pubsub: true 53 | } 54 | }) 55 | 56 | ipfs.on('error', (err) => { 57 | console.error(err) 58 | process.exit(1) 59 | }) 60 | 61 | ipfs.on('ready', async () => { 62 | // Use memory store to test without disk IO 63 | // const memstore = new MemStore() 64 | // ipfs.dag.put = memstore.put.bind(memstore) 65 | // ipfs.dag.get = memstore.get.bind(memstore) 66 | const keystore = new Keystore('./ipfs-log-benchmarks/keys/') 67 | 68 | const identity = await IdentityProvider.createIdentity({ id: 'userA', keystore }) 69 | const identity2 = await IdentityProvider.createIdentity({ id: 'userB', keystore }) 70 | 71 | log1 = new Log(ipfs, 
identity, { logId: 'A' }) 72 | log2 = new Log(ipfs, identity2, { logId: 'A' }) 73 | 74 | // Output metrics at 1 second interval 75 | setInterval(() => { 76 | seconds++ 77 | if (seconds % 10 === 0) { 78 | console.log(`--> Average of ${lastTenSeconds / 10} q/s in the last 10 seconds`) 79 | if (lastTenSeconds === 0) throw new Error('Problems!') 80 | lastTenSeconds = 0 81 | } 82 | console.log(`${queriesPerSecond} queries per second, ${totalQueries} queries in ${seconds} seconds. log1: ${log1.length}, log2: ${log2.length}`) 83 | queriesPerSecond = 0 84 | }, 1000) 85 | 86 | queryLoop() 87 | }) 88 | })() 89 | 90 | module.exports = run 91 | -------------------------------------------------------------------------------- /docs/styles/prettify-tomorrow.css: -------------------------------------------------------------------------------- 1 | /* Tomorrow Theme */ 2 | /* Original theme - https://github.com/chriskempson/tomorrow-theme */ 3 | /* Pretty printing styles. Used with prettify.js. */ 4 | /* SPAN elements with the classes below are added by prettyprint. */ 5 | /* plain text */ 6 | .pln { 7 | color: #4d4d4c; } 8 | 9 | @media screen { 10 | /* string content */ 11 | .str { 12 | color: #718c00; } 13 | 14 | /* a keyword */ 15 | .kwd { 16 | color: #8959a8; } 17 | 18 | /* a comment */ 19 | .com { 20 | color: #8e908c; } 21 | 22 | /* a type name */ 23 | .typ { 24 | color: #4271ae; } 25 | 26 | /* a literal value */ 27 | .lit { 28 | color: #f5871f; } 29 | 30 | /* punctuation */ 31 | .pun { 32 | color: #4d4d4c; } 33 | 34 | /* lisp open bracket */ 35 | .opn { 36 | color: #4d4d4c; } 37 | 38 | /* lisp close bracket */ 39 | .clo { 40 | color: #4d4d4c; } 41 | 42 | /* a markup tag name */ 43 | .tag { 44 | color: #c82829; } 45 | 46 | /* a markup attribute name */ 47 | .atn { 48 | color: #f5871f; } 49 | 50 | /* a markup attribute value */ 51 | .atv { 52 | color: #3e999f; } 53 | 54 | /* a declaration */ 55 | .dec { 56 | color: #f5871f; } 57 | 58 | /* a variable name */ 59 | .var { 60 | color: #c82829; } 61 | 62 | /* a function name */ 63 | .fun { 64 | color: #4271ae; } } 65 | /* Use higher contrast and text-weight for printable form. 
*/ 66 | @media print, projection { 67 | .str { 68 | color: #060; } 69 | 70 | .kwd { 71 | color: #006; 72 | font-weight: bold; } 73 | 74 | .com { 75 | color: #600; 76 | font-style: italic; } 77 | 78 | .typ { 79 | color: #404; 80 | font-weight: bold; } 81 | 82 | .lit { 83 | color: #044; } 84 | 85 | .pun, .opn, .clo { 86 | color: #440; } 87 | 88 | .tag { 89 | color: #006; 90 | font-weight: bold; } 91 | 92 | .atn { 93 | color: #404; } 94 | 95 | .atv { 96 | color: #060; } } 97 | /* Style */ 98 | /* 99 | pre.prettyprint { 100 | background: white; 101 | font-family: Consolas, Monaco, 'Andale Mono', monospace; 102 | font-size: 12px; 103 | line-height: 1.5; 104 | border: 1px solid #ccc; 105 | padding: 10px; } 106 | */ 107 | 108 | /* Specify class=linenums on a pre to get line numbering */ 109 | ol.linenums { 110 | margin-top: 0; 111 | margin-bottom: 0; } 112 | 113 | /* IE indents via margin-left */ 114 | li.L0, 115 | li.L1, 116 | li.L2, 117 | li.L3, 118 | li.L4, 119 | li.L5, 120 | li.L6, 121 | li.L7, 122 | li.L8, 123 | li.L9 { 124 | /* */ } 125 | 126 | /* Alternate shading for lines */ 127 | li.L1, 128 | li.L3, 129 | li.L5, 130 | li.L7, 131 | li.L9 { 132 | /* */ } 133 | -------------------------------------------------------------------------------- /benchmarks/legacy/benchmark-join2.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const IPFS = require('ipfs') 4 | const IPFSRepo = require('ipfs-repo') 5 | const DatastoreLevel = require('datastore-level') 6 | const Log = require('../src/log') 7 | const IdentityProvider = require('orbit-db-identity-provider') 8 | const Keystore = require('orbit-db-keystore') 9 | 10 | // State 11 | let ipfs 12 | let log1, log2 13 | 14 | // Metrics 15 | // const totalQueries = 0 16 | // const queryLoop = async () => { 17 | // try { 18 | // await Promise.all([ 19 | // log1.append('a' + totalQueries), 20 | // log2.append('b' + totalQueries) 21 | // ]) 22 | // 23 | // await log1.join(log2) 24 | // await log2.join(log1) 25 | // totalQueries++ 26 | // setImmediate(queryLoop) 27 | // } catch (e) { 28 | // console.error(e) 29 | // process.exit(0) 30 | // } 31 | // } 32 | 33 | const run = (() => { 34 | console.log('Starting benchmark...') 35 | 36 | const repoConf = { 37 | storageBackends: { 38 | blocks: DatastoreLevel 39 | } 40 | } 41 | 42 | ipfs = new IPFS({ 43 | repo: new IPFSRepo('./ipfs-log-benchmarks/ipfs', repoConf), 44 | start: false, 45 | EXPERIMENTAL: { 46 | pubsub: true 47 | } 48 | }) 49 | 50 | ipfs.on('error', (err) => { 51 | console.error(err) 52 | process.exit(1) 53 | }) 54 | 55 | ipfs.on('ready', async () => { 56 | // Use memory store to test without disk IO 57 | // const memstore = new MemStore() 58 | // ipfs.dag.put = memstore.put.bind(memstore) 59 | // ipfs.dag.get = memstore.get.bind(memstore) 60 | const keystore = new Keystore('./benchmarks/ipfs-log-benchmarks/keys') 61 | const identity = await IdentityProvider.createIdentity({ id: 'userA', keystore }) 62 | const identity2 = await IdentityProvider.createIdentity({ id: 'userB', keystore }) 63 | 64 | log1 = new Log(ipfs, identity, { logId: 'A' }) 65 | log2 = new Log(ipfs, identity2, { logId: 'A' }) 66 | 67 | const amount = 10000 68 | console.log('log length:', amount) 69 | 70 | console.log('Writing log...') 71 | const st3 = new Date().getTime() 72 | for (let i = 0; i < amount; i++) { 73 | await log1.append('a' + i, 64) 74 | } 75 | const et3 = new Date().getTime() 76 | console.log('write took', (et3 - st3), 'ms') 77 | 78 | console.log('Joining 
logs...') 79 | const st = new Date().getTime() 80 | await log2.join(log1) 81 | const et = new Date().getTime() 82 | console.log('join took', (et - st), 'ms') 83 | 84 | console.log('Loading log...') 85 | const st2 = new Date().getTime() 86 | const l2 = await Log.fromEntryHash(ipfs, identity, log1.heads[0].hash, { logId: 'A' }) 87 | const et2 = new Date().getTime() 88 | console.log('load took', (et2 - st2), 'ms') 89 | console.log('Entry size:', Buffer.from(JSON.stringify(l2.heads)).length, 'bytes') 90 | // console.log(log2.heads) 91 | console.log('log length:', log2.values.length) 92 | // console.log(log2.values.map(e => e.payload)) 93 | }) 94 | })() 95 | 96 | module.exports = run 97 | -------------------------------------------------------------------------------- /benchmarks/legacy/browser/benchmark-append-signed.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 |

[HTML benchmark page; markup was stripped during extraction and the driver script (original lines 25-82) was lost. Recoverable content:]

Title: ipfs-log - benchmark append()

Description: Add an entry to a log. Measure throughput in operations per second.

      const signingKeysPath = '../test/fixtures/keys'
      const identity = await Identities.createIdentity({ id: 'userA', signingKeysPath })

      log = new Log(ipfs, identity, { logId: 'browser-benchmark-append-signed' })
      await log.append(loopCount)

Results: (rendered into the page at runtime; nothing recoverable)


--------------------------------------------------------------------------------
/docs/GSet.html:
--------------------------------------------------------------------------------
[JSDoc-generated page "Class: GSet"; page chrome, navigation and markup were stripped during extraction. Recoverable content: the GSet class page, describing "Interface for G-Set CRDT. From: 'A comprehensive study of Convergent and Commutative Replicated Data Types', https://hal.inria.fr/inria-00555588", with a single documented constructor, new GSet(), and a source link into g-set.js. Footer: "Documentation generated by JSDoc 3.6.6 on Fri Dec 11 2020 17:11:17 GMT-0500 (Eastern Standard Time)".]
170 | 171 | 172 | 173 | 174 | -------------------------------------------------------------------------------- /test/log-join-concurrent.spec.js: -------------------------------------------------------------------------------- 1 | import { strictEqual, deepStrictEqual } from 'assert' 2 | import rimraf from 'rimraf' 3 | import { copy } from 'fs-extra' 4 | import Log, { Sorting } from '../src/log.js' 5 | import IdentityProvider from 'orbit-db-identity-provider' 6 | 7 | // Test utils 8 | import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils' 9 | 10 | const { sync: rmrf } = rimraf 11 | const { SortByEntryHash } = Sorting 12 | const { createIdentity } = IdentityProvider 13 | 14 | let ipfsd, ipfs, testIdentity 15 | 16 | Object.keys(testAPIs).forEach(IPFS => { 17 | describe('Log - Join Concurrent Entries (' + IPFS + ')', function () { 18 | this.timeout(config.timeout) 19 | 20 | const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config 21 | 22 | before(async () => { 23 | rmrf(identityKeysPath) 24 | rmrf(signingKeysPath) 25 | await copy(identityKeyFixtures, identityKeysPath) 26 | await copy(signingKeyFixtures, signingKeysPath) 27 | testIdentity = await createIdentity({ id: 'userA', identityKeysPath, signingKeysPath }) 28 | ipfsd = await startIpfs(IPFS, config.defaultIpfsConfig) 29 | ipfs = ipfsd.api 30 | }) 31 | 32 | after(async () => { 33 | await stopIpfs(ipfsd) 34 | await testIdentity.provider.keystore.close() 35 | await testIdentity.provider.signingKeystore.close() 36 | rmrf(identityKeysPath) 37 | rmrf(signingKeysPath) 38 | }) 39 | 40 | describe('join ', async () => { 41 | let log1, log2 42 | 43 | before(async () => { 44 | log1 = new Log(ipfs, testIdentity, { logId: 'A', sortFn: SortByEntryHash }) 45 | log2 = new Log(ipfs, testIdentity, { logId: 'A', sortFn: SortByEntryHash }) 46 | }) 47 | 48 | it('joins consistently', async () => { 49 | for (let i = 0; i < 10; i++) { 50 | await log1.append('hello1-' + i) 51 | await log2.append('hello2-' + i) 52 | } 53 | 54 | await log1.join(log2) 55 | await log2.join(log1) 56 | 57 | const hash1 = await log1.toMultihash() 58 | const hash2 = await log2.toMultihash() 59 | 60 | strictEqual(hash1, hash2) 61 | strictEqual(log1.length, 20) 62 | deepStrictEqual(log1.values.map(e => e.payload), log2.values.map(e => e.payload)) 63 | }) 64 | 65 | it('Concurrently appending same payload after join results in same state', async () => { 66 | for (let i = 10; i < 20; i++) { 67 | await log1.append('hello1-' + i) 68 | await log2.append('hello2-' + i) 69 | } 70 | 71 | await log1.join(log2) 72 | await log2.join(log1) 73 | 74 | await log1.append('same') 75 | await log2.append('same') 76 | 77 | const hash1 = await log1.toMultihash() 78 | const hash2 = await log2.toMultihash() 79 | 80 | strictEqual(hash1, hash2) 81 | strictEqual(log1.length, 41) 82 | strictEqual(log2.length, 41) 83 | deepStrictEqual(log1.values.map(e => e.payload), log2.values.map(e => e.payload)) 84 | }) 85 | 86 | it('Joining after concurrently appending same payload joins entry once', async () => { 87 | await log1.join(log2) 88 | await log2.join(log1) 89 | 90 | strictEqual(log1.length, log2.length) 91 | strictEqual(log1.length, 41) 92 | deepStrictEqual(log1.values.map(e => e.payload), log2.values.map(e => e.payload)) 93 | }) 94 | }) 95 | }) 96 | }) 97 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-log", 3 
| "version": "6.0.0", 4 | "type": "module", 5 | "description": "Append-only log CRDT on IPFS", 6 | "main": "src/log.js", 7 | "author": "Haad", 8 | "license": "MIT", 9 | "repository": { 10 | "type": "git", 11 | "url": "https://github.com/orbitdb/ipfs-log" 12 | }, 13 | "engines": { 14 | "node": ">=14.0.0" 15 | }, 16 | "keywords": [ 17 | "ipfs", 18 | "log", 19 | "crdts", 20 | "crdt" 21 | ], 22 | "dependencies": { 23 | "json-stringify-deterministic": "^1.0.8", 24 | "orbit-db-identity-provider": "^0.5.0", 25 | "orbit-db-io": "^3.0.0", 26 | "p-do-whilst": "^2.0.0", 27 | "p-each-series": "^3.0.0", 28 | "p-map": "^5.5.0", 29 | "p-whilst": "^3.0.0" 30 | }, 31 | "devDependencies": { 32 | "@babel/cli": "^7.20.7", 33 | "@babel/core": "^7.20.12", 34 | "@babel/plugin-syntax-object-rest-spread": "~7.8.3", 35 | "@babel/plugin-transform-modules-commonjs": "^7.20.11", 36 | "@babel/plugin-transform-runtime": "^7.19.6", 37 | "@babel/preset-env": "^7.20.2", 38 | "@babel/runtime": "^7.20.7", 39 | "@mapbox/node-pre-gyp": "^1.0.10", 40 | "assert": "^2.0.0", 41 | "babel-loader": "~9.1.2", 42 | "c8": "^7.12.0", 43 | "eslint": "^8.32.0", 44 | "fs-extra": "^11.1.0", 45 | "http-server": "^14.1.1", 46 | "is-node": "^1.0.2", 47 | "jsdoc": "^4.0.0", 48 | "json-loader": "~0.5.7", 49 | "level": "^8.0.0", 50 | "mocha": "^10.2.0", 51 | "mocha-headless-chrome": "^4.0.0", 52 | "node-polyfill-webpack-plugin": "^2.0.1", 53 | "orbit-db-benchmark-runner": "^2.0.1", 54 | "orbit-db-keystore": "^2.0.0", 55 | "orbit-db-storage-adapter": "^0.9.0", 56 | "orbit-db-test-utils": "^3.0.0", 57 | "path-browserify": "^1.0.1", 58 | "process": "^0.11.10", 59 | "rimraf": "^4.1.0", 60 | "standard": "~17.0.0", 61 | "stream-browserify": "^3.0.0", 62 | "webpack": "^5.75.0", 63 | "webpack-cli": "^5.0.1" 64 | }, 65 | "standard": { 66 | "env": "mocha", 67 | "ignore": [ 68 | "lib/es5/", 69 | "docs/" 70 | ] 71 | }, 72 | "contributors": [ 73 | "haadcode", 74 | "aphelionz", 75 | "shamb0t", 76 | "thiagodelgado111", 77 | "mistakia", 78 | "satazor", 79 | "RichardLitt", 80 | "greenkeeperio-bot", 81 | "chrisdostert", 82 | "zachferland", 83 | "kaibakker", 84 | "dignifiedquire", 85 | "adam-palazzo" 86 | ], 87 | "scripts": { 88 | "test": "TEST=all c8 mocha", 89 | "test:browser": "npm run build:tests && mocha-headless-chrome -t 360000 -f ./test/browser/index.html -a no-sandbox", 90 | "build": "npm run build:es5 && npm run build:examples && npm run build:dist", 91 | "build:docs": "jsdoc src/ -d docs --readme README.md", 92 | "build:tests": "webpack --config ./conf/webpack.tests.config.js --mode production", 93 | "build:examples": "webpack --config ./conf/webpack.example.config.js", 94 | "build:dist": "webpack --config ./conf/webpack.config.js --mode production", 95 | "build:es5": "babel src --out-dir ./lib/es5/ --presets @babel/preset-env --plugins @babel/plugin-transform-runtime", 96 | "lint": "standard --env=mocha", 97 | "lint:fix": "standard --env=mocha --fix", 98 | "benchmark": "nyc benchmark-runner -r -b --baselineLimit 1000", 99 | "benchmark:stress": "benchmark-runner -r --grep stress", 100 | "examples": "http-server examples/browser" 101 | }, 102 | "localMaintainers": [ 103 | "haad ", 104 | "hajamark ", 105 | "shamb0t " 106 | ] 107 | } 108 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we 
as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at [community@orbitdb.org](mailto:community@orbitdb.org), which goes to all members of the @OrbitDB community team, or to [richardlitt@orbitdb.org](mailto:richardlitt@orbitdb.org), which goes only to [@RichardLitt](https://github.com/RichardLitt) or to [haadcode@orbitdb.org](mailto:haadcode@orbitdb.org), which goes only to [@haadcode](https://github.com/haadcode). 59 | 60 | All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 61 | 62 | Project maintainers who do not follow or enforce the Code of Conduct in good 63 | faith may face temporary or permanent repercussions as determined by other 64 | members of the project's leadership. 
65 | 66 | ## Attribution 67 | 68 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 69 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 70 | 71 | [homepage]: https://www.contributor-covenant.org 72 | -------------------------------------------------------------------------------- /benchmarks/legacy/browser/benchmark-join-signed.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 |

[HTML benchmark page; markup was stripped during extraction and the driver script (original lines 34-97) was lost. Recoverable content:]

Title: ipfs-log - benchmark join()

Description: Add an entry to two logs and join them after each update. Measure throughput in operations per second.

    const testKeyPath = '../test/fixtures/keys'
    const keystore = Keystore.create(testKeyPath)
    const identity1 = await Identities.createIdentity({ id: 'userA', keystore })
    const identity2 = await Identities.createIdentity({ id: 'userA', keystore })

    log1 = new Log(ipfs, identity1, { logId: 'browser-benchmark-join-signed' })
    log2 = new Log(ipfs, identity2, { logId: 'browser-benchmark-join-signed' })

    const add1 = await log1.append("a" + loopCount)
    const add2 = await log2.append("b" + loopCount)

    const result = await Promise.all([add1, add2])
    await log1.join(log2)
    await log2.join(log1)

Results: (rendered into the page at runtime; nothing recoverable)


--------------------------------------------------------------------------------
/benchmarks/legacy/benchmark-from-entry-hash.js:
--------------------------------------------------------------------------------
  1 | 'use strict'
  2 | 
  3 | const IPFS = require('ipfs')
  4 | const IPFSRepo = require('ipfs-repo')
  5 | const DatastoreLevel = require('datastore-level')
  6 | const Log = require('../src/log')
  7 | const IdentityProvider = require('orbit-db-identity-provider')
  8 | const Keystore = require('orbit-db-keystore')
  9 | // State
 10 | let ipfs
 11 | let log
 12 | 
 13 | // Metrics
 14 | let totalLoaded = 0
 15 | let seconds = 0
 16 | let entriesLoadedPerSecond = 0
 17 | let lastTenSeconds = 0
 18 | let total = 0
 19 | 
 20 | const run = (() => {
 21 |   console.log('Starting benchmark...')
 22 | 
 23 |   const repoConf = {
 24 |     storageBackends: {
 25 |       blocks: DatastoreLevel
 26 |     }
 27 |   }
 28 | 
 29 |   ipfs = new IPFS({
 30 |     repo: new IPFSRepo('./ipfs-log-benchmarks/ipfs', repoConf),
 31 |     start: false,
 32 |     EXPERIMENTAL: {
 33 |       pubsub: false,
 34 |       sharding: false,
 35 |       dht: false
 36 |     }
 37 |   })
 38 | 
 39 |   ipfs.on('error', (err) => {
 40 |     console.error(err)
 41 |   })
 42 | 
 43 |   ipfs.on('ready', async () => {
 44 |     // Create a log
 45 |     const keystore = new Keystore('./ipfs-log-benchmarks/keys/')
 46 |     const identity = await IdentityProvider.createIdentity({ id: 'userA', keystore })
 47 | 
 48 |     log = new Log(ipfs, identity, { logId: 'A' })
 49 | 
 50 |     const count = parseInt(process.argv[2]) || 50000
 51 |     const refCount = 64
 52 | 
 53 |     console.log('Creating a log...')
 54 | 
 55 |     const st = new Date().getTime()
 56 | 
 57 |     try {
 58 |       for (let i = 1; i < count + 1; i++) {
 59 |         await log.append('hello' + i, refCount)
 60 |         process.stdout.write('\rWriting ' + i + ' / ' + count)
 61 |       }
 62 |       const dt1 = new Date().getTime()
 63 |       process.stdout.write(' (' + (dt1 - st) + ' ms)\n')
 64 |     } catch (e) {
 65 |       console.log(e)
 66 |     }
 67 | 
 68 |     const onDataUpdated = (hash, entry, resultLength) => {
 69 |       entriesLoadedPerSecond++
 70 |       lastTenSeconds++
 71 |       total = resultLength
 72 |       process.stdout.write('\rLoading ' + total + ' / ' + count)
 73 |     }
 74 | 
 75 |     const outputMetrics = () => {
 76 |       totalLoaded = total - totalLoaded
 77 |       seconds++
 78 |       if (seconds % 10 === 0) {
 79 |         console.log(`--> Average of ${lastTenSeconds / 10} e/s in the last 10 seconds`)
 80 |         if (lastTenSeconds === 0) throw new Error('Problems!')
 81 |         lastTenSeconds = 0
 82 |       }
 83 |       console.log(`\n${entriesLoadedPerSecond} entries loaded per second, ${totalLoaded} loaded in ${seconds} seconds (Entry count: ${total})`)
 84 |       entriesLoadedPerSecond = 0
 85 |     }
 86 | 
 87 |     // Output metrics at 1 second interval
 88 |     setInterval(outputMetrics, 1000)
 89 | 
 90 |     const dt2 = new Date().getTime()
 91 | 
 92 |     if (global.gc) {
 93 |       global.gc()
 94 |     } else {
 95 |       console.warn('Start benchmark with --expose-gc flag')
 96 |     }
 97 | 
 98 |     const m1 = process.memoryUsage()
 99 | 
100 |     await Log.fromEntryHash(ipfs, log._identity, log.heads.map(e => e.hash), {
101 |       logId: log._id,
102 |       length: -1,
103 |       exclude: [],
104 |       onProgressCallback: onDataUpdated
105 |     })
106 | 
107 |     outputMetrics()
108 |     const et = new Date().getTime()
109 |     console.log('Loading took:', (et - dt2), 'ms')
110 | 
111 |     const m2 = process.memoryUsage()
112 |     const usedDelta = m1.heapUsed && Math.abs(m1.heapUsed - m2.heapUsed) / m1.heapUsed * 100
113 |     const totalDelta = m1.heapTotal && Math.abs(m1.heapTotal - m2.heapTotal) / m1.heapTotal * 100
114 | 
115 |     let usedOutput = `Memory Heap Used: ${(m2.heapUsed / 1024 / 1024).toFixed(2)} MB`
116 |     usedOutput += ` (${m2.heapUsed > m1.heapUsed ? '+' : '-'}${usedDelta.toFixed(2)}%)`
117 |     let totalOutput = `Memory Heap Total: ${(m2.heapTotal / 1024 / 1024).toFixed(2)} MB`
118 |     totalOutput += ` (${m2.heapTotal > m1.heapTotal ? '+' : '-'}${totalDelta.toFixed(2)}%)`
119 | 
120 |     console.log(usedOutput)
121 |     console.log(totalOutput)
122 | 
123 |     process.exit(0)
124 |   })
125 | })()
126 | 
127 | module.exports = run
128 | 
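
[Editor's sketch] The heap reporting at the end of this benchmark (original lines 111-118) computes a percentage delta between two memoryUsage() snapshots and prefixes it with '+' or '-'. A worked example of the same arithmetic, with made-up measurements:

    // Same formula as above, with invented numbers for illustration.
    const m1 = { heapUsed: 100 * 1024 * 1024 }   // 100 MB before loading
    const m2 = { heapUsed: 125 * 1024 * 1024 }   // 125 MB after loading
    const usedDelta = Math.abs(m1.heapUsed - m2.heapUsed) / m1.heapUsed * 100
    const sign = m2.heapUsed > m1.heapUsed ? '+' : '-'
    console.log(`Memory Heap Used: ${(m2.heapUsed / 1024 / 1024).toFixed(2)} MB (${sign}${usedDelta.toFixed(2)}%)`)
    // -> Memory Heap Used: 125.00 MB (+25.00%)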


--------------------------------------------------------------------------------
/src/log-sorting.js:
--------------------------------------------------------------------------------
  1 | import Clock from './lamport-clock.js'
  2 | 
  3 | /**
  4 |  * Sort two entries as Last-Write-Wins (LWW).
  5 |  *
  6 |  * Last Write Wins is a conflict resolution strategy for sorting elements
  7 |  * where the element with a greater clock (latest) is chosen as the winner.
  8 |  *
  9 |  * @param {Entry} a First entry
 10 |  * @param {Entry} b Second entry
 11 |  * @returns {number} 1 if a is latest, -1 if b is latest
 12 |  */
 13 | function LastWriteWins (a, b) {
 14 |   // Ultimate conflict resolution (take the first/left arg)
 15 |   const First = (a, b) => a
 16 |   // Sort two entries by their clock id, if the same always take the first
 17 |   const sortById = (a, b) => SortByClockId(a, b, First)
 18 |   // Sort two entries by their clock time, if concurrent,
 19 |   // determine sorting using provided conflict resolution function
 20 |   const sortByEntryClocks = (a, b) => SortByClocks(a, b, sortById)
 21 |   // Sort entries by clock time as the primary sort criteria
 22 |   return sortByEntryClocks(a, b)
 23 | }
 24 | 
 25 | /**
 26 |  * Sort two entries by their hash.
 27 |  *
 28 |  * @param {Entry} a First entry
 29 |  * @param {Entry} b Second entry
 30 |  * @returns {number} 1 if a is latest, -1 if b is latest
 31 |  */
 32 | function SortByEntryHash (a, b) {
 33 |   // Ultimate conflict resolution (compare hashes)
 34 |   const compareHash = (a, b) => a.hash < b.hash ? -1 : 1
 35 |   // Sort two entries by their clock id, if the same then compare hashes
 36 |   const sortById = (a, b) => SortByClockId(a, b, compareHash)
 37 |   // Sort two entries by their clock time, if concurrent,
 38 |   // determine sorting using provided conflict resolution function
 39 |   const sortByEntryClocks = (a, b) => SortByClocks(a, b, sortById)
 40 |   // Sort entries by clock time as the primary sort criteria
 41 |   return sortByEntryClocks(a, b)
 42 | }
 43 | 
 44 | /**
 45 |  * Sort two entries by their clock time.
 46 |  * @param {Entry} a First entry to compare
 47 |  * @param {Entry} b Second entry to compare
 48 |  * @param {function(a, b)} resolveConflict A function to call if entries are concurrent (happened at the same time). The function should take in two entries and return 1 if the first entry should be chosen and -1 if the second entry should be chosen.
 49 |  * @returns {number} 1 if a is greater, -1 if b is greater
 50 |  */
 51 | function SortByClocks (a, b, resolveConflict) {
 52 |   // Compare the clocks
 53 |   const diff = Clock.compare(a.clock, b.clock)
 54 |   // If the clocks are concurrent, use the provided
 55 |   // conflict resolution function to determine which comes first
 56 |   return diff === 0 ? resolveConflict(a, b) : diff
 57 | }
 58 | 
 59 | /**
 60 |  * Sort two entries by their clock id.
 61 |  * @param {Entry} a First entry to compare
 62 |  * @param {Entry} b Second entry to compare
 63 |  * @param {function(a, b)} resolveConflict A function to call if the clocks ids are the same. The function should take in two entries and return 1 if the first entry should be chosen and -1 if the second entry should be chosen.
 64 |  * @returns {number} 1 if a is greater, -1 if b is greater
 65 |  */
 66 | function SortByClockId (a, b, resolveConflict) {
 67 |   // Sort by ID if clocks are concurrent,
 68 |   // take the entry with a "greater" clock id
 69 |   return a.clock.id === b.clock.id
 70 |     ? resolveConflict(a, b)
 71 |     : a.clock.id < b.clock.id ? -1 : 1
 72 | }
 73 | 
 74 | /**
 75 |  * A wrapper that throws an error if the passed comparison function ever returns zero
 76 |  * @param {function(a, b)} func The tiebreaker function to validate.
 77 |  * @returns {function(a, b)} A comparator that returns 1 if a is greater, -1 if b is greater
 78 |  * @throws {Error} if func ever returns 0
 79 |  */
 80 | function NoZeroes (func) {
 81 |   const msg = `Your log's tiebreaker function, ${func.name}, has returned zero and therefore cannot be used`
 82 | 
 83 |   const comparator = (a, b) => {
 84 |     // Validate by calling the function
 85 |     const result = func(a, b)
 86 |     if (result === 0) { throw Error(msg) }
 87 |     return result
 88 |   }
 89 | 
 90 |   return comparator
 91 | }
 92 | 
 93 | export default {
 94 |   SortByClocks,
 95 |   SortByClockId,
 96 |   LastWriteWins,
 97 |   SortByEntryHash,
 98 |   NoZeroes
 99 | }
100 | 
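Taken together, the comparators compose: clock time first, then clock id, then the ultimate tiebreaker. A minimal sketch of the exported functions in use (run from the repo root; the entry literals are illustrative stand-ins carrying only the fields the comparators read):

```javascript
import Sorting from './src/log-sorting.js'

const { LastWriteWins, SortByEntryHash, NoZeroes } = Sorting

// Same clock time, different clock ids: the clock comparison itself
// breaks the tie on id, so 'idA' orders before 'idB'
const a = { hash: 'hashA', clock: { id: 'idA', time: 1 } }
const b = { hash: 'hashB', clock: { id: 'idB', time: 1 } }
console.log(LastWriteWins(a, b)) // -1

// Same clock time AND same clock id: SortByEntryHash falls all the
// way back to comparing the entry hashes
const c = { hash: 'hashC', clock: { id: 'idA', time: 1 } }
const d = { hash: 'hashD', clock: { id: 'idA', time: 1 } }
console.log(SortByEntryHash(c, d)) // -1

// NoZeroes wraps a comparator so an unresolvable tie (a 0 result)
// throws instead of silently producing an unstable ordering
const sorted = [d, c, b, a].sort(NoZeroes(SortByEntryHash))
console.log(sorted.map(e => e.hash)) // [ 'hashA', 'hashC', 'hashD', 'hashB' ]
```

Note that `LastWriteWins` resolves a full tie by simply keeping its left argument, which is why a log that may contain fully concurrent entries should prefer a total order such as `SortByEntryHash`.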


--------------------------------------------------------------------------------
/test/log-append.spec.js:
--------------------------------------------------------------------------------
  1 | import { strictEqual } from 'assert'
  2 | import rimraf from 'rimraf'
  3 | import { copy } from 'fs-extra'
  4 | import Log from '../src/log.js'
  5 | import IdentityProvider from 'orbit-db-identity-provider'
  6 | import Keystore from 'orbit-db-keystore'
  7 | 
  8 | // Test utils
  9 | import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
 10 | 
 11 | const { createIdentity } = IdentityProvider
 12 | const { sync: rmrf } = rimraf
 13 | 
 14 | let ipfsd, ipfs, testIdentity
 15 | 
 16 | Object.keys(testAPIs).forEach((IPFS) => {
 17 |   describe('Log - Append (' + IPFS + ')', function () {
 18 |     this.timeout(config.timeout)
 19 | 
 20 |     const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config
 21 | 
 22 |     let keystore, signingKeystore
 23 | 
 24 |     before(async () => {
 25 |       rmrf(identityKeysPath)
 26 |       rmrf(signingKeysPath)
 27 |       await copy(identityKeyFixtures, identityKeysPath)
 28 |       await copy(signingKeyFixtures, signingKeysPath)
 29 | 
 30 |       keystore = new Keystore(identityKeysPath)
 31 |       signingKeystore = new Keystore(signingKeysPath)
 32 | 
 33 |       testIdentity = await createIdentity({ id: 'userA', keystore, signingKeystore })
 34 |       ipfsd = await startIpfs(IPFS, config.defaultIpfsConfig)
 35 |       ipfs = ipfsd.api
 36 |     })
 37 | 
 38 |     after(async () => {
 39 |       await stopIpfs(ipfsd)
 40 |       rmrf(identityKeysPath)
 41 |       rmrf(signingKeysPath)
 42 | 
 43 |       await keystore.close()
 44 |       await signingKeystore.close()
 45 |     })
 46 | 
 47 |     describe('append', () => {
 48 |       describe('append one', () => {
 49 |         let log
 50 | 
 51 |         before(async () => {
 52 |           log = new Log(ipfs, testIdentity, 'A')
 53 |           await log.append('hello1')
 54 |         })
 55 | 
 56 |         it('added the correct amount of items', () => {
 57 |           strictEqual(log.length, 1)
 58 |         })
 59 | 
 60 |         it('added the correct values', async () => {
 61 |           log.values.forEach((entry) => {
 62 |             strictEqual(entry.payload, 'hello1')
 63 |           })
 64 |         })
 65 | 
 66 |         it('added the correct amount of next pointers', async () => {
 67 |           log.values.forEach((entry) => {
 68 |             strictEqual(entry.next.length, 0)
 69 |           })
 70 |         })
 71 | 
 72 |         it('has the correct heads', async () => {
 73 |           log.heads.forEach((head) => {
 74 |             strictEqual(head.hash, log.values[0].hash)
 75 |           })
 76 |         })
 77 | 
 78 |         it('updated the clocks correctly', async () => {
 79 |           log.values.forEach((entry) => {
 80 |             strictEqual(entry.clock.id, testIdentity.publicKey)
 81 |             strictEqual(entry.clock.time, 1)
 82 |           })
 83 |         })
 84 |       })
 85 | 
 86 |       describe('append 100 items to a log', () => {
 87 |         const amount = 100
 88 |         const nextPointerAmount = 64
 89 | 
 90 |         let log
 91 | 
 92 |         before(async () => {
 93 |           log = new Log(ipfs, testIdentity, 'A')
 94 |           for (let i = 0; i < amount; i++) {
 95 |             await log.append('hello' + i, nextPointerAmount)
 96 |             // Make sure the log has the right heads after each append
 97 |             const values = log.values
 98 |             strictEqual(log.heads.length, 1)
 99 |             strictEqual(log.heads[0].hash, values[values.length - 1].hash)
100 |           }
101 |         })
102 | 
103 |         it('added the correct amount of items', () => {
104 |           strictEqual(log.length, amount)
105 |         })
106 | 
107 |         it('added the correct values', async () => {
108 |           log.values.forEach((entry, index) => {
109 |             strictEqual(entry.payload, 'hello' + index)
110 |           })
111 |         })
112 | 
113 |         it('updated the clocks correctly', async () => {
114 |           log.values.forEach((entry, index) => {
115 |             strictEqual(entry.clock.time, index + 1)
116 |             strictEqual(entry.clock.id, testIdentity.publicKey)
117 |           })
118 |         })
119 | 
120 |         it('added the correct amount of refs pointers', async () => {
121 |           log.values.forEach((entry, index) => {
122 |             strictEqual(entry.refs.length, index > 0 ? Math.ceil(Math.log2(Math.min(nextPointerAmount, index))) : 0)
123 |           })
124 |         })
125 |       })
126 |     })
127 |   })
128 | })
129 | 
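The last assertion above encodes how `refs` grow: each reference reaches twice as far back as the previous one (distances 2, 4, 8, ...), so the count grows with log2 of the entry index until the pointer count caps it. A standalone sketch of that expectation (not part of the suite; `expectedRefCount` is a hypothetical helper mirroring the assertion):

```javascript
// Mirror of the assertion in 'added the correct amount of refs pointers':
// refs double in distance, so their count is ceil(log2(min(pointers, index)))
const nextPointerAmount = 64

const expectedRefCount = (index) =>
  index > 0 ? Math.ceil(Math.log2(Math.min(nextPointerAmount, index))) : 0

for (const index of [0, 1, 2, 4, 16, 64, 99]) {
  console.log(index, expectedRefCount(index))
}
// 0 0 | 1 0 | 2 1 | 4 2 | 16 4 | 64 6 | 99 6
```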


--------------------------------------------------------------------------------
/.circleci/config.yml:
--------------------------------------------------------------------------------
 1 | # This configuration was automatically generated from a CircleCI 1.0 config.
 2 | # It should include any build commands you had along with commands that CircleCI
 3 | # inferred from your project structure. We strongly recommend you read all the
 4 | # comments in this file to understand the structure of CircleCI 2.0, as the idiom
 5 | # for configuration has changed substantially in 2.0 to allow arbitrary jobs rather
 6 | # than the prescribed lifecycle of 1.0. In general, we recommend using this generated
 7 | # configuration as a reference rather than using it in production, though in most
 8 | # cases it should duplicate the execution of your original 1.0 config.
 9 | version: 2
10 | jobs:
11 |   build:
12 |     working_directory: ~/orbitdb/ipfs-log
13 |     parallelism: 1
14 |     shell: /bin/bash --login
15 |     # CircleCI 2.0 does not support environment variables that refer to each other the same way as 1.0 did.
16 |     # If any of these refer to each other, rewrite them so that they don't or see https://circleci.com/docs/2.0/env-vars/#interpolating-environment-variables-to-set-other-environment-variables .
17 |     environment:
18 |       CIRCLE_ARTIFACTS: /tmp/circleci-artifacts
19 |       CIRCLE_TEST_REPORTS: /tmp/circleci-test-results
20 |     # In CircleCI 1.0 we used a pre-configured image with a large number of languages and other packages.
21 |     # In CircleCI 2.0 you can now specify your own image, or use one of our pre-configured images.
22 |     # The following configuration line tells CircleCI to use the specified docker image as the runtime environment for your job.
23 |     # We have selected a pre-built image that mirrors the build environment we use on
24 |     # the 1.0 platform, but we recommend you choose an image more tailored to the needs
25 |     # of each job. For more information on choosing an image (or alternatively using a
26 |     # VM instead of a container) see https://circleci.com/docs/2.0/executor-types/
27 |     # To see the list of pre-built images that CircleCI provides for most common languages see
28 |     # https://circleci.com/docs/2.0/circleci-images/
29 |     docker:
30 |       - image: circleci/node:lts-browsers
31 |     steps:
32 |     # Machine Setup
33 |     #   If you break your build into multiple jobs with workflows, you will probably want to do the parts of this that are relevant in each
34 |     # The following `checkout` command checks out your code to your working directory. In 1.0 we did this implicitly. In 2.0 you can choose where in the course of a job your code should be checked out.
35 |     - checkout
36 |     # Prepare for artifact and test result collection, equivalent to how it was done on 1.0.
37 |     # In many cases you can simplify this from what is generated here.
38 |     # 'See docs on artifact collection here https://circleci.com/docs/2.0/artifacts/'
39 |     - run: mkdir -p $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS
40 |     # This is based on your 1.0 configuration file or project settings
41 |     # Dependencies
42 |     #   This would typically go in either a build or a build-and-test job when using workflows
43 |     # Restore the dependency cache
44 |     - restore_cache:
45 |         keys:
46 |         # This branch if available
47 |         - v2.1-dep-{{ .Branch }}-
48 |         # Default branch if not
49 |         - v2.1-dep-master-
50 |         # Any branch if there are none on the default branch - this should be unnecessary if you have your default branch configured correctly
51 |         - v2.1-dep-
52 |     # The following line was run implicitly in your 1.0 builds based on what CircleCI inferred about the structure of your project. In 2.0 you need to be explicit about which commands should be run. In some cases you can discard inferred commands if they are not relevant to your project.
53 |     - run: if [ -z "${NODE_ENV:-}" ]; then export NODE_ENV=test; fi
54 |     - run: export PATH="~/orbitdb/ipfs-log/node_modules/.bin:$PATH"
55 |     - run: npm install
56 |     # Save dependency cache
57 |     - save_cache:
58 |         key: v2.1-dep-{{ .Branch }}-{{ epoch }}
59 |         paths:
60 |         # This is a broad list of cache paths to include many possible development environments
61 |         # You can probably delete some of these entries
62 |         - vendor/bundle
63 |         - ~/virtualenvs
64 |         - ~/.m2
65 |         - ~/.ivy2
66 |         - ~/.bundle
67 |         - ~/.go_workspace
68 |         - ~/.gradle
69 |         - ~/.cache/bower
70 |         - ./node_modules
71 |     # Test
72 |     #   This would typically be a build job when using workflows, possibly combined with build
73 |     # The following line was run implicitly in your 1.0 builds based on what CircleCI inferred about the structure of your project. In 2.0 you need to be explicit about which commands should be run. In some cases you can discard inferred commands if they are not relevant to your project.
74 |     # Teardown
75 |     - run: npm run lint
76 |     - run: npm test
77 |     - run: npm run test:browser
78 |     - run: npm run benchmark
79 |     #   If you break your build into multiple jobs with workflows, you will probably want to do the parts of this that are relevant in each
80 |     # Save test results
81 |     - store_test_results:
82 |         path: /tmp/circleci-test-results
83 |     # Save artifacts
84 |     - store_artifacts:
85 |         path: /tmp/circleci-artifacts
86 |     - store_artifacts:
87 |         path: /tmp/circleci-test-results
88 | 


--------------------------------------------------------------------------------
/test/fixtures/v1-entries.fixture.js:
--------------------------------------------------------------------------------
1 | export const v1Entries = JSON.parse('[{"hash": "zdpuAsJDrLKrAiU8M518eu6mgv9HzS3e1pfH5XC7LUsFgsK5c","id": "A","payload": "one","next": [],"v": 1,"clock": {   "id":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "time": 1 },"key": "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361","identity": { "id":    "03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c",   "publicKey":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "signatures":    { "id":       "3045022100f5f6f10571d14347aaf34e526ce3419fd64d75ffa7aa73692cbb6aeb6fbc147102203a3e3fa41fa8fcbb9fc7c148af5b640e2f704b20b3a4e0b93fc3a6d44dffb41e",      "publicKey":       "3044022020982b8492be0c184dc29de0a3a3bd86a86ba997756b0bf41ddabd24b47c5acf02203745fda39d7df650a5a478e52bbe879f0cb45c074025a93471414a56077640a4" },   "type": "orbitdb" },"sig": "3045022100f72546c99cf30eda1d394d91209bdb4569408a792caf9dc7c6415fef37a3118d0220645c4a6d218f8fc478af5bab175aaa99e1505d70c2a00997aacafa8de697944e" },{ "hash": "zdpuAxgKyiM9qkP9yPKCCqrHer9kCqYyr7KbhucsPwwfh6JB3","id": "A","payload": "two","next": [ "zdpuAsJDrLKrAiU8M518eu6mgv9HzS3e1pfH5XC7LUsFgsK5c" ],"v": 1,"clock": {   "id":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "time": 2 },"key": "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361","identity": { "id":    "03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c",   "publicKey":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "signatures":    { "id":       "3045022100f5f6f10571d14347aaf34e526ce3419fd64d75ffa7aa73692cbb6aeb6fbc147102203a3e3fa41fa8fcbb9fc7c148af5b640e2f704b20b3a4e0b93fc3a6d44dffb41e",      "publicKey":       "3044022020982b8492be0c184dc29de0a3a3bd86a86ba997756b0bf41ddabd24b47c5acf02203745fda39d7df650a5a478e52bbe879f0cb45c074025a93471414a56077640a4" },   "type": "orbitdb" },"sig": "3045022100b85c85c59e6d0952f95e3839e48b43b4073ef26f6f4696d785ce64053cd5869a0220644a4a7a15ddcd2b152611b08bf23b9df7823846719f2d0e4b0aff64190ed146" },{ "hash": "zdpuAq7PAbQ7iavSdkNUUUrRUba5wSpRDJRsiC8RcvkXdgqYJ","id": "A","payload": "three","next": [ "zdpuAxgKyiM9qkP9yPKCCqrHer9kCqYyr7KbhucsPwwfh6JB3" ],"v": 1,"clock": {   "id":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "time": 3 },"key": "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361","identity": { "id":    "03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c",   "publicKey":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "signatures":    { "id":       "3045022100f5f6f10571d14347aaf34e526ce3419fd64d75ffa7aa73692cbb6aeb6fbc147102203a3e3fa41fa8fcbb9fc7c148af5b640e2f704b20b3a4e0b93fc3a6d44dffb41e",      "publicKey":       "3044022020982b8492be0c184dc29de0a3a3bd86a86ba997756b0bf41ddabd24b47c5acf02203745fda39d7df650a5a478e52bbe879f0cb45c074025a93471414a56077640a4" },   "type": "orbitdb" 
},"sig": "304402206f6a1582bc2c18b63eeb5b1e2280f2700c5d467d60185738702f90f4e655214602202ce0fb6de31b42a24768f274ecb4c1e2ed8529e073cfb361fc1ef5d1e2d75a31" },{ "hash": "zdpuAqgCh78NCXffmFYv4DM2KfhhpY92agJ9sKRB2eq9B5mFA","id": "A","payload": "four","next": [ "zdpuAq7PAbQ7iavSdkNUUUrRUba5wSpRDJRsiC8RcvkXdgqYJ" ],"v": 1,"clock": {   "id":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "time": 4 },"key": "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361","identity": { "id":    "03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c",   "publicKey":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "signatures":    { "id":       "3045022100f5f6f10571d14347aaf34e526ce3419fd64d75ffa7aa73692cbb6aeb6fbc147102203a3e3fa41fa8fcbb9fc7c148af5b640e2f704b20b3a4e0b93fc3a6d44dffb41e",      "publicKey":       "3044022020982b8492be0c184dc29de0a3a3bd86a86ba997756b0bf41ddabd24b47c5acf02203745fda39d7df650a5a478e52bbe879f0cb45c074025a93471414a56077640a4" },   "type": "orbitdb" },"sig": "30440220103ff89892856ec222d37b1244199cfb6e39629f155cd80ffa9b6e0b67de98940220391da8dc35e0b99f247c41676b8fb2337879d05dd343c55d9a89275c05076dcc" },{ "hash": "zdpuAwNuRc2Kc1aNDdcdSWuxfNpHRJQw8L8APBNHCEFuyU4Xf","id": "A","payload": "five","next": [ "zdpuAqgCh78NCXffmFYv4DM2KfhhpY92agJ9sKRB2eq9B5mFA" ],"v": 1,"clock": {   "id":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "time": 5 },"key": "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361","identity": { "id":    "03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c",   "publicKey":    "048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361",   "signatures":    { "id":       "3045022100f5f6f10571d14347aaf34e526ce3419fd64d75ffa7aa73692cbb6aeb6fbc147102203a3e3fa41fa8fcbb9fc7c148af5b640e2f704b20b3a4e0b93fc3a6d44dffb41e",      "publicKey":       "3044022020982b8492be0c184dc29de0a3a3bd86a86ba997756b0bf41ddabd24b47c5acf02203745fda39d7df650a5a478e52bbe879f0cb45c074025a93471414a56077640a4" },"type":"orbitdb"},"sig":"3044022012a6bad4be1aabec23816bc8ccaf3cb41d43f06adb3f7d55b14fe2ddae37035a02204324d0b9481c351a1b6c391bd9cb960c039f102f950cf2a48fd8648f7615c51f"}]')
2 | 
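For orientation, the fixture holds five v1 entries ('one' through 'five') forming a single chain: each entry's `next` points at the previous entry's hash and the Lamport clock times run 1 to 5. A small verification sketch (assuming it is run from the repo root):

```javascript
import { v1Entries } from './test/fixtures/v1-entries.fixture.js'

v1Entries.forEach((entry, i) => {
  // Clock times are sequential: 1, 2, 3, ...
  console.assert(entry.clock.time === i + 1)
  // Every entry after the first links back to its predecessor
  if (i > 0) console.assert(entry.next[0] === v1Entries[i - 1].hash)
})

console.log(v1Entries.map(e => e.payload))
// [ 'one', 'two', 'three', 'four', 'five' ]
```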


--------------------------------------------------------------------------------
/docs/log-sorting.js.html:
--------------------------------------------------------------------------------
[JSDoc-generated source view of src/log-sorting.js: the page wraps the log-sorting.js source shown above (as its CommonJS build, using require/exports) in JSDoc HTML boilerplate. Footer: "Documentation generated by JSDoc 3.6.6 on Fri Dec 11 2020 17:11:17 GMT-0500 (Eastern Standard Time)". Markup and duplicated source omitted.]
--------------------------------------------------------------------------------
/test/replicate.spec.js:
--------------------------------------------------------------------------------
import { strictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import Log from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from 'orbit-db-keystore'

// Test utils
import { config, testAPIs, startIpfs, stopIpfs, getIpfsPeerId, waitForPeers, connectPeers } from 'orbit-db-test-utils'

const { sync: rmrf } = rimraf
const { fromMultihash } = Log
const { createIdentity } = IdentityProvider

Object.keys(testAPIs).forEach((IPFS) => {
  describe('ipfs-log - Replication (' + IPFS + ')', function () {
    this.timeout(config.timeout * 2)

    let ipfsd1, ipfsd2, ipfs1, ipfs2, id1, id2, testIdentity, testIdentity2

    const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config

    let keystore, signingKeystore

    before(async () => {
      rmrf(identityKeysPath)
      rmrf(signingKeysPath)
      await copy(identityKeyFixtures, identityKeysPath)
      await copy(signingKeyFixtures, signingKeysPath)

      // Start two IPFS instances
      ipfsd1 = await startIpfs(IPFS, config.daemon1)
      ipfsd2 = await startIpfs(IPFS, config.daemon2)
      ipfs1 = ipfsd1.api
      ipfs2 = ipfsd2.api

      await connectPeers(ipfs1, ipfs2)

      // Get the peer IDs
      id1 = await getIpfsPeerId(ipfs1)
      id2 = await getIpfsPeerId(ipfs2)

      keystore = new Keystore(identityKeysPath)
      signingKeystore = new Keystore(signingKeysPath)

      // Create an identity for each peer
      testIdentity = await createIdentity({ id: 'userB', keystore, signingKeystore })
      testIdentity2 = await createIdentity({ id: 'userA', keystore, signingKeystore })
    })

    after(async () => {
      await stopIpfs(ipfsd1)
      await stopIpfs(ipfsd2)
      rmrf(identityKeysPath)
      rmrf(signingKeysPath)

      await keystore.close()
      await signingKeystore.close()
    })

    describe('replicates logs deterministically', function () {
      const amount = 128 + 1
      const channel = 'XXX'
      const logId = 'A'

      let log1, log2, input1, input2
      const buffer1 = []
      const buffer2 = []
      let processing = 0

      const handleMessage = async (message) => {
        if (id1.toString() === message.from.toString()) {
          return
        }
        const hash = Buffer.from(message.data).toString()
        buffer1.push(hash)
        processing++
        process.stdout.write('\r')
        process.stdout.write(`> Buffer1: ${buffer1.length} - Buffer2: ${buffer2.length}`)
        const log = await fromMultihash(ipfs1, testIdentity, hash)
        await log1.join(log)
        processing--
      }

      const handleMessage2 = async (message) => {
        if (id2.toString() === message.from.toString()) {
          return
        }
        const hash = Buffer.from(message.data).toString()
        buffer2.push(hash)
        processing++
        process.stdout.write('\r')
        process.stdout.write(`> Buffer1: ${buffer1.length} - Buffer2: ${buffer2.length}`)
        const log = await fromMultihash(ipfs2, testIdentity2, hash)
        await log2.join(log)
        processing--
      }

      beforeEach(async () => {
        log1 = new Log(ipfs1, testIdentity, { logId })
        log2 = new Log(ipfs2, testIdentity2, { logId })
        input1 = new Log(ipfs1, testIdentity, { logId })
        input2 = new Log(ipfs2, testIdentity2, { logId })
        await ipfs1.pubsub.subscribe(channel, handleMessage)
        await ipfs2.pubsub.subscribe(channel, handleMessage2)
      })

      afterEach(async () => {
        await ipfs1.pubsub.unsubscribe(channel, handleMessage)
        await ipfs2.pubsub.unsubscribe(channel, handleMessage2)
      })

      it('replicates logs', async () => {
        await waitForPeers(ipfs1, [id2], channel)

        for (let i = 1; i <= amount; i++) {
          await input1.append('A' + i)
          await input2.append('B' + i)
          const hash1 = await input1.toMultihash()
          const hash2 = await input2.toMultihash()
          await ipfs1.pubsub.publish(channel, Buffer.from(hash1))
          await ipfs2.pubsub.publish(channel, Buffer.from(hash2))
        }

        console.log('\nAll messages sent')

        const whileProcessingMessages = (timeoutMs) => {
          return new Promise((resolve, reject) => {
            const timeout = setTimeout(() => reject(new Error('timeout')), timeoutMs)
            const timer = setInterval(() => {
              if (buffer1.length + buffer2.length === amount * 2 &&
                processing === 0) {
                console.log('\nAll messages received')
                clearInterval(timer)
                clearTimeout(timeout)
                resolve()
              }
            }, 200)
          })
        }

        console.log('Waiting for all to process')
        await whileProcessingMessages(config.timeout * 2)

        const result = new Log(ipfs1, testIdentity, { logId })
        await result.join(log1)
        await result.join(log2)

        strictEqual(buffer1.length, amount)
        strictEqual(buffer2.length, amount)
        strictEqual(result.length, amount * 2)
        strictEqual(log1.length, amount)
        strictEqual(log2.length, amount)
        strictEqual(result.values[0].payload, 'A1')
        strictEqual(result.values[1].payload, 'B1')
        strictEqual(result.values[2].payload, 'A2')
        strictEqual(result.values[3].payload, 'B2')
        strictEqual(result.values[99].payload, 'B50')
        strictEqual(result.values[100].payload, 'A51')
        strictEqual(result.values[198].payload, 'A100')
        strictEqual(result.values[199].payload, 'B100')
      })
    })
  })
})

--------------------------------------------------------------------------------
/API.md:
--------------------------------------------------------------------------------
# ipfs-log - API Documentation

# Log

To use `ipfs-log`, require the module in your project:

```javascript
const Log = require('ipfs-log')
```

### Constructor

#### new Log(ipfs, identity, [{ logId, access, entries, heads, clock, sortFn }])

Create a log. Each log gets a unique ID, which can be passed in the `options` as `logId`. Returns a `Log` instance.

```javascript
const IdentityProvider = require('orbit-db-identity-provider')
const identity = await IdentityProvider.createIdentity({ id: 'peerid' })
const ipfs = new IPFS()
const log = new Log(ipfs, identity, { logId: 'logid' })

console.log(log.id)
// 'logid'
```

`ipfs` is an instance of IPFS. `identity` is an instance of [Identity](https://github.com/orbitdb/orbit-db-identity-provider/blob/master/src/identity.js), used to sign entries. `logId` is a unique log identifier; usually this should be a user id or similar. `access` is an instance of `AccessController`, which by default allows anyone to append to the log.

### Properties

#### id

Returns the ID of the log.

#### values

Returns an `Array` of [entries](https://github.com/orbitdb/ipfs-log/blob/master/src/entry.js) in the log. The values are in linearized order according to their [Lamport clocks](https://en.wikipedia.org/wiki/Lamport_timestamps).

```javascript
const values = log.values
// TODO: output example
```

#### length

Returns the number of entries in the log.

#### clock

Returns the current timestamp of the log.

#### heads

Returns the heads of the log. Heads are the entries that are not referenced by other entries in the log.

```javascript
const heads = log.heads
// TODO: output example
```

#### tails

Returns the tails of the log. Tails are the entries that reference other entries that are not in the log.

```javascript
const tails = log.tails
// TODO: output example
```

### Methods

#### append(data)

Append an entry to the log. Returns a *Promise* that resolves to the latest `Entry`.

`data` can be any type of data: Number, String, Object, etc. It can also be an instance of [Entry](https://github.com/orbitdb/ipfs-log/blob/master/src/entry.js).

```javascript
await log.append({ some: 'data' })
await log.append('text')
console.log(log.values)
// [
//   { hash: 'zdpuArZdzymC6zRTMGd5xw4Dw2Q2VCYjuaHAekTSyXS1GmSKs',
//     id: 'logId',
//     payload: { some: 'data' },
//     next: [],
//     v: 1,
//     clock:
//      LamportClock {
//        id: '04d1b23b1efe6c4d91cd639caf443528b88358369fa552fe8dd9cda17d6c77c42969c688ec0d201e3f8a128334a3b0806ece694b55892b036c0781ce18d35a374b',
//        time: 1 },
//     key: '04d1b23b1efe6c4d91cd639caf443528b88358369fa552fe8dd9cda17d6c77c42969c688ec0d201e3f8a128334a3b0806ece694b55892b036c0781ce18d35a374b',
//     identity: ...
//   },
//   { hash: 'zdpuAuDmVuEfgcUja7SCuNuqkiLCPXVutFTkSE8k8b9oLCVcR',
//     id: 'logId',
//     payload: 'text',
//     next: [ 'zdpuArZdzymC6zRTMGd5xw4Dw2Q2VCYjuaHAekTSyXS1GmSKs' ],
//     v: 1,
//     clock:
//      LamportClock {
//        id: '04d1b23b1efe6c4d91cd639caf443528b88358369fa552fe8dd9cda17d6c77c42969c688ec0d201e3f8a128334a3b0806ece694b55892b036c0781ce18d35a374b',
//        time: 2 },
//     key: '04d1b23b1efe6c4d91cd639caf443528b88358369fa552fe8dd9cda17d6c77c42969c688ec0d201e3f8a128334a3b0806ece694b55892b036c0781ce18d35a374b',
//     identity:
//      { ... }
//   }
// ]
```

#### join(log, [length])

Join the log with another log. Returns a *Promise* that resolves to a `Log` instance. The size of the joined log can be limited with the `length` argument.

```javascript
// log1.values ==> ['A', 'B', 'C']
// log2.values ==> ['C', 'D', 'E']

log1.join(log2)
  .then(() => console.log(log1.values))
// ['A', 'B', 'C', 'D', 'E']
```

#### toMultihash({ format })

Returns the multihash of the log.

Converting the log to a multihash will persist the contents of `log.toJSON` to IPFS, thus causing side effects.

You can specify the `format` with which to write the content to IPFS. By default `dag-cbor` is used, returning a [CIDv1](https://github.com/multiformats/cid#how-does-it-work) string. To return a CIDv0 string, set `format` to `dag-pb`.

```javascript
log1.toMultihash()
  .then(hash => console.log(hash))

// zdpuAsfLFPAYJ41C2bZYZCKZxGkYUD9Wt7mcXHWcR19Jjko9B

log1.toMultihash({ format: 'dag-pb' })
  .then(hash => console.log(hash))

// QmR8rV2Ph2yUaw7eW7e86TZF4XDjb13QbPAN83YEpYHxiw
```

#### toBuffer()

Converts the log to a `Buffer` that contains the log as a JSON.stringified `string`. Returns a `Buffer`.

```javascript
const buffer = log1.toBuffer()
```

#### toString()

Returns the log values as a nicely formatted string.

```javascript
console.log(log.toString())
// two
// └─one
//   └─three
```

### Static methods

#### Log.isLog(log)

Check if an object is a `Log` instance.

```javascript
Log.isLog(log1)
// true
Log.isLog('hello')
// false
```

#### Log.fromEntry(ipfs, identity, sourceEntries, [{ access, length=-1, exclude, onProgressCallback, sortFn }])

Create a `Log` from an `Entry`.

Creating a log from an entry will retrieve entries from IPFS, thus causing side effects.

#### Log.fromEntryHash(ipfs, identity, hash, [{ logId, length=-1, access, exclude, onProgressCallback, sortFn }])

Create a `Log` from a hash of an `Entry`.

Creating a log from a hash will retrieve entries from IPFS, thus causing side effects.

#### Log.fromMultihash(ipfs, identity, hash, [{ access, length=-1, exclude, onProgressCallback, sortFn }])

Create a `Log` from a hash.

Creating a log from a hash will retrieve entries from IPFS, thus causing side effects.
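
A minimal usage sketch (the multihash is the illustrative value from the `toMultihash` example above; `ipfs` and `identity` are set up as in the constructor example):

```javascript
// Rebuild a log from a persisted multihash, fetching at most 10 entries
const hash = 'zdpuAsfLFPAYJ41C2bZYZCKZxGkYUD9Wt7mcXHWcR19Jjko9B'
const log = await Log.fromMultihash(ipfs, identity, hash, { length: 10 })
console.log(log.length) // at most 10
```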

--------------------------------------------------------------------------------
/test/log-references.spec.js:
--------------------------------------------------------------------------------
import { strictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import Log from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from 'orbit-db-keystore'

// Test utils
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'

const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider

let ipfsd, ipfs, testIdentity

Object.keys(testAPIs).forEach((IPFS) => {
  describe('Log - References (' + IPFS + ')', function () {
    this.timeout(config.timeout)

    const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config

    let keystore, signingKeystore

    before(async () => {
      rmrf(identityKeysPath)
      rmrf(signingKeysPath)
      await copy(identityKeyFixtures, identityKeysPath)
      await copy(signingKeyFixtures, signingKeysPath)

      keystore = new Keystore(identityKeysPath)
      signingKeystore = new Keystore(signingKeysPath)

      testIdentity = await createIdentity({ id: 'userA', keystore, signingKeystore })
      ipfsd = await startIpfs(IPFS, config.defaultIpfsConfig)
      ipfs = ipfsd.api
    })

    after(async () => {
      await stopIpfs(ipfsd)
      rmrf(identityKeysPath)
      rmrf(signingKeysPath)

      await keystore.close()
      await signingKeystore.close()
    })

    describe('References', () => {
      it('creates entries with references', async () => {
        const amount = 64
        const maxReferenceDistance = 2
        const log1 = new Log(ipfs, testIdentity, { logId: 'A' })
        const log2 = new Log(ipfs, testIdentity, { logId: 'B' })
        const log3 = new Log(ipfs, testIdentity, { logId: 'C' })
        const log4 = new Log(ipfs, testIdentity, { logId: 'D' })

        for (let i = 0; i < amount; i++) {
          await log1.append(i.toString(), maxReferenceDistance)
        }

        for (let i = 0; i < amount * 2; i++) {
          await log2.append(i.toString(), Math.pow(maxReferenceDistance, 2))
        }

        for (let i = 0; i < amount * 3; i++) {
          await log3.append(i.toString(), Math.pow(maxReferenceDistance, 3))
        }

        for (let i = 0; i < amount * 4; i++) {
          await log4.append(i.toString(), Math.pow(maxReferenceDistance, 4))
        }

        strictEqual(log1.values[log1.length - 1].next.length, 1)
        strictEqual(log2.values[log2.length - 1].next.length, 1)
        strictEqual(log3.values[log3.length - 1].next.length, 1)
        strictEqual(log4.values[log4.length - 1].next.length, 1)
        strictEqual(log1.values[log1.length - 1].refs.length, 1)
        strictEqual(log2.values[log2.length - 1].refs.length, 2)
        strictEqual(log3.values[log3.length - 1].refs.length, 3)
        strictEqual(log4.values[log4.length - 1].refs.length, 4)
      })

      const inputs = [
        { amount: 1, referenceCount: 1, refLength: 0 },
        { amount: 1, referenceCount: 2, refLength: 0 },
        { amount: 2, referenceCount: 1, refLength: 1 },
        { amount: 2, referenceCount: 2, refLength: 1 },
        { amount: 3, referenceCount: 2, refLength: 1 },
        { amount: 3, referenceCount: 4, refLength: 1 },
        { amount: 4, referenceCount: 4, refLength: 2 },
        { amount: 4, referenceCount: 4, refLength: 2 },
        { amount: 32, referenceCount: 4, refLength: 2 },
        { amount: 32, referenceCount: 8, refLength: 3 },
        { amount: 32, referenceCount: 16, refLength: 4 },
        { amount: 18, referenceCount: 32, refLength: 5 },
        { amount: 128, referenceCount: 32, refLength: 5 },
        { amount: 64, referenceCount: 64, refLength: 6 },
        { amount: 65, referenceCount: 64, refLength: 6 },
        { amount: 128, referenceCount: 64, refLength: 6 },
        { amount: 128, referenceCount: 1, refLength: 0 },
        { amount: 128, referenceCount: 2, refLength: 1 },
        { amount: 256, referenceCount: 1, refLength: 0 },
        { amount: 256, referenceCount: 256, refLength: 8 },
        { amount: 256, referenceCount: 1024, refLength: 8 }
      ]

      inputs.forEach(input => {
        it(`has ${input.refLength} references, max distance ${input.referenceCount}, total of ${input.amount} entries`, async () => {
          const test = async (amount, referenceCount, refLength) => {
            const log1 = new Log(ipfs, testIdentity, { logId: 'A' })
            for (let i = 0; i < amount; i++) {
              await log1.append((i + 1).toString(), referenceCount)
            }

            strictEqual(log1.values.length, input.amount)
            strictEqual(log1.values[log1.length - 1].clock.time, input.amount)

            for (let k = 0; k < input.amount; k++) {
              const idx = log1.length - k - 1
              strictEqual(log1.values[idx].clock.time, idx + 1)

              // Check the first ref (distance 2)
              if (log1.values[idx].refs.length > 0) { strictEqual(log1.values[idx].refs[0], log1.values[idx - 2].hash) }

              // Check the second ref (distance 4)
              if (log1.values[idx].refs.length > 1 && idx > referenceCount) { strictEqual(log1.values[idx].refs[1], log1.values[idx - 4].hash) }

              // Check the third ref (distance 8)
              if (log1.values[idx].refs.length > 2 && idx > referenceCount) { strictEqual(log1.values[idx].refs[2], log1.values[idx - 8].hash) }

              // Check the fourth ref (distance 16)
              if (log1.values[idx].refs.length > 3 && idx > referenceCount) { strictEqual(log1.values[idx].refs[3], log1.values[idx - 16].hash) }

              // Check the fifth ref (distance 32)
              if (log1.values[idx].refs.length > 4 && idx > referenceCount) { strictEqual(log1.values[idx].refs[4], log1.values[idx - 32].hash) }

              // Check the reference count of each entry
              if (idx > referenceCount) { strictEqual(log1.values[idx].refs.length, refLength) }
            }
          }

          await test(input.amount, input.referenceCount, input.refLength)
        })
      })
    })
  })
})

--------------------------------------------------------------------------------
/test/entry-io.spec.js:
--------------------------------------------------------------------------------
import { strictEqual, deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import EntryIO from '../src/entry-io.js'
import Log from '../src/log.js'
import Keystore from 'orbit-db-keystore'
import IdentityProvider from 'orbit-db-identity-provider'
// Test utils
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'

const { sync: rmrf } = rimraf

const { fromMultihash } = Log
const { fetchAll } = EntryIO
const { createIdentity } = IdentityProvider

let ipfsd, ipfs, testIdentity, testIdentity2, testIdentity3, testIdentity4

const last = arr => arr[arr.length - 1]

Object.keys(testAPIs).forEach((IPFS) => {
  describe('Entry - Persistency (' + IPFS + ')', function () {
    this.timeout(config.timeout)

    const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config

    let options, keystore, signingKeystore

    before(async () => {
      rmrf(identityKeysPath)
      rmrf(signingKeysPath)
      await copy(identityKeyFixtures, identityKeysPath)
      await copy(signingKeyFixtures, signingKeysPath)
      const defaultOptions = { identityKeysPath, signingKeysPath }

      keystore = new Keystore(identityKeysPath)
      signingKeystore = new Keystore(signingKeysPath)

      const users = ['userA', 'userB', 'userC', 'userD']
      options = users.map((user) => {
        return Object.assign({}, defaultOptions, { id: user, keystore, signingKeystore })
      })

      testIdentity = await createIdentity(options[0])
      testIdentity2 = await createIdentity(options[1])
      testIdentity3 = await createIdentity(options[2])
      testIdentity4 = await createIdentity(options[3])
      ipfsd = await startIpfs(IPFS, config.defaultIpfsConfig)
      ipfs = ipfsd.api
    })

    after(async () => {
      await stopIpfs(ipfsd)
      rmrf(identityKeysPath)
      rmrf(signingKeysPath)

      await keystore.close()
      await signingKeystore.close()
    })

    it('log with one entry', async () => {
      const log = new Log(ipfs, testIdentity, { logId: 'X' })
      await log.append('one')
      const hash = log.values[0].hash
      const res = await fetchAll(ipfs, hash, { length: 1 })
      strictEqual(res.length, 1)
    })

    it('log with 2 entries', async () => {
      const log = new Log(ipfs, testIdentity, { logId: 'X' })
      await log.append('one')
      await log.append('two')
      const hash = last(log.values).hash
      const res = await fetchAll(ipfs, hash, { length: 2 })
      strictEqual(res.length, 2)
    })

    it('loads max 1 entry from a log of 2 entries', async () => {
      const log = new Log(ipfs, testIdentity, { logId: 'X' })
      await log.append('one')
      await log.append('two')
      const hash = last(log.values).hash
      const res = await fetchAll(ipfs, hash, { length: 1 })
      strictEqual(res.length, 1)
    })

    it('log with 100 entries', async () => {
      const count = 100
      const log = new Log(ipfs, testIdentity, { logId: 'X' })
      for (let i = 0; i < count; i++) {
        await log.append('hello' + i)
      }
      const hash = await log.toMultihash()
      const result = await fromMultihash(ipfs, testIdentity, hash)
      strictEqual(result.length, count)
    })

    it('load only 42 entries from a log with 100 entries', async () => {
      const count = 100
      const log = new Log(ipfs, testIdentity, { logId: 'X' })
      let log2 = new Log(ipfs, testIdentity, { logId: 'X' })
      for (let i = 1; i <= count; i++) {
        await log.append('hello' + i)
        if (i % 10 === 0) {
          log2 = new Log(ipfs, testIdentity,
            { logId: log2.id, entries: log2.values, heads: log2.heads.concat(log.heads) })
          await log2.append('hi' + i)
        }
      }

      const hash = await log.toMultihash()
      const result = await fromMultihash(ipfs, testIdentity, hash, { length: 42 })
      strictEqual(result.length, 42)
    })

    it('load only 99 entries from a log with 100 entries', async () => {
      const count = 100
      const log = new Log(ipfs, testIdentity, { logId: 'X' })
      let log2 = new Log(ipfs, testIdentity, { logId: 'X' })
      for (let i = 1; i <= count; i++) {
        await log.append('hello' + i)
        if (i % 10 === 0) {
          log2 = new Log(ipfs, testIdentity, { logId: log2.id, entries: log2.values })
          await log2.append('hi' + i)
          await log2.join(log)
        }
      }

      const hash = await log2.toMultihash()
      const result = await fromMultihash(ipfs, testIdentity, hash, { length: 99 })
      strictEqual(result.length, 99)
    })

    it('load only 10 entries from a log with 100 entries', async () => {
      const count = 100
      const log = new Log(ipfs, testIdentity, { logId: 'X' })
      let log2 = new Log(ipfs, testIdentity, { logId: 'X' })
      let log3 = new Log(ipfs, testIdentity, { logId: 'X' })
      for (let i = 1; i <= count; i++) {
        await log.append('hello' + i)
        if (i % 10 === 0) {
          log2 = new Log(ipfs, testIdentity,
            { logId: log2.id, entries: log2.values, heads: log2.heads })
          await log2.append('hi' + i)
          await log2.join(log)
        }
        if (i % 25 === 0) {
          log3 = new Log(ipfs, testIdentity,
            { logId: log3.id, entries: log3.values, heads: log3.heads.concat(log2.heads) })
          await log3.append('--' + i)
        }
      }

      await log3.join(log2)
      const hash = await log3.toMultihash()
      const result = await fromMultihash(ipfs, testIdentity, hash, { length: 10 })
      strictEqual(result.length, 10)
    })

    it('load only 10 entries and then expand to max from a log with 30 entries', async () => {
      const count = 30

      const log = new Log(ipfs, testIdentity, { logId: 'X' })
      const log2 = new Log(ipfs, testIdentity2, { logId: 'X' })
      let log3 = new Log(ipfs, testIdentity3, { logId: 'X' })
      for (let i = 1; i <= count; i++) {
        await log.append('hello' + i)
        if (i % 10 === 0) {
          await log2.append('hi' + i)
          await log2.join(log)
        }
        if (i % 25 === 0) {
          log3 = new Log(ipfs, testIdentity3,
            { logId: log3.id, entries: log3.values, heads: log3.heads.concat(log2.heads) })
          await log3.append('--' + i)
        }
      }

      await log3.join(log2)

      const log4 = new Log(ipfs, testIdentity4, { logId: 'X' })
      await log4.join(log2)
      await log4.join(log3)

      const values3 = log3.values.map((e) => e.payload)
      const values4 = log4.values.map((e) => e.payload)

      deepStrictEqual(values3, values4)
    })
  })
})

--------------------------------------------------------------------------------
/src/log-io.js:
--------------------------------------------------------------------------------
import Entry from './entry.js'
import EntryIO from './entry-io.js'
import Sorting from './log-sorting.js'
import { IPFSNotDefinedError, LogNotDefinedError, NotALogError } from './log-errors.js'
import { isDefined, findUniques, difference } from './utils/index.js'
import { read, write } from 'orbit-db-io'

const { LastWriteWins, NoZeroes } = Sorting
const { fetchAll, fetchParallel } = EntryIO
const { compare, isEntry } = Entry
const IPLD_LINKS = ['heads']
const last = (arr, n) => arr.slice(arr.length - Math.min(arr.length, n), arr.length)

class LogIO {
  /**
   * Get the multihash of a Log.
18 | * @param {IPFS} ipfs An IPFS instance 19 | * @param {Log} log Log to get a multihash for 20 | * @returns {Promise} 21 | * @deprecated 22 | */ 23 | static async toMultihash (ipfs, log, { format } = {}) { 24 | if (!isDefined(ipfs)) throw IPFSNotDefinedError() 25 | if (!isDefined(log)) throw LogNotDefinedError() 26 | if (!isDefined(format)) format = 'dag-cbor' 27 | if (log.values.length < 1) throw new Error('Can\'t serialize an empty log') 28 | 29 | return write(ipfs, format, log.toJSON(), { links: IPLD_LINKS }) 30 | } 31 | 32 | /** 33 | * Create a log from a hashes. 34 | * @param {IPFS} ipfs An IPFS instance 35 | * @param {string} hash The hash of the log 36 | * @param {Object} options 37 | * @param {number} options.length How many items to include in the log 38 | * @param {Array} options.exclude Entries to not fetch (cached) 39 | * @param {function(hash, entry, parent, depth)} options.onProgressCallback 40 | */ 41 | static async fromMultihash (ipfs, hash, 42 | { length = -1, exclude = [], shouldExclude, timeout, concurrency, sortFn, onProgressCallback }) { 43 | if (!isDefined(ipfs)) throw IPFSNotDefinedError() 44 | if (!isDefined(hash)) throw new Error(`Invalid hash: ${hash}`) 45 | 46 | const logData = await read(ipfs, hash, { links: IPLD_LINKS }) 47 | 48 | if (!logData.heads || !logData.id) throw NotALogError() 49 | 50 | // Use user provided sorting function or the default one 51 | sortFn = sortFn || NoZeroes(LastWriteWins) 52 | const isHead = e => logData.heads.includes(e.hash) 53 | 54 | const all = await fetchAll(ipfs, logData.heads, 55 | { length, exclude, shouldExclude, timeout, concurrency, onProgressCallback }) 56 | 57 | const logId = logData.id 58 | const entries = length > -1 ? last(all.sort(sortFn), length) : all 59 | const heads = entries.filter(isHead) 60 | return { logId, entries, heads } 61 | } 62 | 63 | /** 64 | * Create a log from an entry hash. 65 | * @param {IPFS} ipfs An IPFS instance 66 | * @param {string} hash The hash of the entry 67 | * @param {Object} options 68 | * @param {number} options.length How many items to include in the log 69 | * @param {Array} options.exclude Entries to not fetch (cached) 70 | * @param {function(hash, entry, parent, depth)} options.onProgressCallback 71 | */ 72 | static async fromEntryHash (ipfs, hash, 73 | { length = -1, exclude = [], shouldExclude, timeout, concurrency, sortFn, onProgressCallback }) { 74 | if (!isDefined(ipfs)) throw IPFSNotDefinedError() 75 | if (!isDefined(hash)) throw new Error("'hash' must be defined") 76 | // Convert input hash(s) to an array 77 | const hashes = Array.isArray(hash) ? hash : [hash] 78 | // Fetch given length, return size at least the given input entries 79 | length = length > -1 ? Math.max(length, 1) : length 80 | const all = await fetchParallel(ipfs, hashes, 81 | { length, exclude, shouldExclude, timeout, concurrency, onProgressCallback }) 82 | // Cap the result at the right size by taking the last n entries, 83 | // or if given length is -1, then take all 84 | sortFn = sortFn || NoZeroes(LastWriteWins) 85 | const entries = length > -1 ? 
last(all.sort(sortFn), length) : all 86 | return { entries } 87 | } 88 | 89 | /** 90 | * Creates a log data from a JSON object, to be passed to a Log constructor 91 | * 92 | * @param {IPFS} ipfs An IPFS instance 93 | * @param {json} json A json object containing valid log data 94 | * @param {Object} options 95 | * @param {number} options.length How many entries to include 96 | * @param {function(hash, entry, parent, depth)} options.onProgressCallback 97 | **/ 98 | static async fromJSON (ipfs, json, { length = -1, timeout, concurrency, onProgressCallback }) { 99 | if (!isDefined(ipfs)) throw IPFSNotDefinedError() 100 | const { id, heads } = json 101 | const headHashes = heads.map(e => e.hash) 102 | const all = await fetchParallel(ipfs, headHashes, 103 | { length, timeout, concurrency, onProgressCallback }) 104 | const entries = all.sort(compare) 105 | return { logId: id, entries, heads } 106 | } 107 | 108 | /** 109 | * Create a new log starting from an entry. 110 | * @param {IPFS} ipfs An IPFS instance 111 | * @param {Entry|Array} sourceEntries An entry or an array of entries to fetch a log from 112 | * @param {Object} options 113 | * @param {number} options.length How many entries to include 114 | * @param {Array} options.exclude Entries to not fetch (cached) 115 | * @param {function(hash, entry, parent, depth)} options.onProgressCallback 116 | */ 117 | static async fromEntry (ipfs, sourceEntries, 118 | { length = -1, exclude = [], shouldExclude, timeout, concurrency, onProgressCallback }) { 119 | if (!isDefined(ipfs)) throw IPFSNotDefinedError() 120 | if (!isDefined(sourceEntries)) throw new Error("'sourceEntries' must be defined") 121 | 122 | // Make sure we only have Entry objects as input 123 | if (!Array.isArray(sourceEntries) && !isEntry(sourceEntries)) { 124 | throw new Error('\'sourceEntries\' argument must be an array of Entry instances or a single Entry') 125 | } 126 | 127 | if (!Array.isArray(sourceEntries)) { 128 | sourceEntries = [sourceEntries] 129 | } 130 | 131 | // Fetch given length, return size at least the given input entries 132 | length = length > -1 ? Math.max(length, sourceEntries.length) : length 133 | 134 | // Make sure we pass hashes instead of objects to the fetcher function 135 | const hashes = sourceEntries.map(e => e.hash) 136 | 137 | // Fetch the entries 138 | const all = await fetchParallel(ipfs, hashes, 139 | { length, exclude, shouldExclude, timeout, concurrency, onProgressCallback }) 140 | 141 | // Combine the fetches with the source entries and take only uniques 142 | const combined = sourceEntries.concat(all).concat(exclude) 143 | const uniques = findUniques(combined, 'hash').sort(compare) 144 | 145 | // Cap the result at the right size by taking the last n entries 146 | const sliced = uniques.slice(length > -1 ? 
-length : -uniques.length) 147 | 148 | // Make sure that the given input entries are present in the result 149 | // in order to not lose references 150 | const missingSourceEntries = difference(sliced, sourceEntries, 'hash') 151 | 152 | const replaceInFront = (a, withEntries) => { 153 | const sliced = a.slice(withEntries.length, a.length) 154 | return withEntries.concat(sliced) 155 | } 156 | 157 | // Add the input entries at the beginning of the array and remove 158 | // as many elements from the array before inserting the original entries 159 | const entries = replaceInFront(sliced, missingSourceEntries) 160 | const logId = entries[entries.length - 1].id 161 | return { logId, entries } 162 | } 163 | } 164 | 165 | export default LogIO 166 | -------------------------------------------------------------------------------- /docs/styles/jsdoc-default.css: -------------------------------------------------------------------------------- 1 | @font-face { 2 | font-family: 'Open Sans'; 3 | font-weight: normal; 4 | font-style: normal; 5 | src: url('../fonts/OpenSans-Regular-webfont.eot'); 6 | src: 7 | local('Open Sans'), 8 | local('OpenSans'), 9 | url('../fonts/OpenSans-Regular-webfont.eot?#iefix') format('embedded-opentype'), 10 | url('../fonts/OpenSans-Regular-webfont.woff') format('woff'), 11 | url('../fonts/OpenSans-Regular-webfont.svg#open_sansregular') format('svg'); 12 | } 13 | 14 | @font-face { 15 | font-family: 'Open Sans Light'; 16 | font-weight: normal; 17 | font-style: normal; 18 | src: url('../fonts/OpenSans-Light-webfont.eot'); 19 | src: 20 | local('Open Sans Light'), 21 | local('OpenSans Light'), 22 | url('../fonts/OpenSans-Light-webfont.eot?#iefix') format('embedded-opentype'), 23 | url('../fonts/OpenSans-Light-webfont.woff') format('woff'), 24 | url('../fonts/OpenSans-Light-webfont.svg#open_sanslight') format('svg'); 25 | } 26 | 27 | html 28 | { 29 | overflow: auto; 30 | background-color: #fff; 31 | font-size: 14px; 32 | } 33 | 34 | body 35 | { 36 | font-family: 'Open Sans', sans-serif; 37 | line-height: 1.5; 38 | color: #4d4e53; 39 | background-color: white; 40 | } 41 | 42 | a, a:visited, a:active { 43 | color: #0095dd; 44 | text-decoration: none; 45 | } 46 | 47 | a:hover { 48 | text-decoration: underline; 49 | } 50 | 51 | header 52 | { 53 | display: block; 54 | padding: 0px 4px; 55 | } 56 | 57 | tt, code, kbd, samp { 58 | font-family: Consolas, Monaco, 'Andale Mono', monospace; 59 | } 60 | 61 | .class-description { 62 | font-size: 130%; 63 | line-height: 140%; 64 | margin-bottom: 1em; 65 | margin-top: 1em; 66 | } 67 | 68 | .class-description:empty { 69 | margin: 0; 70 | } 71 | 72 | #main { 73 | float: left; 74 | width: 70%; 75 | } 76 | 77 | article dl { 78 | margin-bottom: 40px; 79 | } 80 | 81 | article img { 82 | max-width: 100%; 83 | } 84 | 85 | section 86 | { 87 | display: block; 88 | background-color: #fff; 89 | padding: 12px 24px; 90 | border-bottom: 1px solid #ccc; 91 | margin-right: 30px; 92 | } 93 | 94 | .variation { 95 | display: none; 96 | } 97 | 98 | .signature-attributes { 99 | font-size: 60%; 100 | color: #aaa; 101 | font-style: italic; 102 | font-weight: lighter; 103 | } 104 | 105 | nav 106 | { 107 | display: block; 108 | float: right; 109 | margin-top: 28px; 110 | width: 30%; 111 | box-sizing: border-box; 112 | border-left: 1px solid #ccc; 113 | padding-left: 16px; 114 | } 115 | 116 | nav ul { 117 | font-family: 'Lucida Grande', 'Lucida Sans Unicode', arial, sans-serif; 118 | font-size: 100%; 119 | line-height: 17px; 120 | padding: 0; 121 | margin: 0; 122 | 
list-style-type: none; 123 | } 124 | 125 | nav ul a, nav ul a:visited, nav ul a:active { 126 | font-family: Consolas, Monaco, 'Andale Mono', monospace; 127 | line-height: 18px; 128 | color: #4D4E53; 129 | } 130 | 131 | nav h3 { 132 | margin-top: 12px; 133 | } 134 | 135 | nav li { 136 | margin-top: 6px; 137 | } 138 | 139 | footer { 140 | display: block; 141 | padding: 6px; 142 | margin-top: 12px; 143 | font-style: italic; 144 | font-size: 90%; 145 | } 146 | 147 | h1, h2, h3, h4 { 148 | font-weight: 200; 149 | margin: 0; 150 | } 151 | 152 | h1 153 | { 154 | font-family: 'Open Sans Light', sans-serif; 155 | font-size: 48px; 156 | letter-spacing: -2px; 157 | margin: 12px 24px 20px; 158 | } 159 | 160 | h2, h3.subsection-title 161 | { 162 | font-size: 30px; 163 | font-weight: 700; 164 | letter-spacing: -1px; 165 | margin-bottom: 12px; 166 | } 167 | 168 | h3 169 | { 170 | font-size: 24px; 171 | letter-spacing: -0.5px; 172 | margin-bottom: 12px; 173 | } 174 | 175 | h4 176 | { 177 | font-size: 18px; 178 | letter-spacing: -0.33px; 179 | margin-bottom: 12px; 180 | color: #4d4e53; 181 | } 182 | 183 | h5, .container-overview .subsection-title 184 | { 185 | font-size: 120%; 186 | font-weight: bold; 187 | letter-spacing: -0.01em; 188 | margin: 8px 0 3px 0; 189 | } 190 | 191 | h6 192 | { 193 | font-size: 100%; 194 | letter-spacing: -0.01em; 195 | margin: 6px 0 3px 0; 196 | font-style: italic; 197 | } 198 | 199 | table 200 | { 201 | border-spacing: 0; 202 | border: 0; 203 | border-collapse: collapse; 204 | } 205 | 206 | td, th 207 | { 208 | border: 1px solid #ddd; 209 | margin: 0px; 210 | text-align: left; 211 | vertical-align: top; 212 | padding: 4px 6px; 213 | display: table-cell; 214 | } 215 | 216 | thead tr 217 | { 218 | background-color: #ddd; 219 | font-weight: bold; 220 | } 221 | 222 | th { border-right: 1px solid #aaa; } 223 | tr > th:last-child { border-right: 1px solid #ddd; } 224 | 225 | .ancestors, .attribs { color: #999; } 226 | .ancestors a, .attribs a 227 | { 228 | color: #999 !important; 229 | text-decoration: none; 230 | } 231 | 232 | .clear 233 | { 234 | clear: both; 235 | } 236 | 237 | .important 238 | { 239 | font-weight: bold; 240 | color: #950B02; 241 | } 242 | 243 | .yes-def { 244 | text-indent: -1000px; 245 | } 246 | 247 | .type-signature { 248 | color: #aaa; 249 | } 250 | 251 | .name, .signature { 252 | font-family: Consolas, Monaco, 'Andale Mono', monospace; 253 | } 254 | 255 | .details { margin-top: 14px; border-left: 2px solid #DDD; } 256 | .details dt { width: 120px; float: left; padding-left: 10px; padding-top: 6px; } 257 | .details dd { margin-left: 70px; } 258 | .details ul { margin: 0; } 259 | .details ul { list-style-type: none; } 260 | .details li { margin-left: 30px; padding-top: 6px; } 261 | .details pre.prettyprint { margin: 0 } 262 | .details .object-value { padding-top: 0; } 263 | 264 | .description { 265 | margin-bottom: 1em; 266 | margin-top: 1em; 267 | } 268 | 269 | .code-caption 270 | { 271 | font-style: italic; 272 | font-size: 107%; 273 | margin: 0; 274 | } 275 | 276 | .source 277 | { 278 | border: 1px solid #ddd; 279 | width: 80%; 280 | overflow: auto; 281 | } 282 | 283 | .prettyprint.source { 284 | width: inherit; 285 | } 286 | 287 | .source code 288 | { 289 | font-size: 100%; 290 | line-height: 18px; 291 | display: block; 292 | padding: 4px 12px; 293 | margin: 0; 294 | background-color: #fff; 295 | color: #4D4E53; 296 | } 297 | 298 | .prettyprint code span.line 299 | { 300 | display: inline-block; 301 | } 302 | 303 | .prettyprint.linenums 304 | { 305 | 
padding-left: 70px;
306 |     -webkit-user-select: none;
307 |     -moz-user-select: none;
308 |     -ms-user-select: none;
309 |     user-select: none;
310 | }
311 | 
312 | .prettyprint.linenums ol
313 | {
314 |     padding-left: 0;
315 | }
316 | 
317 | .prettyprint.linenums li
318 | {
319 |     border-left: 3px #ddd solid;
320 | }
321 | 
322 | .prettyprint.linenums li.selected,
323 | .prettyprint.linenums li.selected *
324 | {
325 |     background-color: lightyellow;
326 | }
327 | 
328 | .prettyprint.linenums li *
329 | {
330 |     -webkit-user-select: text;
331 |     -moz-user-select: text;
332 |     -ms-user-select: text;
333 |     user-select: text;
334 | }
335 | 
336 | .params .name, .props .name, .name code {
337 |     color: #4D4E53;
338 |     font-family: Consolas, Monaco, 'Andale Mono', monospace;
339 |     font-size: 100%;
340 | }
341 | 
342 | .params td.description > p:first-child,
343 | .props td.description > p:first-child
344 | {
345 |     margin-top: 0;
346 |     padding-top: 0;
347 | }
348 | 
349 | .params td.description > p:last-child,
350 | .props td.description > p:last-child
351 | {
352 |     margin-bottom: 0;
353 |     padding-bottom: 0;
354 | }
355 | 
356 | .disabled {
357 |     color: #454545;
358 | }
359 | 
-------------------------------------------------------------------------------- /src/entry-io.js: --------------------------------------------------------------------------------
1 | import pMap from 'p-map'
2 | import pDoWhilst from 'p-do-whilst'
3 | import Entry from './entry.js'
4 | 
5 | const { isEntry, fromMultihash } = Entry
6 | const hasItems = arr => arr && arr.length > 0
7 | 
8 | class EntryIO {
9 |   // Fetch log graphs in parallel
10 |   static async fetchParallel (ipfs, hashes, { length, exclude = [], shouldExclude, timeout, concurrency, onProgressCallback }) {
11 |     const fetchOne = async (hash) => EntryIO.fetchAll(ipfs, hash, { length, exclude, shouldExclude, timeout, onProgressCallback, concurrency })
12 |     const concatArrays = (arr1, arr2) => arr1.concat(arr2)
13 |     const flatten = (arr) => arr.reduce(concatArrays, [])
14 |     const res = await pMap(hashes, fetchOne, { concurrency: Math.max(concurrency || hashes.length, 1) })
15 |     return flatten(res)
16 |   }
17 | 
18 |   /**
19 |    * Fetch log entries
20 |    *
21 |    * @param {IPFS} ipfs An IPFS instance
22 |    * @param {string|Array<string>} hashes Multihash(es) of the entry or entries to fetch
23 |    * @param {Number} [length=-1] How many entries to fetch (-1 fetches the full graph)
24 |    * @param {Array} [exclude=[]] Entries (or hashes) that don't need to be fetched
25 |    * @param {Number} [timeout] Maximum time (ms) to wait for an entry before skipping it
26 |    * @param {Number} [concurrency=32] How many entries to fetch in parallel
27 |    * @param {function(hash)} shouldExclude A function that can be passed to determine whether a specific hash should be excluded, i.e. not fetched. The function should return true to indicate exclusion, otherwise return false.
28 |    * @param {function(entry)} onProgressCallback Called when an entry was fetched
29 |    * @returns {Promise<Array<Entry>>}
30 |    */
31 |   static async fetchAll (ipfs, hashes, { length = -1, exclude = [], shouldExclude, timeout, onProgressCallback, onStartProgressCallback, concurrency = 32, delay = 0 } = {}) {
32 |     const result = []
33 |     const cache = {}
34 |     const loadingCache = {}
35 |     const loadingQueue = Array.isArray(hashes)
36 |       ?
{ 0: hashes.slice() } 37 | : { 0: [hashes] } 38 | let running = 0 // keep track of how many entries are being fetched at any time 39 | let maxClock = 0 // keep track of the latest clock time during load 40 | let minClock = 0 // keep track of the minimum clock time during load 41 | shouldExclude = shouldExclude || (() => false) // default fn returns false to not exclude any hash 42 | 43 | // Does the loading queue have more to process? 44 | const loadingQueueHasMore = () => Object.values(loadingQueue).find(hasItems) !== undefined 45 | 46 | // Add a multihash to the loading queue 47 | const addToLoadingQueue = (e, idx) => { 48 | if (!loadingCache[e] && !shouldExclude(e)) { 49 | if (!loadingQueue[idx]) loadingQueue[idx] = [] 50 | if (!loadingQueue[idx].includes(e)) { 51 | loadingQueue[idx].push(e) 52 | } 53 | loadingCache[e] = true 54 | } 55 | } 56 | 57 | // Get the next items to process from the loading queue 58 | const getNextFromQueue = (length = 1) => { 59 | const getNext = (res, key, idx) => { 60 | const nextItems = loadingQueue[key] 61 | while (nextItems.length > 0 && res.length < length) { 62 | const hash = nextItems.shift() 63 | res.push(hash) 64 | } 65 | if (nextItems.length === 0) { 66 | delete loadingQueue[key] 67 | } 68 | return res 69 | } 70 | return Object.keys(loadingQueue).reduce(getNext, []) 71 | } 72 | 73 | // Add entries that we don't need to fetch to the "cache" 74 | const addToExcludeCache = e => { cache[e.hash || e] = true } 75 | 76 | // Fetch one entry and add it to the results 77 | const fetchEntry = async (hash) => { 78 | if (!hash || cache[hash] || shouldExclude(hash)) { 79 | return 80 | } 81 | 82 | /* eslint-disable no-async-promise-executor */ 83 | return new Promise(async (resolve, reject) => { 84 | // Resolve the promise after a timeout (if given) in order to 85 | // not get stuck loading a block that is unreachable 86 | const timer = timeout && timeout > 0 87 | ? setTimeout(() => { 88 | console.warn(`Warning: Couldn't fetch entry '${hash}', request timed out (${timeout}ms)`) 89 | resolve() 90 | }, timeout) 91 | : null 92 | 93 | const addToResults = (entry) => { 94 | if (isEntry(entry) && !cache[entry.hash] && !shouldExclude(entry.hash)) { 95 | const ts = entry.clock.time 96 | 97 | // Update min/max clocks 98 | maxClock = Math.max(maxClock, ts) 99 | minClock = result.length > 0 100 | ? 
Math.min(result[result.length - 1].clock.time, minClock)
101 |               : maxClock
102 | 
103 |             const isLater = (result.length >= length && ts >= minClock)
104 |             const calculateIndex = (idx) => maxClock - ts + ((idx + 1) * idx)
105 | 
106 |             // Add the entry to the results if
107 |             // 1) we're fetching all entries
108 |             // 2) results is not filled yet
109 |             // 3) the clock of the entry is later than the current known minimum clock time
110 |             if ((length < 0 || result.length < length || isLater) && !shouldExclude(entry.hash) && !cache[entry.hash]) {
111 |               result.push(entry)
112 |               cache[entry.hash] = true
113 | 
114 |               if (onProgressCallback) {
115 |                 onProgressCallback(entry)
116 |               }
117 |             }
118 | 
119 |             if (length < 0) {
120 |               // If we're fetching all entries (length === -1), add nexts and refs to the queue
121 |               entry.next.forEach(addToLoadingQueue)
122 |               if (entry.refs) entry.refs.forEach(addToLoadingQueue)
123 |             } else {
124 |               // If we're fetching entries up to a certain length,
125 |               // fetch the next one even if the result is filled up, to make sure we "check"
126 |               // the next entry if its clock is later than what we have in the result
127 |               if (result.length < length || ts > minClock || (ts === minClock && !cache[entry.hash] && !shouldExclude(entry.hash))) {
128 |                 entry.next.forEach(e => addToLoadingQueue(e, calculateIndex(0)))
129 |               }
130 |               if (entry.refs && (result.length + entry.refs.length <= length)) {
131 |                 entry.refs.forEach((e, i) => addToLoadingQueue(e, calculateIndex(i)))
132 |               }
133 |             }
134 |           }
135 |         }
136 | 
137 |         if (onStartProgressCallback) {
138 |           onStartProgressCallback(hash, null, 0, result.length)
139 |         }
140 | 
141 |         try {
142 |           // Load the entry
143 |           const entry = await fromMultihash(ipfs, hash)
144 |           // Simulate network latency (for debugging purposes)
145 |           if (delay > 0) {
146 |             const sleep = (ms = 0) => new Promise(resolve => setTimeout(resolve, ms))
147 |             await sleep(delay)
148 |           }
149 |           // Add it to the results
150 |           addToResults(entry)
151 |           resolve()
152 |         } catch (e) {
153 |           reject(e)
154 |         } finally {
155 |           clearTimeout(timer)
156 |         }
157 |       })
158 |     }
159 | 
160 |     // One loop of processing the loading queue
161 |     const _processQueue = async () => {
162 |       if (running < concurrency) {
163 |         const nexts = getNextFromQueue(concurrency)
164 |         running += nexts.length
165 |         await pMap(nexts, fetchEntry, { concurrency })
166 |         running -= nexts.length
167 |       }
168 |     }
169 | 
170 |     // Add entries to exclude from processing to the cache before we start
171 |     exclude.forEach(addToExcludeCache)
172 | 
173 |     // Fetch entries
174 |     await pDoWhilst(_processQueue, loadingQueueHasMore)
175 | 
176 |     return result
177 |   }
178 | }
179 | 
180 | export default EntryIO
181 | 
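To make the queue-driven control flow above concrete, here is a minimal sketch of driving `EntryIO.fetchAll` directly. The `ipfs` instance, the `headHashes` array and the `alreadyHave` set are assumed to be provided by the caller; the option names are the ones destructured in the `fetchAll` signature above.

```js
import EntryIO from './entry-io.js'

// Sketch: fetch at most 64 entries of a log graph, skipping known hashes.
// Assumes `ipfs` is a js-ipfs instance, `headHashes` an array of entry
// multihashes (e.g. a log's heads) and `alreadyHave` a Set of known hashes.
const fetchSome = async (ipfs, headHashes, alreadyHave) => {
  const entries = await EntryIO.fetchAll(ipfs, headHashes, {
    length: 64, // -1 would traverse the full graph
    timeout: 30000, // give up on an unreachable block after 30s
    concurrency: 16, // up to 16 parallel fetches
    shouldExclude: (hash) => alreadyHave.has(hash),
    onProgressCallback: (entry) => console.log('fetched', entry.hash)
  })
  return entries
}
```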
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # ipfs-log
2 | 
3 | [![npm](https://img.shields.io/npm/v/ipfs-log.svg)](https://www.npmjs.com/package/ipfs-log)
4 | [![CircleCI Status](https://circleci.com/gh/orbitdb/ipfs-log.svg?style=shield)](https://circleci.com/gh/orbitdb/ipfs-log)
5 | [![Gitter](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/orbitdb/Lobby) [![Matrix](https://img.shields.io/badge/matrix-%23orbitdb%3Apermaweb.io-blue.svg)](https://riot.permaweb.io/#/room/#orbitdb:permaweb.io)
6 | 
7 | 
8 | > An append-only log on IPFS.
9 | 
10 | `ipfs-log` is an immutable, operation-based conflict-free replicated data structure ([CRDT](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type)) for distributed systems. It's an append-only log that can be used to model a mutable, shared state between peers in p2p applications.
11 | 
12 | Every entry in the log is saved in IPFS, and each entry points to the hash(es) of the previous entries, forming a graph. Logs can be forked and joined back together.
13 | 
14 | The module works in **Node.js** and **Browsers**.
15 | 
16 | ```
17 |            Log A                Log B
18 |              |                    |
19 |      logA.append("one")   logB.append("hello")
20 |              |                    |
21 |              v                    v
22 |           +-----+             +-------+
23 |           |"one"|             |"hello"|
24 |           +-----+             +-------+
25 |              |                    |
26 |      logA.append("two")   logB.append("world")
27 |              |                    |
28 |              v                    v
29 |       +-----------+      +---------------+
30 |       |"one","two"|      |"hello","world"|
31 |       +-----------+      +---------------+
32 |              |                    |
33 |              |                    |
34 |        logA.join(logB) <----------+
35 |              |
36 |              v
37 | +---------------------------+
38 | |"one","hello","two","world"|
39 | +---------------------------+
40 | ```
41 | 
42 | 
43 | ## Table of Contents
44 | 
45 | - [Background](#background)
46 | - [Install](#install)
47 | - [Usage](#usage)
48 | - [API](#api)
49 | - [Tests](#tests)
50 | - [Benchmarking](#benchmarking)
51 | - [Build](#build)
52 | - [Contribute](#contribute)
53 | - [License](#license)
54 | 
55 | ## Background
56 | 
57 | IPFS Log has a few use cases:
58 | 
59 | - CRDTs
60 | - Database operations log
61 | - Feed of data
62 | - Track a version of a file
63 | - Messaging
64 | 
65 | It was originally created for, and currently used in, [orbit-db](https://github.com/orbitdb/orbit-db) - a distributed peer-to-peer database on [IPFS](https://github.com/ipfs/ipfs).
66 | 
67 | ## Requirements
68 | 
69 | - Node.js v8.6.0 or newer (uses the `...` spread syntax)
70 | - Preferably an LTS version of Node.js (even-numbered releases: 8, 10, etc.)
71 | 
72 | ## Install
73 | 
74 | This project uses [npm](http://npmjs.com/) and [Node.js](https://nodejs.org/).
75 | 
76 | ```
77 | npm install ipfs-log
78 | ```
79 | 
80 | ## Usage
81 | 
82 | See the [API documentation](#api) and [examples](https://github.com/orbitdb/ipfs-log/tree/master/examples) for more details.
83 | 
84 | ### Quick Start
85 | 
86 | Install dependencies:
87 | 
88 | ```
89 | npm install ipfs-log ipfs
90 | ```
91 | 
92 | Run a simple program:
93 | 
94 | ```javascript
95 | 
96 | // For js-ipfs >= 0.38
97 | 
98 | import Log from 'ipfs-log'
99 | import IdentityProvider from 'orbit-db-identity-provider'
100 | import * as IPFS from 'ipfs'
101 | 
102 | const start = async () => {
103 |   const identity = await IdentityProvider.createIdentity({ id: "peerid" })
104 |   const ipfs = await IPFS.create({ repo: "./path-for-js-ipfs-repo" })
105 |   const log = new Log(ipfs, identity)
106 | 
107 |   await log.append({ some: "data" })
108 |   await log.append("text")
109 |   console.log(log.values.map((e) => e.payload))
110 | }
111 | 
112 | start()
113 | 
114 | // [ { some: 'data' }, 'text' ]
115 | ```
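Forking and joining work as in the diagram near the top of this README. A short sketch (inside an async function, with `ipfs` and `identity` created as in the quick start above; note that `join` only merges logs that share the same log ID):

```javascript
const logA = new Log(ipfs, identity, { logId: 'example' })
const logB = new Log(ipfs, identity, { logId: 'example' })

await logA.append('one')
await logA.append('two')
await logB.append('hello')
await logB.append('world')

await logA.join(logB)
console.log(logA.values.map((e) => e.payload))
// e.g. [ 'one', 'hello', 'two', 'world' ]
```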
116 | 
117 | ### Node.js
118 | 
119 | See [examples](https://github.com/orbitdb/ipfs-log/tree/master/examples) for details.
120 | 
121 | *If your platform requires ES5-compatible JavaScript, there's a build in `lib/es5/`.*
122 | 
123 | ### Browser
124 | 
125 | See [examples/browser](https://github.com/orbitdb/ipfs-log/tree/master/examples/browser) for details.
126 | 
127 | *The distribution package for browsers is located in [dist/ipfslog.min.js](https://github.com/orbitdb/ipfs-log/tree/master/dist)*
128 | 
129 | *If your platform requires ES5-compatible JavaScript, there's a build in `lib/es5/`.*
130 | 
131 | ## API
132 | 
133 | See [API Documentation](https://github.com/orbitdb/ipfs-log/tree/master/API.md) for full details. A short usage sketch follows the list below.
134 | 
135 | - [Log](https://github.com/orbitdb/ipfs-log/tree/master/API.md#log)
136 |   - [Constructor](https://github.com/orbitdb/ipfs-log/tree/master/API.md#constructor)
137 |     - [new Log(ipfs, identity, [{ logId, access, entries, heads, clock, sortFn }])](https://github.com/orbitdb/ipfs-log/tree/master/API.md#new-log-ipfs-id)
138 |   - [Properties](https://github.com/orbitdb/ipfs-log/tree/master/API.md#properties)
139 |     - [id](https://github.com/orbitdb/ipfs-log/tree/master/API.md#id)
140 |     - [values](https://github.com/orbitdb/ipfs-log/tree/master/API.md#values)
141 |     - [length](https://github.com/orbitdb/ipfs-log/tree/master/API.md#length)
142 |     - [clock](https://github.com/orbitdb/ipfs-log/tree/master/API.md#clock)
143 |     - [heads](https://github.com/orbitdb/ipfs-log/tree/master/API.md#heads)
144 |     - [tails](https://github.com/orbitdb/ipfs-log/tree/master/API.md#tails)
145 |   - [Methods](https://github.com/orbitdb/ipfs-log/tree/master/API.md#methods)
146 |     - [append(data)](https://github.com/orbitdb/ipfs-log/tree/master/API.md#appenddata)
147 |     - [join(log)](https://github.com/orbitdb/ipfs-log/tree/master/API.md#joinlog)
148 |     - [toMultihash()](https://github.com/orbitdb/ipfs-log/tree/master/API.md#tomultihash)
149 |     - [toBuffer()](https://github.com/orbitdb/ipfs-log/tree/master/API.md#tobuffer)
150 |     - [toString()](https://github.com/orbitdb/ipfs-log/tree/master/API.md#tostring)
151 |   - [Static Methods](https://github.com/orbitdb/ipfs-log/tree/master/API.md#static-methods)
152 |     - [Log.fromEntry()]()
153 |     - [Log.fromEntryCid()]()
154 |     - [Log.fromCID()]()
155 |     - [Log.fromMultihash()]()
156 | 
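As a quick orientation before diving into API.md, here is a hedged sketch of persisting a log and loading it back (inside an async function, with `ipfs` and `identity` as in the quick start; the `fromMultihash` options object shown here is an assumption based on the method list above, not a definitive signature):

```javascript
const log = new Log(ipfs, identity, { logId: 'example' })
await log.append('one')
await log.append('two')

// Persist the log to IPFS and get back the multihash of its snapshot
const hash = await log.toMultihash()

// Load it back; length: -1 fetches the full history
const copy = await Log.fromMultihash(ipfs, identity, hash, { length: -1 })
console.log(copy.values.map((e) => e.payload)) // [ 'one', 'two' ]
```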
157 | ## Tests
158 | 
159 | Run all tests:
160 | ```
161 | npm test
162 | ```
163 | 
164 | Run tests with js-ipfs only (default):
165 | ```
166 | mocha
167 | ```
168 | 
169 | Run tests with go-ipfs only:
170 | ```
171 | TEST=go mocha
172 | ```
173 | 
174 | ## Benchmarking
175 | 
176 | To use the benchmark runner:
177 | 
178 | ```JavaScript
179 | node --expose-gc benchmarks/runner/index.js -r --grep append-stress --stress-limit Infinity
180 | ```
181 | 
182 | This will run the `append-stress` benchmarks until canceled. For more information, see the [Benchmarking README](./benchmarks/README.md).
183 | 
184 | ## Build
185 | 
186 | Run the following command before you commit.
187 | 
188 | ```
189 | make rebuild
190 | ```
191 | 
192 | This will ensure that dependencies and built files are all based on the current code base.
193 | 
194 | ## Benchmarks
195 | 
196 | There's a benchmark suite in [benchmarks/](https://github.com/orbitdb/ipfs-log/blob/master/benchmarks) that can be run with:
197 | 
198 | ```
199 | node benchmarks/benchmark-append.js
200 | node benchmarks/benchmark-join.js
201 | node benchmarks/benchmark-expand.js
202 | ```
203 | 
204 | There are `append` and `join` benchmarks for browsers in [benchmarks/browser/](https://github.com/orbitdb/ipfs-log/blob/master/benchmarks/browser) which you can run by opening the `.html` files in your browser.
205 | 
206 | ## Contribute
207 | 
208 | If you find a bug or something is broken, let us know! PRs and [issues](https://github.com/orbitdb/ipfs-log/issues) are gladly accepted too. Take a look at the open issues, too, to see if there's anything you could pick up or whether someone else is already working on it. Here are some things I know I need:
209 | 
210 | ### TODO
211 | 
212 | - Support for payload encryption
213 | 
214 | ## License
215 | 
216 | [MIT](LICENSE) © 2016-2018 Protocol Labs Inc.,
217 | 2016-2019 Haja Networks Oy
218 | 
-------------------------------------------------------------------------------- /test/signed-log.spec.js: --------------------------------------------------------------------------------
1 | import { notStrictEqual, strictEqual, deepStrictEqual } from 'assert'
2 | import rimraf from 'rimraf'
3 | import { copy } from 'fs-extra'
4 | import Log from '../src/log.js'
5 | import IdentityProvider from 'orbit-db-identity-provider'
6 | import Keystore from 'orbit-db-keystore'
7 | 
8 | // Test utils
9 | import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
10 | 
11 | const { sync: rmrf } = rimraf
12 | const { createIdentity } = IdentityProvider
13 | 
14 | let ipfsd, ipfs, testIdentity, testIdentity2
15 | 
16 | Object.keys(testAPIs).forEach((IPFS) => {
17 |   describe('Signed Log (' + IPFS + ')', function () {
18 |     this.timeout(config.timeout)
19 | 
20 |     const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config
21 | 
22 |     let keystore, signingKeystore
23 | 
24 |     before(async () => {
25 |       rmrf(identityKeysPath)
26 |       rmrf(signingKeysPath)
27 |       await copy(identityKeyFixtures, identityKeysPath)
28 |       await copy(signingKeyFixtures, signingKeysPath)
29 | 
30 |       keystore = new Keystore(identityKeysPath)
31 |       signingKeystore = new Keystore(signingKeysPath)
32 | 
33 |       testIdentity = await createIdentity({ id: 'userA', keystore, signingKeystore })
34 |       testIdentity2 = await createIdentity({ id: 'userB', keystore, signingKeystore })
35 |       ipfsd = await startIpfs(IPFS, config.defaultIpfsConfig)
36 |       ipfs = ipfsd.api
37 |     })
38 | 
39 |     after(async () => {
40 |       await stopIpfs(ipfsd)
41 |       rmrf(identityKeysPath)
42 |       rmrf(signingKeysPath)
43 |       await keystore.close()
44 |       await signingKeystore.close()
45 |     })
46 | 
47 |     it('creates a signed log', () => {
48 |       const logId = 'A'
49 |       const log = new Log(ipfs, testIdentity, { logId })
50 |       notStrictEqual(log.id, null)
51 |       strictEqual(log.id, logId)
52 |     })
53 | 
54 |     it('has the correct identity', () => {
55 |       const log = new Log(ipfs, testIdentity, { logId: 'A' })
56 |       notStrictEqual(log.id, null)
57 |       strictEqual(log._identity.id, '03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c')
58 |       strictEqual(log._identity.publicKey, '048bef2231e64d5c7147bd4b8afb84abd4126ee8d8335e4b069ac0a65c7be711cea5c1b8d47bc20ebaecdca588600ddf2894675e78b2ef17cf49e7bbaf98080361')
59 |       strictEqual(log._identity.signatures.id, '3045022100f5f6f10571d14347aaf34e526ce3419fd64d75ffa7aa73692cbb6aeb6fbc147102203a3e3fa41fa8fcbb9fc7c148af5b640e2f704b20b3a4e0b93fc3a6d44dffb41e')
60 |       strictEqual(log._identity.signatures.publicKey, '3044022020982b8492be0c184dc29de0a3a3bd86a86ba997756b0bf41ddabd24b47c5acf02203745fda39d7df650a5a478e52bbe879f0cb45c074025a93471414a56077640a4')
61 |     })
62 | 
63 |     it('has the correct public key', () => {
64 |       const log = new Log(ipfs, testIdentity, { logId: 'A' })
65 |       strictEqual(log._identity.publicKey, testIdentity.publicKey)
66 |     })
67 | 
68 |     it('has the correct pkSignature', () => {
69 |       const log = new Log(ipfs, testIdentity, { logId: 'A' })
70 |       strictEqual(log._identity.signatures.id, testIdentity.signatures.id)
71 |     })
72 | 
73 |     it('has the correct signature', () => {
74 | 
const log = new Log(ipfs, testIdentity, { logId: 'A' }) 75 | strictEqual(log._identity.signatures.publicKey, testIdentity.signatures.publicKey) 76 | }) 77 | 78 | it('entries contain an identity', async () => { 79 | const log = new Log(ipfs, testIdentity, { logId: 'A' }) 80 | await log.append('one') 81 | notStrictEqual(log.values[0].sig, null) 82 | deepStrictEqual(log.values[0].identity, testIdentity.toJSON()) 83 | }) 84 | 85 | it('doesn\'t sign entries when identity is not defined', async () => { 86 | let err 87 | try { 88 | const log = new Log(ipfs) // eslint-disable-line no-unused-vars 89 | } catch (e) { 90 | err = e 91 | } 92 | strictEqual(err.message, 'Identity is required') 93 | }) 94 | 95 | it('doesn\'t join logs with different IDs ', async () => { 96 | const log1 = new Log(ipfs, testIdentity, { logId: 'A' }) 97 | const log2 = new Log(ipfs, testIdentity2, { logId: 'B' }) 98 | 99 | let err 100 | try { 101 | await log1.append('one') 102 | await log2.append('two') 103 | await log2.append('three') 104 | await log1.join(log2) 105 | } catch (e) { 106 | err = e.toString() 107 | throw e 108 | } 109 | 110 | strictEqual(err, undefined) 111 | strictEqual(log1.id, 'A') 112 | strictEqual(log1.values.length, 1) 113 | strictEqual(log1.values[0].payload, 'one') 114 | }) 115 | 116 | it('throws an error if log is signed but trying to merge with an entry that doesn\'t have public signing key', async () => { 117 | const log1 = new Log(ipfs, testIdentity, { logId: 'A' }) 118 | const log2 = new Log(ipfs, testIdentity2, { logId: 'A' }) 119 | 120 | let err 121 | try { 122 | await log1.append('one') 123 | await log2.append('two') 124 | delete log2.values[0].key 125 | await log1.join(log2) 126 | } catch (e) { 127 | err = e.toString() 128 | } 129 | strictEqual(err, 'Error: Entry doesn\'t have a key') 130 | }) 131 | 132 | it('throws an error if log is signed but trying to merge an entry that doesn\'t have a signature', async () => { 133 | const log1 = new Log(ipfs, testIdentity, { logId: 'A' }) 134 | const log2 = new Log(ipfs, testIdentity2, { logId: 'A' }) 135 | 136 | let err 137 | try { 138 | await log1.append('one') 139 | await log2.append('two') 140 | delete log2.values[0].sig 141 | await log1.join(log2) 142 | } catch (e) { 143 | err = e.toString() 144 | } 145 | strictEqual(err, 'Error: Entry doesn\'t have a signature') 146 | }) 147 | 148 | it('throws an error if log is signed but the signature doesn\'t verify', async () => { 149 | const log1 = new Log(ipfs, testIdentity, { logId: 'A' }) 150 | const log2 = new Log(ipfs, testIdentity2, { logId: 'A' }) 151 | let err 152 | 153 | try { 154 | await log1.append('one') 155 | await log2.append('two') 156 | log2.values[0].sig = log1.values[0].sig 157 | await log1.join(log2) 158 | } catch (e) { 159 | err = e.toString() 160 | } 161 | 162 | const entry = log2.values[0] 163 | strictEqual(err, `Error: Could not validate signature "${entry.sig}" for entry "${entry.hash}" and key "${entry.key}"`) 164 | strictEqual(log1.values.length, 1) 165 | strictEqual(log1.values[0].payload, 'one') 166 | }) 167 | 168 | it('throws an error if entry doesn\'t have append access', async () => { 169 | const denyAccess = { canAppend: () => false } 170 | const log1 = new Log(ipfs, testIdentity, { logId: 'A' }) 171 | const log2 = new Log(ipfs, testIdentity2, { logId: 'A', access: denyAccess }) 172 | 173 | let err 174 | try { 175 | await log1.append('one') 176 | await log2.append('two') 177 | await log1.join(log2) 178 | } catch (e) { 179 | err = e.toString() 180 | } 181 | 182 | strictEqual(err, 
`Error: Could not append entry, key "${testIdentity2.id}" is not allowed to write to the log`)
183 |     })
184 | 
185 |     it('throws an error upon join if entry doesn\'t have append access', async () => {
186 |       const testACL = {
187 |         canAppend: (entry) => entry.identity.id !== testIdentity2.id
188 |       }
189 |       const log1 = new Log(ipfs, testIdentity, { logId: 'A', access: testACL })
190 |       const log2 = new Log(ipfs, testIdentity2, { logId: 'A' })
191 | 
192 |       let err
193 |       try {
194 |         await log1.append('one')
195 |         await log2.append('two')
196 |         await log1.join(log2)
197 |       } catch (e) {
198 |         err = e.toString()
199 |       }
200 | 
201 |       strictEqual(err, `Error: Could not append entry, key "${testIdentity2.id}" is not allowed to write to the log`)
202 |     })
203 |   })
204 | })
-------------------------------------------------------------------------------- /src/entry.js: --------------------------------------------------------------------------------
1 | import Clock from './lamport-clock.js'
2 | import { read, write } from 'orbit-db-io'
3 | import { isDefined } from './utils/index.js'
4 | import stringify from 'json-stringify-deterministic'
5 | 
6 | const IpfsNotDefinedError = () => new Error('Ipfs instance not defined')
7 | const IPLD_LINKS = ['next', 'refs']
8 | const getWriteFormatForVersion = v => v === 0 ? 'dag-pb' : 'dag-cbor'
9 | const getWriteFormat = e => Entry.isEntry(e) ? getWriteFormatForVersion(e.v) : getWriteFormatForVersion(e)
10 | 
11 | /*
12 |  * @description
13 |  * An ipfs-log entry
14 |  */
15 | class Entry {
16 |   /**
17 |    * Create an Entry
18 |    * @param {IPFS} ipfs An IPFS instance
19 |    * @param {Identity} identity The identity instance
20 |    * @param {string} logId The unique identifier for this log
21 |    * @param {*} data Data of the entry to be added. Can be any JSON.stringifyable data
22 |    * @param {Array} [next=[]] Parent hashes or entries
23 |    * @param {LamportClock} [clock] The lamport clock
24 |    * @returns {Promise<Entry>}
25 |    * @example
26 |    * const entry = await Entry.create(ipfs, identity, 'A', 'hello')
27 |    * console.log(entry)
28 |    * // { hash: "zd...Foo", payload: "hello", next: [] }
29 |    */
30 |   static async create (ipfs, identity, logId, data, next = [], clock, refs = [], pin) {
31 |     if (!isDefined(ipfs)) throw IpfsNotDefinedError()
32 |     if (!isDefined(identity)) throw new Error('Identity is required, cannot create entry')
33 |     if (!isDefined(logId)) throw new Error('Entry requires an id')
34 |     if (!isDefined(data)) throw new Error('Entry requires data')
35 |     if (!isDefined(next) || !Array.isArray(next)) throw new Error("'next' argument is not an array")
36 | 
37 |     // Clean the next objects and convert to hashes
38 |     const toEntry = (e) => e.hash ? e.hash : e
39 |     const nexts = next.filter(isDefined).map(toEntry)
40 | 
41 |     const entry = {
42 |       hash: null, // "zd...Foo", we'll set the hash after persisting the entry
43 |       id: logId, // For determining a unique chain
44 |       payload: data, // Can be any JSON.stringifyable data
45 |       next: nexts, // Array of hashes
46 |       refs,
47 |       v: 2, // To tag the version of this data structure
48 |       clock: clock || new Clock(identity.publicKey)
49 |     }
50 | 
51 |     const signature = await identity.provider.sign(identity, Entry.toBuffer(entry))
52 | 
53 |     entry.key = identity.publicKey
54 |     entry.identity = identity.toJSON()
55 |     entry.sig = signature
56 |     entry.hash = await Entry.toMultihash(ipfs, entry, pin)
57 | 
58 |     return entry
59 |   }
60 | 
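// For orientation, a sketch of the overall shape `create` assembles once it
// returns. The values below are illustrative placeholders (not real hashes or
// signatures), and `identity` is assumed to come from orbit-db-identity-provider:
//
//   {
//     hash: 'zdpu...',        // set after the entry is persisted to IPFS
//     id: 'A',                // the log this entry belongs to
//     payload: 'hello',       // any JSON.stringifyable data
//     next: ['zdpu...'],      // hashes of the parent entries
//     refs: [],               // extra references for faster traversal (v2)
//     v: 2,                   // version tag of the entry format
//     clock: { id: '04ab...', time: 1 }, // Lamport clock (id, time)
//     key: '04ab...',         // the signing public key
//     identity: { ... },      // identity.toJSON() of the writer
//     sig: '3045...'          // signature made before key/identity/sig were set
//   }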
61 |   /**
62 |    * Verifies an entry signature.
63 |    *
64 |    * @param {IdentityProvider} identityProvider The identity provider to use
65 |    * @param {Entry} entry The entry being verified
66 |    * @return {Promise<boolean>} A promise that resolves to a boolean value indicating if the signature is valid
67 |    */
68 |   static async verify (identityProvider, entry) {
69 |     if (!identityProvider) throw new Error('Identity-provider is required, cannot verify entry')
70 |     if (!Entry.isEntry(entry)) throw new Error('Invalid Log entry')
71 |     if (!entry.key) throw new Error("Entry doesn't have a key")
72 |     if (!entry.sig) throw new Error("Entry doesn't have a signature")
73 | 
74 |     const e = Entry.toEntry(entry, { presigned: true })
75 |     const verifier = entry.v < 1 ? 'v0' : 'v1'
76 |     return identityProvider.verify(entry.sig, entry.key, Entry.toBuffer(e), verifier)
77 |   }
78 | 
79 |   /**
80 |    * Transforms an entry into a Buffer.
81 |    * @param {Entry} entry The entry
82 |    * @return {Buffer} The buffer
83 |    */
84 |   static toBuffer (entry) {
85 |     const stringifiedEntry = entry.v === 0 ? JSON.stringify(entry) : stringify(entry)
86 |     return Buffer.from(stringifiedEntry)
87 |   }
88 | 
89 |   /**
90 |    * Get the multihash of an Entry.
91 |    * @param {IPFS} ipfs An IPFS instance
92 |    * @param {Entry} entry Entry to get a multihash for
93 |    * @returns {Promise<string>}
94 |    * @example
95 |    * const multihash = await Entry.toMultihash(ipfs, entry)
96 |    * console.log(multihash)
97 |    * // "Qm...Foo"
98 |    * @deprecated
99 |    */
100 |   static async toMultihash (ipfs, entry, pin = false) {
101 |     if (!ipfs) throw IpfsNotDefinedError()
102 |     if (!Entry.isEntry(entry)) throw new Error('Invalid object format, cannot generate entry hash')
103 | 
104 |     // Ensure `entry` follows the correct format
105 |     const e = Entry.toEntry(entry)
106 |     return write(ipfs, getWriteFormat(e.v), e, { links: IPLD_LINKS, pin })
107 |   }
108 | 
109 |   static toEntry (entry, { presigned = false, includeHash = false } = {}) {
110 |     const e = {
111 |       hash: includeHash ? entry.hash : null,
112 |       id: entry.id,
113 |       payload: entry.payload,
114 |       next: entry.next
115 |     }
116 | 
117 |     const v = entry.v
118 |     if (v > 1) {
119 |       e.refs = entry.refs // added in v2
120 |     }
121 |     e.v = entry.v
122 |     e.clock = new Clock(entry.clock.id, entry.clock.time)
123 | 
124 |     if (presigned) {
125 |       return e // don't include key/sig information
126 |     }
127 | 
128 |     e.key = entry.key
129 |     if (v > 0) {
130 |       e.identity = entry.identity // added in v1
131 |     }
132 |     e.sig = entry.sig
133 |     return e
134 |   }
135 | 
136 |   /**
137 |    * Create an Entry from a hash.
138 |    * @param {IPFS} ipfs An IPFS instance
139 |    * @param {string} hash The hash to create an Entry from
140 |    * @returns {Promise<Entry>}
141 |    * @example
142 |    * const entry = await Entry.fromMultihash(ipfs, "zd...Foo")
143 |    * console.log(entry)
144 |    * // { hash: "zd...Foo", payload: "hello", next: [] }
145 |    */
146 |   static async fromMultihash (ipfs, hash) {
147 |     if (!ipfs) throw IpfsNotDefinedError()
148 |     if (!hash) throw new Error(`Invalid hash: ${hash}`)
149 |     const e = await read(ipfs, hash, { links: IPLD_LINKS })
150 | 
151 |     const entry = Entry.toEntry(e)
152 |     entry.hash = hash
153 | 
154 |     return entry
155 |   }
156 | 
157 |   /**
158 |    * Check if an object is an Entry.
159 |    * @param {Entry} obj
160 |    * @returns {boolean}
161 |    */
162 |   static isEntry (obj) {
163 |     return obj && obj.id !== undefined &&
164 |       obj.next !== undefined &&
165 |       obj.payload !== undefined &&
166 |       obj.v !== undefined &&
167 |       obj.hash !== undefined &&
168 |       obj.clock !== undefined &&
169 |       (obj.refs !== undefined || obj.v < 2) // 'refs' added in v2
170 |   }
171 | 
172 |   /**
173 |    * Compares two entries.
174 |    * @param {Entry} a
175 |    * @param {Entry} b
176 |    * @returns {number} 1 if a is greater, -1 if b is greater
177 |    */
178 |   static compare (a, b) {
179 |     const distance = Clock.compare(a.clock, b.clock)
180 |     if (distance === 0) return a.clock.id < b.clock.id ? -1 : 1
181 |     return distance
182 |   }
183 | 
184 |   /**
185 |    * Check if an entry equals another entry.
186 |    * @param {Entry} a
187 |    * @param {Entry} b
188 |    * @returns {boolean}
189 |    */
190 |   static isEqual (a, b) {
191 |     return a.hash === b.hash
192 |   }
193 | 
194 |   /**
195 |    * Check if an entry is a parent to another entry.
196 |    * @param {Entry} entry1 Entry to check
197 |    * @param {Entry} entry2 The parent Entry
198 |    * @returns {boolean}
199 |    */
200 |   static isParent (entry1, entry2) {
201 |     return entry2.next.indexOf(entry1.hash) > -1
202 |   }
203 | 
204 |   /**
205 |    * Find entry's children from an Array of entries.
206 |    * Returns entry's children as an Array up to the last known child.
207 |    * @param {Entry} entry Entry for which to find the children
208 |    * @param {Array} values Entries to search children from
209 |    * @returns {Array}
210 |    */
211 |   static findChildren (entry, values) {
212 |     let stack = []
213 |     let parent = values.find((e) => Entry.isParent(entry, e))
214 |     let prev = entry
215 |     while (parent) {
216 |       stack.push(parent)
217 |       prev = parent
218 |       parent = values.find((e) => Entry.isParent(prev, e))
219 |     }
220 |     stack = stack.sort((a, b) => a.clock.time - b.clock.time)
221 |     return stack
222 |   }
223 | }
224 | 
225 | export default Entry
226 | export { IPLD_LINKS }
227 | export { getWriteFormat }
228 | 
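A short sketch of the create/verify round trip these helpers support (inside an async function; `ipfs` and an `identity` from orbit-db-identity-provider are assumed, as in the test setup):

```js
const entry = await Entry.create(ipfs, identity, 'A', { op: 'ADD', value: 'hello' })

// Verify against the provider that issued the identity
const isValid = await Entry.verify(identity.provider, entry)
console.log(entry.hash, isValid) // e.g. 'zdpu...' true

// Entries round-trip through IPFS by hash
const same = await Entry.fromMultihash(ipfs, entry.hash)
console.log(Entry.isEqual(entry, same)) // true
```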
-------------------------------------------------------------------------------- /docs/entry-io.js.html: --------------------------------------------------------------------------------
JSDoc: Source: entry-io.js

Source: entry-io.js

29 | 
'use strict'
 30 | 
 31 | const pMap = require('p-map')
 32 | const pDoWhilst = require('p-do-whilst')
 33 | const Entry = require('./entry')
 34 | 
 35 | const hasItems = arr => arr && arr.length > 0
 36 | 
 37 | class EntryIO {
 38 |   // Fetch log graphs in parallel
 39 |   static async fetchParallel (ipfs, hashes, { length, exclude = [], timeout, concurrency, onProgressCallback }) {
 40 |     const fetchOne = async (hash) => EntryIO.fetchAll(ipfs, hash, { length, exclude, timeout, onProgressCallback, concurrency })
 41 |     const concatArrays = (arr1, arr2) => arr1.concat(arr2)
 42 |     const flatten = (arr) => arr.reduce(concatArrays, [])
 43 |     const res = await pMap(hashes, fetchOne, { concurrency: Math.max(concurrency || hashes.length, 1) })
 44 |     return flatten(res)
 45 |   }
 46 | 
 47 |   /**
 48 |    * Fetch log entries
 49 |    *
 50 |    * @param {IPFS} [ipfs] An IPFS instance
 51 |    * @param {string} [hash] Multihash of the entry to fetch
 52 |    * @param {string} [parent] Parent of the node to be fetched
 53 |    * @param {Object} [all] Entries to skip
 54 |    * @param {Number} [amount=-1] How many entries to fetch
 55 |    * @param {Number} [depth=0] Current depth of the recursion
 56 |    * @param {function(hash, entry, parent, depth)} onProgressCallback
 57 |    * @returns {Promise<Array<Entry>>}
 58 |    */
 59 |   static async fetchAll (ipfs, hashes, { length = -1, exclude = [], timeout, onProgressCallback, onStartProgressCallback, concurrency = 32, delay = 0 } = {}) {
 60 |     const result = []
 61 |     const cache = {}
 62 |     const loadingCache = {}
 63 |     const loadingQueue = Array.isArray(hashes)
 64 |       ? { 0: hashes.slice() }
 65 |       : { 0: [hashes] }
 66 |     let running = 0 // keep track of how many entries are being fetched at any time
 67 |     let maxClock = 0 // keep track of the latest clock time during load
 68 |     let minClock = 0 // keep track of the minimum clock time during load
 69 | 
 70 |     // Does the loading queue have more to process?
 71 |     const loadingQueueHasMore = () => Object.values(loadingQueue).find(hasItems) !== undefined
 72 | 
 73 |     // Add a multihash to the loading queue
 74 |     const addToLoadingQueue = (e, idx) => {
 75 |       if (!loadingCache[e]) {
 76 |         if (!loadingQueue[idx]) loadingQueue[idx] = []
 77 |         if (!loadingQueue[idx].includes(e)) {
 78 |           loadingQueue[idx].push(e)
 79 |         }
 80 |         loadingCache[e] = true
 81 |       }
 82 |     }
 83 | 
 84 |     // Get the next items to process from the loading queue
 85 |     const getNextFromQueue = (length = 1) => {
 86 |       const getNext = (res, key, idx) => {
 87 |         const nextItems = loadingQueue[key]
 88 |         while (nextItems.length > 0 && res.length < length) {
 89 |           const hash = nextItems.shift()
 90 |           res.push(hash)
 91 |         }
 92 |         if (nextItems.length === 0) {
 93 |           delete loadingQueue[key]
 94 |         }
 95 |         return res
 96 |       }
 97 |       return Object.keys(loadingQueue).reduce(getNext, [])
 98 |     }
 99 | 
100 |     // Add entries that we don't need to fetch to the "cache"
101 |     const addToExcludeCache = e => { cache[e.hash] = true }
102 | 
103 |     // Fetch one entry and add it to the results
104 |     const fetchEntry = async (hash) => {
105 |       if (!hash || cache[hash]) {
106 |         return
107 |       }
108 | 
109 |       return new Promise((resolve, reject) => {
110 |         // Resolve the promise after a timeout (if given) in order to
111 |         // not get stuck loading a block that is unreachable
112 |         const timer = timeout && timeout > 0
113 |           ? setTimeout(() => {
114 |               console.warn(`Warning: Couldn't fetch entry '${hash}', request timed out (${timeout}ms)`)
115 |               resolve()
116 |             }, timeout)
117 |           : null
118 | 
119 |         const addToResults = (entry) => {
120 |           if (Entry.isEntry(entry)) {
121 |             const ts = entry.clock.time
122 | 
123 |             // Update min/max clocks
124 |             maxClock = Math.max(maxClock, ts)
125 |             minClock = result.length > 0
126 |               ? Math.min(result[result.length - 1].clock.time, minClock)
127 |               : maxClock
128 | 
129 |             const isLater = (result.length >= length && ts >= minClock)
130 |             const calculateIndex = (idx) => maxClock - ts + ((idx + 1) * idx)
131 | 
132 |             // Add the entry to the results if
133 |             // 1) we're fetching all entries
134 |             // 2) results is not filled yet
135 |             // 3) the clock of the entry is later than the current known minimum clock time
136 |             if (length < 0 || result.length < length || isLater) {
137 |               result.push(entry)
138 |               cache[hash] = true
139 | 
140 |               if (onProgressCallback) {
141 |                 onProgressCallback(hash, entry, result.length, result.length)
142 |               }
143 |             }
144 | 
145 |             if (length < 0) {
146 |               // If we're fetching all entries (length === -1), adds nexts and refs to the queue
147 |               entry.next.forEach(addToLoadingQueue)
148 |               if (entry.refs) entry.refs.forEach(addToLoadingQueue)
149 |             } else {
150 |               // If we're fetching entries up to certain length,
151 |               // fetch the next if result is filled up, to make sure we "check"
152 |               // the next entry if its clock is later than what we have in the result
153 |               if (result.length < length || ts > minClock || (ts === minClock && !cache[entry.hash])) {
154 |                 entry.next.forEach(e => addToLoadingQueue(e, calculateIndex(0)))
155 |               }
156 |               if (entry.refs && (result.length + entry.refs.length <= length)) {
157 |                 entry.refs.forEach((e, i) => addToLoadingQueue(e, calculateIndex(i)))
158 |               }
159 |             }
160 |           }
161 |         }
162 | 
163 |         if (onStartProgressCallback) {
164 |           onStartProgressCallback(hash, null, 0, result.length)
165 |         }
166 | 
167 |         // Load the entry
168 |         Entry.fromMultihash(ipfs, hash).then(async (entry) => {
169 |           try {
170 |             // Add it to the results
171 |             addToResults(entry)
172 | 
173 |             // Simulate network latency (for debugging purposes)
174 |             if (delay > 0) {
175 |               const sleep = (ms = 0) => new Promise(resolve => setTimeout(resolve, ms))
176 |               await sleep(delay)
177 |             }
178 |             resolve()
179 |           } catch (e) {
180 |             reject(e)
181 |           } finally {
182 |             clearTimeout(timer)
183 |           }
184 |         }).catch(reject)
185 |       })
186 |     }
187 | 
188 |     // One loop of processing the loading queue
189 |     const _processQueue = async () => {
190 |       if (running < concurrency) {
191 |         const nexts = getNextFromQueue(concurrency)
192 |         running += nexts.length
193 |         await pMap(nexts, fetchEntry)
194 |         running -= nexts.length
195 |       }
196 |     }
197 | 
198 |     // Add entries to exclude from processing to the cache before we start
199 |     exclude.forEach(addToExcludeCache)
200 | 
201 |     // Fetch entries
202 |     await pDoWhilst(_processQueue, loadingQueueHasMore)
203 | 
204 |     return result
205 |   }
206 | }
207 | 
208 | module.exports = EntryIO
209 | 
Documentation generated by JSDoc 3.6.6 on Fri Dec 11 2020 17:11:17 GMT-0500 (Eastern Standard Time)
--------------------------------------------------------------------------------