├── .github └── workflows │ └── prebuild.yml ├── .gitignore ├── .jshintrc ├── .npmignore ├── LICENSE ├── README.md ├── SECURITY.md ├── assets ├── gatsby.png ├── harperdb.png ├── performance.png └── powers-dre.png ├── benchmark ├── index.js └── low-level.js ├── binding.gyp ├── caching.js ├── dependencies ├── libmdbx │ ├── CMakeLists.txt │ ├── ChangeLog.md │ ├── GNUmakefile │ ├── LICENSE │ ├── README.md │ ├── VERSION.txt │ ├── cmake │ │ ├── compiler.cmake │ │ ├── profile.cmake │ │ └── utils.cmake │ ├── config.h.in │ ├── man1 │ │ ├── mdbx_chk.1 │ │ ├── mdbx_copy.1 │ │ ├── mdbx_drop.1 │ │ ├── mdbx_dump.1 │ │ ├── mdbx_load.1 │ │ └── mdbx_stat.1 │ ├── mdbx.c │ ├── mdbx.c++ │ ├── mdbx.h │ ├── mdbx.h++ │ ├── mdbx_chk.c │ ├── mdbx_copy.c │ ├── mdbx_drop.c │ ├── mdbx_dump.c │ ├── mdbx_load.c │ ├── mdbx_stat.c │ └── ntdll.def ├── lz4 │ ├── LICENSE │ └── lib │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── dll │ │ ├── example │ │ │ ├── README.md │ │ │ ├── fullbench-dll.sln │ │ │ └── fullbench-dll.vcxproj │ │ └── liblz4.def │ │ ├── liblz4-dll.rc.in │ │ ├── liblz4.pc.in │ │ ├── lz4.c │ │ ├── lz4.h │ │ ├── lz4frame.c │ │ ├── lz4frame.h │ │ ├── lz4frame_static.h │ │ ├── lz4hc.c │ │ ├── lz4hc.h │ │ ├── xxhash.c │ │ └── xxhash.h └── v8 │ ├── v8-fast-api-calls-v16.h │ └── v8-fast-api-calls.h ├── deps.ts ├── dict ├── dict.txt └── dict2.txt ├── external.js ├── index.d.ts ├── keys.js ├── level.js ├── mod.ts ├── node-index.js ├── open.js ├── package.json ├── read.js ├── rollup.config.js ├── src ├── compression.cpp ├── cursor.cpp ├── dbi.cpp ├── env.cpp ├── lmdbx-js.cpp ├── lmdbx-js.h ├── misc.cpp ├── ordered-binary.cpp ├── txn.cpp └── writer.cpp ├── test ├── check-commit.js ├── cluster.js ├── deno.ts ├── index.test.js ├── module.test.mjs ├── performance.js ├── threads.cjs └── types │ └── index.test-d.ts ├── update.deps.mdbx.sh ├── util ├── RangeIterable.js └── when.js └── write.js /.github/workflows/prebuild.yml: 
-------------------------------------------------------------------------------- 1 | name: Test and Prebuild 2 | on: [push] 3 | jobs: 4 | build-test-macos: 5 | if: startsWith(github.ref, 'refs/tags/') 6 | runs-on: macos-11 7 | steps: 8 | - uses: actions/checkout@v2 9 | - name: Setup node 10 | uses: actions/setup-node@v2 11 | with: 12 | node-version: 12 13 | - uses: denoland/setup-deno@v1 14 | with: 15 | deno-version: v1.x 16 | - run: npm install 17 | - run: npm run build 18 | - run: npm run deno-test 19 | - run: npm test 20 | - run: npm run prebuild-libc 21 | if: startsWith(github.ref, 'refs/tags/') 22 | - run: npm run prebuild-libc 23 | if: startsWith(github.ref, 'refs/tags/') 24 | env: 25 | PREBUILD_ARCH: arm64 26 | - run: tar --create --format ustar --verbose --file=prebuild-darwin.tar -C prebuilds . 27 | if: startsWith(github.ref, 'refs/tags/') 28 | - name: Prebuild 29 | uses: softprops/action-gh-release@v1 30 | if: startsWith(github.ref, 'refs/tags/') 31 | with: 32 | files: prebuild-darwin.tar 33 | build-centos-7: 34 | if: startsWith(github.ref, 'refs/tags/') 35 | runs-on: ubuntu-18.04 36 | container: quay.io/pypa/manylinux2014_x86_64 37 | steps: 38 | - uses: actions/checkout@v2 39 | - name: Setup node 40 | uses: actions/setup-node@v2 41 | with: 42 | node-version: 12 43 | - run: npm install 44 | - run: npm run build 45 | - run: npm run prebuild-libc 46 | - run: ls prebuilds/linux-x64 47 | #- run: cp prebuilds/linux-x64/node.abi93.glibc.node prebuilds/linux-x64/node.abi92.glibc.node 48 | #- run: npm run prebuildify 49 | # env: 50 | # ENABLE_FAST_API_CALLS: true 51 | - run: npm test 52 | - run: tar --create --verbose --file=prebuild-linux.tar -C prebuilds . 
53 | - name: Prebuild 54 | if: startsWith(github.ref, 'refs/tags/') 55 | uses: softprops/action-gh-release@v1 56 | with: 57 | files: prebuild-linux.tar 58 | build-debian-9: 59 | runs-on: ubuntu-18.04 60 | container: node:14-stretch 61 | steps: 62 | - uses: actions/checkout@v2 63 | - uses: denoland/setup-deno@v1 64 | with: 65 | deno-version: v1.18.0 66 | - run: npm install 67 | - run: npm run build 68 | - run: npm run deno-test 69 | - run: npm test 70 | build-alpine: 71 | if: startsWith(github.ref, 'refs/tags/') 72 | runs-on: ubuntu-18.04 73 | container: node:12-alpine 74 | steps: 75 | - run: apk add python3 py3-pip build-base 76 | - uses: actions/checkout@v2 77 | - run: npm install --ignore-scripts 78 | - run: wget https://musl.cc/aarch64-linux-musl-cross.tgz 79 | - run: tar -xf aarch64-linux-musl-cross.tgz && pwd && ls 80 | - run: npm run prebuild-libc 81 | env: 82 | PREBUILD_LIBC: musl 83 | PREBUILD_ARCH: arm64 84 | CC: ${PWD}/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc 85 | CXX: ${PWD}/aarch64-linux-musl-cross/bin/aarch64-linux-musl-g++ 86 | - run: npm run prebuild-libc 87 | env: 88 | PREBUILD_LIBC: musl 89 | - run: npm run build 90 | - run: npm test 91 | - run: tar --create --verbose --file=prebuild-alpine.tar -C prebuilds . 
92 | - name: Prebuild 93 | if: startsWith(github.ref, 'refs/tags/') 94 | uses: softprops/action-gh-release@v1 95 | with: 96 | files: prebuild-alpine.tar 97 | build-linux-arm64: 98 | if: startsWith(github.ref, 'refs/tags/') 99 | runs-on: ubuntu-18.04 100 | container: quay.io/pypa/manylinux_2_24_x86_64 101 | steps: 102 | - run: apt-get update 103 | - run: apt-get install -y gcc-aarch64-linux-gnu 104 | - run: apt-get install -y g++-aarch64-linux-gnu 105 | - run: ldd --version ldd 106 | - uses: actions/checkout@v2 107 | - name: Setup node 108 | uses: actions/setup-node@v2 109 | with: 110 | node-version: 12 111 | - run: npm install 112 | - run: npm run prebuild-libc 113 | if: startsWith(github.ref, 'refs/tags/') 114 | env: 115 | PREBUILD_ARCH: arm64 116 | CC: aarch64-linux-gnu-gcc 117 | CXX: aarch64-linux-gnu-g++ 118 | - run: tar --create --verbose --file=prebuild-linux-arm64.tar -C prebuilds . 119 | - name: Prebuild 120 | if: startsWith(github.ref, 'refs/tags/') 121 | uses: softprops/action-gh-release@v1 122 | with: 123 | files: prebuild-linux-arm64.tar 124 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Build and test data 2 | build 3 | prebuilds 4 | testdata 5 | test/testdata* 6 | benchmark/benchdata* 7 | dist 8 | 9 | # qmake autogenerated files 10 | moc_* 11 | qrc_* 12 | ui_* 13 | Makefile 14 | 15 | # Qt Creator's stuff 16 | *.pro.user 17 | qtc_packaging 18 | 19 | # Other generated files 20 | *.o 21 | *.slo 22 | *.lo 23 | *.core 24 | MANIFEST 25 | 26 | # gedit's temp files 27 | *~ 28 | .goutputstream* 29 | 30 | # Compiled Dynamic libraries 31 | *.so 32 | *.dylib 33 | 34 | # Compiled Static libraries 35 | *.lai 36 | *.la 37 | *.a 38 | 39 | # NPM dependencies 40 | node_modules/ 41 | yarn.lock 42 | 43 | # Visual Studio Code directory 44 | .vscode 45 | .vs 46 | 47 | #package-lock.json 48 | yarn.lock 49 | 50 | tests/db/ 51 | test/db/ 52 | 
.DS_Store -------------------------------------------------------------------------------- /.jshintrc: -------------------------------------------------------------------------------- 1 | { 2 | "bitwise": false, 3 | "browser": true, 4 | "camelcase": false, 5 | "curly": true, 6 | "devel": false, 7 | "eqeqeq": true, 8 | "esnext": true, 9 | "freeze": true, 10 | "immed": true, 11 | "indent": 2, 12 | "latedef": true, 13 | "newcap": false, 14 | "noarg": true, 15 | "node": true, 16 | "noempty": true, 17 | "nonew": true, 18 | "quotmark": "single", 19 | "regexp": true, 20 | "smarttabs": false, 21 | "strict": true, 22 | "trailing": true, 23 | "undef": true, 24 | "unused": true, 25 | "maxparams": 4, 26 | "maxstatements": 15, 27 | "maxcomplexity": 10, 28 | "maxdepth": 3, 29 | "maxlen": 120, 30 | "multistr": true, 31 | "predef": [ 32 | "after", 33 | "afterEach", 34 | "before", 35 | "beforeEach", 36 | "describe", 37 | "exports", 38 | "it", 39 | "module", 40 | "require" 41 | ] 42 | } -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | 2 | # Build and test data 3 | build 4 | tests 5 | testdata 6 | test/testdata* 7 | benchmark/benchdata 8 | .github 9 | .gitignore 10 | *.sln 11 | *.vcxproj 12 | 13 | # qmake autogenerated files 14 | moc_* 15 | qrc_* 16 | ui_* 17 | Makefile 18 | 19 | # Qt Creator's stuff 20 | *.pro.user 21 | qtc_packaging 22 | 23 | # Other generated files 24 | *.o 25 | *.slo 26 | *.lo 27 | *.core 28 | MANIFEST 29 | 30 | # gedit's temp files 31 | *~ 32 | .goutputstream* 33 | 34 | # Compiled Dynamic libraries 35 | *.so 36 | *.dylib 37 | 38 | # Compiled Static libraries 39 | *.lai 40 | *.la 41 | *.a 42 | 43 | # NPM dependencies 44 | node_modules/ 45 | 46 | # Visual Studio Code directory 47 | .vscode 48 | .vs 49 | 50 | package-lock.json 51 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | This project is based on code from the node-lmdb project ([Copyright (c) 2014 Timur Kristóf](https://github.com/venemo/node-lmdb/)) and LMDB, which has a specific [OpenLDAP license](dependencies/lmdb/libraries/liblmdb/LICENSE). 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | | Version | Supported | 6 | | ------- | ------------------ | 7 | | 0.3.x | :white_check_mark: | 8 | 9 | ## Reporting a Vulnerability 10 | 11 | Please report security vulnerabilities to kriszyp@gmail.com. 
12 | -------------------------------------------------------------------------------- /assets/gatsby.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kriszyp/lmdbx-js/dba8ad0d867506b19d3beb53917847a956672f9c/assets/gatsby.png -------------------------------------------------------------------------------- /assets/harperdb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kriszyp/lmdbx-js/dba8ad0d867506b19d3beb53917847a956672f9c/assets/harperdb.png -------------------------------------------------------------------------------- /assets/performance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kriszyp/lmdbx-js/dba8ad0d867506b19d3beb53917847a956672f9c/assets/performance.png -------------------------------------------------------------------------------- /assets/powers-dre.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kriszyp/lmdbx-js/dba8ad0d867506b19d3beb53917847a956672f9c/assets/powers-dre.png -------------------------------------------------------------------------------- /benchmark/index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | import { Worker, isMainThread, parentPort, threadId } from'worker_threads'; 3 | import { isMaster, fork } from 'cluster'; 4 | import inspector from 'inspector' 5 | 6 | var testDirPath = new URL('./benchdata', import.meta.url).toString().slice(8); 7 | import fs from 'fs'; 8 | import rimraf from 'rimraf'; 9 | import benchmark from 'benchmark'; 10 | var suite = new benchmark.Suite(); 11 | 12 | import { open } from '../node-index.js'; 13 | var env; 14 | var dbi; 15 | var keys = []; 16 | var total = 100; 17 | var store 18 | let data = { 19 | name: 'test', 20 | greeting: 'Hello, World!', 21 | flag: 
true, 22 | littleNum: 3, 23 | biggerNum: 32254435, 24 | decimal:1.332232, 25 | bigDecimal: 3.5522E102, 26 | negative: -54, 27 | aNull: null, 28 | more: 'string', 29 | } 30 | let bigString = 'big' 31 | for (let i = 0; i < 9; i++) { 32 | bigString += bigString 33 | } 34 | //data.more = bigString 35 | console.log(bigString.length) 36 | var c = 0 37 | let result 38 | 39 | let outstanding = 0 40 | let iteration = 1 41 | function setData(deferred) { 42 | /* result = store.transactionAsync(() => { 43 | for (let j = 0;j<100; j++) 44 | store.put((c += 357) % total, data) 45 | })*/ 46 | let key = (c += 357) % total 47 | result = store.put(key, data) 48 | /*if (key % 2 == 0) 49 | result = store.put(key, data) 50 | else 51 | result = store.transactionAsync(() => store.put(key, data))*/ 52 | /* if (iteration++ % 200 == 0) { 53 | setImmediate(() => lastResult.then(() => { 54 | deferred.resolve() 55 | })) 56 | lastResult = result 57 | } else 58 | deferred.resolve()*/ 59 | } 60 | function batchData(deferred) { 61 | result = store.batch(() => { 62 | for (let i = 0; i < 10; i++) { 63 | let key = (c += 357) % total 64 | store.put(key, data) 65 | } 66 | }) 67 | } 68 | let lastResult 69 | function batchDataAdd(deferred) { 70 | outstanding++ 71 | result = store.batch(() => { 72 | for (let i = 0; i < 10; i++) { 73 | let key = (c += 357) 74 | store.put(key, data) 75 | } 76 | }).then(() => { 77 | outstanding-- 78 | }) 79 | if (outstanding < 500) { 80 | deferred.resolve() 81 | } else if (outstanding < 10000) { 82 | setImmediate(() => { 83 | deferred.resolve() 84 | }) 85 | } else { 86 | console.log('delaying') 87 | setTimeout(() => deferred.resolve(), outstanding >> 3) 88 | } 89 | } 90 | 91 | function syncTxn() { 92 | store.transactionSync(() => { 93 | for (let j = 0;j<100; j++) 94 | store.put((c += 357), bigString) 95 | }) 96 | } 97 | 98 | function getData() { 99 | result = store.get((c += 357) % total) 100 | } 101 | function getBinary() { 102 | result = store.getBinary((c += 357) % total) 
103 | } 104 | function getBinaryFast() { 105 | result = store.getBinaryFast((c += 357) % total) 106 | } 107 | let a = Buffer.from('this id\0\0\0\0\0') 108 | let b = Buffer.from('mmmmmmore text') 109 | //b = b.subarray(2,b.length) 110 | let b2 = Buffer.from('the similar key') 111 | let b3 = Buffer.from('this is very similar') 112 | function keyComparison() { 113 | try { 114 | result = store.db.compareKeys(a, b2) 115 | }catch(error) { console.log(error)} 116 | } 117 | function getRange() { 118 | let start = (c += 357) % total 119 | let i = 0 120 | for (let entry of store.getRange({ 121 | start, 122 | end: start + 10 123 | })) { 124 | i++ 125 | } 126 | } 127 | let jsonBuffer = JSON.stringify(data) 128 | function plainJSON() { 129 | result = JSON.parse(jsonBuffer) 130 | } 131 | 132 | if (isMainThread && isMaster) { 133 | try{ 134 | //inspector.open(9330, null, true); //debugger 135 | //debugger 136 | } catch(error) {} 137 | 138 | function cleanup(done) { 139 | // cleanup previous test directory 140 | rimraf(testDirPath, function(err) { 141 | if (err) { 142 | return done(err); 143 | } 144 | // setup clean directory 145 | fs.mkdirSync(testDirPath, { recursive: true }); 146 | done(); 147 | }); 148 | } 149 | function setup() { 150 | console.log('opening', testDirPath) 151 | let rootStore = open(testDirPath, { 152 | noMemInit: true, 153 | pageSize: 0x4000, 154 | //noSync: true, 155 | //winMemoryPriority: 4, 156 | //eventTurnBatching: false, 157 | //overlappingSync: true, 158 | }) 159 | store = rootStore.openDB('testing', { 160 | create: true, 161 | sharedStructuresKey: 100000000, 162 | keyIsUint32: true, 163 | }) 164 | let lastPromise 165 | for (let i = 0; i < total; i++) { 166 | lastPromise = store.put(i, data) 167 | } 168 | return lastPromise?.then(() => { 169 | console.log('setup completed'); 170 | }) 171 | } 172 | var txn; 173 | 174 | cleanup(async function (err) { 175 | if (err) { 176 | throw err; 177 | } 178 | await setup(); 179 | //suite.add('compare keys', 
keyComparison); 180 | //suite.add('syncTxn', syncTxn); 181 | //suite.add('getRange', getRange); 182 | suite.add('setData', setData/*, { 183 | defer: true, 184 | fn: setData 185 | }*/); 186 | /*suite.add('put-batch', { 187 | defer: true, 188 | fn: batchDataAdd 189 | });*/ 190 | suite.add('get', getData);/* 191 | suite.add('plainJSON', plainJSON); 192 | suite.add('getBinary', getBinary);*/ 193 | suite.add('getBinaryFast', getBinaryFast); 194 | suite.on('cycle', function (event) { 195 | console.log({result}) 196 | if (result && result.then) { 197 | let start = Date.now() 198 | result.then(() => { 199 | console.log('last commit took ' + (Date.now() - start) + 'ms') 200 | }) 201 | } 202 | console.log(String(event.target)); 203 | }); 204 | suite.on('complete', async function () { 205 | console.log('Fastest is ' + this.filter('fastest').map('name')); 206 | return 207 | var numCPUs = require('os').cpus().length; 208 | console.log('Test opening/closing threads ' + numCPUs + ' threads'); 209 | for (var i = 0; i < numCPUs; i++) { 210 | var worker = new Worker(__filename); 211 | await new Promise(r => setTimeout(r,30)); 212 | worker.terminate(); 213 | if ((i % 2) == 0) 214 | await new Promise(r => setTimeout(r,30)); 215 | //var worker = fork(); 216 | } 217 | console.log('Now will run benchmark across ' + numCPUs + ' threads'); 218 | for (var i = 0; i < numCPUs; i++) { 219 | var worker = new Worker(__filename); 220 | 221 | //var worker = fork(); 222 | } 223 | }); 224 | 225 | suite.run({ async: true }); 226 | 227 | }); 228 | } else { 229 | let rootStore = open(testDirPath, { 230 | noMemInit: true, 231 | //winMemoryPriority: 4, 232 | pageSize:8192, 233 | }) 234 | store = rootStore.openDB('testing', { 235 | sharedStructuresKey: 100000000, 236 | keysUse32LE: true, 237 | }) 238 | 239 | // other threads 240 | suite.add('put', { 241 | defer: true, 242 | fn: setData 243 | }); 244 | suite.add('get', getData); 245 | suite.add('getBinaryFast', getBinaryFast); 246 | suite.on('cycle', 
function (event) { 247 | if (result && result.then) { 248 | let start = Date.now() 249 | result.then(() => { 250 | console.log('last commit took ' + (Date.now() - start) + 'ms') 251 | }) 252 | } 253 | console.log(String(event.target)); 254 | }); 255 | suite.run({ async: true }); 256 | 257 | } 258 | -------------------------------------------------------------------------------- /benchmark/low-level.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | var inspector = require('inspector') 3 | //inspector.open(9330, null, true) 4 | 5 | var crypto = require('crypto'); 6 | var path = require('path'); 7 | var testDirPath = path.resolve(__dirname, './benchdata'); 8 | 9 | var fs =require('fs'); 10 | var rimraf = require('rimraf'); 11 | var mkdirp = require('mkdirp'); 12 | var benchmark = require('benchmark'); 13 | var suite = new benchmark.Suite(); 14 | 15 | var lmdb = require('..'); 16 | 17 | var env; 18 | var dbi; 19 | var keys = []; 20 | var total = 10000; 21 | var store; 22 | 23 | function cleanup(done) { 24 | // cleanup previous test directory 25 | rimraf(testDirPath, function(err) { 26 | if (err) { 27 | return done(err); 28 | } 29 | // setup clean directory 30 | mkdirp(testDirPath).then(() => { 31 | done(); 32 | }, error => done(error)); 33 | }); 34 | } 35 | 36 | function setup() { 37 | env = new lmdb.Env(); 38 | env.open({ 39 | path: testDirPath, 40 | maxDbs: 10, 41 | mapSize: 1024 * 1024 * 1024 42 | }); 43 | dbi = env.openDbi({ 44 | name: 'benchmarks', 45 | create: true, 46 | compression: new lmdb.Compression({ 47 | threshold: 1000, 48 | dictionary: fs.readFileSync(require.resolve('../dict/dict.txt')), 49 | }) 50 | }); 51 | 52 | var txn = env.beginTxn(); 53 | var c = 0; 54 | let value = 'hello world!' 
55 | for (let i = 0; i < 6; i++) { 56 | value += value 57 | } 58 | while(c < total) { 59 | var key = new Buffer(new Array(8)); 60 | key.writeDoubleBE(c); 61 | keys.push(key.toString('hex')); 62 | txn.putUtf8(dbi, key.toString('hex'), 'testing small'); 63 | c++; 64 | } 65 | txn.commit(); 66 | store = lmdb.open(testDirPath + '.mdb', { 67 | encoding: 'string' 68 | }) 69 | var c= 0; 70 | let lastPromise 71 | while(c < total) { 72 | var key = new Buffer(new Array(8)); 73 | key.writeDoubleBE(c); 74 | keys.push(key.toString('hex')); 75 | lastPromise = store.put(key.toString('hex'), 'testing small'); 76 | c++; 77 | } 78 | return lastPromise.then(() => { 79 | console.log('all committed'); 80 | }) 81 | } 82 | 83 | var txn; 84 | var c = 0; 85 | 86 | function getIndex() { 87 | if (c < total - 1) { 88 | c++; 89 | } else { 90 | c = 0; 91 | } 92 | return c; 93 | } 94 | 95 | function getBinary() { 96 | var data = txn.getBinary(dbi, keys[getIndex()]); 97 | } 98 | 99 | function getBinaryUnsafe() { 100 | //try { 101 | //txn.renew() 102 | var data = txn.getBinaryUnsafe(dbi, keys[getIndex()]); 103 | //var b = dbi.unsafeBuffer 104 | //txn.reset() 105 | //}catch(error){console.error(error)} 106 | } 107 | function getStringFromStore() { 108 | var data = store.get(keys[getIndex()]); 109 | } 110 | 111 | function getString() { 112 | var data = txn.getUtf8(dbi, keys[getIndex()]); 113 | } 114 | 115 | function getStringUnsafe() { 116 | var data = txn.getStringUnsafe(dbi, keys[getIndex()]); 117 | } 118 | 119 | let cursor; 120 | 121 | function cursorGoToNext() { 122 | let readed = 0; 123 | 124 | return () => { 125 | let c = cursor.goToNext(); 126 | readed++; 127 | if (readed >= total) { 128 | c = cursor.goToRange(keys[0]); 129 | readed = 0; // reset to prevent goToRange on every loop 130 | } 131 | } 132 | } 133 | 134 | function cursorGoToNextgetCurrentString() { 135 | let readed = 0; 136 | return () => { 137 | const c = cursor.goToNext(); 138 | readed++; 139 | if (readed >= total) { 140 | 
cursor.goToRange(keys[0]); 141 | readed = 0; // reset to prevent goToRange on every loop 142 | } 143 | const v = cursor.getCurrentUtf8(); 144 | } 145 | } 146 | let b = Buffer.from('Hi there!'); 147 | function bufferToKeyValue() { 148 | if (lmdb.bufferToKeyValue(b) != 'Hi there!') 149 | throw new Error('wrong string') 150 | 151 | } 152 | function keyValueToBuffer() { 153 | if (!lmdb.keyValueToBuffer('Hi there!').equals(b)) 154 | throw new Error('wrong string') 155 | 156 | } 157 | 158 | cleanup(async function (err) { 159 | if (err) { 160 | throw err; 161 | } 162 | 163 | await setup(); 164 | 165 | // suite.add('getBinary', getBinary); 166 | suite.add('getStringFromStore', getStringFromStore); 167 | //suite.add('bufferToKeyValue', bufferToKeyValue) 168 | //suite.add('keyValueToBuffer', keyValueToBuffer) 169 | suite.add('getString', getString); 170 | suite.add('getBinaryUnsafe', getBinaryUnsafe); 171 | suite.add('getStringUnsafe', getStringUnsafe); 172 | //suite.add('cursorGoToNext', cursorGoToNext()); 173 | suite.add('cursorGoToNextgetCurrentString', cursorGoToNextgetCurrentString()); 174 | 175 | suite.on('start', function () { 176 | txn = env.beginTxn({ 177 | readOnly: true 178 | }); 179 | }); 180 | 181 | suite.on('cycle', function (event) { 182 | txn.abort(); 183 | txn = env.beginTxn({ 184 | readOnly: true 185 | }); 186 | if (cursor) cursor.close(); 187 | cursor = new lmdb.Cursor(txn, dbi); 188 | console.log(String(event.target)); 189 | }); 190 | 191 | suite.on('complete', function () { 192 | txn.abort(); 193 | dbi.close(); 194 | env.close(); 195 | if (cursor) 196 | cursor.close(); 197 | console.log('Fastest is ' + this.filter('fastest').map('name')); 198 | }); 199 | 200 | suite.run(); 201 | 202 | }); -------------------------------------------------------------------------------- /binding.gyp: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "os_linux_compiler%": "gcc", 4 | "use_robust%": "false", 5 | 
"use_data_v1%": "false", 6 | "enable_pointer_compression%": "false", 7 | "target%": "", 8 | "build_v8_with_gn": "false", 9 | "runtime%": "node" 10 | }, 11 | "conditions": [ 12 | ['OS=="win"', { 13 | "variables": { 14 | "enable_fast_api_calls%": " class extends Store { 6 | constructor(dbName, options) { 7 | super(dbName, options); 8 | if (!this.env.cacheCommitter) { 9 | this.env.cacheCommitter = true; 10 | this.on('aftercommit', ({ next, last }) => { 11 | do { 12 | let store = next.store; 13 | if (store) { 14 | if (next.flag & FAILED_CONDITION) 15 | next.store.cache.delete(next.key); // just delete it from the map 16 | else { 17 | let expirationPriority = next.valueSize >> 10; 18 | let cache = next.store.cache; 19 | let entry = mapGet.call(cache, next.key); 20 | if (entry) 21 | cache.used(entry, expirationPriority + 4); // this will enter it into the LRFU (with a little lower priority than a read) 22 | } 23 | } 24 | } while (next != last && (next = next.next)) 25 | }); 26 | } 27 | this.db.cachingDb = this; 28 | if (options.cache.clearKeptInterval) 29 | options.cache.clearKeptObjects = clearKeptObjects; 30 | this.cache = new WeakLRUCache(options.cache); 31 | } 32 | get isCaching() { 33 | return true 34 | } 35 | get(id, cacheMode) { 36 | let value = this.cache.getValue(id); 37 | if (value !== undefined) 38 | return value; 39 | value = super.get(id); 40 | if (value && typeof value === 'object' && !cacheMode && typeof id !== 'object') { 41 | let entry = this.cache.setValue(id, value, this.lastSize >> 10); 42 | if (this.useVersions) { 43 | entry.version = getLastVersion(); 44 | } 45 | } 46 | return value; 47 | } 48 | getEntry(id, cacheMode) { 49 | let entry = this.cache.get(id); 50 | if (entry) 51 | return entry; 52 | let value = super.get(id); 53 | if (value === undefined) 54 | return; 55 | if (value && typeof value === 'object' && !cacheMode && typeof id !== 'object') { 56 | entry = this.cache.setValue(id, value, this.lastSize >> 10); 57 | } else { 58 | entry = { value 
}; 59 | } 60 | if (this.useVersions) { 61 | entry.version = getLastVersion(); 62 | } 63 | return entry; 64 | } 65 | putEntry(id, entry, ifVersion) { 66 | let result = super.put(id, entry.value, entry.version, ifVersion); 67 | if (typeof id === 'object') 68 | return result; 69 | if (result && result.then) 70 | this.cache.setManually(id, entry); // set manually so we can keep it pinned in memory until it is committed 71 | else // sync operation, immediately add to cache 72 | this.cache.set(id, entry); 73 | } 74 | put(id, value, version, ifVersion) { 75 | let result = super.put(id, value, version, ifVersion); 76 | if (typeof id !== 'object') { 77 | if (value && value['\x10binary-data\x02']) { 78 | // don't cache binary data, since it will be decoded on get 79 | this.cache.delete(id); 80 | return result; 81 | } 82 | // sync operation, immediately add to cache, otherwise keep it pinned in memory until it is committed 83 | let entry = this.cache.setValue(id, value, !result || result.isSync ? 0 : -1); 84 | if (version !== undefined) 85 | entry.version = typeof version === 'object' ? version.version : version; 86 | } 87 | return result; 88 | } 89 | putSync(id, value, version, ifVersion) { 90 | if (id !== 'object') { 91 | // sync operation, immediately add to cache, otherwise keep it pinned in memory until it is committed 92 | if (value && typeof value === 'object') { 93 | let entry = this.cache.setValue(id, value); 94 | if (version !== undefined) { 95 | entry.version = typeof version === 'object' ? 
version.version : version; 96 | } 97 | } else // it is possible that a value used to exist here 98 | this.cache.delete(id); 99 | } 100 | return super.putSync(id, value, version, ifVersion); 101 | } 102 | remove(id, ifVersion) { 103 | this.cache.delete(id); 104 | return super.remove(id, ifVersion); 105 | } 106 | removeSync(id, ifVersion) { 107 | this.cache.delete(id); 108 | return super.removeSync(id, ifVersion); 109 | } 110 | clearAsync(callback) { 111 | this.cache.clear(); 112 | return super.clearAsync(callback); 113 | } 114 | clearSync() { 115 | this.cache.clear(); 116 | super.clearSync(); 117 | } 118 | childTransaction(execute) { 119 | throw new Error('Child transactions are not supported in caching stores'); 120 | } 121 | }; 122 | export function setGetLastVersion(get) { 123 | getLastVersion = get; 124 | } 125 | -------------------------------------------------------------------------------- /dependencies/libmdbx/GNUmakefile: -------------------------------------------------------------------------------- 1 | # This makefile is for GNU Make 3.80 or above, and nowadays provided 2 | # just for compatibility and preservation of traditions. 3 | # 4 | # Please use CMake in case of any difficulties or 5 | # problems with this old-school's magic. 6 | # 7 | ################################################################################ 8 | # 9 | # Use `make options` to list the available libmdbx build options. 10 | # 11 | # Note that the defaults should already be correct for most platforms; 12 | # you should not need to change any of these. Read their descriptions 13 | # in README and source code (see src/options.h) if you do. 14 | # 15 | 16 | SHELL := env bash 17 | 18 | # install sandbox 19 | DESTDIR ?= 20 | 21 | # install prefixes (inside sandbox) 22 | prefix ?= /usr/local 23 | mandir ?= $(prefix)/man 24 | 25 | # lib/bin suffix for multiarch/biarch, e.g. 
'.x86_64' 26 | suffix ?= 27 | 28 | INSTALL ?= install 29 | CC ?= gcc 30 | CFLAGS_EXTRA ?= 31 | LD ?= ld 32 | MDBX_BUILD_OPTIONS ?=-DNDEBUG=1 33 | MDBX_BUILD_TIMESTAMP ?=$(shell date +%Y-%m-%dT%H:%M:%S%z) 34 | CFLAGS ?= -std=gnu11 -O2 -g -Wall -Werror -Wextra -Wpedantic -ffunction-sections -fPIC -fvisibility=hidden -pthread -Wno-error=attributes $(CFLAGS_EXTRA) 35 | # -Wno-tautological-compare 36 | CXX ?= g++ 37 | # Choosing C++ standard with deferred simple variable expansion trick 38 | CXXSTD ?= $(eval CXXSTD := $$(shell PROBE=$$$$([ -f mdbx.c++ ] && echo mdbx.c++ || echo src/mdbx.c++); for std in gnu++23 c++23 gnu++2b c++2b gnu++20 c++20 gnu++2a c++2a gnu++17 c++17 gnu++1z c++1z gnu++14 c++14 gnu++1y c++1y gnu+11 c++11 gnu++0x c++0x; do $(CXX) -std=$$$${std} -c $$$${PROBE} -o /dev/null 2>std-$$$${std}.err >/dev/null && echo "-std=$$$${std}" && exit; done))$(CXXSTD) 39 | CXXFLAGS = $(CXXSTD) $(filter-out -std=gnu11,$(CFLAGS)) 40 | 41 | # TIP: Try append '--no-as-needed,-lrt' for ability to built with modern glibc, but then use with the old. 
42 | LIBS ?= $(strip -lm $(shell uname | grep -qi SunOS && echo "-lkstat") $(shell uname | grep -qi -e Darwin -e OpenBSD || echo "-lrt") $(shell uname | grep -qi Windows && echo "-lntdll")) 43 | 44 | LDFLAGS ?= $(strip $(shell $(LD) --help 2>/dev/null | grep -q -- --gc-sections && echo '-Wl,--gc-sections,-z,relro,-O1')$(shell $(LD) --help 2>/dev/null | grep -q -- -dead_strip && echo '-Wl,-dead_strip')) 45 | EXE_LDFLAGS ?= -pthread 46 | 47 | ################################################################################ 48 | 49 | UNAME := $(shell uname -s 2>/dev/null || echo Unknown) 50 | define uname2sosuffix 51 | case "$(UNAME)" in 52 | Darwin*|Mach*) echo dylib;; 53 | CYGWIN*|MINGW*|MSYS*|Windows*) echo dll;; 54 | *) echo so;; 55 | esac 56 | endef 57 | SO_SUFFIX := $(shell $(uname2sosuffix)) 58 | HEADERS := mdbx.h mdbx.h++ 59 | LIBRARIES := libmdbx.a libmdbx.$(SO_SUFFIX) 60 | TOOLS := mdbx_stat mdbx_copy mdbx_dump mdbx_load mdbx_chk mdbx_drop 61 | MANPAGES := mdbx_stat.1 mdbx_copy.1 mdbx_dump.1 mdbx_load.1 mdbx_chk.1 mdbx_drop.1 62 | TIP := // TIP: 63 | 64 | .PHONY: all help options lib tools clean install uninstall check_buildflags_tag 65 | .PHONY: install-strip install-no-strip strip libmdbx mdbx show-options 66 | 67 | ifeq ("$(origin V)", "command line") 68 | MDBX_BUILD_VERBOSE := $(V) 69 | endif 70 | ifndef MDBX_BUILD_VERBOSE 71 | MDBX_BUILD_VERBOSE := 0 72 | endif 73 | 74 | ifeq ($(MDBX_BUILD_VERBOSE),1) 75 | QUIET := 76 | HUSH := 77 | $(info $(TIP) Use `make V=0` for quiet.) 78 | else 79 | QUIET := @ 80 | HUSH := >/dev/null 81 | $(info $(TIP) Use `make V=1` for verbose.) 
82 | endif 83 | 84 | all: show-options $(LIBRARIES) $(TOOLS) 85 | 86 | help: 87 | @echo " make all - build libraries and tools" 88 | @echo " make help - print this help" 89 | @echo " make options - list build options" 90 | @echo " make lib - build libraries" 91 | @echo " make tools - built tools" 92 | @echo " make clean " 93 | @echo " make install " 94 | @echo " make uninstall " 95 | @echo "" 96 | @echo " make strip - strip debug symbols from binaries" 97 | @echo " make install-no-strip - install explicitly without strip" 98 | @echo " make install-strip - install explicitly with strip" 99 | @echo "" 100 | @echo " make bench - run ioarena-benchmark" 101 | @echo " make bench-couple - run ioarena-benchmark for mdbx and lmdb" 102 | @echo " make bench-triplet - run ioarena-benchmark for mdbx, lmdb, sqlite3" 103 | @echo " make bench-quartet - run ioarena-benchmark for mdbx, lmdb, rocksdb, wiredtiger" 104 | @echo " make bench-clean - remove temp database(s) after benchmark" 105 | 106 | show-options: 107 | @echo " MDBX_BUILD_OPTIONS = $(MDBX_BUILD_OPTIONS)" 108 | @echo " MDBX_BUILD_TIMESTAMP = $(MDBX_BUILD_TIMESTAMP)" 109 | @echo '$(TIP) Use `make options` to listing available build options.' 110 | @echo " CC =`which $(CC)` | `$(CC) --version | head -1`" 111 | @echo " CFLAGS =$(CFLAGS)" 112 | @echo " CXXFLAGS =$(CXXFLAGS)" 113 | @echo " LDFLAGS =$(LDFLAGS) $(LIBS) $(EXE_LDFLAGS)" 114 | @echo '$(TIP) Use `make help` to listing available targets.' 
115 | 116 | options: 117 | @echo " INSTALL =$(INSTALL)" 118 | @echo " DESTDIR =$(DESTDIR)" 119 | @echo " prefix =$(prefix)" 120 | @echo " mandir =$(mandir)" 121 | @echo " suffix =$(suffix)" 122 | @echo "" 123 | @echo " CC =$(CC)" 124 | @echo " CFLAGS_EXTRA =$(CFLAGS_EXTRA)" 125 | @echo " CFLAGS =$(CFLAGS)" 126 | @echo " CXX =$(CXX)" 127 | @echo " CXXSTD =$(CXXSTD)" 128 | @echo " CXXFLAGS =$(CXXFLAGS)" 129 | @echo "" 130 | @echo " LD =$(LD)" 131 | @echo " LDFLAGS =$(LDFLAGS)" 132 | @echo " EXE_LDFLAGS =$(EXE_LDFLAGS)" 133 | @echo " LIBS =$(LIBS)" 134 | @echo "" 135 | @echo " MDBX_BUILD_OPTIONS = $(MDBX_BUILD_OPTIONS)" 136 | @echo " MDBX_BUILD_TIMESTAMP = $(MDBX_BUILD_TIMESTAMP)" 137 | @echo "" 138 | @echo "## Assortment items for MDBX_BUILD_OPTIONS:" 139 | @echo "## Note that the defaults should already be correct for most platforms;" 140 | @echo "## you should not need to change any of these. Read their descriptions" 141 | @echo "## in README and source code (see mdbx.c) if you do." 142 | @grep -h '#ifndef MDBX_' mdbx.c | grep -v BUILD | uniq | sed 's/#ifndef / /' 143 | 144 | lib libmdbx mdbx: libmdbx.a libmdbx.$(SO_SUFFIX) 145 | 146 | tools: $(TOOLS) 147 | 148 | strip: all 149 | @echo ' STRIP libmdbx.$(SO_SUFFIX) $(TOOLS)' 150 | $(TRACE )strip libmdbx.$(SO_SUFFIX) $(TOOLS) 151 | 152 | clean: 153 | @echo ' REMOVE ...' 154 | $(QUIET)rm -rf $(TOOLS) mdbx_test @* *.[ao] *.[ls]o *.$(SO_SUFFIX) *.dSYM *~ tmp.db/* \ 155 | *.gcov *.log *.err src/*.o test/*.o mdbx_example dist \ 156 | config.h src/config.h src/version.c *.tar* buildflags.tag 157 | 158 | MDBX_BUILD_FLAGS =$(strip $(MDBX_BUILD_OPTIONS) $(CXXSTD) $(CFLAGS) $(LDFLAGS) $(LIBS)) 159 | check_buildflags_tag: 160 | $(QUIET)if [ "$(MDBX_BUILD_FLAGS)" != "$$(cat buildflags.tag 2>&1)" ]; then \ 161 | echo -n " CLEAN for build with specified flags..." 
&& \ 162 | $(MAKE) IOARENA=false CXXSTD= -s clean >/dev/null && echo " Ok" && \ 163 | echo '$(MDBX_BUILD_FLAGS)' > buildflags.tag; \ 164 | fi 165 | 166 | buildflags.tag: check_buildflags_tag 167 | 168 | libmdbx.a: mdbx-static.o mdbx++-static.o 169 | @echo ' AR $@' 170 | $(QUIET)$(AR) rcs $@ $? $(HUSH) 171 | 172 | libmdbx.$(SO_SUFFIX): mdbx-dylib.o mdbx++-dylib.o 173 | @echo ' LD $@' 174 | $(QUIET)$(CXX) $(CXXFLAGS) $^ -pthread -shared $(LDFLAGS) $(LIBS) -o $@ 175 | 176 | 177 | ################################################################################ 178 | # Amalgamated source code, i.e. distributed after `make dist` 179 | MAN_SRCDIR := man1/ 180 | 181 | config.h: buildflags.tag mdbx.c $(lastword $(MAKEFILE_LIST)) 182 | @echo ' MAKE $@' 183 | $(QUIET)(echo '#define MDBX_BUILD_TIMESTAMP "$(MDBX_BUILD_TIMESTAMP)"' \ 184 | && echo "#define MDBX_BUILD_FLAGS \"$$(cat buildflags.tag)\"" \ 185 | && echo '#define MDBX_BUILD_COMPILER "$(shell (LC_ALL=C $(CC) --version || echo 'Please use GCC or CLANG compatible compiler') | head -1)"' \ 186 | && echo '#define MDBX_BUILD_TARGET "$(shell set -o pipefail; (LC_ALL=C $(CC) -v 2>&1 | grep -i '^Target:' | cut -d ' ' -f 2- || (LC_ALL=C $(CC) --version | grep -qi e2k && echo E2K) || echo 'Please use GCC or CLANG compatible compiler') | head -1)"' \ 187 | ) >$@ 188 | 189 | mdbx-dylib.o: config.h mdbx.c mdbx.h $(lastword $(MAKEFILE_LIST)) 190 | @echo ' CC $@' 191 | $(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -DLIBMDBX_EXPORTS=1 -c mdbx.c -o $@ 192 | 193 | mdbx-static.o: config.h mdbx.c mdbx.h $(lastword $(MAKEFILE_LIST)) 194 | @echo ' CC $@' 195 | $(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -ULIBMDBX_EXPORTS -c mdbx.c -o $@ 196 | 197 | mdbx++-dylib.o: config.h mdbx.c++ mdbx.h mdbx.h++ $(lastword $(MAKEFILE_LIST)) 198 | @echo ' CC $@' 199 | $(QUIET)$(CXX) $(CXXFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -DLIBMDBX_EXPORTS=1 -c mdbx.c++ -o $@ 200 | 201 | 
mdbx++-static.o: config.h mdbx.c++ mdbx.h mdbx.h++ $(lastword $(MAKEFILE_LIST)) 202 | @echo ' CC $@' 203 | $(QUIET)$(CXX) $(CXXFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -ULIBMDBX_EXPORTS -c mdbx.c++ -o $@ 204 | 205 | mdbx_%: mdbx_%.c libmdbx.a 206 | @echo ' CC+LD $@' 207 | $(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' $^ $(EXE_LDFLAGS) $(LIBS) -o $@ 208 | 209 | 210 | install: $(LIBRARIES) $(TOOLS) $(HEADERS) 211 | @echo ' INSTALLING...' 212 | $(QUIET)mkdir -p $(DESTDIR)$(prefix)/bin$(suffix) && \ 213 | $(INSTALL) -p $(EXE_INSTALL_FLAGS) $(TOOLS) $(DESTDIR)$(prefix)/bin$(suffix)/ && \ 214 | mkdir -p $(DESTDIR)$(prefix)/lib$(suffix)/ && \ 215 | $(INSTALL) -p $(EXE_INSTALL_FLAGS) $(filter-out libmdbx.a,$(LIBRARIES)) $(DESTDIR)$(prefix)/lib$(suffix)/ && \ 216 | mkdir -p $(DESTDIR)$(prefix)/lib$(suffix)/ && \ 217 | $(INSTALL) -p libmdbx.a $(DESTDIR)$(prefix)/lib$(suffix)/ && \ 218 | mkdir -p $(DESTDIR)$(prefix)/include/ && \ 219 | $(INSTALL) -p -m 444 $(HEADERS) $(DESTDIR)$(prefix)/include/ && \ 220 | mkdir -p $(DESTDIR)$(mandir)/man1/ && \ 221 | $(INSTALL) -p -m 444 $(addprefix $(MAN_SRCDIR), $(MANPAGES)) $(DESTDIR)$(mandir)/man1/ 222 | 223 | install-strip: EXE_INSTALL_FLAGS = -s 224 | install-strip: install 225 | 226 | install-no-strip: EXE_INSTALL_FLAGS = 227 | install-no-strip: install 228 | 229 | uninstall: 230 | @echo ' UNINSTALLING/REMOVE...' 
231 | $(QUIET)rm -f $(addprefix $(DESTDIR)$(prefix)/bin$(suffix)/,$(TOOLS)) \ 232 | $(addprefix $(DESTDIR)$(prefix)/lib$(suffix)/,$(LIBRARIES)) \ 233 | $(addprefix $(DESTDIR)$(prefix)/include/,$(HEADERS)) \ 234 | $(addprefix $(DESTDIR)$(mandir)/man1/,$(MANPAGES)) 235 | 236 | ################################################################################ 237 | # Benchmarking by ioarena 238 | 239 | ifeq ($(origin IOARENA),undefined) 240 | IOARENA := $(shell \ 241 | (test -x ../ioarena/@BUILD/src/ioarena && echo ../ioarena/@BUILD/src/ioarena) || \ 242 | (test -x ../../@BUILD/src/ioarena && echo ../../@BUILD/src/ioarena) || \ 243 | (test -x ../../src/ioarena && echo ../../src/ioarena) || which ioarena 2>&- || \ 244 | (echo false && echo '$(TIP) Clone and build the https://github.com/pmwkaa/ioarena.git within a neighbouring directory for availability of benchmarking.' >&2)) 245 | endif 246 | NN ?= 25000000 247 | BENCH_CRUD_MODE ?= nosync 248 | 249 | bench-clean: 250 | @echo ' REMOVE bench-*.txt _ioarena/*' 251 | $(QUIET)rm -rf bench-*.txt _ioarena/* 252 | 253 | re-bench: bench-clean bench 254 | 255 | ifeq ($(or $(IOARENA),false),false) 256 | bench bench-quartet bench-triplet bench-couple: 257 | $(QUIET)echo 'The `ioarena` benchmark is required.' >&2 && \ 258 | echo 'Please clone and build the https://github.com/pmwkaa/ioarena.git within a neighbouring `ioarena` directory.' >&2 && \ 259 | false 260 | 261 | else 262 | 263 | .PHONY: bench bench-clean bench-couple re-bench bench-quartet bench-triplet 264 | 265 | define bench-rule 266 | bench-$(1)_$(2).txt: $(3) $(IOARENA) $(lastword $(MAKEFILE_LIST)) 267 | @echo ' RUNNING ioarena for $1/$2...' 
268 | $(QUIET)LD_LIBRARY_PATH="./:$$$${LD_LIBRARY_PATH}" \ 269 | $(IOARENA) -D $(1) -B crud -m $(BENCH_CRUD_MODE) -n $(2) \ 270 | | tee $$@ | grep throughput && \ 271 | LD_LIBRARY_PATH="./:$$$${LD_LIBRARY_PATH}" \ 272 | $(IOARENA) -D $(1) -B get,iterate -m $(BENCH_CRUD_MODE) -r 4 -n $(2) \ 273 | | tee -a $$@ | grep throughput \ 274 | || mv -f $$@ $$@.error 275 | 276 | endef 277 | 278 | $(eval $(call bench-rule,mdbx,$(NN),libmdbx.$(SO_SUFFIX))) 279 | 280 | $(eval $(call bench-rule,sophia,$(NN))) 281 | $(eval $(call bench-rule,leveldb,$(NN))) 282 | $(eval $(call bench-rule,rocksdb,$(NN))) 283 | $(eval $(call bench-rule,wiredtiger,$(NN))) 284 | $(eval $(call bench-rule,forestdb,$(NN))) 285 | $(eval $(call bench-rule,lmdb,$(NN))) 286 | $(eval $(call bench-rule,nessdb,$(NN))) 287 | $(eval $(call bench-rule,sqlite3,$(NN))) 288 | $(eval $(call bench-rule,ejdb,$(NN))) 289 | $(eval $(call bench-rule,vedisdb,$(NN))) 290 | $(eval $(call bench-rule,dummy,$(NN))) 291 | bench: bench-mdbx_$(NN).txt 292 | bench-quartet: bench-mdbx_$(NN).txt bench-lmdb_$(NN).txt bench-rocksdb_$(NN).txt bench-wiredtiger_$(NN).txt 293 | bench-triplet: bench-mdbx_$(NN).txt bench-lmdb_$(NN).txt bench-sqlite3_$(NN).txt 294 | bench-couple: bench-mdbx_$(NN).txt bench-lmdb_$(NN).txt 295 | 296 | # $(eval $(call bench-rule,debug,10)) 297 | # .PHONY: bench-debug 298 | # bench-debug: bench-debug_10.txt 299 | 300 | endif 301 | -------------------------------------------------------------------------------- /dependencies/libmdbx/LICENSE: -------------------------------------------------------------------------------- 1 | The OpenLDAP Public License 2 | Version 2.8, 17 August 2003 3 | 4 | Redistribution and use of this software and associated documentation 5 | ("Software"), with or without modification, are permitted provided 6 | that the following conditions are met: 7 | 8 | 1. Redistributions in source form must retain copyright statements 9 | and notices, 10 | 11 | 2. 
Redistributions in binary form must reproduce applicable copyright 12 | statements and notices, this list of conditions, and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution, and 15 | 16 | 3. Redistributions must contain a verbatim copy of this document. 17 | 18 | The OpenLDAP Foundation may revise this license from time to time. 19 | Each revision is distinguished by a version number. You may use 20 | this Software under terms of this license revision or under the 21 | terms of any subsequent revision of the license. 22 | 23 | THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS 24 | CONTRIBUTORS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, 25 | INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 26 | AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 27 | SHALL THE OPENLDAP FOUNDATION, ITS CONTRIBUTORS, OR THE AUTHOR(S) 28 | OR OWNER(S) OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, 29 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 32 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 34 | ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 | POSSIBILITY OF SUCH DAMAGE. 36 | 37 | The names of the authors and copyright holders must not be used in 38 | advertising or otherwise to promote the sale, use or other dealing 39 | in this Software without specific, written prior permission. Title 40 | to copyright in this Software shall at all times remain with copyright 41 | holders. 42 | 43 | OpenLDAP is a registered trademark of the OpenLDAP Foundation. 44 | 45 | Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, 46 | California, USA. All Rights Reserved. 
Permission to copy and 47 | distribute verbatim copies of this document is granted. 48 | -------------------------------------------------------------------------------- /dependencies/libmdbx/VERSION.txt: -------------------------------------------------------------------------------- 1 | 0.11.3.0 2 | -------------------------------------------------------------------------------- /dependencies/libmdbx/cmake/profile.cmake: -------------------------------------------------------------------------------- 1 | ## Copyright (c) 2012-2021 Leonid Yuriev . 2 | ## 3 | ## Licensed under the Apache License, Version 2.0 (the "License"); 4 | ## you may not use this file except in compliance with the License. 5 | ## You may obtain a copy of the License at 6 | ## 7 | ## http://www.apache.org/licenses/LICENSE-2.0 8 | ## 9 | ## Unless required by applicable law or agreed to in writing, software 10 | ## distributed under the License is distributed on an "AS IS" BASIS, 11 | ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | ## See the License for the specific language governing permissions and 13 | ## limitations under the License. 
14 | ## 15 | 16 | if(CMAKE_VERSION VERSION_LESS 3.12) 17 | cmake_minimum_required(VERSION 3.8.2) 18 | else() 19 | cmake_minimum_required(VERSION 3.12) 20 | endif() 21 | 22 | cmake_policy(PUSH) 23 | cmake_policy(VERSION ${CMAKE_MINIMUM_REQUIRED_VERSION}) 24 | 25 | include(CheckLibraryExists) 26 | check_library_exists(gcov __gcov_flush "" HAVE_GCOV) 27 | 28 | option(ENABLE_GCOV 29 | "Enable integration with gcov, a code coverage program" OFF) 30 | 31 | option(ENABLE_GPROF 32 | "Enable integration with gprof, a performance analyzing tool" OFF) 33 | 34 | if(CMAKE_CXX_COMPILER_LOADED) 35 | include(CheckIncludeFileCXX) 36 | check_include_file_cxx(valgrind/memcheck.h HAVE_VALGRIND_MEMCHECK_H) 37 | else() 38 | include(CheckIncludeFile) 39 | check_include_file(valgrind/memcheck.h HAVE_VALGRIND_MEMCHECK_H) 40 | endif() 41 | 42 | option(MDBX_USE_VALGRIND "Enable integration with valgrind, a memory analyzing tool" OFF) 43 | if(MDBX_USE_VALGRIND AND NOT HAVE_VALGRIND_MEMCHECK_H) 44 | message(FATAL_ERROR "MDBX_USE_VALGRIND option is set but valgrind/memcheck.h is not found") 45 | endif() 46 | 47 | option(ENABLE_ASAN 48 | "Enable AddressSanitizer, a fast memory error detector based on compiler instrumentation" OFF) 49 | 50 | option(ENABLE_UBSAN 51 | "Enable UndefinedBehaviorSanitizer, a fast undefined behavior detector based on compiler instrumentation" OFF) 52 | 53 | cmake_policy(POP) 54 | -------------------------------------------------------------------------------- /dependencies/libmdbx/cmake/utils.cmake: -------------------------------------------------------------------------------- 1 | ## Copyright (c) 2012-2021 Leonid Yuriev . 2 | ## 3 | ## Licensed under the Apache License, Version 2.0 (the "License"); 4 | ## you may not use this file except in compliance with the License. 
5 | ## You may obtain a copy of the License at 6 | ## 7 | ## http://www.apache.org/licenses/LICENSE-2.0 8 | ## 9 | ## Unless required by applicable law or agreed to in writing, software 10 | ## distributed under the License is distributed on an "AS IS" BASIS, 11 | ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | ## See the License for the specific language governing permissions and 13 | ## limitations under the License. 14 | ## 15 | 16 | if(CMAKE_VERSION VERSION_LESS 3.12) 17 | cmake_minimum_required(VERSION 3.8.2) 18 | else() 19 | cmake_minimum_required(VERSION 3.12) 20 | endif() 21 | 22 | cmake_policy(PUSH) 23 | cmake_policy(VERSION ${CMAKE_MINIMUM_REQUIRED_VERSION}) 24 | 25 | macro(add_compile_flags languages) 26 | foreach(_lang ${languages}) 27 | string(REPLACE ";" " " _flags "${ARGN}") 28 | if(CMAKE_CXX_COMPILER_LOADED AND _lang STREQUAL "CXX") 29 | set("${_lang}_FLAGS" "${${_lang}_FLAGS} ${_flags}") 30 | endif() 31 | if(CMAKE_C_COMPILER_LOADED AND _lang STREQUAL "C") 32 | set("${_lang}_FLAGS" "${${_lang}_FLAGS} ${_flags}") 33 | endif() 34 | endforeach() 35 | unset(_lang) 36 | unset(_flags) 37 | endmacro(add_compile_flags) 38 | 39 | macro(set_source_files_compile_flags) 40 | foreach(file ${ARGN}) 41 | get_filename_component(_file_ext ${file} EXT) 42 | set(_lang "") 43 | if("${_file_ext}" STREQUAL ".m") 44 | set(_lang OBJC) 45 | # CMake believes that Objective C is a flavor of C++, not C, 46 | # and uses g++ compiler for .m files. 
47 | # LANGUAGE property forces CMake to use CC for ${file} 48 | set_source_files_properties(${file} PROPERTIES LANGUAGE C) 49 | elseif("${_file_ext}" STREQUAL ".mm") 50 | set(_lang OBJCXX) 51 | endif() 52 | 53 | if(_lang) 54 | get_source_file_property(_flags ${file} COMPILE_FLAGS) 55 | if("${_flags}" STREQUAL "NOTFOUND") 56 | set(_flags "${CMAKE_${_lang}_FLAGS}") 57 | else() 58 | set(_flags "${_flags} ${CMAKE_${_lang}_FLAGS}") 59 | endif() 60 | # message(STATUS "Set (${file} ${_flags}") 61 | set_source_files_properties(${file} PROPERTIES COMPILE_FLAGS 62 | "${_flags}") 63 | endif() 64 | endforeach() 65 | unset(_file_ext) 66 | unset(_lang) 67 | endmacro(set_source_files_compile_flags) 68 | 69 | macro(fetch_version name source_root_directory parent_scope) 70 | set(${name}_VERSION "") 71 | set(${name}_GIT_DESCRIBE "") 72 | set(${name}_GIT_TIMESTAMP "") 73 | set(${name}_GIT_TREE "") 74 | set(${name}_GIT_COMMIT "") 75 | set(${name}_GIT_REVISION 0) 76 | set(${name}_GIT_VERSION "") 77 | if(GIT AND EXISTS "${source_root_directory}/.git") 78 | execute_process(COMMAND ${GIT} describe --tags --long --dirty=-dirty 79 | OUTPUT_VARIABLE ${name}_GIT_DESCRIBE 80 | OUTPUT_STRIP_TRAILING_WHITESPACE 81 | WORKING_DIRECTORY ${source_root_directory} 82 | RESULT_VARIABLE rc) 83 | if(rc OR "${name}_GIT_DESCRIBE" STREQUAL "") 84 | message(FATAL_ERROR "Please fetch tags and/or install latest version of git ('describe --tags --long --dirty' failed)") 85 | endif() 86 | 87 | execute_process(COMMAND ${GIT} show --no-patch --format=%cI HEAD 88 | OUTPUT_VARIABLE ${name}_GIT_TIMESTAMP 89 | OUTPUT_STRIP_TRAILING_WHITESPACE 90 | WORKING_DIRECTORY ${source_root_directory} 91 | RESULT_VARIABLE rc) 92 | if(rc OR "${name}_GIT_TIMESTAMP" STREQUAL "%cI") 93 | execute_process(COMMAND ${GIT} show --no-patch --format=%ci HEAD 94 | OUTPUT_VARIABLE ${name}_GIT_TIMESTAMP 95 | OUTPUT_STRIP_TRAILING_WHITESPACE 96 | WORKING_DIRECTORY ${source_root_directory} 97 | RESULT_VARIABLE rc) 98 | if(rc OR 
"${name}_GIT_TIMESTAMP" STREQUAL "%ci") 99 | message(FATAL_ERROR "Please install latest version of git ('show --no-patch --format=%cI HEAD' failed)") 100 | endif() 101 | endif() 102 | 103 | execute_process(COMMAND ${GIT} show --no-patch --format=%T HEAD 104 | OUTPUT_VARIABLE ${name}_GIT_TREE 105 | OUTPUT_STRIP_TRAILING_WHITESPACE 106 | WORKING_DIRECTORY ${source_root_directory} 107 | RESULT_VARIABLE rc) 108 | if(rc OR "${name}_GIT_TREE" STREQUAL "") 109 | message(FATAL_ERROR "Please install latest version of git ('show --no-patch --format=%T HEAD' failed)") 110 | endif() 111 | 112 | execute_process(COMMAND ${GIT} show --no-patch --format=%H HEAD 113 | OUTPUT_VARIABLE ${name}_GIT_COMMIT 114 | OUTPUT_STRIP_TRAILING_WHITESPACE 115 | WORKING_DIRECTORY ${source_root_directory} 116 | RESULT_VARIABLE rc) 117 | if(rc OR "${name}_GIT_COMMIT" STREQUAL "") 118 | message(FATAL_ERROR "Please install latest version of git ('show --no-patch --format=%H HEAD' failed)") 119 | endif() 120 | 121 | execute_process(COMMAND ${GIT} describe --tags --abbrev=0 "--match=v[0-9]*" 122 | OUTPUT_VARIABLE last_release_tag 123 | OUTPUT_STRIP_TRAILING_WHITESPACE 124 | WORKING_DIRECTORY ${source_root_directory} 125 | RESULT_VARIABLE rc) 126 | if(rc) 127 | message(FATAL_ERROR "Please install latest version of git ('describe --tags --abbrev=0 --match=v[0-9]*' failed)") 128 | endif() 129 | if (last_release_tag) 130 | set(git_revlist_arg "${last_release_tag}..HEAD") 131 | else() 132 | execute_process(COMMAND ${GIT} tag --sort=-version:refname 133 | OUTPUT_VARIABLE tag_list 134 | OUTPUT_STRIP_TRAILING_WHITESPACE 135 | WORKING_DIRECTORY ${source_root_directory} 136 | RESULT_VARIABLE rc) 137 | if(rc) 138 | message(FATAL_ERROR "Please install latest version of git ('tag --sort=-version:refname' failed)") 139 | endif() 140 | string(REGEX REPLACE "\n" ";" tag_list "${tag_list}") 141 | set(git_revlist_arg "HEAD") 142 | foreach(tag IN LISTS tag_list) 143 | if(NOT last_release_tag) 144 | string(REGEX MATCH 
"^v[0-9]+(\.[0-9]+)+" last_release_tag "${tag}") 145 | set(git_revlist_arg "${tag}..HEAD") 146 | endif() 147 | endforeach(tag) 148 | endif() 149 | execute_process(COMMAND ${GIT} rev-list --count "${git_revlist_arg}" 150 | OUTPUT_VARIABLE ${name}_GIT_REVISION 151 | OUTPUT_STRIP_TRAILING_WHITESPACE 152 | WORKING_DIRECTORY ${source_root_directory} 153 | RESULT_VARIABLE rc) 154 | if(rc OR "${name}_GIT_REVISION" STREQUAL "") 155 | message(FATAL_ERROR "Please install latest version of git ('rev-list --count ${git_revlist_arg}' failed)") 156 | endif() 157 | 158 | string(REGEX MATCH "^(v)?([0-9]+)\\.([0-9]+)\\.([0-9]+)(.*)?" git_version_valid "${${name}_GIT_DESCRIBE}") 159 | if(git_version_valid) 160 | string(REGEX REPLACE "^(v)?([0-9]+)\\.([0-9]+)\\.([0-9]+)(.*)?" "\\2;\\3;\\4" ${name}_GIT_VERSION ${${name}_GIT_DESCRIBE}) 161 | else() 162 | string(REGEX MATCH "^(v)?([0-9]+)\\.([0-9]+)(.*)?" git_version_valid "${${name}_GIT_DESCRIBE}") 163 | if(git_version_valid) 164 | string(REGEX REPLACE "^(v)?([0-9]+)\\.([0-9]+)(.*)?" 
"\\2;\\3;0" ${name}_GIT_VERSION ${${name}_GIT_DESCRIBE}) 165 | else() 166 | message(AUTHOR_WARNING "Bad ${name} version \"${${name}_GIT_DESCRIBE}\"; falling back to 0.0.0 (have you made an initial release?)") 167 | set(${name}_GIT_VERSION "0;0;0") 168 | endif() 169 | endif() 170 | endif() 171 | 172 | if(NOT ${name}_GIT_VERSION OR NOT ${name}_GIT_TIMESTAMP OR ${name}_GIT_REVISION STREQUAL "") 173 | if(GIT AND EXISTS "${source_root_directory}/.git") 174 | message(WARNING "Unable to retrieve ${name} version from git.") 175 | endif() 176 | set(${name}_GIT_VERSION "0;0;0;0") 177 | set(${name}_GIT_TIMESTAMP "") 178 | set(${name}_GIT_REVISION 0) 179 | 180 | # Try to get version from VERSION file 181 | set(version_file "${source_root_directory}/VERSION.txt") 182 | if(NOT EXISTS "${version_file}") 183 | set(version_file "${source_root_directory}/VERSION") 184 | endif() 185 | if(EXISTS "${version_file}") 186 | file(STRINGS "${version_file}" ${name}_VERSION LIMIT_COUNT 1 LIMIT_INPUT 42) 187 | endif() 188 | 189 | if(NOT ${name}_VERSION) 190 | message(WARNING "Unable to retrieve ${name} version from \"${version_file}\" file.") 191 | set(${name}_VERSION_LIST ${${name}_GIT_VERSION}) 192 | string(REPLACE ";" "." ${name}_VERSION "${${name}_GIT_VERSION}") 193 | else() 194 | string(REPLACE "." ";" ${name}_VERSION_LIST ${${name}_VERSION}) 195 | endif() 196 | 197 | else() 198 | list(APPEND ${name}_GIT_VERSION ${${name}_GIT_REVISION}) 199 | set(${name}_VERSION_LIST ${${name}_GIT_VERSION}) 200 | string(REPLACE ";" "." 
${name}_VERSION "${${name}_GIT_VERSION}") 201 | endif() 202 | 203 | list(GET ${name}_VERSION_LIST 0 "${name}_VERSION_MAJOR") 204 | list(GET ${name}_VERSION_LIST 1 "${name}_VERSION_MINOR") 205 | list(GET ${name}_VERSION_LIST 2 "${name}_VERSION_RELEASE") 206 | list(GET ${name}_VERSION_LIST 3 "${name}_VERSION_REVISION") 207 | 208 | if(${parent_scope}) 209 | set(${name}_VERSION_MAJOR "${${name}_VERSION_MAJOR}" PARENT_SCOPE) 210 | set(${name}_VERSION_MINOR "${${name}_VERSION_MINOR}" PARENT_SCOPE) 211 | set(${name}_VERSION_RELEASE "${${name}_VERSION_RELEASE}" PARENT_SCOPE) 212 | set(${name}_VERSION_REVISION "${${name}_VERSION_REVISION}" PARENT_SCOPE) 213 | set(${name}_VERSION "${${name}_VERSION}" PARENT_SCOPE) 214 | 215 | set(${name}_GIT_DESCRIBE "${${name}_GIT_DESCRIBE}" PARENT_SCOPE) 216 | set(${name}_GIT_TIMESTAMP "${${name}_GIT_TIMESTAMP}" PARENT_SCOPE) 217 | set(${name}_GIT_TREE "${${name}_GIT_TREE}" PARENT_SCOPE) 218 | set(${name}_GIT_COMMIT "${${name}_GIT_COMMIT}" PARENT_SCOPE) 219 | set(${name}_GIT_REVISION "${${name}_GIT_REVISION}" PARENT_SCOPE) 220 | set(${name}_GIT_VERSION "${${name}_GIT_VERSION}" PARENT_SCOPE) 221 | endif() 222 | endmacro(fetch_version) 223 | 224 | cmake_policy(POP) 225 | -------------------------------------------------------------------------------- /dependencies/libmdbx/config.h.in: -------------------------------------------------------------------------------- 1 | /* This is CMake-template for libmdbx's config.h 2 | ******************************************************************************/ 3 | 4 | /* *INDENT-OFF* */ 5 | /* clang-format off */ 6 | 7 | #cmakedefine LTO_ENABLED 8 | #cmakedefine MDBX_USE_VALGRIND 9 | #cmakedefine ENABLE_GPROF 10 | #cmakedefine ENABLE_GCOV 11 | #cmakedefine ENABLE_ASAN 12 | #cmakedefine ENABLE_UBSAN 13 | #cmakedefine01 MDBX_FORCE_ASSERTIONS 14 | 15 | /* Common */ 16 | #cmakedefine01 MDBX_TXN_CHECKOWNER 17 | #cmakedefine MDBX_ENV_CHECKPID_AUTO 18 | #ifndef MDBX_ENV_CHECKPID_AUTO 19 | #cmakedefine01 
MDBX_ENV_CHECKPID 20 | #endif 21 | #cmakedefine MDBX_LOCKING_AUTO 22 | #ifndef MDBX_LOCKING_AUTO 23 | #cmakedefine MDBX_LOCKING @MDBX_LOCKING@ 24 | #endif 25 | #cmakedefine MDBX_TRUST_RTC_AUTO 26 | #ifndef MDBX_TRUST_RTC_AUTO 27 | #cmakedefine01 MDBX_TRUST_RTC 28 | #endif 29 | #cmakedefine01 MDBX_DISABLE_PAGECHECKS 30 | 31 | /* Windows */ 32 | #cmakedefine01 MDBX_WITHOUT_MSVC_CRT 33 | 34 | /* MacOS & iOS */ 35 | #cmakedefine01 MDBX_OSX_SPEED_INSTEADOF_DURABILITY 36 | 37 | /* POSIX */ 38 | #cmakedefine01 MDBX_DISABLE_GNU_SOURCE 39 | #cmakedefine MDBX_USE_OFDLOCKS_AUTO 40 | #ifndef MDBX_USE_OFDLOCKS_AUTO 41 | #cmakedefine01 MDBX_USE_OFDLOCKS 42 | #endif 43 | 44 | /* Build Info */ 45 | #ifndef MDBX_BUILD_TIMESTAMP 46 | #cmakedefine MDBX_BUILD_TIMESTAMP "@MDBX_BUILD_TIMESTAMP@" 47 | #endif 48 | #ifndef MDBX_BUILD_TARGET 49 | #cmakedefine MDBX_BUILD_TARGET "@MDBX_BUILD_TARGET@" 50 | #endif 51 | #ifndef MDBX_BUILD_TYPE 52 | #cmakedefine MDBX_BUILD_TYPE "@MDBX_BUILD_TYPE@" 53 | #endif 54 | #ifndef MDBX_BUILD_COMPILER 55 | #cmakedefine MDBX_BUILD_COMPILER "@MDBX_BUILD_COMPILER@" 56 | #endif 57 | #ifndef MDBX_BUILD_FLAGS 58 | #cmakedefine MDBX_BUILD_FLAGS "@MDBX_BUILD_FLAGS@" 59 | #endif 60 | #cmakedefine MDBX_BUILD_SOURCERY @MDBX_BUILD_SOURCERY@ 61 | 62 | /* *INDENT-ON* */ 63 | /* clang-format on */ 64 | -------------------------------------------------------------------------------- /dependencies/libmdbx/man1/mdbx_chk.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2021 Leonid Yuriev . 2 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 
3 | .TH MDBX_CHK 1 "2021-05-09" "MDBX 0.10.0" 4 | .SH NAME 5 | mdbx_chk \- MDBX checking tool 6 | .SH SYNOPSIS 7 | .B mdbx_chk 8 | [\c 9 | .BR \-V ] 10 | [\c 11 | .BR \-v [ v [ v ]]] 12 | [\c 13 | .BR \-n ] 14 | [\c 15 | .BR \-q ] 16 | [\c 17 | .BR \-c ] 18 | [\c 19 | .BR \-w ] 20 | [\c 21 | .BR \-d ] 22 | [\c 23 | .BR \-i ] 24 | [\c 25 | .BI \-s \ subdb\fR] 26 | .BR \ dbpath 27 | .SH DESCRIPTION 28 | The 29 | .B mdbx_chk 30 | utility is intended to check an MDBX database file. 31 | .SH OPTIONS 32 | .TP 33 | .BR \-V 34 | Write the library version number to the standard output, and exit. 35 | .TP 36 | .BR \-v 37 | Produce verbose output, including summarized space and page usage statistics. 38 | If \fB\-vv\fP is given, be more verbose, show summarized B-tree info 39 | and space allocation. 40 | If \fB\-vvv\fP is given, be more verbose, include summarized statistics 41 | of leaf B-tree pages. 42 | If \fB\-vvvv\fP is given, be even more verbose, show info of each page 43 | during B-tree traversal and basic info of each GC record. 44 | If \fB\-vvvvv\fP is given, turn maximal verbosity, display the full list 45 | of page IDs in the GC records and size of each key-value pair of database(s). 46 | .TP 47 | .BR \-q 48 | Be quiet; do not output anything even if an error was detected. 49 | .TP 50 | .BR \-c 51 | Force using cooperative mode while opening environment, i.e. don't try to open 52 | in exclusive/monopolistic mode. Only exclusive/monopolistic mode allows complete 53 | check, including full check of all meta-pages and actual size of database file. 54 | .TP 55 | .BR \-w 56 | Open environment in read-write mode and lock for writing while checking. 57 | This could be impossible if environment already used by another process(s) 58 | in an incompatible read-write mode. This allows rollback to last steady commit 59 | (in case environment was not closed properly) and then check transaction IDs 60 | of meta-pages.
60 | Otherwise, without \fB\-w\fP option environment will be 61 | opened in read-only mode. 62 | .TP 63 | .BR \-d 64 | Disable page-by-page traversal of B-tree. In this case, without B-tree 65 | traversal, it is unable to check for lost-unused pages nor for double-used 66 | pages. 67 | .TP 68 | .BR \-i 69 | Ignore wrong order errors, which will likely false-positive if custom 70 | comparator(s) was used. 71 | .TP 72 | .BR \-s \ subdb 73 | Verify and show info only for a specific subdatabase. 74 | .TP 75 | .BR \-0 | \-1 | \-2 76 | Using specific meta-page 0, 1, or 2 for checking. 77 | .TP 78 | .BR \-t 79 | Turn to a specified meta-page on successful check. 80 | .TP 81 | .BR \-T 82 | Turn to a specified meta-page EVEN ON UNSUCCESSFUL CHECK! 83 | .TP 84 | .BR \-n 85 | Open MDBX environment(s) which do not use subdirectories. 86 | This is a legacy option. For now MDBX handles this automatically. 87 | 88 | .SH DIAGNOSTICS 89 | Exit status is zero if no errors occur. Errors result in a non-zero exit status 90 | and a diagnostic message being written to standard error 91 | if no quiet mode was requested. 92 | .SH "SEE ALSO" 93 | .BR mdbx_stat (1), 94 | .BR mdbx_copy (1), 95 | .BR mdbx_dump (1), 96 | .BR mdbx_load (1) 97 | .BR mdbx_drop (1) 98 | .SH AUTHOR 99 | Leonid Yuriev 100 | -------------------------------------------------------------------------------- /dependencies/libmdbx/man1/mdbx_copy.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2021 Leonid Yuriev . 2 | .\" Copyright 2012-2015 Howard Chu, Symas Corp. All Rights Reserved. 3 | .\" Copyright 2015,2016 Peter-Service R&D LLC . 4 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE.
5 | .TH MDBX_COPY 1 "2021-05-09" "MDBX 0.10.0" 6 | .SH NAME 7 | mdbx_copy \- MDBX environment copy tool 8 | .SH SYNOPSIS 9 | .B mdbx_copy 10 | [\c 11 | .BR \-V ] 12 | [\c 13 | .BR \-q ] 14 | [\c 15 | .BR \-c ] 16 | [\c 17 | .BR \-n ] 18 | .B src_path 19 | [\c 20 | .BR dest_path ] 21 | .SH DESCRIPTION 22 | The 23 | .B mdbx_copy 24 | utility copies an MDBX environment. The environment can 25 | be copied regardless of whether it is currently in use. 26 | No lockfile is created, since it gets recreated at need. 27 | 28 | If 29 | .I dest_path 30 | is specified it must be the path of an empty directory 31 | for storing the backup. Otherwise, the backup will be 32 | written to stdout. 33 | 34 | .SH OPTIONS 35 | .TP 36 | .BR \-V 37 | Write the library version number to the standard output, and exit. 38 | .TP 39 | .BR \-q 40 | Be quiet. 41 | .TP 42 | .BR \-c 43 | Compact while copying. Only current data pages will be copied; freed 44 | or unused pages will be omitted from the copy. This option will 45 | slow down the backup process as it is more CPU-intensive. 46 | Currently it fails if the environment has suffered a page leak. 47 | .TP 48 | .BR \-n 49 | Open MDBX environment(s) which do not use subdirectories. 50 | This is legacy option. For now MDBX handles this automatically. 51 | 52 | .SH DIAGNOSTICS 53 | Exit status is zero if no errors occur. 54 | Errors result in a non-zero exit status and 55 | a diagnostic message being written to standard error. 56 | .SH CAVEATS 57 | This utility can trigger significant file size growth if run 58 | in parallel with write transactions, because pages which they 59 | free during copying cannot be reused until the copy is done. 
60 | .SH "SEE ALSO" 61 | .BR mdbx_dump (1), 62 | .BR mdbx_chk (1), 63 | .BR mdbx_stat (1), 64 | .BR mdbx_load (1) 65 | .BR mdbx_drop (1) 66 | .SH AUTHOR 67 | Howard Chu of Symas Corporation , 68 | Leonid Yuriev 69 | -------------------------------------------------------------------------------- /dependencies/libmdbx/man1/mdbx_drop.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2021 Leonid Yuriev . 2 | .\" Copyright 2014-2021 Howard Chu, Symas Corp. All Rights Reserved. 3 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 4 | .TH MDBX_DROP 1 "2021-05-09" "MDBX 0.10.0" 5 | .SH NAME 6 | mdbx_drop \- MDBX database delete tool 7 | .SH SYNOPSIS 8 | .B mdbx_drop 9 | [\c 10 | .BR \-V ] 11 | [\c 12 | .BR \-d ] 13 | [\c 14 | .BI \-s \ subdb\fR] 15 | [\c 16 | .BR \-n ] 17 | .BR \ dbpath 18 | .SH DESCRIPTION 19 | The 20 | .B mdbx_drop 21 | utility empties or deletes a database in the specified 22 | environment. 23 | .SH OPTIONS 24 | .TP 25 | .BR \-V 26 | Write the library version number to the standard output, and exit. 27 | .TP 28 | .BR \-d 29 | Delete the specified database, don't just empty it. 30 | .TP 31 | .BR \-s \ subdb 32 | Operate on a specific subdatabase. If no database is specified, only the main database is dropped. 33 | .TP 34 | .BR \-n 35 | Dump an MDBX database which does not use subdirectories. 36 | This is legacy option. For now MDBX handles this automatically. 37 | 38 | .SH DIAGNOSTICS 39 | Exit status is zero if no errors occur. 40 | Errors result in a non-zero exit status and 41 | a diagnostic message being written to standard error. 
42 | .SH "SEE ALSO" 43 | .BR mdbx_load (1), 44 | .BR mdbx_copy (1), 45 | .BR mdbx_chk (1), 46 | .BR mdbx_stat (1) 47 | .SH AUTHOR 48 | Howard Chu of Symas Corporation 49 | -------------------------------------------------------------------------------- /dependencies/libmdbx/man1/mdbx_dump.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2021 Leonid Yuriev . 2 | .\" Copyright 2014-2015 Howard Chu, Symas Corp. All Rights Reserved. 3 | .\" Copyright 2015,2016 Peter-Service R&D LLC . 4 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 5 | .TH MDBX_DUMP 1 "2021-05-09" "MDBX 0.10.0" 6 | .SH NAME 7 | mdbx_dump \- MDBX environment export tool 8 | .SH SYNOPSIS 9 | .B mdbx_dump 10 | [\c 11 | .BR \-V ] 12 | [\c 13 | .BR \-q ] 14 | [\c 15 | .BI \-f \ file\fR] 16 | [\c 17 | .BR \-l ] 18 | [\c 19 | .BR \-p ] 20 | [\c 21 | .BR \-a \ | 22 | .BI \-s \ subdb\fR] 23 | [\c 24 | .BR \-r ] 25 | [\c 26 | .BR \-n ] 27 | .BR \ dbpath 28 | .SH DESCRIPTION 29 | The 30 | .B mdbx_dump 31 | utility reads a database and writes its contents to the 32 | standard output using a portable flat-text format 33 | understood by the 34 | .BR mdbx_load (1) 35 | utility. 36 | .SH OPTIONS 37 | .TP 38 | .BR \-V 39 | Write the library version number to the standard output, and exit. 40 | .TP 41 | .BR \-q 42 | Be quiet. 43 | .TP 44 | .BR \-f \ file 45 | Write to the specified file instead of to the standard output. 46 | .TP 47 | .BR \-l 48 | List the databases stored in the environment. Just the 49 | names will be listed, no data will be output. 50 | .TP 51 | .BR \-p 52 | If characters in either the key or data items are printing characters (as 53 | defined by isprint(3)), output them directly. This option permits users to 54 | use standard text editors and tools to modify the contents of databases. 
55 | 56 | Note: different systems may have different notions about what characters 57 | are considered printing characters, and databases dumped in this manner may 58 | be less portable to external systems. 59 | .TP 60 | .BR \-a 61 | Dump all of the subdatabases in the environment. 62 | .TP 63 | .BR \-s \ subdb 64 | Dump a specific subdatabase. If no database is specified, only the main database is dumped. 65 | .TP 66 | .BR \-r 67 | Rescue mode. Ignore some errors to dump a corrupted DB. 68 | .TP 69 | .BR \-n 70 | Dump an MDBX database which does not use subdirectories. 71 | This is a legacy option. For now MDBX handles this automatically. 72 | 73 | .SH DIAGNOSTICS 74 | Exit status is zero if no errors occur. 75 | Errors result in a non-zero exit status and 76 | a diagnostic message being written to standard error. 77 | 78 | Dumping and reloading databases that use user-defined comparison functions 79 | will result in new databases that use the default comparison functions. 80 | \fBIn this case it is quite likely that the reloaded database will be 81 | damaged beyond repair permitting neither record storage nor retrieval.\fP 82 | 83 | The only available workaround is to modify the source for the 84 | .BR mdbx_load (1) 85 | utility to load the database using the correct comparison functions. 86 | .SH "SEE ALSO" 87 | .BR mdbx_load (1), 88 | .BR mdbx_copy (1), 89 | .BR mdbx_chk (1), 90 | .BR mdbx_stat (1) 91 | .BR mdbx_drop (1) 92 | .SH AUTHOR 93 | Howard Chu of Symas Corporation , 94 | Leonid Yuriev 95 | -------------------------------------------------------------------------------- /dependencies/libmdbx/man1/mdbx_load.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2021 Leonid Yuriev . 2 | .\" Copyright 2014-2015 Howard Chu, Symas Corp. All Rights Reserved. 3 | .\" Copyright 2015,2016 Peter-Service R&D LLC . 4 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 
5 | .TH MDBX_LOAD 1 "2021-05-09" "MDBX 0.10.0" 6 | .SH NAME 7 | mdbx_load \- MDBX environment import tool 8 | .SH SYNOPSIS 9 | .B mdbx_load 10 | [\c 11 | .BR \-V ] 12 | [\c 13 | .BR \-q ] 14 | [\c 15 | .BR \-a ] 16 | [\c 17 | .BI \-f \ file\fR] 18 | [\c 19 | .BI \-s \ subdb\fR] 20 | [\c 21 | .BR \-N ] 22 | [\c 23 | .BR \-T ] 24 | [\c 25 | .BR \-r ] 26 | [\c 27 | .BR \-n ] 28 | .BR \ dbpath 29 | .SH DESCRIPTION 30 | The 31 | .B mdbx_load 32 | utility reads from the standard input and loads it into the 33 | MDBX environment 34 | .BR dbpath . 35 | 36 | The input to 37 | .B mdbx_load 38 | must be in the output format specified by the 39 | .BR mdbx_dump (1) 40 | utility or as specified by the 41 | .B -T 42 | option below. 43 | 44 | A simple escape mechanism, where newline and backslash (\\) characters are special, is 45 | applied to the text input. Newline characters are interpreted as record separators. 46 | Backslash characters in the text will be interpreted in one of two ways: If the backslash 47 | character precedes another backslash character, the pair will be interpreted as a literal 48 | backslash. If the backslash character precedes any other character, the two characters 49 | following the backslash will be interpreted as a hexadecimal specification of a single 50 | character; for example, \\0a is a newline character in the ASCII character set. 51 | 52 | For this reason, any backslash or newline characters that naturally occur in the text 53 | input must be escaped to avoid misinterpretation by 54 | .BR mdbx_load . 55 | 56 | .SH OPTIONS 57 | .TP 58 | .BR \-V 59 | Write the library version number to the standard output, and exit. 60 | .TP 61 | .BR \-q 62 | Be quiet. 63 | .TP 64 | .BR \-a 65 | Append all records in the order they appear in the input. The input is assumed to already be 66 | in correctly sorted order and no sorting or checking for redundant values will be performed. 
67 | This option must be used to reload data that was produced by running 68 | .B mdbx_dump 69 | on a database that uses custom compare functions. 70 | .TP 71 | .BR \-f \ file 72 | Read from the specified file instead of from the standard input. 73 | .TP 74 | .BR \-s \ subdb 75 | Load a specific subdatabase. If no database is specified, data is loaded into the main database. 76 | .TP 77 | .BR \-N 78 | Don't overwrite existing records when loading into an already existing database; just skip them. 79 | .TP 80 | .BR \-T 81 | Load data from simple text files. The input must be paired lines of text, where the first 82 | line of the pair is the key item, and the second line of the pair is its corresponding 83 | data item. 84 | .TP 85 | .BR \-r 86 | Rescue mode. Ignore errors to load a corrupted DB dump. 87 | .TP 88 | .BR \-n 89 | Load an MDBX database which does not use subdirectories. 90 | This is a legacy option. For now MDBX handles this automatically. 91 | 92 | .SH DIAGNOSTICS 93 | Exit status is zero if no errors occur. 94 | Errors result in a non-zero exit status and 95 | a diagnostic message being written to standard error. 96 | 97 | .SH "SEE ALSO" 98 | .BR mdbx_dump (1), 99 | .BR mdbx_chk (1), 100 | .BR mdbx_stat (1), 101 | .BR mdbx_copy (1) 102 | .BR mdbx_drop (1) 103 | .SH AUTHOR 104 | Howard Chu of Symas Corporation , 105 | Leonid Yuriev 106 | -------------------------------------------------------------------------------- /dependencies/libmdbx/man1/mdbx_stat.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2021 Leonid Yuriev . 2 | .\" Copyright 2012-2015 Howard Chu, Symas Corp. All Rights Reserved. 3 | .\" Copyright 2015,2016 Peter-Service R&D LLC . 4 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 
5 | .TH MDBX_STAT 1 "2021-05-09" "MDBX 0.10.0" 6 | .SH NAME 7 | mdbx_stat \- MDBX environment status tool 8 | .SH SYNOPSIS 9 | .B mdbx_stat 10 | [\c 11 | .BR \-V ] 12 | [\c 13 | .BR \-q ] 14 | [\c 15 | .BR \-p ] 16 | [\c 17 | .BR \-e ] 18 | [\c 19 | .BR \-f [ f [ f ]]] 20 | [\c 21 | .BR \-r [ r ]] 22 | [\c 23 | .BR \-a \ | 24 | .BI \-s \ subdb\fR] 25 | .BR \ dbpath 26 | [\c 27 | .BR \-n ] 28 | .SH DESCRIPTION 29 | The 30 | .B mdbx_stat 31 | utility displays the status of an MDBX environment. 32 | .SH OPTIONS 33 | .TP 34 | .BR \-V 35 | Write the library version number to the standard output, and exit. 36 | .TP 37 | .BR \-q 38 | Be quiet. 39 | .TP 40 | .BR \-p 41 | Display overall statistics of page operations of all (running, completed 42 | and aborted) transactions in the current multi-process session (since the 43 | first process opened the database after everyone had previously closed it). 44 | .TP 45 | .BR \-e 46 | Display information about the database environment. 47 | .TP 48 | .BR \-f 49 | Display information about the environment GC. 50 | If \fB\-ff\fP is given, summarize each GC/freelist entry. 51 | If \fB\-fff\fP is given, display the full list of page IDs in the GC/freelist. 52 | .TP 53 | .BR \-r 54 | Display information about the environment reader table. 55 | Shows the process ID, thread ID, and transaction ID for each active 56 | reader slot. The process ID and transaction ID are in decimal, the 57 | thread ID is in hexadecimal. The transaction ID is displayed as "-" 58 | if the reader does not currently have a read transaction open. 59 | If \fB\-rr\fP is given, check for stale entries in the reader 60 | table and clear them. The reader table will be printed again 61 | after the check is performed. 62 | .TP 63 | .BR \-a 64 | Display the status of all of the subdatabases in the environment. 65 | .TP 66 | .BR \-s \ subdb 67 | Display the status of a specific subdatabase. 
68 | .TP 69 | .BR \-n 70 | Display the status of an MDBX database which does not use subdirectories. 71 | This is a legacy option. For now MDBX handles this automatically 72 | for existing databases, but it may be required when creating new ones. 73 | 74 | .SH DIAGNOSTICS 75 | Exit status is zero if no errors occur. 76 | Errors result in a non-zero exit status and 77 | a diagnostic message being written to standard error. 78 | .SH "SEE ALSO" 79 | .BR mdbx_chk (1), 80 | .BR mdbx_copy (1), 81 | .BR mdbx_dump (1), 82 | .BR mdbx_load (1) 83 | .BR mdbx_drop (1) 84 | .SH AUTHOR 85 | Howard Chu of Symas Corporation , 86 | Leonid Yuriev 87 | -------------------------------------------------------------------------------- /dependencies/lz4/LICENSE: -------------------------------------------------------------------------------- 1 | This repository uses 2 different licenses : 2 | - all files in the `lib` directory use a BSD 2-Clause license 3 | - all other files use a GPLv2 license, unless explicitly stated otherwise 4 | 5 | Relevant license is reminded at the top of each source file, 6 | and with presence of COPYING or LICENSE file in associated directories. 7 | 8 | This model is selected to emphasize that 9 | files in the `lib` directory are designed to be included into 3rd party applications, 10 | while all other files, in `programs`, `tests` or `examples`, 11 | receive more limited attention and support for such scenario. 12 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/.gitignore: -------------------------------------------------------------------------------- 1 | # make install artefact 2 | liblz4.pc 3 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/LICENSE: -------------------------------------------------------------------------------- 1 | LZ4 Library 2 | Copyright (c) 2011-2016, Yann Collet 3 | All rights reserved. 
4 | 5 | Redistribution and use in source and binary forms, with or without modification, 6 | are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above copyright notice, this 12 | list of conditions and the following disclaimer in the documentation and/or 13 | other materials provided with the distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 19 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 22 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/README.md: -------------------------------------------------------------------------------- 1 | LZ4 - Library Files 2 | ================================ 3 | 4 | The `/lib` directory contains many files, but depending on project's objectives, 5 | not all of them are necessary. 6 | 7 | #### Minimal LZ4 build 8 | 9 | The minimum required is **`lz4.c`** and **`lz4.h`**, 10 | which provides the fast compression and decompression algorithms. 11 | They generate and decode data using the [LZ4 block format]. 
12 | 13 | 14 | #### High Compression variant 15 | 16 | For more compression ratio at the cost of compression speed, 17 | the High Compression variant called **lz4hc** is available. 18 | Add files **`lz4hc.c`** and **`lz4hc.h`**. 19 | This variant also compresses data using the [LZ4 block format], 20 | and depends on regular `lib/lz4.*` source files. 21 | 22 | 23 | #### Frame support, for interoperability 24 | 25 | In order to produce compressed data compatible with `lz4` command line utility, 26 | it's necessary to use the [official interoperable frame format]. 27 | This format is generated and decoded automatically by the **lz4frame** library. 28 | Its public API is described in `lib/lz4frame.h`. 29 | In order to work properly, lz4frame needs all other modules present in `/lib`, 30 | including, lz4 and lz4hc, and also **xxhash**. 31 | So it's necessary to include all `*.c` and `*.h` files present in `/lib`. 32 | 33 | 34 | #### Advanced / Experimental API 35 | 36 | Definitions which are not guaranteed to remain stable in future versions, 37 | are protected behind macros, such as `LZ4_STATIC_LINKING_ONLY`. 38 | As the name strongly implies, these definitions should only be invoked 39 | in the context of static linking ***only***. 40 | Otherwise, dependent application may fail on API or ABI break in the future. 41 | The associated symbols are also not exposed by the dynamic library by default. 42 | Should they be nonetheless needed, it's possible to force their publication 43 | by using build macros `LZ4_PUBLISH_STATIC_FUNCTIONS` 44 | and `LZ4F_PUBLISH_STATIC_FUNCTIONS`. 45 | 46 | 47 | #### Build macros 48 | 49 | The following build macro can be selected to adjust source code behavior at compilation time : 50 | 51 | - `LZ4_FAST_DEC_LOOP` : this triggers a speed optimized decompression loop, more powerful on modern cpus. 52 | This loop works great on `x86`, `x64` and `aarch64` cpus, and is automatically enabled for them. 
53 | It's also possible to enable or disable it manually, by passing `LZ4_FAST_DEC_LOOP=1` or `0` to the preprocessor. 54 | For example, with `gcc` : `-DLZ4_FAST_DEC_LOOP=1`, 55 | and with `make` : `CPPFLAGS+=-DLZ4_FAST_DEC_LOOP=1 make lz4`. 56 | 57 | - `LZ4_DISTANCE_MAX` : control the maximum offset that the compressor will allow. 58 | Set to 65535 by default, which is the maximum value supported by lz4 format. 59 | Reducing maximum distance will reduce opportunities for LZ4 to find matches, 60 | hence will produce a worse compression ratio. 61 | However, a smaller max distance can allow compatibility with specific decoders using limited memory budget. 62 | This build macro only influences the compressed output of the compressor. 63 | 64 | - `LZ4_DISABLE_DEPRECATE_WARNINGS` : invoking a deprecated function will make the compiler generate a warning. 65 | This is meant to invite users to update their source code. 66 | Should this be a problem, it's generally possible to make the compiler ignore these warnings, 67 | for example with `-Wno-deprecated-declarations` on `gcc`, 68 | or `_CRT_SECURE_NO_WARNINGS` for Visual Studio. 69 | This build macro offers another project-specific method 70 | by defining `LZ4_DISABLE_DEPRECATE_WARNINGS` before including the LZ4 header files. 71 | 72 | - `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to 's `malloc`, `calloc` and `free` 73 | by user-defined functions, which must be called `LZ4_malloc()`, `LZ4_calloc()` and `LZ4_free()`. 74 | User functions must be available at link time. 75 | 76 | - `LZ4_FORCE_SW_BITCOUNT` : by default, the compression algorithm tries to determine lengths 77 | by using bitcount instructions, generally implemented as fast single instructions in many cpus. 78 | In case the target cpus doesn't support it, or compiler intrinsic doesn't work, or feature bad performance, 79 | it's possible to use an optimized software path instead. 80 | This is achieved by setting this build macros . 
81 | In most cases, it's not expected to be necessary, 82 | but it can be legitimately considered for less common platforms. 83 | 84 | - `LZ4_ALIGN_TEST` : alignment test ensures that the memory area 85 | passed as argument to become a compression state is suitably aligned. 86 | This test can be disabled if it proves flaky, by setting this value to 0. 87 | 88 | 89 | #### Amalgamation 90 | 91 | lz4 source code can be amalgamated into a single file. 92 | One can combine all source code into `lz4_all.c` by using following command: 93 | ``` 94 | cat lz4.c lz4hc.c lz4frame.c > lz4_all.c 95 | ``` 96 | (`cat` file order is important) then compile `lz4_all.c`. 97 | All `*.h` files present in `/lib` remain necessary to compile `lz4_all.c`. 98 | 99 | 100 | #### Windows : using MinGW+MSYS to create DLL 101 | 102 | DLL can be created using MinGW+MSYS with the `make liblz4` command. 103 | This command creates `dll\liblz4.dll` and the import library `dll\liblz4.lib`. 104 | To override the `dlltool` command when cross-compiling on Linux, just set the `DLLTOOL` variable. Example of cross compilation on Linux with mingw-w64 64 bits: 105 | ``` 106 | make BUILD_STATIC=no CC=x86_64-w64-mingw32-gcc DLLTOOL=x86_64-w64-mingw32-dlltool OS=Windows_NT 107 | ``` 108 | The import library is only required with Visual C++. 109 | The header files `lz4.h`, `lz4hc.h`, `lz4frame.h` and the dynamic library 110 | `dll\liblz4.dll` are required to compile a project using gcc/MinGW. 111 | The dynamic library has to be added to linking options. 112 | It means that if a project that uses LZ4 consists of a single `test-dll.c` 113 | file it should be linked with `dll\liblz4.dll`. For example: 114 | ``` 115 | $(CC) $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\liblz4.dll 116 | ``` 117 | The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`. 118 | 119 | 120 | #### Miscellaneous 121 | 122 | Other files present in the directory are not source code. 
They are : 123 | 124 | - `LICENSE` : contains the BSD license text 125 | - `Makefile` : `make` script to compile and install lz4 library (static and dynamic) 126 | - `liblz4.pc.in` : for `pkg-config` (used in `make install`) 127 | - `README.md` : this file 128 | 129 | [official interoperable frame format]: ../doc/lz4_Frame_format.md 130 | [LZ4 block format]: ../doc/lz4_Block_format.md 131 | 132 | 133 | #### License 134 | 135 | All source material within __lib__ directory are BSD 2-Clause licensed. 136 | See [LICENSE](LICENSE) for details. 137 | The license is also reminded at the top of each source file. 138 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/dll/example/README.md: -------------------------------------------------------------------------------- 1 | LZ4 Windows binary package 2 | ==================================== 3 | 4 | #### The package contents 5 | 6 | - `lz4.exe` : Command Line Utility, supporting gzip-like arguments 7 | - `dll\liblz4.dll` : The DLL of LZ4 library 8 | - `dll\liblz4.lib` : The import library of LZ4 library for Visual C++ 9 | - `example\` : The example of usage of LZ4 library 10 | - `include\` : Header files required with LZ4 library 11 | - `static\liblz4_static.lib` : The static LZ4 library 12 | 13 | 14 | #### Usage of Command Line Interface 15 | 16 | Command Line Interface (CLI) supports gzip-like arguments. 17 | By default CLI takes an input file and compresses it to an output file: 18 | ``` 19 | Usage: lz4 [arg] [input] [output] 20 | ``` 21 | The full list of commands for CLI can be obtained with `-h` or `-H`. The ratio can 22 | be improved with commands from `-3` to `-16` but higher levels also have slower 23 | compression. CLI includes in-memory compression benchmark module with compression 24 | levels starting from `-b` and ending with `-e` with iteration time of `-i` seconds. 25 | CLI supports aggregation of parameters i.e. 
`-b1`, `-e18`, and `-i1` can be joined 26 | into `-b1e18i1`. 27 | 28 | 29 | #### The example of usage of static and dynamic LZ4 libraries with gcc/MinGW 30 | 31 | Use `cd example` and `make` to build `fullbench-dll` and `fullbench-lib`. 32 | `fullbench-dll` uses a dynamic LZ4 library from the `dll` directory. 33 | `fullbench-lib` uses a static LZ4 library from the `lib` directory. 34 | 35 | 36 | #### Using LZ4 DLL with gcc/MinGW 37 | 38 | The header files from `include\` and the dynamic library `dll\liblz4.dll` 39 | are required to compile a project using gcc/MinGW. 40 | The dynamic library has to be added to linking options. 41 | It means that if a project that uses LZ4 consists of a single `test-dll.c` 42 | file it should be linked with `dll\liblz4.dll`. For example: 43 | ``` 44 | gcc $(CFLAGS) -Iinclude\ test-dll.c -o test-dll dll\liblz4.dll 45 | ``` 46 | The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`. 47 | 48 | 49 | #### The example of usage of static and dynamic LZ4 libraries with Visual C++ 50 | 51 | Open `example\fullbench-dll.sln` to compile `fullbench-dll` that uses a 52 | dynamic LZ4 library from the `dll` directory. The solution works with Visual C++ 53 | 2010 or newer. When the solution is opened with a version of Visual C++ newer than 2010, 54 | it will be upgraded to the current version. 55 | 56 | 57 | #### Using LZ4 DLL with Visual C++ 58 | 59 | The header files from `include\` and the import library `dll\liblz4.lib` 60 | are required to compile a project using Visual C++. 61 | 62 | 1. The header files should be added to `Additional Include Directories` that can 63 | be found in project properties `C/C++` then `General`. 64 | 2. The import library has to be added to `Additional Dependencies` that can 65 | be found in project properties `Linker` then `Input`. 
66 | If one will provide only the name `liblz4.lib` without a full path to the library 67 | the directory has to be added to `Linker\General\Additional Library Directories`. 68 | 69 | The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`. 70 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/dll/example/fullbench-dll.sln: -------------------------------------------------------------------------------- 1 | Microsoft Visual Studio Solution File, Format Version 12.00 2 | # Visual Studio Express 2012 for Windows Desktop 3 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench-dll", "fullbench-dll.vcxproj", "{13992FD2-077E-4954-B065-A428198201A9}" 4 | EndProject 5 | Global 6 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 7 | Debug|Win32 = Debug|Win32 8 | Debug|x64 = Debug|x64 9 | Release|Win32 = Release|Win32 10 | Release|x64 = Release|x64 11 | EndGlobalSection 12 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 13 | {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.ActiveCfg = Debug|Win32 14 | {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.Build.0 = Debug|Win32 15 | {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.ActiveCfg = Debug|x64 16 | {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.Build.0 = Debug|x64 17 | {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.ActiveCfg = Release|Win32 18 | {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.Build.0 = Release|Win32 19 | {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.ActiveCfg = Release|x64 20 | {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.Build.0 = Release|x64 21 | EndGlobalSection 22 | GlobalSection(SolutionProperties) = preSolution 23 | HideSolutionNode = FALSE 24 | EndGlobalSection 25 | EndGlobal 26 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/dll/example/fullbench-dll.vcxproj: 
-------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | Win32 7 | 8 | 9 | Debug 10 | x64 11 | 12 | 13 | Release 14 | Win32 15 | 16 | 17 | Release 18 | x64 19 | 20 | 21 | 22 | {13992FD2-077E-4954-B065-A428198201A9} 23 | Win32Proj 24 | fullbench-dll 25 | $(SolutionDir)bin\$(Platform)_$(Configuration)\ 26 | $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ 27 | 28 | 29 | 30 | Application 31 | true 32 | Unicode 33 | 34 | 35 | Application 36 | true 37 | Unicode 38 | 39 | 40 | Application 41 | false 42 | true 43 | Unicode 44 | 45 | 46 | Application 47 | false 48 | true 49 | Unicode 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | true 69 | $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); 70 | 71 | 72 | true 73 | $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); 74 | true 75 | 76 | 77 | false 78 | $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); 79 | 80 | 81 | false 82 | $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); 83 | true 84 | 85 | 86 | 87 | 88 | 89 | Level4 90 | Disabled 91 | WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) 92 | true 93 | false 94 | ..\include 95 | 96 | 97 | Console 98 | true 99 | $(SolutionDir)..\dll;%(AdditionalLibraryDirectories) 100 | liblz4.lib;%(AdditionalDependencies) 101 | false 102 | 103 | 104 | 105 | 106 | 107 | 108 | Level4 109 | Disabled 110 | WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) 111 | true 112 | true 113 | /analyze:stacksize295252 %(AdditionalOptions) 114 | ..\include 115 | 116 | 117 | 
Console 118 | true 119 | $(SolutionDir)..\dll;%(AdditionalLibraryDirectories) 120 | liblz4.lib;%(AdditionalDependencies) 121 | 122 | 123 | 124 | 125 | Level4 126 | 127 | 128 | MaxSpeed 129 | true 130 | true 131 | WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) 132 | false 133 | false 134 | ..\include 135 | 136 | 137 | Console 138 | true 139 | true 140 | true 141 | $(SolutionDir)..\dll;%(AdditionalLibraryDirectories) 142 | liblz4.lib;%(AdditionalDependencies) 143 | false 144 | 145 | 146 | 147 | 148 | Level4 149 | 150 | 151 | MaxSpeed 152 | true 153 | true 154 | WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) 155 | false 156 | true 157 | /analyze:stacksize295252 %(AdditionalOptions) 158 | ..\include 159 | 160 | 161 | Console 162 | true 163 | true 164 | true 165 | $(SolutionDir)..\dll;%(AdditionalLibraryDirectories) 166 | liblz4.lib;%(AdditionalDependencies) 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/dll/liblz4.def: -------------------------------------------------------------------------------- 1 | LIBRARY liblz4.dll 2 | EXPORTS 3 | LZ4F_compressBegin 4 | LZ4F_compressBound 5 | LZ4F_compressEnd 6 | LZ4F_compressFrame 7 | LZ4F_compressFrameBound 8 | LZ4F_compressUpdate 9 | LZ4F_createCompressionContext 10 | LZ4F_createDecompressionContext 11 | LZ4F_decompress 12 | LZ4F_flush 13 | LZ4F_freeCompressionContext 14 | LZ4F_freeDecompressionContext 15 | LZ4F_getErrorName 16 | LZ4F_getFrameInfo 17 | LZ4F_getVersion 18 | LZ4F_isError 19 | LZ4_compress 20 | LZ4_compressBound 21 | LZ4_compressHC 22 | LZ4_compressHC_continue 23 | LZ4_compressHC_limitedOutput 24 | LZ4_compressHC_limitedOutput_continue 25 | LZ4_compressHC_limitedOutput_withStateHC 26 | LZ4_compressHC_withStateHC 27 | LZ4_compress_HC 28 | LZ4_compress_HC_continue 29 | LZ4_compress_HC_extStateHC 30 | LZ4_compress_continue 31 | 
LZ4_compress_default 32 | LZ4_compress_destSize 33 | LZ4_compress_fast 34 | LZ4_compress_fast_continue 35 | LZ4_compress_fast_extState 36 | LZ4_compress_limitedOutput 37 | LZ4_compress_limitedOutput_continue 38 | LZ4_compress_limitedOutput_withState 39 | LZ4_compress_withState 40 | LZ4_createStream 41 | LZ4_createStreamDecode 42 | LZ4_createStreamHC 43 | LZ4_decompress_fast 44 | LZ4_decompress_fast_continue 45 | LZ4_decompress_fast_usingDict 46 | LZ4_decompress_safe 47 | LZ4_decompress_safe_continue 48 | LZ4_decompress_safe_partial 49 | LZ4_decompress_safe_usingDict 50 | LZ4_freeStream 51 | LZ4_freeStreamDecode 52 | LZ4_freeStreamHC 53 | LZ4_loadDict 54 | LZ4_loadDictHC 55 | LZ4_resetStream 56 | LZ4_resetStreamHC 57 | LZ4_saveDict 58 | LZ4_saveDictHC 59 | LZ4_setStreamDecode 60 | LZ4_sizeofState 61 | LZ4_sizeofStateHC 62 | LZ4_versionNumber 63 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/liblz4-dll.rc.in: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | // DLL version information. 
4 | 1 VERSIONINFO 5 | FILEVERSION @LIBVER_MAJOR@,@LIBVER_MINOR@,@LIBVER_PATCH@,0 6 | PRODUCTVERSION @LIBVER_MAJOR@,@LIBVER_MINOR@,@LIBVER_PATCH@,0 7 | FILEFLAGSMASK VS_FFI_FILEFLAGSMASK 8 | #ifdef _DEBUG 9 | FILEFLAGS VS_FF_DEBUG | VS_FF_PRERELEASE 10 | #else 11 | FILEFLAGS 0 12 | #endif 13 | FILEOS VOS_NT_WINDOWS32 14 | FILETYPE VFT_DLL 15 | FILESUBTYPE VFT2_UNKNOWN 16 | BEGIN 17 | BLOCK "StringFileInfo" 18 | BEGIN 19 | BLOCK "040904B0" 20 | BEGIN 21 | VALUE "CompanyName", "Yann Collet" 22 | VALUE "FileDescription", "Extremely fast compression" 23 | VALUE "FileVersion", "@LIBVER_MAJOR@.@LIBVER_MINOR@.@LIBVER_PATCH@.0" 24 | VALUE "InternalName", "@LIBLZ4@" 25 | VALUE "LegalCopyright", "Copyright (C) 2013-2016, Yann Collet" 26 | VALUE "OriginalFilename", "@LIBLZ4@.dll" 27 | VALUE "ProductName", "LZ4" 28 | VALUE "ProductVersion", "@LIBVER_MAJOR@.@LIBVER_MINOR@.@LIBVER_PATCH@.0" 29 | END 30 | END 31 | BLOCK "VarFileInfo" 32 | BEGIN 33 | VALUE "Translation", 0x0409, 1200 34 | END 35 | END 36 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/liblz4.pc.in: -------------------------------------------------------------------------------- 1 | # LZ4 - Fast LZ compression algorithm 2 | # Copyright (C) 2011-2014, Yann Collet. 3 | # BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 4 | 5 | prefix=@PREFIX@ 6 | libdir=@LIBDIR@ 7 | includedir=@INCLUDEDIR@ 8 | 9 | Name: lz4 10 | Description: extremely fast lossless compression algorithm library 11 | URL: http://www.lz4.org/ 12 | Version: @VERSION@ 13 | Libs: -L@LIBDIR@ -llz4 14 | Cflags: -I@INCLUDEDIR@ 15 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/lz4frame_static.h: -------------------------------------------------------------------------------- 1 | /* 2 | LZ4 auto-framing library 3 | Header File for static linking only 4 | Copyright (C) 2011-2016, Yann Collet. 
5 | 6 | BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are 10 | met: 11 | 12 | * Redistributions of source code must retain the above copyright 13 | notice, this list of conditions and the following disclaimer. 14 | * Redistributions in binary form must reproduce the above 15 | copyright notice, this list of conditions and the following disclaimer 16 | in the documentation and/or other materials provided with the 17 | distribution. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | You can contact the author at : 32 | - LZ4 source repository : https://github.com/lz4/lz4 33 | - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c 34 | */ 35 | 36 | #ifndef LZ4FRAME_STATIC_H_0398209384 37 | #define LZ4FRAME_STATIC_H_0398209384 38 | 39 | /* The declarations that formerly were made here have been merged into 40 | * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward, 41 | * it is recommended to simply include that header directly. 
42 | */ 43 | 44 | #define LZ4F_STATIC_LINKING_ONLY 45 | #include "lz4frame.h" 46 | 47 | #endif /* LZ4FRAME_STATIC_H_0398209384 */ 48 | -------------------------------------------------------------------------------- /dependencies/lz4/lib/xxhash.h: -------------------------------------------------------------------------------- 1 | /* 2 | xxHash - Extremely Fast Hash algorithm 3 | Header File 4 | Copyright (C) 2012-2016, Yann Collet. 5 | 6 | BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are 10 | met: 11 | 12 | * Redistributions of source code must retain the above copyright 13 | notice, this list of conditions and the following disclaimer. 14 | * Redistributions in binary form must reproduce the above 15 | copyright notice, this list of conditions and the following disclaimer 16 | in the documentation and/or other materials provided with the 17 | distribution. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | 31 | You can contact the author at : 32 | - xxHash source repository : https://github.com/Cyan4973/xxHash 33 | */ 34 | 35 | /* Notice extracted from xxHash homepage : 36 | 37 | xxHash is an extremely fast Hash algorithm, running at RAM speed limits. 38 | It also successfully passes all tests from the SMHasher suite. 39 | 40 | Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) 41 | 42 | Name Speed Q.Score Author 43 | xxHash 5.4 GB/s 10 44 | CrapWow 3.2 GB/s 2 Andrew 45 | MumurHash 3a 2.7 GB/s 10 Austin Appleby 46 | SpookyHash 2.0 GB/s 10 Bob Jenkins 47 | SBox 1.4 GB/s 9 Bret Mulvey 48 | Lookup3 1.2 GB/s 9 Bob Jenkins 49 | SuperFastHash 1.2 GB/s 1 Paul Hsieh 50 | CityHash64 1.05 GB/s 10 Pike & Alakuijala 51 | FNV 0.55 GB/s 5 Fowler, Noll, Vo 52 | CRC32 0.43 GB/s 9 53 | MD5-32 0.33 GB/s 10 Ronald L. Rivest 54 | SHA1-32 0.28 GB/s 10 55 | 56 | Q.Score is a measure of quality of the hash function. 57 | It depends on successfully passing SMHasher test set. 58 | 10 is a perfect score. 59 | 60 | A 64-bit version, named XXH64, is available since r35. 61 | It offers much better speed, but for 64-bit applications only. 62 | Name Speed on 64 bits Speed on 32 bits 63 | XXH64 13.8 GB/s 1.9 GB/s 64 | XXH32 6.8 GB/s 6.0 GB/s 65 | */ 66 | 67 | #ifndef XXHASH_H_5627135585666179 68 | #define XXHASH_H_5627135585666179 1 69 | 70 | #if defined (__cplusplus) 71 | extern "C" { 72 | #endif 73 | 74 | 75 | /* **************************** 76 | * Definitions 77 | ******************************/ 78 | #include /* size_t */ 79 | typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; 80 | 81 | 82 | /* **************************** 83 | * API modifier 84 | ******************************/ 85 | /** XXH_INLINE_ALL (and XXH_PRIVATE_API) 86 | * This is useful to include xxhash functions in `static` mode 87 | * in order to inline them, and remove their symbol from the public list. 88 | * Inlining can offer dramatic performance improvement on small keys. 
89 | * Methodology : 90 | * #define XXH_INLINE_ALL 91 | * #include "xxhash.h" 92 | * `xxhash.c` is automatically included. 93 | * It's not useful to compile and link it as a separate module. 94 | */ 95 | #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) 96 | # ifndef XXH_STATIC_LINKING_ONLY 97 | # define XXH_STATIC_LINKING_ONLY 98 | # endif 99 | # if defined(__GNUC__) 100 | # define XXH_PUBLIC_API static __inline __attribute__((unused)) 101 | # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) 102 | # define XXH_PUBLIC_API static inline 103 | # elif defined(_MSC_VER) 104 | # define XXH_PUBLIC_API static __inline 105 | # else 106 | /* this version may generate warnings for unused static functions */ 107 | # define XXH_PUBLIC_API static 108 | # endif 109 | #else 110 | # define XXH_PUBLIC_API /* do nothing */ 111 | #endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ 112 | 113 | /*! XXH_NAMESPACE, aka Namespace Emulation : 114 | * 115 | * If you want to include _and expose_ xxHash functions from within your own library, 116 | * but also want to avoid symbol collisions with other libraries which may also include xxHash, 117 | * 118 | * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library 119 | * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). 120 | * 121 | * Note that no change is required within the calling program as long as it includes `xxhash.h` : 122 | * regular symbol name will be automatically translated by this header. 
123 | */ 124 | #ifdef XXH_NAMESPACE 125 | # define XXH_CAT(A,B) A##B 126 | # define XXH_NAME2(A,B) XXH_CAT(A,B) 127 | # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) 128 | # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) 129 | # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) 130 | # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) 131 | # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) 132 | # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) 133 | # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) 134 | # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) 135 | # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) 136 | # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) 137 | # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) 138 | # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) 139 | # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) 140 | # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) 141 | # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) 142 | # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) 143 | # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) 144 | # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) 145 | # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) 146 | #endif 147 | 148 | 149 | /* ************************************* 150 | * Version 151 | ***************************************/ 152 | #define XXH_VERSION_MAJOR 0 153 | #define XXH_VERSION_MINOR 6 154 | #define XXH_VERSION_RELEASE 5 155 | #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) 156 | XXH_PUBLIC_API unsigned XXH_versionNumber (void); 157 | 158 | 159 | /*-********************************************************************** 160 | * 
32-bit hash 161 | ************************************************************************/ 162 | typedef unsigned int XXH32_hash_t; 163 | 164 | /*! XXH32() : 165 | Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". 166 | The memory between input & input+length must be valid (allocated and read-accessible). 167 | "seed" can be used to alter the result predictably. 168 | Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ 169 | XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); 170 | 171 | /*====== Streaming ======*/ 172 | typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ 173 | XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); 174 | XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); 175 | XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); 176 | 177 | XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); 178 | XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); 179 | XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); 180 | 181 | /* 182 | * Streaming functions generate the xxHash of an input provided in multiple segments. 183 | * Note that, for small input, they are slower than single-call functions, due to state management. 184 | * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. 185 | * 186 | * XXH state must first be allocated, using XXH*_createState() . 187 | * 188 | * Start a new hash by initializing state with a seed, using XXH*_reset(). 189 | * 190 | * Then, feed the hash state by calling XXH*_update() as many times as necessary. 191 | * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. 192 | * 193 | * Finally, a hash value can be produced anytime, by using XXH*_digest(). 
194 | * This function returns the nn-bits hash as an int or long long. 195 | * 196 | * It's still possible to continue inserting input into the hash state after a digest, 197 | * and generate some new hashes later on, by calling again XXH*_digest(). 198 | * 199 | * When done, free XXH state space if it was allocated dynamically. 200 | */ 201 | 202 | /*====== Canonical representation ======*/ 203 | 204 | typedef struct { unsigned char digest[4]; } XXH32_canonical_t; 205 | XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); 206 | XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); 207 | 208 | /* Default result type for XXH functions are primitive unsigned 32 and 64 bits. 209 | * The canonical representation uses human-readable write convention, aka big-endian (large digits first). 210 | * These functions allow transformation of hash result into and from its canonical format. 211 | * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. 212 | */ 213 | 214 | 215 | #ifndef XXH_NO_LONG_LONG 216 | /*-********************************************************************** 217 | * 64-bit hash 218 | ************************************************************************/ 219 | typedef unsigned long long XXH64_hash_t; 220 | 221 | /*! XXH64() : 222 | Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". 223 | "seed" can be used to alter the result predictably. 224 | This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). 
225 | */ 226 | XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); 227 | 228 | /*====== Streaming ======*/ 229 | typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ 230 | XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); 231 | XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); 232 | XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); 233 | 234 | XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); 235 | XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); 236 | XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); 237 | 238 | /*====== Canonical representation ======*/ 239 | typedef struct { unsigned char digest[8]; } XXH64_canonical_t; 240 | XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); 241 | XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); 242 | #endif /* XXH_NO_LONG_LONG */ 243 | 244 | 245 | 246 | #ifdef XXH_STATIC_LINKING_ONLY 247 | 248 | /* ================================================================================================ 249 | This section contains declarations which are not guaranteed to remain stable. 250 | They may change in future versions, becoming incompatible with a different version of the library. 251 | These declarations should only be used with static linking. 252 | Never use them in association with dynamic linking ! 253 | =================================================================================================== */ 254 | 255 | /* These definitions are only present to allow 256 | * static allocation of XXH state, on stack or in a struct for example. 257 | * Never **ever** use members directly. 
*/ 258 | 259 | #if !defined (__VMS) \ 260 | && (defined (__cplusplus) \ 261 | || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) 262 | # include 263 | 264 | struct XXH32_state_s { 265 | uint32_t total_len_32; 266 | uint32_t large_len; 267 | uint32_t v1; 268 | uint32_t v2; 269 | uint32_t v3; 270 | uint32_t v4; 271 | uint32_t mem32[4]; 272 | uint32_t memsize; 273 | uint32_t reserved; /* never read nor write, might be removed in a future version */ 274 | }; /* typedef'd to XXH32_state_t */ 275 | 276 | struct XXH64_state_s { 277 | uint64_t total_len; 278 | uint64_t v1; 279 | uint64_t v2; 280 | uint64_t v3; 281 | uint64_t v4; 282 | uint64_t mem64[4]; 283 | uint32_t memsize; 284 | uint32_t reserved[2]; /* never read nor write, might be removed in a future version */ 285 | }; /* typedef'd to XXH64_state_t */ 286 | 287 | # else 288 | 289 | struct XXH32_state_s { 290 | unsigned total_len_32; 291 | unsigned large_len; 292 | unsigned v1; 293 | unsigned v2; 294 | unsigned v3; 295 | unsigned v4; 296 | unsigned mem32[4]; 297 | unsigned memsize; 298 | unsigned reserved; /* never read nor write, might be removed in a future version */ 299 | }; /* typedef'd to XXH32_state_t */ 300 | 301 | # ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ 302 | struct XXH64_state_s { 303 | unsigned long long total_len; 304 | unsigned long long v1; 305 | unsigned long long v2; 306 | unsigned long long v3; 307 | unsigned long long v4; 308 | unsigned long long mem64[4]; 309 | unsigned memsize; 310 | unsigned reserved[2]; /* never read nor write, might be removed in a future version */ 311 | }; /* typedef'd to XXH64_state_t */ 312 | # endif 313 | 314 | # endif 315 | 316 | 317 | #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) 318 | # include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ 319 | #endif 320 | 321 | #endif /* XXH_STATIC_LINKING_ONLY */ 322 | 323 | 324 | #if defined (__cplusplus) 325 | } 326 | #endif 327 | 328 | #endif /* 
XXHASH_H_5627135585666179 */ 329 | -------------------------------------------------------------------------------- /deps.ts: -------------------------------------------------------------------------------- 1 | /*export { toBufferKey as keyValueToBuffer, compareKeys, compareKeys as compareKey, fromBufferKey as bufferToKeyValue } */ 2 | import { setExternals } from './external.js'; 3 | import * as orderedBinary from 'https://deno.land/x/orderedbinary@v1.2.2/index.js'; 4 | import { Encoder as MsgpackrEncoder } from 'https://deno.land/x/msgpackr@v1.5.0/index.js'; 5 | import { WeakLRUCache } from 'https://deno.land/x/weakcache@v1.1.3/index.js'; 6 | function arch() { 7 | return Deno.build.arch; 8 | } 9 | import * as path from 'https://deno.land/std/node/path.ts'; 10 | export { fileURLToPath } from 'https://deno.land/std/node/url.ts'; 11 | import { EventEmitter } from 'https://deno.land/std/node/events.ts' 12 | setExternals({ orderedBinary, MsgpackrEncoder, WeakLRUCache, arch, path, EventEmitter, fs: Deno }); -------------------------------------------------------------------------------- /dict/dict.txt: -------------------------------------------------------------------------------- 1 | {"text":"that with they have this from word what some other were which their time will said each tell does three want well also play small home read hand port large spell even land here must high such follow change went light kind need house picture again animal point mother world near build self earth father work part take place made live where after back little only round year came show every good give under name very through just form sentence great think help line differ turn cause much mean before move right same there when your about many then them write would like these long make thing look more could come number sound most people over know water than call first down side been find head stand page should country found answer school grow study still learn plant cover food four 
between state keep never last thought city tree cross farm hard start might story draw left late don’t while press close night real life north book carry took science room friend began idea fish mountain stop once base hear horse sure watch color face wood main open seem together next white children begin walk example ease paper group always music those both mark often letter until mile river feet care second enough plain girl usual young ready above ever list though feel talk bird soon body family direct pose leave song measure door product black short numeral class wind question happen complete ship area half rock order fire south problem piece told knew pass since whole king street inch multiply nothing course stay wheel full force blue object decide surface deep moon island foot system busy test record boat common gold possible plane stead wonder laugh thousand check game shape equate miss brought heat snow tire bring distant fill east paint language among unit power town fine certain fall lead dark machine note wait plan figure star noun field rest correct able pound done beauty drive stood contain front teach week final gave green quick develop ocean warm free minute strong special mind behind clear tail produce fact space heard best hour better true during hundred five remember step early hold west ground interest reach fast verb sing listen table travel less morning simple several vowel toward against pattern slow center love person money serve appear road rain rule govern pull cold notice voice energy hunt probable brother ride cell believe perhaps pick sudden count square reason length represent subject region size vary settle speak weight general matter circle pair include divide syllable felt grand ball wave drop present heavy dance engine position wide sail material fraction forest race window store summer train sleep prove lone exercise wall catch mount wish board winter written wild instrument kept glass grass edge sign visit past soft bright weather 
month million bear finish happy hope flower clothe strange gone trade melody trip office receive mouth exact symbol least trouble shout except wrote seed tone join suggest clean break lady yard rise blow blood touch grew cent team wire cost lost brown wear garden equal sent choose fell flow fair bank collect save control decimal else quite broke case middle kill lake moment scale loud spring observe child straight consonant nation dictionary milk speed method organ section dress cloud surprise quiet stone tiny climb cool design poor experiment bottom iron single stick flat twenty skin smile crease hole jump baby eight village meet root raise solve metal whether push seven paragraph third shall held hair describe cook floor either result burn hill safe century consider type coast copy phrase silent tall sand soil roll temperature finger industry value fight beat excite natural view sense capital won’t chair danger fruit rich thick soldier process operate practice separate difficult doctor please protect noon crop modern element student corner party supply whose locate ring character insect caught period indicate radio spoke atom human history effect electric expect bone rail imagine provide agree thus gentle woman captain guess necessary sharp wing create neighbor wash rather crowd corn compare poem string bell depend meat tube famous dollar stream fear sight thin triangle planet hurry chief colony clock mine enter major fresh search send yellow allow print dead spot desert suit current lift rose arrive master track parent shore division sheet substance favor connect post spend chord glad original share station bread charge proper offer segment slave duck instant market degree populate chick dear enemy reply drink occur support speech nature range steam motion path liquid meant quotient teeth shell neck oxygen sugar death pretty skill women season solution magnet silver thank branch match suffix especially afraid huge sister steel discuss forward similar guide 
experience score apple bought pitch coat mass card band rope slip dream evening condition feed tool total basic smell valley double seat continue block chart sell success company subtract event particular deal swim term opposite wife shoe shoulder spread arrange camp invent cotton born determine quart nine truck noise level chance gather shop stretch throw shine property column molecule select wrong gray repeat require broad prepare salt nose plural anger claim continent","Data":{"Id":1,"Title":"I","Children":[]}},"data":{"id":0,"title":"A","label":"no","definition":"yes","enabled":true,"disabled":false,"count":2,"children":[{"count":2},{}],"types":[true,false,null],"type":null}} -------------------------------------------------------------------------------- /dict/dict2.txt: -------------------------------------------------------------------------------- 1 | a ability able about above accept according account across act action activity actually add address administration admit adult affect after again against age agency agent ago agree agreement ahead air all allow almost alone along already also although always American among amount analysis and animal another answer any anyone anything appear apply approach area argue arm around arrive art article artist as ask assume at attack attention attorney audience author authority available avoid away baby back bad bag ball bank bar base be beat beautiful because become bed before begin behavior behind believe benefit best better between beyond big bill billion bit black blood blue board body book born both box boy break bring brother budget build building business but buy by call camera campaign can cancer candidate capital car card care career carry case catch cause cell center central century certain certainly chair challenge chance change character charge check child choice choose church citizen city civil claim class clear clearly close coach cold collection college color come commercial common community company 
compare computer concern condition conference Congress consider consumer contain continue control cost could country couple course court cover create crime cultural culture cup current customer cut dark data daughter day dead deal death debate decade decide decision deep defense degree Democrat democratic describe design despite detail determine develop development die difference different difficult dinner direction director discover discuss discussion disease do doctor dog door down draw dream drive drop drug during each early east easy eat economic economy edge education effect effort eight either election else employee end energy enjoy enough enter entire environment environmental especially establish even evening event ever every everybody everyone everything evidence exactly example executive exist expect experience expert explain eye face fact factor fail fall family far fast father fear federal feel feeling few field fight figure fill film final finally financial find fine finger finish fire firm first fish five floor fly focus follow food foot for force foreign forget form former forward four free friend from front full fund future game garden gas general generation get girl give glass go goal good government great green ground group grow growth guess gun guy hair half hand hang happen happy hard have he head health hear heart heat heavy help her here herself high him himself his history hit hold home hope hospital hot hotel hour house how however huge human hundred husband I idea identify if image imagine impact important improve in include including increase indeed indicate individual industry information inside instead institution interest interesting international interview into investment involve issue it item its itself job join just keep key kid kill kind kitchen know knowledge land language large last late later laugh law lawyer lay lead leader learn least leave left leg legal less let letter level lie life light like likely line list listen little 
live local long look lose loss lot love low machine magazine main maintain major majority make man manage management manager many market marriage material matter may maybe me mean measure media medical meet meeting member memory mention message method middle might military million mind minute miss mission model modern moment money month more morning most mother mouth move movement movie Mr Mrs much music must my myself name nation national natural nature near nearly necessary need network never new news newspaper next nice night no none nor north not note nothing notice now n't number occur of off offer office officer official often oh oil ok old on once one only onto open operation opportunity option or order organization other others our out outside over own owner page pain painting paper parent part participant particular particularly partner party pass past patient pattern pay peace people per perform performance perhaps period person personal phone physical pick picture piece place plan plant play player PM point police policy political politics poor popular population position positive possible power practice prepare present president pressure pretty prevent price private probably problem process produce product production professional professor program project property protect prove provide public pull purpose push put quality question quickly quite race radio raise range rate rather reach read ready real reality realize really reason receive recent recently recognize record red reduce reflect region relate relationship religious remain remember remove report represent Republican require research resource respond response responsibility rest result return reveal rich right rise risk road rock role room rule run safe same save say scene school science scientist score sea season seat second section security see seek seem sell send senior sense series serious serve service set seven several sex sexual shake share she shoot short shot should shoulder show side 
sign significant similar simple simply since sing single sister sit site situation six size skill skin small smile so social society soldier some somebody someone something sometimes son song soon sort sound source south southern space speak special specific speech spend sport spring staff stage stand standard star start state statement station stay step still stock stop store story strategy street strong structure student study stuff style subject success successful such suddenly suffer suggest summer support sure surface system table take talk task tax teach teacher team technology television tell ten tend term test than thank that the their them themselves then theory there these they thing think third this those though thought thousand threat three through throughout throw thus time to today together tonight too top total tough toward town trade traditional training travel treat treatment tree trial trip trouble true truth try turn TV two type under understand unit until up upon us use usually value various very victim view violence visit voice vote wait walk wall want war watch water way we weapon wear week weight well west western what whatever when where whether which while white who whole whom whose why wide wife will win wind window wish with within without woman wonder word work worker world worry would write writer wrong yard yeah year yes yet you young your yourself -------------------------------------------------------------------------------- /external.js: -------------------------------------------------------------------------------- 1 | export let Env, Compression, Cursor, getAddress, clearKeptObjects, setGlobalBuffer, 2 | require, arch, fs, lmdbxError, path, EventEmitter, orderedBinary, MsgpackrEncoder, WeakLRUCache; 3 | export function setNativeFunctions(externals) { 4 | Env = externals.Env; 5 | Compression = externals.Compression; 6 | getAddress = externals.getAddress; 7 | clearKeptObjects = externals.clearKeptObjects; 8 | setGlobalBuffer = 
externals.setGlobalBuffer; 9 | Cursor = externals.Cursor; 10 | lmdbxError = externals.lmdbxError; 11 | } 12 | export function setExternals(externals) { 13 | require = externals.require; 14 | arch = externals.arch; 15 | fs = externals.fs; 16 | path = externals.path; 17 | EventEmitter = externals.EventEmitter; 18 | orderedBinary = externals.orderedBinary; 19 | MsgpackrEncoder = externals.MsgpackrEncoder; 20 | WeakLRUCache = externals.WeakLRUCache; 21 | } 22 | -------------------------------------------------------------------------------- /index.d.ts: -------------------------------------------------------------------------------- 1 | declare namespace lmdb { 2 | export function open(path: string, options: RootDatabaseOptions): RootDatabase 3 | export function open(options: RootDatabaseOptionsWithPath): RootDatabase 4 | 5 | class Database { 6 | /** 7 | * Get the value stored by given id/key 8 | * @param id The key for the entry 9 | **/ 10 | get(id: K): V | undefined 11 | /** 12 | * Get the entry stored by given id/key, which includes both the value and the version number (if available) 13 | * @param id The key for the entry 14 | **/ 15 | getEntry(id: K): { 16 | value: V 17 | version?: number 18 | } | undefined 19 | 20 | /** 21 | * Get the value stored by given id/key in binary format, as a Buffer 22 | * @param id The key for the entry 23 | **/ 24 | getBinary(id: K): Buffer | undefined 25 | 26 | /** 27 | * Get the value stored by given id/key in binary format, as a temporary Buffer. 28 | * This is faster, but the data is only valid until the next get operation (then it will be overwritten). 29 | * @param id The key for the entry 30 | **/ 31 | getBinaryFast(id: K): Buffer | undefined 32 | 33 | /** 34 | * Asynchronously fetch the values stored by the given ids and accesses all 35 | * pages to ensure that any hard page faults and disk I/O are performed 36 | * asynchronously in a separate thread. 
Once completed, synchronous 37 | * gets to the same entries will most likely be in memory and fast. 38 | * @param ids The keys for the entries to prefetch 39 | **/ 40 | prefetch(ids: K[], callback?: Function): Promise 41 | 42 | /** 43 | * Asynchronously get the values stored by the given ids and return the 44 | * values in array corresponding to the array of ids. 45 | * @param ids The keys for the entries to get 46 | **/ 47 | getMany(ids: K[], callback?: (error: any, values: V[]) => any): Promise<(V | undefined)[]> 48 | 49 | /** 50 | * Store the provided value, using the provided id/key 51 | * @param id The key for the entry 52 | * @param value The value to store 53 | **/ 54 | put(id: K, value: V): Promise 55 | /** 56 | * Store the provided value, using the provided id/key and version number, and optionally the required 57 | * existing version 58 | * @param id The key for the entry 59 | * @param value The value to store 60 | * @param version The version number to assign to this entry 61 | * @param ifVersion If provided the put will only succeed if the previous version number matches this (atomically checked) 62 | **/ 63 | put(id: K, value: V, version: number, ifVersion?: number): Promise 64 | /** 65 | * Remove the entry with the provided id/key 66 | * @param id The key for the entry to remove 67 | **/ 68 | remove(id: K): Promise 69 | /** 70 | * Remove the entry with the provided id/key, conditionally based on the provided existing version number 71 | * @param id The key for the entry to remove 72 | * @param ifVersion If provided the remove will only succeed if the previous version number matches this (atomically checked) 73 | **/ 74 | remove(id: K, ifVersion: number): Promise 75 | /** 76 | * Remove the entry with the provided id/key and value (mainly used for dupsort databases) and optionally the required 77 | * existing version 78 | * @param id The key for the entry to remove 79 | * @param valueToRemove The value for the entry to remove 80 | **/ 81 | remove(id: K, 
valueToRemove: V): Promise 82 | /** 83 | * Syncronously store the provided value, using the provided id/key, will return after the data has been written. 84 | * @param id The key for the entry 85 | * @param value The value to store 86 | **/ 87 | putSync(id: K, value: V): void 88 | /** 89 | * Syncronously store the provided value, using the provided id/key and version number 90 | * @param id The key for the entry 91 | * @param value The value to store 92 | * @param version The version number to assign to this entry 93 | **/ 94 | putSync(id: K, value: V, version: number): void 95 | /** 96 | * Syncronously store the provided value, using the provided id/key and options 97 | * @param id The key for the entry 98 | * @param value The value to store 99 | * @param options The version number to assign to this entry 100 | **/ 101 | putSync(id: K, value: V, options: PutOptions): void 102 | /** 103 | * Syncronously remove the entry with the provided id/key 104 | * existing version 105 | * @param id The key for the entry to remove 106 | **/ 107 | removeSync(id: K): boolean 108 | /** 109 | * Synchronously remove the entry with the provided id/key and value (mainly used for dupsort databases) 110 | * existing version 111 | * @param id The key for the entry to remove 112 | * @param valueToRemove The value for the entry to remove 113 | **/ 114 | removeSync(id: K, valueToRemove: V): boolean 115 | /** 116 | * Get all the values for the given key (for dupsort databases) 117 | * existing version 118 | * @param key The key for the entry to remove 119 | * @param options The options for the iterator 120 | **/ 121 | getValues(key: K, options?: RangeOptions): ArrayLikeIterable 122 | /** 123 | * Get the count of all the values for the given key (for dupsort databases) 124 | * existing version 125 | * @param options The options for the range/iterator 126 | **/ 127 | getValuesCount(key: K, options?: RangeOptions): number 128 | /** 129 | * Get all the unique keys for the given range 130 | * 
existing version 131 | * @param options The options for the range/iterator 132 | **/ 133 | getKeys(options: RangeOptions): ArrayLikeIterable 134 | /** 135 | * Get the count of all the unique keys for the given range 136 | * existing version 137 | * @param options The options for the range/iterator 138 | **/ 139 | getKeysCount(options: RangeOptions): number 140 | /** 141 | * Get all the entries for the given range 142 | * existing version 143 | * @param options The options for the range/iterator 144 | **/ 145 | getRange(options: RangeOptions): ArrayLikeIterable<{ key: K, value: V, version?: number }> 146 | /** 147 | * Get the count of all the entries for the given range 148 | * existing version 149 | * @param options The options for the range/iterator 150 | **/ 151 | getCount(options: RangeOptions): number 152 | /** 153 | * @deprecated since version 2.0, use transaction() instead 154 | */ 155 | transactionAsync(action: () => T): T 156 | /** 157 | * Execute a transaction asyncronously, running all the actions within the action callback in the transaction, 158 | * and committing the transaction after the action callback completes. 159 | * existing version 160 | * @param action The function to execute within the transaction 161 | **/ 162 | transaction(action: () => T): Promise 163 | /** 164 | * Execute a transaction syncronously, running all the actions within the action callback in the transaction, 165 | * and committing the transaction after the action callback completes. 166 | * existing version 167 | * @param action The function to execute within the transaction 168 | **/ 169 | transactionSync(action: () => T): T 170 | /** 171 | * Execute a transaction asyncronously, running all the actions within the action callback in the transaction, 172 | * and committing the transaction after the action callback completes. 
173 | * existing version 174 | * @param action The function to execute within the transaction 175 | **/ 176 | childTransaction(action: () => T): Promise 177 | /** 178 | * Execute a set of write operations that will all be batched together in next queued asynchronous transaction. 179 | * @param action The function to execute with a set of write operations. 180 | **/ 181 | batch(action: () => any): Promise 182 | /** 183 | * Execute writes actions that are all conditionally dependent on the entry with the provided key having the provided 184 | * version number (checked atomically). 185 | * @param id Key of the entry to check 186 | * @param ifVersion The require version number of the entry for all actions to succeed 187 | * @param action The function to execute with actions that will be dependent on this condition 188 | **/ 189 | ifVersion(id: K, ifVersion: number, action: () => any): Promise 190 | /** 191 | * Execute writes actions that are all conditionally dependent on the entry with the provided key 192 | * not existing (checked atomically). 
193 | * @param id Key of the entry to check 194 | * @param action The function to execute with actions that will be dependent on this condition 195 | **/ 196 | ifNoExists(id: K, action: () => any): Promise 197 | /** 198 | * Check if an entry for the provided key exists 199 | * @param id Key of the entry to check 200 | */ 201 | doesExist(key: K): boolean 202 | /** 203 | * Check if an entry for the provided key/value exists 204 | * @param id Key of the entry to check 205 | * @param value Value of the entry to check 206 | */ 207 | doesExist(key: K, value: V): boolean 208 | /** 209 | * Check if an entry for the provided key exists with the expected version 210 | * @param id Key of the entry to check 211 | * @param version Expected version 212 | */ 213 | doesExist(key: K, version: number): boolean 214 | /** 215 | * @deprecated since version 2.0, use drop() or dropSync() instead 216 | */ 217 | deleteDB(): Promise 218 | /** 219 | * Delete this database/store (asynchronously). 220 | **/ 221 | drop(): Promise 222 | /** 223 | * Synchronously delete this database/store. 224 | **/ 225 | dropSync(): void 226 | /** 227 | * @deprecated since version 2.0, use clearAsync() or clearSync() instead 228 | */ 229 | clear(): Promise 230 | /** 231 | * Asynchronously clear all the entries from this database/store. 232 | **/ 233 | clearAsync(): Promise 234 | /** 235 | * Synchronously clear all the entries from this database/store. 236 | **/ 237 | clearSync(): void 238 | /** 239 | * Check the reader locks and remove any stale reader locks. Returns the number of stale locks that were removed. 240 | **/ 241 | readerCheck(): number 242 | /** 243 | * Returns a string that describes all the current reader locks, useful for debugging if reader locks aren't being removed. 
244 | **/ 245 | readerList(): string 246 | /** 247 | * Returns statistics about the current database 248 | **/ 249 | getStats(): {} 250 | /** 251 | * Explicitly force the read transaction to reset to the latest snapshot/version of the database 252 | **/ 253 | resetReadTxn(): void 254 | /** 255 | * Make a snapshot copy of the current database at the indicated path 256 | **/ 257 | backup(path: string): Promise 258 | /** 259 | * Close the current database. 260 | **/ 261 | close(): Promise 262 | /** 263 | * Add event listener 264 | */ 265 | on(event: 'beforecommit' | 'aftercommit', callback: (event: any) => void): void 266 | } 267 | /* A special value that can be returned from a transaction to indicate that the transaction should be aborted */ 268 | export const ABORT = 10000000000000 269 | class RootDatabase extends Database { 270 | /** 271 | * Open a database store using the provided options. 272 | **/ 273 | openDB(options: DatabaseOptions & { name: string }): Database 274 | /** 275 | * Open a database store using the provided options. 
276 | **/ 277 | openDB(dbName: string, dbOptions: DatabaseOptions): Database 278 | } 279 | 280 | type Key = Key[] | string | symbol | number | boolean | Buffer; 281 | 282 | interface DatabaseOptions { 283 | name?: string 284 | cache?: boolean 285 | compression?: boolean | CompressionOptions 286 | encoding?: 'msgpack' | 'json' | 'string' | 'binary' | 'ordered-binary' 287 | sharedStructuresKey?: Key 288 | useVersions?: boolean 289 | keyEncoding?: 'uint32' | 'binary' | 'ordered-binary' 290 | dupSort?: boolean 291 | strictAsyncOrder?: boolean 292 | } 293 | interface RootDatabaseOptions extends DatabaseOptions { 294 | /** The maximum number of databases to be able to open (there is some extra overhead if this is set very high).*/ 295 | maxDbs?: number 296 | /** Set a longer delay (in milliseconds) to wait longer before committing writes to increase the number of writes per transaction (higher latency, but more efficient) **/ 297 | commitDelay?: number 298 | asyncTransactionOrder?: 'after' | 'before' | 'strict' 299 | mapSize?: number 300 | pageSize?: number 301 | remapChunks?: boolean 302 | /** This provides a small performance boost (when not using useWritemap) for writes, by skipping zero'ing out malloc'ed data, but can leave application data in unused portions of the database. This is recommended unless there are concerns of database files being accessible. */ 303 | noMemInit?: boolean 304 | /** Use writemaps, discouraged at this. This improves performance by reducing malloc calls, but it is possible for a stray pointer to corrupt data. 
*/ 305 | useWritemap?: boolean 306 | noSubdir?: boolean 307 | noSync?: boolean 308 | noMetaSync?: boolean 309 | readOnly?: boolean 310 | mapAsync?: boolean 311 | maxReaders?: number 312 | winMemoryPriority?: 1 | 2 | 3 | 4 | 5 313 | } 314 | interface RootDatabaseOptionsWithPath extends RootDatabaseOptions { 315 | path: string 316 | } 317 | interface CompressionOptions { 318 | threshold?: number 319 | dictionary?: Buffer 320 | } 321 | interface RangeOptions { 322 | /** Starting key for a range **/ 323 | start?: Key 324 | /** Ending key for a range **/ 325 | end?: Key 326 | /** Iterate through the entries in reverse order **/ 327 | reverse?: boolean 328 | /** Include version numbers in each entry returned **/ 329 | versions?: boolean 330 | /** The maximum number of entries to return **/ 331 | limit?: number 332 | /** The number of entries to skip **/ 333 | offset?: number 334 | /** Use a snapshot of the database from when the iterator started **/ 335 | snapshot?: boolean 336 | } 337 | interface PutOptions { 338 | /* Append to the database using MDB_APPEND, which can be faster */ 339 | append?: boolean 340 | /* Append to a dupsort database using MDB_APPENDDUP, which can be faster */ 341 | appendDup?: boolean 342 | /* Perform put with MDB_NOOVERWRITE which will fail if the entry for the key already exists */ 343 | noOverwrite?: boolean 344 | /* Perform put with MDB_NODUPDATA which will fail if the entry for the key/value already exists */ 345 | noDupData?: boolean 346 | /* The version of the entry to set */ 347 | version?: number 348 | } 349 | class ArrayLikeIterable implements Iterable { 350 | map(callback: (entry: T) => U): ArrayLikeIterable 351 | filter(callback: (entry: T) => any): ArrayLikeIterable 352 | [Symbol.iterator]() : Iterator 353 | forEach(callback: (entry: T) => any): void 354 | asArray: T[] 355 | } 356 | export function getLastVersion(): number 357 | export function compareKeys(a: Key, b: Key): number 358 | class Binary {} 359 | /* Wrap a 
Buffer/Uint8Array for direct assignment as a value bypassing any encoding, for put (and doesExist) operations. 360 | */ 361 | export function asBinary(buffer: Uint8Array): Binary 362 | } 363 | export = lmdb 364 | -------------------------------------------------------------------------------- /keys.js: -------------------------------------------------------------------------------- 1 | import { getAddress, orderedBinary } from './external.js'; 2 | 3 | const writeUint32Key = (key, target, start) => { 4 | (target.dataView || (target.dataView = new DataView(target.buffer, 0, target.length))).setUint32(start, key, true); 5 | return start + 4; 6 | }; 7 | const readUint32Key = (target, start) => { 8 | return (target.dataView || (target.dataView = new DataView(target.buffer, 0, target.length))).getUint32(start, true); 9 | }; 10 | const writeBufferKey = (key, target, start) => { 11 | target.set(key, start); 12 | return key.length + start; 13 | }; 14 | const Uint8ArraySlice = Uint8Array.prototype.slice; 15 | const readBufferKey = (target, start, end) => { 16 | return Uint8ArraySlice.call(target, start, end); 17 | }; 18 | 19 | export function applyKeyHandling(store) { 20 | if (store.encoding == 'ordered-binary') { 21 | store.encoder = store.decoder = { 22 | writeKey: orderedBinary.writeKey, 23 | readKey: orderedBinary.readKey, 24 | }; 25 | } 26 | if (store.encoder && store.encoder.writeKey && !store.encoder.encode) { 27 | store.encoder.encode = function(value) { 28 | return saveKey(value, this.writeKey, false, store.maxKeySize); 29 | }; 30 | } 31 | if (store.decoder && store.decoder.readKey && !store.decoder.decode) 32 | store.decoder.decode = function(buffer) { return this.readKey(buffer, 0, buffer.length); }; 33 | if (store.keyIsUint32 || store.keyEncoding == 'uint32') { 34 | store.writeKey = writeUint32Key; 35 | store.readKey = readUint32Key; 36 | } else if (store.keyIsBuffer || store.keyEncoding == 'binary') { 37 | store.writeKey = writeBufferKey; 38 | store.readKey = 
readBufferKey; 39 | } else if (store.keyEncoder) { 40 | store.writeKey = store.keyEncoder.writeKey; 41 | store.readKey = store.keyEncoder.readKey; 42 | } else { 43 | store.writeKey = orderedBinary.writeKey; 44 | store.readKey = orderedBinary.readKey; 45 | } 46 | } 47 | 48 | let saveBuffer, saveDataView = { setFloat64() {}, setUint32() {} }, saveDataAddress; 49 | let savePosition = 8000; 50 | let DYNAMIC_KEY_BUFFER_SIZE = 8192; 51 | function allocateSaveBuffer() { 52 | saveBuffer = typeof Buffer != 'undefined' ? Buffer.alloc(DYNAMIC_KEY_BUFFER_SIZE) : new Uint8Array(DYNAMIC_KEY_BUFFER_SIZE); 53 | saveBuffer.buffer.address = getAddress(saveBuffer); 54 | saveDataAddress = saveBuffer.buffer.address; 55 | // TODO: Conditionally only do this for key sequences? 56 | saveDataView.setUint32(savePosition, 0xffffffff); 57 | saveDataView.setFloat64(savePosition + 4, saveDataAddress, true); // save a pointer from the old buffer to the new address for the sake of the prefetch sequences 58 | saveBuffer.dataView = saveDataView = new DataView(saveBuffer.buffer, saveBuffer.byteOffset, saveBuffer.byteLength); 59 | savePosition = 0; 60 | } 61 | export function saveKey(key, writeKey, saveTo, maxKeySize) { 62 | if (savePosition > 7800) { 63 | allocateSaveBuffer(); 64 | } 65 | let start = savePosition; 66 | try { 67 | savePosition = key === undefined ? 
start + 4 : 68 | writeKey(key, saveBuffer, start + 4); 69 | } catch (error) { 70 | saveBuffer.fill(0, start + 4); // restore zeros 71 | if (error.name == 'RangeError') { 72 | if (8180 - start < maxKeySize) { 73 | allocateSaveBuffer(); // try again: 74 | return saveKey(key, writeKey, saveTo, maxKeySize); 75 | } 76 | throw new Error('Key was too large, max key size is ' + maxKeySize); 77 | } else 78 | throw error; 79 | } 80 | let length = savePosition - start - 4; 81 | if (length > maxKeySize) { 82 | throw new Error('Key of size ' + length + ' was too large, max key size is ' + maxKeySize); 83 | } 84 | if (savePosition >= 8160) { // need to reserve enough room at the end for pointers 85 | savePosition = start // reset position 86 | allocateSaveBuffer(); // try again: 87 | return saveKey(key, writeKey, saveTo, maxKeySize); 88 | } 89 | if (saveTo) { 90 | saveDataView.setUint32(start, length, true); // save the length 91 | saveTo.saveBuffer = saveBuffer; 92 | savePosition = (savePosition + 12) & 0xfffffc; 93 | return start + saveDataAddress; 94 | } else { 95 | saveBuffer.start = start + 4; 96 | saveBuffer.end = savePosition; 97 | savePosition = (savePosition + 7) & 0xfffff8; // full 64-bit word alignment since these are usually copied 98 | return saveBuffer; 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /level.js: -------------------------------------------------------------------------------- 1 | export function levelup(store) { 2 | return Object.assign(Object.create(store), { 3 | get(key, options, callback) { 4 | let result = store.get(key); 5 | if (typeof options == 'function') 6 | callback = options; 7 | if (callback) { 8 | if (result === undefined) 9 | callback(new NotFoundError()); 10 | else 11 | callback(null, result); 12 | } else { 13 | if (result === undefined) 14 | return Promise.reject(new NotFoundError()); 15 | else 16 | return Promise.resolve(result); 17 | } 18 | }, 19 | }); 20 | } 21 | class 
NotFoundError extends Error { 22 | constructor(message) { 23 | super(message); 24 | this.name = 'NotFoundError'; 25 | this.notFound = true; 26 | } 27 | } -------------------------------------------------------------------------------- /mod.ts: -------------------------------------------------------------------------------- 1 | import { fileURLToPath } from './deps.ts'; 2 | import { orderedBinary, setNativeFunctions } from './external.js'; 3 | orderedBinary.enableNullTermination(); 4 | // probably use Deno.build.os 5 | let version = import.meta.url.match(/@([^/]+)\//)?.[1]; 6 | //console.log({version}); 7 | let libPath = import.meta.url.startsWith('file:') && fileURLToPath(new URL('build/Release/lmdbx.node', import.meta.url)); 8 | if (!libPath || !exists(libPath)) { 9 | //console.log({ libPath }, 'does not exist') 10 | libPath = (Deno.env.get('LMDBX_LIB_PATH') || (tmpdir() + '/lmdbx-js-' + (version || '') + '.lib')) as string; 11 | const ARCH = { x86_64: 'x64', aarch64: 'arm64' } 12 | if (!exists(libPath)) { 13 | let os: string = Deno.build.os; 14 | os = os == 'windows' ? 'win32' : os; 15 | os += '-' + ARCH[Deno.build.arch]; 16 | let libraryUrl = 'https://cdn.jsdelivr.net/npm/lmdbs@' + (version || 'latest') + 17 | '/prebuilds/' + os + '/node.abi93' + (os == 'win32' ? 
'' : '.glibc') + '.node'; 18 | console.log('Download', libraryUrl); 19 | let response = await fetch(libraryUrl); 20 | if (response.status == 200) { 21 | let binaryLibraryBuffer = await response.arrayBuffer(); 22 | Deno.writeFileSync(libPath, new Uint8Array(binaryLibraryBuffer)); 23 | } else { 24 | throw new Error('Unable to fetch ' + libraryUrl + ', HTTP response: ' + response.status); 25 | } 26 | } 27 | } 28 | let lmdbxLib = Deno.dlopen(libPath, { 29 | // const char* path, char* keyBuffer, Compression* compression, int jsFlags, int flags, int maxDbs, 30 | // int maxReaders, mdb_size_t mapSize, int pageSize 31 | envOpen: { parameters: ['u32', 'u32', 'pointer', 'pointer', 'f64', 'u32', 'u32', 'f64', 'u32'], result: 'i64'}, 32 | closeEnv: { parameters: ['f64'], result: 'void'}, 33 | freeData: { parameters: ['f64'], result: 'void'}, 34 | getAddress: { parameters: ['pointer'], result: 'usize'}, 35 | getMaxKeySize: { parameters: ['f64'], result: 'u32'}, 36 | openDbi: { parameters: ['f64', 'u32', 'pointer', 'u32', 'f64'], result: 'i64'}, 37 | getDbi: { parameters: ['f64'], result: 'u32'}, 38 | readerCheck: { parameters: ['f64'], result: 'i32'}, 39 | beginTxn: { parameters: ['f64', 'u32'], result: 'i64'}, 40 | resetTxn: { parameters: ['f64'], result: 'void'}, 41 | renewTxn: { parameters: ['f64'], result: 'i32'}, 42 | abortTxn: { parameters: ['f64'], result: 'void'}, 43 | commitTxn: { parameters: ['f64'], result: 'i32'}, 44 | commitEnvTxn: { parameters: ['f64'], result: 'i32'}, 45 | abortEnvTxn: { parameters: ['f64'], result: 'void'}, 46 | getError: { parameters: ['i32', 'pointer'], result: 'void'}, 47 | dbiGetByBinary: { parameters: ['f64', 'u32'], result: 'u32'}, 48 | openCursor: { parameters: ['f64'], result: 'i64'}, 49 | cursorRenew: { parameters: ['f64'], result: 'i32'}, 50 | cursorClose: { parameters: ['f64'], result: 'i32'}, 51 | cursorIterate: { parameters: ['f64'], result: 'i32'}, 52 | cursorPosition: { parameters: ['f64', 'u32', 'u32', 'u32', 'f64'], result: 
'i32'}, 53 | cursorCurrentValue: { parameters: ['f64'], result: 'i32'}, 54 | startWriting: { parameters: ['f64', 'f64'], nonblocking: true, result: 'i32'}, 55 | compress: { parameters: ['f64', 'f64'], nonblocking: true, result: 'void'}, 56 | envWrite: { parameters: ['f64', 'f64'], result: 'i32'}, 57 | setGlobalBuffer: { parameters: ['pointer', 'usize'], result: 'void'}, 58 | setCompressionBuffer: { parameters: ['f64', 'pointer', 'usize', 'u32'], result: 'void'}, 59 | newCompression: { parameters: ['pointer', 'usize', 'u32'], result: 'u64'}, 60 | prefetch: { parameters: ['f64', 'f64'], nonblocking: true, result: 'i32'}, 61 | }); 62 | 63 | let { envOpen, closeEnv, getAddress, freeData, getMaxKeySize, openDbi, getDbi, readerCheck, 64 | commitEnvTxn, abortEnvTxn, beginTxn, resetTxn, renewTxn, abortTxn, commitTxn, dbiGetByBinary, startWriting, compress, envWrite, openCursor, cursorRenew, cursorClose, cursorIterate, cursorPosition, cursorCurrentValue, setGlobalBuffer: setGlobalBuffer2, setCompressionBuffer, getError, newCompression, prefetch } = lmdbxLib.symbols; 65 | let registry = new FinalizationRegistry(address => { 66 | // when an object is GC'ed, free it in C. 
67 | freeData(address); 68 | }); 69 | 70 | class CBridge { 71 | address: number; 72 | constructor(address: number) { 73 | this.address = address || 0; 74 | if (address) { 75 | registry.register(this, address); 76 | } 77 | } 78 | /* static addMethods(...methods: ) { 79 | for (let method of methods) { 80 | this.prototype[method] = function() { 81 | return symbols[method](this.address, ...arguments); 82 | }; 83 | } 84 | }*/ 85 | } 86 | const textEncoder = new TextEncoder(); 87 | const textDecoder = new TextDecoder(); 88 | const MAX_ERROR = 1000; 89 | function checkError(rc: number): number { 90 | if (rc && rc < MAX_ERROR) { 91 | // TODO: Look up error and throw 92 | lmdbxError(rc); 93 | } 94 | return rc; 95 | } 96 | function lmdbxError(rc: number) { 97 | getError(rc, keyBytes); 98 | let message = textDecoder.decode(keyBytes.subarray(0, keyBytes.indexOf(0))) || ('Error code: ' + rc); 99 | throw new Error(message); 100 | } 101 | let keyBytes: Uint8Array; 102 | class Env extends CBridge { 103 | open(options: any, flags: number, jsFlags: number) { 104 | let rc = envOpen(flags, jsFlags, toCString(options.path), keyBytes = options.keyBytes, 0, 105 | options.maxDbs || 12, options.maxReaders || 126, options.mapSize, options.pageSize) as number; 106 | this.address = checkError(rc); 107 | registry.register(this, this.address); 108 | return 0; 109 | } 110 | openDbi(flags: number, name: string, keyType: number, compression: Compression) { 111 | let rc: number = openDbi(this.address, flags, toCString(name), keyType, compression?.address || 0) as number; 112 | if (rc == -30798) { // MDB_NOTFOUND 113 | return; 114 | } 115 | return new Dbi(checkError(rc), 116 | getDbi(rc) as number); 117 | } 118 | close() { 119 | closeEnv(this.address); 120 | } 121 | getMaxKeySize() { 122 | return getMaxKeySize(this.address); 123 | } 124 | readerCheck() { 125 | return readerCheck(this.address); 126 | } 127 | beginTxn(flags: number) { 128 | let rc: number = beginTxn(this.address, flags) as number; 129 
| return new Transaction(checkError(rc), flags); 130 | } 131 | commitTxn() { 132 | checkError(commitEnvTxn(this.address) as number); 133 | } 134 | abortTxn() { 135 | abortEnvTxn(this.address); 136 | } 137 | startWriting(instructions: number, callback: (value: number) => number) { 138 | (startWriting(this.address, instructions) as Promise).then(callback); 139 | } 140 | compress(compressionPointer: number, callback: (value: void) => void) { 141 | return (compress(this.address, compressionPointer) as Promise).then(callback); 142 | } 143 | write(instructions: number) { 144 | return checkError(envWrite(this.address, instructions) as number); 145 | } 146 | } 147 | //Env.addMethods('startWriting', 'write', 'openDB'); 148 | class Dbi extends CBridge { 149 | dbi: number; 150 | constructor(address: number, dbi: number) { 151 | super(address); 152 | this.dbi = dbi; 153 | } 154 | getByBinary(keySize: number): number { 155 | return dbiGetByBinary(this.address, keySize) as number; 156 | } 157 | prefetch(keys: number, callback: () => void): void { 158 | (prefetch(this.address, keys) as Promise).then(() => callback()); 159 | } 160 | } 161 | class Transaction extends CBridge { 162 | flags: number; 163 | constructor(address: number, flags: number) { 164 | super(address); 165 | this.flags = flags; 166 | } 167 | reset() { 168 | resetTxn(this.address); 169 | } 170 | renew() { 171 | let rc = renewTxn(this.address) as number; 172 | if (rc) 173 | lmdbxError(rc); 174 | } 175 | abort() { 176 | abortTxn(this.address); 177 | } 178 | commit() { 179 | commitTxn(this.address); 180 | } 181 | } 182 | 183 | 184 | class Compression extends CBridge { 185 | constructor(options: { dictionary: Uint8Array, threshold: number }) { 186 | let dictionary = options.dictionary || new Uint8Array(0); 187 | super(newCompression(dictionary, dictionary.length, options.threshold || 1000) as number); 188 | } 189 | setBuffer(bytes: Uint8Array, dictLength: number) { 190 | setCompressionBuffer(this.address, bytes, 
bytes.length, dictLength); 191 | } 192 | } 193 | class Cursor extends CBridge { 194 | constructor(dbi: Dbi) { 195 | super(openCursor(dbi.address) as number); 196 | } 197 | renew() { 198 | cursorRenew(this.address); 199 | } 200 | position(flags: number, offset: number, keySize: number, endKeyAddress: number) { 201 | return cursorPosition(this.address, flags, offset, keySize, endKeyAddress); 202 | } 203 | iterate() { 204 | return cursorIterate(this.address); 205 | } 206 | getCurrentValue() { 207 | return cursorCurrentValue(this.address); 208 | } 209 | close() { 210 | return cursorClose(this.address); 211 | } 212 | } 213 | function toCString(str: string): Uint8Array { 214 | return str == null ? new Uint8Array(0) : textEncoder.encode(str + '\x00'); 215 | } 216 | function setGlobalBuffer(buffer: Uint8Array) { 217 | setGlobalBuffer2(buffer, buffer.length); 218 | } 219 | 220 | setNativeFunctions({ Env, Compression, Cursor, getAddress, lmdbxError, setGlobalBuffer }); 221 | export const { toBufferKey: keyValueToBuffer, compareKeys, compareKeys: compareKey, fromBufferKey: bufferToKeyValue } = orderedBinary; 222 | export { ABORT, asBinary, IF_EXISTS } from './write.js'; 223 | export { levelup } from './level.js'; 224 | export { open, getLastVersion } from './open.js'; 225 | 226 | // inlined from https://github.com/denoland/deno_std/blob/main/node/os.ts 227 | function tmpdir(): string | null { 228 | /* This follows the node js implementation, but has a few 229 | differences: 230 | * On windows, if none of the environment variables are defined, 231 | we return null. 232 | * On unix we use a plain Deno.env.get, instead of safeGetenv, 233 | which special cases setuid binaries. 234 | * Node removes a single trailing / or \, we remove all. 235 | */ 236 | if (Deno.build.os == 'windows') { 237 | const temp = Deno.env.get("TEMP") || Deno.env.get("TMP"); 238 | if (temp) { 239 | return temp.replace(/(? 
4 | 5 | using namespace v8; 6 | using namespace node; 7 | 8 | thread_local LZ4_stream_t* Compression::stream = nullptr; 9 | Compression::Compression() { 10 | } 11 | Compression::~Compression() { 12 | delete dictionary; 13 | } 14 | NAN_METHOD(Compression::ctor) { 15 | unsigned int compressionThreshold = 1000; 16 | char* dictionary = nullptr; 17 | unsigned int dictSize = 0; 18 | if (info[0]->IsObject()) { 19 | Local dictionaryOption = Nan::To(info[0]).ToLocalChecked()->Get(Nan::GetCurrentContext(), Nan::New("dictionary").ToLocalChecked()).ToLocalChecked(); 20 | if (!dictionaryOption->IsUndefined()) { 21 | if (!node::Buffer::HasInstance(dictionaryOption)) { 22 | return Nan::ThrowError("Dictionary must be a buffer"); 23 | } 24 | dictSize = node::Buffer::Length(dictionaryOption); 25 | dictSize = (dictSize >> 3) << 3; // make sure it is word-aligned 26 | dictionary = node::Buffer::Data(dictionaryOption); 27 | 28 | } 29 | Local thresholdOption = Nan::To(info[0]).ToLocalChecked()->Get(Nan::GetCurrentContext(), Nan::New("threshold").ToLocalChecked()).ToLocalChecked(); 30 | if (thresholdOption->IsNumber()) { 31 | compressionThreshold = thresholdOption->IntegerValue(Nan::GetCurrentContext()).FromJust(); 32 | } 33 | } 34 | Compression* compression = new Compression(); 35 | compression->dictionary = dictionary; 36 | compression->decompressTarget = dictionary + dictSize; 37 | compression->decompressSize = 0; 38 | compression->acceleration = 1; 39 | compression->compressionThreshold = compressionThreshold; 40 | compression->Wrap(info.This()); 41 | compression->Ref(); 42 | (void)info.This()->Set(Nan::GetCurrentContext(), Nan::New("address").ToLocalChecked(), Nan::New((double) (size_t) compression)); 43 | 44 | return info.GetReturnValue().Set(info.This()); 45 | } 46 | 47 | NAN_METHOD(Compression::setBuffer) { 48 | Compression *compression = Nan::ObjectWrap::Unwrap(info.This()); 49 | unsigned int dictSize = Local::Cast(info[1])->IntegerValue(Nan::GetCurrentContext()).FromJust(); 50 
| compression->dictionary = node::Buffer::Data(info[0]); 51 | compression->decompressTarget = compression->dictionary + dictSize; 52 | compression->decompressSize = node::Buffer::Length(info[0]) - dictSize; 53 | } 54 | extern "C" EXTERN void setCompressionBuffer(double compressionPointer, char* buffer, uint32_t bufferSize, uint32_t dictSize) { 55 | Compression *compression = (Compression*) (size_t) compressionPointer; 56 | compression->dictionary = buffer; 57 | compression->decompressTarget = buffer + dictSize; 58 | compression->decompressSize = bufferSize - dictSize; 59 | } 60 | 61 | void Compression::decompress(MDBX_val& data, bool &isValid, bool canAllocate) { 62 | uint32_t uncompressedLength; 63 | int compressionHeaderSize; 64 | uint32_t compressedLength = data.iov_len; 65 | unsigned char* charData = (unsigned char*) data.iov_base; 66 | 67 | if (charData[0] == 254) { 68 | uncompressedLength = ((uint32_t)charData[1] << 16) | ((uint32_t)charData[2] << 8) | (uint32_t)charData[3]; 69 | compressionHeaderSize = 4; 70 | } 71 | else if (charData[0] == 255) { 72 | uncompressedLength = ((uint32_t)charData[4] << 24) | ((uint32_t)charData[5] << 16) | ((uint32_t)charData[6] << 8) | (uint32_t)charData[7]; 73 | compressionHeaderSize = 8; 74 | } 75 | else { 76 | fprintf(stderr, "Unknown status byte %u\n", charData[0]); 77 | if (canAllocate) 78 | Nan::ThrowError("Unknown status byte"); 79 | isValid = false; 80 | return; 81 | } 82 | data.iov_base = decompressTarget; 83 | data.iov_len = uncompressedLength; 84 | //TODO: For larger blocks with known encoding, it might make sense to allocate space for it and use an ExternalString 85 | //fprintf(stdout, "compressed size %u uncompressedLength %u, first byte %u\n", data.iov_len, uncompressedLength, charData[compressionHeaderSize]); 86 | if (uncompressedLength > decompressSize) { 87 | isValid = false; 88 | return; 89 | } 90 | int written = LZ4_decompress_safe_usingDict( 91 | (char*)charData + compressionHeaderSize, decompressTarget, 92 
| compressedLength - compressionHeaderSize, uncompressedLength, 93 | dictionary, decompressTarget - dictionary); 94 | //fprintf(stdout, "first uncompressed byte %X %X %X %X %X %X\n", uncompressedData[0], uncompressedData[1], uncompressedData[2], uncompressedData[3], uncompressedData[4], uncompressedData[5]); 95 | if (written < 0) { 96 | fprintf(stderr, "Failed to decompress data %u %u bytes:\n", compressionHeaderSize, uncompressedLength); 97 | for (uint32_t i = 0; i < compressedLength; i++) { 98 | fprintf(stderr, "%u ", charData[i]); 99 | } 100 | if (canAllocate) 101 | Nan::ThrowError("Failed to decompress data"); 102 | isValid = false; 103 | return; 104 | } 105 | isValid = true; 106 | } 107 | 108 | int Compression::compressInstruction(EnvWrap* env, double* compressionAddress) { 109 | MDBX_val value; 110 | value.iov_base = (void*)((size_t) * (compressionAddress - 1)); 111 | value.iov_len = *(((uint32_t*)compressionAddress) - 3); 112 | argtokey_callback_t compressedData = compress(&value, nullptr); 113 | if (compressedData) { 114 | *(((uint32_t*)compressionAddress) - 3) = value.iov_len; 115 | *((size_t*)(compressionAddress - 1)) = (size_t)value.iov_base; 116 | int64_t status = std::atomic_exchange((std::atomic*) compressionAddress, (int64_t) 0); 117 | if (status == 1 && env) { 118 | pthread_mutex_lock(env->writingLock); 119 | pthread_cond_signal(env->writingCond); 120 | pthread_mutex_unlock(env->writingLock); 121 | //fprintf(stderr, "sent compression completion signal\n"); 122 | } 123 | //fprintf(stdout, "compressed to %p %u %u %p\n", value.iov_base, value.iov_len, status, env); 124 | return 0; 125 | } else { 126 | fprintf(stdout, "failed to compress\n"); 127 | return 1; 128 | } 129 | } 130 | 131 | argtokey_callback_t Compression::compress(MDBX_val* value, argtokey_callback_t freeValue) { 132 | size_t dataLength = value->iov_len; 133 | char* data = (char*)value->iov_base; 134 | if (value->iov_len < compressionThreshold && !(value->iov_len > 0 && ((uint8_t*)data)[0] 
>= 250)) 135 | return freeValue; // don't compress if less than threshold (but we must compress if the first byte is the compression indicator) 136 | bool longSize = dataLength >= 0x1000000; 137 | int prefixSize = (longSize ? 8 : 4); 138 | int maxCompressedSize = LZ4_COMPRESSBOUND(dataLength); 139 | char* compressed = new char[maxCompressedSize + prefixSize]; 140 | //fprintf(stdout, "compressing %u\n", dataLength); 141 | if (!stream) 142 | stream = LZ4_createStream(); 143 | LZ4_loadDict(stream, dictionary, decompressTarget - dictionary); 144 | int compressedSize = LZ4_compress_fast_continue(stream, data, compressed + prefixSize, dataLength, maxCompressedSize, acceleration); 145 | if (compressedSize > 0) { 146 | if (freeValue) 147 | freeValue(*value); 148 | uint8_t* compressedData = (uint8_t*)compressed; 149 | if (longSize) { 150 | compressedData[0] = 255; 151 | compressedData[2] = (uint8_t)(dataLength >> 40u); 152 | compressedData[3] = (uint8_t)(dataLength >> 32u); 153 | compressedData[4] = (uint8_t)(dataLength >> 24u); 154 | compressedData[5] = (uint8_t)(dataLength >> 16u); 155 | compressedData[6] = (uint8_t)(dataLength >> 8u); 156 | compressedData[7] = (uint8_t)dataLength; 157 | } 158 | else { 159 | compressedData[0] = 254; 160 | compressedData[1] = (uint8_t)(dataLength >> 16u); 161 | compressedData[2] = (uint8_t)(dataLength >> 8u); 162 | compressedData[3] = (uint8_t)dataLength; 163 | } 164 | value->iov_base = compressed; 165 | value->iov_len = compressedSize + prefixSize; 166 | return ([](MDBX_val &value) -> void { 167 | delete[] (char*)value.iov_base; 168 | }); 169 | } 170 | else { 171 | delete[] compressed; 172 | return nullptr; 173 | } 174 | } 175 | 176 | class CompressionWorker : public Nan::AsyncWorker { 177 | public: 178 | CompressionWorker(EnvWrap* env, double* compressionAddress, Nan::Callback *callback) 179 | : Nan::AsyncWorker(callback), env(env), compressionAddress(compressionAddress) {} 180 | 181 | 182 | void Execute() { 183 | uint64_t 
compressionPointer; 184 | compressionPointer = std::atomic_exchange((std::atomic*) compressionAddress, (int64_t) 2); 185 | if (compressionPointer > 1) { 186 | Compression* compression = (Compression*)(size_t) * ((double*)&compressionPointer); 187 | compression->compressInstruction(env, compressionAddress); 188 | } 189 | } 190 | void HandleOKCallback() { 191 | // don't actually call the callback, no need 192 | } 193 | 194 | private: 195 | EnvWrap* env; 196 | double* compressionAddress; 197 | }; 198 | 199 | NAN_METHOD(EnvWrap::compress) { 200 | EnvWrap *env = Nan::ObjectWrap::Unwrap(info.This()); 201 | size_t compressionAddress = Local::Cast(info[0])->Value(); 202 | Nan::Callback* callback = new Nan::Callback(Local::Cast(info[1])); 203 | CompressionWorker* worker = new CompressionWorker(env, (double*) compressionAddress, callback); 204 | Nan::AsyncQueueWorker(worker); 205 | } 206 | 207 | 208 | extern "C" EXTERN void compress(double ewPointer, double compressionJSPointer) { 209 | EnvWrap* ew = (EnvWrap*) (size_t) ewPointer; 210 | uint64_t compressionPointer; 211 | double* compressionAddress = (double*) (size_t) compressionJSPointer; 212 | compressionPointer = std::atomic_exchange((std::atomic*) compressionAddress, (int64_t) 2); 213 | if (compressionPointer > 1) { 214 | Compression* compression = (Compression*)(size_t) * ((double*)&compressionPointer); 215 | compression->compressInstruction(ew, compressionAddress); 216 | } 217 | } 218 | 219 | extern "C" EXTERN uint64_t newCompression(char* dictionary, uint32_t dictSize, uint32_t threshold) { 220 | dictSize = (dictSize >> 3) << 3; // make sure it is word-aligned 221 | Compression* compression = new Compression(); 222 | if ((size_t) dictionary < 10) 223 | dictionary= nullptr; 224 | compression->dictionary = dictionary; 225 | compression->decompressTarget = dictionary + dictSize; 226 | compression->decompressSize = 0; 227 | compression->acceleration = 1; 228 | compression->compressionThreshold = threshold; 229 | return 
(uint64_t) compression; 230 | } 231 | -------------------------------------------------------------------------------- /src/lmdbx-js.cpp: -------------------------------------------------------------------------------- 1 | #include "lmdbx-js.h" 2 | 3 | using namespace v8; 4 | using namespace node; 5 | 6 | int Logging::initLogging() { 7 | char* logging = getenv("LMDBX_JS_LOGGING"); 8 | if (logging) 9 | fprintf(stderr, "Start logging for lmdb-js\n"); 10 | return !!logging; 11 | } 12 | int Logging::debugLogging = Logging::initLogging(); 13 | 14 | NODE_MODULE_INIT(/* exports, module, context */) { 15 | if (Logging::debugLogging) 16 | fprintf(stderr, "Start initialization\n"); 17 | // Initializes the module 18 | // Export Env as constructor for EnvWrap 19 | EnvWrap::setupExports(exports); 20 | 21 | // Export Cursor as constructor for CursorWrap 22 | CursorWrap::setupExports(exports); 23 | 24 | // Export misc things 25 | setupExportMisc(exports); 26 | if (Logging::debugLogging) 27 | fprintf(stderr, "Finished initialization\n"); 28 | } 29 | #ifndef _WIN32 30 | extern "C" void node_module_register(void* m) { 31 | //fprintf(stderr, "This is just a dummy function to be called if node isn't there so deno can load this module\n"); 32 | } 33 | #endif 34 | /* Start of converting just the init to NAPI: 35 | static napi_value Init(napi_env env, napi_value napi_exports) { 36 | v8::Local exports; 37 | memcpy(static_cast(&exports), &napi_exports, sizeof(napi_exports)); 38 | */ 39 | 40 | // This file contains code from the node-lmdb project 41 | // Copyright (c) 2013-2017 Timur Kristóf 42 | // Licensed to you under the terms of the MIT license 43 | // 44 | // Permission is hereby granted, free of charge, to any person obtaining a copy 45 | // of this software and associated documentation files (the "Software"), to deal 46 | // in the Software without restriction, including without limitation the rights 47 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 
48 | // copies of the Software, and to permit persons to whom the Software is 49 | // furnished to do so, subject to the following conditions: 50 | 51 | // The above copyright notice and this permission notice shall be included in 52 | // all copies or substantial portions of the Software. 53 | 54 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 55 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 56 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 57 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 58 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 59 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 60 | // THE SOFTWARE. 61 | -------------------------------------------------------------------------------- /src/ordered-binary.cpp: -------------------------------------------------------------------------------- 1 | #include "lmdbx-js.h" 2 | 3 | #ifdef _WIN32 4 | #define ntohl _byteswap_ulong 5 | #define htonl _byteswap_ulong 6 | #endif 7 | // compare items by 32-bit comparison, a is user provided and assumed to be zero terminated/padded 8 | // which allows us to do the full 32-bit comparisons safely 9 | int compareFast(const MDBX_val *a, const MDBX_val *b) { 10 | uint32_t* dataA = (uint32_t*) a->iov_base; 11 | uint32_t* dataB = (uint32_t*) b->iov_base; 12 | size_t remaining = b->iov_len; 13 | uint32_t aVal, bVal; 14 | while(remaining >= 4) { 15 | aVal = ntohl(*dataA); 16 | bVal = ntohl(*dataB); 17 | if (aVal > bVal) 18 | return 1; 19 | if (aVal < bVal) 20 | return -1; 21 | /*diff = (int64_t) ntohl(*dataA) - (int64_t) ntohl(*dataB); 22 | if (diff) 23 | return diff;*/ 24 | dataA++; 25 | dataB++; 26 | remaining -= 4; 27 | } 28 | if (remaining) { 29 | aVal = ntohl(*dataA); 30 | bVal = ntohl(*dataB & (remaining == 2 ? 0x0000ffff : remaining == 1 ? 
0x000000ff : 0x00ffffff)); 31 | if (aVal > bVal) 32 | return 1; 33 | if (aVal < bVal) 34 | return -1; 35 | } 36 | return a->iov_len - b->iov_len; 37 | } 38 | -------------------------------------------------------------------------------- /src/txn.cpp: -------------------------------------------------------------------------------- 1 | #include "lmdbx-js.h" 2 | 3 | using namespace v8; 4 | using namespace node; 5 | 6 | TxnTracked::TxnTracked(MDBX_txn *txn, unsigned int flags) { 7 | this->txn = txn; 8 | this->flags = flags; 9 | parent = nullptr; 10 | } 11 | 12 | TxnTracked::~TxnTracked() { 13 | } 14 | 15 | TxnWrap::TxnWrap(MDBX_env *env, MDBX_txn *txn) { 16 | this->env = env; 17 | this->txn = txn; 18 | this->flags = 0; 19 | this->ew = nullptr; 20 | } 21 | 22 | TxnWrap::~TxnWrap() { 23 | // Close if not closed already 24 | if (this->txn) { 25 | mdbx_txn_abort(txn); 26 | this->removeFromEnvWrap(); 27 | } 28 | } 29 | 30 | void TxnWrap::removeFromEnvWrap() { 31 | if (this->ew) { 32 | if (this->ew->currentWriteTxn == this) { 33 | this->ew->currentWriteTxn = this->parentTw; 34 | } 35 | else { 36 | auto it = std::find(ew->readTxns.begin(), ew->readTxns.end(), this); 37 | if (it != ew->readTxns.end()) { 38 | ew->readTxns.erase(it); 39 | } 40 | } 41 | this->ew = nullptr; 42 | } 43 | this->txn = nullptr; 44 | } 45 | 46 | NAN_METHOD(TxnWrap::ctor) { 47 | Nan::HandleScope scope; 48 | 49 | EnvWrap *ew = Nan::ObjectWrap::Unwrap(Local::Cast(info[0])); 50 | MDBX_txn_flags_t flags = MDBX_TXN_READWRITE; 51 | MDBX_txn *txn; 52 | TxnWrap *parentTw; 53 | if (info[1]->IsTrue() && ew->writeWorker) { // this is from a transaction callback 54 | txn = ew->writeWorker->AcquireTxn((int*) &flags); 55 | parentTw = nullptr; 56 | } else { 57 | if (info[1]->IsObject()) { 58 | Local options = Local::Cast(info[1]); 59 | 60 | // Get flags from options 61 | 62 | setFlagFromValue((int*) &flags, (int)MDBX_TXN_RDONLY, "readOnly", false, options); 63 | } else if (info[1]->IsNumber()) { 64 | flags = 
(MDBX_txn_flags_t) info[1]->IntegerValue(Nan::GetCurrentContext()).FromJust(); 65 | } 66 | MDBX_txn *parentTxn; 67 | if (info[2]->IsObject()) { 68 | parentTw = Nan::ObjectWrap::Unwrap(Local::Cast(info[2])); 69 | parentTxn = parentTw->txn; 70 | } else { 71 | parentTxn = nullptr; 72 | parentTw = nullptr; 73 | // Check existence of current write transaction 74 | if (0 == (flags & MDBX_TXN_RDONLY)) { 75 | if (ew->currentWriteTxn != nullptr) 76 | return Nan::ThrowError("You have already opened a write transaction in the current process, can't open a second one."); 77 | //fprintf(stderr, "begin sync txn"); 78 | auto writeWorker = ew->writeWorker; 79 | if (writeWorker) { 80 | parentTxn = writeWorker->AcquireTxn((int*) &flags); // see if we have a paused transaction 81 | // else we create a child transaction from the current batch transaction. TODO: Except in WRITEMAP mode, where we need to indicate that the transaction should not be committed 82 | } 83 | } 84 | } 85 | //fprintf(stderr, "txn_begin from txn.cpp %u %p\n", flags, parentTxn); 86 | int rc = mdbx_txn_begin(ew->env, parentTxn, (MDBX_txn_flags_t) flags, &txn); 87 | if (rc != 0) { 88 | if (rc == EINVAL) { 89 | return Nan::ThrowError("Invalid parameter, which on MacOS is often due to more transactions than available robust locked semaphors (see docs for more info)"); 90 | } 91 | return throwLmdbxError(rc); 92 | } 93 | } 94 | TxnWrap* tw = new TxnWrap(ew->env, txn); 95 | 96 | // Set the current write transaction 97 | if (0 == ((int)flags & (int)MDBX_TXN_RDONLY)) { 98 | ew->currentWriteTxn = tw; 99 | } 100 | else { 101 | ew->readTxns.push_back(tw); 102 | ew->currentReadTxn = txn; 103 | } 104 | tw->parentTw = parentTw; 105 | tw->flags = flags; 106 | tw->ew = ew; 107 | tw->Wrap(info.This()); 108 | 109 | return info.GetReturnValue().Set(info.This()); 110 | } 111 | 112 | int TxnWrap::begin(EnvWrap *ew, unsigned int flags) { 113 | this->ew = ew; 114 | this->flags = flags; 115 | MDBX_env *env = ew->env; 116 | unsigned int 
envFlags; 117 | mdbx_env_get_flags(env, &envFlags); 118 | if (flags & MDBX_RDONLY) { 119 | mdbx_txn_begin(env, nullptr, (MDBX_txn_flags_t) (flags & 0xf0000), &this->txn); 120 | } else { 121 | //fprintf(stderr, "begin sync txn %i\n", flags); 122 | 123 | if (ew->writeTxn) 124 | txn = ew->writeTxn->txn; 125 | else if (ew->writeWorker) { 126 | // try to acquire the txn from the current batch 127 | txn = ew->writeWorker->AcquireTxn((int*) &flags); 128 | //fprintf(stderr, "acquired %p %p %p\n", ew->writeWorker, txn, flags); 129 | } else { 130 | pthread_mutex_lock(ew->writingLock); 131 | txn = nullptr; 132 | } 133 | 134 | if (txn) { 135 | if (flags & TXN_ABORTABLE) { 136 | if (envFlags & MDBX_WRITEMAP) 137 | flags &= ~TXN_ABORTABLE; 138 | else { 139 | // child txn 140 | mdbx_txn_begin(env, this->txn, (MDBX_txn_flags_t) (flags & 0xf0000), &this->txn); 141 | TxnTracked* childTxn = new TxnTracked(txn, flags); 142 | childTxn->parent = ew->writeTxn; 143 | ew->writeTxn = childTxn; 144 | return 0; 145 | } 146 | } 147 | } else { 148 | mdbx_txn_begin(env, nullptr, (MDBX_txn_flags_t) (flags & 0xf0000), &this->txn); 149 | flags |= TXN_ABORTABLE; 150 | } 151 | ew->writeTxn = new TxnTracked(txn, flags); 152 | return 0; 153 | } 154 | // Set the current write transaction 155 | if (0 == (flags & MDBX_RDONLY)) { 156 | ew->currentWriteTxn = this; 157 | } 158 | else { 159 | ew->readTxns.push_back(this); 160 | ew->currentReadTxn = txn; 161 | ew->readTxnRenewed = true; 162 | } 163 | return 0; 164 | } 165 | extern "C" EXTERN void resetTxn(double twPointer, int flags) { 166 | TxnWrap* tw = (TxnWrap*) (size_t) twPointer; 167 | tw->reset(); 168 | } 169 | extern "C" EXTERN int renewTxn(double twPointer, int flags) { 170 | TxnWrap* tw = (TxnWrap*) (size_t) twPointer; 171 | return mdbx_txn_renew(tw->txn); 172 | } 173 | extern "C" EXTERN int commitTxn(double twPointer) { 174 | TxnWrap* tw = (TxnWrap*) (size_t) twPointer; 175 | int rc = mdbx_txn_commit(tw->txn); 176 | tw->removeFromEnvWrap(); 177 | 
return rc; 178 | } 179 | extern "C" EXTERN void abortTxn(double twPointer) { 180 | TxnWrap* tw = (TxnWrap*) (size_t) twPointer; 181 | mdbx_txn_abort(tw->txn); 182 | tw->removeFromEnvWrap(); 183 | } 184 | 185 | NAN_METHOD(TxnWrap::commit) { 186 | Nan::HandleScope scope; 187 | 188 | TxnWrap *tw = Nan::ObjectWrap::Unwrap(info.This()); 189 | 190 | if (!tw->txn) { 191 | return Nan::ThrowError("The transaction is already closed."); 192 | } 193 | int rc; 194 | WriteWorker* writeWorker = tw->ew->writeWorker; 195 | if (writeWorker) { 196 | // if (writeWorker->txn && env->writeMap) 197 | // rc = 0 198 | // else 199 | rc = mdbx_txn_commit(tw->txn); 200 | 201 | pthread_mutex_unlock(tw->ew->writingLock); 202 | } 203 | else 204 | rc = mdbx_txn_commit(tw->txn); 205 | //fprintf(stdout, "commit done\n"); 206 | tw->removeFromEnvWrap(); 207 | 208 | if (rc != 0) { 209 | return throwLmdbxError(rc); 210 | } 211 | } 212 | 213 | NAN_METHOD(TxnWrap::abort) { 214 | Nan::HandleScope scope; 215 | 216 | TxnWrap *tw = Nan::ObjectWrap::Unwrap(info.This()); 217 | 218 | if (!tw->txn) { 219 | return Nan::ThrowError("The transaction is already closed."); 220 | } 221 | 222 | mdbx_txn_abort(tw->txn); 223 | tw->removeFromEnvWrap(); 224 | } 225 | 226 | NAN_METHOD(TxnWrap::reset) { 227 | Nan::HandleScope scope; 228 | 229 | TxnWrap *tw = Nan::ObjectWrap::Unwrap(info.This()); 230 | 231 | if (!tw->txn) { 232 | return Nan::ThrowError("The transaction is already closed."); 233 | } 234 | tw->reset(); 235 | } 236 | void TxnWrap::reset() { 237 | ew->readTxnRenewed = false; 238 | mdbx_txn_reset(txn); 239 | } 240 | 241 | NAN_METHOD(TxnWrap::renew) { 242 | Nan::HandleScope scope; 243 | 244 | TxnWrap *tw = Nan::ObjectWrap::Unwrap(info.This()); 245 | 246 | if (!tw->txn) { 247 | return Nan::ThrowError("The transaction is already closed."); 248 | } 249 | 250 | int rc = mdbx_txn_renew(tw->txn); 251 | if (rc != 0) { 252 | return throwLmdbxError(rc); 253 | } 254 | } 255 | 256 | // This file contains code from the 
node-lmdb project 257 | // Copyright (c) 2013-2017 Timur Kristóf 258 | // Copyright (c) 2021 Kristopher Tate 259 | // Licensed to you under the terms of the MIT license 260 | // 261 | // Permission is hereby granted, free of charge, to any person obtaining a copy 262 | // of this software and associated documentation files (the "Software"), to deal 263 | // in the Software without restriction, including without limitation the rights 264 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 265 | // copies of the Software, and to permit persons to whom the Software is 266 | // furnished to do so, subject to the following conditions: 267 | 268 | // The above copyright notice and this permission notice shall be included in 269 | // all copies or substantial portions of the Software. 270 | 271 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 272 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 273 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 274 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 275 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 276 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 277 | // THE SOFTWARE. 
278 | 279 | -------------------------------------------------------------------------------- /test/check-commit.js: -------------------------------------------------------------------------------- 1 | import { open, levelup, bufferToKeyValue, keyValueToBuffer, ABORT } from '../node-index.js'; 2 | let db = open('test/testdata/test-db.mdb', { 3 | name: 'mydb', 4 | overlappingSync: true, 5 | pageSize: 16384, 6 | }) 7 | console.log(db.env.stat()) 8 | console.log('last value: ', db.get('test')) 9 | let newValue = Math.random() 10 | console.log('putting new value', newValue) 11 | db.put('test', newValue) 12 | 13 | -------------------------------------------------------------------------------- /test/cluster.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var assert = require('assert'); 4 | var cluster = require('cluster'); 5 | var path = require('path'); 6 | var numCPUs = require('os').cpus().length; 7 | 8 | var lmdb = require('..'); 9 | const MAX_DB_SIZE = 256 * 1024 * 1024; 10 | 11 | if (cluster.isMaster) { 12 | 13 | // The master process 14 | 15 | var env = new lmdb.Env(); 16 | env.open({ 17 | path: path.resolve(__dirname, './testdata'), 18 | maxDbs: 10, 19 | mapSize: MAX_DB_SIZE, 20 | maxReaders: 126 21 | }); 22 | 23 | var dbi = env.openDbi({ 24 | name: 'cluster', 25 | create: true 26 | }); 27 | 28 | var workerCount = Math.min(numCPUs * 2, 20); 29 | var value = Buffer.from('48656c6c6f2c20776f726c6421', 'hex'); 30 | 31 | // This will start as many workers as there are CPUs available. 32 | var workers = []; 33 | for (var i = 0; i < workerCount; i++) { 34 | var worker = cluster.fork(); 35 | workers.push(worker); 36 | } 37 | 38 | var messages = []; 39 | 40 | workers.forEach(function(worker) { 41 | worker.on('message', function(msg) { 42 | messages.push(msg); 43 | // Once every worker has replied with a response for the value 44 | // we can exit the test. 
45 | if (messages.length === workerCount) { 46 | dbi.close(); 47 | env.close(); 48 | for (var i = 0; i < messages.length; i ++) { 49 | assert(messages[i] === value.toString('hex')); 50 | } 51 | process.exit(0); 52 | } 53 | }); 54 | }); 55 | 56 | var txn = env.beginTxn(); 57 | for (var i = 0; i < workers.length; i++) { 58 | txn.putBinary(dbi, 'key' + i, value); 59 | } 60 | 61 | txn.commit(); 62 | 63 | for (var i = 0; i < workers.length; i++) { 64 | var worker = workers[i]; 65 | worker.send({key: 'key' + i}); 66 | }; 67 | 68 | } else { 69 | 70 | // The worker process 71 | 72 | var env = new lmdb.Env(); 73 | env.open({ 74 | path: path.resolve(__dirname, './testdata'), 75 | maxDbs: 10, 76 | mapSize: MAX_DB_SIZE, 77 | maxReaders: 126, 78 | readOnly: true 79 | }); 80 | 81 | var dbi = env.openDbi({ 82 | name: 'cluster' 83 | }); 84 | 85 | process.on('message', function(msg) { 86 | if (msg.key) { 87 | var txn = env.beginTxn({readOnly: true}); 88 | var value = txn.getBinary(dbi, msg.key); 89 | 90 | if (value === null) { 91 | process.send(""); 92 | } else { 93 | process.send(value.toString('hex')); 94 | } 95 | 96 | txn.abort(); 97 | } 98 | }); 99 | 100 | } 101 | -------------------------------------------------------------------------------- /test/deno.ts: -------------------------------------------------------------------------------- 1 | import { open, IF_EXISTS, asBinary } from '../mod.ts'; 2 | import chai from "https://cdn.skypack.dev/chai@4.3.4?dts"; 3 | const { assert, should } = chai; 4 | should(); 5 | try { 6 | Deno.removeSync('test/testdata', { recursive: true }); 7 | } catch(error) { 8 | if (error.name != 'NotFound') 9 | throw error 10 | } 11 | let db = open('test/testdata', { 12 | name: 'deno-db1', 13 | useVersions: true, 14 | overlappingSync: true, 15 | maxReaders: 100, 16 | compression: { 17 | threshold: 128, 18 | }, 19 | }); 20 | let db2 = db.openDB({ 21 | name: 'deno-db4', 22 | create: true, 23 | dupSort: true, 24 | }); 25 | let tests: { name: string, test: 
Function }[] = [];
// Minimal test harness: collect { name, test } pairs and run them at the end.
let test = (name: string, test: Function) => {
	tests.push({ name, test });
};
test('query of keys', async function() {
	let keys = [
		Symbol.for('test'),
		false,
		true,
		-33,
		-1.1,
		3.3,
		5,
		[5, 4],
		[5, 55],
		[5, 'words after number'],
		[6, 'abc'],
		['Test', null, 1],
		['Test', Symbol.for('test'), 2],
		['Test', 'not null', 3],
		'hello',
		['hello', 3],
		['hello', 'world'],
		['uid', 'I-7l9ySkD-wAOULIjOEnb', 'Rwsu6gqOw8cqdCZG5_YNF'],
		'z'
	];
	for (let key of keys) {
		await db.put(key, 3);
	}
	// Forward range should return the keys in ordered-binary order.
	let returnedKeys = [];
	for (let { key, value } of db.getRange({
		start: Symbol.for('A')
	})) {
		returnedKeys.push(key);
		value.should.equal(db.get(key));
	}
	keys.should.deep.equal(returnedKeys);

	// Reverse range should return the same keys, reversed.
	returnedKeys = [];
	for (let { key, value } of db.getRange({
		reverse: true,
	})) {
		returnedKeys.unshift(key);
		value.should.equal(db.get(key));
	}
	keys.should.deep.equal(returnedKeys);
});
test('reverse query range', async function() {
	const keys = [
		['Test', 100, 1],
		['Test', 10010, 2],
		['Test', 10010, 3]
	];
	for (let key of keys)
		await db.put(key, 3);
	// start === end in reverse should yield an empty range
	for (let { key, value } of db.getRange({
		start: ['Test', null],
		end: ['Test', null],
		reverse: true
	})) {
		throw new Error('Should not return any results');
	}
});
test('more reverse query range', async function() {
	db.putSync('0Sdts8FwTqt2Hv5j9KE7ebjsQcFbYDdL/0Sdtsud6g8YGhPwUK04fRVKhuTywhnx8', 1, 1, null);
	db.putSync('0Sdts8FwTqt2Hv5j9KE7ebjsQcFbYDdL/0Sdu0mnkm8lS38yIZa4Xte3Q3JUoD84V', 1, 1, null);
	const options =
	{
		start: '0Sdts8FwTqt2Hv5j9KE7ebjsQcFbYDdL/0SdvKaMkMNPoydWV6HxZbFtKeQm5sqz3',
		end: '0Sdts8FwTqt2Hv5j9KE7ebjsQcFbYDdL/00000000dKZzSn03pte5dWbaYfrZl4hG',
		reverse: true
	};
	let returnedKeys = Array.from(db.getKeys(options));
	returnedKeys.should.deep.equal(['0Sdts8FwTqt2Hv5j9KE7ebjsQcFbYDdL/0Sdu0mnkm8lS38yIZa4Xte3Q3JUoD84V', '0Sdts8FwTqt2Hv5j9KE7ebjsQcFbYDdL/0Sdtsud6g8YGhPwUK04fRVKhuTywhnx8']);
});
test('clear between puts', async function() {
	db.put('key0', 'zero');
	db.clearAsync();
	await db.put('key1', 'one');
	assert.equal(db.get('key0'), undefined);
	assert.equal(db.get('hello'), undefined);
	assert.equal(db.get('key1'), 'one');
});

test('string', async function() {
	await db.put('key1', 'Hello world!');
	let data = db.get('key1');
	data.should.equal('Hello world!');
	await db.remove('key1');
	let data2 = db.get('key1');
	assert.equal(data2, undefined);
});
test('string with version', async function() {
	await db.put('key1', 'Hello world!', 53252);
	let entry = db.getEntry('key1');
	entry.value.should.equal('Hello world!');
	entry.version.should.equal(53252);
	// removal with a mismatched version must be a no-op
	(await db.remove('key1', 33)).should.equal(false);
	entry = db.getEntry('key1');
	entry.value.should.equal('Hello world!');
	entry.version.should.equal(53252);
	(await db.remove('key1', 53252)).should.equal(true);
	entry = db.getEntry('key1');
	assert.equal(entry, undefined);
});
test('string with version branching', async function() {
	await db.put('key1', 'Hello world!', 53252);
	let entry = db.getEntry('key1');
	entry.value.should.equal('Hello world!');
	entry.version.should.equal(53252);
	// conditional writes: the callback's puts only take effect when the
	// version condition holds
	(await db.ifVersion('key1', 777, () => {
		db.put('newKey', 'test', 6);
		db2.put('keyB', 'test', 6);
	})).should.equal(false);
	assert.equal(db.get('newKey'), undefined);
	assert.equal(db2.get('keyB'), undefined);
	let result = (await db.ifVersion('key1', 53252, () => {
		db.put('newKey', 'test', 6);
		db2.put('keyB', 'test', 6);
	}));
	assert.equal(db.get('newKey'), 'test');
	assert.equal(db2.get('keyB'), 'test');
	assert.equal(result, true);
	result = await db.ifNoExists('key1', () => {
		db.put('newKey', 'changed', 7);
	});
	assert.equal(db.get('newKey'), 'test');
	assert.equal(result, false);
	result = await db.ifNoExists('key-no-exist', () => {
		db.put('newKey', 'changed', 7);
	});
	assert.equal(db.get('newKey'), 'changed');
	assert.equal(result, true);

	result = await db2.ifVersion('key-no-exist', IF_EXISTS, () => {
		db.put('newKey', 'changed again', 7);
	});
	assert.equal(db.get('newKey'), 'changed');
	assert.equal(result, false);

	result = await db2.ifVersion('keyB', IF_EXISTS, () => {
		db.put('newKey', 'changed again', 7);
	});
	assert.equal(db.get('newKey'), 'changed again');
	assert.equal(result, true);

	result = await db2.remove('key-no-exists');
	assert.equal(result, true);
	result = await db2.remove('key-no-exists', IF_EXISTS);
	assert.equal(result, false);
});
test('string with compression and versions', async function() {
	let str = expand('Hello world!');
	await db.put('key1', str, 53252);
	let entry = db.getEntry('key1');
	entry.value.should.equal(str);
	entry.version.should.equal(53252);
	(await db.remove('key1', 33)).should.equal(false);
	let data = db.get('key1');
	data.should.equal(str);
	(await db.remove('key1', 53252)).should.equal(true);
	data = db.get('key1');
	assert.equal(data, undefined);
});
test('repeated compressions', async function() {
	let str = expand('Hello world!');
	db.put('key1', str, 53252);
	db.put('key1', str, 53253);
	db.put('key1', str, 53254);
	await db.put('key1', str, 53255);
	let entry = db.getEntry('key1');
	entry.value.should.equal(str);
	entry.version.should.equal(53255);
	(await db.remove('key1')).should.equal(true);
});

test('forced compression due to starting with 255', async function() {
	// a leading 255 byte collides with the compression header indicator,
	// so the store must escape it by compressing
	await db.put('key1', asBinary(new Uint8Array([255])));
	let entry = db.getBinary('key1');
	entry.length.should.equal(1);
	entry[0].should.equal(255);
	(await db.remove('key1')).should.equal(true);
});
test('store objects', async function() {
	let dataIn = { foo: 3, bar: true };
	await db.put('key1', dataIn);
	let dataOut = db.get('key1');
	assert.equal(JSON.stringify(dataIn), JSON.stringify(dataOut));
	db.removeSync('not-there').should.equal(false);
});

// Grow a short string by repeated doubling so it exceeds the
// compression threshold.
function expand(str: string): string {
	str = '(' + str + ')';
	str = str + str;
	str = str + str;
	str = str + str;
	str = str + str;
	str = str + str;
	return str;
}

// Run all collected tests sequentially, reporting each result.
let hasErrors;
for (let { name, test } of tests) {
	try {
		await test();
		console.log('Passed:', name);
	} catch (error) {
		hasErrors = true;
		console.error('Failed:', name, error);
	}
}
if (hasErrors)
	throw new Error('Unit tests failed');
-------------------------------------------------------------------------------- /test/module.test.mjs: --------------------------------------------------------------------------------
import { open, getLastVersion } from '../index.mjs';

describe('Module loads ', function() {
	it('has open', function() {
		'function'.should.equal(typeof open);
	});
});
-------------------------------------------------------------------------------- /test/threads.cjs: --------------------------------------------------------------------------------
var assert = require('assert');
const { Worker, isMainThread, parentPort } = require('worker_threads');
var path = require('path');
var numCPUs = require('os').cpus().length;

const { open } = require('../dist/index.cjs');
const MAX_DB_SIZE = 256 * 1024 * 1024;
| if (isMainThread) { 9 | var inspector = require('inspector') 10 | // inspector.open(9331, null, true);debugger 11 | 12 | // The main thread 13 | 14 | let db = open({ 15 | path: path.resolve(__dirname, './testdata'), 16 | maxDbs: 10, 17 | mapSize: MAX_DB_SIZE, 18 | maxReaders: 126, 19 | encoding: 'binary', 20 | }); 21 | 22 | var workerCount = Math.min(numCPUs * 2, 20); 23 | var value = Buffer.from('48656c6c6f2c20776f726c6421', 'hex'); 24 | 25 | // This will start as many workers as there are CPUs available. 26 | var workers = []; 27 | for (var i = 0; i < workerCount; i++) { 28 | var worker = new Worker(__filename); 29 | workers.push(worker); 30 | } 31 | 32 | var messages = []; 33 | 34 | workers.forEach(function(worker) { 35 | worker.on('message', function(msg) { 36 | messages.push(msg); 37 | // Once every worker has replied with a response for the value 38 | // we can exit the test. 39 | if (messages.length === workerCount) { 40 | db.close(); 41 | for (var i = 0; i < messages.length; i ++) { 42 | assert(messages[i] === value.toString('hex')); 43 | } 44 | process.exit(0); 45 | } 46 | }); 47 | }); 48 | 49 | let last 50 | for (var i = 0; i < workers.length; i++) { 51 | last = db.put('key' + i, value); 52 | } 53 | 54 | last.then(() => { 55 | for (var i = 0; i < workers.length; i++) { 56 | var worker = workers[i]; 57 | worker.postMessage({key: 'key' + i}); 58 | }; 59 | }); 60 | 61 | } else { 62 | 63 | // The worker process 64 | let db = open({ 65 | path: path.resolve(__dirname, './testdata'), 66 | maxDbs: 10, 67 | mapSize: MAX_DB_SIZE, 68 | maxReaders: 126 69 | }); 70 | 71 | 72 | process.on('message', async function(msg) { 73 | if (msg.key) { 74 | var value = db.get(msg.key); 75 | if (msg.key == 'key1') 76 | await db.put(msg.key, 'updated'); 77 | if (value === null) { 78 | parentPort.postMessage(""); 79 | } else { 80 | parentPort.postMessage(value.toString('hex')); 81 | } 82 | 83 | } 84 | }); 85 | 86 | } 87 | 
-------------------------------------------------------------------------------- /test/types/index.test-d.ts: -------------------------------------------------------------------------------- 1 | import { expectType } from 'tsd' 2 | import { open, RootDatabase } from '../..' 3 | 4 | const path = 'type-test-store' 5 | 6 | /* tsd compile-time checks for both open() call signatures. */ expectType(open(path, { compression: true })) 7 | expectType(open({ path, compression: true })) 8 | 9 | const defaultStore = open({ path, compression: true }) 10 | expectType(await defaultStore.put('foo', { bar: 'baz' })) 11 | expectType(defaultStore.get('foo')) 12 | 13 | const typedStore = open({ path, compression: true }) 14 | expectType(typedStore.get('foo')) 15 | -------------------------------------------------------------------------------- /update.deps.mdbx.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DIR=$(cd "$(dirname "$0")"; pwd) 4 | set -ex 5 | cd $DIR/.. 6 | 7 | if [ ! -d "libmdbx" ] ; then 8 | git clone git@github.com:erthink/libmdbx.git 9 | cd libmdbx 10 | else 11 | cd libmdbx 12 | git pull 13 | fi 14 | 15 | make dist 16 | 17 | todir=$DIR/dependencies/libmdbx 18 | rm -rf $todir 19 | cp -R dist $todir 20 | -------------------------------------------------------------------------------- /util/RangeIterable.js: -------------------------------------------------------------------------------- /* Lazy, array-like iterable used for range queries. SKIP is a unique sentinel a map() step returns (via filter) to drop an element. */ 1 | const SKIP = {}; 2 | if (!Symbol.asyncIterator) { 3 | Symbol.asyncIterator = Symbol.for('Symbol.asyncIterator'); 4 | } 5 | 6 | export class RangeIterable { 7 | constructor(sourceArray) { 8 | if (sourceArray) { 9 | this.iterate = sourceArray[Symbol.iterator].bind(sourceArray); 10 | } 11 | } 12 | /* Lazily maps each element through func; tolerates promise-returning sources and callbacks, and SKIP results. */ map(func) { 13 | let source = this; 14 | let result = new RangeIterable(); 15 | result.iterate = (async) => { 16 | let iterator = source[Symbol.iterator](async); 17 | return { 18 | next(resolvedResult) { 19 | let result; 20 | do { 21 | let iteratorResult; 22 | if (resolvedResult) { 23 |
iteratorResult = resolvedResult; 24 | resolvedResult = null; // don't go in this branch on next iteration 25 | } else { 26 | iteratorResult = iterator.next(); 27 | /* async source: resolve the promise, then re-enter next() with the settled result */ if (iteratorResult.then) { 28 | return iteratorResult.then(iteratorResult => this.next(iteratorResult)); 29 | } 30 | } 31 | if (iteratorResult.done === true) { 32 | this.done = true; 33 | return iteratorResult; 34 | } 35 | result = func(iteratorResult.value); 36 | if (result && result.then) { 37 | return result.then(result => 38 | result == SKIP ? 39 | this.next() : 40 | { 41 | value: result 42 | }); 43 | } 44 | } while(result == SKIP) 45 | return { 46 | value: result 47 | }; 48 | }, 49 | return() { 50 | return iterator.return(); 51 | }, 52 | throw() { 53 | return iterator.throw(); 54 | } 55 | }; 56 | }; 57 | return result; 58 | } 59 | /* Both iteration protocols delegate to the same lazy iterate() factory; the active iterator is cached on this.iterator. */ [Symbol.asyncIterator]() { 60 | return this.iterator = this.iterate(); 61 | } 62 | [Symbol.iterator]() { 63 | return this.iterator = this.iterate(); 64 | } 65 | /* filter() rides on map(): elements failing func map to SKIP and are dropped. */ filter(func) { 66 | return this.map(element => func(element) ?
element : SKIP); 67 | } 68 | 69 | /* Eagerly drains the iterable, calling callback per element. NOTE(review): promised results from an async source are not awaited here — confirm sync-only usage. */ forEach(callback) { 70 | let iterator = this.iterator = this.iterate(); 71 | let result; 72 | while ((result = iterator.next()).done !== true) { 73 | callback(result.value); 74 | } 75 | } 76 | /* Yields this iterable's elements, then secondIterable's. NOTE(review): next() does not unwrap promise results, so the hand-off to the second iterable appears to assume a synchronous source — verify for async iteration. */ concat(secondIterable) { 77 | let concatIterable = new RangeIterable(); 78 | concatIterable.iterate = (async) => { 79 | let iterator = this.iterator = this.iterate(); 80 | let isFirst = true; 81 | let concatIterator = { 82 | next() { 83 | let result = iterator.next(); 84 | if (isFirst && result.done) { 85 | isFirst = false; 86 | iterator = secondIterable[Symbol.iterator](async); 87 | return iterator.next(); 88 | } 89 | return result; 90 | }, 91 | return() { 92 | return iterator.return(); 93 | }, 94 | throw() { 95 | return iterator.throw(); 96 | } 97 | }; 98 | return concatIterator; 99 | }; 100 | return concatIterable; 101 | } 102 | next() { 103 | if (!this.iterator) 104 | this.iterator = this.iterate(); 105 | return this.iterator.next(); 106 | } 107 | toJSON() { 108 | if (this.asArray && this.asArray.forEach) { 109 | return this.asArray; 110 | } 111 | throw new Error('Can not serialize async iteratables without first calling resolveJSON'); 112 | //return Array.from(this) 113 | } 114 | /* Materializes the iteration into an array, cached on _asArray; returns the array synchronously for sync sources (the executor runs to completion before return), otherwise a promise resolving to it. */ get asArray() { 115 | if (this._asArray) 116 | return this._asArray; 117 | let promise = new Promise((resolve, reject) => { 118 | let iterator = this.iterate(); 119 | let array = []; 120 | let iterable = this; 121 | function next(result) { 122 | while (result.done !== true) { 123 | if (result.then) { 124 | return result.then(next); 125 | } else { 126 | array.push(result.value); 127 | } 128 | result = iterator.next(); 129 | } 130 | array.iterable = iterable; 131 | resolve(iterable._asArray = array); 132 | } 133 | next(iterator.next()); 134 | }); 135 | promise.iterable = this; 136 | return this._asArray || (this._asArray = promise); 137 | } 138 | resolveData() { 139 | return this.asArray; 140 | } 141 | } 142 |
// util/when.js
/**
 * Applies callback to a value that may or may not be a thenable.
 * For a thenable, chains callback (and errback, when given) via .then()
 * and returns the resulting promise; for a plain value, invokes callback
 * synchronously and returns its result directly.
 */
export function when(maybePromise, callback, errback) {
	const isThenable = maybePromise && maybePromise.then;
	if (!isThenable) {
		// Plain value: no promise machinery, call through synchronously.
		return callback(maybePromise);
	}
	if (errback) {
		return maybePromise.then(callback, errback);
	}
	return maybePromise.then(callback);
}