├── .babelrc ├── .eslintrc.json ├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ └── ci.yml ├── .gitignore ├── .prettierrc ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── PULL_REQUEST_TEMPLATE.md ├── README.md ├── benchmarks ├── bench_read.ts ├── bench_write.ts └── buffer.ts ├── docs ├── API.md ├── images │ ├── 2.png │ ├── 3.png │ └── leveldb-log2.png └── implementation │ ├── Compaction.md │ ├── Iterator.md │ ├── MemTable.md │ ├── Repair.md │ ├── SSTable.md │ └── SnapShots.md ├── fixtures ├── copydb.ts ├── dbpath.ts ├── random.ts └── runner.ts ├── package-lock.json ├── package.json ├── port ├── deno │ └── index.ts └── node │ ├── cleanup.ts │ └── index.ts ├── scripts ├── Transformer.ts └── transform2deno.ts ├── src ├── BitBuffer.ts ├── BloomFilter.ts ├── Buffer.ts ├── Builder.ts ├── Cache.ts ├── Coding.ts ├── Compaction.ts ├── Comparator.ts ├── Crc32.ts ├── DBHelper.ts ├── DBRepairer.ts ├── Database.ts ├── Env.ts ├── Filename.ts ├── Format.ts ├── Hash.ts ├── IteratorHelper.ts ├── LRUCache.ts ├── Lockfile.ts ├── LogFormat.ts ├── LogReader.ts ├── LogRecord.ts ├── LogWriter.ts ├── MemTable.ts ├── Merger.ts ├── Options.ts ├── SSTable.ts ├── SSTableBlock.ts ├── SSTableBuilder.ts ├── SSTableCache.ts ├── SSTableFilterBlock.ts ├── SSTableFooter.ts ├── Skiplist.ts ├── SkiplistNode.ts ├── Slice.ts ├── Snapshot.ts ├── Status.ts ├── Version.ts ├── VersionBuilder.ts ├── VersionEdit.ts ├── VersionEditRecord.ts ├── VersionFormat.ts ├── VersionSet.ts ├── WriteBatch.ts ├── WriterQueue.ts ├── Yallist.ts └── index.ts ├── tests ├── BitBuffer.test.ts ├── BloomFilter.test.ts ├── IteratorHelper.test.ts ├── batch.test.ts ├── compaction.test.ts ├── db.test.ts ├── dumpmemtable.test.ts ├── filter.test.ts ├── iterator.test.ts ├── lock.test.ts ├── memtable.test.ts ├── repair.test.ts ├── snapshot.test.ts ├── sstable.test.ts └── status.test.ts ├── tsconfig.json └── yarn.lock /.babelrc: 
-------------------------------------------------------------------------------- 1 | { 2 | "presets": [ 3 | ["@babel/preset-typescript"], 4 | ["@babel/preset-env", { 5 | "loose": true, 6 | "useBuiltIns": false, 7 | "targets": { 8 | "node": "current" 9 | } 10 | }] 11 | ], 12 | "plugins": [ 13 | "@babel/plugin-syntax-bigint", 14 | "@babel/plugin-proposal-class-properties" 15 | ] 16 | } -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "browser": true, 4 | "es6": true 5 | }, 6 | "extends": [ 7 | "plugin:@typescript-eslint/recommended", 8 | "prettier", 9 | "prettier/@typescript-eslint" 10 | ], 11 | "globals": { 12 | "Atomics": "readonly", 13 | "SharedArrayBuffer": "readonly" 14 | }, 15 | "parser": "@typescript-eslint/parser", 16 | "parserOptions": { 17 | "ecmaVersion": 2018, 18 | "sourceType": "module" 19 | }, 20 | "plugins": [ 21 | "@typescript-eslint" 22 | ], 23 | "rules": { 24 | "@typescript-eslint/ban-ts-ignore": [ 25 | "off" 26 | ] 27 | } 28 | } -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [heineiuo] 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | 5 | --- 6 | 7 | **Describe the bug** 8 | A clear and concise description of what the bug is. 9 | 10 | **To Reproduce** 11 | Steps to reproduce the behavior: 12 | 1. Options '...' 13 | 2. Operators '....' 14 | 4. See error 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 
18 | 19 | **Screenshots** 20 | If applicable, add screenshots to help explain your problem. 21 | 22 | **Version** 23 | - Rippledb version [e.g. 2.4] 24 | 25 | **Platform (please complete the following information):** 26 | - OS: [e.g. macOS] 27 | - Version [e.g. 10.15.1] 28 | - Node.js [e.g. 13.3.0] 29 | 30 | **Additional context** 31 | Add any other context about the problem here. 32 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Rippledb CI 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | if: | 9 | github.event_name == 'push' || 10 | !startsWith(github.event.pull_request.head.label, 'heineiuo:') 11 | 12 | strategy: 13 | matrix: 14 | node-version: [12.x] 15 | 16 | steps: 17 | - uses: actions/checkout@v1 18 | 19 | - name: Use Node.js ${{ matrix.node-version }} 20 | uses: actions/setup-node@v1 21 | with: 22 | node-version: ${{ matrix.node-version }} 23 | 24 | - name: Cache 25 | id: cache-node-modules 26 | uses: actions/cache@v2 27 | with: 28 | path: | 29 | **/node_modules 30 | key: ${{ runner.os }}-${{ hashFiles('**/package-lock.json') }} 31 | 32 | - name: Install Dependencies 33 | if: steps.cache-node-modules.outputs.cache-hit != 'true' 34 | run: | 35 | npm -v 36 | npm install 37 | env: 38 | CI: true 39 | 40 | - name: Build 41 | run: | 42 | npm run build 43 | npm run build:tsc 44 | npm run build:fixture 45 | npm run build:bench 46 | env: 47 | CI: true 48 | 49 | - name: Post Ripple Deno 50 | if: | 51 | github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/') 52 | env: 53 | RIPPLEDBBOT_PAT: ${{ secrets.RIPPLEDBBOT_PAT }} 54 | run: | 55 | git clone --depth 1 https://${RIPPLEDBBOT_PAT}@github.com/heineiuo/rippledb-deno.git rippledb-deno 56 | npm run build:scripts 57 | npm run build:deno 58 | cd rippledb-deno 59 | git config user.email "heineiuo@gmail.com" 60 | 
git config user.name "heineiuo" 61 | git add . 62 | git diff-index --quiet HEAD || git commit --message "Update Rippledb Deno" 63 | git push origin master 64 | cd .. 65 | 66 | - name: Benchmarks 67 | run: | 68 | npm run bench --total=10000 --runner=10 69 | env: 70 | CI: true 71 | 72 | - name: Tests 73 | run: | 74 | npx jest tests --coverage 75 | env: 76 | CI: true 77 | 78 | - name: Coveralls 79 | uses: coverallsapp/github-action@master 80 | with: 81 | github-token: ${{ secrets.github_token }} 82 | 83 | - name: Create Changelogs 84 | if: | 85 | github.event_name == 'push' && contains(github.ref, 'refs/tags/') 86 | id: changelog 87 | uses: heineiuo/create-changelogs@master 88 | 89 | - name: Tag rippledb-deno 90 | if: | 91 | github.event_name == 'push' && contains(github.ref, 'refs/tags/') 92 | run: | 93 | cd rippledb-deno 94 | echo new tag $TAG_NAME 95 | git tag $TAG_NAME 96 | git push 97 | git push --tags 98 | git push --follow-tags 99 | cd .. 100 | env: 101 | TAG_NAME: ${{ steps.changelog.outputs.tag_name }} 102 | 103 | - name: Publish to NPM 104 | if: | 105 | github.event_name == 'push' && contains(github.ref, 'refs/tags/') 106 | run: | 107 | echo '//registry.npmjs.org/:_authToken=${{secrets.NPM_TOKEN}}' > ~/.npmrc 108 | npm publish 109 | 110 | - name: Create Release 111 | if: | 112 | github.event_name == 'push' && contains(github.ref, 'refs/tags/') 113 | id: create_release 114 | uses: actions/create-release@latest 115 | env: 116 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 117 | with: 118 | tag_name: ${{ github.ref }} 119 | release_name: ${{ github.ref }} 120 | body: ${{ steps.changelog.outputs.changelogs }} 121 | draft: false 122 | prerelease: 123 | ${{ steps.changelog.outputs.release_type == 'prerelease' }} 124 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | *.log* 3 | .db 4 | Thumb.db 5 | .DS_Store 6 | dist 7 | 
build 8 | flow-coverage 9 | coverage 10 | .vscode 11 | report.*.json 12 | benchmarks/*.js 13 | fixtures/*.js 14 | scripts/*.js 15 | flamegraph.html 16 | rippledb-deno -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "proseWrap": "always", 3 | "trailingComma": "all", 4 | "printWidth": 80 5 | } -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## v0.1.2 (2019-10-28) 2 | 3 | * Add TableCache support (#72) 4 | * Add BlockCache support (#73) 5 | * Remove module buffer and assert (#74) 6 | 7 | ## v0.1.1 (2019-10-25) 8 | 9 | * fix: iterator got deleted key (#63) 10 | * fix: db.iterator skipped some data (#68) 11 | 12 | ## v0.1.0 (2019-10-23) 13 | 14 | * fix: db.del not working (#61) 15 | * fix: FileSet added duplicated file (#60) 16 | 17 | ## v0.0.4 (2019-10-22) 18 | 19 | * Add Iterator API -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at heineiuo@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018-present Zejin Zhuang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or 
substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # [Rippledb](https://rippledb.github.io/) · GitHub Actions status Coverage status npm version Join the chat at https://gitter.im/heineiuo/rippledb 2 | 3 | 4 | Rippledb is an embeddable key-value database engine in pure TypeScript, based on LSM-Tree, Inspired by LevelDB. 5 | 6 | * **Pure TypeScript:** Rippledb is totally written in TypeScript, and runs on different 7 | platforms after being compiled to JavaScript. 8 | * **Lightweight:** Rippledb has only 7k+ source code, and smaller than 1MB after compiled.Rippledb use zero third party modules. 9 | * **Embeddable:** Rippledb can be embedded in node.js application (or other JavaScript Runtime Environments) very easily. 10 | 11 | 12 | ## Installation 13 | 14 | Install with npm: 15 | 16 | ``` 17 | npm install rippledb 18 | ``` 19 | 20 | Install with Yarn: 21 | 22 | ``` 23 | yarn add rippledb 24 | ``` 25 | 26 | 27 | ## Documentation 28 | 29 | You can find the React documentation on the [website](https://rippledb.github.io). 30 | 31 | Check out the [Get Started](https://rippledb.github.io/docs/) page for a quick overview. 
32 | 33 | 34 | ## Examples 35 | 36 | ```ts 37 | import path from 'path' 38 | import { Database } from 'rippledb' 39 | 40 | async function main(){ 41 | const db = new Database(path.resolve(__dirname, './db')) 42 | await db.put('foo', 'bar') 43 | console.log( 44 | new TextDecoder().decode(await db.get('foo')) 45 | ) // 'bar' 46 | } 47 | 48 | main() 49 | ``` 50 | 51 | 52 | ## Roadmap 53 | 54 | - [x] Release 1.0 (2020-7-7) 55 | - [ ] Support [Deno](https://deno.land) (2020-9-1) 56 | 57 | ## Benchmark 58 | 59 | ```log 60 | environment : GitHub Action 61 | key : 16 bytes 62 | value : 100 bytes 63 | total : 10000 64 | runners : 10 65 | fillrandom : 823.87 ms total; 82.39 us/op 66 | ``` 67 | 68 | ## Compatibility 69 | 70 | |node.js|Deno| 71 | |-|-| 72 | |`>=v10.0.0`|WIP| 73 | 74 | ## License 75 | 76 | [MIT License](./LICENSE) 77 | -------------------------------------------------------------------------------- /benchmarks/bench_read.ts: -------------------------------------------------------------------------------- 1 | import { Database } from "../build/port/node"; 2 | import { random } from "../fixtures/random"; 3 | import { createDir } from "../fixtures/dbpath"; 4 | import fs from "fs"; 5 | import path from "path"; 6 | import { argv } from "yargs"; 7 | 8 | function now(): number { 9 | return Number(process.hrtime.bigint()) / Math.pow(10, 6); 10 | } 11 | 12 | async function bench(total: number): Promise { 13 | const dataset = []; 14 | for (let i = 0; i < total; i++) { 15 | dataset.push([random(16), random(100)]); 16 | } 17 | 18 | const dbpath = createDir("bench"); 19 | const db = new Database(dbpath); 20 | 21 | const startTime = now(); 22 | 23 | let count = 0; 24 | for await (const entry of db.iterator()) { 25 | if (entry) count++; 26 | } 27 | 28 | if (total !== count) 29 | throw new Error(`Data lost: except ${total} receivec ${count}`); 30 | 31 | const endTime = now(); 32 | const totalTime = endTime - startTime; 33 | 34 | const file = await fs.promises.open( 35 | 
path.resolve(__dirname, "../bench.log"), 36 | "a+", 37 | ); 38 | const log = ` 39 | time : ${new Date().toISOString()} 40 | key : 16 bytes 41 | value : 100 bytes 42 | total : ${total} 43 | speed : ${totalTime.toFixed(2)} ms total; ${( 44 | (totalTime / total) * 45 | 1000 46 | ).toFixed(2)} us/op 47 | `; 48 | console.log(log); 49 | await file.appendFile(log); 50 | await db.close(); 51 | } 52 | 53 | bench(parseInt(argv.total as string)); 54 | -------------------------------------------------------------------------------- /benchmarks/bench_write.ts: -------------------------------------------------------------------------------- 1 | import { Database } from "../build/port/node"; 2 | import { Buffer } from "../build/src/Buffer"; 3 | import { random } from "../fixtures/random"; 4 | import { createDir, cleanup } from "../fixtures/dbpath"; 5 | import fs from "fs"; 6 | import path from "path"; 7 | import { argv } from "yargs"; 8 | import { allocRunner } from "../fixtures/runner"; 9 | 10 | function now(): number { 11 | return Number(process.hrtime.bigint()) / Math.pow(10, 6); 12 | } 13 | 14 | async function bench(total: number, runnerCount: number): Promise { 15 | try { 16 | const dataset = []; 17 | for (let i = 0; i < total; i++) { 18 | const strEntry = random(16, 100); 19 | dataset.push([ 20 | Buffer.fromUnknown(strEntry[0]), 21 | Buffer.fromUnknown(strEntry[1]), 22 | ]); 23 | } 24 | 25 | const dbpath = createDir("bench"); 26 | cleanup(dbpath); 27 | 28 | const db = new Database(dbpath, { lockfileStale: 10 }); 29 | await db.ok(); 30 | 31 | console.log("db: ok"); 32 | 33 | const startTime = now(); 34 | 35 | await allocRunner(runnerCount, db, dataset); 36 | 37 | const endTime = now(); 38 | const totalTime = endTime - startTime; 39 | 40 | const file = await fs.promises.open( 41 | path.resolve(__dirname, "../bench.log"), 42 | "a+", 43 | ); 44 | const log = ` 45 | time : ${new Date().toISOString()} 46 | key : 16 bytes 47 | value : 100 bytes 48 | total : ${total} 49 | runners : 
${runnerCount} 50 | speed : ${totalTime.toFixed(2)} ms total; ${( 51 | (totalTime / total) * 52 | 1000 53 | ).toFixed(2)} us/op 54 | `; 55 | console.log(log); 56 | await file.appendFile(log); 57 | await db.close(); 58 | } catch (e) { 59 | console.error(e); 60 | process.exit(1); 61 | } 62 | } 63 | 64 | console.log(argv); 65 | bench(parseInt(argv.total as string), parseInt(argv.runners as string)); 66 | -------------------------------------------------------------------------------- /benchmarks/buffer.ts: -------------------------------------------------------------------------------- 1 | import { Buffer } from "../build/src/Buffer"; 2 | 3 | function bufferFromArrayBuffer(number: number, times: number): Buffer { 4 | let i = 0; 5 | let buf; 6 | while (i < times) { 7 | buf = Buffer.fromUnknown(new ArrayBuffer(number)); 8 | i++; 9 | } 10 | return buf; 11 | } 12 | 13 | function bufferAlloc(number: number, times: number): Buffer { 14 | let i = 0; 15 | let buf; 16 | while (i < times) { 17 | buf = Buffer.alloc(number); 18 | i++; 19 | } 20 | return buf; 21 | } 22 | 23 | function bench(number, times): void { 24 | console.time(`bufferFromArrayBuffer number=${number} times=${times}`); 25 | bufferFromArrayBuffer(number, times); 26 | console.timeEnd(`bufferFromArrayBuffer number=${number} times=${times}`); 27 | console.time(`bufferFromAllocBuffer number=${number} times=${times}`); 28 | bufferAlloc(number, times); 29 | console.timeEnd(`bufferFromAllocBuffer number=${number} times=${times}`); 30 | console.log(""); 31 | } 32 | 33 | function main(): void { 34 | bufferFromArrayBuffer(100, 100); 35 | bufferAlloc(100, 100); 36 | 37 | bench(100, 10000); 38 | bench(200, 10000); 39 | bench(300, 10000); 40 | bench(400, 10000); 41 | bench(500, 10000); 42 | bench(1000, 10000); 43 | bench(2000, 10000); 44 | bench(3000, 10000); 45 | } 46 | 47 | main(); 48 | -------------------------------------------------------------------------------- /docs/API.md: 
-------------------------------------------------------------------------------- 1 | # Docs 2 | 3 | 4 | ## Top level API 5 | 6 | ### Database 7 | 8 | ```ts 9 | constructure (dbpath: string, options:Options = new Options()): Database 10 | ``` 11 | 12 | Create a new database or recover from an exist database. 13 | 14 | see [Options](#Options) 15 | 16 | ### db.ok 17 | 18 | ```ts 19 | ok():Promise 20 | ``` 21 | 22 | Database maybe in recovering state after creation. When db recovering, `get` or `put` method is not aviabile. 23 | `ok` can help you know when db is ready to `get` or `put` data. 24 | 25 | 26 | ### db.get 27 | 28 | ```ts 29 | async get(key:string | Buffer, options:ReadOptions = new ReadOptions()):Promise 30 | ``` 31 | 32 | Get record from db. 33 | 34 | see [ReadOptions](#ReadOptions) 35 | 36 | ### db.put 37 | 38 | ```ts 39 | async put(key:string | Buffer, value:string | Buffer, options: WriteOptions = new WriteOptions()):Promise 40 | ``` 41 | 42 | see [WriteOptions](#WriteOptions) 43 | 44 | ### db.del 45 | 46 | ```ts 47 | del(key:string | Buffer, WriteOptions = new WriteOptions()):Promise 48 | ``` 49 | 50 | see [WriteOptions](#WriteOptions) 51 | 52 | 53 | ### db.batch 54 | 55 | ```ts 56 | batch(batch:WriteBatch, WriteOptions = new WriteOptions()):Promise 57 | ``` 58 | 59 | see [WriteOptions](#WriteOptions) 60 | 61 | 62 | ### db.iterator() 63 | 64 | ```ts 65 | async iterator():AsyncIterableIterator<{key: Buffer | string, value: Buffer | string}> 66 | ``` 67 | 68 | ### WriteBatch 69 | 70 | 71 | ### Options 72 | 73 | Name|Type|Default|Description 74 | -|-|-|- 75 | debug|`boolean`|`false`|- 76 | 77 | 78 | ### ReadOptions 79 | ### WriteOptions 80 | -------------------------------------------------------------------------------- /docs/images/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heineiuo/rippledb/5a34bd4eb98f572b57ddcb82d8897e2b2f30f3ae/docs/images/2.png 
-------------------------------------------------------------------------------- /docs/images/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heineiuo/rippledb/5a34bd4eb98f572b57ddcb82d8897e2b2f30f3ae/docs/images/3.png -------------------------------------------------------------------------------- /docs/images/leveldb-log2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heineiuo/rippledb/5a34bd4eb98f572b57ddcb82d8897e2b2f30f3ae/docs/images/leveldb-log2.png -------------------------------------------------------------------------------- /docs/implementation/Compaction.md: -------------------------------------------------------------------------------- 1 | # Compaction 2 | 3 | Trigger major compaction condition 4 | 5 | 1. manually compact 6 | 2. filter seek miss > allowed_seeks(25) 7 | 3. level0 table > 8 8 | 4. level_i(i>0) table bytes > 10^i MB 9 | -------------------------------------------------------------------------------- /docs/implementation/Iterator.md: -------------------------------------------------------------------------------- 1 | ```js 2 | 3 | function Range(low, high){ 4 | this.low = low; 5 | this.high = high; 6 | } 7 | Range.prototype[Symbol.iterator] = function(){ 8 | return new RangeIterator(this); 9 | }; 10 | 11 | function RangeIterator(range){ 12 | this.range = range; 13 | this.current = this.range.low; 14 | } 15 | RangeIterator.prototype.next = function(){ 16 | let result = {done: this.current > this.range.high, value: this.current}; 17 | this.current++; 18 | return result; 19 | }; 20 | 21 | var range = new Range(3, 5); 22 | for (var i of range) { 23 | console.log(i); 24 | } 25 | 26 | ``` 27 | 28 | 29 | ReadableStream should be an async iterable #778 30 | 31 | https://github.com/tc39/proposal-async-iteration/issues/74 32 | https://github.com/whatwg/streams/issues/778 33 | 34 | 35 | ```js 36 | try { 
37 | for await (const chunk of rs) { 38 | await somethingThatMightReject(chunk); 39 | } 40 | } finally { 41 | try { 42 | // Might throw if the reader is still locked because we finished 43 | // successfully without breaking or throwing. 44 | await rs.cancel(); 45 | } catch {} 46 | } 47 | ``` 48 | 49 | -------------------------------------------------------------------------------- /docs/implementation/MemTable.md: -------------------------------------------------------------------------------- 1 | # memtable 2 | 3 | ## skiplist 4 | 5 | 1. 为什么要用skiplist? 6 | 7 | skiplist相比数组,空间上有优势,相比链表,在速度上有优势。相比速度和空间上优势均等的 8 | 红黑树,在实现难度上有优势。 9 | 10 | 2. 如何实现skiplist? 11 | 12 | http://dsqiu.iteye.com/blog/1705530 13 | 14 | 设置一个最大层数 15 | 16 | 根据最大层数创建head,head里面保存每一层的第一个node,每个node都指向右指向NIL,向下指向上一级节点 17 | 18 | 插入节点的时候,先获取一个随机层数,再和head里该层的第一个节点比较,如果: 19 | 20 | 相等,则替换值 21 | 否则比较同一层右边的值,最终插入到某个位置,同时向下面的层级插入 22 | 23 | 24 | 获取节点的时候,从head里最大level的node开始查找,如果: 25 | 26 | 相等则返回 27 | 否则跟右边的值比较,如果比右边的大继续向右边查找,否则向下查找,直到找到节点或未找到节点。 28 | 29 | 30 | 31 | ## memtable 32 | 33 | 34 | memtable用到了`arena`做内存管理,包含了`ApproximateMemoryUsage`,实际上nodejs里不需要,内存管理交给V8. 
35 | 36 | 将SequenceNumber和ValueType 以及消息编码成一个字符串,存放在buf数组中,然后调用table.Insert(buf)插入数据。 37 | 38 | ![memtable buf](./images/3.png)[[1]](#ref1) 39 | 40 | 41 | ### key 42 | 43 | >所以总结下如下: 44 | 45 | >最短的为`internalkey`,就是`userkey`+`sequence`+`type`组成 46 | >接下来是`lookupkey`,由`internalkey`的长度+`internalkey`组成 47 | >`skiplist`中存储的键为`lookupkey`+`value`的长度+value。 48 | —— leveldb源码分析之memtable 49 | [[2]](#ref2) 50 | 51 | 52 | * ` [1] LevelDB源码剖析之MemTable 53 | `http://mingxinglai.com/cn/2013/01/leveldb-memtable/ 54 | * `` [2] leveldb源码分析之memtable https://luodw.cc/2015/10/17/leveldb-06/ -------------------------------------------------------------------------------- /docs/implementation/Repair.md: -------------------------------------------------------------------------------- 1 | # Repair 2 | 3 | ## API 4 | 5 | ```ts 6 | 7 | class DBRepairer { 8 | _dbname:string 9 | _env:Env 10 | _icmp:InternalCompareter 11 | _ipolicy: InternalPolicy 12 | _options: Options 13 | _ownsInfoLog: InfoLog 14 | _ownsCache: Cache 15 | _nextFileNumber: FileNumber 16 | _tableCache: TableCache 17 | 18 | async run(): Promise { 19 | await this.findFiles() 20 | await this.convertLogFilesToTables() 21 | await this.extractMetaData() 22 | await this.writeDescriptor() 23 | await this.log() 24 | } 25 | } 26 | 27 | const repairer = new DBRepairer() 28 | await repairer.ok() 29 | 30 | ``` 31 | 32 | ## Mechanism 33 | 34 | If `Manifest` file or `SSTable` file was broken, `Database` cannot recovery success. So 35 | we need use `DBRepairer` to repair db files. 36 | 37 | First, `DBRepairer` will find all files in `dbpath`. 38 | If `.log` file is found, convert it to SSTable files (Do not store in memory). 39 | 40 | Then, read all SSTable files and extract their metadata to 41 | a new `Manifest` file. 42 | 43 | Drop the damaged records when iterator `SSTable` files. 
44 | 45 | Rename Manifest to `MANIFEST-000001` and write to `CURRENT` file 46 | 47 | ## Questions 48 | -------------------------------------------------------------------------------- /docs/implementation/SSTable.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heineiuo/rippledb/5a34bd4eb98f572b57ddcb82d8897e2b2f30f3ae/docs/implementation/SSTable.md -------------------------------------------------------------------------------- /docs/implementation/SnapShots.md: -------------------------------------------------------------------------------- 1 | # SnapShots 2 | 3 | ## API 4 | 5 | ```ts 6 | 7 | type SnapShot = { 8 | _prev: Snapshot 9 | _next: Snapshot 10 | sequenceNumber: SequenceNumber | bigint 11 | } 12 | 13 | type SnapshotList = { 14 | isEmpty(): boolean 15 | insert(): Snapshot 16 | newest(): Snapshot 17 | oldest(): Snapshot 18 | delete(snapshot: Snapshot): void 19 | } 20 | 21 | // methods in class Database: 22 | public getSnapShot(): SnapShot 23 | public releaseSnapShot(snapshot: Snapshot): void 24 | 25 | // properties in class Database: 26 | private _snaphostList: SnapshotList 27 | 28 | ``` 29 | 30 | ## Mechanism 31 | 32 | Every record has a `sequence`(`type bigint`) slice, higher `squence` 33 | means newer record. Call `getSnapshot` method will 34 | get current `sequence`(named 'a'), use this `sequence` cal always `get` 35 | or `iterator` records older then `sequence` 'a'. 36 | 37 | If a snapshot is created, compaction will keep the records that has `sequence` 38 | bigger then or equal with this snapshot's `sequence`. So entire records will 39 | kept in query lifecycle. 40 | 41 | In a single-statement transaction, lifecycle look like this steps: 42 | 43 | 1. Get a snapshot and then `get` or `iterate` records with this snapshot 44 | 2. Create a `WriteBatch` and commit changes to it 45 | 3. Batch this `WriteBatch` if transaction success 46 | 4. 
Drop this `WriteBatch` (Just do nothing) as a transaction rollback 47 | 5. Release the snapshot 48 | 49 | ## Questions 50 | 51 | ### 1. Will two transactions make conflict changes? 52 | 53 | No. Newer operation will always overwrite older operation. -------------------------------------------------------------------------------- /fixtures/copydb.ts: -------------------------------------------------------------------------------- 1 | import path from 'path' 2 | import fs from 'fs' 3 | 4 | export async function copydb(dbpath1: string, dbpath2: string): Promise { 5 | const files = await fs.promises.readdir(dbpath1, { withFileTypes: true }) 6 | const filenames = files.reduce((filenames: string[], direct: fs.Dirent) => { 7 | if (direct.isFile()) { 8 | if (direct.name !== 'LOCK') filenames.push(direct.name) 9 | } 10 | return filenames 11 | }, []) 12 | for await (const filename of filenames) { 13 | await fs.promises.copyFile( 14 | path.resolve(dbpath1, filename), 15 | path.resolve(dbpath2, filename) 16 | ) 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /fixtures/dbpath.ts: -------------------------------------------------------------------------------- 1 | import path from 'path' 2 | import rimraf from 'rimraf' 3 | import fs from 'fs' 4 | 5 | export function createDir(name?: string): string { 6 | const name1 = 7 | name || 8 | `${Date.now()}_${Math.random() 9 | .toString() 10 | .substr(2)}` 11 | const dir = path.resolve(__dirname, `../.db/${name1}`) 12 | 13 | fs.mkdirSync(dir, { recursive: true }) 14 | 15 | return dir 16 | } 17 | 18 | export function cleanup(dbpath: string): void { 19 | rimraf.sync(dbpath) 20 | } 21 | -------------------------------------------------------------------------------- /fixtures/random.ts: -------------------------------------------------------------------------------- 1 | import crypto from 'crypto' 2 | 3 | export function random(keySize = 16, valueSize = 64): [string, string] { 4 | const 
key = crypto 5 | .randomBytes(keySize) 6 | .toString('hex') 7 | .substr(0, keySize) 8 | const value = crypto 9 | .randomBytes(valueSize) 10 | .toString('hex') 11 | .substr(0, valueSize) 12 | return [key, value] 13 | } 14 | -------------------------------------------------------------------------------- /fixtures/runner.ts: -------------------------------------------------------------------------------- 1 | import { Buffer } from "../src/Buffer"; 2 | 3 | interface Database { 4 | put: (key: string | Buffer, value: string | Buffer) => Promise; 5 | } 6 | 7 | type Entry = [string | Buffer, string | Buffer]; 8 | 9 | export async function runner( 10 | db: Database, 11 | dataset: Entry[], 12 | skip: number, 13 | start: number, 14 | ): Promise { 15 | let current = start; 16 | const total = dataset.length; 17 | while (true) { 18 | if (current >= total) return; 19 | const entry = dataset[current]; 20 | await db.put(entry[0], entry[1]); 21 | current += skip; 22 | } 23 | } 24 | 25 | export async function allocRunner( 26 | runnerCount: number, 27 | db: Database, 28 | dataset: Entry[], 29 | ): Promise { 30 | await Promise.all( 31 | Array.from({ length: runnerCount }, (v, start) => { 32 | return runner(db, dataset, runnerCount, start); 33 | }), 34 | ); 35 | } 36 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "rippledb", 3 | "version": "1.0.0", 4 | "main": "build/port/node", 5 | "repository": "https://github.com/heineiuo/rippledb", 6 | "author": "heineiuo ", 7 | "license": "MIT", 8 | "scripts": { 9 | "test": "jest", 10 | "build:fixture": "babel fixtures -d fixtures --extensions \".ts\"", 11 | "build:bench": "babel benchmarks -d benchmarks --extensions \".ts\"", 12 | "build:scripts": "babel scripts -d scripts --extensions \".ts\"", 13 | "bench": "node --trace-warnings --prof benchmarks/bench_write.js --total=10000 --runners=10", 14 | 
"bench:read": "node --trace-warnings --prof benchmarks/bench_read.js --total=10000 --runners=10", 15 | "bench:log-clean": "rm -rf ./isolate-*.log", 16 | "bench:graph": "node --prof-process --preprocess -j isolate*.log | flamebearer", 17 | "build:src": "babel src -d build/src --ignore 'src/tests' --source-maps --extensions \".ts\"", 18 | "build:port-node": "babel port/node -d build/port/node --ignore 'src/tests' --source-maps --extensions \".ts\"", 19 | "build": "npm run build:src && npm run build:port-node", 20 | "build:tsc": "tsc -d", 21 | "build:deno": "node scripts/transform2deno.js", 22 | "prepublish": "export NODE_ENV=production && npm run build" 23 | }, 24 | "files": [ 25 | "build", 26 | "LICENSE" 27 | ], 28 | "devDependencies": { 29 | "@babel/cli": "^7.5.5", 30 | "@babel/core": "^7.5.5", 31 | "@babel/node": "^7.6.1", 32 | "@babel/plugin-proposal-class-properties": "^7.5.5", 33 | "@babel/plugin-syntax-bigint": "^7.4.4", 34 | "@babel/preset-env": "^7.5.5", 35 | "@babel/preset-typescript": "^7.6.0", 36 | "@types/assert": "^1.4.3", 37 | "@types/buffer-crc32": "^0.2.0", 38 | "@types/jest": "^24.0.22", 39 | "@types/lru-cache": "^5.1.0", 40 | "@types/node": "^12.11.7", 41 | "@types/rimraf": "^2.0.3", 42 | "@types/signal-exit": "^3.0.0", 43 | "@types/varint": "^5.0.0", 44 | "@typescript-eslint/eslint-plugin": "^2.4.0", 45 | "@typescript-eslint/parser": "^2.4.0", 46 | "eslint": "^6.5.1", 47 | "eslint-config-prettier": "^6.4.0", 48 | "eslint-plugin-prettier": "^3.1.1", 49 | "express": "^4.17.1", 50 | "flamebearer": "^1.1.3", 51 | "glob": "^7.1.6", 52 | "jest": "^26.0.1", 53 | "prettier": "^2.0.5", 54 | "rimraf": "^3.0.0", 55 | "typescript": "^3.9.3", 56 | "yargs": "^15.0.2" 57 | }, 58 | "jest": { 59 | "moduleFileExtensions": [ 60 | "ts", 61 | "js" 62 | ], 63 | "coverageDirectory": "./coverage", 64 | "coverageReporters": [ 65 | "lcov" 66 | ] 67 | } 68 | } -------------------------------------------------------------------------------- /port/deno/index.ts: 
-------------------------------------------------------------------------------- 1 | import { InternalDatabase } from "https://cdn.jsdelivr.net/gh/heineiuo/rippledb-deno@ff38fb5/index.ts"; 2 | export * from "https://cdn.jsdelivr.net/gh/heineiuo/rippledb-deno@ff38fb5/index.ts"; 3 | 4 | export class Database extends InternalDatabase {} 5 | -------------------------------------------------------------------------------- /port/node/cleanup.ts: -------------------------------------------------------------------------------- 1 | // Rewite to TypeScript https://github.com/jtlapp/node-cleanup/ 2 | 3 | const DEFAULT_MESSAGES = { 4 | ctrlC: "[ctrl-C]", 5 | uncaughtException: "Uncaught exception...", 6 | }; 7 | 8 | interface SignalMessages { 9 | ctrlC: string; 10 | uncaughtException: string; 11 | } 12 | 13 | interface CleanupHandler { 14 | (code: number | null, signal?: string): boolean; 15 | } 16 | 17 | interface SignalHandle { 18 | (signal: NodeJS.Signals): void; 19 | } 20 | 21 | let inited = false; 22 | let cleanupHandlers: CleanupHandler[] = []; // array of cleanup handlers to call 23 | let messages: SignalMessages | null = null; // messages to write to stderr 24 | 25 | let sigintHandler: SignalHandle; // POSIX signal handlers 26 | let sighupHandler: SignalHandle; 27 | let sigquitHandler: SignalHandle; 28 | let sigtermHandler: SignalHandle; 29 | 30 | function exceptionHandler(e: Error): void { 31 | if (messages && messages.uncaughtException !== "") { 32 | process.stderr.write(messages.uncaughtException + "\n"); 33 | } 34 | process.stderr.write(e.stack + "\n"); 35 | process.exit(1); // will call exitHandler() for cleanup 36 | } 37 | 38 | function exitHandler(exitCode: number): void { 39 | cleanupHandlers.forEach(function (cleanup) { 40 | cleanup(exitCode); 41 | }); 42 | } 43 | 44 | function uninstall(): void { 45 | if (cleanupHandlers.length > 0) { 46 | process.removeListener("SIGINT", sigintHandler); 47 | process.removeListener("SIGHUP", sighupHandler); 48 | 
process.removeListener("SIGQUIT", sigquitHandler); 49 | process.removeListener("SIGTERM", sigtermHandler); 50 | process.removeListener("uncaughtException", exceptionHandler); 51 | process.removeListener("exit", exitHandler); 52 | cleanupHandlers = []; 53 | } 54 | } 55 | 56 | function createSignalHandler(signal: string): SignalHandle { 57 | return function (): void { 58 | let exit = true; 59 | cleanupHandlers.forEach(function (cleanup) { 60 | exit = !!cleanup(null, signal); 61 | }); 62 | if (exit) { 63 | if (signal === "SIGINT" && messages && messages.ctrlC !== "") { 64 | process.stderr.write(messages.ctrlC + "\n"); 65 | } 66 | 67 | uninstall(); // don't cleanup again 68 | // necessary to communicate the signal to the parent process 69 | process.kill(process.pid, signal); 70 | } 71 | }; 72 | } 73 | 74 | function install( 75 | cleanupHandler: CleanupHandler, 76 | stderrMessages: SignalMessages = DEFAULT_MESSAGES, 77 | ): void { 78 | if (messages === null) messages = { ctrlC: "", uncaughtException: "" }; 79 | if (typeof stderrMessages.ctrlC === "string") 80 | messages.ctrlC = stderrMessages.ctrlC; 81 | if (typeof stderrMessages.uncaughtException === "string") 82 | messages.uncaughtException = stderrMessages.uncaughtException; 83 | 84 | if (!inited) { 85 | sigintHandler = createSignalHandler("SIGINT"); 86 | sighupHandler = createSignalHandler("SIGHUP"); 87 | sigquitHandler = createSignalHandler("SIGQUIT"); 88 | sigtermHandler = createSignalHandler("SIGTERM"); 89 | process.on("SIGINT", sigintHandler); 90 | process.on("SIGHUP", sighupHandler); 91 | process.on("SIGQUIT", sigquitHandler); 92 | process.on("SIGTERM", sigtermHandler); 93 | process.on("uncaughtException", exceptionHandler); 94 | process.on("exit", exitHandler); 95 | inited = true; 96 | } 97 | cleanupHandlers.push(cleanupHandler); 98 | } 99 | 100 | export function onExit(callback: () => void): void { 101 | install(() => { 102 | callback(); 103 | return true; 104 | }); 105 | } 106 | 
-------------------------------------------------------------------------------- /port/node/index.ts: -------------------------------------------------------------------------------- 1 | import fs from "fs"; 2 | import os from "os"; 3 | import { TextEncoder } from "util"; 4 | import InternalDatabase from "../../src/Database"; 5 | import { Env, FileHandle } from "../../src/Env"; 6 | import { DatabaseOptions } from "../../src/Options"; 7 | import { InternalDBRepairer } from "../../src/DBRepairer"; 8 | import { WriteBatch } from "../../src/WriteBatch"; 9 | import { onExit } from "./cleanup"; 10 | 11 | //@ts-ignore 12 | if (!global.TextEncoder) { 13 | //@ts-ignore 14 | global.TextEncoder = TextEncoder; 15 | } 16 | 17 | class NodeEnv implements Env { 18 | platform(): string { 19 | return os.platform(); 20 | } 21 | /** 22 | * get current time 23 | */ 24 | now(): number { 25 | return Number(process.hrtime.bigint()) / Math.pow(10, 9); 26 | } 27 | 28 | onExit = onExit; 29 | 30 | access(dbpath: string): Promise { 31 | return fs.promises.access(dbpath, fs.constants.W_OK); 32 | } 33 | 34 | mkdir(dbpath: string): Promise { 35 | return fs.promises.mkdir(dbpath, { recursive: true }); 36 | } 37 | 38 | writeFile = fs.promises.writeFile; 39 | readFile = fs.promises.readFile; 40 | open = fs.promises.open; 41 | rename = fs.promises.rename; 42 | unlink = fs.promises.unlink; 43 | unlinkSync = fs.unlinkSync; 44 | fstat = fs.promises.fstat; 45 | 46 | // eslint-disable-next-line 47 | readdir(dbpath: string) { 48 | return fs.promises.readdir(dbpath, { withFileTypes: true }); 49 | } 50 | 51 | async infoLog(handle: FileHandle, message: string): Promise { 52 | const finalMessage = `${new Date().toISOString()} ${message}\n`; 53 | await handle.appendFile(finalMessage); 54 | } 55 | 56 | async getFileSize(filename: string): Promise { 57 | const stat = await fs.promises.stat(filename); 58 | return stat.size; 59 | } 60 | } 61 | 62 | class Database extends InternalDatabase { 63 | constructor(dbpath: 
string, options: DatabaseOptions = {}) { 64 | if (!options.env) options.env = new NodeEnv(); 65 | super(dbpath, options); 66 | } 67 | } 68 | 69 | class DBRepairer extends InternalDBRepairer { 70 | constructor(dbpath: string, options: DatabaseOptions = {}) { 71 | if (!options.env) options.env = new NodeEnv(); 72 | super(dbpath, options); 73 | } 74 | } 75 | 76 | export { 77 | WriteBatch, 78 | Env, 79 | NodeEnv, 80 | Database, 81 | DBRepairer, 82 | InternalDBRepairer, 83 | InternalDatabase, 84 | }; 85 | -------------------------------------------------------------------------------- /scripts/Transformer.ts: -------------------------------------------------------------------------------- 1 | import * as ts from "typescript"; 2 | import glob from "glob"; 3 | import path from "path"; 4 | import fs from "fs"; 5 | 6 | const addExtTransformer = ( 7 | ctx: ts.TransformationContext, 8 | ) => (rootNode: T): T => { 9 | function nodeVisitor(): ts.Visitor { 10 | const visitor: ts.Visitor = (node: ts.Node) => { 11 | if (ts.isImportDeclaration(node) && node.moduleSpecifier) { 12 | const node2 = ts.getMutableClone(node); 13 | let text = node2.moduleSpecifier.getText().trim(); 14 | text = text.substr(1, text.length - 2); 15 | if (text.endsWith(".ts")) return node2; 16 | node2.moduleSpecifier = ts.createStringLiteral(text + ".ts"); 17 | return node2; 18 | } 19 | 20 | if (ts.isExportDeclaration(node) && node.moduleSpecifier) { 21 | const node2 = ts.getMutableClone(node); 22 | let text = node2.moduleSpecifier.getText().trim(); 23 | text = text.substr(1, text.length - 2); 24 | if (text.endsWith(".ts")) return node2; 25 | node2.moduleSpecifier = ts.createStringLiteral(text + ".ts"); 26 | return node2; 27 | } 28 | 29 | return ts.visitEachChild(node, visitor, ctx); 30 | }; 31 | return visitor; 32 | } 33 | return ts.visitNode(rootNode, nodeVisitor()); 34 | }; 35 | 36 | export class Transformer { 37 | async transform(sourceDir: string, targetDir: string): Promise { 38 | const files = 
glob.sync(`${sourceDir}/**/*.ts`); 39 | await fs.promises.mkdir(targetDir, { recursive: true }); 40 | for await (const file of files) { 41 | const sourceFilename = path.basename(file); 42 | const source = await fs.promises.readFile(file, "utf8"); 43 | const relative = path.relative(sourceDir, file); 44 | const outputFile = path.resolve(targetDir, relative); 45 | const result = await this.transformFile(sourceFilename, source); 46 | await fs.promises.writeFile(outputFile, result, "utf8"); 47 | } 48 | } 49 | 50 | transformFile(sourceFilename: string, source: string): string { 51 | const printer: ts.Printer = ts.createPrinter(); 52 | 53 | const sourceFile: ts.SourceFile = ts.createSourceFile( 54 | sourceFilename, 55 | source, 56 | ts.ScriptTarget.ES2015, 57 | true, 58 | ts.ScriptKind.TS, 59 | ); 60 | 61 | const result: ts.TransformationResult = ts.transform< 62 | ts.SourceFile 63 | >(sourceFile, [addExtTransformer]); 64 | 65 | const transformedSourceFile: ts.SourceFile = result.transformed[0]; 66 | 67 | const transformed = printer.printFile(transformedSourceFile); 68 | result.dispose(); 69 | return transformed; 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /scripts/transform2deno.ts: -------------------------------------------------------------------------------- 1 | import path from "path"; 2 | import { Transformer } from "./Transformer"; 3 | 4 | async function transform(): Promise { 5 | const transformer = new Transformer(); 6 | await transformer.transform( 7 | path.resolve(__dirname, "../src"), 8 | path.resolve(__dirname, "../rippledb-deno"), 9 | ); 10 | console.log("Transform success"); 11 | } 12 | 13 | transform(); 14 | -------------------------------------------------------------------------------- /src/BitBuffer.ts: -------------------------------------------------------------------------------- 1 | // Rwrite to TypeScript https://github.com/wiedi/node-bitbuffer 2 | 3 | import { Buffer } from "./Buffer"; 4 | 
5 | export default class BitBuffer { 6 | /** 7 | * Buffer length should be Math.ceil(bits / 8) 8 | */ 9 | constructor(buffer: Buffer) { 10 | this._buffer = buffer; 11 | this._size = buffer.length; 12 | } 13 | 14 | _size: number; 15 | _buffer: Buffer; 16 | 17 | get buffer(): Buffer { 18 | return this._buffer; 19 | } 20 | 21 | get size(): number { 22 | return this._size; 23 | } 24 | 25 | get bits(): number { 26 | // return (this.size - (this.size % 8)) * 8 27 | return this.size * 8; 28 | } 29 | 30 | resizeBits(bits: number): void { 31 | const nextSize = Math.ceil(bits / 8); 32 | if (nextSize > this.size) { 33 | this._buffer = Buffer.concat([ 34 | this._buffer, 35 | Buffer.alloc(nextSize - this.size), 36 | ]); 37 | this._size = this._buffer.length; 38 | } else if (nextSize < this.size) { 39 | this._buffer = this._buffer.slice(0, nextSize); 40 | this._size = this._buffer.length; 41 | } 42 | } 43 | 44 | set(index: number, bool: boolean): void { 45 | const pos = index >>> 3; 46 | if (bool) { 47 | this._buffer[pos] |= 1 << index % 8; 48 | } else { 49 | this._buffer[pos] &= ~(1 << index % 8); 50 | } 51 | } 52 | 53 | toggle(index: number): void { 54 | this._buffer[index >>> 3] ^= 1 << index % 8; 55 | } 56 | 57 | get(index: number): boolean { 58 | return (this._buffer[index >>> 3] & (1 << index % 8)) !== 0; 59 | } 60 | 61 | toString(): string { 62 | let str = ""; 63 | for (let i = 0; i < this.bits; i++) { 64 | str += this.get(i) ? "1" : "0"; 65 | } 66 | return str; 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/BloomFilter.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { varint } from "./DBHelper"; 9 | import BitBuffer from "./BitBuffer"; 10 | import { hash } from "./Hash"; 11 | import Slice from "./Slice"; 12 | import { Buffer } from "./Buffer"; 13 | import { FilterPolicy } from "./Options"; 14 | 15 | function bloomHash(key: Slice): number { 16 | return hash(key.buffer, 0xbc9f1d34); 17 | } 18 | 19 | /** 20 | * time of hash is main effect 21 | * best time of hash = bits number / elements number x ln2(≈0.69) 22 | * elements number and ln2 is predictable 23 | * bits number is configable 24 | * from past experience, bitsPerKey = 10 is best 25 | */ 26 | export default class BloomFilter implements FilterPolicy { 27 | constructor(buffer?: Buffer, bitsPerKey = 10) { 28 | this._offset = 0; 29 | this._bitsPerKey = bitsPerKey; 30 | const k = Math.round(bitsPerKey * 0.69); 31 | 32 | if (!buffer || buffer.length === 0) { 33 | this._buffer = Buffer.fromUnknown(varint.encode(k)); 34 | this._bitBuffer = new BitBuffer(Buffer.alloc(Math.ceil(k / 8))); 35 | this._kNumber = k; 36 | } else { 37 | this._buffer = buffer; 38 | this._bitBuffer = new BitBuffer(buffer.slice(0, buffer.length - 1)); 39 | this._kNumber = varint.decode( 40 | this._buffer.slice(this._buffer.length - 1), 41 | ); 42 | if (this._kNumber !== k) { 43 | this._kNumber = k; 44 | this._buffer = Buffer.concat([ 45 | this._buffer.slice(0, this._buffer.length - 1), 46 | Buffer.fromUnknown(varint.encode(k)), 47 | ]); 48 | this._bitBuffer.resizeBits(k); 49 | } 50 | } 51 | this._size = this._buffer.length; 52 | } 53 | 54 | private _buffer: Buffer; 55 | private _offset: number; 56 | private _size: number; 57 | private _kNumber: number; 58 | private _bitBuffer: BitBuffer; 59 | private _bitsPerKey: number; 60 | 61 | get bitsPerKey(): number { 62 | return this._bitsPerKey; 63 | } 64 | 65 | get bitBuffer(): BitBuffer { 66 | return this._bitBuffer; 67 | } 68 | 69 | get buffer(): Buffer { 70 | return this._buffer; 71 | } 72 | 73 | get size(): number { 74 | return this._size; 
75 | } 76 | 77 | get kNumber(): number { 78 | return this._kNumber; 79 | } 80 | 81 | // Return the name of this policy. Note that if the filter encoding 82 | // changes in an incompatible way, the name returned by this method 83 | // must be changed. Otherwise, old incompatible filters may be 84 | // passed to methods of this type. 85 | public name(): string { 86 | return "leveldb.BuiltinBloomFilter2"; 87 | } 88 | 89 | // keys[0,n-1] contains a list of keys (potentially with duplicates) 90 | // that are ordered according to the user supplied comparator. 91 | // Append a filter that summarizes keys[0,n-1] to *dst. 92 | // 93 | // Warning: do not change the initial contents of *dst. Instead, 94 | // append the newly constructed filter to *dst. 95 | public putKeys(keys: Slice[], n: number): void { 96 | // Compute bloom filter size (in both bits and bytes) 97 | let bits = this.bitsPerKey * n; 98 | 99 | // For small n, we can see a very high false positive rate. Fix it 100 | // by enforcing a minimum bloom filter length. 101 | if (bits < 64) bits = 64; 102 | 103 | const bytes = (bits + 7) / 8; 104 | bits = bytes * 8; 105 | 106 | this._bitBuffer.resizeBits(bits); 107 | bits = this._bitBuffer.bits; 108 | 109 | for (let i = 0; i < n; i++) { 110 | // Use double-hashing to generate a sequence of hash values. 111 | // See analysis in [Kirsch,Mitzenmacher 2006]. 
112 | let h = bloomHash(keys[i]); 113 | 114 | const delta = (h >> 17) | (h << 15); 115 | for (let j = 0; j < this.kNumber; j++) { 116 | const bitPosition = h % bits; 117 | this._bitBuffer.set(bitPosition, true); 118 | h += delta; 119 | } 120 | } 121 | this._buffer = Buffer.concat([ 122 | this._bitBuffer.buffer, 123 | this._buffer.slice( 124 | this._offset + this._size - 1, 125 | this._offset + this._size, 126 | ), 127 | ]); 128 | this._size = this._buffer.length; 129 | } 130 | 131 | public keyMayMatch(key: Slice, bloomFilter: Slice): boolean { 132 | const filter = new BloomFilter(bloomFilter.buffer); 133 | 134 | if (filter.kNumber > 30) return true; 135 | let h = bloomHash(key); 136 | const delta = (h >> 17) | (h << 15); 137 | for (let j = 0; j < filter.kNumber; j++) { 138 | const bitPosition = h % filter._bitBuffer.bits; 139 | if (!filter._bitBuffer.get(bitPosition)) return false; 140 | h += delta; 141 | } 142 | return true; 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/Builder.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import Status from "./Status"; 9 | import { Env, FileHandle } from "./Env"; 10 | import { FileMetaData } from "./VersionFormat"; 11 | import { getTableFilename } from "./Filename"; 12 | import SSTableBuilder from "./SSTableBuilder"; 13 | import { Options } from "./Options"; 14 | import { InternalKey, Entry } from "./Format"; 15 | import { assert } from "console"; 16 | 17 | export async function buildTable( 18 | dbpath: string, 19 | env: Env, 20 | options: Options, 21 | iterator: IterableIterator, 22 | meta: FileMetaData, 23 | ): Promise { 24 | options.log(`Level-0 table #${meta.number}: started`); 25 | 26 | const tableFilename = getTableFilename(dbpath, meta.number); 27 | let status = new Status(env.open(tableFilename, "a+")); 28 | if (!(await status.ok())) { 29 | return status; 30 | } 31 | const builder = new SSTableBuilder( 32 | options, 33 | (await status.promise) as FileHandle, 34 | ); 35 | let hasSmallestSet = false; 36 | for (const entry of iterator) { 37 | if (!hasSmallestSet) { 38 | meta.smallest = InternalKey.from(entry.key); 39 | hasSmallestSet = true; 40 | } 41 | meta.largest.decodeFrom(entry.key); 42 | await builder.add(entry.key, entry.value); 43 | } 44 | 45 | status = new Status(builder.finish()); 46 | if (!(await status.ok())) { 47 | return status; 48 | } 49 | meta.fileSize = builder.fileSize; 50 | assert(meta.fileSize > 0); 51 | options.log( 52 | `Level-0 table #${meta.number}: ${meta.fileSize} bytes ${ 53 | (await status.ok()) ? "status ok" : "status error" 54 | }`, 55 | ); 56 | // TODO check if table has errors 57 | return status; 58 | } 59 | -------------------------------------------------------------------------------- /src/Cache.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { LRUCache, LRUCacheOptions } from "./LRUCache"; 9 | 10 | export default class Cache extends LRUCache { 11 | constructor(options?: LRUCacheOptions) { 12 | super(options); 13 | } 14 | 15 | private _id = 0n; 16 | 17 | newId(): bigint { 18 | return ++this._id; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/Coding.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | import { Buffer } from "./Buffer"; 9 | import { varint, assert } from "./DBHelper"; 10 | import Slice from "./Slice"; 11 | 12 | const kFixed64MaxValue = (1n << 56n) - 1n; 13 | 14 | // only use 1 - 7 bytes 15 | export function encodeFixed64(value: number | bigint): Buffer { 16 | // or Buffer.from(new BigUint64Array([BigInt(value)]).buffer) 17 | const buf = Buffer.alloc(8); 18 | const bigIntValue = BigInt(value); 19 | buf.writeBigUInt64LE( 20 | bigIntValue < kFixed64MaxValue ? 
bigIntValue : kFixed64MaxValue, 21 | ); 22 | return buf; 23 | } 24 | 25 | export function decodeFixed64(buf: Buffer): bigint { 26 | // or BigInt(new BigUint64Array(bufferToArrayBuffer(buf)).toString()) 27 | return buf.readBigUInt64LE(); 28 | } 29 | 30 | export function encodeFixed32(value: number): Buffer { 31 | const buf = Buffer.alloc(4); 32 | buf.writeUInt32LE(value, 0); 33 | return buf; 34 | } 35 | 36 | export function decodeFixed32(buf: Buffer): number { 37 | assert(buf.length >= 4); 38 | return buf.readUInt32LE(0); 39 | } 40 | 41 | // function bufferToArrayBuffer(buf: Buffer) { 42 | // let ab = new ArrayBuffer(buf.length) 43 | // let view = new Uint8Array(ab) 44 | // for (let i = 0; i < buf.length; ++i) { 45 | // view[i] = buf[i] 46 | // } 47 | // return ab 48 | // } 49 | 50 | export function getLengthPrefixedSlice(key: Slice): Slice { 51 | const internalKeySize = varint.decode(key.buffer); 52 | const internalKeyBuffer = key.buffer.slice( 53 | varint.decode.bytes, 54 | varint.decode.bytes + internalKeySize, 55 | ); 56 | return new Slice(internalKeyBuffer); 57 | } 58 | -------------------------------------------------------------------------------- /src/Compaction.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { FileMetaData } from "./VersionFormat"; 9 | import Version from "./Version"; 10 | import VersionEdit from "./VersionEdit"; 11 | import { Options } from "./Options"; 12 | import Slice from "./Slice"; 13 | import SSTableBuilder from "./SSTableBuilder"; 14 | import { Config, InternalKey, SequenceNumber } from "./Format"; 15 | import { FileHandle } from "./Env"; 16 | 17 | export default class Compaction { 18 | static targetFileSize(options: Options): number { 19 | return options.maxFileSize; 20 | } 21 | 22 | static maxGrandParentOverlapBytes(options: Options): number { 23 | return 10 * Compaction.targetFileSize(options); 24 | } 25 | 26 | static totalFileSize(files: FileMetaData[]): number { 27 | let sum = 0; 28 | for (const file of files) { 29 | sum += file.fileSize; 30 | } 31 | return sum; 32 | } 33 | 34 | // eslint-disable-next-line 35 | static maxFileSizeForLevel(options: Options, level: number): number { 36 | // We could vary per level to reduce number of files? 37 | return Compaction.targetFileSize(options); 38 | } 39 | 40 | constructor(options: Options, level: number) { 41 | this.level = level; 42 | this.grandparentIndex = 0; 43 | this.overlappedBytes = 0; 44 | this.seenKey = false; 45 | this.inputs = [[], []]; 46 | this._maxOutputFilesize = Compaction.maxFileSizeForLevel(options, level); 47 | this.levelPtrs = Array.from({ length: Config.kNumLevels }, () => 0); 48 | this.edit = new VersionEdit(); 49 | } 50 | 51 | public level: number; 52 | public inputVersion!: Version; 53 | public grandparents!: FileMetaData[]; 54 | public edit: VersionEdit; 55 | 56 | // Each compaction reads inputs from "level_" and "level_+1" 57 | public inputs: [FileMetaData[], FileMetaData[]]; 58 | 59 | private grandparentIndex: number; // Index in grandparent_starts_ 60 | private seenKey: boolean; 61 | private overlappedBytes: number; 62 | private _maxOutputFilesize: number; 63 | 64 | // level_ptrs_ holds indices into input_version_->levels_: our state 65 | // is that 
we are positioned at one of the file ranges for each 66 | // higher level than the ones involved in this compaction (i.e. for 67 | // all L >= level_ + 2). 68 | private levelPtrs: number[]; 69 | 70 | get maxOutputFilesize(): number { 71 | return this._maxOutputFilesize; 72 | } 73 | 74 | public numInputFiles(which: 0 | 1): number { 75 | return this.inputs[which].length; 76 | } 77 | 78 | // Is this a trivial compaction that can be implemented by just 79 | // moving a single input file to the next level (no merging or splitting) 80 | public isTrivialMove(): boolean { 81 | const versionSet = this.inputVersion.versionSet; 82 | // Avoid a move if there is lots of overlapping grandparent data. 83 | // Otherwise, the move could create a parent file that will require 84 | // a very expensive merge later on. 85 | return ( 86 | this.numInputFiles(0) === 1 && 87 | this.numInputFiles(1) === 0 && 88 | Compaction.totalFileSize(this.grandparents) <= 89 | Compaction.maxGrandParentOverlapBytes(versionSet._options) 90 | ); 91 | } 92 | 93 | // Returns true if the information we have available guarantees that 94 | // the compaction is producing data in "level+1" for which no data exists 95 | // in levels greater than "level+1". 
96 | public isBaseLevelForKey(userKey: Slice): boolean { 97 | const userComparator = this.inputVersion.versionSet.internalKeyComparator 98 | .userComparator; 99 | for (let level = this.level + 2; level < Config.kNumLevels; level++) { 100 | const files = this.inputVersion.files[level]; 101 | while (this.levelPtrs[level] < files.length) { 102 | const f = files[this.levelPtrs[level]]; 103 | if (userComparator.compare(userKey, f.largest.userKey) <= 0) { 104 | // We've advanced far enough 105 | if (userComparator.compare(userKey, f.smallest.userKey) >= 0) { 106 | // Key falls in this file's range, so definitely not base level 107 | return false; 108 | } 109 | break; 110 | } 111 | this.levelPtrs[level]++; 112 | } 113 | } 114 | return true; 115 | } 116 | 117 | // Release the input version for the compaction, once the compaction 118 | // is successful. 119 | public releaseInputs(): void { 120 | if (!!this.inputVersion) { 121 | this.inputVersion.unref(); 122 | delete this.inputVersion; 123 | } 124 | } 125 | 126 | /** 127 | * Returns true if we should stop building the current output 128 | * before processing "internalKey". 129 | */ 130 | public shouldStopBefore(internalKey: Slice): boolean { 131 | const versionSet = this.inputVersion.versionSet; 132 | const icmp = versionSet.internalKeyComparator; 133 | while ( 134 | this.grandparentIndex < this.grandparents.length && 135 | icmp.compare( 136 | internalKey, 137 | this.grandparents[this.grandparentIndex].largest, 138 | ) > 0 139 | ) { 140 | if (this.seenKey) { 141 | this.overlappedBytes += this.grandparents[ 142 | this.grandparentIndex 143 | ].fileSize; 144 | } 145 | this.grandparentIndex++; 146 | } 147 | this.seenKey = true; 148 | if ( 149 | this.overlappedBytes > 150 | Compaction.maxGrandParentOverlapBytes(versionSet._options) 151 | ) { 152 | this.overlappedBytes = 0; 153 | return true; 154 | } else { 155 | return false; 156 | } 157 | } 158 | 159 | // Add all inputs to this compaction as delete operations to *edit. 
  public addInputDeletions(edit: VersionEdit): void {
    // inputs[0] = files at this.level, inputs[1] = files at this.level + 1;
    // every input table is fully consumed by the compaction, so all of them
    // can be recorded as deletions in the version edit.
    for (let which = 0; which < 2; which++) {
      for (let i = 0; i < this.inputs[which].length; i++) {
        edit.deleteFile(this.level + which, this.inputs[which][i].number);
      }
    }
  }
}

/** Accumulated statistics for compactions at one level. */
export class CompactionStats {
  // NOTE(review): unit/semantics of `times` is not evident here
  // (count of compactions vs elapsed time) — confirm against callers.
  times: number;
  bytesRead: number;
  bytesWritten: number;
  constructor() {
    this.times = 0;
    this.bytesRead = 0;
    this.bytesWritten = 0;
  }

  /** Fold another stats record into this one (field-wise sum). */
  add(c: CompactionStats): void {
    this.times += c.times;
    this.bytesRead += c.bytesRead;
    this.bytesWritten += c.bytesWritten;
  }
}

/** Metadata for one output table produced by a compaction. */
export interface CompactionStateOutput {
  number: number;
  fileSize: number;
  smallest: InternalKey;
  largest: InternalKey;
}

/** Mutable state carried through one compaction run. */
export class CompactionState {
  public outputs: CompactionStateOutput[] = [];
  // Sequence numbers below smallestSnapshot are not significant: no live
  // snapshot can ever observe them.
  public smallestSnapshot: SequenceNumber;
  public compaction: Compaction;
  public outfile!: FileHandle;
  public builder!: SSTableBuilder;
  public totalBytes: number;

  constructor(c: Compaction) {
    this.compaction = c;
    this.smallestSnapshot = 0n;
    this.totalBytes = 0;
  }

  /** The output table currently being built (last element of `outputs`). */
  public currentOutput(): CompactionStateOutput {
    return this.outputs[this.outputs.length - 1];
  }
}
--------------------------------------------------------------------------------
/src/Comparator.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

import { assert } from "./DBHelper";
import Slice from "./Slice";

export interface Comparator {
  // Three-way comparison. Returns value:
  //   < 0 iff "a" < "b",
  //   == 0 iff "a" == "b",
  //   > 0 iff "a" > "b"
  compare(a: Slice, b: Slice): number;

  // The name of the comparator. Used to check for comparator
  // mismatches (i.e., a DB created with one comparator is
  // accessed using a different comparator).
  //
  // The client of this package should switch to a new name whenever
  // the comparator implementation changes in a way that will cause
  // the relative ordering of any two keys to change.
  //
  // Names starting with "leveldb." are reserved and should not be used
  // by any clients of this package.
  getName(): string;

  // Advanced functions: these are used to reduce the space requirements
  // for internal data structures like index blocks.

  // If *start < limit, changes *start to a short string in [start,limit).
  // Simple comparator implementations may return with *start unchanged,
  // i.e., an implementation of this method that does nothing is correct.
  findShortestSeparator(start: Slice, limit: Slice): void;

  // Changes *key to a short string >= *key.
  // Simple comparator implementations may return with *key unchanged,
  // i.e., an implementation of this method that does nothing is correct.
  findShortSuccessor(key: Slice): void;
}

/** Default comparator: lexicographic byte-wise ordering. */
export class BytewiseComparator implements Comparator {
  getName(): string {
    return "leveldb.BytewiseComparator";
  }

  compare(a: Slice, b: Slice): number {
    return a.compare(b);
  }

  findShortestSeparator(start: Slice, limit: Slice): void {
    // Find length of common prefix
    // NOTE(review): mixes `start.length` with `limit.size` — presumably
    // Slice exposes both as the same byte count; confirm they are aliases.
    const minLength = Math.min(start.length, limit.size);
    let diffIndex = 0;
    while (
      diffIndex < minLength &&
      start.buffer[diffIndex] == limit.buffer[diffIndex]
    ) {
      diffIndex++;
    }

    if (diffIndex >= minLength) {
      // Do not shorten if one string is a prefix of the other
    } else {
      const diffByte = start.buffer[diffIndex];
      // Only shorten when bumping the first differing byte still leaves
      // the result strictly below `limit`.
      if (diffByte < 0xff && diffByte + 1 < limit.buffer[diffIndex]) {
        start.buffer[diffIndex]++;
        start.buffer = start.buffer.slice(0, diffIndex + 1);
        assert(this.compare(start, limit) < 0);
      }
    }
  }

  findShortSuccessor(key: Slice): void {
    // Find first character that can be incremented
    const n = key.length;
    for (let i = 0; i < n; i++) {
      const byte = key.buffer[i];
      if (byte != 0xff) {
        key.buffer[i] = byte + 1;
        key.buffer = key.buffer.slice(0, i + 1);
        return;
      }
    }
    // *key is a run of 0xffs. Leave it alone.
88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/Crc32.ts: -------------------------------------------------------------------------------- 1 | // Rewrite to TypeScript https://github.com/brianloveswords/buffer-crc32 2 | 3 | import { Buffer } from "./Buffer"; 4 | 5 | const CRC_TABLE = new Int32Array([ 6 | 0x00000000, 7 | 0x77073096, 8 | 0xee0e612c, 9 | 0x990951ba, 10 | 0x076dc419, 11 | 0x706af48f, 12 | 0xe963a535, 13 | 0x9e6495a3, 14 | 0x0edb8832, 15 | 0x79dcb8a4, 16 | 0xe0d5e91e, 17 | 0x97d2d988, 18 | 0x09b64c2b, 19 | 0x7eb17cbd, 20 | 0xe7b82d07, 21 | 0x90bf1d91, 22 | 0x1db71064, 23 | 0x6ab020f2, 24 | 0xf3b97148, 25 | 0x84be41de, 26 | 0x1adad47d, 27 | 0x6ddde4eb, 28 | 0xf4d4b551, 29 | 0x83d385c7, 30 | 0x136c9856, 31 | 0x646ba8c0, 32 | 0xfd62f97a, 33 | 0x8a65c9ec, 34 | 0x14015c4f, 35 | 0x63066cd9, 36 | 0xfa0f3d63, 37 | 0x8d080df5, 38 | 0x3b6e20c8, 39 | 0x4c69105e, 40 | 0xd56041e4, 41 | 0xa2677172, 42 | 0x3c03e4d1, 43 | 0x4b04d447, 44 | 0xd20d85fd, 45 | 0xa50ab56b, 46 | 0x35b5a8fa, 47 | 0x42b2986c, 48 | 0xdbbbc9d6, 49 | 0xacbcf940, 50 | 0x32d86ce3, 51 | 0x45df5c75, 52 | 0xdcd60dcf, 53 | 0xabd13d59, 54 | 0x26d930ac, 55 | 0x51de003a, 56 | 0xc8d75180, 57 | 0xbfd06116, 58 | 0x21b4f4b5, 59 | 0x56b3c423, 60 | 0xcfba9599, 61 | 0xb8bda50f, 62 | 0x2802b89e, 63 | 0x5f058808, 64 | 0xc60cd9b2, 65 | 0xb10be924, 66 | 0x2f6f7c87, 67 | 0x58684c11, 68 | 0xc1611dab, 69 | 0xb6662d3d, 70 | 0x76dc4190, 71 | 0x01db7106, 72 | 0x98d220bc, 73 | 0xefd5102a, 74 | 0x71b18589, 75 | 0x06b6b51f, 76 | 0x9fbfe4a5, 77 | 0xe8b8d433, 78 | 0x7807c9a2, 79 | 0x0f00f934, 80 | 0x9609a88e, 81 | 0xe10e9818, 82 | 0x7f6a0dbb, 83 | 0x086d3d2d, 84 | 0x91646c97, 85 | 0xe6635c01, 86 | 0x6b6b51f4, 87 | 0x1c6c6162, 88 | 0x856530d8, 89 | 0xf262004e, 90 | 0x6c0695ed, 91 | 0x1b01a57b, 92 | 0x8208f4c1, 93 | 0xf50fc457, 94 | 0x65b0d9c6, 95 | 0x12b7e950, 96 | 0x8bbeb8ea, 97 | 0xfcb9887c, 98 | 0x62dd1ddf, 99 | 0x15da2d49, 100 | 0x8cd37cf3, 101 | 0xfbd44c65, 102 | 
0x4db26158, 103 | 0x3ab551ce, 104 | 0xa3bc0074, 105 | 0xd4bb30e2, 106 | 0x4adfa541, 107 | 0x3dd895d7, 108 | 0xa4d1c46d, 109 | 0xd3d6f4fb, 110 | 0x4369e96a, 111 | 0x346ed9fc, 112 | 0xad678846, 113 | 0xda60b8d0, 114 | 0x44042d73, 115 | 0x33031de5, 116 | 0xaa0a4c5f, 117 | 0xdd0d7cc9, 118 | 0x5005713c, 119 | 0x270241aa, 120 | 0xbe0b1010, 121 | 0xc90c2086, 122 | 0x5768b525, 123 | 0x206f85b3, 124 | 0xb966d409, 125 | 0xce61e49f, 126 | 0x5edef90e, 127 | 0x29d9c998, 128 | 0xb0d09822, 129 | 0xc7d7a8b4, 130 | 0x59b33d17, 131 | 0x2eb40d81, 132 | 0xb7bd5c3b, 133 | 0xc0ba6cad, 134 | 0xedb88320, 135 | 0x9abfb3b6, 136 | 0x03b6e20c, 137 | 0x74b1d29a, 138 | 0xead54739, 139 | 0x9dd277af, 140 | 0x04db2615, 141 | 0x73dc1683, 142 | 0xe3630b12, 143 | 0x94643b84, 144 | 0x0d6d6a3e, 145 | 0x7a6a5aa8, 146 | 0xe40ecf0b, 147 | 0x9309ff9d, 148 | 0x0a00ae27, 149 | 0x7d079eb1, 150 | 0xf00f9344, 151 | 0x8708a3d2, 152 | 0x1e01f268, 153 | 0x6906c2fe, 154 | 0xf762575d, 155 | 0x806567cb, 156 | 0x196c3671, 157 | 0x6e6b06e7, 158 | 0xfed41b76, 159 | 0x89d32be0, 160 | 0x10da7a5a, 161 | 0x67dd4acc, 162 | 0xf9b9df6f, 163 | 0x8ebeeff9, 164 | 0x17b7be43, 165 | 0x60b08ed5, 166 | 0xd6d6a3e8, 167 | 0xa1d1937e, 168 | 0x38d8c2c4, 169 | 0x4fdff252, 170 | 0xd1bb67f1, 171 | 0xa6bc5767, 172 | 0x3fb506dd, 173 | 0x48b2364b, 174 | 0xd80d2bda, 175 | 0xaf0a1b4c, 176 | 0x36034af6, 177 | 0x41047a60, 178 | 0xdf60efc3, 179 | 0xa867df55, 180 | 0x316e8eef, 181 | 0x4669be79, 182 | 0xcb61b38c, 183 | 0xbc66831a, 184 | 0x256fd2a0, 185 | 0x5268e236, 186 | 0xcc0c7795, 187 | 0xbb0b4703, 188 | 0x220216b9, 189 | 0x5505262f, 190 | 0xc5ba3bbe, 191 | 0xb2bd0b28, 192 | 0x2bb45a92, 193 | 0x5cb36a04, 194 | 0xc2d7ffa7, 195 | 0xb5d0cf31, 196 | 0x2cd99e8b, 197 | 0x5bdeae1d, 198 | 0x9b64c2b0, 199 | 0xec63f226, 200 | 0x756aa39c, 201 | 0x026d930a, 202 | 0x9c0906a9, 203 | 0xeb0e363f, 204 | 0x72076785, 205 | 0x05005713, 206 | 0x95bf4a82, 207 | 0xe2b87a14, 208 | 0x7bb12bae, 209 | 0x0cb61b38, 210 | 0x92d28e9b, 211 | 0xe5d5be0d, 212 | 0x7cdcefb7, 213 | 
0x0bdbdf21, 214 | 0x86d3d2d4, 215 | 0xf1d4e242, 216 | 0x68ddb3f8, 217 | 0x1fda836e, 218 | 0x81be16cd, 219 | 0xf6b9265b, 220 | 0x6fb077e1, 221 | 0x18b74777, 222 | 0x88085ae6, 223 | 0xff0f6a70, 224 | 0x66063bca, 225 | 0x11010b5c, 226 | 0x8f659eff, 227 | 0xf862ae69, 228 | 0x616bffd3, 229 | 0x166ccf45, 230 | 0xa00ae278, 231 | 0xd70dd2ee, 232 | 0x4e048354, 233 | 0x3903b3c2, 234 | 0xa7672661, 235 | 0xd06016f7, 236 | 0x4969474d, 237 | 0x3e6e77db, 238 | 0xaed16a4a, 239 | 0xd9d65adc, 240 | 0x40df0b66, 241 | 0x37d83bf0, 242 | 0xa9bcae53, 243 | 0xdebb9ec5, 244 | 0x47b2cf7f, 245 | 0x30b5ffe9, 246 | 0xbdbdf21c, 247 | 0xcabac28a, 248 | 0x53b39330, 249 | 0x24b4a3a6, 250 | 0xbad03605, 251 | 0xcdd70693, 252 | 0x54de5729, 253 | 0x23d967bf, 254 | 0xb3667a2e, 255 | 0xc4614ab8, 256 | 0x5d681b02, 257 | 0x2a6f2b94, 258 | 0xb40bbe37, 259 | 0xc30c8ea1, 260 | 0x5a05df1b, 261 | 0x2d02ef8d, 262 | ]); 263 | 264 | function ensureBuffer(input: any): Buffer { 265 | if (Buffer.isBuffer(input)) { 266 | return input; 267 | } 268 | 269 | if (typeof input === "number") { 270 | return Buffer.alloc(input); 271 | } else if (typeof input === "string") { 272 | return Buffer.fromUnknown(input); 273 | } else { 274 | console.log("input", input); 275 | throw new Error( 276 | "input must be buffer, number, or string, received " + typeof input, 277 | ); 278 | } 279 | } 280 | 281 | function bufferizeInt(num: number): Buffer { 282 | const tmp = ensureBuffer(4); 283 | tmp.writeInt32BE(num, 0); 284 | return tmp; 285 | } 286 | 287 | function signed(source: unknown, previous1?: number | Buffer): number { 288 | const buf = ensureBuffer(source); 289 | 290 | let previous = 0; 291 | if (previous1) { 292 | if (typeof previous1 === "number") { 293 | previous = previous1; 294 | } else { 295 | previous = previous1.readUInt32BE(0); 296 | } 297 | } 298 | let crc = ~~previous ^ -1; 299 | for (let n = 0; n < buf.length; n++) { 300 | crc = CRC_TABLE[(crc ^ buf[n]) & 0xff] ^ (crc >>> 8); 301 | } 302 | return crc ^ -1; 303 | } 304 | 
305 | // function unsigned(source: unknown, previous1?: number | Buffer): number { 306 | // return signed(source, previous1) >>> 0; 307 | // } 308 | 309 | export function crc32(source: unknown, previous1?: number | Buffer): Buffer { 310 | return bufferizeInt(signed(source, previous1)); 311 | } 312 | -------------------------------------------------------------------------------- /src/DBHelper.ts: -------------------------------------------------------------------------------- 1 | import { Buffer } from "./Buffer"; 2 | 3 | function pathResolve(...pathes: string[]): string { 4 | return pathes.join("/"); 5 | } 6 | 7 | export const path = { 8 | resolve: pathResolve, 9 | }; 10 | 11 | export function assert(bool: boolean, message?: string): void { 12 | try { 13 | if (!bool) { 14 | throw new Error(); 15 | } 16 | } catch (e) { 17 | throw new Error(`AssertError: ${message || e.stack[0]}`); 18 | } 19 | } 20 | 21 | const MSB = 0x80; 22 | const REST = 0x7f; 23 | const MSBALL = ~REST; 24 | const INT = Math.pow(2, 31); 25 | 26 | const N1 = Math.pow(2, 7); 27 | const N2 = Math.pow(2, 14); 28 | const N3 = Math.pow(2, 21); 29 | const N4 = Math.pow(2, 28); 30 | const N5 = Math.pow(2, 35); 31 | const N6 = Math.pow(2, 42); 32 | const N7 = Math.pow(2, 49); 33 | const N8 = Math.pow(2, 56); 34 | const N9 = Math.pow(2, 63); 35 | 36 | interface Encode { 37 | (num: number, out?: number[], offset?: number): number[]; 38 | bytes: number; 39 | } 40 | 41 | const encode: Encode = (num: number, out?: number[], offset?: number) => { 42 | out = out || []; 43 | offset = offset || 0; 44 | const oldOffset = offset; 45 | 46 | while (num >= INT) { 47 | out[offset++] = (num & 0xff) | MSB; 48 | num /= 128; 49 | } 50 | while (num & MSBALL) { 51 | out[offset++] = (num & 0xff) | MSB; 52 | num >>>= 7; 53 | } 54 | out[offset] = num | 0; 55 | 56 | encode.bytes = offset - oldOffset + 1; 57 | 58 | return out; 59 | }; 60 | 61 | encode.bytes = 0; 62 | 63 | function encodingLength(value: number): number { 64 | 
// Ladder over the precomputed thresholds N1..N9 (Nk = 2^(7k)).
  return value < N1
    ? 1
    : value < N2
    ? 2
    : value < N3
    ? 3
    : value < N4
    ? 4
    : value < N5
    ? 5
    : value < N6
    ? 6
    : value < N7
    ? 7
    : value < N8
    ? 8
    : value < N9
    ? 9
    : 10;
}

// Callable decoder that also reports, via `.bytes`, how many bytes the last
// call consumed (mirrors the `varint` npm package API).
interface Decode {
  (buf: Buffer, offset?: number): number;
  bytes: number;
}

const decode: Decode = (buf: Buffer, offset?: number) => {
  let res = 0;
  const offset2 = offset || 0;
  let shift = 0;
  let counter = offset2;
  let b: number;
  const l = buf.length;

  do {
    if (counter >= l) {
      // Ran off the end of the buffer mid-varint.
      decode.bytes = 0;
      throw new RangeError("Could not decode varint");
    }
    b = buf[counter++];
    // Below 28 bits a left shift is exact; beyond that JS bitwise ops
    // truncate to 32 bits, so fall back to floating-point multiplication.
    res += shift < 28 ? (b & REST) << shift : (b & REST) * Math.pow(2, shift);
    shift += 7;
  } while (b >= MSB);

  decode.bytes = counter - offset2;

  return res;
};

decode.bytes = 0;

export const varint = {
  decode,
  encode,
  encodingLength,
};
--------------------------------------------------------------------------------
/src/Env.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

// Encodings accepted by the readFile overloads below.
type BufferEncoding =
  | "ascii"
  | "utf8"
  | "utf-8"
  | "utf16le"
  | "ucs2"
  | "ucs-2"
  | "base64"
  | "latin1"
  | "binary"
  | "hex";

// Subset of Node's fs.Stats.
// NOTE(review): the numeric fields reference a type parameter `T` but no
// `<T>` is declared on the interface — generic parameters appear to have
// been stripped during extraction (likely `Stats<T = number>`); restore
// from the original source.
export interface Stats {
  isFile(): boolean;
  isDirectory(): boolean;
  isBlockDevice(): boolean;
  isCharacterDevice(): boolean;
  isSymbolicLink(): boolean;
  isFIFO(): boolean;
  isSocket(): boolean;

  dev: T;
  ino: T;
  mode: T;
  nlink: T;
  uid: T;
  gid: T;
  rdev: T;
  size: T;
  blksize: T;
  blocks: T;
  atimeMs: T;
  mtimeMs: T;
  ctimeMs: T;
  birthtimeMs: T;
  atime: Date;
  mtime: Date;
  ctime: Date;
  birthtime: Date;
}

// Subset of Node's fs.Dirent (directory entry).
export interface Dirent {
  isFile(): boolean;
  isDirectory(): boolean;
  isBlockDevice(): boolean;
  isCharacterDevice(): boolean;
  isSymbolicLink(): boolean;
  isFIFO(): boolean;
  isSocket(): boolean;
  name: string;
}

// Mirror of Node's fsPromises.FileHandle.
// NOTE(review): `Promise;` return types and the `TBuffer` references below
// lost their angle-bracket type arguments in extraction (e.g. this was
// `Promise<void>`, `read<TBuffer extends Uint8Array>(...)`); restore from
// the original source / @types/node.
export interface FileHandle {
  /**
   * Gets the file descriptor for this file handle.
   */
  readonly fd: number;

  /**
   * Asynchronously append data to a file, creating the file if it does not exist. The underlying file will _not_ be closed automatically.
   * The `FileHandle` must have been opened for appending.
   * @param data The data to write. If something other than a `Buffer` or `Uint8Array` is provided, the value is coerced to a string.
   * @param options Either the encoding for the file, or an object optionally specifying the encoding, file mode, and flag.
   * If `encoding` is not supplied, the default of `'utf8'` is used.
   * If `mode` is not supplied, the default of `0o666` is used.
   * If `mode` is a string, it is parsed as an octal integer.
   * If `flag` is not supplied, the default of `'a'` is used.
   */
  appendFile(
    data: any,
    options?:
      | {
          encoding?: string | null;
          mode?: string | number;
          flag?: string | number;
        }
      | string
      | null,
  ): Promise;

  /**
   * Asynchronous fchown(2) - Change ownership of a file.
   */
  chown(uid: number, gid: number): Promise;

  /**
   * Asynchronous fchmod(2) - Change permissions of a file.
   * @param mode A file mode. If a string is passed, it is parsed as an octal integer.
   */
  chmod(mode: string | number): Promise;

  /**
   * Asynchronous fdatasync(2) - synchronize a file's in-core state with storage device.
   */
  datasync(): Promise;

  /**
   * Asynchronous fsync(2) - synchronize a file's in-core state with the underlying storage device.
   */
  sync(): Promise;

  /**
   * Asynchronously reads data from the file.
   * The `FileHandle` must have been opened for reading.
   * @param buffer The buffer that the data will be written to.
   * @param offset The offset in the buffer at which to start writing.
   * @param length The number of bytes to read.
   * @param position The offset from the beginning of the file from which data should be read. If `null`, data will be read from the current position.
   */
  read(
    buffer: TBuffer,
    offset?: number | null,
    length?: number | null,
    position?: number | null,
  ): Promise<{ bytesRead: number; buffer: TBuffer }>;

  /**
   * Asynchronously reads the entire contents of a file. The underlying file will _not_ be closed automatically.
   * The `FileHandle` must have been opened for reading.
   * @param options An object that may contain an optional flag.
   * If a flag is not provided, it defaults to `'r'`.
   */
  readFile(
    options?: { encoding?: null; flag?: string | number } | null,
  ): Promise;

  /**
   * Asynchronously reads the entire contents of a file. The underlying file will _not_ be closed automatically.
   * The `FileHandle` must have been opened for reading.
   * @param options An object that may contain an optional flag.
   * If a flag is not provided, it defaults to `'r'`.
   */
  readFile(
    options:
      | { encoding: BufferEncoding; flag?: string | number }
      | BufferEncoding,
  ): Promise;

  /**
   * Asynchronously reads the entire contents of a file. The underlying file will _not_ be closed automatically.
   * The `FileHandle` must have been opened for reading.
   * @param options An object that may contain an optional flag.
   * If a flag is not provided, it defaults to `'r'`.
   */
  readFile(
    options?:
      | { encoding?: string | null; flag?: string | number }
      | string
      | null,
  ): Promise;

  /**
   * Asynchronous fstat(2) - Get file status.
   */
  stat(): Promise;

  /**
   * Asynchronous ftruncate(2) - Truncate a file to a specified length.
   * @param len If not specified, defaults to `0`.
   */
  truncate(len?: number): Promise;

  /**
   * Asynchronously change file timestamps of the file.
   * @param atime The last access time. If a string is provided, it will be coerced to number.
   * @param mtime The last modified time. If a string is provided, it will be coerced to number.
   */
  utimes(
    atime: string | number | Date,
    mtime: string | number | Date,
  ): Promise;

  /**
   * Asynchronously writes `buffer` to the file.
   * The `FileHandle` must have been opened for writing.
   * @param buffer The buffer that the data will be written to.
   * @param offset The part of the buffer to be written. If not supplied, defaults to `0`.
   * @param length The number of bytes to write. If not supplied, defaults to `buffer.length - offset`.
   * @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
   */
  write(
    buffer: TBuffer,
    offset?: number | null,
    length?: number | null,
    position?: number | null,
  ): Promise<{ bytesWritten: number; buffer: TBuffer }>;

  /**
   * Asynchronously writes `string` to the file.
   * The `FileHandle` must have been opened for writing.
   * It is unsafe to call `write()` multiple times on the same file without waiting for the `Promise`
   * to be resolved (or rejected). For this scenario, `fs.createWriteStream` is strongly recommended.
   * @param string A string to write. If something other than a string is supplied it will be coerced to a string.
   * @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
   * @param encoding The expected string encoding.
   */
  write(
    data: any,
    position?: number | null,
    encoding?: string | null,
  ): Promise<{ bytesWritten: number; buffer: string }>;

  /**
   * Asynchronously writes data to a file, replacing the file if it already exists. The underlying file will _not_ be closed automatically.
   * The `FileHandle` must have been opened for writing.
   * It is unsafe to call `writeFile()` multiple times on the same file without waiting for the `Promise` to be resolved (or rejected).
   * @param data The data to write. If something other than a `Buffer` or `Uint8Array` is provided, the value is coerced to a string.
   * @param options Either the encoding for the file, or an object optionally specifying the encoding, file mode, and flag.
   * If `encoding` is not supplied, the default of `'utf8'` is used.
   * If `mode` is not supplied, the default of `0o666` is used.
   * If `mode` is a string, it is parsed as an octal integer.
   * If `flag` is not supplied, the default of `'w'` is used.
   */
  writeFile(
    data: any,
    options?:
      | {
          encoding?: string | null;
          mode?: string | number;
          flag?: string | number;
        }
      | string
      | null,
  ): Promise;

  /**
   * Asynchronous close(2) - close a `FileHandle`.
   */
  close(): Promise;
}

// Platform abstraction the database runs against (node / deno ports each
// provide an implementation under port/).
export interface Env {
  onExit(callback: () => void): void;

  platform(): string;
  // get current time
  now(): number;
  access(dbpath: string): Promise;
  mkdir(dbpath: string): Promise;
  rename(oldpath: string, newpath: string): Promise;
  readFile(dbpath: string): Promise;
  readFile(
    dbpath: string,
    options?: { encoding?: string },
  ): Promise;
  readFile(dbpath: string, bufferEncoding: "utf8"): Promise;
  writeFile(dbpath: string, content: Uint8Array | string): Promise;
  open(dbpath: string, flag: string): Promise;
  unlink(filename: string): Promise;
  unlinkSync(filename: string): void;
  fstat(fd: FileHandle): Promise;
  readdir(dbpath: string): Promise;
  getFileSize(filename: string): Promise;
}
--------------------------------------------------------------------------------
/src/Filename.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
6 | */ 7 | 8 | import { path, assert } from "./DBHelper"; 9 | import { FileType } from "./Format"; 10 | import { Env } from "./Env"; 11 | 12 | function numberToString(num: number): string { 13 | let str = String(num); 14 | while (str.length < 6) { 15 | str = `0${str}`; 16 | } 17 | return str; 18 | } 19 | 20 | export function getCurrentFilename(dbpath: string): string { 21 | return path.resolve(dbpath, "CURRENT"); 22 | } 23 | 24 | export function getLogFilename(dbpath: string, logNumber: number): string { 25 | return path.resolve(dbpath, `${numberToString(logNumber)}.log`); 26 | } 27 | 28 | export function getTableFilename(dbpath: string, tableNumber: number): string { 29 | return path.resolve(dbpath, `${numberToString(tableNumber)}.ldb`); 30 | } 31 | 32 | export function getManifestFilename( 33 | dbpath: string, 34 | manifestNumber: number, 35 | ): string { 36 | return path.resolve(dbpath, `MANIFEST-${numberToString(manifestNumber)}`); 37 | } 38 | 39 | export function getLockFilename(dbpath: string): string { 40 | return path.resolve(dbpath, `LOCK`); 41 | } 42 | 43 | export function getInfoLogFilename(dbpath: string): string { 44 | return path.resolve(dbpath, `LOG`); 45 | } 46 | 47 | export function getOldInfoLogFilename(dbpath: string): string { 48 | return path.resolve(dbpath, `LOG.old`); 49 | } 50 | 51 | export function getTempFilename(dbpath: string, number: number): string { 52 | assert(number > 0); 53 | return path.resolve(dbpath, `${number}.dbtmp`); 54 | } 55 | 56 | type InternalFile = { 57 | isInternalFile: boolean; 58 | filename: string; 59 | number: number; 60 | type: FileType; 61 | }; 62 | 63 | export function parseFilename(filename: string): InternalFile { 64 | const internalFile = { 65 | isInternalFile: true, 66 | } as InternalFile; 67 | if (filename === "CURRENT") { 68 | internalFile.number = 0; 69 | internalFile.type = FileType.kCurrentFile; 70 | } else if (filename === "LOCK") { 71 | internalFile.number = 0; 72 | internalFile.type = 
FileType.kDBLockFile; 73 | } else if (filename === "LOG" || filename === "LOG.old") { 74 | internalFile.number = 0; 75 | internalFile.type = FileType.kInfoLogFile; 76 | } else if (filename.startsWith("MANIFEST-")) { 77 | const num = Number(filename.substr("MANIFEST-".length)); 78 | if (isNaN(num)) { 79 | internalFile.isInternalFile = false; 80 | return internalFile; 81 | } 82 | internalFile.number = num; 83 | internalFile.type = FileType.kDescriptorFile; 84 | } else { 85 | const num = Number(filename.split(".")[0]); 86 | if (isNaN(num)) { 87 | internalFile.isInternalFile = false; 88 | return internalFile; 89 | } 90 | const suffix = filename.substr(filename.split(".")[0].length); 91 | if (suffix === ".log") { 92 | internalFile.type = FileType.kLogFile; 93 | } else if (suffix === ".ldb") { 94 | internalFile.type = FileType.kTableFile; 95 | } else if (suffix === ".dbtmp") { 96 | internalFile.type = FileType.kTempFile; 97 | } else { 98 | internalFile.isInternalFile = false; 99 | return internalFile; 100 | } 101 | internalFile.number = num; 102 | } 103 | 104 | return internalFile; 105 | } 106 | 107 | export async function setCurrentFile( 108 | env: Env, 109 | dbpath: string, 110 | manifestNumber: number, 111 | ): Promise { 112 | const filename = getManifestFilename(dbpath, manifestNumber); 113 | assert(filename.startsWith(path.resolve(dbpath + "/"))); 114 | const content = filename.substr(path.resolve(dbpath + "/").length + 1); 115 | const tempFilename = getTempFilename(dbpath, manifestNumber); 116 | let error: void | Error; 117 | try { 118 | await env.writeFile(tempFilename, content + "\n"); 119 | } catch (e) { 120 | error = e; 121 | } 122 | if (!error) { 123 | await env.rename(tempFilename, getCurrentFilename(dbpath)); 124 | } else { 125 | await env.unlink(tempFilename); 126 | } 127 | return error; 128 | } 129 | -------------------------------------------------------------------------------- /src/Hash.ts: 
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

import { decodeFixed32 } from "./Coding";
import { Buffer } from "./Buffer";

// Similar to murmur hash
// NOTE(review): classic murmur-style hashing multiplies by the constant m
// each round (`h *= m`), but this loop multiplies by the data word
// (`h *= w`), and the tail `switch` does not fall through 3 -> 2 -> 1 as
// leveldb's hash does. Intentional or not, changing either now would
// invalidate persisted bloom-filter data — confirm before touching.
// Also: `data.slice(i)` allocates per iteration; passing an offset to
// decodeFixed32 (if supported) would avoid O(n) allocations.
export function hash(data: Buffer, seed = 0): number {
  const m = 0xc6a4a793;
  const r = 24;
  const size = data.byteLength;
  const remainder = size % 4;
  let h = seed ^ (size * m);

  // Pick up four bytes at a time
  let i = 0;
  while (i <= size - 4) {
    const w = decodeFixed32(data.slice(i));
    h += w;
    h *= w;
    h ^= h >> 16;
    i += 4;
  }

  // Pick up remaining bytes
  switch (remainder) {
    case 3:
      h += data[i + 2] << 16;
      break;
    case 2:
      h += data[i + 1] << 8;
      break;
    case 1:
      h += data[i];
      h *= m;
      h ^= h >> r;
      break;
  }
  return h;
}
--------------------------------------------------------------------------------
/src/IteratorHelper.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

// NOTE(review): generic type parameters (e.g. `wrap<T>(...)`,
// `Promise<IteratorResult<T>>`) appear to have been stripped during
// extraction — `T` is referenced but never declared below. Restore from the
// original source. (This file also omits semicolons, unlike its siblings.)
export default class IteratorHelper {
  static wrap(
    iterator: IterableIterator,
    callback: (value: T) => void
  ): IterableIterator
  static wrap(
    iterator: AsyncIterableIterator,
    callback: (value: T) => void
  ): AsyncIterableIterator
  // Install a `return` handler on the iterator so `callback` fires when the
  // consumer stops early (break / early return from a for...of loop).
  static wrap(
    iterator: IterableIterator | AsyncIterableIterator,
    callback: (value: T) => void
  ): IterableIterator | AsyncIterableIterator {
    if (Symbol.iterator in iterator) {
      const it = iterator as IterableIterator
      it.return = (): IteratorResult => {
        // NOTE(review): next() advances the underlying iterator one extra
        // step before closing — confirm this is intended.
        const value = it.next().value
        callback(value)
        return { done: true, value }
      }
      return it
    }

    // Async case: same idea, but also route a failing next() into callback.
    iterator.return = async (): Promise> => {
      try {
        const value = (await iterator.next()).value
        callback(value)
        return { done: true, value }
      } catch (e) {
        callback(e)
        return { done: true, value: e }
      }
    }
    return iterator
  }

  // Adapt a synchronous iterator to the async-iterator protocol.
  static makeAsync(iterator: IterableIterator): AsyncIterableIterator {
    const asyncIter = {
      return: async (value?: any): Promise> => {
        try {
          const value = iterator.next().value
          return { done: true, value }
        } catch (e) {
          return { done: true, value: e }
        }
      },
      next: async (value?: any): Promise> => {
        return iterator.next()
      },
    } as AsyncIterableIterator

    return {
      next: asyncIter.next,
      [Symbol.asyncIterator](): AsyncIterableIterator {
        return asyncIter
      },
    }
  }
}
--------------------------------------------------------------------------------
/src/LRUCache.ts:
--------------------------------------------------------------------------------
// Rewrite to TypeScript https://github.com/isaacs/node-lru-cache

import { Yallist, YallistNode } from "./Yallist";

// Private property keys; symbols keep them off the public surface.
const MAX = Symbol("max");
const LENGTH = Symbol("length");
const LENGTH_CALCULATOR = Symbol("lengthCalculator");
const
ALLOW_STALE = Symbol("allowStale");
const MAX_AGE = Symbol("maxAge");
const DISPOSE = Symbol("dispose");
const NO_DISPOSE_ON_SET = Symbol("noDisposeOnSet");
const LRU_LIST = Symbol("lruList");
const CACHE = Symbol("cache");
const UPDATE_AGE_ON_GET = Symbol("updateAgeOnGet");

// Default length calculator: every entry costs 1.
const naiveLength = (): number => 1;

// NOTE(review): `K` / `V` are referenced below but no generic parameters are
// declared — angle-bracket generics appear stripped by extraction (likely
// `LRUCacheOptions<K, V>`); restore from the original source.
export interface LRUCacheOptions {
  /**
   * The maximum size of the cache, checked by applying the length
   * function to all values in the cache. Not setting this is kind of silly,
   * since that's the whole purpose of this lib, but it defaults to `Infinity`.
   */
  max?: number;

  /**
   * Maximum age in ms. Items are not pro-actively pruned out as they age,
   * but if you try to get an item that is too old, it'll drop it and return
   * undefined instead of giving it to you.
   */
  maxAge?: number;

  /**
   * Function that is used to calculate the length of stored items.
   * If you're storing strings or buffers, then you probably want to do
   * something like `function(n, key){return n.length}`. The default
   * is `function(){return 1}`, which is fine if you want to store
   * `max` like-sized things. The item is passed as the first argument,
   * and the key is passed as the second argument.
   */
  length?(value: V, key?: K): number;

  /**
   * Function that is called on items when they are dropped from the cache.
   * This can be handy if you want to close file descriptors or do other
   * cleanup tasks when items are no longer accessible. Called with `key, value`.
   * It's called before actually removing the item from the internal cache,
   * so if you want to immediately put it back in, you'll have to do that in
   * a `nextTick` or `setTimeout` callback or it won't do anything.
   */
  dispose?(key: K, value: V): void;

  /**
   * By default, if you set a `maxAge`, it'll only actually pull stale items
   * out of the cache when you `get(key)`. (That is, it's not pre-emptively
   * doing a `setTimeout` or anything.) If you set `stale:true`, it'll return
   * the stale value before deleting it. If you don't set this, then it'll
   * return `undefined` when you try to get a stale entry,
   * as if it had already been deleted.
   */
  stale?: boolean;

  /**
   * By default, if you set a `dispose()` method, then it'll be called whenever
   * a `set()` operation overwrites an existing key. If you set this option,
   * `dispose()` will only be called when a key falls out of the cache,
   * not when it is overwritten.
   */
  noDisposeOnSet?: boolean;

  /**
   * When using time-expiring entries with `maxAge`, setting this to `true` will make each
   * item's effective time update to the current time whenever it is retrieved from cache,
   * causing it to not expire. (It can still fall out of cache based on recency of use, of
   * course.)
   */
  updateAgeOnGet?: boolean;
}

/** One cache record; stored as the value of a Yallist node. */
class Entry {
  constructor(key: K, value: V, length: number, now: number, maxAge: number) {
    this.key = key;
    this.value = value;
    this.length = length;
    this.now = now;          // timestamp at insert/update (0 when no expiry)
    this.maxAge = maxAge || 0;
  }

  key: K;
  value: V;
  length: number;
  now: number;
  maxAge: number;
}

// lruList is a yallist where the head is the youngest
// item, and the tail is the oldest. the list contains the Hit
// objects as the entries.
// Each Hit object has a reference to its Yallist.Node. This
// never changes.
//
// cache is a Map (or PseudoMap) that matches the keys to
// the Yallist.Node object.
export class LRUCache<K, V> {
  constructor(options: LRUCacheOptions<K, V> = {}) {
    if (options.max && (typeof options.max !== "number" || options.max < 0))
      throw new TypeError("max must be a non-negative number");
    // Kind of weird to have a default max of Infinity, but oh well.
    this[MAX] = options.max || Infinity;

    const lc = options.length || naiveLength;
    this[LENGTH_CALCULATOR] = typeof lc !== "function" ? naiveLength : lc;
    this[ALLOW_STALE] = options.stale || false;
    if (options.maxAge && typeof options.maxAge !== "number")
      throw new TypeError("maxAge must be a number");
    this[MAX_AGE] = options.maxAge || 0;
    this[DISPOSE] = options.dispose;
    this[NO_DISPOSE_ON_SET] = options.noDisposeOnSet || false;
    this[UPDATE_AGE_ON_GET] = options.updateAgeOnGet || false;
    this.reset();
  }

  [LENGTH] = 0;
  [LENGTH_CALCULATOR]: (value: V, key?: K) => number;
  [MAX]: number;
  [ALLOW_STALE]: boolean;
  [MAX_AGE]: number;
  [DISPOSE]?: (key: K, value: V) => void;
  [NO_DISPOSE_ON_SET]: boolean;
  [UPDATE_AGE_ON_GET]: boolean;
  [LRU_LIST]: Yallist<Entry<K, V>>;
  [CACHE] = new Map<K, YallistNode<Entry<K, V>>>();

  /** Drop every entry (invoking dispose on each) and start empty. */
  reset(): void {
    const dispose = this[DISPOSE];
    if (dispose) {
      if (this[LRU_LIST] && this[LRU_LIST].length) {
        this[LRU_LIST].forEach((hit) => dispose(hit.key, hit.value));
      }
    }

    this[CACHE] = new Map(); // hash of items by key
    this[LRU_LIST] = new Yallist(); // list of items in order of use recency
    this[LENGTH] = 0; // length of items in the list
  }

  /**
   * Insert or overwrite `key`. Returns false (and stores nothing; an
   * existing entry under the key is dropped) when the item is larger
   * than `max`.
   */
  set(key: K, value: V, maxAge?: number): boolean {
    maxAge = maxAge || this[MAX_AGE];

    if (maxAge && typeof maxAge !== "number")
      throw new TypeError("maxAge must be a number");

    const now = maxAge ? Date.now() : 0;
    const len = this[LENGTH_CALCULATOR](value, key);
    const dispose = this[DISPOSE];

    const node = this[CACHE].get(key);
    if (node) {
      // Overwriting an existing key.
      if (len > this[MAX]) {
        this.del(node);
        return false;
      }

      const item = node.value;

      // dispose of the old one before overwriting
      // split out into 2 ifs for better coverage tracking
      if (dispose) {
        if (!this[NO_DISPOSE_ON_SET]) dispose(key, item.value);
      }

      item.now = now;
      item.maxAge = maxAge;
      item.value = value;
      this[LENGTH] += len - item.length;
      item.length = len;
      // Promote the refreshed entry to most-recently-used. doUse must be
      // passed explicitly: this class's get() defaults doUse to false
      // (unlike upstream lru-cache), so a bare get(key) would not reorder.
      this.get(key, true);
      this.trim();
      return true;
    }

    const hit = new Entry(key, value, len, now, maxAge);

    // oversized objects fall out of cache automatically.
    if (hit.length > this[MAX]) {
      if (dispose) dispose(key, value);

      return false;
    }

    this[LENGTH] += hit.length;
    this[LRU_LIST].unshift(hit);
    this[CACHE].set(key, this[LRU_LIST].head as YallistNode<Entry<K, V>>);
    this.trim();
    return true;
  }

  /** Remove a list node, dispose its entry, and update bookkeeping. */
  del(node: YallistNode<Entry<K, V>>): void {
    if (node) {
      const dispose = this[DISPOSE];
      const hit = node.value;
      if (dispose) dispose(hit.key, hit.value);

      this[LENGTH] -= hit.length;
      this[CACHE].delete(hit.key);
      this[LRU_LIST].removeNode(node);
    }
  }

  /** True when the entry's (or the cache-wide) maxAge has elapsed. */
  isStale(hit: Entry<K, V>): boolean {
    if (!hit || (!hit.maxAge && !this[MAX_AGE])) return false;

    const diff = Date.now() - hit.now;
    if (hit.maxAge) {
      return diff > hit.maxAge;
    }

    return diff > this[MAX_AGE];
  }

  /** Evict from the tail (least recently used) until total length fits max. */
  trim(): void {
    if (this[LENGTH] <= this[MAX]) return;
    let walker = this[LRU_LIST].tail;
    while (this[LENGTH] > this[MAX] && walker !== null) {
      // We know that we're about to delete this one, and also
      // what the next least recently used key will be, so just
      // go ahead and grab it now before del() unlinks the node.
      const prev = walker.prev;
      this.del(walker);
      walker = prev;
    }
  }

  /**
   * Look up `key`. Stale entries are deleted on access; the stale value is
   * still returned when the `stale` option was set. Pass doUse=true to also
   * promote the entry to most-recently-used (and refresh its age when
   * `updateAgeOnGet` is set).
   */
  get(key: K, doUse = false): V | void {
    const node = this[CACHE].get(key);
    if (node) {
      const hit = node.value;
      if (this.isStale(hit)) {
        this.del(node);
        if (!this[ALLOW_STALE]) return;
      } else {
        if (doUse) {
          if (this[UPDATE_AGE_ON_GET]) node.value.now = Date.now();
          this[LRU_LIST].unshiftNode(node);
        }
      }
      return hit.value;
    }
  }
}
--------------------------------------------------------------------------------
/src/Lockfile.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

import { Options } from "./Options";

interface Timer {
  hasRef(): boolean;
  ref(): this;
  refresh(): this;
  unref(): this;
}

/**
 * Advisory file lock: the holder rewrites the lockfile every stale/2 ms,
 * and a contender treats a lockfile whose timestamp stopped moving for
 * longer than `lockfileStale` as abandoned.
 */
export class Lockfile {
  constructor(filename: string, options: Options) {
    this.filename = filename;
    this.options = options;
    // NOTE(review): mtime is used on win32, ctime elsewhere — presumably
    // because ctime tracks rewrites more reliably on POSIX; confirm.
    this.filetime = options.env.platform() === "win32" ? "mtime" : "ctime";
    this.stale = options.lockfileStale;

    this.options.env.onExit(() => {
      // if db has been destroyed manually, unlink will fail.
      try {
        this.options.env.unlinkSync(this.filename);
      } catch (e) {}
    });
  }

  private filetime: "mtime" | "ctime";
  private filename: string;
  private options: Options;
  private refreshLockTimer!: Timer;
  private _locked = false;
  private stale: number;

  public get locked(): boolean {
    return this._locked;
  }

  /** Release the lock: best-effort unlink and stop the refresh timer. */
  public async unlock(): Promise<void> {
    try {
      // if db has been destroyed manually, unlink will fail.
      await this.options.env.unlink(this.filename);
    } catch (e) {}
    this._locked = false;
    clearInterval(this.refreshLockTimer);
  }

  /**
   * Poll the lockfile timestamp until it has not been refreshed for longer
   * than `stale` (returns true), or give up after ~2*stale (returns false).
   */
  private async waitUntilExpire(startTime = Date.now()): Promise<boolean> {
    try {
      const fd = await this.options.env.open(this.filename, "r");
      try {
        const stats = await fd.stat();
        const filetime = new Date(stats[this.filetime]).getTime();
        // 1000ms slack so a live holder refreshing every stale/2 ms is
        // not mistaken for an expired one.
        if (Date.now() > filetime + this.stale + 1000) return true;
      } catch (e) {
      } finally {
        await fd.close();
      }
      // wait time should be longer
      if (Date.now() > startTime + this.stale * 2 + 1000) return false;
      await new Promise((resolve) => setTimeout(resolve, this.stale / 2));
      return await this.waitUntilExpire(startTime);
    } catch (e) {
      // NOTE(review): open() failing (e.g. the holder unlinked the file
      // mid-wait) reports "not expired", which makes lock() fail — confirm
      // this is intended rather than retrying.
      return false;
    }
  }

  /** True when the lockfile is absent or has expired. */
  private async waitUntilOk(): Promise<boolean> {
    try {
      const fd = await this.options.env.open(this.filename, "r");
      await fd.close();
      // file exist, wait file expire
      const expired = await this.waitUntilExpire();
      return expired;
    } catch (e) {
      if (e.code === "ENOENT") {
        return true;
      }
      return false;
    }
  }

  /** Touch the lockfile (empty write) to claim it / refresh its timestamp. */
  public async writeSomething(): Promise<void> {
    try {
      await this.options.env.writeFile(this.filename, ``);
      this._locked = true;
    } catch (e) {}
  }

  /** Acquire the lock, waiting out a stale holder; throws on failure. */
  public async lock(): Promise<void> {
    const ok = await this.waitUntilOk();
    if (!ok) {
      throw new Error("Lock fail");
    }
await this.writeSomething(); 100 | this.refreshLockTimer = setInterval( 101 | () => this.writeSomething(), 102 | this.stale / 2, 103 | ); 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/LogFormat.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | import Slice from './Slice' 9 | 10 | export enum RecordType { 11 | // Zero is reserved for preallocated files 12 | kZeroType = 0, 13 | 14 | kFullType = 1, 15 | 16 | // For fragments 17 | kFirstType = 2, 18 | kMiddleType = 3, 19 | kLastType = 4, 20 | } 21 | 22 | export function createHexStringFromDecimal(decimal: number): string { 23 | let str = decimal.toString(16) 24 | while (str.length < 4) { 25 | str = `0${str}` 26 | } 27 | return str 28 | } 29 | 30 | export interface Record { 31 | length: number 32 | type: number 33 | data: Slice 34 | } 35 | 36 | export const kMaxRecordType = RecordType.kLastType 37 | 38 | export const kBlockSize = 32768 39 | 40 | // Header is checksum (4 bytes), length (2 bytes), type (1 byte). 41 | export const kHeaderSize = 4 + 2 + 1 42 | -------------------------------------------------------------------------------- /src/LogReader.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { assert } from "./DBHelper"; 9 | import { Buffer } from "./Buffer"; 10 | import Slice from "./Slice"; 11 | import { Record, kBlockSize, RecordType, kHeaderSize } from "./LogFormat"; 12 | import { FileHandle } from "./Env"; 13 | 14 | export default class LogReader { 15 | constructor(file: FileHandle) { 16 | this._file = file; 17 | } 18 | 19 | _file: FileHandle; 20 | 21 | async close(): Promise { 22 | if (!!this._file) { 23 | const file = this._file; 24 | delete this._file; 25 | try { 26 | await file.close(); 27 | } catch (e) {} 28 | } 29 | } 30 | 31 | async *iterator(): AsyncIterableIterator { 32 | const buf: Buffer = Buffer.fromUnknown(new ArrayBuffer(kBlockSize)); 33 | let blockIndex = -1; 34 | let latestOpBuf = Buffer.alloc(0); 35 | let latestType = null; 36 | let bufHandledPosition = 0; 37 | while (true) { 38 | // read file fragment to `buf` 39 | if (blockIndex === -1 || bufHandledPosition >= kBlockSize - kHeaderSize) { 40 | const position = ++blockIndex * kBlockSize; 41 | const { bytesRead } = await this._file.read( 42 | buf, 43 | 0, 44 | kBlockSize, 45 | position, 46 | ); 47 | if (bytesRead === 0) { 48 | await this.close(); 49 | return; 50 | } 51 | bufHandledPosition = 0; 52 | continue; 53 | } 54 | 55 | // buf may be re-fill, to avoid this, copy it 56 | const record = this.readPhysicalRecord( 57 | Buffer.fromUnknown(buf.slice(bufHandledPosition)), 58 | ); 59 | bufHandledPosition += record.length + kHeaderSize; 60 | if (record.type === RecordType.kFullType) { 61 | const opSlice = new Slice(record.data.buffer); 62 | yield opSlice; 63 | } else if (record.type === RecordType.kLastType) { 64 | assert(latestType !== RecordType.kLastType); 65 | latestOpBuf = Buffer.concat([latestOpBuf, record.data.buffer]); 66 | const opSlice = new Slice(latestOpBuf); 67 | latestOpBuf = Buffer.alloc(0); 68 | yield opSlice; 69 | } else if (record.type === RecordType.kFirstType) { 70 | assert(latestType !== RecordType.kFirstType); 71 | latestOpBuf = 
record.data.buffer; 72 | } else if (record.type === RecordType.kMiddleType) { 73 | latestOpBuf = Buffer.concat([latestOpBuf, record.data.buffer]); 74 | } else if (record.type === RecordType.kZeroType) { 75 | // skip this block 76 | latestType = record.type; 77 | bufHandledPosition = kBlockSize; 78 | } 79 | latestType = record.type; 80 | } 81 | } 82 | 83 | private readPhysicalRecord(buf: Buffer): Record { 84 | const head = buf.slice(0, kHeaderSize); 85 | const recordType = head[6]; 86 | const head4 = head[4] & 0xff; 87 | const head5 = head[5] & 0xff; 88 | const length = head4 | (head5 << 8); 89 | 90 | const data = new Slice(buf.slice(kHeaderSize, kHeaderSize + length)); 91 | return { 92 | length, 93 | data, 94 | type: recordType, 95 | }; 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/LogRecord.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { varint } from "./DBHelper"; 9 | import Slice from "./Slice"; 10 | import { ValueType } from "./Format"; 11 | import { Buffer } from "./Buffer"; 12 | 13 | export default class LogRecord { 14 | static add(key: Slice, value: Slice): Slice { 15 | return new Slice( 16 | Buffer.concat([ 17 | Buffer.fromUnknown([ValueType.kTypeValue]), 18 | Buffer.fromUnknown(varint.encode(key.length)), 19 | key.buffer, 20 | Buffer.fromUnknown(varint.encode(value.length)), 21 | value.buffer, 22 | ]), 23 | ); 24 | } 25 | 26 | static del(key: Slice): Slice { 27 | return new Slice( 28 | Buffer.concat([ 29 | Buffer.fromUnknown([ValueType.kTypeDeletion]), 30 | Buffer.fromUnknown(varint.encode(key.length)), 31 | key.buffer, 32 | ]), 33 | ); 34 | } 35 | 36 | // static decode(op: Slice): { type: ValueType; key: Slice; value?: Slice } { 37 | // const valueType = op.buffer.readUInt8(0) 38 | // let index = 1 39 | // const keyLength = varint.decode(op.buffer.slice(1)) 40 | // index += varint.decode.bytes 41 | // const keyBuffer = op.buffer.slice(index, index + keyLength) 42 | // index += keyLength 43 | 44 | // if (valueType === ValueType.kTypeDeletion) { 45 | // return { 46 | // type: valueType, 47 | // key: new Slice(keyBuffer), 48 | // } 49 | // } 50 | 51 | // const valueLength = varint.decode(op.buffer.slice(index)) 52 | // index += varint.decode.bytes 53 | // const valueBuffer = op.buffer.slice(index, index + valueLength) 54 | // return { 55 | // type: valueType, 56 | // key: new Slice(keyBuffer), 57 | // value: new Slice(valueBuffer), 58 | // } 59 | // } 60 | 61 | // constructor(recordType: RecordType, data: Slice | Buffer) { 62 | // this.recordType = recordType 63 | // this.data = new Slice(data) 64 | // } 65 | 66 | // get length(): number { 67 | // return this.data.length + kHeaderSize 68 | // } 69 | 70 | // get size(): number { 71 | // return this.length 72 | // } 73 | 74 | // data: Slice 75 | // recordType: RecordType 76 | 77 | // get buffer(): Buffer { 78 | // const 
lengthBuf = Buffer.from( 79 | // createHexStringFromDecimal(this.data.length), 80 | // 'hex' 81 | // ) 82 | // const typeBuf = Buffer.from([this.recordType]) 83 | // const sum = crc32(Buffer.concat([typeBuf, this.data.buffer])) 84 | // return Buffer.concat([sum, lengthBuf, typeBuf, this.data.buffer]) 85 | // } 86 | } 87 | -------------------------------------------------------------------------------- /src/LogWriter.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | import { assert } from "./DBHelper"; 9 | import { Buffer } from "./Buffer"; 10 | import { crc32 } from "./Crc32"; 11 | import { kBlockSize, kHeaderSize, RecordType } from "./LogFormat"; 12 | import Slice from "./Slice"; 13 | import { FileHandle } from "./Env"; 14 | 15 | export default class LogWriter { 16 | constructor(file: FileHandle) { 17 | this._blockOffset = 0; 18 | this._file = file; 19 | } 20 | 21 | private _file!: FileHandle; 22 | private _blockOffset: number; 23 | 24 | private async appendFile(buf: Buffer): Promise { 25 | await this._file.appendFile(buf, {}); 26 | } 27 | 28 | public close = async (): Promise => { 29 | assert(!!this._file); 30 | const file = this._file; 31 | delete this._file; 32 | try { 33 | await file.close(); 34 | } catch (e) {} 35 | }; 36 | 37 | private emitPhysicalRecord = async ( 38 | record: Buffer, 39 | type: RecordType, 40 | ): Promise => { 41 | const head = Buffer.alloc(kHeaderSize); 42 | const length = record.length; 43 | head[4] = length & 0xff; 44 | head[5] = length >> 8; 45 | head[6] = type; 46 | const merged = Buffer.concat([ 47 | Buffer.fromUnknown([type]), 48 | record, 49 | Buffer.fromUnknown([record.length]), 50 | ]); 51 | const crc = crc32(merged); 52 | head.fillBuffer(crc, 0, 4); 53 | 54 | this._blockOffset += record.length 
+ kHeaderSize; 55 | await this.appendFile(Buffer.concat([head, record])); 56 | }; 57 | 58 | /** 59 | * Not care about record format 60 | */ 61 | public addRecord = async (recordOp: Slice): Promise => { 62 | let hasFirstRecordCreated = false; 63 | let left = recordOp.size; 64 | let position = 0; 65 | while (left > 0) { 66 | const leftover = kBlockSize - this._blockOffset; 67 | assert(leftover >= 0); 68 | if (leftover < kHeaderSize) { 69 | // Switch to a new block 70 | if (leftover > 0) { 71 | // Fill the trailer (literal below relies on kHeaderSize being 7) 72 | assert(kHeaderSize == 7); 73 | await this.appendFile(Buffer.alloc(leftover)); 74 | } 75 | this._blockOffset = 0; 76 | } 77 | 78 | // Invariant: we never leave < kHeaderSize bytes in a block. 79 | assert(kBlockSize - this._blockOffset - kHeaderSize >= 0); 80 | 81 | const avail = kBlockSize - this._blockOffset - kHeaderSize; 82 | const fragmentLength = left < avail ? left : avail; 83 | 84 | let recordType: RecordType; 85 | const isEnd = left === fragmentLength; 86 | 87 | if (!hasFirstRecordCreated && isEnd) { 88 | recordType = RecordType.kFullType; 89 | } else if (!hasFirstRecordCreated) { 90 | recordType = RecordType.kFirstType; 91 | } else if (isEnd) { 92 | recordType = RecordType.kLastType; 93 | } else { 94 | recordType = RecordType.kMiddleType; 95 | } 96 | 97 | await this.emitPhysicalRecord( 98 | recordOp.buffer.slice(position, position + fragmentLength), 99 | recordType, 100 | ); 101 | 102 | hasFirstRecordCreated = true; 103 | position += fragmentLength; 104 | left -= fragmentLength; 105 | } 106 | }; 107 | } 108 | -------------------------------------------------------------------------------- /src/MemTable.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { assert, varint } from "./DBHelper"; 9 | import { Buffer } from "./Buffer"; 10 | import { 11 | SequenceNumber, 12 | ValueType, 13 | InternalKeyComparator, 14 | LookupKey, 15 | Entry, 16 | EntryRequireType, 17 | InternalKey, 18 | } from "./Format"; 19 | import Skiplist from "./Skiplist"; 20 | import Slice from "./Slice"; 21 | import { getLengthPrefixedSlice, encodeFixed64 } from "./Coding"; 22 | 23 | export default class MemTable { 24 | static getValueSlice(key: Slice): Slice | null { 25 | const internalKeySize = varint.decode(key.buffer); 26 | const valueType = varint.decode(key.buffer.slice(internalKeySize)); 27 | if (valueType === ValueType.kTypeDeletion) { 28 | return null; 29 | } 30 | const valueBuffer = key.buffer.slice(varint.decode.bytes + internalKeySize); 31 | const valueSize = varint.decode(valueBuffer); 32 | const value = valueBuffer.slice( 33 | varint.decode.bytes, 34 | varint.decode.bytes + valueSize, 35 | ); 36 | return new Slice(value); 37 | } 38 | 39 | // memkey: 40 | static getEntryFromMemTableKey(key: Slice): Entry { 41 | let index = 0; 42 | 43 | const internalKeySize = varint.decode(key.buffer); 44 | index += varint.decode.bytes; 45 | const internalKey = new Slice( 46 | key.buffer.slice(index, index + internalKeySize), 47 | ); 48 | index += internalKeySize; 49 | const valueSize = varint.decode(key.buffer.slice(index)); 50 | index += varint.decode.bytes; 51 | const value = new Slice(key.buffer.slice(index, index + valueSize)); 52 | return { key: internalKey, value } as Entry; 53 | } 54 | 55 | private _immutable: boolean; 56 | private _list: Skiplist; 57 | private _size: number; 58 | refs: number; 59 | internalKeyComparator: InternalKeyComparator; 60 | 61 | constructor(internalKeyComparator: InternalKeyComparator) { 62 | this._immutable = false; 63 | this.internalKeyComparator = internalKeyComparator; 64 | this._list = new Skiplist(this.keyComparator); 65 | this._size = 0; 66 | this.refs = 0; 67 | } 68 | 69 | // a and b is 
memtable key 70 | keyComparator = (a: Slice, b: Slice): number => { 71 | const internalKeyBufA = getLengthPrefixedSlice(a); 72 | const internalKeyBufB = getLengthPrefixedSlice(b); 73 | return this.internalKeyComparator.compare(internalKeyBufA, internalKeyBufB); 74 | }; 75 | 76 | ref(): void { 77 | this.refs++; 78 | } 79 | 80 | unref(): void { 81 | this.refs--; 82 | } 83 | 84 | get size(): number { 85 | return this._size; 86 | } 87 | 88 | get immutable(): boolean { 89 | return this._immutable; 90 | } 91 | 92 | set immutable(next: boolean) { 93 | if (next) this._immutable = true; 94 | } 95 | 96 | add( 97 | sequence: SequenceNumber, 98 | valueType: ValueType, 99 | key: Slice, 100 | value?: Slice, 101 | ): void { 102 | const keySize = key.length; 103 | const valueSize = !value ? 0 : value.length; 104 | const internalKeySize = keySize + 8; // sequence=7bytes, type = 1byte 105 | const valueSizeBuf = Buffer.fromUnknown(varint.encode(valueSize)); 106 | let encodedLength = internalKeySize + valueSize + varint.encode.bytes; 107 | const internalKeySizeBuf = Buffer.fromUnknown( 108 | varint.encode(internalKeySize), 109 | ); 110 | encodedLength += varint.encode.bytes; 111 | 112 | /** 113 | * encoded(internal_key_size) | key | sequence(7Bytes) | type (1Byte) | encoded(value_size) | value 114 | * 1. Lookup key/ Memtable Key: encoded(internal_key_size) --- type(1Byte) 115 | * 2. Internal key: key --- type(1Byte) 116 | * 3. User key: key 117 | */ 118 | const sequenceBuf = encodeFixed64(sequence); 119 | sequenceBuf.fillInt(valueType, 7, 8); 120 | const buf = new Slice( 121 | Buffer.concat([ 122 | internalKeySizeBuf, 123 | key.buffer, 124 | sequenceBuf, 125 | valueSizeBuf, 126 | !value ? 
Buffer.alloc(0) : value.buffer, 127 | ]), 128 | ); 129 | assert(encodedLength === buf.length, "Incorrect length"); 130 | // buf include both key and value 131 | this._list.put(buf); 132 | this._size += buf.length; 133 | } 134 | 135 | // entry format is: 136 | // klength varint32 137 | // userkey char[klength] 138 | // tag uint64 139 | // vlength varint32 140 | // value char[vlength] 141 | // Check that it belongs to same user key. We do not check the 142 | // sequence number since the Seek() call above should have skipped 143 | // all entries with overly large sequence numbers. 144 | // 145 | // this key is lookup key 146 | get(key: LookupKey): EntryRequireType | void { 147 | const memkey = key.memKey; 148 | const node = this._list.seek(memkey); 149 | if (!!node) { 150 | const entry = MemTable.getEntryFromMemTableKey(node.key); 151 | const internalKey = InternalKey.from(entry.key); 152 | if ( 153 | this.internalKeyComparator.userComparator.compare( 154 | internalKey.userKey, 155 | key.userKey, 156 | ) === 0 157 | ) { 158 | return { key: entry.key, value: entry.value, type: internalKey.type }; 159 | } 160 | } 161 | } 162 | 163 | *iterator(reverse = false): IterableIterator { 164 | for (const value of this._list.iterator(reverse)) { 165 | yield MemTable.getEntryFromMemTableKey(value); 166 | } 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /src/Merger.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { assert } from "./DBHelper"; 9 | import { Entry, InternalKeyComparator } from "./Format"; 10 | 11 | export default class IteratorMerger { 12 | constructor( 13 | icmp: InternalKeyComparator, 14 | list: AsyncIterableIterator[], 15 | num: number, 16 | ) { 17 | this._icmp = icmp; 18 | this._list = list; 19 | this._num = num; 20 | this._cache = new Array(num); 21 | } 22 | 23 | private _icmp: InternalKeyComparator; 24 | private _list: AsyncIterableIterator[]; 25 | private _num: number; 26 | private _cache: (IteratorResult | null)[]; 27 | 28 | public async *iterator(reverse = false): AsyncIterableIterator { 29 | assert(this._num >= 0); 30 | if (this._num === 0) { 31 | return; 32 | } 33 | if (this._num === 1) { 34 | yield* this._list[0]; 35 | return; 36 | } 37 | while (true) { 38 | const current = reverse 39 | ? await this.findLargest() 40 | : await this.findSmallest(); 41 | if (!current) break; 42 | yield current; 43 | } 44 | } 45 | 46 | private async findLargest(): Promise { 47 | let largest = null; 48 | let hit = -1; 49 | 50 | for (let i = 0; i < this._num; i++) { 51 | const child = this._cache[i] || (await this._list[i].next()); 52 | this._cache[i] = child; 53 | if (!child.done) { 54 | if (largest === null) { 55 | largest = child.value; 56 | hit = i; 57 | } else if (this._icmp.compare(child.value.key, largest.key) > 0) { 58 | largest = child.value; 59 | hit = i; 60 | } 61 | } 62 | } 63 | for (let i = 0; i < this._num; i++) { 64 | if (i === hit) { 65 | this._cache[i] = null; 66 | } 67 | } 68 | return largest; 69 | } 70 | 71 | private async findSmallest(): Promise { 72 | let smallest = null; 73 | let hit = -1; 74 | 75 | for (let i = 0; i < this._num; i++) { 76 | const child = this._cache[i] || (await this._list[i].next()); 77 | this._cache[i] = child; 78 | if (!child.done) { 79 | if (smallest === null) { 80 | smallest = child.value; 81 | hit = i; 82 | } else if (this._icmp.compare(child.value.key, smallest.key) < 0) { 83 | smallest = child.value; 
84 | hit = i; 85 | } 86 | } 87 | } 88 | for (let i = 0; i < this._num; i++) { 89 | if (i === hit) { 90 | this._cache[i] = null; 91 | } 92 | } 93 | return smallest; 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/Options.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | import { BytewiseComparator } from "./Comparator"; 9 | import BloomFilter from "./BloomFilter"; 10 | import { Comparator } from "./Comparator"; 11 | import Slice from "./Slice"; 12 | import { Env, FileHandle } from "./Env"; 13 | import Block from "./SSTableBlock"; 14 | import Cache from "./Cache"; 15 | import { Buffer } from "./Buffer"; 16 | import { Snapshot } from "./Snapshot"; 17 | 18 | export interface FilterPolicy { 19 | name(): string; 20 | keyMayMatch(key: Slice, filter: Slice): boolean; 21 | } 22 | 23 | export interface ReadOptions { 24 | // If true, all data read from underlying storage will be 25 | // verified against corresponding checksums. 26 | verifyChecksums?: boolean; 27 | 28 | // Should the data read for this iteration be cached in memory? 29 | // Callers may wish to set this field to false for bulk scans. 30 | fillCache?: boolean; 31 | 32 | // If "snapshot" is non-null, read as of the supplied snapshot 33 | // (which must belong to the DB that is being read and which must 34 | // not have been released). If "snapshot" is null, use an implicit 35 | // snapshot of the state at the beginning of this read operation. 
36 | snapshot?: Snapshot; 37 | } 38 | 39 | export const defaultReadOptions: Omit, "snapshot"> = { 40 | verifyChecksums: false, 41 | fillCache: true, 42 | }; 43 | 44 | export interface IteratorOptions extends ReadOptions { 45 | reverse?: boolean; 46 | start?: string | Buffer; 47 | } 48 | 49 | export const defaultIteratorOptions: Omit< 50 | Required, 51 | "snapshot" 52 | > = { 53 | reverse: false, 54 | start: Buffer.alloc(0), 55 | ...defaultReadOptions, 56 | }; 57 | 58 | export interface WriteOptions { 59 | // If true, the write will be flushed from the operating system 60 | // buffer cache (by calling WritableFile::Sync()) before the write 61 | // is considered complete. If this flag is true, writes will be 62 | // slower. 63 | // 64 | // If this flag is false, and the machine crashes, some recent 65 | // writes may be lost. Note that if it is just the process that 66 | // crashes (i.e., the machine does not reboot), no writes will be 67 | // lost even if sync==false. 68 | // 69 | // In other words, a DB write with sync==false has similar 70 | // crash semantics as the "write()" system call. A DB write 71 | // with sync==true has similar crash semantics to a "write()" 72 | // system call followed by "fsync()". 73 | sync?: boolean; 74 | } 75 | 76 | export const defaultWriteOptions: Required = { 77 | sync: false, 78 | }; 79 | 80 | export interface DatabaseOptions { 81 | // Comparator used to define the order of keys in the table. 82 | // Default: a comparator that uses lexicographic byte-wise ordering 83 | // 84 | // REQUIRES: The client must ensure that the comparator supplied 85 | // here has the same name and orders keys *exactly* the same as the 86 | // comparator provided to previous open calls on the same DB. 87 | comparator?: Comparator; 88 | 89 | // Amount of data to build up in memory (backed by an unsorted log 90 | // on disk) before converting to a sorted on-disk file. 91 | // 92 | // Larger values increase performance, especially during bulk loads. 
93 | // Up to two write buffers may be held in memory at the same time, 94 | // so you may wish to adjust this parameter to control memory usage. 95 | // Also, a larger write buffer will result in a longer recovery time 96 | // the next time the database is opened. 97 | writeBufferSize?: number; 98 | 99 | // Leveldb will write up to this amount of bytes to a file before 100 | // switching to a new one. 101 | // Most clients should leave this parameter alone. However if your 102 | // filesystem is more efficient with larger files, you could 103 | // consider increasing the value. The downside will be longer 104 | // compactions and hence longer latency/performance hiccups. 105 | // Another reason to increase this parameter might be when you are 106 | // initially populating a large database. 107 | maxFileSize?: number; 108 | 109 | // Number of open files that can be used by the DB. You may need to 110 | // increase this if your database has a large working set (budget 111 | // one open file per 2MB of working set). 112 | maxOpenFiles?: number; 113 | 114 | // automatically create and use an 8MB internal cache. 115 | // 8MB = 2048 * blockSize(4096B) 116 | blockCache?: Cache; 117 | 118 | // Approximate size of user data packed per block. Note that the 119 | // block size specified here corresponds to uncompressed data. The 120 | // actual size of the unit read from disk may be smaller if 121 | // compression is enabled. This parameter can be changed dynamically. 122 | blockSize?: number; 123 | 124 | // Number of keys between restart points for delta encoding of keys. 125 | // This parameter can be changed dynamically. Most clients should 126 | // leave this parameter alone. 127 | blockRestartInterval?: number; 128 | 129 | // EXPERIMENTAL: If true, append to existing MANIFEST and log files 130 | // when a database is opened. This can significantly speed up open. 131 | // 132 | // Default: currently false, but may become true later. 
133 | reuseLogs?: boolean; 134 | 135 | filterPolicy?: FilterPolicy; 136 | 137 | debug?: boolean; 138 | 139 | lockfileStale?: number; 140 | 141 | env?: Env; 142 | 143 | log?: (message: string) => Promise; 144 | 145 | infoLog?: FileHandle | null; 146 | } 147 | 148 | export type Options = Required; 149 | 150 | export const defaultOptions: Omit, "env"> = { 151 | comparator: new BytewiseComparator(), 152 | writeBufferSize: 4 * 1024 * 1024, 153 | maxFileSize: 2 * 1024 * 1024, 154 | maxOpenFiles: 1000, 155 | blockCache: new Cache({ 156 | max: 2048, 157 | }), 158 | blockSize: 4 * 1024, 159 | blockRestartInterval: 16, 160 | reuseLogs: false, 161 | filterPolicy: new BloomFilter(), 162 | debug: false, 163 | lockfileStale: 10000, 164 | infoLog: null, 165 | async log(message: string): Promise { 166 | if (this.infoLog) await this.infoLog.appendFile(message + "\n"); 167 | }, 168 | }; 169 | -------------------------------------------------------------------------------- /src/SSTable.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { FileHandle } from "./Env"; 9 | import Slice from "./Slice"; 10 | import Footer from "./SSTableFooter"; 11 | import DataBlock from "./SSTableBlock"; 12 | import FilterBlock from "./SSTableFilterBlock"; 13 | import { 14 | BlockContents, 15 | BlockHandle, 16 | CompressionTypes, 17 | kBlockTrailerSize, 18 | InternalKey, 19 | Entry, 20 | } from "./Format"; 21 | import Status from "./Status"; 22 | import { Options, ReadOptions, defaultReadOptions } from "./Options"; 23 | import { assert } from "./DBHelper"; 24 | import { Buffer } from "./Buffer"; 25 | import { encodeFixed64 } from "./Coding"; 26 | 27 | // Reader 28 | export default class SSTable { 29 | static async readBlock( 30 | file: FileHandle, 31 | options: ReadOptions, 32 | handle: BlockHandle, 33 | ): Promise { 34 | const result = { 35 | data: new Slice(), 36 | cachable: false, 37 | heapAllocated: false, 38 | } as BlockContents; 39 | 40 | // Read the block contents as well as the type/crc footer. 41 | // See table_builder.cc for the code that built this structure. 
42 | const n = handle.size; 43 | const data = Buffer.alloc(handle.size + kBlockTrailerSize); 44 | const { bytesRead } = await file.read(data, 0, data.length, handle.offset); 45 | 46 | if (bytesRead !== handle.size + kBlockTrailerSize) { 47 | throw new Error("truncated block read"); 48 | } 49 | 50 | // TODO Check the crc of the type and the block contents 51 | 52 | switch (data[n]) { 53 | case CompressionTypes.none: 54 | result.data = new Slice(data.slice(0, n)); 55 | break; 56 | // TODO Compression 57 | default: 58 | throw new Error("bad block type"); 59 | } 60 | 61 | return result; 62 | } 63 | 64 | static async open(options: Options, file: FileHandle): Promise { 65 | const stat = await file.stat(); 66 | if (stat.size < Footer.kEncodedLength) { 67 | throw new Error("file is too short to be an sstable"); 68 | } 69 | const footerBuf = Buffer.alloc(Footer.kEncodedLength); 70 | await file.read( 71 | footerBuf, 72 | 0, 73 | footerBuf.length, 74 | stat.size - Footer.kEncodedLength, 75 | ); 76 | const footer = new Footer(footerBuf); 77 | const indexBlockContents = await this.readBlock( 78 | file, 79 | defaultReadOptions, 80 | footer.indexHandle, 81 | ); 82 | const indexBlock = new DataBlock(indexBlockContents); 83 | indexBlock.blockType = "indexblock"; 84 | const table = new SSTable({ 85 | file, 86 | options, 87 | indexBlock, 88 | metaIndexHandle: footer.metaIndexHandle, 89 | }); 90 | await table.readMeta(footer); 91 | return table; 92 | } 93 | 94 | constructor(rep: { 95 | file: FileHandle; 96 | options: Options; 97 | indexBlock: DataBlock; 98 | metaIndexHandle: BlockHandle; 99 | }) { 100 | this._file = rep.file; 101 | this._options = rep.options; 102 | this._indexBlock = rep.indexBlock; 103 | this._cacheId = rep.options.blockCache.newId(); 104 | } 105 | 106 | private _file: FileHandle; 107 | private _cacheId: bigint; 108 | private _options: Options; 109 | private _indexBlock: DataBlock; 110 | private _filterReader!: FilterBlock; 111 | 112 | private async readMeta(footer: 
Footer): Promise { 113 | if (!this._options.filterPolicy) { 114 | return; // Do not need any metadata 115 | } 116 | const contents = await SSTable.readBlock( 117 | this._file, 118 | defaultReadOptions, 119 | footer.metaIndexHandle, 120 | ); 121 | const meta = new DataBlock(contents); 122 | meta.blockType = "metaindexblock"; 123 | const key = new Slice("filter." + this._options.filterPolicy.name()); 124 | for (const entry of meta.iterator(this._options.comparator)) { 125 | if (entry.key.isEqual(key)) { 126 | await this.readFilter(entry.value.buffer); 127 | } 128 | } 129 | } 130 | 131 | private async readFilter(filterHandleBuffer: Buffer): Promise { 132 | const filterHandle = BlockHandle.from(filterHandleBuffer); 133 | 134 | const readOptions = { ...defaultReadOptions }; 135 | const block = await SSTable.readBlock( 136 | this._file, 137 | readOptions, 138 | filterHandle, 139 | ); 140 | this._filterReader = new FilterBlock( 141 | this._options.filterPolicy, 142 | block.data, 143 | ); 144 | } 145 | 146 | // key: internalKey 147 | public async get(target: Slice): Promise { 148 | const targetInternalKey = InternalKey.from(target); 149 | 150 | for (const handleValue of this._indexBlock.iterator( 151 | this._options.comparator, 152 | )) { 153 | const handle = BlockHandle.from(handleValue.value.buffer); 154 | 155 | if ( 156 | !!this._filterReader && 157 | !this._filterReader.keyMayMatch(handle.offset, target) 158 | ) { 159 | // Not found 160 | } else { 161 | for await (const entry of this.blockIterator( 162 | this, 163 | this._options, 164 | handle, 165 | false, 166 | "datablock", 167 | )) { 168 | const entryInternalKey = InternalKey.from(entry.key); 169 | if ( 170 | entryInternalKey.userKey.isEqual(targetInternalKey.userKey) && 171 | entryInternalKey.sequence <= targetInternalKey.sequence 172 | ) { 173 | // do not handle value type here, handle it at `Version.saveValue` 174 | return new Status(Promise.resolve(entry)); 175 | } 176 | } 177 | } 178 | } 179 | return 
Status.createNotFound(); 180 | } 181 | 182 | async *entryIterator(reverse = false): AsyncIterableIterator { 183 | for (const handleValue of this._indexBlock.iterator( 184 | this._options.comparator, 185 | reverse, 186 | )) { 187 | const handle = BlockHandle.from(handleValue.value.buffer); 188 | yield* this.blockIterator( 189 | this, 190 | this._options, 191 | handle, 192 | reverse, 193 | "datablock", 194 | ); 195 | } 196 | } 197 | 198 | // Convert an index iterator value (i.e., an encoded BlockHandle) 199 | // into an iterator over the contents of the corresponding block. 200 | async *blockIterator( 201 | table: SSTable, 202 | options: Options, 203 | handle: BlockHandle, 204 | reverse = false, 205 | blockType?: string, 206 | ): AsyncIterableIterator { 207 | const key = Buffer.concat([ 208 | encodeFixed64(this._cacheId), 209 | encodeFixed64(handle.offset), 210 | ]); 211 | 212 | let dataBlock = this._options.blockCache.get(key); 213 | if (!dataBlock) { 214 | const data = Buffer.alloc(handle.size); 215 | const { bytesRead } = await this._file.read( 216 | data, 217 | 0, 218 | data.length, 219 | handle.offset, 220 | ); 221 | assert(bytesRead === data.length); 222 | 223 | const contents = { 224 | data: new Slice(data), 225 | } as BlockContents; 226 | dataBlock = new DataBlock(contents); 227 | if (blockType) dataBlock.blockType = blockType; 228 | this._options.blockCache.set(key, dataBlock); 229 | } 230 | yield* dataBlock.iterator(options.comparator, reverse); 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /src/SSTableBlock.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { Comparator } from "./Comparator"; 9 | import { decodeFixed32 } from "./Coding"; 10 | import { BlockContents, kSizeOfUInt32, Entry } from "./Format"; 11 | import Slice from "./Slice"; 12 | import { assert } from "./DBHelper"; 13 | import { Buffer } from "./Buffer"; 14 | 15 | interface RestartedEntry { 16 | entry: Entry; 17 | shared: number; 18 | nonShared: number; 19 | // rawSize = header(12) + nonshared + valuelength 20 | rawSize: number; 21 | } 22 | 23 | export default class SSTableBlock { 24 | constructor(contents: BlockContents) { 25 | this._buffer = contents.data.buffer; 26 | this._size = contents.data.size; 27 | const maxRestartsAllowed = (this._size - kSizeOfUInt32) / kSizeOfUInt32; 28 | if (this.getNumRestarts() > maxRestartsAllowed) { 29 | // The size is too small for NumRestarts() 30 | this._size = 0; 31 | } else { 32 | this._restartPoint = 33 | this._size - (1 + this.getNumRestarts()) * kSizeOfUInt32; 34 | } 35 | } 36 | 37 | public blockType!: string; 38 | 39 | private _restartPoint!: number; 40 | private _size: number; 41 | private _buffer: Buffer; 42 | 43 | get buffer(): Buffer { 44 | return this._buffer; 45 | } 46 | 47 | get size(): number { 48 | return this._size; 49 | } 50 | 51 | getNumRestarts(): number { 52 | return decodeFixed32(this._buffer.slice(this._size - 4)); 53 | } 54 | 55 | decodeEntry(offset: number, lastKey: Slice): RestartedEntry { 56 | const shared = decodeFixed32( 57 | this._buffer.slice(offset, offset + kSizeOfUInt32), 58 | ); 59 | const nonShared = decodeFixed32( 60 | this._buffer.slice(offset + kSizeOfUInt32, offset + 8), 61 | ); 62 | const valueLength = decodeFixed32( 63 | this._buffer.slice(offset + 8, offset + 12), 64 | ); 65 | const keyLength = shared + nonShared; 66 | const nonSharedKey = this._buffer.slice( 67 | offset + 12, 68 | offset + 12 + nonShared, 69 | ); 70 | const sharedKey = lastKey.buffer.slice(0, shared); 71 | const key = new Slice(Buffer.concat([sharedKey, nonSharedKey])); 72 | 
assert(key.length === keyLength); 73 | return { 74 | rawSize: 12 + nonShared + valueLength, 75 | shared, 76 | nonShared, 77 | entry: { 78 | key, 79 | value: new Slice( 80 | this._buffer.slice( 81 | offset + 12 + nonShared, 82 | offset + 12 + nonShared + valueLength, 83 | ), 84 | ), 85 | }, 86 | } as RestartedEntry; 87 | } 88 | 89 | *restartPointIterator(reverse = false): IterableIterator { 90 | if (reverse) { 91 | let currentOffset = this.size - 4; 92 | while (true) { 93 | if (currentOffset <= this._restartPoint) break; 94 | yield decodeFixed32( 95 | this._buffer.slice(currentOffset - kSizeOfUInt32, currentOffset), 96 | ); 97 | currentOffset -= kSizeOfUInt32; 98 | } 99 | } else { 100 | let currentOffset = this._restartPoint; 101 | while (true) { 102 | if (currentOffset >= this._size - kSizeOfUInt32) { 103 | break; 104 | } 105 | yield decodeFixed32( 106 | this._buffer.slice(currentOffset, currentOffset + kSizeOfUInt32), 107 | ); 108 | currentOffset += kSizeOfUInt32; 109 | } 110 | } 111 | } 112 | 113 | *iterator(comparator: Comparator, reverse = false): IterableIterator { 114 | const numRestarts = this.getNumRestarts(); 115 | if (numRestarts === 0) { 116 | return; 117 | } 118 | 119 | if (reverse) { 120 | const restartPointIterator = this.restartPointIterator(reverse); 121 | let rightEdge = this._restartPoint; 122 | 123 | let point = restartPointIterator.next(); 124 | 125 | let offset = point.value; 126 | let lastKey = new Slice(); 127 | let cache = []; 128 | 129 | while (true) { 130 | const currentRestartedEntry = this.decodeEntry(offset, lastKey); 131 | cache.unshift(currentRestartedEntry.entry); 132 | lastKey = new Slice(currentRestartedEntry.entry.key); 133 | offset += currentRestartedEntry.rawSize; 134 | 135 | if (offset === rightEdge) { 136 | yield* cache; 137 | rightEdge = point.value; 138 | point = restartPointIterator.next(); 139 | if (!point || !point.value) { 140 | break; 141 | } 142 | 143 | offset = point.value; 144 | lastKey = new Slice(); 145 | cache = 
[]; 146 | } 147 | } 148 | } else { 149 | const restartPointIterator = this.restartPointIterator(reverse); 150 | let restartPointIteratorResult = restartPointIterator.next(); 151 | let currentRestartPoint = restartPointIteratorResult.value; 152 | let offset = 0; 153 | let lastKey = new Slice(); 154 | 155 | while (true) { 156 | if (offset >= this._restartPoint) break; 157 | 158 | const currentRestartedEntry = this.decodeEntry(offset, lastKey); 159 | yield currentRestartedEntry.entry; 160 | lastKey = new Slice(currentRestartedEntry.entry.key); 161 | offset += currentRestartedEntry.rawSize; 162 | 163 | if (offset === currentRestartPoint) { 164 | lastKey = new Slice(); 165 | restartPointIteratorResult = restartPointIterator.next(); 166 | currentRestartPoint = restartPointIteratorResult.value; 167 | } 168 | } 169 | } 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /src/SSTableCache.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import Status from "./Status"; 9 | import { FileHandle, Env } from "./Env"; 10 | import Table from "./SSTable"; 11 | import { getTableFilename } from "./Filename"; 12 | import { Options, ReadOptions } from "./Options"; 13 | import Slice from "./Slice"; 14 | import { Entry } from "./Format"; 15 | import IteratorHelper from "./IteratorHelper"; 16 | import Cache from "./Cache"; 17 | 18 | export interface TableAndFile { 19 | file: FileHandle; 20 | table: Table; 21 | } 22 | 23 | export class TableCache { 24 | // TODO entries: LRUCache capacity 25 | constructor(dbpath: string, options: Options, entries: number) { 26 | this._env = options.env; 27 | this._dbpath = dbpath; 28 | this._options = options; 29 | this._cache = new Cache({ 30 | max: entries, 31 | async dispose(key: number, tf: TableAndFile): Promise { 32 | try { 33 | await tf.file.close(); 34 | } catch (e) {} 35 | }, 36 | }); 37 | } 38 | 39 | _destroyed = false; 40 | _env: Env; 41 | _dbpath: string; 42 | _options: Options; 43 | _cache: Cache; 44 | 45 | async destroy(): Promise { 46 | this._destroyed = true; 47 | this._cache.reset(); 48 | await new Promise((resolve) => setTimeout(resolve, 100)); 49 | } 50 | 51 | public async get( 52 | options: ReadOptions, 53 | fileNumber: number, 54 | fileSize: number, 55 | key: Slice, 56 | arg: unknown, // state.saver, set kNotFound if not found 57 | saveValue: (arg: unknown, key: Slice, value: Slice) => void, 58 | ): Promise { 59 | let status = await this.findTable(fileNumber, fileSize); 60 | if (await status.ok()) { 61 | const tf = (await status.promise) as TableAndFile; 62 | const table = tf.table; 63 | // get value from table file 64 | status = await table.get(key); 65 | } 66 | 67 | if (await status.ok()) { 68 | const { key, value } = (await status.promise) as Entry; 69 | saveValue(arg, key, value); 70 | } 71 | return status; 72 | } 73 | 74 | async findTable(fileNumber: number, fileSize: number): Promise { 75 | let status = new Status(); 76 | const cachedTf = 
this._cache.get(fileNumber); 77 | if (!cachedTf) { 78 | const tableFilename = getTableFilename(this._dbpath, fileNumber); 79 | status = new Status(this._env.open(tableFilename, "r+")); 80 | const tf = {} as TableAndFile; 81 | if (await status.ok()) { 82 | tf.file = (await status.promise) as FileHandle; 83 | status = new Status(Table.open(this._options, tf.file)); 84 | } 85 | if (await status.ok()) { 86 | tf.table = (await status.promise) as Table; 87 | this._cache.set(fileNumber, tf); 88 | status = new Status(Promise.resolve(tf)); 89 | } else { 90 | // We do not cache error results so that if the error is transient, 91 | // or somebody repairs the file, we recover automatically. 92 | } 93 | } else { 94 | status = new Status(Promise.resolve(cachedTf)); 95 | } 96 | 97 | return status; 98 | } 99 | 100 | async *entryIterator( 101 | options: ReadOptions, 102 | fileNumber: number, 103 | fileSize: number, 104 | ): AsyncIterableIterator { 105 | const status = await this.findTable(fileNumber, fileSize); 106 | if (await status.ok()) { 107 | const tf = (await status.promise) as TableAndFile; 108 | yield* IteratorHelper.wrap(tf.table.entryIterator(), async () => { 109 | await tf.file.close(); 110 | }); 111 | } else { 112 | this._options.log(`Open Table file(${fileNumber}) fail.`); 113 | throw new Error(`Open Table file(${fileNumber}) fail.`); 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/SSTableFilterBlock.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { varint } from "./DBHelper"; 9 | import { Buffer } from "./Buffer"; 10 | import Slice from "./Slice"; 11 | import { FilterPolicy } from "./Options"; 12 | import { decodeFixed32 } from "./Coding"; 13 | 14 | // MetaBlock format is different with other blocks 15 | // filter_index = (blockOffset / kFilterBase); 16 | export default class SSTableFilterBlock { 17 | constructor(policy: FilterPolicy, data: Slice) { 18 | this._buffer = data.buffer; 19 | this._policy = policy; 20 | this._num = 0; 21 | this._baseLg = 0; 22 | 23 | const n = data.size; 24 | if (n < 5) return; // 1 byte for base_lg_ and 4 for start of offset array 25 | this._baseLg = data.buffer[data.length - 1]; 26 | 27 | const lastWord = decodeFixed32(data.buffer.slice(n - 5)); 28 | if (lastWord > n - 5) return; 29 | this._buffer = data.buffer; 30 | this._data = 0; 31 | this._offset = lastWord; 32 | this._num = (n - 5 - lastWord) / 4; 33 | } 34 | 35 | private _buffer: Buffer; // 36 | private _data!: number; // Pointer to filter data (at block-start) 37 | private _offset!: number; // Pointer to beginning of offset array (at block-end) 38 | private _size!: number; 39 | private _policy: FilterPolicy; 40 | private _num: number; // Number of entries in offset array 41 | private _baseLg: number; // Encoding parameter (see kFilterBaseLg in .cc file) 42 | 43 | get size(): number { 44 | return this._size; 45 | } 46 | 47 | get buffer(): Buffer { 48 | return this._buffer; 49 | } 50 | 51 | get beginningOfOffset(): number { 52 | let buf; 53 | if (this._offset === 0 && this._size === this._buffer.length) { 54 | buf = this._buffer; 55 | } else { 56 | buf = this._buffer.slice(this._offset, this._size); 57 | } 58 | return varint.decode(buf, buf.length - 2); 59 | } 60 | 61 | public keyMayMatch(blockOffset: number, key: Slice): boolean { 62 | const index = blockOffset >> this._baseLg; 63 | if (index < this._num) { 64 | const start = decodeFixed32(this._buffer.slice(this._offset + index * 4)); 65 | const 
limit = decodeFixed32( 66 | this._buffer.slice(this._offset + index * 4 + 4), 67 | ); 68 | 69 | if (start <= limit && limit <= this._offset + this._size) { 70 | const filter = new Slice( 71 | this._buffer.slice( 72 | this._offset + start, 73 | this._offset + limit - start, 74 | ), 75 | ); 76 | return this._policy.keyMayMatch(key, filter); 77 | } else if (start == limit) { 78 | // Empty filters do not match any keys 79 | return false; 80 | } 81 | } 82 | return true; // Errors are treated as potential matches 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/SSTableFooter.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | import { varint, assert } from "./DBHelper"; 9 | import { Buffer } from "./Buffer"; 10 | import { BlockHandle } from "./Format"; 11 | 12 | /** 13 | * fill in end of table, fixed 48 bytes, 14 | * include offset, size of data index block and meta index block 15 | * 16 | * read sstable from footer 17 | */ 18 | export default class TableFooter { 19 | static kEncodedLength = 48; 20 | 21 | constructor(buffer: Buffer) { 22 | assert(buffer.length === 48); 23 | this._buffer = buffer; 24 | } 25 | 26 | _buffer: Buffer; 27 | 28 | get indexHandle(): BlockHandle { 29 | const data = this.get(); 30 | const handle = { 31 | offset: data.indexOffset, 32 | size: data.indexSize, 33 | } as BlockHandle; 34 | return handle; 35 | } 36 | 37 | get metaIndexHandle(): BlockHandle { 38 | const data = this.get(); 39 | const handle = { 40 | offset: data.metaIndexOffset, 41 | size: data.metaIndexSize, 42 | } as BlockHandle; 43 | return handle; 44 | } 45 | 46 | get buffer(): Buffer { 47 | return this._buffer; 48 | } 49 | 50 | set metaIndexOffset(value: number) { 51 | const data = { 52 | 
...this.get(), 53 | metaIndexOffset: value, 54 | }; 55 | this.put(data); 56 | } 57 | 58 | set metaIndexSize(value: number) { 59 | const data = { 60 | ...this.get(), 61 | metaIndexSize: value, 62 | }; 63 | this.put(data); 64 | } 65 | 66 | set indexOffset(value: number) { 67 | const data = { 68 | ...this.get(), 69 | indexOffset: value, 70 | }; 71 | this.put(data); 72 | } 73 | 74 | set indexSize(value: number) { 75 | const data = { 76 | ...this.get(), 77 | indexSize: value, 78 | }; 79 | this.put(data); 80 | } 81 | 82 | get(): { 83 | metaIndexOffset: number; 84 | metaIndexSize: number; 85 | indexOffset: number; 86 | indexSize: number; 87 | } { 88 | const buf = this.buffer; 89 | if (!buf) { 90 | return { 91 | metaIndexOffset: 0, 92 | metaIndexSize: 0, 93 | indexOffset: 0, 94 | indexSize: 0, 95 | }; 96 | } 97 | let position = 0; 98 | const metaIndexOffset = varint.decode(buf, position); 99 | position += varint.decode.bytes; 100 | const metaIndexSize = varint.decode(buf, position); 101 | position += varint.decode.bytes; 102 | const indexOffset = varint.decode(buf, position); 103 | position += varint.decode.bytes; 104 | const indexSize = varint.decode(buf, position); 105 | return { 106 | metaIndexOffset, 107 | metaIndexSize, 108 | indexOffset, 109 | indexSize, 110 | }; 111 | } 112 | 113 | put(data: { 114 | metaIndexOffset: number; 115 | metaIndexSize: number; 116 | indexOffset: number; 117 | indexSize: number; 118 | }): void { 119 | const handlers = Buffer.concat([ 120 | Buffer.fromArrayLike(varint.encode(data.metaIndexOffset)), 121 | Buffer.fromArrayLike(varint.encode(data.metaIndexSize)), 122 | Buffer.fromArrayLike(varint.encode(data.indexOffset)), 123 | Buffer.fromArrayLike(varint.encode(data.indexSize)), 124 | ]); 125 | const paddingBuf = Buffer.alloc(40 - handlers.length); 126 | this._buffer = Buffer.concat([handlers, paddingBuf, Buffer.alloc(8)]); 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /src/Skiplist.ts: 
-------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | import { assert } from "./DBHelper"; 9 | import Slice from "./Slice"; 10 | import SkiplistNode from "./SkiplistNode"; 11 | 12 | const PROBABILITY = 1 / Math.E; 13 | 14 | const kMaxHeight = 12; // Math.round(Math.log(this.maxsize, 2)) 15 | // const kBranching = 4 16 | 17 | export default class Skiplist { 18 | constructor(keyComparator: (a: Slice, b: Slice) => number, maxsize = 65535) { 19 | this.maxsize = maxsize; 20 | this.level = 0; 21 | 22 | // When initial,tail is null head link to tail 23 | // [] -------> [] 24 | // [] -------> [] 25 | // [] -------> [] 26 | // [] -------> [] 27 | // [] -------> [] 28 | // head tail 29 | this.keyComparator = keyComparator; 30 | this.head = new SkiplistNode(kMaxHeight, new Slice()); 31 | } 32 | 33 | keyComparator: (a: Slice, b: Slice) => number; 34 | maxsize: number; 35 | level: number; 36 | head: SkiplistNode; 37 | 38 | private isKeyAfterNode(key: Slice, node: SkiplistNode): boolean { 39 | return !!node && this.keyComparator(node.key, key) < 0; 40 | } 41 | 42 | private generateNodeLevel(): number { 43 | let nodeLevel = 1; 44 | const max = Math.min(kMaxHeight, this.level + 1); 45 | while (Math.random() < PROBABILITY && nodeLevel < max) { 46 | nodeLevel++; 47 | } 48 | assert(nodeLevel > 0); 49 | assert(nodeLevel <= kMaxHeight); 50 | return nodeLevel; 51 | } 52 | 53 | private findGreaterOrEqual( 54 | key: Slice, 55 | shouldUpdatePrevNodes?: SkiplistNode[], 56 | ): SkiplistNode { 57 | let level = kMaxHeight; 58 | let current = this.head; 59 | while (true) { 60 | const next = current.next(level); 61 | // if current node's next is null 62 | // if level === 0,then loop end, the inserted key is biggest 63 | // else keep find in smaller level 64 | // if 
inserted key is small then current key,then loop end 65 | // if next nodes's key is smaller then inserted key,then check if next node exist 66 | // next node's key is bigger 67 | if (this.isKeyAfterNode(key, next)) { 68 | current = next; 69 | } else { 70 | if (shouldUpdatePrevNodes) shouldUpdatePrevNodes[level] = current; 71 | if (level === 0) { 72 | return next; 73 | } else { 74 | level--; 75 | } 76 | } 77 | } 78 | } 79 | 80 | private findLast(): SkiplistNode { 81 | let node = this.head; 82 | let level = this.level; 83 | while (true) { 84 | const next = node.next(level); 85 | if (!next) { 86 | if (level === 0) return node; 87 | level--; 88 | } else { 89 | node = next; 90 | } 91 | } 92 | } 93 | 94 | private findLessThan(key: Slice): SkiplistNode { 95 | let node = this.head; 96 | let level = this.level; 97 | while (true) { 98 | assert(node === this.head || this.keyComparator(node.key, key) < 0); 99 | const next = node.next(level); 100 | if (!next || this.keyComparator(next.key, key) >= 0) { 101 | if (level === 0) return node; 102 | level--; 103 | } else { 104 | node = next; 105 | } 106 | } 107 | } 108 | 109 | *iterator(reverse = false): IterableIterator { 110 | if (!reverse) { 111 | let current = this.head; 112 | while (true) { 113 | if (!current) break; 114 | if (!current.next(0)) break; 115 | yield current.next(0).key; 116 | current = current.next(0); 117 | } 118 | } else { 119 | // Instead of using explicit "prev" links, we just search for the 120 | // last node that falls before key. 
121 | let current = this.findLast(); 122 | while (true) { 123 | if (current === this.head) break; 124 | yield current.key; 125 | const prev = this.findLessThan(current.key); 126 | current = prev; 127 | } 128 | } 129 | } 130 | 131 | private isEqual(a: Slice, b: Slice): boolean { 132 | return a.isEqual(b); 133 | } 134 | 135 | // TODO maybe there is something error 136 | // Advance to the first entry with a key >= target 137 | public seek(key: Slice): SkiplistNode { 138 | const prevNode = this.findGreaterOrEqual(key); 139 | return prevNode; 140 | } 141 | 142 | public put(key: Slice): void { 143 | const shouldUpdatePrevNodes = new Array(kMaxHeight); 144 | const prevNode = this.findGreaterOrEqual(key, shouldUpdatePrevNodes); 145 | assert(!prevNode || !this.isEqual(key, prevNode.key)); 146 | 147 | const nodeLevel = this.generateNodeLevel(); 148 | if (nodeLevel > this.level) { 149 | for (let i = this.level; i < nodeLevel; i++) { 150 | shouldUpdatePrevNodes[i] = this.head; 151 | } 152 | this.level = nodeLevel; 153 | } 154 | 155 | const node = new SkiplistNode(nodeLevel, key); 156 | 157 | for (let i = 0; i < nodeLevel; i++) { 158 | if (shouldUpdatePrevNodes[i]) { 159 | node.levels[i] = shouldUpdatePrevNodes[i].levels[i]; 160 | shouldUpdatePrevNodes[i].levels[i] = node; 161 | } 162 | } 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /src/SkiplistNode.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import Slice from './Slice' 9 | 10 | export default class SkiplistNode { 11 | constructor(maxlevel: number, key: Slice, next?: SkiplistNode) { 12 | this.key = key 13 | this.maxlevel = maxlevel 14 | this.levels = new Array(maxlevel + 1) 15 | if (!!next) this.fill(next) 16 | } 17 | 18 | key: Slice 19 | maxlevel: number 20 | levels: SkiplistNode[] 21 | 22 | /** 23 | * link every level in this node to next 24 | */ 25 | fill(next: SkiplistNode): void { 26 | for (let i = 0; i <= this.maxlevel; i++) { 27 | this.levels[i] = next 28 | } 29 | } 30 | 31 | next(level: number): SkiplistNode { 32 | return this.levels[level] 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/Slice.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { Buffer } from "./Buffer"; 9 | 10 | export default class Slice { 11 | constructor(value: unknown = Buffer.alloc(0)) { 12 | if (value instanceof Slice) { 13 | this._buffer = value._buffer; 14 | } else if (Buffer.isBuffer(value)) { 15 | this._buffer = value; 16 | } else if (typeof value === "string") { 17 | this._buffer = Buffer.fromString(value); 18 | } else { 19 | this._buffer = Buffer.fromArrayBuffer(value as ArrayBuffer); 20 | } 21 | } 22 | 23 | private _buffer: Buffer; 24 | 25 | get buffer(): Buffer { 26 | return this._buffer; 27 | } 28 | 29 | set buffer(buf: Buffer) { 30 | this._buffer = buf; 31 | } 32 | 33 | get length(): number { 34 | return this._buffer.length; 35 | } 36 | 37 | get size(): number { 38 | return this._buffer.length; 39 | } 40 | 41 | toString(): string { 42 | return this._buffer.toString(); 43 | } 44 | 45 | clear(): void { 46 | this._buffer = Buffer.alloc(0); 47 | } 48 | 49 | compare(slice: Slice): number { 50 | return Buffer.compare(this._buffer, slice.buffer); 51 | } 52 | 53 | isEqual(slice: Slice): boolean { 54 | return this.compare(slice) === 0; 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /src/Snapshot.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { SequenceNumber } from "./Format"; 9 | import { assert } from "./DBHelper"; 10 | 11 | export class Snapshot { 12 | constructor(sn: SequenceNumber) { 13 | this._sequenceNumber = sn; 14 | } 15 | 16 | _sequenceNumber: SequenceNumber; 17 | _next!: Snapshot; 18 | _prev!: Snapshot; 19 | 20 | get sequenceNumber(): SequenceNumber { 21 | return this._sequenceNumber; 22 | } 23 | } 24 | 25 | export class SnapshotList { 26 | constructor() { 27 | this._head = new Snapshot(0n); 28 | this._head._next = this._head; 29 | this._head._prev = this._head; 30 | } 31 | 32 | _head: Snapshot; 33 | 34 | isEmpty(): boolean { 35 | return this._head._next === this._head; 36 | } 37 | 38 | newest(): Snapshot { 39 | assert(!this.isEmpty()); 40 | return this._head._prev; 41 | } 42 | 43 | oldest(): Snapshot { 44 | assert(!this.isEmpty()); 45 | return this._head._next; 46 | } 47 | 48 | // insert before _head 49 | insert(sn: SequenceNumber): Snapshot { 50 | assert(this.isEmpty() || this.newest()._sequenceNumber <= sn); 51 | const snapshot = new Snapshot(sn); 52 | snapshot._next = this._head; 53 | snapshot._prev = this._head._prev; 54 | snapshot._prev._next = snapshot; 55 | snapshot._next._prev = snapshot; 56 | return snapshot; 57 | } 58 | 59 | delete(snapshot: Snapshot): void { 60 | const next = snapshot._next; 61 | const prev = snapshot._prev; 62 | next._prev = prev; 63 | prev._next = next; 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/Status.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | import { assert } from "./DBHelper"; 9 | 10 | enum Code { 11 | kOk = 0, 12 | kNotFound = 1, 13 | kCorruption = 2, 14 | kNotSupported = 3, 15 | kInvalidArgument = 4, 16 | kIOError = 5, 17 | } 18 | 19 | export class StatusError extends Error { 20 | _code: Code; 21 | constructor(code: Code, message?: string) { 22 | super(message); 23 | this._code = code; 24 | } 25 | } 26 | 27 | export default class Status { 28 | static createNotFound(message?: string): Status { 29 | return new Status(Promise.reject(new StatusError(Code.kNotFound, message))); 30 | } 31 | static createCorruption(message?: string): Status { 32 | return new Status( 33 | Promise.reject(new StatusError(Code.kCorruption, message)), 34 | ); 35 | } 36 | 37 | private _error!: Error; 38 | private _promise: Promise | void; 39 | private _code!: Code; 40 | private _finish: boolean; 41 | 42 | constructor(promise?: Promise) { 43 | this._promise = promise; 44 | this._finish = false; 45 | } 46 | 47 | get promise(): void | Promise { 48 | return this._promise; 49 | } 50 | 51 | get error(): Error { 52 | return this._error; 53 | } 54 | 55 | private async wait(): Promise { 56 | if (this._finish) return; 57 | try { 58 | await this._promise; 59 | } catch (e) { 60 | if (e._code) this._code = e._code; 61 | this._error = e; 62 | } finally { 63 | this._finish = true; 64 | } 65 | } 66 | 67 | public async ok(): Promise { 68 | await this.wait(); 69 | return !this._error; 70 | } 71 | 72 | public message(): string | void { 73 | assert(this._finish); 74 | if (this._error) { 75 | return this._error.message; 76 | } 77 | } 78 | 79 | public isNotFound(): boolean { 80 | assert(this._finish); 81 | return this._code === Code.kNotFound; 82 | } 83 | 84 | public isCorruption(): boolean { 85 | assert(this._finish); 86 | return this._code === Code.kCorruption; 87 | } 88 | 89 | public isIOError(): boolean { 90 | assert(this._finish); 91 | return this._code === Code.kIOError; 92 | } 93 | 94 | public isNotSupportedError(): boolean { 95 
| assert(this._finish); 96 | return this._code === Code.kNotSupported; 97 | } 98 | 99 | public isInvalidArgument(): boolean { 100 | assert(this._finish); 101 | return this._code === Code.kNotSupported; 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/VersionBuilder.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | import VersionSet from "./VersionSet"; 9 | import Version from "./Version"; 10 | import VersionEdit from "./VersionEdit"; 11 | import { Config } from "./Format"; 12 | import { FileMetaData, BySmallestKey, FileSet } from "./VersionFormat"; 13 | import { assert } from "./DBHelper"; 14 | 15 | export default class VersionBuilder { 16 | constructor(versionSet: VersionSet, base: Version) { 17 | this._versionSet = versionSet; 18 | this._base = base; 19 | this._levels = Array.from({ length: Config.kNumLevels }, () => ({ 20 | addedFiles: new FileSet(this.cmp), 21 | deletedFiles: new Set(), 22 | })); 23 | base.ref(); 24 | const cmp = new BySmallestKey(versionSet.internalKeyComparator); 25 | this.cmp = cmp; 26 | for (let level = 0; level < Config.kNumLevels; level++) { 27 | this._levels[level].addedFiles = new FileSet(cmp); 28 | } 29 | } 30 | 31 | private _versionSet: VersionSet; 32 | private _base: Version; 33 | private _levels: { 34 | addedFiles: FileSet; 35 | deletedFiles: Set; 36 | }[]; 37 | 38 | private cmp: BySmallestKey; 39 | 40 | // Apply all of the edits in *edit to the current state. 
  // Apply one VersionEdit to the builder's pending state: record the
  // per-level compaction pointers, the deleted file numbers, and the
  // newly added files (with their refs/allowedSeeks initialized).
  public apply(edit: VersionEdit): void {
    // Update compaction pointers
    // compactPointers: type =
    for (let i = 0; i < edit.compactPointers.length; i++) {
      const level = edit.compactPointers[i].level;
      this._versionSet.compactPointers[level] =
        edit.compactPointers[i].internalKey;
    }
    // traverse deleted_files_ put file to each level's deleted_files
    for (let i = 0; i < edit.deletedFiles.length; i++) {
      const { level, fileNum } = edit.deletedFiles[i];
      this._levels[level].deletedFiles.add(fileNum);
    }

    // traverse new files
    for (const file of edit.newFiles) {
      const { level, fileMetaData } = file;
      fileMetaData.refs = 1;
      // One compaction-triggering seek is allowed per ~16KB of file
      // data, with a floor of 100 seeks.
      fileMetaData.allowedSeeks = Math.floor(
        file.fileMetaData.fileSize / 16384,
      ); // 16kb, experience value
      if (fileMetaData.allowedSeeks < 100) fileMetaData.allowedSeeks = 100;
      // A file re-added at this level is no longer considered deleted.
      this._levels[level].deletedFiles.delete(fileMetaData.number);
      this._levels[level].addedFiles.add(fileMetaData);
    }
  }

  // Save the current state in `ver`.
  public saveTo(ver: Version): void {
    const cmp = new BySmallestKey(this._versionSet.internalKeyComparator);
    // traverse every level and put added files in right
    // position [ baseFiles_A, addedFiles, baseFiles_B ) ]

    for (let level = 0; level < Config.kNumLevels; level++) {
      // Merge the set of added files with the set of pre-existing files.
      // Drop any deleted files. Store the result in ver.
      const addedFileIterator = this._levels[level].addedFiles.iterator();
      const baseFilesInThisLevel = this._base.files[level].sort((a, b) =>
        cmp.operator(a, b) ? -1 : 1,
      );
      let addedFile = addedFileIterator.next();

      // Two-way merge of the sorted base files and sorted added files,
      // emitting the smaller of the two fronts at each step.
      let i = 0;
      while (true) {
        if (baseFilesInThisLevel.length === 0) {
          // empty level, just push addedFile
          break;
        }
        const baseFile = baseFilesInThisLevel[i++];
        if (!addedFile.done) {
          if (cmp.operator(baseFile, addedFile.value)) {
            // Add all smaller files listed in base_
            this.maybeAddFile(ver, level, baseFile);
          } else {
            // Added file comes first; re-examine the same base file on
            // the next iteration.
            i--;
            this.maybeAddFile(ver, level, addedFile.value);
            addedFile = addedFileIterator.next();
          }
        } else {
          this.maybeAddFile(ver, level, baseFile);
        }
        if (i >= baseFilesInThisLevel.length) break;
      }
      // Drain whatever added files remain after the base list is done.
      while (!addedFile.done) {
        this.maybeAddFile(ver, level, addedFile.value);
        addedFile = addedFileIterator.next();
      }
    }
  }

  // Append `file` to ver.files[level] unless it was deleted by an
  // applied edit. For levels > 0, asserts the level stays sorted and
  // non-overlapping.
  private maybeAddFile(ver: Version, level: number, file: FileMetaData): void {
    if (this._levels[level].deletedFiles.has(file.number)) {
      // File is deleted: do nothing
    } else {
      const files = ver.files[level];
      if (level > 0 && files.length > 0) {
        // Must not overlap: smallest key in file should be bigger
        // than this level's biggest key, so the file can be pushed
        // onto this level's tail
        assert(
          this._versionSet.internalKeyComparator.compare(
            files[files.length - 1].largest,
            file.smallest,
          ) < 0,
        );
      }
      file.refs++;
      ver.files[level].push(file);
    }
  }
}

--------------------------------------------------------------------------------
/src/VersionEdit.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
6 | */ 7 | 8 | import { InternalKey } from "./Format"; 9 | import { 10 | CompactPointer, 11 | DeletedFile, 12 | NewFile, 13 | FileMetaData, 14 | } from "./VersionFormat"; 15 | 16 | export default class VersionEdit { 17 | // compact_pointer_ is a string which record this level latest 18 | // compaction file's largest key,default is empty 19 | compactPointers: CompactPointer[]; 20 | deletedFiles: DeletedFile[]; 21 | newFiles: NewFile[]; 22 | // _comparator: comparator name 23 | private _comparator: string; 24 | private _logNumber?: number; 25 | private _prevLogNumber?: number; 26 | private _lastLogNumber?: number; 27 | private _lastSequence?: bigint; 28 | private _nextFileNumber?: number; 29 | private _hasComparator?: boolean; 30 | private _hasLogNumber?: boolean; 31 | private _hasPrevLogNumber?: boolean; 32 | private _hasNextFileNumber?: boolean; 33 | private _hasLastSequence?: boolean; 34 | 35 | constructor() { 36 | this._comparator = ""; 37 | this.deletedFiles = []; 38 | this.newFiles = []; 39 | this.compactPointers = []; 40 | } 41 | 42 | clear(): void { 43 | this.deletedFiles = []; 44 | this.newFiles = []; 45 | this.compactPointers = []; 46 | 47 | this._logNumber = 0; 48 | this._prevLogNumber = 0; 49 | this._lastSequence = 0n; 50 | // sstable file number 51 | this._nextFileNumber = 0; 52 | this._comparator = ""; 53 | this._lastLogNumber = 0; 54 | this._hasComparator = false; 55 | this._hasLogNumber = false; 56 | this._hasPrevLogNumber = false; 57 | this._hasNextFileNumber = false; 58 | this._hasLastSequence = false; 59 | } 60 | 61 | set comparator(value: string) { 62 | this._comparator = value; 63 | this._hasComparator = true; 64 | } 65 | 66 | get comparator(): string { 67 | return this._comparator; 68 | } 69 | 70 | set logNumber(value: number) { 71 | this._logNumber = value; 72 | this._hasLogNumber = true; 73 | } 74 | 75 | get logNumber(): number { 76 | return this._logNumber || 0; 77 | } 78 | 79 | set prevLogNumber(value: number) { 80 | this._prevLogNumber = 
value; 81 | this._hasPrevLogNumber = true; 82 | } 83 | 84 | get prevLogNumber(): number { 85 | return this._prevLogNumber || 0; 86 | } 87 | 88 | set nextFileNumber(value: number) { 89 | this._nextFileNumber = value; 90 | this._hasNextFileNumber = true; 91 | } 92 | 93 | get nextFileNumber(): number { 94 | return this._nextFileNumber || 0; 95 | } 96 | 97 | set lastSequence(value: bigint) { 98 | this._lastSequence = value; 99 | this._hasLastSequence = true; 100 | } 101 | 102 | get lastSequence(): bigint { 103 | return this._lastSequence || 0n; 104 | } 105 | 106 | get hasComparator(): boolean { 107 | return this._hasComparator || false; 108 | } 109 | 110 | get hasLogNumber(): boolean { 111 | return this._hasLogNumber || false; 112 | } 113 | 114 | get hasPrevLogNumber(): boolean { 115 | return this._hasPrevLogNumber || false; 116 | } 117 | 118 | get hasNextFileNumber(): boolean { 119 | return this._hasNextFileNumber || false; 120 | } 121 | 122 | get hasLastSequence(): boolean { 123 | return this._hasLastSequence || false; 124 | } 125 | 126 | // Delete the specified "file" from the specified "level". 127 | public deleteFile(level: number, fileNum: number): void { 128 | this.deletedFiles.push({ 129 | level, 130 | fileNum, 131 | }); 132 | } 133 | 134 | // Add the specified file at the specified number. 
135 | // REQUIRES: This version has not been saved (see VersionSet::SaveTo) 136 | // REQUIRES: "smallest" and "largest" are smallest and largest keys in file 137 | addFile( 138 | level: number, 139 | fileNum: number, 140 | fileSize: number, 141 | smallest: InternalKey, 142 | largest: InternalKey, 143 | ): void { 144 | const f = new FileMetaData(); 145 | f.number = fileNum; 146 | f.fileSize = fileSize; 147 | f.smallest = smallest; 148 | f.largest = largest; 149 | this.newFiles.push({ level, fileMetaData: f }); 150 | } 151 | 152 | setCompactPointer(level: number, internalKey: InternalKey): void { 153 | this.compactPointers.push({ 154 | level, 155 | internalKey, 156 | }); 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /src/VersionEditRecord.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018-present, heineiuo. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
 */

import { crc32 } from "./Crc32";
import { Buffer } from "./Buffer";
import { assert, varint } from "./DBHelper";
import Slice from "./Slice";
import { InternalKey, VersionEditTag } from "./Format";
import VersionEdit from "./VersionEdit";
import { FileMetaData, NewFile } from "./VersionFormat";
import { createHexStringFromDecimal } from "./LogFormat";
import { encodeFixed64, decodeFixed64 } from "./Coding";

// Serializer/deserializer for VersionEdit records as stored in the
// MANIFEST log. Wire layout (see `buffer` getter and `from`):
//   4-byte crc32 | 2-byte big-endian length | 1-byte type | payload
// The payload itself is a sequence of tag-prefixed fields (`add`/`decode`).
export default class VersionEditRecord {
  // Parse the record header+payload from a raw log buffer.
  static from(buf: Buffer): VersionEditRecord {
    const length = buf.readUInt16BE(4);
    const type = buf.readUInt8(6);
    const data = new Slice(buf.slice(7, 7 + length));
    assert(length === data.length);
    const record = new VersionEditRecord(type, data);
    return record;
  }

  // Encode only the fields whose `has*` flags are set, each prefixed
  // with its VersionEditTag byte; varint for integers, fixed64 for the
  // last sequence number, length-prefixed bytes for keys/names.
  static add(edit: VersionEdit): Slice {
    const bufList: Buffer[] = [];
    if (edit.hasComparator) {
      bufList.push(Buffer.fromArrayLike([VersionEditTag.kComparator]));
      bufList.push(Buffer.fromArrayLike(varint.encode(edit.comparator.length)));
      bufList.push(Buffer.fromUnknown(edit.comparator));
    }
    if (edit.hasLogNumber) {
      bufList.push(Buffer.fromArrayLike([VersionEditTag.kLogNumber]));
      bufList.push(Buffer.fromArrayLike(varint.encode(edit.logNumber)));
    }
    if (edit.hasPrevLogNumber) {
      bufList.push(Buffer.fromArrayLike([VersionEditTag.kPrevLogNumber]));
      bufList.push(Buffer.fromArrayLike(varint.encode(edit.prevLogNumber)));
    }
    if (edit.hasNextFileNumber) {
      bufList.push(Buffer.fromArrayLike([VersionEditTag.kNextFileNumber]));
      bufList.push(Buffer.fromArrayLike(varint.encode(edit.nextFileNumber)));
    }
    if (edit.hasLastSequence) {
      bufList.push(Buffer.fromArrayLike([VersionEditTag.kLastSequence]));
      bufList.push(encodeFixed64(edit.lastSequence));
    }
    edit.compactPointers.forEach(
      (pointer: { level: number; internalKey: Slice }) => {
        bufList.push(Buffer.fromArrayLike([VersionEditTag.kCompactPointer]));
        bufList.push(Buffer.fromArrayLike(varint.encode(pointer.level)));
        bufList.push(
          Buffer.fromArrayLike(varint.encode(pointer.internalKey.length)),
        );
        bufList.push(pointer.internalKey.buffer);
      },
    );

    edit.deletedFiles.forEach((file: { level: number; fileNum: number }) => {
      bufList.push(Buffer.fromArrayLike([VersionEditTag.kDeletedFile]));
      bufList.push(Buffer.fromArrayLike(varint.encode(file.level)));
      bufList.push(Buffer.fromArrayLike(varint.encode(file.fileNum)));
    });

    edit.newFiles.forEach((file: NewFile) => {
      bufList.push(Buffer.fromArrayLike([VersionEditTag.kNewFile]));
      bufList.push(Buffer.fromArrayLike(varint.encode(file.level)));
      bufList.push(
        Buffer.fromArrayLike(varint.encode(file.fileMetaData.number)),
      );
      bufList.push(
        Buffer.fromArrayLike(varint.encode(file.fileMetaData.fileSize)),
      );
      bufList.push(
        Buffer.fromArrayLike(varint.encode(file.fileMetaData.smallest.length)),
      );
      bufList.push(file.fileMetaData.smallest.buffer);
      bufList.push(
        Buffer.fromArrayLike(varint.encode(file.fileMetaData.largest.length)),
      );
      bufList.push(file.fileMetaData.largest.buffer);
    });

    return new Slice(Buffer.concat(bufList));
  }

  // Inverse of `add`: walk the payload tag by tag and rebuild a
  // VersionEdit. Relies on the `varint.decode.bytes` side channel to
  // know how many bytes each varint consumed.
  static decode(opSlice: Slice): VersionEdit {
    let index = 0;
    const edit = new VersionEdit();
    const opBuffer = opSlice.buffer;
    while (index < opSlice.length) {
      const type = opBuffer.readUInt8(index);
      index += 1;

      if (type === VersionEditTag.kComparator) {
        const comparatorNameLength = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        const comparatorName = opBuffer.slice(
          index,
          index + comparatorNameLength,
        );
        index += comparatorNameLength;
        edit.comparator = comparatorName.toString();
        continue;
      } else if (type === VersionEditTag.kLogNumber) {
        const logNumber = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        edit.logNumber = logNumber;
        continue;
      } else if (type === VersionEditTag.kPrevLogNumber) {
        const prevLogNumber = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        edit.prevLogNumber = prevLogNumber;
        continue;
      } else if (type === VersionEditTag.kNextFileNumber) {
        const nextFileNumber = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        edit.nextFileNumber = nextFileNumber;
        continue;
      } else if (type === VersionEditTag.kLastSequence) {
        const lastSequence = decodeFixed64(opBuffer.slice(index));
        index += 8;
        edit.lastSequence = lastSequence;
        continue;
      } else if (type === VersionEditTag.kCompactPointer) {
        const level = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        const internalKeyLength = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        assert(opBuffer.length >= index + internalKeyLength);
        const internalKey = new Slice(
          opBuffer.slice(index, index + internalKeyLength),
        );
        index += internalKeyLength;
        edit.compactPointers.push({
          level,
          internalKey: new InternalKey(internalKey),
        });
        continue;
      } else if (type === VersionEditTag.kDeletedFile) {
        const level = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        const fileNum = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        edit.deletedFiles.push({
          level,
          fileNum,
        });
        continue;
      } else if (type === VersionEditTag.kNewFile) {
        const level = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        const fileNum = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        const fileSize = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        const smallestKeyLength = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        const smallestKey = opBuffer.slice(index, index + smallestKeyLength);
        index += smallestKeyLength;
        const largestKeyLength = varint.decode(opBuffer.slice(index));
        index += varint.decode.bytes;
        const largestKey = opBuffer.slice(index, index + largestKeyLength);
        index += largestKeyLength;
        const fileMetaData = new FileMetaData();
        fileMetaData.number = fileNum;
        fileMetaData.fileSize = fileSize;
        fileMetaData.smallest = InternalKey.from(new Slice(smallestKey));
        fileMetaData.largest = InternalKey.from(new Slice(largestKey));

        edit.newFiles.push({
          level,
          fileMetaData,
        });
        continue;
      }
      // NOTE(review): an unknown tag byte falls through without
      // advancing past its payload — confirm callers never feed
      // records with unrecognized tags.
    }
    return edit;
  }

  constructor(type: VersionEditTag, data: Slice | Buffer) {
    this.type = type;
    this.data = new Slice(data);
  }

  // Total on-disk size: 7-byte header (crc + length + type) + payload.
  get length(): number {
    return this.data.length + 7;
  }

  get size(): number {
    return this.length;
  }

  data: Slice;
  type: VersionEditTag;

  // Serialize the record: crc32(type + payload) | length | type | payload.
  get buffer(): Buffer {
    const lengthBuf = Buffer.fromHex(
      createHexStringFromDecimal(this.data.length),
    );
    const typeBuf = Buffer.fromArrayLike([this.type]);
    const sum = crc32(Buffer.concat([typeBuf, this.data.buffer]));
    return Buffer.concat([sum, lengthBuf, typeBuf, this.data.buffer]);
  }
}

--------------------------------------------------------------------------------
/src/VersionFormat.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

import { InternalKeyComparator, InternalKey } from "./Format";
import { Options } from "./Options";

// Metadata for one on-disk sstable file.
export class FileMetaData {
  // reference count
  refs: number;
  // if seeks > allowedSeeks, trigger compaction
  allowedSeeks: number;
  fileSize: number;
  number!: number;
  smallest: InternalKey;
  largest: InternalKey;

  constructor() {
    this.refs = 0;
    this.allowedSeeks = 1 << 30;
    this.fileSize = 0;
    this.smallest = new InternalKey();
    this.largest = new InternalKey();
  }
}

// Strict-weak-ordering comparator for FileMetaData: by smallest key,
// with file number as the tie-breaker.
export class BySmallestKey {
  internalComparator!: InternalKeyComparator;

  constructor(cmp?: InternalKeyComparator) {
    if (cmp) this.internalComparator = cmp;
  }

  // if file1 < file2 then true
  operator(file1: FileMetaData, file2: FileMetaData): boolean {
    const r = this.internalComparator.compare(file1.smallest, file2.smallest);
    if (r === 0) return file1.number < file2.number;
    return r < 0;
  }
}

// sorted set(compared by internalkey comparator, if small key
// is equal then compare file number
// TODO not copy inserted value here, just reference, should copy?
export class FileSet {
  _set: FileMetaData[];
  compare: BySmallestKey;

  constructor(cmp: BySmallestKey) {
    this.compare = cmp;
    this._set = [];
  }

  // Insert keeping sorted order; a file whose number is already present
  // is ignored. Linear scan — O(n) per insert.
  add(file: FileMetaData): void {
    if (this._set.find((item) => item.number === file.number)) {
      return;
    }
    const setLength = this._set.length;
    if (setLength === 0) {
      this._set.push(file);
    } else {
      for (let i = 0; i < setLength; i++) {
        const file1 = this._set[i];
        const b = this.compare.operator(file, file1);
        if (b) {
          this._set.splice(i, 0, file);
          return;
        }
      }
      this._set.push(file);
    }
  }

  // push(file: FileMetaData): void {
  //   const endFile = this.end()
  //   assert(!endFile || this.compare.operator(endFile, file))
  //   this._set.push(file)
  // }

  // begin(): FileMetaData {
  //   return this._set[0]
  // }

  // end(): FileMetaData | null {
  //   return this._set[this._set.length - 1] || null
  // }

  // delete(file: FileMetaData): void {
  //   this._set = this._set.filter(item => item !== file)
  // }

  // size(): number {
  //   return this._set.length
  // }

  // totalBytes(): number {
  //   let bytes = 0
  //   for (const fileMetaData of this.iterator()) {
  //     bytes += fileMetaData.fileSize
  //   }
  //   return bytes
  // }

  // get data(): FileMetaData[] {
  //   return this._set
  // }

  // Yield files in ascending (smallest-key, number) order.
  *iterator(): IterableIterator {
    const setLength = this._set.length;
    for (let i = 0; i < setLength; i++) {
      yield this._set[i];
    }
  }
}

export type FileMetaDataLeveldb = {
  fileNum: number;
  fileSize: number;
  smallestKey: InternalKey;
  largestKey: InternalKey;
};

export type CompactPointer = {
  level: number;
  internalKey: InternalKey;
};

export type DeletedFile = {
  level: number;
  fileNum: number;
};

export type NewFile = {
  level: number;
  fileMetaData: FileMetaData;
};

// Size budget for a level: 10MB for levels 0/1, growing 10x per level.
export function getMaxBytesForLevel(level: number): number {
  // Note: the result for level zero is not really used since we set
  // the level-0 compaction threshold based on number of files.
  // Result for both level-0 and level-1
  let result = 10.0 * 1048576.0;
  while (level > 1) {
    result *= 10;
    level--;
  }
  return result;
}

// Cap on the total size of an expanded compaction's inputs.
export function getExpandedCompactionByteSizeLimit(options: Options): number {
  return 25 * options.maxFileSize;
}

export interface GetStats {
  seekFile: FileMetaData;
  seekFileLevel: number;
}

--------------------------------------------------------------------------------
/src/WriteBatch.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

import { Buffer } from "./Buffer";
import { assert, varint } from "./DBHelper";
import Slice from "./Slice";
import MemTable from "./MemTable";
import LogRecord from "./LogRecord";
import { SequenceNumber, EntryRequireType, ValueType } from "./Format";
import {
  decodeFixed64,
  encodeFixed32,
  decodeFixed32,
  encodeFixed64,
} from "./Coding";

// Static helpers that manipulate a WriteBatch's header and contents.
export class WriteBatchInternal {
  // WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
  static kHeader = 12;

  // Total payload size (header excluded).
  static byteSize(batch: WriteBatch): number {
    return batch.buffers.reduce((size, buf) => size + buf.length, 0);
  }

  // Replay every update in the batch into the memtable, assigning
  // consecutive sequence numbers starting at the batch's sequence.
  static insert(batch: WriteBatch, mem: MemTable): void {
    let nextSequence = WriteBatchInternal.getSequence(batch);
    for (const update of batch.iterator()) {
      const { type, key, value } = update;
      mem.add(nextSequence, type, key, value);
      nextSequence += 1n;
    }
  }

  static getContents(batch: WriteBatch): Buffer {
    return Buffer.concat([batch.head, ...batch.buffers]);
  }

  static setContents(batch: WriteBatch, contents: Buffer): void {
    assert(contents.length >= WriteBatchInternal.kHeader);
    batch.head = contents.slice(0, this.kHeader);
    batch.buffers = [contents.slice(this.kHeader)];
  }

  // sequence must be lastSequence + 1
  // NOTE(review): fillBuffer(..., 0, 7) paired with getSequence's
  // slice(0, 8) suggests fillBuffer's end index is inclusive — confirm
  // against ./Buffer's fillBuffer implementation.
  static setSequence(batch: WriteBatch, sequence: SequenceNumber): void {
    batch.head.fillBuffer(encodeFixed64(sequence), 0, 7);
  }

  static getSequence(batch: WriteBatch): SequenceNumber {
    return decodeFixed64(batch.head.slice(0, 8));
  }

  static setCount(batch: WriteBatch, count: number): void {
    batch.head.fillBuffer(encodeFixed32(count), 8, 11);
  }

  static getCount(batch: WriteBatch): number {
    return decodeFixed32(batch.head.slice(8, 12));
  }

  // Append src's records to dst and sum their counts. dst's sequence
  // number is left unchanged.
  static append(dst: WriteBatch, src: WriteBatch): void {
    WriteBatchInternal.setCount(
      dst,
      WriteBatchInternal.getCount(src) + WriteBatchInternal.getCount(dst),
    );
    // for (const buf of src.buffers) {
    //   dst.buffers.push(buf)
    // }
    dst.buffers = dst.buffers.concat(src.buffers);
  }
}

// Simplified WriteBatch
// WriteBatch::rep_ :=
//    sequence: fixed64
//    count: fixed32
//    data: record[count]
// record :=
//    kTypeValue varstring varstring |
//    kTypeDeletion varstring
// varstring :=
//    len: varint32
//    data: uint8[len]
export class WriteBatch {
  buffers: Buffer[] = [];
  head: Buffer = Buffer.alloc(WriteBatchInternal.kHeader);

  // Queue a key/value insertion and bump the header count.
  put(key: string | Uint8Array, value: string | Uint8Array): void {
    const record = LogRecord.add(new Slice(key), new Slice(value));
    this.buffers.push(record.buffer);
    WriteBatchInternal.setCount(this, WriteBatchInternal.getCount(this) + 1);
  }

  // Queue a key deletion and bump the header count.
  del(key: string | Uint8Array): void {
    const record = LogRecord.del(new Slice(key));
    this.buffers.push(record.buffer);
    WriteBatchInternal.setCount(this, WriteBatchInternal.getCount(this) + 1);
  }

  clear(): void {
    this.buffers = [];
    this.head = Buffer.alloc(WriteBatchInternal.kHeader);
  }

  // Decode each queued record back into { type, key, value }; deletion
  // records carry an empty value slice.
  *iterator(): IterableIterator {
    let buffersIndex = 0;
    const buffersCount = this.buffers.length;
    while (buffersIndex < buffersCount) {
      const buffer = this.buffers[buffersIndex];
      const bufferLength = buffer.length;
      let index = 0;

      while (index < bufferLength) {
        const valueType = buffer.readUInt8(index);
        index++;
        const keyLength = varint.decode(buffer, index);
        index += varint.decode.bytes;
        const keyBuffer = buffer.slice(index, index + keyLength);
        index += keyLength;

        if (valueType === ValueType.kTypeDeletion) {
          yield {
            type: valueType,
            key: new Slice(keyBuffer),
            value: new Slice(),
          };
          continue;
        }

        const valueLength = varint.decode(buffer, index);
        index += varint.decode.bytes;
        const valueBuffer = buffer.slice(index, index + valueLength);
        index += valueLength;
        yield {
          type: valueType,
          key: new Slice(keyBuffer),
          value: new Slice(valueBuffer),
        };
      }
      buffersIndex++;
    }
  }
}

--------------------------------------------------------------------------------
/src/WriterQueue.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

import { WriteBatch } from "./WriteBatch";

// One pending write request: its batch, options, and a one-shot
// wait/signal latch used to park the caller until the write is applied.
export class Writer {
  batch!: WriteBatch | void;
  sync = false;
  done = false;

  resolve!: () => void;

  // Release the waiter, if any; the latch is consumed.
  signal(): void {
    if (this.resolve) this.resolve();
    delete this.resolve;
  }

  // Arm a fresh latch and return a promise that settles on signal().
  // NOTE(review): return type appears to have lost its generic
  // parameter (likely Promise<void>) during extraction.
  wait(): Promise {
    delete this.resolve;
    return new Promise((resolve) => (this.resolve = resolve));
  }
}

// FIFO queue of pending writers (front is the next to be serviced).
export class WriterQueue {
  private list: Writer[] = [];

  public push(writer: Writer): void {
    this.list.push(writer);
  }

  public front(): Writer | void {
    return this.list[0];
  }

  public popFront(): void {
    this.list.shift();
  }

  get length(): number {
    return this.list.length;
  }

  // Iterate writers from position `start` onward without removing them.
  *iterator(start = 0): IterableIterator {
    for (let i = start; i < this.list.length; i++) {
      yield this.list[i];
    }
  }
}

--------------------------------------------------------------------------------
/src/Yallist.ts:
--------------------------------------------------------------------------------
// Rewrite to TypeScript https://github.com/isaacs/yallist
// NOTE(review): the generic parameters (<T>) on YallistNode/Yallist
// appear to have been stripped in this dump — confirm against upstream.

// Doubly-linked list node; linking into `prev`/`next` is done in the
// constructor.
export class YallistNode {
  constructor(
    value: T,
    prev: null | YallistNode,
    next: null | YallistNode,
    list: Yallist,
  ) {
    this.list = list;
    this.value = value;

    if (prev) {
      prev.next = this;
      this.prev = prev;
    } else {
      this.prev = null;
    }

    if (next) {
      next.prev = this;
      this.next = next;
    } else {
      this.next = null;
    }
  }

  value: T;
  list: Yallist | null;
  prev: YallistNode | null;
  next: YallistNode | null;
}

// Minimal doubly-linked list (push/unshift/removeNode/iteration).
export class Yallist {
  constructor() {
    this.tail = null;
    this.head = null;
  }

  length = 0;

  tail: null | YallistNode;
  head: null | YallistNode;

  // Append to the tail; returns the new length.
  push(item: T): number {
    this.tail = new YallistNode(item, this.tail, null, this);
    if (!this.head) {
      this.head = this.tail;
    }
    this.length++;

    return this.length;
  }

  // Prepend to the head; returns the new length.
  unshift(item: T): number {
    this.head = new YallistNode(item, null, this.head, this);
    if (!this.tail) {
      this.tail = this.head;
    }
    this.length++;

    return this.length;
  }

  // Move an existing node (possibly from another list) to this list's head.
  unshiftNode(node: YallistNode): void {
    if (node === this.head) {
      return;
    }

    if (node.list) {
      node.list.removeNode(node);
    }

    const head = this.head;
    node.list = this;
    node.next = head;
    if (head) {
      head.prev = node;
    }

    this.head = node;
    if (!this.tail) {
      this.tail = node;
    }
    this.length++;
  }

  // Unlink `node` and return its successor (or null).
  removeNode(node: YallistNode): YallistNode | null {
    if (node.list !== this) {
      throw new Error("removing node which does not belong to this list");
    }

    const next = node.next;
    const prev = node.prev;

    if (next) {
      next.prev = prev;
    }

    if (prev) {
      prev.next = next;
    }

    if (node === this.head) {
      this.head = next;
    }
    if (node === this.tail) {
      this.tail = prev;
    }

    node.list.length--;
    node.next = null;
    node.prev = null;
    node.list = null;

    return next;
  }

  forEach(callbackFn: (value: T, index: number, list: this) => void): void {
    for (let walker = this.head, i = 0; walker !== null; i++) {
      callbackFn(walker.value, i, this);
      walker = walker.next;
    }
  }

  // Build a new list of callbackFn applied to each value, in order.
  map(callbackFn: (value: T, list: this) => T): Yallist {
    const res = new Yallist();
    for (let walker = this.head; walker !== null; ) {
      res.push(callbackFn(walker.value, this));
      walker = walker.next;
    }
    return res;
  }

  toArray(): T[] {
    const arr = new Array(this.length);
    for (let i = 0, walker = this.head; walker !== null; i++) {
      arr[i] = walker.value;
      walker = walker.next;
    }
    return arr;
  }
}

--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2018-present, heineiuo.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

export { default as InternalDatabase } from "./Database";
export { InternalDBRepairer } from "./DBRepairer";
export { WriteBatch } from "./WriteBatch";
export {
  Options,
  ReadOptions,
  DatabaseOptions,
  WriteOptions,
  IteratorOptions,
} from "./Options";
export { Env } from "./Env";

--------------------------------------------------------------------------------
/tests/BitBuffer.test.ts:
--------------------------------------------------------------------------------
import BitBuffer from "../src/BitBuffer";
import { Buffer } from "../src/Buffer";
// @ts-ignore make jest happy
global.TextEncoder = require("util").TextEncoder;

test("BitBuffer", (done) => {
  // 4-byte (32-bit) backing buffer; bits 35/40/50 are beyond it —
  // presumably BitBuffer grows or masks out-of-range indexes (verify
  // against src/BitBuffer.ts).
  const arr1 = new BitBuffer(Buffer.alloc(Math.ceil(32 / 8)));
  arr1.set(20, true);
  arr1.set(35, true);
  arr1.set(40, false);

  const arr2 = new BitBuffer(arr1.buffer);

  expect(arr2.get(20)).toBe(true);
  expect(arr2.get(40)).toBe(false);
  expect(arr2.get(50)).toBe(false);
  done();
});

--------------------------------------------------------------------------------
/tests/BloomFilter.test.ts:
-------------------------------------------------------------------------------- 1 | import BloomFilter from "../src/BloomFilter"; 2 | import Slice from "../src/Slice"; 3 | 4 | // @ts-ignore make jest happy 5 | global.TextEncoder = require("util").TextEncoder; 6 | // @ts-ignore make jest happy 7 | global.TextDecoder = require("util").TextDecoder; 8 | 9 | test("bloom filter", () => { 10 | const filter = new BloomFilter(); 11 | expect(filter.bitBuffer.toString()).toBe("00000000"); 12 | expect(filter.kNumber).toBe(7); 13 | 14 | const keys = [ 15 | new Slice(new TextEncoder().encode("a")), 16 | new Slice(new TextEncoder().encode("a1")), 17 | ]; 18 | 19 | filter.putKeys(keys, keys.length); 20 | 21 | const expectBits = 22 | "000010000010010011000000000000100001001000010000000000001000100100000100"; 23 | expect(filter.bitBuffer.toString()).toBe(expectBits); 24 | 25 | const filter2Slice = new Slice(filter.buffer); 26 | const filter2 = new BloomFilter(filter.buffer); 27 | 28 | expect(filter2.keyMayMatch(keys[1], filter2Slice)).toBe(true); 29 | expect(filter2.keyMayMatch(keys[0], filter2Slice)).toBe(true); 30 | expect( 31 | filter2.keyMayMatch( 32 | new Slice(new TextEncoder().encode("xxx")), 33 | filter2Slice, 34 | ), 35 | ).toBe(false); 36 | }); 37 | -------------------------------------------------------------------------------- /tests/IteratorHelper.test.ts: -------------------------------------------------------------------------------- 1 | import IteratorHelper from "../src/IteratorHelper"; 2 | // @ts-ignore make jest happy 3 | global.TextEncoder = require("util").TextEncoder; 4 | 5 | test("iterator helper", async (done) => { 6 | let asyncNumbersState = "none"; 7 | let numbersState = "none"; 8 | 9 | async function* asyncNumbers(): AsyncIterableIterator { 10 | asyncNumbersState = "started"; 11 | for (let i = 0; i < 100; i++) { 12 | yield i; 13 | } 14 | asyncNumbersState = "done"; 15 | } 16 | 17 | function* numbers(): IterableIterator { 18 | numbersState = "started"; 19 
| for (let i = 0; i < 100; i++) { 20 | yield i; 21 | } 22 | numbersState = "done"; 23 | } 24 | 25 | for await (const i of IteratorHelper.wrap(asyncNumbers(), () => { 26 | asyncNumbersState = "break"; 27 | })) { 28 | if (i > 10) break; 29 | } 30 | 31 | expect(asyncNumbersState).toBe("break"); 32 | 33 | for (const i of IteratorHelper.wrap(numbers(), () => { 34 | numbersState = "break"; 35 | })) { 36 | if (i > 10) break; 37 | } 38 | 39 | expect(numbersState).toBe("break"); 40 | 41 | done(); 42 | }); 43 | -------------------------------------------------------------------------------- /tests/batch.test.ts: -------------------------------------------------------------------------------- 1 | import { Database } from "../port/node"; 2 | import { random } from "../fixtures/random"; 3 | import { createDir, cleanup } from "../fixtures/dbpath"; 4 | import { WriteBatch } from "../src/WriteBatch"; 5 | // @ts-ignore make jest happy 6 | global.TextEncoder = require("util").TextEncoder; 7 | 8 | const dbpath = createDir(); 9 | afterAll(() => { 10 | cleanup(dbpath); 11 | }); 12 | 13 | describe("WriteBatch", () => { 14 | test("batch", async (done) => { 15 | const debugOptions = { debug: true }; 16 | 17 | const db = new Database(dbpath, debugOptions); 18 | const batch = new WriteBatch(); 19 | let delKey = null; 20 | let getKey = null; 21 | for (let i = 0; i < 100; i++) { 22 | const entry = random(); 23 | if (i === 50) delKey = entry[0]; 24 | if (i === 51) getKey = entry[0]; 25 | batch.put(entry[0], entry[1]); 26 | } 27 | batch.del(delKey); 28 | 29 | await db.batch(batch); 30 | 31 | expect(!!(await db.get(getKey))).toBe(true); 32 | expect(!!(await db.get(delKey))).toBe(false); 33 | 34 | await db.close(); 35 | 36 | done(); 37 | }); 38 | }); 39 | -------------------------------------------------------------------------------- /tests/compaction.test.ts: -------------------------------------------------------------------------------- 1 | import { random } from "../fixtures/random"; 2 | 
import { Database } from "../port/node"; 3 | import { createDir, cleanup } from "../fixtures/dbpath"; 4 | import { allocRunner } from "../fixtures/runner"; 5 | import { Buffer } from "../src/Buffer"; 6 | import { TextDecoder } from "util"; 7 | 8 | // @ts-ignore make jest happy 9 | global.TextEncoder = require("util").TextEncoder; 10 | 11 | jest.setTimeout(60000 * 10); 12 | 13 | const dbpath1 = createDir(); 14 | const dbpath2 = createDir(); 15 | afterAll(() => { 16 | cleanup(dbpath1); 17 | cleanup(dbpath2); 18 | }); 19 | 20 | cleanup(dbpath1); 21 | cleanup(dbpath2); 22 | 23 | describe("Compaction", () => { 24 | test("writelevel0", async (done) => { 25 | const db = new Database(dbpath1); 26 | await db.put("key", "value1"); 27 | await db.put("key", "value2"); 28 | await db.del("key"); 29 | await db.put("key", "value3"); 30 | await db.put("key", "value4"); 31 | await db.compactRange("k", "kz"); 32 | await db.close(); 33 | done(); 34 | }); 35 | 36 | test("do merge", async (done) => { 37 | const db = new Database(dbpath2); 38 | const checkRecord = ["foo", "bar"]; 39 | const checkIndex = Math.floor(Math.random() * 1000); 40 | let randomCheckRecord = []; 41 | const randomCheckIndex = Math.floor(Math.random() * 1000); 42 | const dataset: [string | Buffer, string | Buffer][] = []; 43 | for (let i = 0; i < 10000; i++) { 44 | if (i === checkIndex) { 45 | dataset.push(["foo", "bar"]); 46 | } else if (i === randomCheckIndex) { 47 | randomCheckRecord = random(); 48 | dataset.push([randomCheckRecord[0], randomCheckRecord[1]]); 49 | } else { 50 | dataset.push(random()); 51 | } 52 | } 53 | 54 | await allocRunner(10, db, dataset); 55 | 56 | const result = await db.get(checkRecord[0]); 57 | expect(!!result).toBe(true); 58 | if (result) { 59 | expect(new TextDecoder().decode(result)).toBe(checkRecord[1]); 60 | } 61 | 62 | await db.compactRange( 63 | Buffer.alloc(16).fill(0x00), 64 | Buffer.alloc(16).fill(0xff), 65 | ); 66 | 67 | const result2 = await db.get(checkRecord[0]); 68 | 
expect(!!result2).toBe(true); 69 | if (result2) { 70 | expect(new TextDecoder().decode(result2)).toBe(checkRecord[1]); 71 | } 72 | 73 | const result3 = await db.get(randomCheckRecord[0]); 74 | expect(!!result3).toBe(true); 75 | if (result3) 76 | expect(new TextDecoder().decode(result3)).toBe(randomCheckRecord[1]); 77 | await db.close(); 78 | 79 | done(); 80 | }); 81 | }); 82 | -------------------------------------------------------------------------------- /tests/db.test.ts: -------------------------------------------------------------------------------- 1 | import { Database } from "../port/node"; 2 | import { random } from "../fixtures/random"; 3 | import { createDir, cleanup } from "../fixtures/dbpath"; 4 | import { copydb } from "../fixtures/copydb"; 5 | 6 | jest.setTimeout(60000 * 10); 7 | // @ts-ignore make jest happy 8 | global.TextEncoder = require("util").TextEncoder; 9 | 10 | const dbpath = createDir(); 11 | const dbpath1 = createDir(); 12 | const dbpath2 = createDir(); 13 | const dbpath3 = createDir(); 14 | const dbpath4 = createDir(); 15 | afterAll(() => { 16 | cleanup(dbpath); 17 | cleanup(dbpath1); 18 | cleanup(dbpath2); 19 | cleanup(dbpath3); 20 | cleanup(dbpath4); 21 | }); 22 | 23 | describe("Database", () => { 24 | test("read record from db", async (done) => { 25 | const db = new Database(dbpath1); 26 | await db.put("key", "world"); 27 | const result = await db.get("key"); 28 | expect(!!result).toBe(true); 29 | expect(String.fromCharCode.apply(null, result)).toBe("world"); 30 | await db.close(); 31 | done(); 32 | }); 33 | 34 | test("recovery", async (done) => { 35 | const debugOptions = { debug: true }; 36 | const db = new Database(dbpath, debugOptions); 37 | await db.ok(); 38 | await db.put("key", "world"); 39 | await db.close(); 40 | 41 | await new Promise((resolve) => setTimeout(resolve, 500)); 42 | await copydb(dbpath, dbpath2); 43 | 44 | const db2 = new Database(dbpath2, debugOptions); 45 | await db2.ok(); 46 | await db2.put("key", "world"); 
47 | await db2.close(); 48 | 49 | await new Promise((resolve) => setTimeout(resolve, 500)); 50 | await copydb(dbpath2, dbpath3); 51 | 52 | const db3 = new Database(dbpath3, debugOptions); 53 | await db3.ok(); 54 | for (let i = 0; i < 1000; i++) { 55 | const [key, value] = random(); 56 | await db3.put(key, value); 57 | } 58 | await db3.put("key", "world"); 59 | await db3.close(); 60 | 61 | await new Promise((resolve) => setTimeout(resolve, 1500)); 62 | await copydb(dbpath3, dbpath4); 63 | 64 | const db4 = new Database(dbpath4, debugOptions); 65 | await db4.ok(); 66 | await db4.put("key", "world"); 67 | await db4.close(); 68 | 69 | const result = await db4.get("key"); 70 | expect(!!result).toBe(true); 71 | expect(String.fromCharCode.apply(null, result)).toBe("world"); 72 | 73 | done(); 74 | }); 75 | }); 76 | -------------------------------------------------------------------------------- /tests/dumpmemtable.test.ts: -------------------------------------------------------------------------------- 1 | import { Database } from "../port/node"; 2 | import { createDir, cleanup } from "../fixtures/dbpath"; 3 | 4 | const dbpath1 = createDir(); 5 | afterAll(() => { 6 | cleanup(dbpath1); 7 | }); 8 | 9 | // @ts-ignore make jest happy 10 | global.TextEncoder = require("util").TextEncoder; 11 | 12 | cleanup(dbpath1); 13 | 14 | describe("Dump memtable", () => { 15 | test("db manual compaction", async (done) => { 16 | const db = new Database(dbpath1); 17 | await db.put("key", "world"); 18 | await db.put("key1", "world1"); 19 | await db.put("key", "world2"); 20 | await db.del("key1"); 21 | await db.compactRange("k", "kc"); 22 | const result = await db.get("key"); 23 | expect(!!result).toBe(true); 24 | expect(String.fromCharCode.apply(null, result)).toBe("world2"); 25 | const result2 = await db.get("key1"); 26 | expect(!!result2).toBe(false); 27 | await db.close(); 28 | 29 | done(); 30 | }); 31 | }); 32 | 
-------------------------------------------------------------------------------- /tests/filter.test.ts: -------------------------------------------------------------------------------- 1 | import { hash } from "../src/Hash"; 2 | import { Buffer } from "../src/Buffer"; 3 | 4 | // @ts-ignore make jest happy 5 | global.TextEncoder = require("util").TextEncoder; 6 | 7 | test("filter", () => { 8 | expect(hash(Buffer.fromString("kadff"), 1) % 15).toBe(4); 9 | const seed1 = Math.floor(Math.random() * 2e32); 10 | expect(hash(Buffer.fromString("adf132"), seed1) % 15).toBe(7); 11 | }); 12 | -------------------------------------------------------------------------------- /tests/iterator.test.ts: -------------------------------------------------------------------------------- 1 | import { Database } from "../port/node"; 2 | import { random } from "../fixtures/random"; 3 | import { createDir, cleanup } from "../fixtures/dbpath"; 4 | import { Buffer } from "../src/Buffer"; 5 | 6 | jest.setTimeout(60000 * 10); 7 | 8 | // @ts-ignore make jest happy 9 | global.TextEncoder = require("util").TextEncoder; 10 | 11 | const dbpath = createDir(); 12 | const dbpath2 = createDir(); 13 | const dbpath3 = createDir(); 14 | afterAll(() => { 15 | cleanup(dbpath); 16 | cleanup(dbpath2); 17 | cleanup(dbpath3); 18 | }); 19 | 20 | cleanup(dbpath); 21 | cleanup(dbpath2); 22 | cleanup(dbpath3); 23 | 24 | describe("Database Iterator", () => { 25 | test("iterator with start option", async (done) => { 26 | const db = new Database(dbpath); 27 | let cacheKey = null; 28 | for (let i = 0; i < 1000; i++) { 29 | const entry = random(); 30 | if (i === 500) cacheKey = entry[0]; 31 | await db.put(entry[0], entry[1]); 32 | } 33 | 34 | let count = 0; 35 | let cacheKey2 = null; 36 | for await (const entry of db.iterator({ start: cacheKey })) { 37 | if (count === 0) { 38 | cacheKey2 = `${entry.key}`; 39 | } 40 | expect( 41 | Buffer.compare( 42 | Buffer.fromUnknown(String.fromCharCode.apply(null, entry.key)), 43 | 
Buffer.fromUnknown(cacheKey), 44 | ), 45 | ).toBe(1); 46 | count++; 47 | if (count > 10) break; 48 | } 49 | 50 | await db.del(cacheKey2); 51 | count = 0; 52 | 53 | for await (const entry of db.iterator({ start: cacheKey })) { 54 | expect( 55 | Buffer.compare( 56 | Buffer.fromUnknown(String.fromCharCode.apply(null, entry.key)), 57 | Buffer.fromUnknown(cacheKey2), 58 | ) !== 0, 59 | ).toBe(true); 60 | count++; 61 | if (count > 10) break; 62 | } 63 | await db.close(); 64 | 65 | done(); 66 | }); 67 | 68 | test("iterator count", async (done) => { 69 | const db = new Database(dbpath2); 70 | const list = []; 71 | for (let i = 0; i < 500; i++) { 72 | list.push(random()); 73 | } 74 | 75 | for (const entry of list) { 76 | await db.put(entry[0], entry[1]); 77 | } 78 | 79 | let count = 0; 80 | for await (const entry of db.iterator()) { 81 | if (entry) { 82 | count++; 83 | } 84 | } 85 | await db.close(); 86 | 87 | expect(count).toBe(list.length); 88 | done(); 89 | }); 90 | 91 | test("reverse iterator", async (done) => { 92 | const db = new Database(dbpath3); 93 | const list = []; 94 | for (let i = 0; i < 10; i++) { 95 | list.push(random()); 96 | } 97 | list.sort((a, b) => 98 | Buffer.compare(Buffer.fromUnknown(a[0]), Buffer.fromUnknown(b[0])), 99 | ); 100 | 101 | for (const entry of list) { 102 | await db.put(entry[0], entry[1]); 103 | } 104 | 105 | const listKeys = []; 106 | for await (const entry of db.iterator({ reverse: true })) { 107 | listKeys.push(String.fromCharCode.apply(null, entry.key)); 108 | } 109 | 110 | const originalKeys = list 111 | .reverse() 112 | .map((pair) => pair[0]) 113 | .join("|"); 114 | 115 | expect(listKeys.join("|")).toEqual(originalKeys); 116 | await db.close(); 117 | done(); 118 | }); 119 | }); 120 | -------------------------------------------------------------------------------- /tests/lock.test.ts: -------------------------------------------------------------------------------- 1 | import { Database } from "../port/node"; 2 | import { createDir, 
cleanup } from "../fixtures/dbpath"; 3 | import { copydb } from "../fixtures/copydb"; 4 | // @ts-ignore make jest happy 5 | global.TextEncoder = require("util").TextEncoder; 6 | 7 | jest.setTimeout(60000 * 10); 8 | 9 | const dbpath = createDir(); 10 | const dbpath3 = createDir(); 11 | afterAll(() => { 12 | cleanup(dbpath); 13 | cleanup(dbpath3); 14 | }); 15 | 16 | cleanup(dbpath); 17 | 18 | test("lock", async (done) => { 19 | const db1 = new Database(dbpath, { debug: true, lockfileStale: 10 }); 20 | expect(await db1.ok()).toBe(true); 21 | const db2 = new Database(dbpath, { lockfileStale: 10 }); 22 | await expect(db2.ok()).rejects.toThrowError(/Lock fail/); 23 | await copydb(dbpath, dbpath3); 24 | const db3 = new Database(dbpath3, { lockfileStale: 10 }); 25 | await expect(db3.ok()).resolves.toBe(true); 26 | await db1.close(); 27 | await db2.close(); 28 | await db3.close(); 29 | 30 | done(); 31 | }); 32 | -------------------------------------------------------------------------------- /tests/memtable.test.ts: -------------------------------------------------------------------------------- 1 | import Slice from "../src/Slice"; 2 | import MemTable from "../src/MemTable"; 3 | import { LookupKey, ValueType, InternalKeyComparator } from "../src/Format"; 4 | import { BytewiseComparator } from "../src/Comparator"; 5 | // @ts-ignore make jest happy 6 | global.TextEncoder = require("util").TextEncoder; 7 | // @ts-ignore make jest happy 8 | global.TextDecoder = require("util").TextDecoder; 9 | 10 | test("memtable add and get", () => { 11 | const memtable = new MemTable( 12 | new InternalKeyComparator(new BytewiseComparator()), 13 | ); 14 | memtable.add( 15 | 10n, 16 | ValueType.kTypeValue, 17 | new Slice("key"), 18 | new Slice("key1valuevalue1"), 19 | ); 20 | memtable.add( 21 | 20n, 22 | ValueType.kTypeValue, 23 | new Slice("key2"), 24 | new Slice("key2valuevadfa"), 25 | ); 26 | memtable.add( 27 | 30n, 28 | ValueType.kTypeValue, 29 | new Slice("key3"), 30 | new 
Slice("key3value12389fdajj123"), 31 | ); 32 | 33 | expect(!!memtable.get(new LookupKey(new Slice("key"), 1000n))).toBe(true); 34 | 35 | expect(!!memtable.get(new LookupKey(new Slice("key3"), 5n))).toBe(false); 36 | }); 37 | 38 | test("memtable reverse iterator", () => { 39 | const memtable = new MemTable( 40 | new InternalKeyComparator(new BytewiseComparator()), 41 | ); 42 | memtable.add( 43 | 10n, 44 | ValueType.kTypeValue, 45 | new Slice("key"), 46 | new Slice("key1valuevalue1"), 47 | ); 48 | memtable.add( 49 | 20n, 50 | ValueType.kTypeValue, 51 | new Slice("key2"), 52 | new Slice("key2valuevadfa"), 53 | ); 54 | memtable.add( 55 | 30n, 56 | ValueType.kTypeValue, 57 | new Slice("key3"), 58 | new Slice("key3value12389fdajj123"), 59 | ); 60 | 61 | const result = []; 62 | 63 | for (const entry of memtable.iterator(true)) { 64 | result.push(new TextDecoder().decode(entry.value.buffer)); 65 | } 66 | 67 | expect(result).toStrictEqual([ 68 | "key3value12389fdajj123", 69 | "key2valuevadfa", 70 | "key1valuevalue1", 71 | ]); 72 | }); 73 | -------------------------------------------------------------------------------- /tests/repair.test.ts: -------------------------------------------------------------------------------- 1 | import { Database, DBRepairer } from "../port/node"; 2 | import { random } from "../fixtures/random"; 3 | import { createDir, cleanup } from "../fixtures/dbpath"; 4 | import fs from "fs"; 5 | import path from "path"; 6 | import { allocRunner } from "../fixtures/runner"; 7 | import { Buffer } from "../src/Buffer"; 8 | 9 | jest.setTimeout(60000 * 10); 10 | // @ts-ignore make jest happy 11 | global.TextEncoder = require("util").TextEncoder; 12 | 13 | const dbpath1 = createDir(); 14 | cleanup(dbpath1); 15 | 16 | describe("DBRepairer", () => { 17 | test("Create a damaged db and repair it.", async (done) => { 18 | const db = new Database(dbpath1); 19 | const dataset: [string | Buffer, string | Buffer][] = []; 20 | for (let i = 0; i < 10000; i++) { 21 | 
dataset.push(random()); 22 | } 23 | await allocRunner(10, db, dataset); 24 | await db.compactRange( 25 | Buffer.alloc(16).fill(0x00), 26 | Buffer.alloc(16).fill(0xff), 27 | ); 28 | await db.close(); 29 | await fs.promises.unlink(path.resolve(dbpath1, "MANIFEST-000002")); 30 | await fs.promises.unlink(path.resolve(dbpath1, "CURRENT")); 31 | 32 | const repairer = new DBRepairer(dbpath1, { debug: true }); 33 | await expect(repairer.run()).resolves.toBe(undefined); 34 | 35 | done(); 36 | }); 37 | }); 38 | -------------------------------------------------------------------------------- /tests/snapshot.test.ts: -------------------------------------------------------------------------------- 1 | import { SnapshotList } from "../src/Snapshot"; 2 | 3 | test("Snapshot and SnapshotList", () => { 4 | const list = new SnapshotList(); 5 | expect(list.isEmpty()).toEqual(true); 6 | const snap1 = list.insert(10n); 7 | const snap2 = list.insert(20n); 8 | const snap3 = list.insert(30n); 9 | expect(list.newest()._sequenceNumber === 30n); 10 | expect(list.oldest()._sequenceNumber === 10n); 11 | list.delete(snap1); 12 | expect(list.isEmpty()).toEqual(false); 13 | list.delete(snap2); 14 | list.delete(snap3); 15 | expect(list.isEmpty()).toEqual(true); 16 | }); 17 | -------------------------------------------------------------------------------- /tests/sstable.test.ts: -------------------------------------------------------------------------------- 1 | import fs from "fs"; 2 | import { Buffer } from "../src/Buffer"; 3 | import Slice from "../src/Slice"; 4 | import SSTable from "../src/SSTable"; 5 | import SSTableBuilder from "../src/SSTableBuilder"; 6 | import { getTableFilename } from "../src/Filename"; 7 | import { createDir, cleanup } from "../fixtures/dbpath"; 8 | import { random } from "../fixtures/random"; 9 | import { InternalKey, SequenceNumber, ValueType } from "../src/Format"; 10 | import { defaultOptions } from "../src/Options"; 11 | import { NodeEnv } from "../port/node"; 12 | 
// @ts-ignore make jest happy 13 | global.TextEncoder = require("util").TextEncoder; 14 | // @ts-ignore make jest happy 15 | global.TextDecoder = require("util").TextDecoder; 16 | 17 | const dbpath = createDir(); 18 | afterAll(() => cleanup(dbpath)); 19 | 20 | cleanup(dbpath); 21 | 22 | const options = { ...defaultOptions, env: new NodeEnv() }; 23 | 24 | test("sstable", async (done) => { 25 | await fs.promises.mkdir(dbpath, { recursive: true }); 26 | const fd1 = await fs.promises.open(getTableFilename(dbpath, 1), "w"); 27 | const builder = new SSTableBuilder(options, fd1); 28 | 29 | const count = 1000; 30 | let i = 0; 31 | const list = []; 32 | while (i < count) { 33 | list.push(random()); 34 | i++; 35 | } 36 | i = 0; 37 | list.sort((a, b) => 38 | Buffer.compare(Buffer.fromUnknown(a[0]), Buffer.fromUnknown(b[0])) < 0 39 | ? -1 40 | : 1, 41 | ); 42 | 43 | while (i < count) { 44 | const [key, value] = list[i]; 45 | const ikey = new InternalKey( 46 | new Slice(key), 47 | BigInt(i), 48 | ValueType.kTypeValue, 49 | ); 50 | await builder.add(ikey, new Slice(value)); 51 | i++; 52 | } 53 | 54 | await builder.finish(); 55 | 56 | const fd = await fs.promises.open(getTableFilename(dbpath, 1), "r+"); 57 | const table = await SSTable.open(options, fd); 58 | 59 | const listKeys = []; 60 | const listValues = []; 61 | for await (const entry of table.entryIterator()) { 62 | const ikey = InternalKey.from(entry.key); 63 | listKeys.push(new TextDecoder().decode(ikey.userKey.buffer)); 64 | listValues.push(new TextDecoder().decode(entry.value.buffer)); 65 | } 66 | 67 | expect(list.map((pair) => pair[0]).join("|")).toEqual(listKeys.join("|")); 68 | expect(list.map((pair) => pair[1]).join("|")).toEqual(listValues.join("|")); 69 | 70 | await fd.close(); 71 | done(); 72 | }); 73 | 74 | test("sstable reverse iterator", async (done) => { 75 | await fs.promises.mkdir(dbpath, { recursive: true }); 76 | const fd1 = await fs.promises.open(getTableFilename(dbpath, 2), "w"); 77 | const builder = 
new SSTableBuilder(options, fd1); 78 | 79 | const count = 100; 80 | let i = 0; 81 | const list = []; 82 | while (i < count) { 83 | list.push(random()); 84 | i++; 85 | } 86 | i = 0; 87 | list.sort((a, b) => 88 | Buffer.compare(Buffer.fromUnknown(a[0]), Buffer.fromUnknown(b[0])), 89 | ); 90 | 91 | while (i < count) { 92 | const [key, value] = list[i]; 93 | const ikey = new InternalKey( 94 | new Slice(key), 95 | BigInt(i), 96 | ValueType.kTypeValue, 97 | ); 98 | await builder.add(ikey, new Slice(value)); 99 | i++; 100 | } 101 | 102 | await builder.finish(); 103 | 104 | const fd = await fs.promises.open(getTableFilename(dbpath, 2), "r+"); 105 | const table = await SSTable.open(options, fd); 106 | 107 | const listKeys = []; 108 | const listValues = []; 109 | for await (const entry of table.entryIterator(true)) { 110 | const ikey = InternalKey.from(entry.key); 111 | listKeys.push(new TextDecoder().decode(ikey.userKey.buffer)); 112 | listValues.push(new TextDecoder().decode(entry.value.buffer)); 113 | } 114 | 115 | const original = list 116 | .reverse() 117 | .map((pair) => pair[1]) 118 | .join("|"); 119 | 120 | // console.log(original) 121 | // console.log(listValues.join('|')) 122 | expect(listValues.join("|")).toEqual(original); 123 | await fd.close(); 124 | done(); 125 | }); 126 | -------------------------------------------------------------------------------- /tests/status.test.ts: -------------------------------------------------------------------------------- 1 | import Status from "../src/Status"; 2 | // @ts-ignore make jest happy 3 | global.TextEncoder = require("util").TextEncoder; 4 | 5 | test("status", async () => { 6 | const status1 = new Status( 7 | new Promise((resolve, reject) => 8 | setTimeout(() => { 9 | reject(new Error("error1")); 10 | }, 1000), 11 | ), 12 | ); 13 | expect(await status1.ok()).toBe(false); 14 | expect(status1.message()).toBe("error1"); 15 | }); 16 | -------------------------------------------------------------------------------- 
/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "esnext", 4 | "lib": [ 5 | "es2019", 6 | "dom", 7 | "esnext.asynciterable", 8 | "dom.iterable", 9 | "esnext" 10 | ], 11 | "outDir": "./build", 12 | "declaration": true, 13 | "allowJs": false, 14 | "skipLibCheck": true, 15 | "esModuleInterop": true, 16 | "allowSyntheticDefaultImports": true, 17 | "strict": true, 18 | "forceConsistentCasingInFileNames": true, 19 | "module": "amd", 20 | "moduleResolution": "node", 21 | "downlevelIteration": true, 22 | "resolveJsonModule": true, 23 | "isolatedModules": false, 24 | "emitDeclarationOnly": true, 25 | "noEmit": false 26 | }, 27 | "include": [ 28 | "src", 29 | "port/node", 30 | ], 31 | "exclude": [ 32 | "tests", 33 | "node_modules" 34 | ] 35 | } --------------------------------------------------------------------------------